id | text | dataset_id
---|---|---
/f-lib-0.1.0.tar.gz/f-lib-0.1.0/.github/ISSUE_TEMPLATE/bug_report.md
|
---
name: Bug report
about: Create a report to help us improve
title: "[BUG] report"
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here. Please provide links to code samples where applicable.
|
PypiClean
|
/pulumi_google_native-0.31.2a1689827148.tar.gz/pulumi_google_native-0.31.2a1689827148/pulumi_google_native/dataproc/v1/cluster.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ClusterArgs', 'Cluster']
@pulumi.input_type
class ClusterArgs:
def __init__(__self__, *,
cluster_name: pulumi.Input[str],
region: pulumi.Input[str],
action_on_failed_primary_workers: Optional[pulumi.Input[str]] = None,
config: Optional[pulumi.Input['ClusterConfigArgs']] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
virtual_cluster_config: Optional[pulumi.Input['VirtualClusterConfigArgs']] = None):
"""
The set of arguments for constructing a Cluster resource.
:param pulumi.Input[str] cluster_name: The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.
:param pulumi.Input[str] action_on_failed_primary_workers: Optional. Failure action when primary worker creation fails.
:param pulumi.Input['ClusterConfigArgs'] config: Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
:param pulumi.Input[str] project: The Google Cloud Platform project ID that the cluster belongs to.
:param pulumi.Input[str] request_id: Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
:param pulumi.Input['VirtualClusterConfigArgs'] virtual_cluster_config: Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
"""
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "region", region)
if action_on_failed_primary_workers is not None:
pulumi.set(__self__, "action_on_failed_primary_workers", action_on_failed_primary_workers)
if config is not None:
pulumi.set(__self__, "config", config)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if project is not None:
pulumi.set(__self__, "project", project)
if request_id is not None:
pulumi.set(__self__, "request_id", request_id)
if virtual_cluster_config is not None:
pulumi.set(__self__, "virtual_cluster_config", virtual_cluster_config)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter
def region(self) -> pulumi.Input[str]:
return pulumi.get(self, "region")
@region.setter
def region(self, value: pulumi.Input[str]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="actionOnFailedPrimaryWorkers")
def action_on_failed_primary_workers(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Failure action when primary worker creation fails.
"""
return pulumi.get(self, "action_on_failed_primary_workers")
@action_on_failed_primary_workers.setter
def action_on_failed_primary_workers(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "action_on_failed_primary_workers", value)
@property
@pulumi.getter
def config(self) -> Optional[pulumi.Input['ClusterConfigArgs']]:
"""
Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
"""
return pulumi.get(self, "config")
@config.setter
def config(self, value: Optional[pulumi.Input['ClusterConfigArgs']]):
pulumi.set(self, "config", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The Google Cloud Platform project ID that the cluster belongs to.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="requestId")
def request_id(self) -> Optional[pulumi.Input[str]]:
"""
Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
"""
return pulumi.get(self, "request_id")
@request_id.setter
def request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_id", value)
@property
@pulumi.getter(name="virtualClusterConfig")
def virtual_cluster_config(self) -> Optional[pulumi.Input['VirtualClusterConfigArgs']]:
"""
Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
"""
return pulumi.get(self, "virtual_cluster_config")
@virtual_cluster_config.setter
def virtual_cluster_config(self, value: Optional[pulumi.Input['VirtualClusterConfigArgs']]):
pulumi.set(self, "virtual_cluster_config", value)
class Cluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action_on_failed_primary_workers: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
config: Optional[pulumi.Input[pulumi.InputType['ClusterConfigArgs']]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
virtual_cluster_config: Optional[pulumi.Input[pulumi.InputType['VirtualClusterConfigArgs']]] = None,
__props__=None):
"""
Creates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action_on_failed_primary_workers: Optional. Failure action when primary worker creation fails.
:param pulumi.Input[str] cluster_name: The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.
:param pulumi.Input[pulumi.InputType['ClusterConfigArgs']] config: Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
:param pulumi.Input[str] project: The Google Cloud Platform project ID that the cluster belongs to.
:param pulumi.Input[str] request_id: Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
:param pulumi.Input[pulumi.InputType['VirtualClusterConfigArgs']] virtual_cluster_config: Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ClusterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param ClusterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action_on_failed_primary_workers: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
config: Optional[pulumi.Input[pulumi.InputType['ClusterConfigArgs']]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
request_id: Optional[pulumi.Input[str]] = None,
virtual_cluster_config: Optional[pulumi.Input[pulumi.InputType['VirtualClusterConfigArgs']]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ClusterArgs.__new__(ClusterArgs)
__props__.__dict__["action_on_failed_primary_workers"] = action_on_failed_primary_workers
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__.__dict__["cluster_name"] = cluster_name
__props__.__dict__["config"] = config
__props__.__dict__["labels"] = labels
__props__.__dict__["project"] = project
if region is None and not opts.urn:
raise TypeError("Missing required property 'region'")
__props__.__dict__["region"] = region
__props__.__dict__["request_id"] = request_id
__props__.__dict__["virtual_cluster_config"] = virtual_cluster_config
__props__.__dict__["cluster_uuid"] = None
__props__.__dict__["metrics"] = None
__props__.__dict__["status"] = None
__props__.__dict__["status_history"] = None
replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["project", "region"])
opts = pulumi.ResourceOptions.merge(opts, replace_on_changes)
super(Cluster, __self__).__init__(
'google-native:dataproc/v1:Cluster',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Cluster':
"""
Get an existing Cluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ClusterArgs.__new__(ClusterArgs)
__props__.__dict__["action_on_failed_primary_workers"] = None
__props__.__dict__["cluster_name"] = None
__props__.__dict__["cluster_uuid"] = None
__props__.__dict__["config"] = None
__props__.__dict__["labels"] = None
__props__.__dict__["metrics"] = None
__props__.__dict__["project"] = None
__props__.__dict__["region"] = None
__props__.__dict__["request_id"] = None
__props__.__dict__["status"] = None
__props__.__dict__["status_history"] = None
__props__.__dict__["virtual_cluster_config"] = None
return Cluster(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="actionOnFailedPrimaryWorkers")
def action_on_failed_primary_workers(self) -> pulumi.Output[Optional[str]]:
"""
Optional. Failure action when primary worker creation fails.
"""
return pulumi.get(self, "action_on_failed_primary_workers")
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Output[str]:
"""
The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.
"""
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="clusterUuid")
def cluster_uuid(self) -> pulumi.Output[str]:
"""
A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.
"""
return pulumi.get(self, "cluster_uuid")
@property
@pulumi.getter
def config(self) -> pulumi.Output['outputs.ClusterConfigResponse']:
"""
Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
"""
return pulumi.get(self, "config")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Mapping[str, str]]:
"""
Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def metrics(self) -> pulumi.Output['outputs.ClusterMetricsResponse']:
"""
Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.
"""
return pulumi.get(self, "metrics")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
return pulumi.get(self, "region")
@property
@pulumi.getter(name="requestId")
def request_id(self) -> pulumi.Output[Optional[str]]:
"""
Optional. A unique ID used to identify the request. If the server receives two CreateClusterRequest (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s with the same id, then the second request will be ignored and the first google.longrunning.Operation created and stored in the backend is returned. It is recommended to always set this value to a UUID (https://en.wikipedia.org/wiki/Universally_unique_identifier). The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters.
"""
return pulumi.get(self, "request_id")
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.ClusterStatusResponse']:
"""
Cluster status.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="statusHistory")
def status_history(self) -> pulumi.Output[Sequence['outputs.ClusterStatusResponse']]:
"""
The previous cluster status.
"""
return pulumi.get(self, "status_history")
@property
@pulumi.getter(name="virtualClusterConfig")
def virtual_cluster_config(self) -> pulumi.Output['outputs.VirtualClusterConfigResponse']:
"""
Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
"""
return pulumi.get(self, "virtual_cluster_config")
|
PypiClean
|
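For orientation, a minimal Pulumi program using the generated Cluster resource above might look like the sketch below. The project ID, region, and label values are placeholders, and the empty ClusterConfigArgs merely stands in for a real cluster config (its fields live in this package's ._inputs module and are not shown here); read it as an illustration of the constructor signature rather than a working deployment.
import pulumi
import pulumi_google_native.dataproc.v1 as dataproc
# Illustrative only: all values below are placeholders.
cluster = dataproc.Cluster(
    "example-cluster",
    cluster_name="example-cluster",    # must be unique within the project
    project="my-gcp-project",          # placeholder project ID
    region="us-central1",              # required; changing it replaces the cluster
    labels={"env": "dev"},
    # Exactly one of `config` or `virtual_cluster_config` must be supplied;
    # the fields of ClusterConfigArgs come from ._inputs and are omitted here.
    config=dataproc.ClusterConfigArgs(),
)
pulumi.export("cluster_uuid", cluster.cluster_uuid)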
/cic_cli-0.5.5-py3-none-any.whl/cic/contract/processor.py
|
import logging
logg = logging.getLogger(__name__)
class ContractProcessor:
"""Drives the serialization and publishing of contracts, proofs and metadata for the token.
:param proof: Proof object to publish
:type proof: cic.proof.Proof
:param attachment: Attachment object to publish
:type attachment: cic.attachment.Attachment
:param metadata: Metadata object to publish
:type metadata: cic.meta.Meta
:param writer: Writer interface receiving the output of the processor
:type writer: cic.writers.OutputWriter
:param extensions: Extension contexts to publish to
:type extensions: list of cic.extension.Extension
"""
def __init__(
self,
proof=None,
attachment=None,
metadata=None,
outputs_writer=None,
extensions=None,
):
self.token_address = None
self.extensions = extensions if extensions is not None else []
self.cores = {
"metadata": metadata,
"attachment": attachment,
"proof": proof,
}
self.outputs = []
self.__outputs_writer = outputs_writer
def writer(self):
"""Return the writer instance that the process is using.
:rtype: cic.writers.OutputWriter
:return: Writer
"""
return self.__outputs_writer
def get_outputs(self):
"""Return all written outputs.
This will return nothing unless the process method has been executed.
:rtype: list
:return: Outputs
"""
outputs = []
for ext in self.extensions:
outputs += ext.outputs
outputs += self.outputs
return outputs
def process(self, writer=None):
"""Serializes and publishes all token data.
Calls the process method on each extension. For each extension, the process method of the attachment, proof and metadata objects is called, in that order, for any of them that were provided at processor object instantiation.
All output written to the publish writer will also be cached so that it can subsequently be recalled using the get_outputs method.
:param writer: Writer to use for publishing.
:type writer: cic.writers.OutputWriter
"""
tasks = [
"attachment",
"proof",
"metadata",
]
for ext in self.extensions:
(token_address, token_symbol) = ext.process()
for task in tasks:
a = self.cores.get(task)
if a is None:
logg.debug(f'skipping missing task receiver "{task}"')
continue
logg.debug(f'Processing "{ext}:{task}"')
v = a.process(
token_address=token_address,
token_symbol=token_symbol,
writer=self.__outputs_writer,
)
self.outputs.append(v)
|
PypiClean
|
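To make the control flow above concrete, here is a small sketch of driving ContractProcessor with duck-typed stand-ins. The real cic Proof/Meta/Attachment/Extension classes are not shown in this file, so the stub classes below are hypothetical and exist only to illustrate the call sequence: each extension's process() yields a token address and symbol, which are then passed to each core object's process() in attachment, proof, metadata order.
from cic.contract.processor import ContractProcessor

class StubCore:
    # Stand-in for a proof/metadata/attachment object (hypothetical).
    def __init__(self, name):
        self.name = name
    def process(self, token_address=None, token_symbol=None, writer=None):
        return f'{self.name}:{token_symbol}@{token_address}'

class StubExtension:
    # Stand-in for an extension context; `outputs` is read by get_outputs().
    outputs = []
    def process(self):
        return ('0x0000000000000000000000000000000000000000', 'FOO')

processor = ContractProcessor(
    proof=StubCore('proof'),
    attachment=StubCore('attachment'),
    metadata=StubCore('metadata'),
    extensions=[StubExtension()],
)
processor.process()
print(processor.get_outputs())  # outputs appear in attachment, proof, metadata order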
/vultr-python-client-1.0.2.tar.gz/vultr-python-client-1.0.2/vultr_python_client/paths/domains_dns_domain_soa/get.py
|
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from vultr_python_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from vultr_python_client import schemas # noqa: F401
from vultr_python_client.model.dns_soa import DnsSoa
from . import path
# Path params
DnsDomainSchema = schemas.StrSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'dns-domain': typing.Union[DnsDomainSchema, str, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_dns_domain = api_client.PathParameter(
name="dns-domain",
style=api_client.ParameterStyle.SIMPLE,
schema=DnsDomainSchema,
required=True,
)
_auth = [
'APIKey',
]
class SchemaFor200ResponseBodyApplicationJson(
schemas.DictSchema
):
class MetaOapg:
class properties:
@staticmethod
def dns_soa() -> typing.Type['DnsSoa']:
return DnsSoa
__annotations__ = {
"dns_soa": dns_soa,
}
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["dns_soa"]) -> 'DnsSoa': ...
@typing.overload
def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
def __getitem__(self, name: typing.Union[typing_extensions.Literal["dns_soa", ], str]):
# dict_instance[name] accessor
return super().__getitem__(name)
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["dns_soa"]) -> typing.Union['DnsSoa', schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["dns_soa", ], str]):
return super().get_item_oapg(name)
def __new__(
cls,
*_args: typing.Union[dict, frozendict.frozendict, ],
dns_soa: typing.Union['DnsSoa', schemas.Unset] = schemas.unset,
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
) -> 'SchemaFor200ResponseBodyApplicationJson':
return super().__new__(
cls,
*_args,
dns_soa=dns_soa,
_configuration=_configuration,
**kwargs,
)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: schemas.Unset = schemas.unset
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
)
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: schemas.Unset = schemas.unset
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
)
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: schemas.Unset = schemas.unset
headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
response_cls=ApiResponseFor404,
)
_status_code_to_response = {
'200': _response_for_200,
'400': _response_for_400,
'401': _response_for_401,
'404': _response_for_404,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _get_dns_domain_soa_oapg(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _get_dns_domain_soa_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _get_dns_domain_soa_oapg(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _get_dns_domain_soa_oapg(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Get SOA information
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_dns_domain,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(
status=response.status,
reason=response.reason,
api_response=api_response
)
return api_response
class GetDnsDomainSoa(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def get_dns_domain_soa(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get_dns_domain_soa(
self,
skip_deserialization: typing_extensions.Literal[True],
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get_dns_domain_soa(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get_dns_domain_soa(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._get_dns_domain_soa_oapg(
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
skip_deserialization: typing_extensions.Literal[True],
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._get_dns_domain_soa_oapg(
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
|
PypiClean
|
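A minimal call through the ApiForget class above might look like the following sketch. Note the assumptions: that the package exposes the conventional openapi-generator Configuration and ApiClient entry points, and that Vultr's 'APIKey' security scheme is a bearer token supplied via access_token; check the package README for the exact authentication setup.
import vultr_python_client

# Assumed entry points (conventional for this flavor of openapi-generator client).
configuration = vultr_python_client.Configuration(access_token='YOUR_VULTR_API_KEY')
with vultr_python_client.ApiClient(configuration) as client:
    api = ApiForget(client)  # class defined in the module above
    response = api.get(path_params={'dns-domain': 'example.com'})
    print(response.body)     # SchemaFor200ResponseBodyApplicationJson instance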
/dolbyio-rest-apis-cli-3.6.2.tar.gz/dolbyio-rest-apis-cli-3.6.2/client/src/dolbyio_rest_apis/media/diagnose.py
|
from typing import Optional
from dolbyio_rest_apis.core.urls import get_mapi_url
from dolbyio_rest_apis.media.internal.http_context import MediaHttpContext
from dolbyio_rest_apis.media.models.diagnose_response import DiagnoseJob
async def start(
access_token: str,
job_content: str,
) -> Optional[str]:
r"""
Starts Diagnosing.
The Dolby.io Media Analyze Audio Diagnose API provides a quick diagnosis for discovering audio quality issues with your media.
See: https://docs.dolby.io/media-apis/reference/media-diagnose-post
Beta API
This API is being made available as an early preview.
If you have feedback on how you'd like to use the API please reach out to share your feedback with our team.
https://dolby.io/contact
Args:
access_token: Access token to use for authentication.
job_content: Content of the job description as a JSON payload.
You can find the definition at this URL: https://docs.dolby.io/media-apis/reference/media-diagnose-post
Returns:
The job identifier.
Raises:
HttpRequestError: If a client error occurred.
HTTPError: If an HTTP error occurred.
"""
print('''Beta API
This API is being made available as an early preview.
If you have feedback on how you\'d like to use the API please reach out to share your feedback with our team.
https://dolby.io/contact''')
async with MediaHttpContext() as http_context:
json_response = await http_context.requests_post(
access_token=access_token,
url=f'{get_mapi_url()}/media/diagnose',
payload=job_content,
)
if 'job_id' in json_response:
return json_response['job_id']
async def get_results(
access_token: str,
job_id: str,
) -> DiagnoseJob:
r"""
Gets Diagnose Results
For a given job_id, this method will check if the processing task has completed and return the diagnosis results.
When the status is Success you'll be able to retrieve your result from the output location you provided in the original POST.
See: https://docs.dolby.io/media-apis/reference/media-diagnose-get
Beta API
This API is being made available as an early preview.
If you have feedback on how you'd like to use the API please reach out to share your feedback with our team.
https://dolby.io/contact
Args:
access_token: Access token to use for authentication.
job_id: The job identifier.
Returns:
A :class:`DiagnoseJob` object.
Raises:
HttpRequestError: If a client error occurred.
HTTPError: If an HTTP error occurred.
"""
params = {
'job_id': job_id
}
async with MediaHttpContext() as http_context:
json_response = await http_context.requests_get(
access_token=access_token,
url=f'{get_mapi_url()}/media/diagnose',
params=params
)
return DiagnoseJob(job_id, json_response)
|
PypiClean
|
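Since both helpers above are coroutines, a caller would typically wrap them in an asyncio entry point. The sketch below is illustrative only: the access token is a placeholder and the job payload is a guess at the shape documented at the media-diagnose-post reference linked above, not a verified example.
import asyncio
import json
from dolbyio_rest_apis.media import diagnose

async def main():
    # Placeholder job description; see the media-diagnose-post reference for the real schema.
    job = json.dumps({'input': 'dlb://in/example.wav'})
    job_id = await diagnose.start('YOUR_API_TOKEN', job)
    result = await diagnose.get_results('YOUR_API_TOKEN', job_id)
    print(result)

asyncio.run(main())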
/rethinkdb_next-2.2.0.post1.tar.gz/rethinkdb_next-2.2.0.post1/rethinkdb/ast.py
|
__all__ = ['expr', 'RqlQuery', 'ReQLEncoder', 'ReQLDecoder']
import datetime
import collections
import base64
import binascii
import json as py_json
import threading
from .errors import ReqlDriverError, ReqlDriverCompileError, QueryPrinter, T
from . import ql2_pb2 as p
pTerm = p.Term.TermType
try:
unicode
except NameError:
unicode = str
try:
xrange
except NameError:
xrange = range
try:
{}.iteritems
dict_items = lambda d: d.iteritems()
except AttributeError:
dict_items = lambda d: d.items()
class Repl(object):
threadData = threading.local()
replActive = False
@classmethod
def get(cls):
if 'repl' in cls.threadData.__dict__:
return cls.threadData.repl
else:
return None
@classmethod
def set(cls, conn):
cls.threadData.repl = conn
cls.replActive = True
# This is both an external function and one used extensively
# internally to coerce Python values to RQL types
def expr(val, nesting_depth=20):
'''
Convert a Python primitive into a RQL primitive value
'''
if not isinstance(nesting_depth, int):
raise ReqlDriverCompileError("Second argument to `r.expr` must be a number.")
if nesting_depth <= 0:
raise ReqlDriverCompileError("Nesting depth limit exceeded.")
if isinstance(val, RqlQuery):
return val
elif isinstance(val, collections.Callable):
return Func(val)
elif isinstance(val, (datetime.datetime, datetime.date)):
if not hasattr(val, 'tzinfo') or not val.tzinfo:
raise ReqlDriverCompileError("""Cannot convert %s to ReQL time object
without timezone information. You can add timezone information with
the third party module \"pytz\" or by constructing ReQL compatible
timezone values with r.make_timezone(\"[+-]HH:MM\"). Alternatively,
use one of ReQL's builtin time constructors, r.now, r.time,
or r.iso8601.
""" % (type(val).__name__))
return ISO8601(val.isoformat())
elif isinstance(val, RqlBinary):
return Binary(val)
elif isinstance(val, (str, unicode)):
return Datum(val)
elif isinstance(val, bytes):
return Binary(val)
elif isinstance(val, collections.Mapping):
# MakeObj doesn't take the dict as a keyword args to avoid
# conflicting with the `self` parameter.
obj = {}
for k, v in dict_items(val):
obj[k] = expr(v, nesting_depth - 1)
return MakeObj(obj)
elif isinstance(val, collections.Iterable):
val = [expr(v, nesting_depth - 1) for v in val]
return MakeArray(*val)
else:
return Datum(val)
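# A few illustrative conversions performed by expr() above:
#   expr(1)               -> Datum(1)
#   expr([1, 2])          -> MakeArray(Datum(1), Datum(2))
#   expr({'a': 1})        -> MakeObj({'a': Datum(1)})
#   expr(lambda x: x + 1) -> Func wrapping the lambda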
class RqlQuery(object):
# Instantiate this AST node with the given pos and opt args
def __init__(self, *args, **optargs):
self._args = [expr(e) for e in args]
self.optargs = {}
for k, v in dict_items(optargs):
self.optargs[k] = expr(v)
# Send this query to the server to be executed
def run(self, c=None, **global_optargs):
if c is None:
c = Repl.get()
if c is None:
if Repl.replActive:
raise ReqlDriverError("RqlQuery.run must be given" +
" a connection to run on. A default connection" +
" has been set with `repl()` on another thread," +
" but not this one.")
else:
raise ReqlDriverError("RqlQuery.run must be given" +
" a connection to run on.")
return c._start(self, **global_optargs)
def __str__(self):
qp = QueryPrinter(self)
return qp.print_query()
def __repr__(self):
return "<RqlQuery instance: %s >" % str(self)
# Compile this query to a json-serializable object
def build(self):
res = [self.tt, self._args]
if len(self.optargs) > 0:
res.append(self.optargs)
return res
# The following are all operators and methods that operate on
# Rql queries to build up more complex operations
# Comparison operators
def __eq__(self, other):
return Eq(self, other)
def __ne__(self, other):
return Ne(self, other)
def __lt__(self, other):
return Lt(self, other)
def __le__(self, other):
return Le(self, other)
def __gt__(self, other):
return Gt(self, other)
def __ge__(self, other):
return Ge(self, other)
# Numeric operators
def __invert__(self):
return Not(self)
def __add__(self, other):
return Add(self, other)
def __radd__(self, other):
return Add(other, self)
def __sub__(self, other):
return Sub(self, other)
def __rsub__(self, other):
return Sub(other, self)
def __mul__(self, other):
return Mul(self, other)
def __rmul__(self, other):
return Mul(other, self)
def __div__(self, other):
return Div(self, other)
def __rdiv__(self, other):
return Div(other, self)
def __truediv__(self, other):
return Div(self, other)
def __rtruediv__(self, other):
return Div(other, self)
def __mod__(self, other):
return Mod(self, other)
def __rmod__(self, other):
return Mod(other, self)
def __and__(self, other):
query = And(self, other)
query.set_infix()
return query
def __rand__(self, other):
query = And(other, self)
query.set_infix()
return query
def __or__(self, other):
query = Or(self, other)
query.set_infix()
return query
def __ror__(self, other):
query = Or(other, self)
query.set_infix()
return query
# Non-operator versions of the above
def eq(self, *args):
return Eq(self, *args)
def ne(self, *args):
return Ne(self, *args)
def lt(self, *args):
return Lt(self, *args)
def le(self, *args):
return Le(self, *args)
def gt(self, *args):
return Gt(self, *args)
def ge(self, *args):
return Ge(self, *args)
def add(self, *args):
return Add(self, *args)
def sub(self, *args):
return Sub(self, *args)
def mul(self, *args):
return Mul(self, *args)
def div(self, *args):
return Div(self, *args)
def mod(self, *args):
return Mod(self, *args)
def floor(self, *args):
return Floor(self, *args)
def ceil(self, *args):
return Ceil(self, *args)
def round(self, *args):
return Round(self, *args)
def and_(self, *args):
return And(self, *args)
def or_(self, *args):
return Or(self, *args)
def not_(self, *args):
return Not(self, *args)
# N.B. Cannot use 'in' operator because it must return a boolean
def contains(self, *args):
return Contains(self, *[func_wrap(arg) for arg in args])
def has_fields(self, *args):
return HasFields(self, *args)
def with_fields(self, *args):
return WithFields(self, *args)
def keys(self, *args):
return Keys(self, *args)
def values(self, *args):
return Values(self, *args)
def changes(self, *args, **kwargs):
return Changes(self, *args, **kwargs)
# Polymorphic object/sequence operations
def pluck(self, *args):
return Pluck(self, *args)
def without(self, *args):
return Without(self, *args)
def do(self, *args):
return FunCall(self, *args)
def default(self, *args):
return Default(self, *args)
def update(self, *args, **kwargs):
return Update(self, *[func_wrap(arg) for arg in args], **kwargs)
def replace(self, *args, **kwargs):
return Replace(self, *[func_wrap(arg) for arg in args], **kwargs)
def delete(self, *args, **kwargs):
return Delete(self, *args, **kwargs)
# Rql type inspection
def coerce_to(self, *args):
return CoerceTo(self, *args)
def ungroup(self, *args):
return Ungroup(self, *args)
def type_of(self, *args):
return TypeOf(self, *args)
def merge(self, *args):
return Merge(self, *[func_wrap(arg) for arg in args])
def append(self, *args):
return Append(self, *args)
def prepend(self, *args):
return Prepend(self, *args)
def difference(self, *args):
return Difference(self, *args)
def set_insert(self, *args):
return SetInsert(self, *args)
def set_union(self, *args):
return SetUnion(self, *args)
def set_intersection(self, *args):
return SetIntersection(self, *args)
def set_difference(self, *args):
return SetDifference(self, *args)
# Operator used for get attr / nth / slice. Non-operator versions below
# in cases of ambiguity
def __getitem__(self, index):
if isinstance(index, slice):
if index.stop:
return Slice(self, index.start or 0, index.stop,
bracket_operator=True)
else:
return Slice(self, index.start or 0, -1,
right_bound='closed', bracket_operator=True)
else:
return Bracket(self, index, bracket_operator=True)
def __iter__(*args, **kwargs):
raise ReqlDriverError(
"__iter__ called on an RqlQuery object.\n"
"To iterate over the results of a query, call run first.\n"
"To iterate inside a query, use map or for_each.")
def get_field(self, *args):
return GetField(self, *args)
def nth(self, *args):
return Nth(self, *args)
def to_json(self, *args):
return ToJsonString(self, *args)
def to_json_string(self, *args):
return ToJsonString(self, *args)
def match(self, *args):
return Match(self, *args)
def split(self, *args):
return Split(self, *args)
def upcase(self, *args):
return Upcase(self, *args)
def downcase(self, *args):
return Downcase(self, *args)
def is_empty(self, *args):
return IsEmpty(self, *args)
def offsets_of(self, *args):
return OffsetsOf(self, *[func_wrap(arg) for arg in args])
def slice(self, *args, **kwargs):
return Slice(self, *args, **kwargs)
def skip(self, *args):
return Skip(self, *args)
def limit(self, *args):
return Limit(self, *args)
def reduce(self, *args):
return Reduce(self, *[func_wrap(arg) for arg in args])
def sum(self, *args):
return Sum(self, *[func_wrap(arg) for arg in args])
def avg(self, *args):
return Avg(self, *[func_wrap(arg) for arg in args])
def min(self, *args, **kwargs):
return Min(self, *[func_wrap(arg) for arg in args], **kwargs)
def max(self, *args, **kwargs):
return Max(self, *[func_wrap(arg) for arg in args], **kwargs)
def map(self, *args):
if len(args) > 0:
# `func_wrap` only the last argument
return Map(self, *(args[:-1] + (func_wrap(args[-1]), )))
else:
return Map(self)
def fold(self, *args, **kwargs):
if len(args) > 0:
# `func_wrap` only the last argument before optional arguments
# Also `func_wrap` keyword arguments
# Nice syntax not supported by python2.6
kwfuncargs = {}
for arg_name in kwargs:
kwfuncargs[arg_name] = func_wrap(kwargs[arg_name])
return Fold(self, *(args[:-1] + (func_wrap(args[-1]), )), **kwfuncargs)
else:
return Fold(self)
def filter(self, *args, **kwargs):
return Filter(self, *[func_wrap(arg) for arg in args], **kwargs)
def concat_map(self, *args):
return ConcatMap(self, *[func_wrap(arg) for arg in args])
def order_by(self, *args, **kwargs):
args = [arg if isinstance(arg, (Asc, Desc)) else func_wrap(arg)
for arg in args]
return OrderBy(self, *args, **kwargs)
def between(self, *args, **kwargs):
return Between(self, *args, **kwargs)
def distinct(self, *args, **kwargs):
return Distinct(self, *args, **kwargs)
# NB: Can't overload __len__ because Python doesn't
# allow us to return a non-integer
def count(self, *args):
return Count(self, *[func_wrap(arg) for arg in args])
def union(self, *args, **kwargs):
func_kwargs = {}
for key in kwargs:
func_kwargs[key] = func_wrap(kwargs[key])
return Union(self, *args, **func_kwargs)
def inner_join(self, *args):
return InnerJoin(self, *args)
def outer_join(self, *args):
return OuterJoin(self, *args)
def eq_join(self, *args, **kwargs):
return EqJoin(self, *[func_wrap(arg) for arg in args], **kwargs)
def zip(self, *args):
return Zip(self, *args)
def group(self, *args, **kwargs):
return Group(self, *[func_wrap(arg) for arg in args], **kwargs)
def branch(self, *args):
return Branch(self, *args)
def for_each(self, *args):
return ForEach(self, *[func_wrap(arg) for arg in args])
def info(self, *args):
return Info(self, *args)
# Array only operations
def insert_at(self, *args):
return InsertAt(self, *args)
def splice_at(self, *args):
return SpliceAt(self, *args)
def delete_at(self, *args):
return DeleteAt(self, *args)
def change_at(self, *args):
return ChangeAt(self, *args)
def sample(self, *args):
return Sample(self, *args)
# Time support
def to_iso8601(self, *args):
return ToISO8601(self, *args)
def to_epoch_time(self, *args):
return ToEpochTime(self, *args)
def during(self, *args, **kwargs):
return During(self, *args, **kwargs)
def date(self, *args):
return Date(self, *args)
def time_of_day(self, *args):
return TimeOfDay(self, *args)
def timezone(self, *args):
return Timezone(self, *args)
def year(self, *args):
return Year(self, *args)
def month(self, *args):
return Month(self, *args)
def day(self, *args):
return Day(self, *args)
def day_of_week(self, *args):
return DayOfWeek(self, *args)
def day_of_year(self, *args):
return DayOfYear(self, *args)
def hours(self, *args):
return Hours(self, *args)
def minutes(self, *args):
return Minutes(self, *args)
def seconds(self, *args):
return Seconds(self, *args)
def in_timezone(self, *args):
return InTimezone(self, *args)
# Geospatial support
def to_geojson(self, *args):
return ToGeoJson(self, *args)
def distance(self, *args, **kwargs):
return Distance(self, *args, **kwargs)
def intersects(self, *args):
return Intersects(self, *args)
def includes(self, *args):
return Includes(self, *args)
def fill(self, *args):
return Fill(self, *args)
def polygon_sub(self, *args):
return PolygonSub(self, *args)
# These classes define how nodes are printed by overloading `compose`
def needs_wrap(arg):
return isinstance(arg, (Datum, MakeArray, MakeObj))
class RqlBoolOperQuery(RqlQuery):
def __init__(self, *args, **optargs):
self.infix = False
RqlQuery.__init__(self, *args, **optargs)
def set_infix(self):
self.infix = True
def compose(self, args, optargs):
t_args = [T('r.expr(', args[i], ')')
if needs_wrap(self._args[i]) else args[i]
for i in xrange(len(args))]
if self.infix:
return T('(', T(*t_args, intsp=[' ', self.st_infix, ' ']), ')')
else:
return T('r.', self.st, '(', T(*t_args, intsp=', '), ')')
class RqlBiOperQuery(RqlQuery):
def compose(self, args, optargs):
t_args = [T('r.expr(', args[i], ')')
if needs_wrap(self._args[i]) else args[i]
for i in xrange(len(args))]
return T('(', T(*t_args, intsp=[' ', self.st, ' ']), ')')
class RqlBiCompareOperQuery(RqlBiOperQuery):
def __init__(self, *args, **optargs):
RqlBiOperQuery.__init__(self, *args, **optargs)
for arg in args:
try:
if arg.infix:
err = (
"Calling '%s' on result of infix bitwise operator:\n"
"%s.\n"
"This is almost always a precedence error.\n"
"Note that `a < b | b < c` <==> `a < (b | b) < c`.\n"
"If you really want this behavior, use `.or_` or "
"`.and_` instead.")
raise ReqlDriverCompileError(err %
(self.st,
QueryPrinter(self).print_query()))
except AttributeError:
pass # No infix attribute, so not possible to be an infix bool operator
class RqlTopLevelQuery(RqlQuery):
def compose(self, args, optargs):
args.extend([T(k, '=', v) for k, v in dict_items(optargs)])
return T('r.', self.st, '(', T(*(args), intsp=', '), ')')
class RqlMethodQuery(RqlQuery):
def compose(self, args, optargs):
if len(args) == 0:
return T('r.', self.st, '()')
if needs_wrap(self._args[0]):
args[0] = T('r.expr(', args[0], ')')
restargs = args[1:]
restargs.extend([T(k, '=', v) for k, v in dict_items(optargs)])
restargs = T(*restargs, intsp=', ')
return T(args[0], '.', self.st, '(', restargs, ')')
class RqlBracketQuery(RqlMethodQuery):
def __init__(self, *args, **optargs):
if 'bracket_operator' in optargs:
self.bracket_operator = optargs['bracket_operator']
del optargs['bracket_operator']
else:
self.bracket_operator = False
RqlMethodQuery.__init__(self, *args, **optargs)
def compose(self, args, optargs):
if self.bracket_operator:
if needs_wrap(self._args[0]):
args[0] = T('r.expr(', args[0], ')')
return T(args[0], '[', T(*args[1:], intsp=[',']), ']')
else:
return RqlMethodQuery.compose(self, args, optargs)
class RqlTzinfo(datetime.tzinfo):
def __init__(self, offsetstr):
hours, minutes = map(int, offsetstr.split(':'))
self.offsetstr = offsetstr
self.delta = datetime.timedelta(hours=hours, minutes=minutes)
def __getinitargs__(self):
return (self.offsetstr,)
def __copy__(self):
return RqlTzinfo(self.offsetstr)
def __deepcopy__(self, memo):
return RqlTzinfo(self.offsetstr)
def utcoffset(self, dt):
return self.delta
def tzname(self, dt):
return self.offsetstr
def dst(self, dt):
return datetime.timedelta(0)
# Python only allows immutable built-in types to be hashed, such as
# for keys in a dict This means we can't use lists or dicts as keys in
# grouped data objects, so we convert them to tuples and frozensets,
# respectively. This may make it a little harder for users to work
# with converted grouped data, unless they do a simple iteration over
# the result
def recursively_make_hashable(obj):
if isinstance(obj, list):
return tuple([recursively_make_hashable(i) for i in obj])
elif isinstance(obj, dict):
return frozenset([(k, recursively_make_hashable(v))
for k, v in dict_items(obj)])
return obj
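# For example (illustrative): recursively_make_hashable([1, {'a': 2}]) returns
# (1, frozenset({('a', 2)})), which can then be used as a dictionary key.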
class ReQLEncoder(py_json.JSONEncoder):
'''
Default JSONEncoder subclass to handle query conversion.
'''
def __init__(self):
py_json.JSONEncoder.__init__(self, ensure_ascii=False, allow_nan=False,
check_circular=False, separators=(',', ':'))
def default(self, obj):
if isinstance(obj, RqlQuery):
return obj.build()
return py_json.JSONEncoder.default(self, obj)
class ReQLDecoder(py_json.JSONDecoder):
'''
Default JSONDecoder subclass to handle pseudo-type conversion.
'''
def __init__(self, reql_format_opts=None):
py_json.JSONDecoder.__init__(self, object_hook=self.convert_pseudotype)
self.reql_format_opts = reql_format_opts or {}
def convert_time(self, obj):
if 'epoch_time' not in obj:
raise ReqlDriverError(('pseudo-type TIME object %s does not ' +
'have expected field "epoch_time".')
% py_json.dumps(obj))
if 'timezone' in obj:
return datetime.datetime.fromtimestamp(obj['epoch_time'],
RqlTzinfo(obj['timezone']))
else:
return datetime.datetime.utcfromtimestamp(obj['epoch_time'])
def convert_grouped_data(self, obj):
if 'data' not in obj:
raise ReqlDriverError(('pseudo-type GROUPED_DATA object' +
' %s does not have the expected field "data".')
% py_json.dumps(obj))
return dict([(recursively_make_hashable(k), v) for k, v in obj['data']])
def convert_binary(self, obj):
if 'data' not in obj:
raise ReqlDriverError(('pseudo-type BINARY object %s does not have ' +
'the expected field "data".')
% py_json.dumps(obj))
return RqlBinary(base64.b64decode(obj['data'].encode('utf-8')))
def convert_pseudotype(self, obj):
reql_type = obj.get('$reql_type$')
if reql_type is not None:
if reql_type == 'TIME':
time_format = self.reql_format_opts.get('time_format')
if time_format is None or time_format == 'native':
# Convert to native python datetime object
return self.convert_time(obj)
elif time_format != 'raw':
raise ReqlDriverError("Unknown time_format run option \"%s\"."
% time_format)
elif reql_type == 'GROUPED_DATA':
group_format = self.reql_format_opts.get('group_format')
if group_format is None or group_format == 'native':
return self.convert_grouped_data(obj)
elif group_format != 'raw':
raise ReqlDriverError("Unknown group_format run option \"%s\"."
% group_format)
elif reql_type == 'GEOMETRY':
# No special support for this. Just return the raw object
return obj
elif reql_type == 'BINARY':
binary_format = self.reql_format_opts.get('binary_format')
if binary_format is None or binary_format == 'native':
return self.convert_binary(obj)
elif binary_format != 'raw':
raise ReqlDriverError("Unknown binary_format run option \"%s\"."
% binary_format)
else:
raise ReqlDriverError("Unknown pseudo-type %s" % reql_type)
# If there was no pseudotype, or the relevant format is raw, return
# the original object
return obj
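# For example (illustrative), with the default 'native' time_format the TIME
# pseudo-type {'$reql_type$': 'TIME', 'epoch_time': 0, 'timezone': '+00:00'}
# becomes datetime.datetime(1970, 1, 1, 0, 0, tzinfo=RqlTzinfo('+00:00')).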
# This class handles the conversion of RQL terminal types in both directions
# Going to the server though it does not support R_ARRAY or R_OBJECT as those
# are alternately handled by the MakeArray and MakeObject nodes. Why do this?
# MakeArray and MakeObject are more flexible, allowing us to construct array
# and object expressions from nested RQL expressions. Constructing pure
# R_ARRAYs and R_OBJECTs would require verifying that at all nested levels
# our arrays and objects are composed only of basic types.
class Datum(RqlQuery):
_args = []
optargs = {}
def __init__(self, val):
self.data = val
def build(self):
return self.data
def compose(self, args, optargs):
return repr(self.data)
class MakeArray(RqlQuery):
tt = pTerm.MAKE_ARRAY
def compose(self, args, optargs):
return T('[', T(*args, intsp=', '), ']')
class MakeObj(RqlQuery):
tt = pTerm.MAKE_OBJ
# We cannot inherit from RqlQuery because of potential conflicts with
# the `self` parameter. This is not a problem for other RqlQuery sub-
# classes unless we add a 'self' optional argument to one of them.
def __init__(self, obj_dict):
self._args = []
self.optargs = {}
for k, v in dict_items(obj_dict):
if not isinstance(k, (str, unicode)):
raise ReqlDriverCompileError("Object keys must be strings.")
self.optargs[k] = expr(v)
def build(self):
return self.optargs
def compose(self, args, optargs):
return T('r.expr({', T(*[T(repr(k), ': ', v)
for k, v in dict_items(optargs)],
intsp=', '), '})')
class Var(RqlQuery):
tt = pTerm.VAR
def compose(self, args, optargs):
return 'var_' + args[0]
class JavaScript(RqlTopLevelQuery):
tt = pTerm.JAVASCRIPT
st = "js"
class Http(RqlTopLevelQuery):
tt = pTerm.HTTP
st = "http"
class UserError(RqlTopLevelQuery):
tt = pTerm.ERROR
st = "error"
class Random(RqlTopLevelQuery):
tt = pTerm.RANDOM
st = "random"
class Changes(RqlMethodQuery):
tt = pTerm.CHANGES
st = "changes"
class Default(RqlMethodQuery):
tt = pTerm.DEFAULT
st = "default"
class ImplicitVar(RqlQuery):
tt = pTerm.IMPLICIT_VAR
def __call__(self, *args, **kwargs):
raise TypeError("'r.row' is not callable, use 'r.row[...]' instead")
def compose(self, args, optargs):
return 'r.row'
class Eq(RqlBiCompareOperQuery):
tt = pTerm.EQ
st = "=="
class Ne(RqlBiCompareOperQuery):
tt = pTerm.NE
st = "!="
class Lt(RqlBiCompareOperQuery):
tt = pTerm.LT
st = "<"
class Le(RqlBiCompareOperQuery):
tt = pTerm.LE
st = "<="
class Gt(RqlBiCompareOperQuery):
tt = pTerm.GT
st = ">"
class Ge(RqlBiCompareOperQuery):
tt = pTerm.GE
st = ">="
class Not(RqlQuery):
tt = pTerm.NOT
def compose(self, args, optargs):
if isinstance(self._args[0], Datum):
args[0] = T('r.expr(', args[0], ')')
return T('(~', args[0], ')')
class Add(RqlBiOperQuery):
tt = pTerm.ADD
st = "+"
class Sub(RqlBiOperQuery):
tt = pTerm.SUB
st = "-"
class Mul(RqlBiOperQuery):
tt = pTerm.MUL
st = "*"
class Div(RqlBiOperQuery):
tt = pTerm.DIV
st = "/"
class Mod(RqlBiOperQuery):
tt = pTerm.MOD
st = "%"
class Floor(RqlMethodQuery):
tt = pTerm.FLOOR
st = 'floor'
class Ceil(RqlMethodQuery):
tt = pTerm.CEIL
st = 'ceil'
class Round(RqlMethodQuery):
tt = pTerm.ROUND
st = 'round'
class Append(RqlMethodQuery):
tt = pTerm.APPEND
st = "append"
class Prepend(RqlMethodQuery):
tt = pTerm.PREPEND
st = "prepend"
class Difference(RqlMethodQuery):
tt = pTerm.DIFFERENCE
st = "difference"
class SetInsert(RqlMethodQuery):
tt = pTerm.SET_INSERT
st = "set_insert"
class SetUnion(RqlMethodQuery):
tt = pTerm.SET_UNION
st = "set_union"
class SetIntersection(RqlMethodQuery):
tt = pTerm.SET_INTERSECTION
st = "set_intersection"
class SetDifference(RqlMethodQuery):
tt = pTerm.SET_DIFFERENCE
st = "set_difference"
class Slice(RqlBracketQuery):
tt = pTerm.SLICE
st = 'slice'
# Slice has a special bracket syntax, implemented here
def compose(self, args, optargs):
if self.bracket_operator:
if needs_wrap(self._args[0]):
args[0] = T('r.expr(', args[0], ')')
return T(args[0], '[', args[1], ':', args[2], ']')
else:
return RqlBracketQuery.compose(self, args, optargs)
class Skip(RqlMethodQuery):
tt = pTerm.SKIP
st = 'skip'
class Limit(RqlMethodQuery):
tt = pTerm.LIMIT
st = 'limit'
class GetField(RqlBracketQuery):
tt = pTerm.GET_FIELD
st = 'get_field'
class Bracket(RqlBracketQuery):
tt = pTerm.BRACKET
st = 'bracket'
class Contains(RqlMethodQuery):
tt = pTerm.CONTAINS
st = 'contains'
class HasFields(RqlMethodQuery):
tt = pTerm.HAS_FIELDS
st = 'has_fields'
class WithFields(RqlMethodQuery):
tt = pTerm.WITH_FIELDS
st = 'with_fields'
class Keys(RqlMethodQuery):
tt = pTerm.KEYS
st = 'keys'
class Values(RqlMethodQuery):
tt = pTerm.VALUES
st = 'values'
class Object(RqlMethodQuery):
tt = pTerm.OBJECT
st = 'object'
class Pluck(RqlMethodQuery):
tt = pTerm.PLUCK
st = 'pluck'
class Without(RqlMethodQuery):
tt = pTerm.WITHOUT
st = 'without'
class Merge(RqlMethodQuery):
tt = pTerm.MERGE
st = 'merge'
class Between(RqlMethodQuery):
tt = pTerm.BETWEEN
st = 'between'
class DB(RqlTopLevelQuery):
tt = pTerm.DB
st = 'db'
def table_list(self, *args):
return TableList(self, *args)
def config(self, *args):
return Config(self, *args)
def wait(self, *args, **kwargs):
return Wait(self, *args, **kwargs)
def reconfigure(self, *args, **kwargs):
return Reconfigure(self, *args, **kwargs)
def rebalance(self, *args, **kwargs):
return Rebalance(self, *args, **kwargs)
def table_create(self, *args, **kwargs):
return TableCreate(self, *args, **kwargs)
def table_drop(self, *args):
return TableDrop(self, *args)
def table(self, *args, **kwargs):
return Table(self, *args, **kwargs)
class FunCall(RqlQuery):
tt = pTerm.FUNCALL
# This object should be constructed with arguments first, and the
# function itself as the last parameter. This makes it easier for
# the places where this object is constructed. The actual wire
# format is function first, arguments last, so we flip them around
# before passing it down to the base class constructor.
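    # For example (illustrative only): r.do(1, 2, lambda a, b: a + b) reaches
    # this constructor as FunCall(1, 2, <lambda>); the lambda is wrapped with
    # func_wrap() and moved to the front, so the serialized term is
    # FUNCALL(<func>, 1, 2).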
def __init__(self, *args):
if len(args) == 0:
raise ReqlDriverCompileError("Expected 1 or more arguments but found 0.")
args = [func_wrap(args[-1])] + list(args[:-1])
RqlQuery.__init__(self, *args)
def compose(self, args, optargs):
if len(args) != 2:
return T('r.do(', T(T(*(args[1:]), intsp=', '), args[0],
intsp=', '), ')')
if isinstance(self._args[1], Datum):
args[1] = T('r.expr(', args[1], ')')
return T(args[1], '.do(', args[0], ')')
class Table(RqlQuery):
tt = pTerm.TABLE
st = 'table'
def insert(self, *args, **kwargs):
return Insert(self, *[expr(arg) for arg in args], **kwargs)
def get(self, *args):
return Get(self, *args)
def get_all(self, *args, **kwargs):
return GetAll(self, *args, **kwargs)
def index_create(self, *args, **kwargs):
if len(args) > 1:
args = [args[0]] + [func_wrap(arg) for arg in args[1:]]
return IndexCreate(self, *args, **kwargs)
def index_drop(self, *args):
return IndexDrop(self, *args)
def index_rename(self, *args, **kwargs):
return IndexRename(self, *args, **kwargs)
def index_list(self, *args):
return IndexList(self, *args)
def index_status(self, *args):
return IndexStatus(self, *args)
def index_wait(self, *args):
return IndexWait(self, *args)
def status(self, *args):
return Status(self, *args)
def config(self, *args):
return Config(self, *args)
def wait(self, *args, **kwargs):
return Wait(self, *args, **kwargs)
def reconfigure(self, *args, **kwargs):
return Reconfigure(self, *args, **kwargs)
def rebalance(self, *args, **kwargs):
return Rebalance(self, *args, **kwargs)
def sync(self, *args):
return Sync(self, *args)
def get_intersecting(self, *args, **kwargs):
return GetIntersecting(self, *args, **kwargs)
def get_nearest(self, *args, **kwargs):
return GetNearest(self, *args, **kwargs)
def uuid(self, *args, **kwargs):
return UUID(self, *args, **kwargs)
def compose(self, args, optargs):
args.extend([T(k, '=', v) for k, v in dict_items(optargs)])
if isinstance(self._args[0], DB):
return T(args[0], '.table(', T(*(args[1:]), intsp=', '), ')')
else:
return T('r.table(', T(*(args), intsp=', '), ')')
class Get(RqlMethodQuery):
tt = pTerm.GET
st = 'get'
class GetAll(RqlMethodQuery):
tt = pTerm.GET_ALL
st = 'get_all'
class GetIntersecting(RqlMethodQuery):
tt = pTerm.GET_INTERSECTING
st = 'get_intersecting'
class GetNearest(RqlMethodQuery):
tt = pTerm.GET_NEAREST
st = 'get_nearest'
class UUID(RqlMethodQuery):
tt = pTerm.UUID
st = 'uuid'
class Reduce(RqlMethodQuery):
tt = pTerm.REDUCE
st = 'reduce'
class Sum(RqlMethodQuery):
tt = pTerm.SUM
st = 'sum'
class Avg(RqlMethodQuery):
tt = pTerm.AVG
st = 'avg'
class Min(RqlMethodQuery):
tt = pTerm.MIN
st = 'min'
class Max(RqlMethodQuery):
tt = pTerm.MAX
st = 'max'
class Map(RqlMethodQuery):
tt = pTerm.MAP
st = 'map'
class Fold(RqlMethodQuery):
tt = pTerm.FOLD
st = 'fold'
class Filter(RqlMethodQuery):
tt = pTerm.FILTER
st = 'filter'
class ConcatMap(RqlMethodQuery):
tt = pTerm.CONCAT_MAP
st = 'concat_map'
class OrderBy(RqlMethodQuery):
tt = pTerm.ORDER_BY
st = 'order_by'
class Distinct(RqlMethodQuery):
tt = pTerm.DISTINCT
st = 'distinct'
class Count(RqlMethodQuery):
tt = pTerm.COUNT
st = 'count'
class Union(RqlMethodQuery):
tt = pTerm.UNION
st = 'union'
class Nth(RqlBracketQuery):
tt = pTerm.NTH
st = 'nth'
class Match(RqlMethodQuery):
tt = pTerm.MATCH
st = 'match'
class ToJsonString(RqlMethodQuery):
tt = pTerm.TO_JSON_STRING
st = 'to_json_string'
class Split(RqlMethodQuery):
tt = pTerm.SPLIT
st = 'split'
class Upcase(RqlMethodQuery):
tt = pTerm.UPCASE
st = 'upcase'
class Downcase(RqlMethodQuery):
tt = pTerm.DOWNCASE
st = 'downcase'
class OffsetsOf(RqlMethodQuery):
tt = pTerm.OFFSETS_OF
st = 'offsets_of'
class IsEmpty(RqlMethodQuery):
tt = pTerm.IS_EMPTY
st = 'is_empty'
class Group(RqlMethodQuery):
tt = pTerm.GROUP
st = 'group'
class InnerJoin(RqlMethodQuery):
tt = pTerm.INNER_JOIN
st = 'inner_join'
class OuterJoin(RqlMethodQuery):
tt = pTerm.OUTER_JOIN
st = 'outer_join'
class EqJoin(RqlMethodQuery):
tt = pTerm.EQ_JOIN
st = 'eq_join'
class Zip(RqlMethodQuery):
tt = pTerm.ZIP
st = 'zip'
class CoerceTo(RqlMethodQuery):
tt = pTerm.COERCE_TO
st = 'coerce_to'
class Ungroup(RqlMethodQuery):
tt = pTerm.UNGROUP
st = 'ungroup'
class TypeOf(RqlMethodQuery):
tt = pTerm.TYPE_OF
st = 'type_of'
class Update(RqlMethodQuery):
tt = pTerm.UPDATE
st = 'update'
class Delete(RqlMethodQuery):
tt = pTerm.DELETE
st = 'delete'
class Replace(RqlMethodQuery):
tt = pTerm.REPLACE
st = 'replace'
class Insert(RqlMethodQuery):
tt = pTerm.INSERT
st = 'insert'
class DbCreate(RqlTopLevelQuery):
tt = pTerm.DB_CREATE
st = "db_create"
class DbDrop(RqlTopLevelQuery):
tt = pTerm.DB_DROP
st = "db_drop"
class DbList(RqlTopLevelQuery):
tt = pTerm.DB_LIST
st = "db_list"
class TableCreate(RqlMethodQuery):
tt = pTerm.TABLE_CREATE
st = "table_create"
class TableCreateTL(RqlTopLevelQuery):
tt = pTerm.TABLE_CREATE
st = "table_create"
class TableDrop(RqlMethodQuery):
tt = pTerm.TABLE_DROP
st = "table_drop"
class TableDropTL(RqlTopLevelQuery):
tt = pTerm.TABLE_DROP
st = "table_drop"
class TableList(RqlMethodQuery):
tt = pTerm.TABLE_LIST
st = "table_list"
class TableListTL(RqlTopLevelQuery):
tt = pTerm.TABLE_LIST
st = "table_list"
class IndexCreate(RqlMethodQuery):
tt = pTerm.INDEX_CREATE
st = 'index_create'
class IndexDrop(RqlMethodQuery):
tt = pTerm.INDEX_DROP
st = 'index_drop'
class IndexRename(RqlMethodQuery):
tt = pTerm.INDEX_RENAME
st = 'index_rename'
class IndexList(RqlMethodQuery):
tt = pTerm.INDEX_LIST
st = 'index_list'
class IndexStatus(RqlMethodQuery):
tt = pTerm.INDEX_STATUS
st = 'index_status'
class IndexWait(RqlMethodQuery):
tt = pTerm.INDEX_WAIT
st = 'index_wait'
class Config(RqlMethodQuery):
tt = pTerm.CONFIG
st = "config"
class Status(RqlMethodQuery):
tt = pTerm.STATUS
st = "status"
class Wait(RqlMethodQuery):
tt = pTerm.WAIT
st = "wait"
class WaitTL(RqlTopLevelQuery):
tt = pTerm.WAIT
st = "wait"
class Reconfigure(RqlMethodQuery):
tt = pTerm.RECONFIGURE
st = 'reconfigure'
class ReconfigureTL(RqlTopLevelQuery):
tt = pTerm.RECONFIGURE
st = 'reconfigure'
class Rebalance(RqlMethodQuery):
tt = pTerm.REBALANCE
st = 'rebalance'
class RebalanceTL(RqlTopLevelQuery):
tt = pTerm.REBALANCE
st = 'rebalance'
class Sync(RqlMethodQuery):
tt = pTerm.SYNC
st = 'sync'
class Branch(RqlTopLevelQuery):
tt = pTerm.BRANCH
st = "branch"
class Or(RqlBoolOperQuery):
tt = pTerm.OR
st = "or_"
st_infix = "|"
class And(RqlBoolOperQuery):
tt = pTerm.AND
st = "and_"
st_infix = "&"
class ForEach(RqlMethodQuery):
tt = pTerm.FOR_EACH
st = 'for_each'
class Info(RqlMethodQuery):
tt = pTerm.INFO
st = 'info'
class InsertAt(RqlMethodQuery):
tt = pTerm.INSERT_AT
st = 'insert_at'
class SpliceAt(RqlMethodQuery):
tt = pTerm.SPLICE_AT
st = 'splice_at'
class DeleteAt(RqlMethodQuery):
tt = pTerm.DELETE_AT
st = 'delete_at'
class ChangeAt(RqlMethodQuery):
tt = pTerm.CHANGE_AT
st = 'change_at'
class Sample(RqlMethodQuery):
tt = pTerm.SAMPLE
st = 'sample'
class Json(RqlTopLevelQuery):
tt = pTerm.JSON
st = 'json'
class Args(RqlTopLevelQuery):
tt = pTerm.ARGS
st = 'args'
# Use this class as a wrapper to 'bytes' so we can tell the difference
# in Python2 (when reusing the result of a previous query).
class RqlBinary(bytes):
def __new__(cls, *args, **kwargs):
return bytes.__new__(cls, *args, **kwargs)
def __repr__(self):
excerpt = binascii.hexlify(self[0:6]).decode('utf-8')
excerpt = ' '.join([excerpt[i:i+2]
for i in xrange(0, len(excerpt), 2)])
excerpt = ', \'%s%s\'' % (excerpt, '...' if len(self) > 6 else '') \
if len(self) > 0 else ''
return "<binary, %d byte%s%s>" % (len(self), 's'
if len(self) != 1 else '', excerpt)
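    # For example, repr(RqlBinary(b'\x00\x01\x02\x03\x04\x05\x06')) yields
    # "<binary, 7 bytes, '00 01 02 03 04 05...'>" -- only the first 6 bytes of
    # the data appear in the excerpt.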
class Binary(RqlTopLevelQuery):
# Note: this term isn't actually serialized, it should exist only
# in the client
tt = pTerm.BINARY
st = 'binary'
def __init__(self, data):
# We only allow 'bytes' objects to be serialized as binary
# Python 2 - `bytes` is equivalent to `str`, either will be accepted
# Python 3 - `unicode` is equivalent to `str`, neither will be accepted
if isinstance(data, RqlQuery):
RqlTopLevelQuery.__init__(self, data)
elif isinstance(data, unicode):
raise ReqlDriverCompileError("Cannot convert a unicode string to binary, "
"use `unicode.encode()` to specify the "
"encoding.")
elif not isinstance(data, bytes):
raise ReqlDriverCompileError(("Cannot convert %s to binary, convert the "
"object to a `bytes` object first.")
% type(data).__name__)
else:
self.base64_data = base64.b64encode(data)
# Kind of a hack to get around composing
self._args = []
self.optargs = {}
def compose(self, args, optargs):
if len(self._args) == 0:
return T('r.', self.st, '(bytes(<data>))')
else:
return RqlTopLevelQuery.compose(self, args, optargs)
def build(self):
if len(self._args) == 0:
return {'$reql_type$': 'BINARY',
'data': self.base64_data.decode('utf-8')}
else:
return RqlTopLevelQuery.build(self)
class Range(RqlTopLevelQuery):
tt = pTerm.RANGE
st = 'range'
class ToISO8601(RqlMethodQuery):
tt = pTerm.TO_ISO8601
st = 'to_iso8601'
class During(RqlMethodQuery):
tt = pTerm.DURING
st = 'during'
class Date(RqlMethodQuery):
tt = pTerm.DATE
st = 'date'
class TimeOfDay(RqlMethodQuery):
tt = pTerm.TIME_OF_DAY
st = 'time_of_day'
class Timezone(RqlMethodQuery):
tt = pTerm.TIMEZONE
st = 'timezone'
class Year(RqlMethodQuery):
tt = pTerm.YEAR
st = 'year'
class Month(RqlMethodQuery):
tt = pTerm.MONTH
st = 'month'
class Day(RqlMethodQuery):
tt = pTerm.DAY
st = 'day'
class DayOfWeek(RqlMethodQuery):
tt = pTerm.DAY_OF_WEEK
st = 'day_of_week'
class DayOfYear(RqlMethodQuery):
tt = pTerm.DAY_OF_YEAR
st = 'day_of_year'
class Hours(RqlMethodQuery):
tt = pTerm.HOURS
st = 'hours'
class Minutes(RqlMethodQuery):
tt = pTerm.MINUTES
st = 'minutes'
class Seconds(RqlMethodQuery):
tt = pTerm.SECONDS
st = 'seconds'
class Time(RqlTopLevelQuery):
tt = pTerm.TIME
st = 'time'
class ISO8601(RqlTopLevelQuery):
tt = pTerm.ISO8601
st = 'iso8601'
class EpochTime(RqlTopLevelQuery):
tt = pTerm.EPOCH_TIME
st = 'epoch_time'
class Now(RqlTopLevelQuery):
tt = pTerm.NOW
st = 'now'
class InTimezone(RqlMethodQuery):
tt = pTerm.IN_TIMEZONE
st = 'in_timezone'
class ToEpochTime(RqlMethodQuery):
tt = pTerm.TO_EPOCH_TIME
st = 'to_epoch_time'
class GeoJson(RqlTopLevelQuery):
tt = pTerm.GEOJSON
st = 'geojson'
class ToGeoJson(RqlMethodQuery):
tt = pTerm.TO_GEOJSON
st = 'to_geojson'
class Point(RqlTopLevelQuery):
tt = pTerm.POINT
st = 'point'
class Line(RqlTopLevelQuery):
tt = pTerm.LINE
st = 'line'
class Polygon(RqlTopLevelQuery):
tt = pTerm.POLYGON
st = 'polygon'
class Distance(RqlMethodQuery):
tt = pTerm.DISTANCE
st = 'distance'
class Intersects(RqlMethodQuery):
tt = pTerm.INTERSECTS
st = 'intersects'
class Includes(RqlMethodQuery):
tt = pTerm.INCLUDES
st = 'includes'
class Circle(RqlTopLevelQuery):
tt = pTerm.CIRCLE
st = 'circle'
class Fill(RqlMethodQuery):
tt = pTerm.FILL
st = 'fill'
class PolygonSub(RqlMethodQuery):
tt = pTerm.POLYGON_SUB
st = 'polygon_sub'
# Returns True if IMPLICIT_VAR is found in the subquery
def _ivar_scan(query):
if not isinstance(query, RqlQuery):
return False
if isinstance(query, ImplicitVar):
return True
if any([_ivar_scan(arg) for arg in query._args]):
return True
if any([_ivar_scan(arg) for k, arg in dict_items(query.optargs)]):
return True
return False
# Called on arguments that should be functions
def func_wrap(val):
val = expr(val)
if _ivar_scan(val):
return Func(lambda x: val)
return val
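# For example (a sketch): func_wrap(r.row['age'] > 30) finds the implicit
# variable via _ivar_scan() and returns Func(lambda x: <that expression>), so
# table.filter(r.row['age'] > 30) behaves like passing an explicit lambda;
# values that contain no r.row come back from expr() as plain terms.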
class Func(RqlQuery):
tt = pTerm.FUNC
lock = threading.Lock()
nextVarId = 1
def __init__(self, lmbd):
vrs = []
vrids = []
try:
code = lmbd.func_code
except AttributeError:
code = lmbd.__code__
for i in xrange(code.co_argcount):
Func.lock.acquire()
var_id = Func.nextVarId
Func.nextVarId += 1
Func.lock.release()
vrs.append(Var(var_id))
vrids.append(var_id)
self.vrs = vrs
self._args = [MakeArray(*vrids), expr(lmbd(*vrs))]
self.optargs = {}
def compose(self, args, optargs):
return T('lambda ', T(*[v.compose([v._args[0].compose(None, None)],
[]) for v in self.vrs],
intsp=', '), ': ', args[1])
class Asc(RqlTopLevelQuery):
tt = pTerm.ASC
st = 'asc'
class Desc(RqlTopLevelQuery):
tt = pTerm.DESC
st = 'desc'
class Literal(RqlTopLevelQuery):
tt = pTerm.LITERAL
st = 'literal'
|
PypiClean
|
/aws_solutions_constructs.aws_events_rule_lambda-1.181.1-py3-none-any.whl/aws_solutions_constructs/aws_events_rule_lambda/__init__.py
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from ._jsii import *
import aws_cdk.aws_events
import aws_cdk.aws_lambda
import aws_cdk.core
class EventsRuleToLambda(
aws_cdk.core.Construct,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-solutions-constructs/aws-events-rule-lambda.EventsRuleToLambda",
):
def __init__(
self,
scope: aws_cdk.core.Construct,
id: builtins.str,
*,
event_rule_props: typing.Union[aws_cdk.aws_events.RuleProps, typing.Dict[str, typing.Any]],
event_bus_props: typing.Optional[typing.Union[aws_cdk.aws_events.EventBusProps, typing.Dict[str, typing.Any]]] = None,
existing_event_bus_interface: typing.Optional[aws_cdk.aws_events.IEventBus] = None,
existing_lambda_obj: typing.Optional[aws_cdk.aws_lambda.Function] = None,
lambda_function_props: typing.Optional[typing.Union[aws_cdk.aws_lambda.FunctionProps, typing.Dict[str, typing.Any]]] = None,
) -> None:
'''
:param scope: - represents the scope for all the resources.
        :param id: - this is a scope-unique id.
:param event_rule_props: User provided eventRuleProps to override the defaults. Default: - None
:param event_bus_props: A new custom EventBus is created with provided props. Default: - None
:param existing_event_bus_interface: Existing instance of a custom EventBus. Default: - None
:param existing_lambda_obj: Existing instance of Lambda Function object, providing both this and ``lambdaFunctionProps`` will cause an error. Default: - None
:param lambda_function_props: User provided props to override the default props for the Lambda function. Default: - Default props are used
:access: public
:summary: Constructs a new instance of the EventsRuleToLambda class.
'''
if __debug__:
def stub(
scope: aws_cdk.core.Construct,
id: builtins.str,
*,
event_rule_props: typing.Union[aws_cdk.aws_events.RuleProps, typing.Dict[str, typing.Any]],
event_bus_props: typing.Optional[typing.Union[aws_cdk.aws_events.EventBusProps, typing.Dict[str, typing.Any]]] = None,
existing_event_bus_interface: typing.Optional[aws_cdk.aws_events.IEventBus] = None,
existing_lambda_obj: typing.Optional[aws_cdk.aws_lambda.Function] = None,
lambda_function_props: typing.Optional[typing.Union[aws_cdk.aws_lambda.FunctionProps, typing.Dict[str, typing.Any]]] = None,
) -> None:
...
type_hints = typing.get_type_hints(stub)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = EventsRuleToLambdaProps(
event_rule_props=event_rule_props,
event_bus_props=event_bus_props,
existing_event_bus_interface=existing_event_bus_interface,
existing_lambda_obj=existing_lambda_obj,
lambda_function_props=lambda_function_props,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property
@jsii.member(jsii_name="eventsRule")
def events_rule(self) -> aws_cdk.aws_events.Rule:
return typing.cast(aws_cdk.aws_events.Rule, jsii.get(self, "eventsRule"))
@builtins.property
@jsii.member(jsii_name="lambdaFunction")
def lambda_function(self) -> aws_cdk.aws_lambda.Function:
return typing.cast(aws_cdk.aws_lambda.Function, jsii.get(self, "lambdaFunction"))
@builtins.property
@jsii.member(jsii_name="eventBus")
def event_bus(self) -> typing.Optional[aws_cdk.aws_events.IEventBus]:
return typing.cast(typing.Optional[aws_cdk.aws_events.IEventBus], jsii.get(self, "eventBus"))
@jsii.data_type(
jsii_type="@aws-solutions-constructs/aws-events-rule-lambda.EventsRuleToLambdaProps",
jsii_struct_bases=[],
name_mapping={
"event_rule_props": "eventRuleProps",
"event_bus_props": "eventBusProps",
"existing_event_bus_interface": "existingEventBusInterface",
"existing_lambda_obj": "existingLambdaObj",
"lambda_function_props": "lambdaFunctionProps",
},
)
class EventsRuleToLambdaProps:
def __init__(
self,
*,
event_rule_props: typing.Union[aws_cdk.aws_events.RuleProps, typing.Dict[str, typing.Any]],
event_bus_props: typing.Optional[typing.Union[aws_cdk.aws_events.EventBusProps, typing.Dict[str, typing.Any]]] = None,
existing_event_bus_interface: typing.Optional[aws_cdk.aws_events.IEventBus] = None,
existing_lambda_obj: typing.Optional[aws_cdk.aws_lambda.Function] = None,
lambda_function_props: typing.Optional[typing.Union[aws_cdk.aws_lambda.FunctionProps, typing.Dict[str, typing.Any]]] = None,
) -> None:
'''
:param event_rule_props: User provided eventRuleProps to override the defaults. Default: - None
:param event_bus_props: A new custom EventBus is created with provided props. Default: - None
:param existing_event_bus_interface: Existing instance of a custom EventBus. Default: - None
:param existing_lambda_obj: Existing instance of Lambda Function object, providing both this and ``lambdaFunctionProps`` will cause an error. Default: - None
:param lambda_function_props: User provided props to override the default props for the Lambda function. Default: - Default props are used
        :summary: The properties for the EventsRuleToLambda Construct
'''
if isinstance(event_rule_props, dict):
event_rule_props = aws_cdk.aws_events.RuleProps(**event_rule_props)
if isinstance(event_bus_props, dict):
event_bus_props = aws_cdk.aws_events.EventBusProps(**event_bus_props)
if isinstance(lambda_function_props, dict):
lambda_function_props = aws_cdk.aws_lambda.FunctionProps(**lambda_function_props)
if __debug__:
def stub(
*,
event_rule_props: typing.Union[aws_cdk.aws_events.RuleProps, typing.Dict[str, typing.Any]],
event_bus_props: typing.Optional[typing.Union[aws_cdk.aws_events.EventBusProps, typing.Dict[str, typing.Any]]] = None,
existing_event_bus_interface: typing.Optional[aws_cdk.aws_events.IEventBus] = None,
existing_lambda_obj: typing.Optional[aws_cdk.aws_lambda.Function] = None,
lambda_function_props: typing.Optional[typing.Union[aws_cdk.aws_lambda.FunctionProps, typing.Dict[str, typing.Any]]] = None,
) -> None:
...
type_hints = typing.get_type_hints(stub)
check_type(argname="argument event_rule_props", value=event_rule_props, expected_type=type_hints["event_rule_props"])
check_type(argname="argument event_bus_props", value=event_bus_props, expected_type=type_hints["event_bus_props"])
check_type(argname="argument existing_event_bus_interface", value=existing_event_bus_interface, expected_type=type_hints["existing_event_bus_interface"])
check_type(argname="argument existing_lambda_obj", value=existing_lambda_obj, expected_type=type_hints["existing_lambda_obj"])
check_type(argname="argument lambda_function_props", value=lambda_function_props, expected_type=type_hints["lambda_function_props"])
self._values: typing.Dict[str, typing.Any] = {
"event_rule_props": event_rule_props,
}
if event_bus_props is not None:
self._values["event_bus_props"] = event_bus_props
if existing_event_bus_interface is not None:
self._values["existing_event_bus_interface"] = existing_event_bus_interface
if existing_lambda_obj is not None:
self._values["existing_lambda_obj"] = existing_lambda_obj
if lambda_function_props is not None:
self._values["lambda_function_props"] = lambda_function_props
@builtins.property
def event_rule_props(self) -> aws_cdk.aws_events.RuleProps:
'''User provided eventRuleProps to override the defaults.
:default: - None
'''
result = self._values.get("event_rule_props")
assert result is not None, "Required property 'event_rule_props' is missing"
return typing.cast(aws_cdk.aws_events.RuleProps, result)
@builtins.property
def event_bus_props(self) -> typing.Optional[aws_cdk.aws_events.EventBusProps]:
'''A new custom EventBus is created with provided props.
:default: - None
'''
result = self._values.get("event_bus_props")
return typing.cast(typing.Optional[aws_cdk.aws_events.EventBusProps], result)
@builtins.property
def existing_event_bus_interface(
self,
) -> typing.Optional[aws_cdk.aws_events.IEventBus]:
'''Existing instance of a custom EventBus.
:default: - None
'''
result = self._values.get("existing_event_bus_interface")
return typing.cast(typing.Optional[aws_cdk.aws_events.IEventBus], result)
@builtins.property
def existing_lambda_obj(self) -> typing.Optional[aws_cdk.aws_lambda.Function]:
'''Existing instance of Lambda Function object, providing both this and ``lambdaFunctionProps`` will cause an error.
:default: - None
'''
result = self._values.get("existing_lambda_obj")
return typing.cast(typing.Optional[aws_cdk.aws_lambda.Function], result)
@builtins.property
def lambda_function_props(
self,
) -> typing.Optional[aws_cdk.aws_lambda.FunctionProps]:
'''User provided props to override the default props for the Lambda function.
:default: - Default props are used
'''
result = self._values.get("lambda_function_props")
return typing.cast(typing.Optional[aws_cdk.aws_lambda.FunctionProps], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "EventsRuleToLambdaProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"EventsRuleToLambda",
"EventsRuleToLambdaProps",
]
publication.publish()
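# A minimal usage sketch (illustrative only -- the surrounding Stack, the
# schedule, and the Lambda asset below are assumptions, not part of this
# package):
#
#   import aws_cdk.core as core
#   import aws_cdk.aws_events as events
#   import aws_cdk.aws_lambda as _lambda
#
#   EventsRuleToLambda(
#       self, "EventsRuleToLambda",
#       event_rule_props=events.RuleProps(
#           schedule=events.Schedule.rate(core.Duration.minutes(5))),
#       lambda_function_props=_lambda.FunctionProps(
#           runtime=_lambda.Runtime.PYTHON_3_9,
#           handler="index.handler",
#           code=_lambda.Code.from_asset("lambda")))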
|
PypiClean
|
/napari_brainways-0.1.8.4-py3-none-any.whl/napari_brainways/widgets/cell_detector_widget.py
|
from typing import Tuple
import magicgui
from qtpy.QtWidgets import QLabel, QPushButton, QVBoxLayout, QWidget
class CellDetectorWidget(QWidget):
def __init__(self, controller):
super().__init__()
self.controller = controller
self.stardist_label = QLabel(
text='by <a href="https://github.com/stardist/stardist">StarDist</a>'
)
self.stardist_label.setOpenExternalLinks(True)
self.cell_detector_params_widget = magicgui.magicgui(
self.controller.on_params_changed,
normalizer={
"label": "Normalizer",
"widget_type": "RadioButtons",
"orientation": "horizontal",
"choices": [
("Quantile", "quantile"),
# ("Value", "value"),
("CLAHE", "clahe"),
("None", "none"),
],
},
# normalizer_range={
# "label": "Range",
# "widget_type": "RangeSlider",
# "min": 0,
# "max": 1000,
# "step": 1,
# },
min_value={
"label": "Low",
"widget_type": "Slider",
"min": 0,
"max": 1000,
"step": 1,
},
max_value={
"label": "High",
"widget_type": "Slider",
"min": 0,
"max": 1000,
"step": 1,
},
auto_call=True,
)
self.cell_detector_params_widget.native.layout().setContentsMargins(0, 0, 0, 0)
self.run_preview_button = QPushButton("Run on preview")
self.run_preview_button.clicked.connect(
self.controller.run_cell_detector_preview_async
)
self.setLayout(QVBoxLayout())
self.layout().addWidget(self.stardist_label)
self.layout().addWidget(self.cell_detector_params_widget.native)
self.layout().addWidget(self.run_preview_button)
def set_cell_detector_params(
self,
normalizer: str,
normalizer_range: Tuple[float, float],
unique: bool,
):
widget = self.cell_detector_params_widget
widget._auto_call = False
widget.normalizer.value = normalizer
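        # The min/max sliders are integer widgets covering 0-1000, so the float
        # normalizer range in [0, 1] is scaled by 1000 here (and presumably
        # scaled back down by the controller when the parameters are applied).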
widget.min_value.value = int(normalizer_range[0] * 1000)
widget.max_value.value = int(normalizer_range[1] * 1000)
widget.unique.value = unique
widget._auto_call = True
|
PypiClean
|
/js.highcharts-3.0.7.tar.gz/js.highcharts-3.0.7/js/highcharts/resources/themes/gray.js
|
Highcharts.theme = {
colors: ["#DDDF0D", "#7798BF", "#55BF3B", "#DF5353", "#aaeeee", "#ff0066", "#eeaaee",
"#55BF3B", "#DF5353", "#7798BF", "#aaeeee"],
chart: {
backgroundColor: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0, 'rgb(96, 96, 96)'],
[1, 'rgb(16, 16, 16)']
]
},
borderWidth: 0,
borderRadius: 15,
plotBackgroundColor: null,
plotShadow: false,
plotBorderWidth: 0
},
title: {
style: {
color: '#FFF',
font: '16px Lucida Grande, Lucida Sans Unicode, Verdana, Arial, Helvetica, sans-serif'
}
},
subtitle: {
style: {
color: '#DDD',
font: '12px Lucida Grande, Lucida Sans Unicode, Verdana, Arial, Helvetica, sans-serif'
}
},
xAxis: {
gridLineWidth: 0,
lineColor: '#999',
tickColor: '#999',
labels: {
style: {
color: '#999',
fontWeight: 'bold'
}
},
title: {
style: {
color: '#AAA',
font: 'bold 12px Lucida Grande, Lucida Sans Unicode, Verdana, Arial, Helvetica, sans-serif'
}
}
},
yAxis: {
alternateGridColor: null,
minorTickInterval: null,
gridLineColor: 'rgba(255, 255, 255, .1)',
minorGridLineColor: 'rgba(255,255,255,0.07)',
lineWidth: 0,
tickWidth: 0,
labels: {
style: {
color: '#999',
fontWeight: 'bold'
}
},
title: {
style: {
color: '#AAA',
font: 'bold 12px Lucida Grande, Lucida Sans Unicode, Verdana, Arial, Helvetica, sans-serif'
}
}
},
legend: {
itemStyle: {
color: '#CCC'
},
itemHoverStyle: {
color: '#FFF'
},
itemHiddenStyle: {
color: '#333'
}
},
labels: {
style: {
color: '#CCC'
}
},
tooltip: {
backgroundColor: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0, 'rgba(96, 96, 96, .8)'],
[1, 'rgba(16, 16, 16, .8)']
]
},
borderWidth: 0,
style: {
color: '#FFF'
}
},
plotOptions: {
series: {
shadow: true
},
line: {
dataLabels: {
color: '#CCC'
},
marker: {
lineColor: '#333'
}
},
spline: {
marker: {
lineColor: '#333'
}
},
scatter: {
marker: {
lineColor: '#333'
}
},
candlestick: {
lineColor: 'white'
}
},
toolbar: {
itemStyle: {
color: '#CCC'
}
},
navigation: {
buttonOptions: {
symbolStroke: '#DDDDDD',
hoverSymbolStroke: '#FFFFFF',
theme: {
fill: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0.4, '#606060'],
[0.6, '#333333']
]
},
stroke: '#000000'
}
}
},
// scroll charts
rangeSelector: {
buttonTheme: {
fill: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0.4, '#888'],
[0.6, '#555']
]
},
stroke: '#000000',
style: {
color: '#CCC',
fontWeight: 'bold'
},
states: {
hover: {
fill: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0.4, '#BBB'],
[0.6, '#888']
]
},
stroke: '#000000',
style: {
color: 'white'
}
},
select: {
fill: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0.1, '#000'],
[0.3, '#333']
]
},
stroke: '#000000',
style: {
color: 'yellow'
}
}
}
},
inputStyle: {
backgroundColor: '#333',
color: 'silver'
},
labelStyle: {
color: 'silver'
}
},
navigator: {
handles: {
backgroundColor: '#666',
borderColor: '#AAA'
},
outlineColor: '#CCC',
maskFill: 'rgba(16, 16, 16, 0.5)',
series: {
color: '#7798BF',
lineColor: '#A6C7ED'
}
},
scrollbar: {
barBackgroundColor: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0.4, '#888'],
[0.6, '#555']
]
},
barBorderColor: '#CCC',
buttonArrowColor: '#CCC',
buttonBackgroundColor: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0.4, '#888'],
[0.6, '#555']
]
},
buttonBorderColor: '#CCC',
rifleColor: '#FFF',
trackBackgroundColor: {
linearGradient: { x1: 0, y1: 0, x2: 0, y2: 1 },
stops: [
[0, '#000'],
[1, '#333']
]
},
trackBorderColor: '#666'
},
// special colors for some of the demo examples
legendBackgroundColor: 'rgba(48, 48, 48, 0.8)',
legendBackgroundColorSolid: 'rgb(70, 70, 70)',
dataLabelsColor: '#444',
textColor: '#E0E0E0',
maskColor: 'rgba(255,255,255,0.3)'
};
// Apply the theme
var highchartsOptions = Highcharts.setOptions(Highcharts.theme);
|
PypiClean
|
/model_compression_toolkit-1.9.1.tar.gz/model_compression_toolkit-1.9.1/model_compression_toolkit/core/pytorch/default_framework_info.py
|
from torch.nn import Hardsigmoid, ReLU, ReLU6, Softmax, Sigmoid
from torch.nn.functional import hardsigmoid, relu, relu6, softmax
from torch.nn import Conv2d, ConvTranspose2d, Linear
from torch import sigmoid
from model_compression_toolkit.core.common.defaultdict import DefaultDict
from model_compression_toolkit.core.common.framework_info import FrameworkInfo, ChannelAxis
from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
from model_compression_toolkit.constants import SOFTMAX_THRESHOLD
from model_compression_toolkit.core.pytorch.constants import KERNEL
from model_compression_toolkit.core.pytorch.quantizer.fake_quant_builder import power_of_two_quantization, \
symmetric_quantization, uniform_quantization
from model_compression_toolkit.core.pytorch.quantizer.lut_fake_quant import activation_lut_kmean_quantizer
"""
Map each layer to a list of its weight attributes that should get quantized.
If a layer that is not listed here is queried, [None] is returned.
"""
KERNEL_ATTRIBUTES = DefaultDict({Conv2d: [KERNEL],
ConvTranspose2d: [KERNEL],
Linear: [KERNEL]},
lambda: [None])
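# For example (illustrative): looking up Conv2d in this mapping yields
# [KERNEL], i.e. only the layer's kernel weights are quantized, while a layer
# type that is not listed (say, a BatchNorm module) falls back to the default
# factory and yields [None].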
"""
Map a layer to its kernel's output and input channel indices.
Map's values are tuples of (output_channel_index, input_channel_index).
Default value is returned for layers that are not included.
"""
DEFAULT_CHANNEL_AXIS_DICT = DefaultDict({Conv2d: (0, 1),
Linear: (0, 1),
ConvTranspose2d: (1, 0)},
lambda: (None, None))
"""
Map a layer to its output channel axis.
Where axis=-1 is the last axis
"""
DEFAULT_OUT_CHANNEL_AXIS_DICT = DefaultDict({Conv2d: 1,
Linear: -1,
ConvTranspose2d: 1},
lambda: 1)
"""
Map from an activation function to its min/max output values (if known).
The values are used for tensor min/max values initialization.
"""
ACTIVATION2MINMAX = {} # should be an empty dict in Pytorch
"""
Map from a Pytorch module to its min/max output values (if known).
The values are used for tensor min/max values initialization.
"""
LAYER2MINMAX = {Softmax: (0, SOFTMAX_THRESHOLD),
softmax: (0, SOFTMAX_THRESHOLD),
Sigmoid: (0, 1),
sigmoid: (0, 1),
Hardsigmoid: (0, 1),
hardsigmoid: (0, 1),
ReLU: (0, None),
relu: (0, None),
ReLU6: (0, None),
relu6: (0, None)}
"""
Mapping from a QuantizationMethod to an activation quantizer function.
"""
ACTIVATION_QUANTIZER_MAPPING = {QuantizationMethod.POWER_OF_TWO: power_of_two_quantization,
QuantizationMethod.SYMMETRIC: symmetric_quantization,
QuantizationMethod.UNIFORM: uniform_quantization,
QuantizationMethod.LUT_POT_QUANTIZER: activation_lut_kmean_quantizer}
DEFAULT_PYTORCH_INFO = FrameworkInfo(ACTIVATION_QUANTIZER_MAPPING,
DEFAULT_CHANNEL_AXIS_DICT,
ACTIVATION2MINMAX,
LAYER2MINMAX,
KERNEL_ATTRIBUTES,
DEFAULT_OUT_CHANNEL_AXIS_DICT)
|
PypiClean
|
/echarts-china-counties-pypkg-0.0.2.tar.gz/echarts-china-counties-pypkg-0.0.2/echarts_china_counties_pypkg/resources/echarts-china-counties-js/2d4990b79e3145cb1c5dda1e9ea0a565.js
|
(function (root, factory) {if (typeof define === 'function' && define.amd) {define(['exports', 'echarts'], factory);} else if (typeof exports === 'object' && typeof exports.nodeName !== 'string') {factory(exports, require('echarts'));} else {factory({}, root.echarts);}}(this, function (exports, echarts) {var log = function (msg) {if (typeof console !== 'undefined') {console && console.error && console.error(msg);}};if (!echarts) {log('ECharts is not Loaded');return;}if (!echarts.registerMap) {log('ECharts Map is not loaded');return;}echarts.registerMap('昌吉市', {"type":"FeatureCollection","features":[{"type":"Feature","id":"652301","properties":{"name":"昌吉市","cp":[87.267532,44.014435],"childNum":1},"geometry":{"type":"Polygon","coordinates":["@@CA@@@AA@AAAC@@AAAA@A@A@@@@@@@A@@AAA@@@@A@@BAAA@@AA@AAAA@@A@@A@AA@@AABA@@@@@@@A@@AA@@@@AAA@@@@@@AB@@@@AA@@@@@@@B@@A@@@@@@@AA@@A@@@A@@@AB@@@@A@@@@@A@@AA@@AA@@A@A@A@@A@@AA@@A@@@@@@@@BA@@@@@@@A@A@@@@@A@@B@@@@A@A@@AA@@@@A@@@A@@BA@@A@@@AAA@AA@@@A@@@@@A@@@@A@A@@@A@ABA@A@AA@AA@AA@AA@@@A@A@AB@AA@@@@@@A@@@@@@CA@@A@A@@@A@A@AA@@@@A@@@@@A@@@A@@@@A@@@AAA@A@@AA@@AA@@C@@@A@AA@@@AAA@@@A@A@@AA@@AAA@AA@AAA@@AAA@@@@AA@@@A@@@A@@AA@C@@A@@@@BAA@@@@A@@A@AA@@@A@@@@AA@@AA@@A@@AA@@@@AA@A@@@A@@@@A@@@A@@AAA@AA@@CAA@AA@@@A@ABA@@AA@A@@@A@@@@@A@AAA@@AA@@BCA@BA@A@@@A@AB@@A@@@A@A@A@AA@BA@A@@@A@ABA@A@@AA@ABABABA@A@AB@@@@A@@@A@A@@B@BA@@D@@@DADA@@BA@@B@B@B@@@BA@ABA@@B@FAFCBAFA@@DCBAB@BA@AB@B@BC@ABC@@@AB@BA@A@A@@KMACCCEIGECKACAK@IDE@EEEGGIGECIEGGCAAA@ALQAG@AAIAQ@IAAACEAG@M@G@EE@K@ICGEGEGIEAACCCCAC@IAKAGB@A@@CAC@ACAAAAA@AA@A@AAA@A@AAA@AAAAC@AA@@AA@@A@@@@@@A@@@A@AA@@AB@@@@A@@B@AA@AACACAA@@@AA@A@@AA@A@A@@@AA@@AAAA@@CACACAA@CAAAAA@A@A@A@A@AA@@A@C@@B@B@DAH@DAB@BA@@@@B@DAB@B@BA@@BABAB@@ABAB@DAD@@@B@BAB@@@@A@@@ABGBEACCC@@KEIAKAIAEAAA@@AA@@@A@@@A@A@E@@AC@AAC@ACCA@EAEA@@GECE@@AE@@ACCAKCSCA@GASGE@EAUCQAWF[NKLMDSDOEOOIE[KGAQAWHYTKFaJSFUDaF]DcBgJ@FCB@BEDGAK@KB@BE@EDCB@HBNBFEJKHMR]\\IHGHEHEJEH@FFF@D@J@HGPCNFBFALBHHBDAJAFEDCFEFAD@B@D@DFHAFEFADAFFFJJNHDD@B@FIFKFIDIFQNCDCFAHADBFBHANCN@D@BBBDBDAHALFNBF@FDDBBDFBDDDFJP@HBLBHDDDFAFIHAHBL@LBHABAFAFADCBIFMHKHIHADAF@F@B@F@FBDAFCD@DBHFNFDFDBFHJFHFBF@FALCJ@H@DBHBHFHDBDAB@DAJ@DDDDFHDHDBBDDDDFJBDAF@BBBFDFBHDFBTDDDB@DBJBJFHDHBFDDDP@FBDDHBFFDDBLCTBNBHDBJBB@TDFDFD@DDFFDNHNDJ@FADAJCF@NBBDJDNBJBFBFFFDDDFBTFRDHDFBBBBHBFBFLLFHADADCFAD@DBBDDHFDFBFDDFFDDFBFBHHFDJFFDLRJLDFJFH@HD@@@DAFAFAFGFOLKFCFCDEJ@FBJDJFHJFJADAHADCF@JBBFDDHN@DBD@JAHBD@F@DEJCF@DGFKDGDGBABEHCLBLBF@F@@@@BD@@@@@BDJFHDDJHDBH@FAR@DBJBHDFBLCHAF@DBBBDDHFFFB@PBBB@BADAFEH@D@DDDBFHJBJDLBFDLBFBFDDFDFDXLJHF@JBLBLFJ@JAL@V@N@HBN@JAPBRBLBLBHBD@@@BB@@BD@B@B@@BB@F@@BD@B@@B@@B@@BB@B@@BB@B@B@B@B@BBB@@@@@D@@DB@B@@@@@DABBBB@@B@BA@@B@@BB@@@B@BBBBB@@@@AB@@@@BBBB@B@F@BBB@B@BBD@B@@AD@BB@BB@@@@ABA@@@@BB@B@BB@@@D@BB@BDBBFBBBDD@B@B@B@@@B@B@B@@@B@BBB@@@B@BBB@B@D@@@D@B@B@B@@AFA@@DAD@F@@@BAB@B@B@B@B@D@@@D@B@DBBED@B@DAD@DAD@B@BAB@DA@AD@D@B@DAD@B@B@BBH@BAD@@BBBBD@@@AB@B@@B@BB@@@DBB@BBBBFBB@@DDBBB@BBBBBBBB@BB@@BB@@BB@@@@B@@@@@@BBB@BBB@@@A@@@@BB@@BB@@@@@@@@BB@AB@@B@B@B@AB@B@@AB@BAB@@@B@@AB@@B@@BBB@@@@ABB@@B@@@BB@BB@BB@@@BB@@B@BBDBBDD@@@BB@@@@B@@BD@B@BBD@BBB@BBB@BBB@BBDBDBBBBB@BB@BBBDDDDD@BB@@BBBBDBD@BBBBBBBB@BB@@D@@BB@@BBBB@@@@B@B@@BB@@DFB@DB^THFFFDD@B@DCDBBABEDAHADEHFL@@B@@B@@@BDJFN@B@@BB@@@B@BDH@@BBBDDJ@@DJBDDH@D@DBBBHBD@F@DBB@B@F@B@@@@@D@@@@@@@B@@@BBDBB@B@@@BBB@D@@BB@DHPDFPFDX@HBH@@BF@@@@@B@@@@KNOT@D@FAF@B@F@B@B@FAB@D@B@DBB@BAB@B@BBFJRHR@F@Ro@E@CIIJCL@HA@CD@@EF@Q[K@I@@AEEEGAAFG@@CCFAD@@EAAACDA@EdE@@APJHJNdCPCjE\\FKVQFEBIBERMLTXXVTNPb@bAFI\\Wn}vwdR@R@T@R@T@d@R@f@R@h@x@J@BQtERERSvWCNGRAF]EPERGRERERERERGRERSvQtKhI\\AN@˞˨A@ˡBCXalXeNSPW^oV_bsN
WDCLSNUXaVa^mJQDA@AbsduVaLQ@A@A@@@AB@@A@@@A@@BA@@@@@A@ABCBA@A@CB@@@@A@ABABC@CBA@A@ABA@A@AB@@C@@@@@AB@@A@ABA@CBA@ABA@CBC@A@@BA@A@ABA@@@A@ABA@ABA@ABC@ABA@CBA@CBA@A@ABA@@@ABA@CBA@ABA@AA@@@@@@AA@@@@@@@AA@@@@@@@@@@@@@@@AA@@@@A@@@@@@@@@A@@@@@@@@@@A@@@@@@A@@@@@@@@@@@@A@@@@@@@@@@@@@@@@@B@@@@@@@@@@@@@@A@@@@@@@@@@@@@@@@@@@@@@A@@@@@@@@@B@@@@A@@@@@@@@@@@@@@@@@@AA@@@@@@@@@@B@@@@@@A@@@@@A@@@@@@@@@@@@@@A@@@@@A@@@@@@@@@@AB@@@@@@@@@@@@@@@@@@@@A@@@@@@@@@@A@@@@@@@@@@@@@@@@@@@@@@@@A@@@@@@@@@@@@@@A@@@@@@@@@@@@@@@BA@@@@@@@@@@@@@@@@@@@@@@@A@@@@@@@@@@A@@@@@@@@@@@@@@A@@@@@@@@@@@@@A@@@A@@@@@@@A@@@@@@B@@@@@@@@@@@BA@@@@@@@@@@@A@@B@@@@A@@@@@A@@@@@@@A@@@@BA@@@@@A@@@@@@@@@@@@@@@@@@A@@@@@@@@@A@@@@@@A@@@@@@@@@@@@@A@@@@@@@@@@B@@@@A@@@@@@@@@@@@@@BA@@@@@@@AB@@@@@@@@A@@@@BA@@@@@A@@BA@@@@@@@AB@@@@@@AB@@@@A@@@@@@@@@@@A@@@@B@@A@@@@@@@@@@@AB@@@@@@@@@BA@@@@@@@@B@@A@@@@B@@@@@@@@@BA@@@A@C@A@@BA@@@A@@BA@A@A@@@@@A@@@@@@@ADA@@@A@AAG@@@AGAABQ^MXW`AHEB_HEB
TAEBGBIBCBEBAAAGCACDABEBAHKDIHM@CA@K@G@k@Y@K@G@@B@H@H@DCBI@ABCDE@ECA@M@QBFaDOBAAAC@C@AAAACHEDCNAJABCDAHAH@B@BC@CD@DBBAFCFABA@EAGACACBEFED@BA@ADCBAAAECDCBC@C@ADBD@BACCCCBEDCDADBFBFABCCCAA@GEA@CDAH@H@BA@CAACBAADAFCF@ACCCABAB@BAACA@CBCHGHCDCBODIDGBEDECEAEDG@EGEACBC@I@EGEE@GCGEAG@EBEBA@C@C@E@E@E@E@A@A@ABCACAG@CBAB@BAA@AAECCA@CBABBB@@A@AEECC@AB@@AAA@AB@B@D@@AAC@A@@B@@@DA@@D@@@HELGNGTMXQNKRKHIAAC@CAAA@@C@G@CA@ADABA@@AAC@@B@@AA@CB@B@BBB@@@AC@AAAAACBA@@@@@@ABA@@@A@ACE@AFALCFAB@BA\\WJIBAFE@@@@BABA@@BADEB@B@BA@@@C@AC@BA@ABCBAA@@A@CBAB@@@D@@A@ABA@AA@@@AA@@BAB@@@BAB@@@AA@@@@B@@A@@@AC@@@@B@A@@BAB@BACACB@@AA@@@AB@B@@@@A@@@AAAA@DI@@@ABEBCAC@AAE@AA@A@@@@@AAA@@A@AAAAA@@BA@@B@B@B@@A@@AAA@@AAAAA@@@@AAB@@AB@B@@@@@BABAA@AA@@AAA@A@@@A@AA@@AA@@AB@@@BABA@@@@A@A@@@A@AF@BA@AAAAA@@B@D@@A@A@AAAA@@C@C@@@A@AA@A@AAAA@A@@AAB@BAB@@A@A@AAAFEJIB@@AJE@@AC@@II@ACCEECCBE@CBA@CGGCEAAGMIICAFERMHEBC@AAC@@B@@A@@@@@@@@@A@@@@@@@@@@@A@@@@@@@A@@@@@@@@@A@@A@@@@@@@@A@@@@@@@A@@@@@@@A@@@@@@AA@@@@@A@@@@@@@A@@@@@A@@@@A@@A@@@@@C@@@A@@A@@@@A@@@@@@@A@@@@@@@A@@@@@AA@@@@@@A@@@@@A@@@@@A@@@@A@@A@@@@@@@@A@@@@BC@KJKF@BCBA@@BCBA@IB@Ca@ABAABAB@CC@A@CCEEE@@@@A@A@@@A@@A@@E@A@C@@BA@@B@@AAA@CCAA@@AB@@KC@@KGC@GCA@@@AAA@@@CA@@@@@@@ADA@@@AA@A@A@@@AAA@CAA@AAA@@@BA@@@AA@@AG@@@A@@@@@A@BBA@A@@@@@A@@@A@@AA@@B@@AA@@@@@AA@AAA@@@A@@@A@AB@A@@@@AAC@A@@@ABCB@@AA@@A@A@@A@@CCCA@AAA@@@@AA@@A@@@@A@@A@@@@A@@A@@@AAA@A@A@@A@@A@@AA@@@A@@A@@@@@A@@@@@@@A@@@@@AA@@@@@A@@@A@@@AA@@A@@@A@@A@@A@@@@@@@@@@AA@@A@@A@@@@@@AA@@@@A@@@@A@@@@A@@A@@A@@@@@@A@@AA@@A@@@A@@@@BA@@BA@@@A@@@AA@AA@@AA@@@@@@@@AA@@@@@@@@A@@@@AA@@@A@@@A@@@AAA@@@@@@@@AA@@@@@@A@@@@@@A@@@@@@@@@@@@A@@AA@@@A@@@@@@@@@@AA@@@@A@@@@AA@@@A@@@@@@@@@@@@@B@B@B@@A@AA@A@C@CA@@A@A@@@@AA@@@A@@@AA@@AA@@A@@AA@@@A@A@@@@AA@@@@@AA@@AACA@@A@AA@@AA@@@@AAA@@@@AA@@@@@AA@@@A@@@@@A@@@@@A@@A@@A@@@@A@@A@@A@@A@@@@AA@@@@@@@@@@@@@A@@@AA@AA@@@@@AA@@@@@A@@A@@@@@@@@@AA@@@@AA@@@@A@A@@@@@@A@@A@@@@@@A@@AAA@@A@@A@@A@A@@@A@@@A@@AA@@@@@@@CA@@AAA@@@@A@@@AA@@A@@@@A@AA@@A@@@AAA@@@@@@A@@@@AA@@@@AAAA@@AA@@AA@@@@@A@@AA@@@AA@@@@AA@CG@@@@@AA@@@@AA@@@@A@@@AA@@@@@AA@@@@@AAA@AA@@A@@AA@@@A@@AA@@B@@AA@@@@A@@@A@AA@@@AAAA@@A@@@@A@CA@@@AA@@AA@@@@@AB@@@BA@@@A@A@AA@@A@@@A@@@A@@BA@A@A@A@@@A@AA@@A@@@A@ABABA@@@AB@@@@@@A@A@@B@@ABA@@@ABC@A@CB@@A@A@AB@@A@@B@@A@A@@@A@@@A@@B@BA@@@@BA@A@@@A@A@@B@@A@@@A@@@ABA@@@@@@BA@@@A@@@A@@@A@A@@@ABA@@@ABA@ABA@@@@@A@@AA@AA@@A@A@@@A@AB@@A@@@A@@@A@@@@@C@A@@@A@A@A@@BA@A@A@@@@@A@@@A@@@A@@@AB@@@@A@@@@@A@@@A@@@A@A@@@A@@@A@@AA@A@@@@@AB@@@BC@A@@@AAA@@@A@@@@AA@@@A@@@@@AB@@A@@@@@@@A@@@A@A@A@A@@@A@@@A@A@@@A@A@@@A@@@@CCA@@@@A@@@@BAB@@A@@@@@A@@@AB@@@@@@@@AA@@@A@@@@A@@@@A@@@A@@@A@@@A@@@AA@@@A@@B@@@@A@BB@@@@@BB@@B@@AB@@@@A@@A@@@@A@@@@@@@@@A@@@@A@AA@@@@AA@@@@@AA@@@@@A@A@@@@@@@@A@@A@@A@@A@@@@A@@@AA@@@@@A@@@@A@@@@@A@@@@@@@@AA@@@@@A@@@A@@@@@@@@AA@@A@@@@AAA@@@@A@@A@@A@@@AA@@@@AA@@@@@A@@@@A@@@@A@@@@A@A@@@@@@@@AB@@@@@@@@@@@@@A@A@@AA@@@AA@@@A@@@@@A@@@@@AA@@@@@A@@@@@@A@@A@@@@A@@C@@@@A@A@@CCAAAAEA@A@@@@@A@@@AA@AA@A@@AA@@AAAA@A@@AC@AA@@@A@@AA@@AAAAAEECCA@@A@A@ACAAAA@AA@@@@@AAC@C@@CA@ACIAA@E@A@@AAAA@A@A@ACA@AA@@A@AAAAA@@@AAA@@CA@AA@@AAA@AAA@A@A@CAE@@CA@@@@@AA@@AE@@AAAA@EAEACA@@EA@A@A@@A@A@A@A@@AAA@AIC@@AAAAA@A@AA@@AAAAAA@@@C@A@@A@AAAAAA@A@AA@@@@AA@@@@@AA@@@A@@@AA@@@A@@@AA@@AAA@@A@AA@AA@@AA@A@@@AA@@A@@A@@A@@@AAA@@AAAA@@@@EA@@A@@A@@@A@A@AA@@AA@@A@A@@@@@A@@@A@@@@@@AA@@@@AAA@@A@@@AA@@@@A@@A@@@@@@@AA@A@@@AA@@A@@@@AA@@B@@AA@@@AA@AA@AAAA@@AA@@AA@@@AAAAA@AA@@A@@@@@AA@@A@@A@AAAAAACA@A@@@A@@AA@@AA@@A@@A@@AA@@@@@@@A@@@@AAA@@@@@AAA@@AA@@AA@@A@AA@@@@A@@AA@@AA@@AAAA@@AAA@@@@@CA@@AA@@AAC@A@AA@AA@@@AA@@ABA@AAA@@@CA@@AA@@A@@A@@A@@@@AA@@@@@@A@@@@A@@@@@A@@@A@@@@AA@@@@AA@@@@@A@@A@@A@AA@@@@@@AA@@A@@@@@
A@AAA@A@@A@@@@@@AA@@@@A@A@@@@@A@@@@@A@@@@AA@@@@@A@A@@@@@AA@@AA@@A@AA@@A@@@AA@@A@@@AA@@A@@A@@@@CA@@AA@@@@@@@AA@@@AA@@@@@A@@A@AA@@AA@@AA@@@@@AA@A@@@A@AA@@@@@A@@@@A@AAA@@@AAA@@@@@A@@AA@@@@AA@@@@@@@@A@@@@@@A@@@A@@@@@AA@@A@A@@@@A@@A@@A@@@@A@CA@@C@@AA@@A@@A@AA@@@AA@@AA@@@A@A@@@@A@@@@A@@@AA@@A@@A@@@@@@@AA@@@@@A@@@AA@@AA@@A@@@A@@A@@B@@A@@@AA@@@AA@@A@@@@@AAAA@AEA@A@@AC@CAE@A@C@@@A@AAAA@@A@AAABA@A@@@AAA@A"],"encodeOffsets":[[89081,44555]]}}],"UTF8Encoding":true});}));
|
PypiClean
|
/jira-cache-0.8.3.tar.gz/jira-cache-0.8.3/README.rst
|
==========
jira-cache
==========
Dump Jira issues to file and reload them without connection to the original server.
Quickstart
----------
Dump issues::
from jira import JIRA
from jira_cache import CachedIssues
jira = JIRA('https://jira.atlassian.com/')
result = jira.search_issues('project=JRA and text ~ "Python"')
cached = CachedIssues(result)
cached.dump(open('python-issues.json', 'w'))
Loading from file::
from jira import JIRA
from jira_cache import CachedIssues
result = CachedIssues.load(open('python-issues.json'))
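The reloaded ``result`` can then be used offline. Assuming the cached fields
survive the round trip, something like the following should work::
for issue in result:
    print(issue.key, issue.fields.summary)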
Installation
------------
Simply run ::
pip install jira-cache
Contributing
------------
Please do! I would love for this to be developed further by anyone who is interested. Wherever possible, please
provide unit tests for your work (yes, this is very much a 'do as I say, not as I do' kind of moment).
Don't forget to add your name to AUTHORS.
License
-------
Copyright (c) 2016 Sebastian Rahlf
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
PypiClean
|
/nik2img-0.8.0.tar.gz/nik2img-0.8.0/README.txt
|
nik2img
=======
Generate Mapnik graphics from the command line
Description
===========
Use nik2img to interact with the Mapnik C++/Python mapping toolkit from the command line.
Requirements
============
Mapnik >=0.6.0
Tests only work with Mapnik >= 0.7.2
Installing nik2img
==================
To run this program::
* Make sure you have Mapnik installed (https://trac.mapnik.org/wiki/MapnikInstallation)
* Then install nik2img
* See INSTALL.txt for details
For more info see: http://code.google.com/p/mapnik-utils/wiki/Nik2Img
Troubleshooting
===============
Post issues you encounter at http://code.google.com/p/mapnik-utils/issues/list
See also
========
http://code.google.com/p/mapnik-utils/
http://mapnik.org/
http://trac.mapnik.org/
|
PypiClean
|
/jupyterlab_remote_contents-0.1.1.tar.gz/jupyterlab_remote_contents-0.1.1/node_modules/caniuse-lite/data/regions/SL.js
|
module.exports={C:{"30":0.00225,"33":0.00225,"35":0.01578,"43":0.00902,"45":0.00676,"47":0.00225,"57":0.00451,"61":0.00225,"78":0.00451,"80":0.00225,"87":0.00451,"91":0.00902,"94":0.00225,"95":0.03156,"96":0.41474,"97":0.73706,"98":0.06311,_:"2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 31 32 34 36 37 38 39 40 41 42 44 46 48 49 50 51 52 53 54 55 56 58 59 60 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 79 81 82 83 84 85 86 88 89 90 92 93 99 3.5 3.6"},D:{"34":0.02029,"37":0.01127,"43":0.01578,"44":0.00225,"46":0.00676,"48":0.00225,"49":0.01803,"53":0.00676,"55":0.00451,"56":0.00451,"57":0.01803,"60":0.02705,"62":0.00225,"63":0.00225,"64":0.0293,"65":0.00902,"67":0.00676,"68":0.00225,"69":0.00902,"72":0.2547,"73":0.00676,"74":0.00676,"75":0.02029,"76":0.01803,"77":0.02479,"79":0.01352,"80":0.00902,"81":0.00902,"83":0.02029,"84":0.01352,"85":0.00451,"86":0.02029,"87":0.0293,"88":0.01803,"89":0.01803,"90":0.00676,"91":0.02254,"92":0.02029,"93":0.03832,"94":0.02479,"95":0.01803,"96":0.18934,"97":3.01811,"98":5.74319,"99":0.01352,"100":0.00225,_:"4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 35 36 38 39 40 41 42 45 47 50 51 52 54 58 59 61 66 70 71 78 101"},F:{"20":0.00451,"31":0.00225,"38":0.00225,"42":0.01127,"48":0.00225,"60":0.00676,"62":0.00451,"64":0.00451,"66":0.00225,"67":0.00225,"79":0.01352,"80":0.00225,"81":0.00902,"82":0.1961,"83":0.70776,_:"9 11 12 15 16 17 18 19 21 22 23 24 25 26 27 28 29 30 32 33 34 35 36 37 39 40 41 43 44 45 46 47 49 50 51 52 53 54 55 56 57 58 63 65 68 69 70 71 72 73 74 75 76 77 78 9.5-9.6 10.5 10.6 11.1 11.5 11.6 12.1","10.0-10.1":0},B:{"12":0.07664,"13":0.04057,"14":0.03381,"15":0.02254,"16":0.04057,"17":0.01352,"18":0.11045,"80":0.00451,"84":0.01803,"85":0.01352,"87":0.01127,"88":0.00451,"89":0.01803,"90":0.01578,"91":0.01578,"92":0.06762,"93":0.01803,"94":0.01352,"95":0.0293,"96":0.11045,"97":0.51166,"98":1.65894,_:"79 81 83 86"},E:{"4":0,"13":0.00451,"14":0.02705,"15":0.01127,_:"0 5 6 7 8 9 10 11 12 3.1 3.2 6.1 10.1 15.4","5.1":0.00451,"7.1":0.00225,"9.1":0.00225,"11.1":0.01578,"12.1":0.00451,"13.1":0.01578,"14.1":0.23442,"15.1":0.0586,"15.2-15.3":0.05184},G:{"8":0.00207,"3.2":0,"4.0-4.1":0,"4.2-4.3":0.00069,"5.0-5.1":0,"6.0-6.1":0.00207,"7.0-7.1":0.00829,"8.1-8.4":0,"9.0-9.2":0,"9.3":0.09815,"10.0-10.2":0.00415,"10.3":0.03456,"11.0-11.2":0.03387,"11.3-11.4":0.01313,"12.0-12.1":0.06013,"12.2-12.5":0.59236,"13.0-13.1":0.05806,"13.2":0.03387,"13.3":0.1258,"13.4-13.7":0.18455,"14.0-14.4":1.16744,"14.5-14.8":1.51165,"15.0-15.1":1.43078,"15.2-15.3":1.54483,"15.4":0.00207},P:{"4":0.1852,"5.0-5.4":0.01029,"6.2-6.4":0.07216,"7.2-7.4":0.07202,"8.2":0.03042,"9.2":0.03087,"10.1":0.01029,"11.1-11.2":0.05144,"12.0":0.16495,"13.0":0.03087,"14.0":0.10289,"15.0":0.11318,"16.0":0.80254},I:{"0":0,"3":0,"4":0,"2.1":0,"2.2":0,"2.3":0,"4.1":0.0012,"4.2-4.3":0.00329,"4.4":0,"4.4.3-4.4.4":0.04972},A:{"10":0.02058,"11":0.18228,_:"6 7 8 9 5.5"},K:{_:"0 10 11 12 11.1 11.5 12.1"},J:{"7":0,"10":0.01549},N:{"10":0.04242,"11":0.03845},R:{_:"0"},M:{"0":0.12392},Q:{"10.4":0},O:{"0":1.79684},H:{"0":18.71978},L:{"0":52.91197},S:{"2.5":0.02324}};
|
PypiClean
|
/bpy_2.79-1.0.0-py3-none-manylinux2014_x86_64.whl/bpy/2.79/scripts/addons/sequencer_kinoraw_tools/recursive_loader.py
|
import bpy
import os
from bpy.types import (
Operator,
Panel,
)
from bpy.props import (
EnumProperty,
BoolProperty,
)
from . import functions
from . import exiftool
class Sequencer_Extra_RecursiveLoader(Operator):
bl_idname = "sequencerextra.recursiveload"
bl_label = "Recursive Load"
bl_options = {'REGISTER', 'UNDO'}
recursive = BoolProperty(
name="Recursive",
description="Load in recursive folders",
default=False
)
recursive_select_by_extension = BoolProperty(
name="Select by extension",
description="Load only clips with selected extension",
default=False
)
ext = EnumProperty(
items=functions.movieextdict,
name="Extension",
default='3'
)
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor:
return (scn.sequence_editor)
else:
return False
def invoke(self, context, event):
scn = context.scene
try:
self.recursive = scn.kr_recursive
self.recursive_select_by_extension = scn.kr_recursive_select_by_extension
self.ext = scn.kr_default_ext
except AttributeError:
functions.initSceneProperties(context)
self.recursive = scn.kr_recursive
self.recursive_select_by_extension = scn.kr_recursive_select_by_extension
self.ext = scn.kr_default_ext
return context.window_manager.invoke_props_dialog(self)
def loader(self, context, filelist):
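        # filelist is expected (see execute() below) to be a sorted list of
        # (directory, filename) pairs produced by functions.recursive() or
        # functions.onefolder(); each entry is selected in the file browser
        # and then placed into the sequencer.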
scn = context.scene
if filelist:
for i in filelist:
functions.setpathinbrowser(context, i[0], i[1])
try:
bpy.ops.sequencerextra.placefromfilebrowser()
except:
print("Error loading file (recursive loader error): ", i[1])
functions.add_marker(context, i[1], scn.frame_current)
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file ')
pass
def execute(self, context):
scn = context.scene
if self.recursive is True:
# recursive
self.loader(
context, functions.sortlist(
functions.recursive(context, self.recursive_select_by_extension,
self.ext)
)
)
else:
# non recursive
self.loader(
context, functions.sortlist(functions.onefolder(
context, self.recursive_select_by_extension,
self.ext)
)
)
try:
scn.kr_recursive = self.recursive
scn.kr_recursive_select_by_extension = self.recursive_select_by_extension
scn.kr_default_ext = self.ext
except AttributeError:
functions.initSceneProperties(context)
self.recursive = scn.kr_recursive
self.recursive_select_by_extension = scn.kr_recursive_select_by_extension
self.ext = scn.kr_default_ext
return {'FINISHED'}
# Read exif data
# load exifdata from strip to scene['metadata'] property
class Sequencer_Extra_ReadExifData(Operator):
bl_label = "Read EXIF Data"
bl_idname = "sequencerextra.read_exif"
bl_description = "Load exifdata from strip to metadata property in scene"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return scn.sequence_editor.active_strip.type in ('IMAGE', 'MOVIE')
else:
return False
def execute(self, context):
try:
exiftool.ExifTool().start()
except:
self.report({'ERROR_INVALID_INPUT'}, "exiftool not found in PATH")
return {'CANCELLED'}
def getexifdata(strip):
def getexifvalues_image(lista):
metadata = []
with exiftool.ExifTool() as et:
try:
metadata = et.get_metadata_batch(lista)
except UnicodeDecodeError as Err:
print(Err)
# print(metadata[0])
print(len(metadata))
return metadata
def getexifvalues_movie(path):
metadata = []
with exiftool.ExifTool() as et:
try:
metadata = et.get_metadata_batch([path])
except UnicodeDecodeError as Err:
print(Err)
print(metadata[0])
print(len(metadata))
return metadata
def getlist(lista):
for root, dirs, files in os.walk(path):
for f in files:
if "." + f.rpartition(".")[2].lower() in \
functions.imb_ext_image:
lista.append(f)
"""
if "." + f.rpartition(".")[2] in imb_ext_movie:
lista.append(f)
"""
strip.elements
lista.sort()
return lista
if strip.type == "IMAGE":
path = bpy.path.abspath(strip.directory)
os.chdir(path)
# get a list of files
lista = []
for i in strip.elements:
lista.append(i.filename)
print(lista)
return getexifvalues_image(lista)
if strip.type == "MOVIE":
path = bpy.path.abspath(strip.filepath)
print([path])
return getexifvalues_movie(path)
sce = bpy.context.scene
strip = context.scene.sequence_editor.active_strip
sce['metadata'] = getexifdata(strip)
return {'FINISHED'}
# TODO: fix poll to hide when unuseful
class ExifInfoPanel(Panel):
"""Creates a Panel in the Object properties window"""
bl_label = "EXIF Info Panel"
bl_space_type = 'SEQUENCE_EDITOR'
bl_region_type = 'UI'
@classmethod
def poll(self, context):
if context.space_data.view_type in {'SEQUENCER', 'SEQUENCER_PREVIEW'}:
strip = functions.act_strip(context)
scn = context.scene
preferences = context.user_preferences
prefs = preferences.addons[__package__].preferences
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
if prefs.use_exif_panel:
return strip.type in ('MOVIE', 'IMAGE')
else:
return False
def draw_header(self, context):
layout = self.layout
layout.label(text="", icon="RADIO")
def draw(self, context):
layout = self.layout
sce = context.scene
row = layout.row()
row.operator("sequencerextra.read_exif")
row = layout.row()
row.label(text="Exif Data", icon='RENDER_REGION')
row = layout.row()
try:
strip = context.scene.sequence_editor.active_strip
f = strip.frame_start
frame = sce.frame_current
try:
if len(sce['metadata']) == 1:
for d in sce['metadata'][0]:
split = layout.split(percentage=0.5)
col = split.column()
row = col.row()
col.label(text=d)
col = split.column()
col.label(str(sce['metadata'][0][d]))
else:
for d in sce['metadata'][frame - f]:
split = layout.split(percentage=0.5)
col = split.column()
row = col.row()
col.label(text=d)
col = split.column()
col.label(str(sce['metadata'][frame - f][d]))
except (IndexError, KeyError):
pass
except AttributeError:
pass
|
PypiClean
|
/pulumi_gcp-6.65.0a1693462587.tar.gz/pulumi_gcp-6.65.0a1693462587/pulumi_gcp/appengine/application_url_dispatch_rules.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ApplicationUrlDispatchRulesArgs', 'ApplicationUrlDispatchRules']
@pulumi.input_type
class ApplicationUrlDispatchRulesArgs:
def __init__(__self__, *,
dispatch_rules: pulumi.Input[Sequence[pulumi.Input['ApplicationUrlDispatchRulesDispatchRuleArgs']]],
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ApplicationUrlDispatchRules resource.
:param pulumi.Input[Sequence[pulumi.Input['ApplicationUrlDispatchRulesDispatchRuleArgs']]] dispatch_rules: Rules to match an HTTP request and dispatch that request to a service.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
pulumi.set(__self__, "dispatch_rules", dispatch_rules)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="dispatchRules")
def dispatch_rules(self) -> pulumi.Input[Sequence[pulumi.Input['ApplicationUrlDispatchRulesDispatchRuleArgs']]]:
"""
Rules to match an HTTP request and dispatch that request to a service.
Structure is documented below.
"""
return pulumi.get(self, "dispatch_rules")
@dispatch_rules.setter
def dispatch_rules(self, value: pulumi.Input[Sequence[pulumi.Input['ApplicationUrlDispatchRulesDispatchRuleArgs']]]):
pulumi.set(self, "dispatch_rules", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _ApplicationUrlDispatchRulesState:
def __init__(__self__, *,
dispatch_rules: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationUrlDispatchRulesDispatchRuleArgs']]]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ApplicationUrlDispatchRules resources.
:param pulumi.Input[Sequence[pulumi.Input['ApplicationUrlDispatchRulesDispatchRuleArgs']]] dispatch_rules: Rules to match an HTTP request and dispatch that request to a service.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
if dispatch_rules is not None:
pulumi.set(__self__, "dispatch_rules", dispatch_rules)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="dispatchRules")
def dispatch_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationUrlDispatchRulesDispatchRuleArgs']]]]:
"""
Rules to match an HTTP request and dispatch that request to a service.
Structure is documented below.
"""
return pulumi.get(self, "dispatch_rules")
@dispatch_rules.setter
def dispatch_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationUrlDispatchRulesDispatchRuleArgs']]]]):
pulumi.set(self, "dispatch_rules", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
class ApplicationUrlDispatchRules(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dispatch_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationUrlDispatchRulesDispatchRuleArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Rules to match an HTTP request and dispatch that request to a service.
To get more information about ApplicationUrlDispatchRules, see:
* [API documentation](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps#UrlDispatchRule)
## Example Usage
### App Engine Application Url Dispatch Rules Basic
```python
import pulumi
import pulumi_gcp as gcp
bucket = gcp.storage.Bucket("bucket", location="US")
object = gcp.storage.BucketObject("object",
bucket=bucket.name,
source=pulumi.FileAsset("./test-fixtures/hello-world.zip"))
admin_v3 = gcp.appengine.StandardAppVersion("adminV3",
version_id="v3",
service="admin",
runtime="nodejs10",
entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
shell="node ./app.js",
),
deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda args: f"https://storage.googleapis.com/{args[0]}/{args[1]}"),
),
),
env_variables={
"port": "8080",
},
delete_service_on_destroy=True)
web_service = gcp.appengine.ApplicationUrlDispatchRules("webService", dispatch_rules=[
gcp.appengine.ApplicationUrlDispatchRulesDispatchRuleArgs(
domain="*",
path="/*",
service="default",
),
gcp.appengine.ApplicationUrlDispatchRulesDispatchRuleArgs(
domain="*",
path="/admin/*",
service=admin_v3.service,
),
])
```
## Import
ApplicationUrlDispatchRules can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:appengine/applicationUrlDispatchRules:ApplicationUrlDispatchRules default {{project}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationUrlDispatchRulesDispatchRuleArgs']]]] dispatch_rules: Rules to match an HTTP request and dispatch that request to a service.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApplicationUrlDispatchRulesArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Rules to match an HTTP request and dispatch that request to a service.
To get more information about ApplicationUrlDispatchRules, see:
* [API documentation](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps#UrlDispatchRule)
## Example Usage
### App Engine Application Url Dispatch Rules Basic
```python
import pulumi
import pulumi_gcp as gcp
bucket = gcp.storage.Bucket("bucket", location="US")
object = gcp.storage.BucketObject("object",
bucket=bucket.name,
source=pulumi.FileAsset("./test-fixtures/hello-world.zip"))
admin_v3 = gcp.appengine.StandardAppVersion("adminV3",
version_id="v3",
service="admin",
runtime="nodejs10",
entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
shell="node ./app.js",
),
deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda args: f"https://storage.googleapis.com/{args[0]}/{args[1]}"),
),
),
env_variables={
"port": "8080",
},
delete_service_on_destroy=True)
web_service = gcp.appengine.ApplicationUrlDispatchRules("webService", dispatch_rules=[
gcp.appengine.ApplicationUrlDispatchRulesDispatchRuleArgs(
domain="*",
path="/*",
service="default",
),
gcp.appengine.ApplicationUrlDispatchRulesDispatchRuleArgs(
domain="*",
path="/admin/*",
service=admin_v3.service,
),
])
```
## Import
ApplicationUrlDispatchRules can be imported using any of these accepted formats:
```sh
$ pulumi import gcp:appengine/applicationUrlDispatchRules:ApplicationUrlDispatchRules default {{project}}
```
:param str resource_name: The name of the resource.
:param ApplicationUrlDispatchRulesArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApplicationUrlDispatchRulesArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dispatch_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationUrlDispatchRulesDispatchRuleArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApplicationUrlDispatchRulesArgs.__new__(ApplicationUrlDispatchRulesArgs)
if dispatch_rules is None and not opts.urn:
raise TypeError("Missing required property 'dispatch_rules'")
__props__.__dict__["dispatch_rules"] = dispatch_rules
__props__.__dict__["project"] = project
super(ApplicationUrlDispatchRules, __self__).__init__(
'gcp:appengine/applicationUrlDispatchRules:ApplicationUrlDispatchRules',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
dispatch_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationUrlDispatchRulesDispatchRuleArgs']]]]] = None,
project: Optional[pulumi.Input[str]] = None) -> 'ApplicationUrlDispatchRules':
"""
Get an existing ApplicationUrlDispatchRules resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationUrlDispatchRulesDispatchRuleArgs']]]] dispatch_rules: Rules to match an HTTP request and dispatch that request to a service.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ApplicationUrlDispatchRulesState.__new__(_ApplicationUrlDispatchRulesState)
__props__.__dict__["dispatch_rules"] = dispatch_rules
__props__.__dict__["project"] = project
return ApplicationUrlDispatchRules(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dispatchRules")
def dispatch_rules(self) -> pulumi.Output[Sequence['outputs.ApplicationUrlDispatchRulesDispatchRule']]:
"""
Rules to match an HTTP request and dispatch that request to a service.
Structure is documented below.
"""
return pulumi.get(self, "dispatch_rules")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
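# A minimal usage sketch, not part of the generated provider code: it shows how
# the static `get` method above could adopt existing dispatch rules. The logical
# name and project ID below are hypothetical placeholders.
def _example_lookup_dispatch_rules() -> 'ApplicationUrlDispatchRules':
    # For this resource the provider ID is the project ID.
    return ApplicationUrlDispatchRules.get(
        "existing-rules",
        id="my-example-project",
        project="my-example-project")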
|
PypiClean
|
/witheppy-0.1.8.tar.gz/witheppy-0.1.8/docs/witheppy.eppyhelpers.rst
|
witheppy.eppyhelpers package
============================
Submodules
----------
witheppy.eppyhelpers.extfields module
-------------------------------------
.. automodule:: witheppy.eppyhelpers.extfields
:members:
:undoc-members:
:show-inheritance:
witheppy.eppyhelpers.geometry module
------------------------------------
.. automodule:: witheppy.eppyhelpers.geometry
:members:
:undoc-members:
:show-inheritance:
witheppy.eppyhelpers.hvac module
--------------------------------
.. automodule:: witheppy.eppyhelpers.hvac
:members:
:undoc-members:
:show-inheritance:
witheppy.eppyhelpers.iddhelpers module
--------------------------------------
.. automodule:: witheppy.eppyhelpers.iddhelpers
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: witheppy.eppyhelpers
:members:
:undoc-members:
:show-inheritance:
|
PypiClean
|
/FreeClimb-4.5.0-py3-none-any.whl/freeclimb/model/message_result_all_of.py
|
import re # noqa: F401
import sys # noqa: F401
from freeclimb.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from freeclimb.exceptions import ApiAttributeError
def lazy_import():
from freeclimb.model.message_status import MessageStatus
globals()['MessageStatus'] = MessageStatus
class MessageResultAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute;
for a top-level attribute var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute;
for a top-level attribute var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'account_id': (str, none_type,), # noqa: E501
'message_id': (str, none_type,), # noqa: E501
'status': (MessageStatus,), # noqa: E501
'_from': (str, none_type,), # noqa: E501
'to': (str, none_type,), # noqa: E501
'text': (str, none_type,), # noqa: E501
'direction': (str, none_type,), # noqa: E501
'notification_url': (str, none_type,), # noqa: E501
'brand_id': (str, none_type,), # noqa: E501
'campaign_id': (str, none_type,), # noqa: E501
'segment_count': (float, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'account_id': 'accountId', # noqa: E501
'message_id': 'messageId', # noqa: E501
'status': 'status', # noqa: E501
'_from': 'from', # noqa: E501
'to': 'to', # noqa: E501
'text': 'text', # noqa: E501
'direction': 'direction', # noqa: E501
'notification_url': 'notificationUrl', # noqa: E501
'brand_id': 'brandId', # noqa: E501
'campaign_id': 'campaignId', # noqa: E501
'segment_count': 'segmentCount', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""MessageResultAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
account_id (str, none_type): String that uniquely identifies this account resource. [optional] # noqa: E501
message_id (str, none_type): String that uniquely identifies this message resource. [optional] # noqa: E501
status (MessageStatus): [optional] # noqa: E501
_from (str, none_type): Phone number in E.164 format that sent the message. [optional] # noqa: E501
to (str, none_type): Phone number in E.164 format that received the message. [optional] # noqa: E501
text (str, none_type): Message contents. [optional] # noqa: E501
direction (str, none_type): Noting whether the message was inbound or outbound. [optional] # noqa: E501
notification_url (str, none_type): URL invoked when message sent. [optional] # noqa: E501
brand_id (str, none_type): The unique identifier for the brand associated with the message. [optional] # noqa: E501
campaign_id (str, none_type): The unique identifier for the campaign associated with the message. [optional] # noqa: E501
segment_count (float, none_type): The number of segments into which the message was split. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""MessageResultAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
account_id (str, none_type): String that uniquely identifies this account resource. [optional] # noqa: E501
message_id (str, none_type): String that uniquely identifies this message resource. [optional] # noqa: E501
status (MessageStatus): [optional] # noqa: E501
_from (str, none_type): Phone number in E.164 format that sent the message. [optional] # noqa: E501
to (str, none_type): Phone number in E.164 format that received the message. [optional] # noqa: E501
text (str, none_type): Message contents. [optional] # noqa: E501
direction (str, none_type): Noting whether the message was inbound or outbound. [optional] # noqa: E501
notification_url (str, none_type): URL invoked when message sent. [optional] # noqa: E501
brand_id (str, none_type): The unique identifier for the brand associated with the message. [optional] # noqa: E501
campaign_id (str, none_type): The unique identifier for the campaign associated with the message. [optional] # noqa: E501
segment_count (float, none_type): The number of segments into which the message was split. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
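# A minimal usage sketch, not part of the generated model: it constructs the
# model from pythonic keyword arguments as accepted by `__init__` above. Every
# field value below is a hypothetical placeholder.
def _example_message_result() -> 'MessageResultAllOf':
    return MessageResultAllOf(
        account_id="ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
        message_id="SMxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
        _from="+15551234567",
        to="+15557654321",
        text="hello",
        direction="outbound")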
|
PypiClean
|
/tf_gpu-2.11.0.2301-cp38-cp38-manylinux2014_x86_64.whl/tensorflow/python/keras/engine/base_layer_v1.py
|
"""Contains the base Layer class, from which all layers inherit."""
import collections
import functools
import itertools
import threading
import warnings
import numpy as np
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.mixed_precision import autocast_variable
from tensorflow.python.keras.mixed_precision import loss_scale_optimizer
from tensorflow.python.keras.mixed_precision import policy
from tensorflow.python.keras.saving.saved_model import layer_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import object_identity
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils import tf_utils
# A module that only depends on `keras.layers` import these from here.
from tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging
from tensorflow.python.trackable import autotrackable
from tensorflow.python.trackable import base as trackable
from tensorflow.python.trackable import data_structures
from tensorflow.python.util import nest
from tensorflow.tools.docs import doc_controls
# pylint: disable=g-classes-have-attributes
class Layer(base_layer.Layer):
"""Base layer class.
This is the class from which all layers inherit.
A layer is a class implementing common neural network operations, such
as convolution, batch norm, etc. These operations require managing weights,
losses, updates, and inter-layer connectivity.
Users will just instantiate a layer and then treat it as a callable.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Save configuration in member variables
* `build()`: Called once from `__call__`, when we know the shapes of inputs
and `dtype`. Should have the calls to `add_weight()`, and then
call the super's `build()` (which sets `self.built = True`, which is
nice in case the user wants to call `build()` manually before the
first `__call__`).
* `call()`: Called in `__call__` after making sure `build()` has been called
once. Should actually perform the logic of applying the layer to the
input tensors (which should be passed in as the first argument).
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights (default of
`None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type
of the first input in TensorFlow 1).
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Attributes:
name: The name of the layer (string).
dtype: The dtype of the layer's computations and weights. If mixed
precision is used with a `tf.keras.mixed_precision.Policy`, this is
instead just the dtype of the layer's weights, as the computations are
done in a different dtype.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
Each layer has a dtype, which is typically the dtype of the layer's
computations and variables. A layer's dtype can be queried via the
`Layer.dtype` property. The dtype is specified with the `dtype` constructor
argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`
if no dtype is passed. `floatx()` itself defaults to "float32". Additionally,
layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed
precision is used, layers may have different computation and variable dtypes.
See `tf.keras.mixed_precision.Policy` for details on layer dtypes.
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
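# A minimal sketch, not part of the original file, of the subclassing pattern
# recommended in the class docstring above: `__init__` saves configuration,
# `build` creates weights once input shapes are known, and `call` applies the
# layer's logic. The class below is hypothetical.
#
#   class SimpleDense(Layer):
#     def __init__(self, units, **kwargs):
#       super(SimpleDense, self).__init__(**kwargs)
#       self.units = units
#     def build(self, input_shape):
#       self.kernel = self.add_weight(
#           'kernel', shape=(int(input_shape[-1]), self.units))
#       super(SimpleDense, self).build(input_shape)
#     def call(self, inputs):
#       return math_ops.matmul(inputs, self.kernel)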
@trackable.no_automatic_dependency_tracking
def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
**kwargs):
self._instrument_layer_creation()
# These properties should be set by the user via keyword arguments.
# note that 'dtype', 'input_shape' and 'batch_input_shape'
# are only applicable to input layers: do not pass these keywords
# to non-input layers.
allowed_kwargs = {
'input_dim', 'input_shape', 'batch_input_shape', 'batch_size',
'weights', 'activity_regularizer', 'autocast', 'implementation'
}
# Validate optional keyword arguments.
generic_utils.validate_kwargs(kwargs, allowed_kwargs)
# Mutable properties
# Indicates whether the layer's weights are updated during training
# and whether the layer's updates are run during training.
self._trainable = trainable
# A stateful layer is a layer whose updates are run during inference too,
# for instance stateful RNNs.
self._stateful = False
# Indicates whether `build` needs to be called upon layer call, to create
# the layer's weights.
self.built = False
self._build_input_shape = None
# Provides information about which inputs are compatible with the layer.
self._input_spec = None
self.supports_masking = False
self._init_set_name(name)
self._activity_regularizer = regularizers.get(
kwargs.pop('activity_regularizer', None))
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
self._updates = []
# Object to store all thread local layer properties.
self._thread_local = threading.local()
# A list of zero-argument lambdas which return Tensors, used for variable
# regularizers.
self._callable_losses = []
# A list of symbolic Tensors containing activity regularizers and losses
# manually added through `add_loss` in graph-building mode.
self._losses = []
# A list of metric instances corresponding to the symbolic metric tensors
# added using the `add_metric` API.
self._metrics = []
# Both graph and subclassed networks have a dtype policy. For graph
# networks, the policy's compute and variable dtypes are ignored. Such
# networks only use the policy if it is a PolicyV1, in which case it uses
# the PolicyV1's loss_scale (Policy does not have a loss_scale). For
# subclassed networks, the compute and variable dtypes are used as like any
# ordinary layer.
self._set_dtype_policy(dtype)
# Boolean indicating whether the layer automatically casts its inputs to the
# layer's compute_dtype.
self._autocast = kwargs.get('autocast',
base_layer_utils.v2_dtype_behavior_enabled())
# Dependencies tracked via attribute assignment.
# All layers in order of horizontal graph traversal.
# Entries are unique. For models includes input and output layers.
self._maybe_create_attribute('_self_tracked_trackables', [])
# These lists will be filled via successive calls
# to self._add_inbound_node().
# Used in symbolic mode only, only in conjunction with graph-networks
self._inbound_nodes_value = []
self._outbound_nodes_value = []
self._init_call_fn_args()
# Whether the `call` method can be used to build a TF graph without issues.
# This attribute has no effect if the model is created using the Functional
# API. Instead, `model.dynamic` is determined based on the internal layers.
self._dynamic = dynamic
# Manage input shape information if passed.
if 'input_dim' in kwargs and 'input_shape' not in kwargs:
# Backwards compatibility: alias 'input_dim' to 'input_shape'.
kwargs['input_shape'] = (kwargs['input_dim'],)
if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
# In this case we will later create an input layer
# to insert before the current layer
if 'batch_input_shape' in kwargs:
batch_input_shape = tuple(kwargs['batch_input_shape'])
elif 'input_shape' in kwargs:
if 'batch_size' in kwargs:
batch_size = kwargs['batch_size']
else:
batch_size = None
batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
self._batch_input_shape = batch_input_shape
# Manage initial weight values if passed.
self._initial_weights = kwargs.get('weights', None)
# Whether the layer will track any layers that are set as attributes on itself
# as sub-layers; the weights from the sub-layers will be included in the
# parent layer's variables() as well.
# Defaults to True, which means auto tracking is turned on. Certain subclasses
# might want to turn it off, like the Sequential model.
self._auto_track_sub_layers = True
# Mark this layer as having been originally built as a tf1 layer/model
self._originally_built_as_v1 = True
# For backwards compat reasons, most built-in layers do not guarantee
# that they will 100% preserve the structure of input args when saving
# / loading configs. E.g. they may un-nest an arg that is
# a list with one element.
self._preserve_input_structure_in_config = False
@trackable.no_automatic_dependency_tracking
@generic_utils.default
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call.
This is typically used to create the weights of `Layer` subclasses.
Args:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
if not hasattr(self.build, '_is_default'):
self._build_input_shape = input_shape
self.built = True
@doc_controls.for_subclass_implementers
def call(self, inputs, **kwargs): # pylint: disable=unused-argument
"""This is where the layer's logic lives.
Args:
inputs: Input tensor, or list/tuple of input tensors.
**kwargs: Additional keyword arguments.
Returns:
A tensor or list/tuple of tensors.
"""
return inputs
@doc_controls.for_subclass_implementers
def _add_trackable(self, trackable_object, trainable):
"""Adds a Trackable object to this layer's state.
Args:
trackable_object: The tf.tracking.Trackable object to add.
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases) or
"non_trainable_variables" (e.g. BatchNorm mean and variance).
Returns:
The TrackableWeightHandler used to track this object.
"""
if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler):
handler = trackable_object
else:
handler = base_layer_utils.TrackableWeightHandler(trackable_object)
if trainable:
self._trainable_weights.append(handler)
else:
self._non_trainable_weights.append(handler)
return handler
@doc_controls.for_subclass_implementers
def add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
partitioner=None,
use_resource=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
**kwargs):
"""Adds a new variable to the layer.
Args:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: Initializer instance (callable).
regularizer: Regularizer instance (callable).
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean and variance).
Note that `trainable` cannot be `True` if `synchronization`
is set to `ON_READ`.
constraint: Constraint instance (callable).
partitioner: Partitioner to be passed to the `Trackable` API.
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
**kwargs: Additional keyword arguments. Accepted values are `getter`,
`collections`, `experimental_autocast` and `caching_device`.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
ValueError: When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as `ON_READ`.
"""
if shape is None:
shape = ()
# Validate optional keyword arguments.
for kwarg in kwargs:
if kwarg not in ['getter', 'collections', 'experimental_autocast',
'caching_device']:
raise TypeError('Unknown keyword argument:', kwarg)
has_custom_getter = 'getter' in kwargs
getter = kwargs.pop('getter', base_layer_utils.make_variable)
collections_arg = kwargs.pop('collections', None)
# 'experimental_autocast' can be set to False by the caller to indicate an
# AutoCastVariable should never be created.
autocast = kwargs.pop('experimental_autocast', True)
# See the docstring for tf.Variable about the details for caching_device.
caching_device = kwargs.pop('caching_device', None)
if dtype is None:
dtype = self.dtype or backend.floatx()
dtype = dtypes.as_dtype(dtype)
if self._dtype_policy.variable_dtype is None:
# The policy is "_infer", so we infer the policy from the variable dtype.
self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))
initializer = initializers.get(initializer)
regularizer = regularizers.get(regularizer)
constraint = constraints.get(constraint)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
# Initialize variable when no initializer provided
if initializer is None:
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = initializers.get('glorot_uniform')
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = initializers.zeros()
# NOTE: Do we need to support handling DT_STRING and DT_COMPLEX here?
elif not has_custom_getter:
# When `getter` is specified, it's possibly fine for `initializer` to be
# None since it's up to the custom `getter` to raise error in case it
# indeed needs `initializer`.
raise ValueError('An initializer for variable %s of type %s is required'
' for layer %s' % (name, dtype.base_dtype, self.name))
if (autocast and
self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype
and dtype.is_floating):
# Wrap 'getter' with a version that returns an AutoCastVariable.
old_getter = getter
def getter(*args, **kwargs): # pylint: disable=function-redefined
variable = old_getter(*args, **kwargs)
return autocast_variable.create_autocast_variable(variable)
# Also, the caching_device does not work with the mixed precision API;
# disable it if it is specified.
# TODO(b/142020079): Reenable it once the bug is fixed.
if caching_device is not None:
tf_logging.warning(
'`caching_device` does not work with mixed precision API. Ignoring '
'user specified `caching_device`.')
caching_device = None
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
# TODO(allenl): a `make_variable` equivalent should be added as a
# `Trackable` method.
getter=getter,
# Manage errors in Layer rather than Trackable.
overwrite=True,
initializer=initializer,
dtype=dtype,
constraint=constraint,
trainable=trainable,
partitioner=partitioner,
use_resource=use_resource,
collections=collections_arg,
synchronization=synchronization,
aggregation=aggregation,
caching_device=caching_device)
if regularizer is not None:
# TODO(fchollet): in the future, this should be handled at the
# level of variable creation, and weight regularization losses
# should be variable attributes.
name_in_scope = variable.name[:variable.name.find(':')]
self._handle_weight_regularization(name_in_scope,
variable,
regularizer)
if base_layer_utils.is_split_variable(variable):
for v in variable:
backend.track_variable(v)
if trainable:
self._trainable_weights.append(v)
else:
self._non_trainable_weights.append(v)
else:
backend.track_variable(variable)
if trainable:
self._trainable_weights.append(variable)
else:
self._non_trainable_weights.append(variable)
return variable
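# A minimal sketch, not part of the original file, of how a subclass's `build`
# might use `add_weight` above; the names, shapes, regularizer and
# synchronization settings below are hypothetical.
#
#   def build(self, input_shape):
#     self.kernel = self.add_weight(
#         'kernel',
#         shape=(int(input_shape[-1]), 16),
#         initializer='glorot_uniform',
#         regularizer=regularizers.l2(1e-4),
#         trainable=True)
#     # Non-trainable state synced on read, e.g. a running statistic.
#     self.moving_mean = self.add_weight(
#         'moving_mean',
#         shape=(16,),
#         initializer='zeros',
#         trainable=False,
#         synchronization=tf_variables.VariableSynchronization.ON_READ,
#         aggregation=tf_variables.VariableAggregation.MEAN)
#     super().build(input_shape)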
@generic_utils.default
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable)
containing the configuration of a layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
The config of a layer does not include connectivity
information, nor the layer class name. These are handled
by `Network` (one layer of abstraction above).
Returns:
Python dictionary.
"""
all_args = tf_inspect.getfullargspec(self.__init__).args
config = {'name': self.name, 'trainable': self.trainable}
if hasattr(self, '_batch_input_shape'):
config['batch_input_shape'] = self._batch_input_shape
config['dtype'] = policy.serialize(self._dtype_policy)
if hasattr(self, 'dynamic'):
# Only include `dynamic` in the `config` if it is `True`
if self.dynamic:
config['dynamic'] = self.dynamic
elif 'dynamic' in all_args:
all_args.remove('dynamic')
expected_args = config.keys()
# Finds all arguments in the `__init__` that are not in the config:
extra_args = [arg for arg in all_args if arg not in expected_args]
# Check that either the only argument in the `__init__` is `self`,
# or that `get_config` has been overridden:
if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
raise NotImplementedError('Layers with arguments in `__init__` must '
'override `get_config`.')
return config
@classmethod
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same layer from the config
dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
Args:
config: A Python dictionary, typically the
output of get_config.
Returns:
A layer instance.
"""
return cls(**config)
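# A minimal sketch, not part of the original file, of the round trip these two
# methods support: serialize a layer's constructor arguments with `get_config`
# and rebuild an untrained copy with `from_config`. `layer` is hypothetical.
#
#   config = layer.get_config()
#   clone = layer.__class__.from_config(config)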
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
If the layer has not been built, this method will call `build` on the
layer. This assumes that the layer will later be used with inputs that
match the input shape provided here.
Args:
input_shape: Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
instead of an integer.
Returns:
An input shape tuple.
"""
if context.executing_eagerly():
# In this case we build the model first in order to do shape inference.
# This is acceptable because the framework only calls
# `compute_output_shape` on shape values that the layer would later be
# built for. It would however cause issues in case a user attempts to
# use `compute_output_shape` manually with shapes that are incompatible
# with the shape the Layer will be called on (these users will have to
# implement `compute_output_shape` themselves).
self._maybe_build(input_shape)
with ops.get_default_graph().as_default():
graph = func_graph.FuncGraph('graph')
with graph.as_default():
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
inputs = nest.map_structure(
base_layer_utils.generate_placeholders_from_shape, input_shape)
try:
outputs = self(inputs, training=False)
except TypeError as e:
raise NotImplementedError(
'We could not automatically infer the static shape of the '
'layer\'s output. Please implement the '
'`compute_output_shape` method on your layer (%s).' %
self.__class__.__name__) from e
return nest.map_structure(lambda t: t.shape, outputs)
raise NotImplementedError
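# A minimal sketch, not part of the original file, of overriding
# `compute_output_shape` in a subclass so that shape inference does not have to
# trace the layer; `self.units` is hypothetical and `input_shape` is treated as
# a `TensorShape`.
#
#   def compute_output_shape(self, input_shape):
#     return input_shape[:-1].concatenate([self.units])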
@doc_controls.for_subclass_implementers
def compute_output_signature(self, input_signature):
"""Compute the output tensor signature of the layer based on the inputs.
Unlike a TensorShape object, a TensorSpec object contains both shape
and dtype information for a tensor. This method allows layers to provide
output dtype information if it is different from the input dtype.
For any layer that doesn't implement this function,
the framework will fall back to use `compute_output_shape`, and will
assume that the output dtype matches the input dtype.
Args:
input_signature: Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.
Returns:
Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.
Raises:
TypeError: If input_signature contains a non-TensorSpec object.
"""
def check_type_return_shape(s):
if not isinstance(s, tensor_spec.TensorSpec):
raise TypeError('Only TensorSpec signature types are supported, '
'but saw signature entry: {}.'.format(s))
return s.shape
input_shape = nest.map_structure(check_type_return_shape, input_signature)
output_shape = self.compute_output_shape(input_shape)
dtype = self._compute_dtype
if dtype is None:
input_dtypes = [s.dtype for s in nest.flatten(input_signature)]
# Default behavior when self.dtype is None is to use the first input's
# dtype.
dtype = input_dtypes[0]
return nest.map_structure(
lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),
output_shape)
@generic_utils.default
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
Args:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
Returns:
None or a tensor (or list of tensors,
one per output tensor of the layer).
"""
if not self.supports_masking:
if any(m is not None for m in nest.flatten(mask)):
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask.
return None
# if masking is explicitly supported, by default
# carry over the input mask
return mask
def __call__(self, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Args:
*args: Positional arguments to be passed to `self.call`.
**kwargs: Keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
Note:
- The following optional keyword arguments are reserved for specific uses:
* `training`: Boolean scalar tensor of Python boolean indicating
whether the `call` is meant for training or inference.
* `mask`: Boolean input mask.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
for `inputs` by the previous layer (if `inputs` did come from
a layer that generated a corresponding mask, i.e. if it came from
a Keras layer with masking support).
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
RuntimeError: if `super().__init__()` was not called in the constructor.
"""
self._assert_built_as_v1()
if not hasattr(self, '_thread_local'):
raise RuntimeError(
'You must call `super().__init__()` in the layer constructor.')
# Grab the first positional or keyword argument.
if args:
inputs = args[0]
args = args[1:]
elif self._call_fn_args[0] in kwargs:
inputs = kwargs.pop(self._call_fn_args[0])
else:
raise ValueError(
'The first argument to `Layer.call` must always be passed.')
call_context = base_layer_utils.call_context()
input_list = nest.flatten(inputs)
# We will attempt to build a TF graph if & only if all inputs are symbolic.
# This is always the case in graph mode. It can also be the case in eager
# mode when all inputs can be traced back to `keras.Input()` (when building
# models using the functional API).
build_graph = tf_utils.are_all_symbolic_tensors(input_list)
# Accept NumPy and scalar inputs by converting to Tensors.
if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):
def _convert_non_tensor(x):
# Don't call `ops.convert_to_tensor` on all `inputs` because
# `SparseTensors` can't be converted to `Tensor`.
if isinstance(x, (np.ndarray, float, int)):
return ops.convert_to_tensor_v2_with_dispatch(x)
return x
inputs = nest.map_structure(_convert_non_tensor, inputs)
input_list = nest.flatten(inputs)
# Handle `mask` propagation from previous layer to current layer. Masks can
# be propagated explicitly via the `mask` argument, or implicitly via
# setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
# explicitly take priority.
mask_arg_passed_by_framework = False
input_masks = self._collect_input_masks(inputs, args, kwargs)
if (self._expects_mask_arg and input_masks is not None and
not self._call_arg_was_passed('mask', args, kwargs)):
mask_arg_passed_by_framework = True
kwargs['mask'] = input_masks
# If `training` argument is None or not explicitly passed,
# propagate `training` value from this layer's calling layer.
training_value = None
training_arg_passed_by_framework = False
# Priority 1: `training` was explicitly passed.
if self._call_arg_was_passed('training', args, kwargs):
training_value = self._get_call_arg_value('training', args, kwargs)
if not self._expects_training_arg:
kwargs.pop('training')
if training_value is None:
# Priority 2: `training` was passed to a parent layer.
if call_context.training is not None:
training_value = call_context.training
# Priority 3a: `learning_phase()` has been set.
elif backend.global_learning_phase_is_set():
training_value = backend.learning_phase()
# Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.
elif build_graph:
with backend.get_graph().as_default():
if base_layer_utils.is_in_keras_graph():
training_value = backend.learning_phase()
if self._expects_training_arg and training_value is not None:
# Force the training_value to be bool type which matches to the contract
# for layer/model call args.
if tensor_util.is_tf_type(training_value):
training_value = math_ops.cast(training_value, dtypes.bool)
else:
training_value = bool(training_value)
args, kwargs = self._set_call_arg_value(
'training', training_value, args, kwargs)
training_arg_passed_by_framework = True
# Only create Keras history if at least one tensor originates from a
# `keras.Input`. Otherwise this Layer may be being used outside the Keras
# framework.
if build_graph and base_layer_utils.needs_keras_history(inputs):
base_layer_utils.create_keras_history(inputs)
with call_context.enter(self, inputs, build_graph, training_value):
# Check input assumptions set after layer building, e.g. input shape.
if build_graph:
# Symbolic execution on symbolic tensors. We will attempt to build
# the corresponding TF subgraph inside `backend.get_graph()`
input_spec.assert_input_compatibility(self.input_spec, inputs,
self.name)
graph = backend.get_graph()
with graph.as_default(), backend.name_scope(self._name_scope()): # pylint: disable=not-callable
# Build layer if applicable (if the `build` method has been
# overridden).
self._maybe_build(inputs)
cast_inputs = self._maybe_cast_inputs(inputs)
# Wrapping `call` function in autograph to allow for dynamic control
# flow and control dependencies in call. We are limiting this to
# subclassed layers as autograph is strictly needed only for
# subclassed layers and models.
# tf_convert will respect the value of autograph setting in the
# enclosing tf.function, if any.
if (base_layer_utils.is_subclassed(self) and
not base_layer_utils.from_saved_model(self)):
call_fn = autograph.tf_convert(
self.call, ag_ctx.control_status_ctx())
else:
call_fn = self.call
if not self.dynamic:
try:
with autocast_variable.enable_auto_cast_variables(
self._compute_dtype_object):
outputs = call_fn(cast_inputs, *args, **kwargs)
except errors.OperatorNotAllowedInGraphError as e:
raise TypeError('You are attempting to use Python control '
'flow in a layer that was not declared to be '
'dynamic. Pass `dynamic=True` to the class '
'constructor.\nEncountered error:\n"""\n' +
str(e) + '\n"""')
else:
# We will use static shape inference to return symbolic tensors
# matching the specifications of the layer outputs.
# Since `self.dynamic` is True, we will never attempt to
# run the underlying TF graph (which is disconnected).
# TODO(fchollet): consider py_func as an alternative, which
# would enable us to run the underlying graph if needed.
outputs = self._symbolic_call(inputs)
if outputs is None:
raise ValueError('A layer\'s `call` method should return a '
'Tensor or a list of Tensors, not None '
'(layer: ' + self.name + ').')
if base_layer_utils.have_all_keras_metadata(inputs):
if training_arg_passed_by_framework:
args, kwargs = self._set_call_arg_value(
'training', None, args, kwargs, pop_kwarg_if_none=True)
if mask_arg_passed_by_framework:
kwargs.pop('mask')
outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,
outputs)
self._handle_activity_regularization(inputs, outputs)
self._set_mask_metadata(inputs, outputs, input_masks)
if hasattr(self, '_set_inputs') and not self.inputs:
# Subclassed network: explicitly set metadata normally set by
# a call to self._set_inputs().
# TODO(b/120997007): This should be done in Eager as well, but
# causes garbage collection issues because of the placeholders
# created on the default Keras graph.
self._set_inputs(inputs, outputs)
else:
# Eager execution on data tensors.
with backend.name_scope(self._name_scope()): # pylint: disable=not-callable
self._maybe_build(inputs)
cast_inputs = self._maybe_cast_inputs(inputs)
with autocast_variable.enable_auto_cast_variables(
self._compute_dtype_object):
outputs = self.call(cast_inputs, *args, **kwargs)
self._handle_activity_regularization(inputs, outputs)
self._set_mask_metadata(inputs, outputs, input_masks)
return outputs
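# A minimal sketch, not part of the original file, of the wrapped call path
# above: invoking the layer instance triggers building, input conversion and
# handling of the reserved `training` argument. `SimpleDense` refers to the
# hypothetical subclass sketched near the top of this class.
#
#   layer = SimpleDense(4)
#   outputs = layer(np.ones((2, 3), dtype='float32'), training=False)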
def _assert_built_as_v1(self):
if not hasattr(self, '_originally_built_as_v1'):
raise ValueError(
'Your Layer or Model is in an invalid state. '
'This can happen for the following cases:\n '
'1. You might be interleaving estimator/non-estimator models or '
'interleaving models/layers made in tf.compat.v1.Graph.as_default() '
'with models/layers created outside of it. '
'Converting a model to an estimator (via model_to_estimator) '
'invalidates all models/layers made before the conversion (even '
'if they were not the model converted to an estimator). '
'Similarly, making a layer or a model inside a '
'a tf.compat.v1.Graph invalidates all layers/models you previously '
'made outside of the graph.\n'
'2. You might be using a custom keras layer implementation with '
' custom __init__ which didn\'t call super().__init__. '
' Please check the implementation of %s and its bases.' %
(type(self),))
@property
def dtype(self):
return self._dtype_policy.variable_dtype
@property
def name(self):
return self._name
@property
def dynamic(self):
return any(layer._dynamic for layer in self._flatten_layers())
@property
@doc_controls.do_not_generate_docs
def stateful(self):
return any(layer._stateful for layer in self._flatten_layers())
@stateful.setter
def stateful(self, value):
self._stateful = value
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = value
for layer in getattr(self, '_self_tracked_trackables', []):
layer.trainable = value
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
self._activity_regularizer = regularizer
@property
def input_spec(self):
return self._input_spec
@input_spec.setter
# Must be decorated to prevent tracking, since the input_spec can be nested
# InputSpec objects.
@trackable.no_automatic_dependency_tracking
def input_spec(self, value):
for v in nest.flatten(value):
if v is not None and not isinstance(v, base_layer.InputSpec):
raise TypeError('Layer input_spec must be an instance of InputSpec. '
'Got: {}'.format(v))
self._input_spec = value
@property
def updates(self):
collected_updates = []
all_layers = self._flatten_layers()
with backend.get_graph().as_default():
for layer in all_layers:
if not layer.trainable and not layer.stateful:
continue
for u in layer._updates:
if callable(u):
try:
u = u()
except ValueError as e:
if 'InaccessibleTensorError' in type(e).__name__:
# For one specific case of error we try to raise
# a more meaningful error message about the graph if we can.
# This error is an internal TF symbol that is not
# publicly exposed, so we check the name directly rather
# than using a direct import.
base_layer_utils.check_graph_consistency(
method='add_update', force_raise=True)
raise # check_graph_consistency may not always raise.
base_layer_utils.check_graph_consistency(u, method='add_update')
collected_updates.append(u)
return collected_updates
@property
def losses(self):
"""Losses which are associated with this `Layer`.
Variable regularization tensors are created when this property is accessed,
so it is eager safe: accessing `losses` under a `tf.GradientTape` will
propagate gradients back to the corresponding variables.
Returns:
A list of tensors.
"""
collected_losses = []
all_layers = self._flatten_layers()
for layer in all_layers:
# If any eager losses are present, we assume the model to be part of an
# eager training loop (either a custom one or the one used when
# `run_eagerly=True`) and so we always return just the eager losses.
collected_losses.extend(layer._losses)
for regularizer in layer._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses
@doc_controls.for_subclass_implementers
def add_loss(self, losses, inputs=None):
"""Add loss tensor(s), potentially dependent on layer inputs.
Some losses (for instance, activity regularization losses) may be dependent
on the inputs passed when calling a layer. Hence, when reusing the same
layer on different inputs `a` and `b`, some entries in `layer.losses` may
be dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
This method can be used inside a subclassed layer or model's `call`
function, in which case `losses` should be a Tensor or list of Tensors.
Example:
```python
class MyLayer(tf.keras.layers.Layer):
def call(self, inputs):
self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)
return inputs
```
This method can also be called directly on a Functional Model during
construction. In this case, any loss Tensors passed to this Model must
be symbolic and be able to be traced back to the model's `Input`s. These
losses become part of the model's topology and are tracked in `get_config`.
Example:
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Activity regularization.
model.add_loss(tf.abs(tf.reduce_mean(x)))
```
If this is not the case for your loss (if, for example, your loss references
a `Variable` of one of the model's layers), you can wrap your loss in a
zero-argument lambda. These losses are not tracked as part of the model's
topology since they can't be serialized.
Example:
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Weight regularization.
model.add_loss(lambda: tf.reduce_mean(x.kernel))
```
The `get_losses_for` method can be used to retrieve the losses relevant to a
specific set of inputs.
Args:
losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
inputs: Ignored when executing eagerly. If anything other than None is
passed, it signals the losses are conditional on some of the layer's
inputs, and thus they should only be run where these inputs are
available. This is the case for activity regularization losses, for
instance. If `None` is passed, the losses are assumed
to be unconditional, and will apply across all dataflows of the layer
(e.g. weight regularization losses).
"""
def _tag_unconditional(loss):
"""Process the loss and tag it by setting loss._unconditional_loss."""
if callable(loss):
# We run the loss without autocasting, as regularizers are often
# numerically unstable in float16.
with autocast_variable.enable_auto_cast_variables(None):
loss = loss()
if loss is None:
return None # Will be filtered out when computing the .losses property
if not tensor_util.is_tf_type(loss):
loss = ops.convert_to_tensor_v2_with_dispatch(
loss, dtype=backend.floatx())
loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access
return loss
losses = nest.flatten(losses)
callable_losses = []
symbolic_losses = []
for loss in losses:
if callable(loss):
callable_losses.append(functools.partial(_tag_unconditional, loss))
continue
if loss is None:
continue
if not tensor_util.is_tf_type(loss):
loss = ops.convert_to_tensor_v2_with_dispatch(
loss, dtype=backend.floatx())
# TF Functions should take the eager path.
if (tf_utils.is_symbolic_tensor(loss) and
not base_layer_utils.is_in_tf_function()):
symbolic_losses.append(_tag_unconditional(loss))
base_layer_utils.check_graph_consistency(loss, method='add_loss')
self._callable_losses.extend(callable_losses)
in_call_context = base_layer_utils.call_context().in_call
if in_call_context:
for symbolic_loss in symbolic_losses:
self._losses.append(symbolic_loss)
else:
for symbolic_loss in symbolic_losses:
if getattr(self, '_is_graph_network', False):
self._graph_network_add_loss(symbolic_loss)
else:
# Possibly a loss was added in a Layer's `build`.
self._losses.append(symbolic_loss)
@property
def metrics(self):
collected_metrics = []
for layer in self._flatten_layers():
collected_metrics.extend(layer._metrics)
return collected_metrics
@doc_controls.for_subclass_implementers
def add_metric(self, value, aggregation=None, name=None):
"""Adds metric tensor to the layer.
Args:
value: Metric tensor.
aggregation: Sample-wise metric reduction function. If `aggregation=None`,
it indicates that the metric tensor provided has been aggregated
already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by
`model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the
given metric tensor will be sample-wise reduced using `mean` function.
eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean',
aggregation='mean')`.
name: String metric name.
Raises:
ValueError: If `aggregation` is anything other than None or `mean`.
"""
if aggregation is not None and aggregation != 'mean':
raise ValueError(
'We currently support only `mean` sample-wise metric aggregation. '
'You provided aggregation=`%s`' % aggregation)
from_metric_obj = hasattr(value, '_metric_obj')
is_symbolic = tf_utils.is_symbolic_tensor(value)
in_call_context = base_layer_utils.call_context().in_call
if name is None and not from_metric_obj:
# Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`
# In eager mode, we use metric name to lookup a metric. Without a name,
# a new Mean metric wrapper will be created on every model/layer call.
# So, we raise an error when no name is provided.
# We will do the same for symbolic mode for consistency although a name
# will be generated if no name is provided.
# We will not raise this error in the following use case for the sake of
# consistency, as the name is provided in the metric constructor.
# mean = metrics.Mean(name='my_metric')
# model.add_metric(mean(outputs))
raise ValueError('Please provide a name for your metric like '
'`self.add_metric(tf.reduce_sum(inputs), '
'name=\'mean_activation\', aggregation=\'mean\')`')
elif from_metric_obj:
name = value._metric_obj.name
if in_call_context:
# TF Function path should take the eager path.
self._symbolic_add_metric(value, aggregation, name)
else:
if not is_symbolic:
raise ValueError('Expected a symbolic Tensor for the metric value, '
'received: ' + str(value))
# Possibly a metric was added in a Layer's `build`.
if not getattr(self, '_is_graph_network', False):
with backend.get_graph().as_default():
self._symbolic_add_metric(value, aggregation, name)
return
if from_metric_obj:
raise ValueError('Using the result of calling a `Metric` object '
'when calling `add_metric` on a Functional '
'Model is not supported. Please pass the '
'Tensor to monitor directly.')
# Insert layers into the Keras Graph Network.
self._graph_network_add_metric(value, aggregation, name)
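# A short usage sketch for `add_metric` inside a subclassed layer's `call`,
# mirroring the docstring above; the layer name is illustrative only:
#
#   class DenseWithMetric(tf.keras.layers.Layer):
#     def call(self, inputs):
#       # Un-aggregated tensor, so ask the layer to wrap it in a `Mean` metric.
#       self.add_metric(tf.reduce_sum(inputs), name='sum_activation',
#                       aggregation='mean')
#       return inputs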
@doc_controls.for_subclass_implementers
def add_update(self, updates, inputs=None):
"""Add update op(s), potentially dependent on layer inputs.
Weight updates (for instance, the updates of the moving mean and variance
in a BatchNormalization layer) may be dependent on the inputs passed
when calling a layer. Hence, when reusing the same layer on
different inputs `a` and `b`, some entries in `layer.updates` may be
dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
The `get_updates_for` method can be used to retrieve the updates relevant to a
specific set of inputs.
This call is ignored when eager execution is enabled (in that case, variable
updates are run on the fly and thus do not need to be tracked for later
execution).
Args:
updates: Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting `trainable=False`
on this Layer, when executing in Eager mode.
inputs: Deprecated, will be automatically inferred.
"""
if inputs is not None:
tf_logging.warning(
'`add_update` `inputs` kwarg has been deprecated. You no longer need '
'to pass a value to `inputs` as it is being automatically inferred.')
call_context = base_layer_utils.call_context()
if (ds_context.has_strategy() and
ds_context.in_cross_replica_context() and
# When saving the model, the distribution strategy context should be
# ignored, following the default path for adding updates.
not call_context.saving):
# Updates don't need to be run in a cross-replica context.
return
updates = generic_utils.to_list(updates)
if call_context.in_call:
relevant_inputs = call_context.inputs
else:
inbound_nodes = getattr(self, '_inbound_nodes', [])
relevant_inputs = [node.input_tensors for node in inbound_nodes]
def process_update(x):
"""Standardize update ops.
Args:
x: Tensor, op, or callable.
Returns:
An update op.
"""
if callable(x):
update = lambda: process_update(x())
return update()
elif isinstance(x, ops.Operation):
update = x
elif hasattr(x, 'op'):
update = x.op
else:
update = ops.convert_to_tensor_v2_with_dispatch(x)
reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
update._unconditional_update = update not in reachable
return update
updates = [process_update(x) for x in updates]
self._updates.extend(updates)
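# A rough sketch of the zero-arg-callable pattern described in the docstring,
# assuming `self.moving_mean` is a non-trainable variable created in `build`
# (names are illustrative, not part of this class):
#
#   def call(self, inputs):
#     batch_mean = math_ops.reduce_mean(inputs, axis=0)
#     # Passing a callable lets the update be skipped when trainable=False.
#     self.add_update(
#         lambda: self.moving_mean.assign(
#             0.99 * self.moving_mean + 0.01 * batch_mean))
#     return inputs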
def set_weights(self, weights):
"""Sets the weights of the layer, from Numpy arrays.
The weights of a layer represent the state of the layer. This function
sets the weight values from numpy arrays. The weight values should be
passed in the order they are created by the layer. Note that the layer's
weights must be instantiated before calling this function by calling
the layer.
For example, a Dense layer returns a list of two values-- per-output
weights and the bias value. These can be used to set the weights of another
Dense layer:
>>> a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Args:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
params = self.weights
expected_num_weights = 0
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
expected_num_weights += param.num_tensors
else:
expected_num_weights += 1
if expected_num_weights != len(weights):
raise ValueError(
'You called `set_weights(weights)` on layer "%s" '
'with a weight list of length %s, but the layer was '
'expecting %s weights. Provided weights: %s...' %
(self.name, len(weights), expected_num_weights, str(weights)[:50]))
weight_index = 0
weight_value_tuples = []
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
num_tensors = param.num_tensors
tensors = weights[weight_index:weight_index + num_tensors]
param.set_weights(tensors)
weight_index += num_tensors
else:
weight = weights[weight_index]
weight_shape = weight.shape if hasattr(weight, 'shape') else ()
ref_shape = param.shape
if not ref_shape.is_compatible_with(weight_shape):
raise ValueError(
'Layer weight shape %s not compatible with provided weight '
'shape %s' % (ref_shape, weight_shape))
weight_value_tuples.append((param, weight))
weight_index += 1
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current weights of the layer.
The weights of a layer represent the state of the layer. This function
returns both trainable and non-trainable weight values associated with this
layer as a list of Numpy arrays, which can in turn be used to load state
into similarly parameterized layers.
For example, a Dense layer returns a list of two values-- per-output
weights and the bias value. These can be used to set the weights of another
Dense layer:
>>> a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Returns:
Weights values as a list of numpy arrays.
"""
weights = self.weights
output_weights = []
for weight in weights:
if isinstance(weight, base_layer_utils.TrackableWeightHandler):
output_weights.extend(weight.get_tensors())
else:
output_weights.append(weight)
return backend.batch_get_value(output_weights)
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional updates.
return [u for u in self.updates if u._unconditional_update]
# Requesting input-conditional updates.
updates = [u for u in self.updates if not u._unconditional_update]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, updates)
return [u for u in updates if u in reachable]
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional losses.
return [l for l in self.losses if l._unconditional_loss]
# Requesting input-conditional losses.
losses = [l for l in self.losses if not l._unconditional_loss]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, losses)
return [l for l in losses if l in reachable]
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple inputs).
"""
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs).
"""
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@property
def input_mask(self):
"""Retrieves the input mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Input mask tensor (potentially None) or list of input
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@property
def output_mask(self):
"""Retrieves the output mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Output mask tensor (potentially None) or list of output
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
output = self.output
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_shapes',
'input shape')
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_shapes',
'output shape')
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first input node of the layer.
Returns:
A tensor (or list of tensors if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_tensors',
'input')
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first output node of the layer.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_tensors',
'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layers.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
return self._inbound_nodes[0].input_shapes
else:
raise AttributeError('The layer "' + str(self.name) +
' has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
"""
if not self.built:
if getattr(self, '_is_graph_network', False):
with tf_utils.maybe_init_scope(self):
self._maybe_build(self.inputs)
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
return layer_utils.count_params(self.weights)
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
@property
@doc_controls.do_not_doc_inheritable
def inbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._inbound_nodes
@property
@doc_controls.do_not_doc_inheritable
def outbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
@doc_controls.do_not_doc_inheritable
def apply(self, inputs, *args, **kwargs):
"""Deprecated, do NOT use!
This is an alias of `self.__call__`.
Args:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
warnings.warn('`layer.apply` is deprecated and '
'will be removed in a future version. '
'Please use `layer.__call__` method instead.')
return self.__call__(inputs, *args, **kwargs)
@doc_controls.do_not_doc_inheritable
def add_variable(self, *args, **kwargs):
"""Deprecated, do NOT use! Alias for `add_weight`."""
warnings.warn('`layer.add_variable` is deprecated and '
'will be removed in a future version. '
'Please use `layer.add_weight` method instead.')
return self.add_weight(*args, **kwargs)
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Alias of `self.weights`.
Returns:
A list of variables.
"""
return self.weights
@property
def trainable_variables(self):
return self.trainable_weights
@property
def non_trainable_variables(self):
return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
@property
def _inbound_nodes(self):
return self._inbound_nodes_value
@_inbound_nodes.setter
@trackable.no_automatic_dependency_tracking
def _inbound_nodes(self, value):
self._inbound_nodes_value = value
@property
def _outbound_nodes(self):
return self._outbound_nodes_value
@_outbound_nodes.setter
@trackable.no_automatic_dependency_tracking
def _outbound_nodes(self, value):
self._outbound_nodes_value = value
def _set_dtype_policy(self, dtype):
"""Sets self._dtype_policy."""
if isinstance(dtype, policy.Policy):
self._dtype_policy = dtype
elif isinstance(dtype, dict):
self._dtype_policy = policy.deserialize(dtype)
elif isinstance(dtype, str) and dtype in ('mixed_float16',
'mixed_bfloat16'):
# The isinstance check is required since np.dtype raises an error if
# compared to a non-dtype string.
self._dtype_policy = policy.Policy(dtype)
elif dtype:
self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)
else:
self._dtype_policy = policy.global_policy()
if (self._dtype_policy.name == 'mixed_float16' and
not loss_scale_optimizer.strategy_supports_loss_scaling()):
# Although it is only loss scaling that is unsupported on certain strategies,
# to avoid confusion, we disallow the 'mixed_float16' policy with unsupported
# strategies. This is because 'mixed_float16' requires loss scaling for
# numeric stability.
strategy = ds_context.get_strategy()
raise ValueError('Mixed precision is not supported with the '
'tf.distribute.Strategy: %s. Either stop using mixed '
'precision by removing the use of the "%s" policy or '
'use a different Strategy, e.g. a MirroredStrategy.' %
(strategy.__class__.__name__, self._dtype_policy.name))
# Performance optimization: cache the compute dtype as a Dtype object or
# None, so that str to Dtype conversion doesn't happen in Layer.__call__.
if self._dtype_policy.compute_dtype:
self._compute_dtype_object = dtypes.as_dtype(
self._dtype_policy.compute_dtype)
else:
self._compute_dtype_object = None
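# Rough illustration of the fields this method populates, assuming the public
# 'mixed_float16' policy (the dtypes shown are that policy's documented
# behavior, not computed here; `Dense` is illustrative):
#
#   layer = tf.keras.layers.Dense(10, dtype='mixed_float16')
#   layer.dtype            # 'float32'  (variable dtype)
#   layer._compute_dtype   # 'float16'  (compute dtype, cached below as well)
#
# With a plain dtype such as 'float64', variable and compute dtypes match.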
# TODO(reedwm): Expose this property?
@property
def _compute_dtype(self):
"""The layer's compute dtype.
Unless mixed-precision is used, this is the same as `Layer.dtype`.
If self._autocast is True, the layer will cast floating-point inputs to this.
Returns:
The layer's compute dtype.
"""
return self._dtype_policy.compute_dtype
def _maybe_cast_inputs(self, inputs):
"""Maybe casts the inputs to the compute dtype.
If self._compute_dtype is floating-point and self._autocast is True,
floating-point inputs are cast to self._compute_dtype.
Args:
inputs: Input tensor, or structure of input tensors.
Returns:
`inputs`, but tensors may have been cast to self._compute_dtype
"""
compute_dtype = self._compute_dtype
if (self._autocast and compute_dtype and
dtypes.as_dtype(compute_dtype).is_floating):
def f(x):
"""Cast a single Tensor or TensorSpec to the compute dtype."""
cast_types = (ops.Tensor, sparse_tensor.SparseTensor,
ragged_tensor.RaggedTensor)
if (isinstance(x, cast_types) and x.dtype.is_floating and
x.dtype.base_dtype.name != compute_dtype):
return math_ops.cast(x, compute_dtype)
elif isinstance(x, tensor_spec.TensorSpec) and x.dtype.is_floating:
# Inputs may be TensorSpecs when this function is called from
# model._set_inputs.
return tensor_spec.TensorSpec(x.shape, compute_dtype, x.name)
else:
return x
return nest.map_structure(f, inputs)
else:
return inputs
# _dtype used to be an attribute set in the constructor. We still expose it
# because some clients still use it.
# TODO(reedwm): Deprecate, then remove the _dtype property.
@property
def _dtype(self):
# This is equivalent to returning self.dtype. We do not return self.dtype
# as it would cause infinite recursion in a few subclasses, which override
# "dtype" to return self._dtype.
return self._dtype_policy.variable_dtype
@_dtype.setter
def _dtype(self, value):
value = dtypes.as_dtype(value).name
self._set_dtype_policy(policy.Policy(value))
def _name_scope(self): # pylint: disable=method-hidden
return self.name
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
self._name = name
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
def _symbolic_add_metric(self, value, aggregation=None, name=None):
base_layer_utils.check_graph_consistency(value, method='add_metric')
match = self._get_existing_metric(name)
if aggregation is None:
# Iterate over the metrics and check if the given metric exists already.
# This can happen when a metric instance is created in subclassed model
# layer `__init__` and we have tracked that instance already in
# model.__setattr__.
if match:
result_tensor = value
metric_obj = match
elif hasattr(value, '_metric_obj'):
# We track the instance using the metadata on the result tensor.
result_tensor = value
metric_obj = result_tensor._metric_obj
self._metrics.append(metric_obj)
else:
raise ValueError(
'We do not support adding an aggregated metric result tensor that '
'is not the output of a `tf.keras.metrics.Metric` metric instance. '
'Without having access to the metric instance we cannot reset the '
'state of a metric after every epoch during training. You can '
'create a `tf.keras.metrics.Metric` instance and pass the result '
'here or pass an un-aggregated result with `aggregation` parameter '
'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'
', name=\'mean_activation\', aggregation=\'mean\')`')
else:
# If a non-aggregated tensor is given as input (i.e. `aggregation` is
# explicitly set to `mean`), we wrap the tensor in a `Mean` metric.
if match:
result_tensor = match(value)
metric_obj = match
else:
metric_obj, result_tensor = base_layer_utils.create_mean_metric(
value, name)
self._metrics.append(metric_obj)
def _handle_weight_regularization(self, name, variable, regularizer):
"""Create lambdas which compute regularization losses."""
def _loss_for_variable(v):
"""Creates a regularization loss `Tensor` for variable `v`."""
with backend.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
return regularization
if base_layer_utils.is_split_variable(variable):
for v in variable:
self.add_loss(functools.partial(_loss_for_variable, v))
else:
self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if self._activity_regularizer:
output_list = nest.flatten(outputs)
with backend.name_scope('ActivityRegularizer'):
for output in output_list:
activity_loss = self._activity_regularizer(output)
batch_size = math_ops.cast(
array_ops.shape(output)[0], activity_loss.dtype)
# Make activity regularization strength batch-agnostic.
mean_activity_loss = activity_loss / batch_size
base_layer_utils.check_graph_consistency(
mean_activity_loss, method='activity_regularizer')
self.add_loss(mean_activity_loss, inputs=inputs)
def _set_mask_metadata(self, inputs, outputs, previous_mask):
flat_outputs = nest.flatten(outputs)
mask_already_computed = (
getattr(self, '_compute_output_and_mask_jointly', False) or
all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))
# Only compute the mask if the Layer explicitly supports masking or has
# overridden `compute_mask`.
should_compute_mask = (
hasattr(self, 'compute_mask') and
(self.supports_masking or
not getattr(self.compute_mask, '_is_default', False)))
if mask_already_computed:
flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs]
elif not should_compute_mask:
flat_masks = [None for _ in flat_outputs]
else:
output_masks = self.compute_mask(inputs, previous_mask)
# `compute_mask` can return a single `None` even when a Layer
# has multiple outputs.
if output_masks is None:
flat_masks = [None for _ in flat_outputs]
else:
flat_masks = nest.flatten(output_masks)
for output, mask in zip(flat_outputs, flat_masks):
try:
output._keras_mask = mask
except AttributeError:
# C Type such as np.ndarray.
pass
if tf_utils.are_all_symbolic_tensors(flat_outputs):
for output in flat_outputs:
if getattr(output, '_keras_mask', None) is not None:
# Do not track masks for `TensorFlowOpLayer` construction.
output._keras_mask._keras_history_checked = True
def _collect_input_masks(self, inputs, args, kwargs):
"""Checks if `mask` argument was passed, else gathers mask from inputs."""
if self._call_arg_was_passed('mask', args, kwargs):
return self._get_call_arg_value('mask', args, kwargs)
if not self._should_compute_mask:
return None
input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None),
inputs)
if generic_utils.is_all_none(input_masks):
return None
return input_masks
def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return True
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
if arg_name in dict(zip(call_fn_args, args)):
return True
return False
def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return kwargs[arg_name]
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
args_dict = dict(zip(call_fn_args, args))
return args_dict[arg_name]
def _set_call_arg_value(
self, arg_name, new_value, args,
kwargs, inputs_in_args=False, pop_kwarg_if_none=False):
arg_pos = self._call_fn_arg_positions.get(arg_name, None)
if arg_pos is not None:
if not inputs_in_args:
# Ignore `inputs` arg.
arg_pos = arg_pos - 1
if len(args) > arg_pos:
args = list(args)
args[arg_pos] = new_value
return args, kwargs
if new_value is None and pop_kwarg_if_none:
kwargs.pop(arg_name, None)
else:
kwargs[arg_name] = new_value
return args, kwargs
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Args:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
def _maybe_build(self, inputs):
# Check input assumptions set before layer building, e.g. input rank.
if not self.built:
input_spec.assert_input_compatibility(
self.input_spec, inputs, self.name)
input_list = nest.flatten(inputs)
if input_list and self._dtype_policy.compute_dtype is None:
try:
dtype = input_list[0].dtype.base_dtype.name
except AttributeError:
pass
else:
self._set_dtype_policy(policy.Policy(dtype))
input_shapes = None
if all(hasattr(x, 'shape') for x in input_list):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
# Only call `build` if the user has manually overridden the build method.
if not hasattr(self.build, '_is_default'):
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
with tf_utils.maybe_init_scope(self):
self.build(input_shapes)
# We must also ensure that the layer is marked as built, and that the build
# shape is stored, since user-defined build functions may not be calling
# `super().build()`.
Layer.build(self, input_shapes)
# Optionally load weight values specified at layer instantiation.
if self._initial_weights is not None:
self.set_weights(self._initial_weights)
self._initial_weights = None
def _symbolic_call(self, inputs):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
output_shapes = self.compute_output_shape(input_shapes)
def _make_placeholder_like(shape):
ph = backend.placeholder(shape=shape, dtype=self.dtype)
ph._keras_mask = None
return ph
return nest.map_structure(_make_placeholder_like, output_shapes)
def _get_trainable_state(self):
"""Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
"""
layers = self._flatten_layers(include_self=False, recursive=False)
trainable_state = {self: self.trainable}
for l in layers:
trainable_state.update(l._get_trainable_state())
return trainable_state
def _set_trainable_state(self, trainable_state):
"""Set `trainable` state for each sublayer."""
if self in trainable_state:
self.trainable = trainable_state[self]
layers = self._flatten_layers(include_self=False, recursive=False)
for l in layers:
if l in trainable_state:
l._set_trainable_state(trainable_state)
@property
def _obj_reference_counts(self):
"""A dictionary counting the number of attributes referencing an object."""
self._maybe_create_attribute('_obj_reference_counts_dict',
object_identity.ObjectIdentityDictionary())
return self._obj_reference_counts_dict
@trackable.no_automatic_dependency_tracking
def _maybe_create_attribute(self, name, default_value):
"""Create the attribute with the default value if it hasn't been created.
This is useful for fields that are used for tracking purposes, such as
_trainable_weights or _layers. Note that a user could create a layer subclass
and assign an internal field before invoking Layer.__init__(); in that case
__setattr__() needs to create the tracking fields and __init__() needs to not
override them.
Args:
name: String, the name of the attribute.
default_value: Object, the default value of the attribute.
"""
if not hasattr(self, name):
self.__setattr__(name, default_value)
def __delattr__(self, name):
# For any super.__delattr__() call, we will directly use the implementation
# in Trackable and skip the behavior in AutoTrackable. The Layer originally
# used Trackable as its base class; the change to using Module as the base
# class forced us to have AutoTrackable in the class hierarchy.
#
# TODO(b/180760306) Keeping the status quo of skipping _delattr__ and
# __setattr__ in AutoTrackable may be unsustainable.
existing_value = getattr(self, name, None)
# If this value is replacing an existing object assigned to an attribute, we
# should clean it out to avoid leaking memory. First we check if there are
# other attributes referencing it.
reference_counts = self._obj_reference_counts
if existing_value not in reference_counts:
super(autotrackable.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call
return
reference_count = reference_counts[existing_value]
if reference_count > 1:
# There are other remaining references. We can't remove this object from
# _layers etc.
reference_counts[existing_value] = reference_count - 1
super(autotrackable.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call
return
else:
# This is the last remaining reference.
del reference_counts[existing_value]
super(autotrackable.AutoTrackable, self).__delattr__(name) # pylint: disable=bad-super-call
if (isinstance(existing_value, Layer)
or base_layer_utils.has_weights(existing_value)):
super(autotrackable.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call
'_self_tracked_trackables',
[l for l in self._self_tracked_trackables if l is not existing_value])
if isinstance(existing_value, tf_variables.Variable):
super(autotrackable.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call
'_trainable_weights',
[w for w in self._trainable_weights if w is not existing_value])
super(autotrackable.AutoTrackable, self).__setattr__( # pylint: disable=bad-super-call
'_non_trainable_weights',
[w for w in self._non_trainable_weights if w is not existing_value])
def __setattr__(self, name, value):
if (name == '_self_setattr_tracking' or
not getattr(self, '_self_setattr_tracking', True) or
# Exclude @property.setters from tracking
hasattr(self.__class__, name)):
try:
super(autotrackable.AutoTrackable, self).__setattr__(name, value) # pylint: disable=bad-super-call
except AttributeError:
raise AttributeError(
('Can\'t set the attribute "{}", likely because it conflicts with '
'an existing read-only @property of the object. Please choose a '
'different name.').format(name))
return
# Keep track of trackable objects, for the needs of `Network.save_weights`.
value = data_structures.sticky_attribute_assignment(
trackable=self, value=value, name=name)
reference_counts = self._obj_reference_counts
reference_counts[value] = reference_counts.get(value, 0) + 1
# Clean out the old attribute, which clears _layers and _trainable_weights
# if necessary.
try:
self.__delattr__(name)
except AttributeError:
pass
# Keep track of metric instance created in subclassed layer.
from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top
for val in nest.flatten(value):
if isinstance(val, metrics_module.Metric) and hasattr(self, '_metrics'):
self._metrics.append(val)
# TODO(scottzhu): Need to track Module object as well for weight tracking.
# Be careful about metric if it becomes a Module in future.
# Append value to self._layers if relevant
if (getattr(self, '_auto_track_sub_layers', True) and
(isinstance(value, Layer) or base_layer_utils.has_weights(value))):
self._maybe_create_attribute('_self_tracked_trackables', [])
# We need to check object identity to avoid de-duplicating empty
# container types which compare equal.
if not any((layer is value for layer in self._self_tracked_trackables)):
self._self_tracked_trackables.append(value)
if hasattr(value, '_use_resource_variables'):
# Legacy layers (V1 tf.layers) must always use
# resource variables.
value._use_resource_variables = True
# Append value to list of trainable / non-trainable weights if relevant
# TODO(b/125122625): This won't pick up on any variables added to a
# list/dict after creation.
for val in nest.flatten(value):
if not isinstance(val, tf_variables.Variable):
continue
# Users may add extra weights/variables
# simply by assigning them to attributes (invalid for graph networks)
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
if val.trainable:
if any(val is w for w in self._trainable_weights):
continue
self._trainable_weights.append(val)
else:
if any(val is w for w in self._non_trainable_weights):
continue
self._non_trainable_weights.append(val)
backend.track_variable(val)
# TODO(b/180760306) Skip the auto trackable from tf.Module to keep status
# quo. See the comment at __delattr__.
super(autotrackable.AutoTrackable, self).__setattr__(name, value) # pylint: disable=bad-super-call
# This is a hack so that the is_layer (within
# training/trackable/layer_utils.py) check doesn't get the weights attr.
# TODO(b/110718070): Remove when fixed.
def _is_layer(self):
return True
def _init_call_fn_args(self, expects_training_arg=None):
# Clear cached call function arguments.
self.__class__._call_full_argspec.fget.cache.pop(self, None)
self.__class__._call_fn_args.fget.cache.pop(self, None)
self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)
call_fn_args = self._call_fn_args
if expects_training_arg is None:
self._expects_training_arg = ('training' in call_fn_args or
self._call_accepts_kwargs)
else:
# Use value encoded into the metadata when loading from the SavedModel.
self._expects_training_arg = expects_training_arg
self._expects_mask_arg = ('mask' in call_fn_args or
self._call_accepts_kwargs)
@property
@layer_utils.cached_per_instance
def _call_full_argspec(self):
# Argspec inspection is expensive and the call spec is used often, so it
# makes sense to cache the result.
return tf_inspect.getfullargspec(self.call)
@property
@layer_utils.cached_per_instance
def _call_fn_args(self):
all_args = self._call_full_argspec.args
# Scrub `self` that appears if a decorator was applied.
if all_args and all_args[0] == 'self':
return all_args[1:]
return all_args
@property
@layer_utils.cached_per_instance
def _call_fn_arg_positions(self):
call_fn_arg_positions = dict()
for pos, arg in enumerate(self._call_fn_args):
call_fn_arg_positions[arg] = pos
return call_fn_arg_positions
@property
@layer_utils.cached_per_instance
def _call_accepts_kwargs(self):
return self._call_full_argspec.varkw is not None
@property
@layer_utils.cached_per_instance
def _should_compute_mask(self):
return ('mask' in self._call_fn_args or
getattr(self, 'compute_mask', None) is not None)
def _dedup_weights(self, weights):
"""Dedupe weights while maintaining order as much as possible."""
output, seen_ids = [], set()
for w in weights:
if id(w) not in seen_ids:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_ids.add(id(w))
return output
# SavedModel properties. Please see keras/saving/saved_model for details.
@property
def _trackable_saved_model_saver(self):
return layer_serialization.LayerSavedModelSaver(self)
@property
def _object_identifier(self):
return self._trackable_saved_model_saver.object_identifier
@property
def _tracking_metadata(self):
return self._trackable_saved_model_saver.tracking_metadata
def _trackable_children(self, save_type='checkpoint', **kwargs):
if save_type == 'savedmodel':
cache = kwargs['cache']
# TODO(b/213628533): This must be called before super() to ensure
# that any input shape changes are applied before getting the config of
# the model.
children = self._trackable_saved_model_saver.trackable_children(cache)
else:
children = {}
children.update(super()._trackable_children(save_type, **kwargs))
return children
def __getstate__(self):
# Override to support `copy.deepcopy` and pickling.
# Thread-local objects cannot be copied in Python 3, so pop these.
# Thread-local objects are used to cache losses in MirroredStrategy, and
# so shouldn't be copied.
state = self.__dict__.copy()
state.pop('_thread_local', None)
return state
def __setstate__(self, state):
state['_thread_local'] = threading.local()
# Bypass Trackable logic as `__dict__` already contains this info.
object.__setattr__(self, '__dict__', state)
class KerasHistory(
collections.namedtuple('KerasHistory',
['layer', 'node_index', 'tensor_index'])):
"""Tracks the Layer call that created a Tensor, for Keras Graph Networks.
During construction of Keras Graph Networks, this metadata is added to
each Tensor produced as the output of a Layer, starting with an
`InputLayer`. This allows Keras to track how each Tensor was produced, and
this information is later retraced by the `keras.engine.Network` class to
reconstruct the Keras Graph Network.
Attributes:
layer: The Layer that produced the Tensor.
node_index: The specific call to the Layer that produced this Tensor. Layers
can be called multiple times in order to share weights. A new node is
created every time a Layer is called.
tensor_index: The output index for this Tensor. Always zero if the Layer
that produced this Tensor only has one output. Nested structures of
Tensors are deterministically assigned an index via `nest.flatten`.
"""
# Added to maintain memory and performance characteristics of `namedtuple`
# while subclassing.
__slots__ = ()
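# Sketch of how this metadata is typically read back, assuming `x` is a Keras
# symbolic tensor produced during Functional-model construction:
#
#   layer, node_index, tensor_index = x._keras_history
#   # `layer` produced `x`; `node_index` identifies which call of that layer;
#   # `tensor_index` indexes into that call's (flattened) outputs.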
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
/autonomi_nos-0.0.9a1-py3-none-any.whl/nos/server/_service.py
import time
import traceback
from functools import lru_cache
from typing import Any, Dict
import grpc
import rich.console
import rich.status
from google.protobuf import empty_pb2
from nos import hub
from nos.common import FunctionSignature, ModelSpec, TaskType, dumps, loads
from nos.common.shm import NOS_SHM_ENABLED, SharedMemoryDataDict, SharedMemoryTransportManager
from nos.constants import ( # noqa F401
DEFAULT_GRPC_PORT, # noqa F401
NOS_PROFILING_ENABLED,
)
from nos.exceptions import ModelNotFoundError
from nos.executors.ray import RayExecutor
from nos.logging import logger
from nos.managers import ModelHandle, ModelManager
from nos.protoc import import_module
from nos.version import __version__
nos_service_pb2 = import_module("nos_service_pb2")
nos_service_pb2_grpc = import_module("nos_service_pb2_grpc")
@lru_cache(maxsize=32)
def load_spec(model_name: str, task: TaskType) -> ModelSpec:
"""Get the model spec cache."""
model_spec: ModelSpec = hub.load_spec(model_name, task=task)
logger.info(f"Loaded model spec [task={model_spec.task.value}, name={model_spec.name}]")
return model_spec
class InferenceService:
"""Ray-executor based inference service.
Parameters:
model_manager (ModelManager): Model manager.
executor (RayExecutor): Ray executor.
shm_manager (SharedMemoryTransportManager): Shared memory transport manager.
Used to create shared memory buffers for inputs/outputs,
and to copy data to/from shared memory.
Note: To be used with the `InferenceServiceImpl` gRPC service.
"""
def __init__(self):
self.model_manager = ModelManager()
self.executor = RayExecutor.get()
try:
self.executor.init()
except Exception as e:
err_msg = f"Failed to initialize executor [e={e}]"
logger.info(err_msg)
raise RuntimeError(err_msg)
if NOS_SHM_ENABLED:
self.shm_manager = SharedMemoryTransportManager()
else:
self.shm_manager = None
def execute(self, model_name: str, task: TaskType = None, inputs: Dict[str, Any] = None) -> Dict[str, Any]:
"""Execute the model.
Args:
model_name (str): Model identifier (e.g. `openai/clip-vit-base-patch32`).
task (TaskType): Task type (e.g. `TaskType.OBJECT_DETECTION_2D`).
inputs (Dict[str, Any]): Model inputs.
Returns:
Dict[str, Any]: Model outputs.
"""
# Load the model spec
try:
model_spec: ModelSpec = load_spec(model_name, task=task)
except Exception as e:
raise ModelNotFoundError(f"Failed to load model spec [model_name={model_name}, e={e}]")
# TODO (spillai): Validate/Decode the inputs
st = time.perf_counter()
model_inputs = FunctionSignature.validate(inputs, model_spec.signature.inputs)
model_inputs = SharedMemoryDataDict.decode(model_inputs)
if NOS_PROFILING_ENABLED:
model_inputs_types = [
f"{k}: List[type={type(v[0])}, len={len(v)}]" if isinstance(v, list) else str(type(v))
for k, v in model_inputs.items()
]
logger.debug(
f"Decoded inputs [inputs=({', '.join(model_inputs_types)}), elapsed={(time.perf_counter() - st) * 1e3:.1f}ms]"
)
# Initialize the model (if not already initialized)
# This call should also evict models and garbage collect if
# too many models are loaded simultaneously.
model_handle: ModelHandle = self.model_manager.get(model_spec)
# Get the model handle and call it remotely (with model spec, actor handle)
st = time.perf_counter()
response: Dict[str, Any] = model_handle(**model_inputs)
if NOS_PROFILING_ENABLED:
logger.debug(f"Executed model [name={model_spec.name}, elapsed={(time.perf_counter() - st) * 1e3:.1f}ms]")
# If the response is a single value, wrap it in a dict with the appropriate key
if len(model_spec.signature.outputs) == 1:
response = {k: response for k in model_spec.signature.outputs}
# Encode the response
st = time.perf_counter()
response = SharedMemoryDataDict.encode(response)
if NOS_PROFILING_ENABLED:
logger.debug(f"Encoded response [elapsed={(time.perf_counter() - st) * 1e3:.1f}ms]")
return response
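# Illustrative direct (non-gRPC) use of this service; the model name and task
# mirror the docstring above, while the exact `inputs` keys depend on the
# model's registered signature and are an assumption here:
#
#   svc = InferenceService()
#   outputs = svc.execute(
#       "openai/clip-vit-base-patch32",
#       task=TaskType.TEXT_EMBEDDING,
#       inputs={"texts": ["a cat sitting on a couch"]},
#   )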
class InferenceServiceImpl(nos_service_pb2_grpc.InferenceServiceServicer, InferenceService):
"""
Experimental gRPC-based inference service.
This service is used to serve models over gRPC.
Refer to the bring-your-own-schema section:
https://docs.ray.io/en/master/serve/direct-ingress.html?highlight=grpc#bring-your-own-schema
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def Ping(self, request: empty_pb2.Empty, context: grpc.ServicerContext) -> nos_service_pb2.PingResponse:
"""Health check."""
return nos_service_pb2.PingResponse(status="ok")
def GetServiceInfo(
self, request: empty_pb2.Empty, context: grpc.ServicerContext
) -> nos_service_pb2.ServiceInfoResponse:
"""Get information on the service."""
return nos_service_pb2.ServiceInfoResponse(version=__version__)
def ListModels(self, request: empty_pb2.Empty, context: grpc.ServicerContext) -> nos_service_pb2.ModelListResponse:
"""List all models."""
response = nos_service_pb2.ModelListResponse()
for spec in hub.list():
response.models.append(nos_service_pb2.ModelInfo(name=spec.name, task=spec.task.value))
return response
def GetModelInfo(
self, request: nos_service_pb2.ModelInfoRequest, context: grpc.ServicerContext
) -> nos_service_pb2.ModelInfoResponse:
"""Get model information."""
try:
model_info = request.request
spec: ModelSpec = hub.load_spec(model_info.name, task=TaskType(model_info.task))
logger.debug(f"GetModelInfo() [spec={spec}]")
except KeyError as e:
logger.error(f"Failed to load spec [request={request.request}, e={e}]")
context.abort(grpc.StatusCode.NOT_FOUND, str(e))
return spec._to_proto(public=True)
def RegisterSystemSharedMemory(
self, request: nos_service_pb2.GenericRequest, context: grpc.ServicerContext
) -> nos_service_pb2.GenericResponse:
"""Register system shared memory under a specific namespace `<client_id>/<object_id>`."""
if not NOS_SHM_ENABLED:
context.abort(grpc.StatusCode.UNIMPLEMENTED, "Shared memory not enabled.")
metadata = dict(context.invocation_metadata())
client_id = metadata.get("client_id", None)
object_id = metadata.get("object_id", None)
namespace = f"{client_id}/{object_id}"
logger.debug(f"Registering shm [client_id={client_id}, object_id={object_id}]")
try:
# Create a shared memory segment for the inputs
# Note: The returned keys for shared memory segments are identical to the
# keys in the input dictionary (i.e. <key>), and are not prefixed with the
# namespace `<client_id>/<object_id>`.
shm_map = self.shm_manager.create(loads(request.request_bytes), namespace=namespace)
# Here, dumps() is used to serialize the shared memory numpy objects via __getstate__().
# The serialized data is then sent back to the client, where it can be deserialized
# and set via __setstate__() on the client side, so that the client can access the shared
# memory segments.
logger.debug(f"Registered shm [client_id={client_id}, object_id={object_id}, shm_map={shm_map}]")
return nos_service_pb2.GenericResponse(response_bytes=dumps(shm_map))
except Exception as e:
logger.error(f"Failed to register system shared memory: {e}")
context.abort(grpc.StatusCode.INTERNAL, str(e))
def UnregisterSystemSharedMemory(
self, request: nos_service_pb2.GenericRequest, context: grpc.ServicerContext
) -> nos_service_pb2.GenericResponse:
"""Unregister system shared memory for specific namespace `<client_id>/<object_id>`."""
if not NOS_SHM_ENABLED:
context.abort(grpc.StatusCode.UNIMPLEMENTED, "Shared memory not enabled.")
metadata = dict(context.invocation_metadata())
client_id = metadata.get("client_id", None)
object_id = metadata.get("object_id", None)
namespace = f"{client_id}/{object_id}"
# TODO (spillai): Currently, we can ignore the `request` provided
# by the client, since all the shared memory segments under the namespace are deleted.
logger.debug(f"Unregistering shm [client_id={client_id}, object_id={object_id}]")
try:
self.shm_manager.cleanup(namespace=namespace)
except Exception as e:
logger.error(f"Failed to unregister shm [e{e}]")
context.abort(grpc.StatusCode.INTERNAL, str(e))
return nos_service_pb2.GenericResponse()
def Run(
self, request: nos_service_pb2.InferenceRequest, context: grpc.ServicerContext
) -> nos_service_pb2.InferenceResponse:
"""Main model prediction interface."""
model_request = request.model
logger.debug(f"=> Received request [task={model_request.task}, model={model_request.name}]")
if model_request.task not in (
TaskType.IMAGE_GENERATION.value,
TaskType.IMAGE_EMBEDDING.value,
TaskType.TEXT_EMBEDDING.value,
TaskType.OBJECT_DETECTION_2D.value,
TaskType.IMAGE_SEGMENTATION_2D.value,
TaskType.CUSTOM.value,
):
context.abort(grpc.StatusCode.NOT_FOUND, f"Invalid task [task={model_request.task}]")
try:
st = time.perf_counter()
logger.info(f"Executing request [task={model_request.task}, model={model_request.name}]")
response = self.execute(model_request.name, task=TaskType(model_request.task), inputs=request.inputs)
logger.info(
f"Executed request [task={model_request.task}, model={model_request.name}, elapsed={(time.perf_counter() - st) * 1e3:.1f}ms]"
)
return nos_service_pb2.InferenceResponse(response_bytes=dumps(response))
except (grpc.RpcError, Exception) as e:
msg = f"Failed to execute request [task={model_request.task}, model={model_request.name}]"
msg += f"{traceback.format_exc()}"
logger.error(f"{msg}, e={e}")
context.abort(grpc.StatusCode.INTERNAL, "Internal Server Error")
def serve(address: str = f"[::]:{DEFAULT_GRPC_PORT}", max_workers: int = 1) -> None:
"""Start the gRPC server."""
from concurrent import futures
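    # Raise the gRPC message size limits to 512 MB so that large serialized payloads
    # (e.g. images or embeddings) fit in a single request/response.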
options = [
("grpc.max_message_length", 512 * 1024 * 1024),
("grpc.max_send_message_length", 512 * 1024 * 1024),
("grpc.max_receive_message_length", 512 * 1024 * 1024),
]
server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers), options=options)
nos_service_pb2_grpc.add_InferenceServiceServicer_to_server(InferenceServiceImpl(), server)
server.add_insecure_port(address)
console = rich.console.Console()
console.print(f"[bold green] Starting server on {address}[/bold green]")
start_t = time.time()
server.start()
console.print(
f"[bold green] ✓ InferenceService :: Deployment complete (elapsed={time.time() - start_t:.1f}s) [/bold green]", # noqa
)
server.wait_for_termination()
console.print("Server stopped")
def main():
serve()
if __name__ == "__main__":
main()
|
PypiClean
|
/django-magic-notifier-0.2.3.tar.gz/django-magic-notifier-0.2.3/docs/source/usage.rst
|
Usage
-----
Send an email with a direct final string (no template) to a user instance::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email"], subject, [user], final_message="Nice if you get this")
Send an email with a template (hello) to a user instance::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email"], subject, [user], template='hello')
Send an email with a template to all superuser::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email"], subject, "admins", template='hello')
Send an email with a template to all staff users::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email"], subject, "staff", template='hello')
Send an email with a template to all users::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email"], subject, "all", template='hello')
Send an email with a template to all users excluding staff::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email"], subject, "all-staff", template='hello')
Send an email with a file and a template to all users::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email"], subject, "all-staff", template='hello',
files=['path/to/file.ext'])
Send a sms with a direct message (no template) to a set of users::
    users = User.objects.filter(pk__lt=10)
subject = "Test magic notifier"
notify(["sms"], subject, users, final_message="Nice if you get this")
Send a sms with a template to a set of users::
    users = User.objects.filter(pk__lt=10)
subject = "Test magic notifier"
notify(["sms"], subject, users, template='hello')
Send an email and sms with a template to all users excluding staff::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email", 'sms'], subject, "all-staff", template='hello')
Send an email, a sms and a push notification with a template to all users excluding staff::
user = User(email="testuser@localhost", username="testuser")
subject = "Test magic notifier"
notify(["email", 'sms', 'push'], subject, "all-staff", template='hello')
|
PypiClean
|
/PsychRNN-1.0.0-py3-none-any.whl/psychrnn/backend/models/lstm.py
|
from __future__ import division
from psychrnn.backend.rnn import RNN
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
class LSTM(RNN):
""" LSTM (Long Short Term Memory) recurrent network model
    LSTM implementation of :class:`psychrnn.backend.rnn.RNN`. Because the LSTM is structured differently from the basic RNN, biological constraints such as Dale's principle, autapses, and connectivity are not enabled.
Args:
params (dict): See :class:`psychrnn.backend.rnn.RNN` for details.
"""
def __init__(self, params):
# ----------------------------------
# Call RNN constructor
# ----------------------------------
super(LSTM, self).__init__(params)
# ----------------------------------
# Add new variables for gates
# TODO better LSTM initialization
# ----------------------------------
self.N_concat = self.N_in + self.N_rec
self.init_hidden_initializer = tf.compat.v1.random_normal_initializer(mean=0, stddev=0.1)
self.init_cell_initializer = tf.compat.v1.random_normal_initializer(mean=0, stddev=0.1)
self.W_f_initializer = tf.compat.v1.random_normal_initializer(mean=0, stddev=0.1)
self.W_i_initializer = tf.compat.v1.random_normal_initializer(mean=0, stddev=0.1)
self.W_c_initializer = tf.compat.v1.random_normal_initializer(mean=0, stddev=0.1)
self.W_o_initializer = tf.compat.v1.random_normal_initializer(mean=0, stddev=0.1)
self.b_f_initializer = tf.compat.v1.constant_initializer(1.0)
self.b_i_initializer = tf.compat.v1.constant_initializer(1.0)
self.b_c_initializer = tf.compat.v1.constant_initializer(1.0)
self.b_o_initializer = tf.compat.v1.constant_initializer(1.0)
# ----------------------------------
# TensorFlow initializations
# ----------------------------------
with tf.compat.v1.variable_scope(self.name) as scope:
self.init_hidden = tf.compat.v1.get_variable('init_hidden', [self.N_batch, self.N_rec],
initializer=self.init_hidden_initializer,
trainable=True)
self.init_cell = tf.compat.v1.get_variable('init_cell', [self.N_batch, self.N_rec],
initializer=self.init_cell_initializer,
trainable=True)
self.W_f = tf.compat.v1.get_variable('W_f', [self.N_concat, self.N_rec],
initializer=self.W_f_initializer,
trainable=True)
self.W_i = tf.compat.v1.get_variable('W_i', [self.N_concat, self.N_rec],
initializer=self.W_i_initializer,
trainable=True)
self.W_c = tf.compat.v1.get_variable('W_c', [self.N_concat, self.N_rec],
initializer=self.W_c_initializer,
trainable=True)
self.W_o = tf.compat.v1.get_variable('W_o', [self.N_concat, self.N_rec],
initializer=self.W_o_initializer,
trainable=True)
self.b_f = tf.compat.v1.get_variable('b_f', [self.N_rec], initializer=self.b_f_initializer,
trainable=True)
self.b_i = tf.compat.v1.get_variable('b_i', [self.N_rec], initializer=self.b_i_initializer,
trainable=True)
self.b_c = tf.compat.v1.get_variable('b_c', [self.N_rec], initializer=self.b_c_initializer,
trainable=True)
self.b_o = tf.compat.v1.get_variable('b_o', [self.N_rec], initializer=self.b_o_initializer,
trainable=True)
def recurrent_timestep(self, rnn_in, hidden, cell):
""" Recurrent time step.
Given input and previous state, outputs the next state of the network.
Arguments:
rnn_in (*tf.Tensor(dtype=float, shape=(?*, :attr:`N_in` *))*): Input to the rnn at a certain time point.
hidden (*tf.Tensor(dtype=float, shape=(* :attr:`N_batch` , :attr:`N_rec` *))*): Hidden units state of network at previous time point.
cell (*tf.Tensor(dtype=float, shape=(* :attr:`N_batch` , :attr:`N_rec` *))*): Cell state of the network at previous time point.
Returns:
tuple:
* **new_hidden** (*tf.Tensor(dtype=float, shape=(* :attr:`N_batch` , :attr:`N_rec` *))*) -- New hidden unit state of the network.
* **new_cell** (*tf.Tensor(dtype=float, shape=(* :attr:`N_batch` , :attr:`N_rec` *))*) -- New cell state of the network.
"""
f = tf.nn.sigmoid(tf.matmul(tf.concat([hidden, rnn_in], 1), self.W_f)
+ self.b_f)
i = tf.nn.sigmoid(tf.matmul(tf.concat([hidden, rnn_in], 1), self.W_i)
+ self.b_i)
c = tf.nn.tanh(tf.matmul(tf.concat([hidden, rnn_in], 1), self.W_c)
+ self.b_c)
o = tf.nn.sigmoid(tf.matmul(tf.concat([hidden, rnn_in], 1), self.W_o)
+ self.b_o)
new_cell = f * cell + i * c
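        # Note: a textbook LSTM applies tanh to the new cell state here; this
        # implementation applies a sigmoid instead, as written below.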
new_hidden = o * tf.nn.sigmoid(new_cell)
return new_hidden, new_cell
def output_timestep(self, hidden):
"""Returns the output node activity for a given timestep.
Arguments:
hidden (*tf.Tensor(dtype=float, shape=(* :attr:`N_batch` , :attr:`N_rec` *))*): Hidden units of network at a given timepoint for each trial in the batch.
Returns:
output (*tf.Tensor(dtype=float, shape=(* :attr:`N_batch` , :attr:`N_out` *))*): Output of the network at a given timepoint for each trial in the batch.
"""
output = tf.matmul(hidden, self.W_out, transpose_b=True) + self.b_out
return output
def forward_pass(self):
""" Run the LSTM on a batch of task inputs.
Iterates over timesteps, running the :func:`recurrent_timestep` and :func:`output_timestep`
Implements :func:`psychrnn.backend.rnn.RNN.forward_pass`.
Returns:
tuple:
* **predictions** (*tf.Tensor(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_out` *))*) -- Network output on inputs found in self.x within the tf network.
* **hidden** (*tf.Tensor(*:attr:`N_batch`, :attr:`N_steps`, :attr:`N_rec` *))*) -- Hidden unit values over the course of the trials found in self.x within the tf network.
"""
rnn_inputs = tf.unstack(self.x, axis=1)
hidden = self.init_hidden
cell = self.init_cell
rnn_outputs = []
rnn_states = []
for rnn_input in rnn_inputs:
hidden, cell = self.recurrent_timestep(rnn_input, hidden, cell)
output = self.output_timestep(hidden)
rnn_outputs.append(output)
rnn_states.append(hidden)
return tf.transpose(a=rnn_outputs, perm=[1, 0, 2]), tf.transpose(a=rnn_states, perm=[1, 0, 2])
|
PypiClean
|
/vioneta-2023.7.3.tar.gz/vioneta-2023.7.3/homeassistant/components/onkyo/media_player.py
|
from __future__ import annotations
import logging
from typing import Any
import eiscp
from eiscp import eISCP
import voluptuous as vol
from homeassistant.components.media_player import (
DOMAIN,
PLATFORM_SCHEMA,
MediaPlayerEntity,
MediaPlayerEntityFeature,
MediaPlayerState,
MediaType,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant, ServiceCall
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
CONF_SOURCES = "sources"
CONF_MAX_VOLUME = "max_volume"
CONF_RECEIVER_MAX_VOLUME = "receiver_max_volume"
DEFAULT_NAME = "Onkyo Receiver"
SUPPORTED_MAX_VOLUME = 100
DEFAULT_RECEIVER_MAX_VOLUME = 80
SUPPORT_ONKYO_WO_VOLUME = (
MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.SELECT_SOURCE
| MediaPlayerEntityFeature.PLAY
| MediaPlayerEntityFeature.PLAY_MEDIA
)
SUPPORT_ONKYO = (
SUPPORT_ONKYO_WO_VOLUME
| MediaPlayerEntityFeature.VOLUME_SET
| MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.VOLUME_STEP
)
KNOWN_HOSTS: list[str] = []
DEFAULT_SOURCES = {
"tv": "TV",
"bd": "Bluray",
"game": "Game",
"aux1": "Aux1",
"video1": "Video 1",
"video2": "Video 2",
"video3": "Video 3",
"video4": "Video 4",
"video5": "Video 5",
"video6": "Video 6",
"video7": "Video 7",
"fm": "Radio",
}
DEFAULT_PLAYABLE_SOURCES = ("fm", "am", "tuner")
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MAX_VOLUME, default=SUPPORTED_MAX_VOLUME): vol.All(
vol.Coerce(int), vol.Range(min=1, max=100)
),
vol.Optional(
CONF_RECEIVER_MAX_VOLUME, default=DEFAULT_RECEIVER_MAX_VOLUME
): cv.positive_int,
vol.Optional(CONF_SOURCES, default=DEFAULT_SOURCES): {cv.string: cv.string},
}
)
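# Example (hypothetical) configuration.yaml entry matching the schema above:
#
#   media_player:
#     - platform: onkyo
#       host: 192.168.1.2
#       name: Living Room Receiver
#       sources:
#         pc: "Gaming PC"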
TIMEOUT_MESSAGE = "Timeout waiting for response."
ATTR_HDMI_OUTPUT = "hdmi_output"
ATTR_PRESET = "preset"
ATTR_AUDIO_INFORMATION = "audio_information"
ATTR_VIDEO_INFORMATION = "video_information"
ATTR_VIDEO_OUT = "video_out"
ACCEPTED_VALUES = [
"no",
"analog",
"yes",
"out",
"out-sub",
"sub",
"hdbaset",
"both",
"up",
]
ONKYO_SELECT_OUTPUT_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_HDMI_OUTPUT): vol.In(ACCEPTED_VALUES),
}
)
SERVICE_SELECT_HDMI_OUTPUT = "onkyo_select_hdmi_output"
def _parse_onkyo_payload(payload):
"""Parse a payload returned from the eiscp library."""
if isinstance(payload, bool):
# command not supported by the device
return False
if len(payload) < 2:
# no value
return None
if isinstance(payload[1], str):
return payload[1].split(",")
return payload[1]
def _tuple_get(tup, index, default=None):
"""Return a tuple item at index or a default value if it doesn't exist."""
return (tup[index : index + 1] or [default])[0]
def determine_zones(receiver):
"""Determine what zones are available for the receiver."""
out = {"zone2": False, "zone3": False}
try:
_LOGGER.debug("Checking for zone 2 capability")
response = receiver.raw("ZPWQSTN")
if response != "ZPWN/A": # Zone 2 Available
out["zone2"] = True
else:
_LOGGER.debug("Zone 2 not available")
except ValueError as error:
if str(error) != TIMEOUT_MESSAGE:
raise error
_LOGGER.debug("Zone 2 timed out, assuming no functionality")
try:
_LOGGER.debug("Checking for zone 3 capability")
response = receiver.raw("PW3QSTN")
if response != "PW3N/A":
out["zone3"] = True
else:
_LOGGER.debug("Zone 3 not available")
except ValueError as error:
if str(error) != TIMEOUT_MESSAGE:
raise error
_LOGGER.debug("Zone 3 timed out, assuming no functionality")
except AssertionError:
_LOGGER.error("Zone 3 detection failed")
return out
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Onkyo platform."""
hosts: list[OnkyoDevice] = []
def service_handle(service: ServiceCall) -> None:
"""Handle for services."""
entity_ids = service.data[ATTR_ENTITY_ID]
devices = [d for d in hosts if d.entity_id in entity_ids]
for device in devices:
if service.service == SERVICE_SELECT_HDMI_OUTPUT:
device.select_output(service.data[ATTR_HDMI_OUTPUT])
hass.services.register(
DOMAIN,
SERVICE_SELECT_HDMI_OUTPUT,
service_handle,
schema=ONKYO_SELECT_OUTPUT_SCHEMA,
)
if CONF_HOST in config and (host := config[CONF_HOST]) not in KNOWN_HOSTS:
try:
receiver = eiscp.eISCP(host)
hosts.append(
OnkyoDevice(
receiver,
config.get(CONF_SOURCES),
name=config.get(CONF_NAME),
max_volume=config.get(CONF_MAX_VOLUME),
receiver_max_volume=config.get(CONF_RECEIVER_MAX_VOLUME),
)
)
KNOWN_HOSTS.append(host)
zones = determine_zones(receiver)
# Add Zone2 if available
if zones["zone2"]:
_LOGGER.debug("Setting up zone 2")
hosts.append(
OnkyoDeviceZone(
"2",
receiver,
config.get(CONF_SOURCES),
name=f"{config[CONF_NAME]} Zone 2",
max_volume=config.get(CONF_MAX_VOLUME),
receiver_max_volume=config.get(CONF_RECEIVER_MAX_VOLUME),
)
)
# Add Zone3 if available
if zones["zone3"]:
_LOGGER.debug("Setting up zone 3")
hosts.append(
OnkyoDeviceZone(
"3",
receiver,
config.get(CONF_SOURCES),
name=f"{config[CONF_NAME]} Zone 3",
max_volume=config.get(CONF_MAX_VOLUME),
receiver_max_volume=config.get(CONF_RECEIVER_MAX_VOLUME),
)
)
except OSError:
_LOGGER.error("Unable to connect to receiver at %s", host)
else:
for receiver in eISCP.discover():
if receiver.host not in KNOWN_HOSTS:
hosts.append(OnkyoDevice(receiver, config.get(CONF_SOURCES)))
KNOWN_HOSTS.append(receiver.host)
add_entities(hosts, True)
class OnkyoDevice(MediaPlayerEntity):
"""Representation of an Onkyo device."""
_attr_supported_features = SUPPORT_ONKYO
def __init__(
self,
receiver,
sources,
name=None,
max_volume=SUPPORTED_MAX_VOLUME,
receiver_max_volume=DEFAULT_RECEIVER_MAX_VOLUME,
):
"""Initialize the Onkyo Receiver."""
self._receiver = receiver
self._attr_is_volume_muted = False
self._attr_volume_level = 0
self._attr_state = MediaPlayerState.OFF
if name:
# not discovered
self._attr_name = name
else:
# discovered
self._attr_unique_id = (
f"{receiver.info['model_name']}_{receiver.info['identifier']}"
)
self._attr_name = self._attr_unique_id
self._max_volume = max_volume
self._receiver_max_volume = receiver_max_volume
self._attr_source_list = list(sources.values())
self._source_mapping = sources
self._reverse_mapping = {value: key for key, value in sources.items()}
self._attr_extra_state_attributes = {}
self._hdmi_out_supported = True
self._audio_info_supported = True
self._video_info_supported = True
def command(self, command):
"""Run an eiscp command and catch connection errors."""
try:
result = self._receiver.command(command)
except (ValueError, OSError, AttributeError, AssertionError):
if self._receiver.command_socket:
self._receiver.command_socket = None
_LOGGER.debug("Resetting connection to %s", self.name)
else:
_LOGGER.info("%s is disconnected. Attempting to reconnect", self.name)
return False
_LOGGER.debug("Result for %s: %s", command, result)
return result
def update(self) -> None:
"""Get the latest state from the device."""
status = self.command("system-power query")
if not status:
return
if status[1] == "on":
self._attr_state = MediaPlayerState.ON
else:
self._attr_state = MediaPlayerState.OFF
self._attr_extra_state_attributes.pop(ATTR_AUDIO_INFORMATION, None)
self._attr_extra_state_attributes.pop(ATTR_VIDEO_INFORMATION, None)
self._attr_extra_state_attributes.pop(ATTR_PRESET, None)
self._attr_extra_state_attributes.pop(ATTR_VIDEO_OUT, None)
return
volume_raw = self.command("volume query")
mute_raw = self.command("audio-muting query")
current_source_raw = self.command("input-selector query")
# If the following command is sent to a device with only one HDMI out,
# the display shows 'Not Available'.
# We avoid this by checking if HDMI out is supported
if self._hdmi_out_supported:
hdmi_out_raw = self.command("hdmi-output-selector query")
else:
hdmi_out_raw = []
preset_raw = self.command("preset query")
if self._audio_info_supported:
audio_information_raw = self.command("audio-information query")
self._parse_audio_information(audio_information_raw)
if self._video_info_supported:
video_information_raw = self.command("video-information query")
self._parse_video_information(video_information_raw)
if not (volume_raw and mute_raw and current_source_raw):
return
sources = _parse_onkyo_payload(current_source_raw)
for source in sources:
if source in self._source_mapping:
self._attr_source = self._source_mapping[source]
break
self._attr_source = "_".join(sources)
if preset_raw and self.source and self.source.lower() == "radio":
self._attr_extra_state_attributes[ATTR_PRESET] = preset_raw[1]
elif ATTR_PRESET in self._attr_extra_state_attributes:
del self._attr_extra_state_attributes[ATTR_PRESET]
self._attr_is_volume_muted = bool(mute_raw[1] == "on")
# AMP_VOL/MAX_RECEIVER_VOL*(MAX_VOL/100)
self._attr_volume_level = volume_raw[1] / (
self._receiver_max_volume * self._max_volume / 100
)
if not hdmi_out_raw:
return
self._attr_extra_state_attributes[ATTR_VIDEO_OUT] = ",".join(hdmi_out_raw[1])
if hdmi_out_raw[1] == "N/A":
self._hdmi_out_supported = False
def turn_off(self) -> None:
"""Turn the media player off."""
self.command("system-power standby")
def set_volume_level(self, volume: float) -> None:
"""Set volume level, input is range 0..1.
However full volume on the amp is usually far too loud so allow the user to
specify the upper range with CONF_MAX_VOLUME. We change as per max_volume
set by user. This means that if max volume is 80 then full volume in HA will
give 80% volume on the receiver. Then we convert that to the correct scale
for the receiver.
"""
# HA_VOL * (MAX VOL / 100) * MAX_RECEIVER_VOL
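        # e.g. with max_volume=80 and receiver_max_volume=80, an HA volume of 1.0
        # maps to int(1.0 * 0.8 * 80) = 64 on the receiver (80% of its scale).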
self.command(
"volume"
f" {int(volume * (self._max_volume / 100) * self._receiver_max_volume)}"
)
def volume_up(self) -> None:
"""Increase volume by 1 step."""
self.command("volume level-up")
def volume_down(self) -> None:
"""Decrease volume by 1 step."""
self.command("volume level-down")
def mute_volume(self, mute: bool) -> None:
"""Mute (true) or unmute (false) media player."""
if mute:
self.command("audio-muting on")
else:
self.command("audio-muting off")
def turn_on(self) -> None:
"""Turn the media player on."""
self.command("system-power on")
def select_source(self, source: str) -> None:
"""Set the input source."""
if self.source_list and source in self.source_list:
source = self._reverse_mapping[source]
self.command(f"input-selector {source}")
def play_media(
self, media_type: MediaType | str, media_id: str, **kwargs: Any
) -> None:
"""Play radio station by preset number."""
source = self._reverse_mapping[self._attr_source]
if media_type.lower() == "radio" and source in DEFAULT_PLAYABLE_SOURCES:
self.command(f"preset {media_id}")
def select_output(self, output):
"""Set hdmi-out."""
self.command(f"hdmi-output-selector={output}")
def _parse_audio_information(self, audio_information_raw):
values = _parse_onkyo_payload(audio_information_raw)
if values is False:
self._audio_info_supported = False
return
if values:
info = {
"format": _tuple_get(values, 1),
"input_frequency": _tuple_get(values, 2),
"input_channels": _tuple_get(values, 3),
"listening_mode": _tuple_get(values, 4),
"output_channels": _tuple_get(values, 5),
"output_frequency": _tuple_get(values, 6),
}
self._attr_extra_state_attributes[ATTR_AUDIO_INFORMATION] = info
else:
self._attr_extra_state_attributes.pop(ATTR_AUDIO_INFORMATION, None)
def _parse_video_information(self, video_information_raw):
values = _parse_onkyo_payload(video_information_raw)
if values is False:
self._video_info_supported = False
return
if values:
info = {
"input_resolution": _tuple_get(values, 1),
"input_color_schema": _tuple_get(values, 2),
"input_color_depth": _tuple_get(values, 3),
"output_resolution": _tuple_get(values, 5),
"output_color_schema": _tuple_get(values, 6),
"output_color_depth": _tuple_get(values, 7),
"picture_mode": _tuple_get(values, 8),
}
self._attr_extra_state_attributes[ATTR_VIDEO_INFORMATION] = info
else:
self._attr_extra_state_attributes.pop(ATTR_VIDEO_INFORMATION, None)
class OnkyoDeviceZone(OnkyoDevice):
"""Representation of an Onkyo device's extra zone."""
def __init__(
self,
zone,
receiver,
sources,
name=None,
max_volume=SUPPORTED_MAX_VOLUME,
receiver_max_volume=DEFAULT_RECEIVER_MAX_VOLUME,
):
"""Initialize the Zone with the zone identifier."""
self._zone = zone
self._supports_volume = True
super().__init__(receiver, sources, name, max_volume, receiver_max_volume)
def update(self) -> None:
"""Get the latest state from the device."""
status = self.command(f"zone{self._zone}.power=query")
if not status:
return
if status[1] == "on":
self._attr_state = MediaPlayerState.ON
else:
self._attr_state = MediaPlayerState.OFF
return
volume_raw = self.command(f"zone{self._zone}.volume=query")
mute_raw = self.command(f"zone{self._zone}.muting=query")
current_source_raw = self.command(f"zone{self._zone}.selector=query")
preset_raw = self.command(f"zone{self._zone}.preset=query")
# If we received a source value, but not a volume value
# it's likely this zone permanently does not support volume.
if current_source_raw and not volume_raw:
self._supports_volume = False
if not (volume_raw and mute_raw and current_source_raw):
return
# It's possible for some players to have zones set to HDMI with
# no sound control. In this case, the string `N/A` is returned.
self._supports_volume = isinstance(volume_raw[1], (float, int))
# eiscp can return string or tuple. Make everything tuples.
if isinstance(current_source_raw[1], str):
current_source_tuples = (current_source_raw[0], (current_source_raw[1],))
else:
current_source_tuples = current_source_raw
for source in current_source_tuples[1]:
if source in self._source_mapping:
self._attr_source = self._source_mapping[source]
break
self._attr_source = "_".join(current_source_tuples[1])
self._attr_is_volume_muted = bool(mute_raw[1] == "on")
if preset_raw and self.source and self.source.lower() == "radio":
self._attr_extra_state_attributes[ATTR_PRESET] = preset_raw[1]
elif ATTR_PRESET in self._attr_extra_state_attributes:
del self._attr_extra_state_attributes[ATTR_PRESET]
if self._supports_volume:
# AMP_VOL/MAX_RECEIVER_VOL*(MAX_VOL/100)
self._attr_volume_level = (
volume_raw[1] / self._receiver_max_volume * (self._max_volume / 100)
)
@property
def supported_features(self) -> MediaPlayerEntityFeature:
"""Return media player features that are supported."""
if self._supports_volume:
return SUPPORT_ONKYO
return SUPPORT_ONKYO_WO_VOLUME
def turn_off(self) -> None:
"""Turn the media player off."""
self.command(f"zone{self._zone}.power=standby")
def set_volume_level(self, volume: float) -> None:
"""Set volume level, input is range 0..1.
However full volume on the amp is usually far too loud so allow the user to
specify the upper range with CONF_MAX_VOLUME. We change as per max_volume
set by user. This means that if max volume is 80 then full volume in HA
will give 80% volume on the receiver. Then we convert that to the correct
scale for the receiver.
"""
# HA_VOL * (MAX VOL / 100) * MAX_RECEIVER_VOL
self.command(
f"zone{self._zone}.volume={int(volume * (self._max_volume / 100) * self._receiver_max_volume)}"
)
def volume_up(self) -> None:
"""Increase volume by 1 step."""
self.command(f"zone{self._zone}.volume=level-up")
def volume_down(self) -> None:
"""Decrease volume by 1 step."""
self.command(f"zone{self._zone}.volume=level-down")
def mute_volume(self, mute: bool) -> None:
"""Mute (true) or unmute (false) media player."""
if mute:
self.command(f"zone{self._zone}.muting=on")
else:
self.command(f"zone{self._zone}.muting=off")
def turn_on(self) -> None:
"""Turn the media player on."""
self.command(f"zone{self._zone}.power=on")
def select_source(self, source: str) -> None:
"""Set the input source."""
if self.source_list and source in self.source_list:
source = self._reverse_mapping[source]
self.command(f"zone{self._zone}.selector={source}")
|
PypiClean
|
/Azule_Hair_Transplant-0.0.1.tar.gz/Azule_Hair_Transplant-0.0.1/models/face_parsing/resnet.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as modelzoo
# from modules.bn import InPlaceABNSync as BatchNorm2d
resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, in_chan, out_chan, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_chan, out_chan, stride)
self.bn1 = nn.BatchNorm2d(out_chan)
self.conv2 = conv3x3(out_chan, out_chan)
self.bn2 = nn.BatchNorm2d(out_chan)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
if in_chan != out_chan or stride != 1:
self.downsample = nn.Sequential(
nn.Conv2d(in_chan, out_chan,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_chan),
)
def forward(self, x):
residual = self.conv1(x)
residual = F.relu(self.bn1(residual))
residual = self.conv2(residual)
residual = self.bn2(residual)
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(x)
out = shortcut + residual
out = self.relu(out)
return out
def create_layer_basic(in_chan, out_chan, bnum, stride=1):
layers = [BasicBlock(in_chan, out_chan, stride=stride)]
for i in range(bnum-1):
layers.append(BasicBlock(out_chan, out_chan, stride=1))
return nn.Sequential(*layers)
class Resnet18(nn.Module):
def __init__(self):
super(Resnet18, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
self.init_weight()
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.maxpool(x)
x = self.layer1(x)
feat8 = self.layer2(x) # 1/8
feat16 = self.layer3(feat8) # 1/16
feat32 = self.layer4(feat16) # 1/32
return feat8, feat16, feat32
def init_weight(self):
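        # Load ImageNet-pretrained ResNet-18 weights from torchvision's model zoo,
        # skipping the final fully-connected layer since only the convolutional
        # feature maps (1/8, 1/16, 1/32 scale) are used here.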
state_dict = modelzoo.load_url(resnet18_url)
self_state_dict = self.state_dict()
for k, v in state_dict.items():
if 'fc' in k: continue
self_state_dict.update({k: v})
self.load_state_dict(self_state_dict)
def get_params(self):
wd_params, nowd_params = [], []
for name, module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
wd_params.append(module.weight)
if not module.bias is None:
nowd_params.append(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nowd_params += list(module.parameters())
return wd_params, nowd_params
if __name__ == "__main__":
net = Resnet18()
x = torch.randn(16, 3, 224, 224)
out = net(x)
print(out[0].size())
print(out[1].size())
print(out[2].size())
net.get_params()
|
PypiClean
|
/pytickets-0.9.2.1.tar.gz/pytickets-0.9.2.1/tickets/bytemapper.py
|
__all__ = [
'ByteMapperFactory'
]
class BaseByteMapper(object):
@classmethod
def _join_strings(cls, names_values):
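        # Concatenate the given strings ordered by their declared byte offsets
        # (the <NAME>_OFFSET class attributes), not by dict iteration order.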
def string_start_offset(name):
return getattr(cls, name.upper()+'_OFFSET')
sorted_strings = [
names_values[name] for name in
sorted(names_values, key=string_start_offset)
]
return ''.join(sorted_strings)
@classmethod
def _serialize_join_strings(cls, names_values, **kw):
_names_values = {}
for (name,val) in names_values.iteritems():
serialize = getattr(cls, '_serialize_'+name)
val = serialize(val, **kw)
assert type(val) is str
_names_values[name] = val
names_values = _names_values
return cls._join_strings(names_values)
@classmethod
def _split_strings(cls, plaintext, *names):
'''
beware: no alerts if plaintext is too small
'''
def string_start_offset(name):
return getattr(cls, name.upper()+'_OFFSET')
sorted_strings = []
for name in sorted(names, key=string_start_offset):
len_ = getattr(cls, name.upper()+'_LEN')
s = plaintext[:len_]
sorted_strings.append(s)
plaintext = plaintext[len_:]
return tuple(sorted_strings)
@classmethod
def _split_deserialize_strings(cls, plaintext, names):
'''
beware: no alerts if plaintext is too small
'''
def string_start_offset(name):
return getattr(cls, name.upper()+'_OFFSET')
sorted_values = []
for name in sorted(names, key=string_start_offset):
len_ = getattr(cls, name.upper()+'_LEN')
deserialize = getattr(cls, '_deserialize_'+name)
s = deserialize(plaintext[:len_])
sorted_values.append(s)
plaintext = plaintext[len_:]
return tuple(sorted_values)
class ByteMapperFactory(object):
'''
ByteMapperFactory creates new classes able to parse custom byte ranges
from attribute _bytes. Parsing is done by slicing. Attribute _bytes can
be of any sliceable type but must be explicitly set in instances!
    Make sure to declare _bytes, preferably in __init__() of inheriting classes:
>>> _MyBM = ByteMapperFactory('_MyBM', [ (3,'asd'), (3,'qwe') ])
>>> class MyBM(_MyBM):
... def __init__(self):
... super(MyBM, self).__init__()
... self._bytes = [1,2,3,4,5,6]
...
>>> a = MyBM()
>>> assert a.asd == [1,2,3]
>>> assert a.qwe == [4,5,6]
>>> assert a.QWE_OFFSET == 3
'''
@classmethod
def classmethod_parse(cls, start, end):
@classmethod
def parse(cls, b):
return b[start : end]
return parse
@classmethod
def classmethod_serialize(cls):
@classmethod
def serialize(self, val, **kw):
return val
return serialize
@classmethod
def classmethod_deserialize(cls):
@classmethod
def deserialize(self, val, **kw):
return val
return deserialize
@classmethod
def property_get(cls, name):
@property
def getter(self):
parse = getattr(self.__class__, '_parse_'+name)
deserialize = getattr(self.__class__, '_deserialize_'+name)
val = parse(self._bytes)
return deserialize(val)
return getter
def __new__(cls, classname, sizes_names):
d = {}
d['_ByteMapperFactory_args'] = (classname, sizes_names)
d['_offsets'] = {}
offset = 0
for tup in sizes_names:
try:
size, name, docs = tup
except ValueError:
size, name = tup
docs = ''
start, end = offset, offset+size
#
# class attributes
#
d['_offsets'][name] = (start, end)
d[name.upper()+'_OFFSET'] = start
d[name.upper()+'_LEN'] = size
d['OFFSET_'+name.upper()] = start
#
# classmethods
#
d['_parse_'+name] = cls.classmethod_parse(start, end)
d['_serialize_'+name] = cls.classmethod_serialize()
d['_deserialize_'+name] = cls.classmethod_deserialize()
#
# properties
#
d[name] = cls.property_get(name)
offset += size
d['_bytes_len'] = offset
return type(classname, (BaseByteMapper,), d)
import unittest
class Test_ByteMapperFactory(unittest.TestCase):
def test_test(self):
assert 1==1
def test1(self):
X = ByteMapperFactory('X', [
(2, 'asd'),
(2, 'qwe'),
(4, 'foo'),
(4, 'bar'),
])
class Y(X):
def __init__(self):
super(Y, self).__init__()
self._bytes = range(self._bytes_len)
y = Y()
assert y.asd == [0,1]
assert y.qwe == [2,3]
assert y.foo == [4,5,6,7]
assert y.bar == [8,9,10,11]
class Z(Y):
@classmethod
def _deserialize_asd(cls, val):
return [str(i) for i in val]
z = Z()
assert z.asd == ['0', '1']
assert z.qwe == [2, 3]
def test_join_strings(self):
X = ByteMapperFactory('X', [
(2, 'asd'),
(2, 'qwe'),
(4, 'foo'),
(4, 'bar'),
])
res = X._join_strings(dict(qwe='QWE', bar='BAR', asd='ASD'))
assert res == 'ASDQWEBAR'
class Y(X):
@classmethod
def _serialize_bar(cls, val):
return val+'rrr'
res = Y._serialize_join_strings(dict(qwe='QWE', bar='BAR', asd='ASD'))
assert res == 'ASDQWEBARrrr'
def _test():
import doctest
import unittest
doctest.testmod()
unittest.main()
if __name__ == "__main__":
_test()
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/groups/item/drives/item/items/item/versions/item/content/content_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from ..........models.o_data_errors import o_data_error
class ContentRequestBuilder():
"""
Provides operations to manage the media for the group entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new ContentRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/groups/{group%2Did}/drives/{drive%2Did}/items/{driveItem%2Did}/versions/{driveItemVersion%2Did}/content"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_get_request_information(self,request_configuration: Optional[ContentRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
The content stream for this version of the item.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
return request_info
def create_put_request_information(self,body: bytes, request_configuration: Optional[ContentRequestBuilderPutRequestConfiguration] = None) -> RequestInformation:
"""
The content stream for this version of the item.
Args:
body: Binary request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.PUT
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_stream_content(body)
return request_info
async def get(self,request_configuration: Optional[ContentRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> bytes:
"""
The content stream for this version of the item.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: bytes
"""
request_info = self.create_get_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_primitive_async(request_info, "bytes", response_handler, error_mapping)
async def put(self,body: bytes, request_configuration: Optional[ContentRequestBuilderPutRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> None:
"""
The content stream for this version of the item.
Args:
body: Binary request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.create_put_request_information(
body, request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_no_response_content_async(request_info, response_handler, error_mapping)
@dataclass
class ContentRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
@dataclass
class ContentRequestBuilderPutRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
|
PypiClean
|
/p4a-django-1.9.1.tar.gz/p4a-django-1.9.1/django/contrib/auth/middleware.py
|
from django.contrib import auth
from django.contrib.auth import load_backend
from django.contrib.auth.backends import RemoteUserBackend
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
def get_user(request):
if not hasattr(request, '_cached_user'):
request._cached_user = auth.get_user(request)
return request._cached_user
class AuthenticationMiddleware(object):
def process_request(self, request):
assert hasattr(request, 'session'), (
"The Django authentication middleware requires session middleware "
"to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
"'django.contrib.sessions.middleware.SessionMiddleware' before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
)
request.user = SimpleLazyObject(lambda: get_user(request))
class SessionAuthenticationMiddleware(object):
"""
Formerly, a middleware for invalidating a user's sessions that don't
correspond to the user's current session authentication hash. However, it
caused the "Vary: Cookie" header on all responses.
Now a backwards compatibility shim that enables session verification in
auth.get_user() if this middleware is in MIDDLEWARE_CLASSES.
"""
def process_request(self, request):
pass
class RemoteUserMiddleware(object):
"""
Middleware for utilizing Web-server-provided authentication.
If request.user is not authenticated, then this middleware attempts to
authenticate the username passed in the ``REMOTE_USER`` request header.
If authentication is successful, the user is automatically logged in to
persist the user in the session.
The header used is configurable and defaults to ``REMOTE_USER``. Subclass
this class and change the ``header`` attribute if you need to use a
different header.
"""
# Name of request header to grab username from. This will be the key as
# used in the request.META dictionary, i.e. the normalization of headers to
# all uppercase and the addition of "HTTP_" prefix apply.
header = "REMOTE_USER"
force_logout_if_no_header = True
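    # A subclass only needs to override ``header`` to read a different request
    # header, e.g. (hypothetical header name):
    #
    #     class ProxyRemoteUserMiddleware(RemoteUserMiddleware):
    #         header = "HTTP_X_PROXY_USER"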
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
try:
username = request.META[self.header]
except KeyError:
# If specified header doesn't exist then remove any existing
# authenticated remote-user, or return (leaving request.user set to
# AnonymousUser by the AuthenticationMiddleware).
if self.force_logout_if_no_header and request.user.is_authenticated():
self._remove_invalid_user(request)
return
# If the user is already authenticated and that user is the user we are
# getting passed in the headers, then the correct user is already
# persisted in the session and we don't need to continue.
if request.user.is_authenticated():
if request.user.get_username() == self.clean_username(username, request):
return
else:
# An authenticated user is associated with the request, but
# it does not match the authorized user in the header.
self._remove_invalid_user(request)
# We are seeing this user for the first time in this session, attempt
# to authenticate the user.
user = auth.authenticate(remote_user=username)
if user:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[auth.BACKEND_SESSION_KEY]
backend = auth.load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError: # Backend has no clean_username method.
pass
return username
def _remove_invalid_user(self, request):
"""
Removes the current authenticated user in the request which is invalid
but only if the user is authenticated via the RemoteUserBackend.
"""
try:
stored_backend = load_backend(request.session.get(auth.BACKEND_SESSION_KEY, ''))
except ImportError:
# backend failed to load
auth.logout(request)
else:
if isinstance(stored_backend, RemoteUserBackend):
auth.logout(request)
class PersistentRemoteUserMiddleware(RemoteUserMiddleware):
"""
Middleware for Web-server provided authentication on logon pages.
Like RemoteUserMiddleware but keeps the user authenticated even if
the header (``REMOTE_USER``) is not found in the request. Useful
for setups when the external authentication via ``REMOTE_USER``
is only expected to happen on some "logon" URL and the rest of
the application wants to use Django's authentication mechanism.
"""
force_logout_if_no_header = False
|
PypiClean
|
/Skailar-framework-5.0.tar.gz/Skailar-framework-5.0/skailar/db/backends/postgresql/base.py
|
import asyncio
import threading
import warnings
from contextlib import contextmanager
from skailar.conf import settings
from skailar.core.exceptions import ImproperlyConfigured
from skailar.db import DatabaseError as WrappedDatabaseError
from skailar.db import connections
from skailar.db.backends.base.base import BaseDatabaseWrapper
from skailar.db.backends.utils import CursorDebugWrapper as BaseCursorDebugWrapper
from skailar.utils.asyncio import async_unsafe
from skailar.utils.functional import cached_property
from skailar.utils.safestring import SafeString
from skailar.utils.version import get_version_tuple
try:
try:
import psycopg as Database
except ImportError:
import psycopg2 as Database
except ImportError:
raise ImproperlyConfigured("Error loading psycopg2 or psycopg module")
def psycopg_version():
version = Database.__version__.split(" ", 1)[0]
return get_version_tuple(version)
if psycopg_version() < (2, 8, 4):
raise ImproperlyConfigured(
f"psycopg2 version 2.8.4 or newer is required; you have {Database.__version__}"
)
if (3,) <= psycopg_version() < (3, 1, 8):
raise ImproperlyConfigured(
f"psycopg version 3.1.8 or newer is required; you have {Database.__version__}"
)
from .psycopg_any import IsolationLevel, is_psycopg3 # NOQA isort:skip
if is_psycopg3:
from psycopg import adapters, sql
from psycopg.pq import Format
from .psycopg_any import get_adapters_template, register_tzloader
TIMESTAMPTZ_OID = adapters.types["timestamptz"].oid
else:
import psycopg2.extensions
import psycopg2.extras
psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
"INETARRAY",
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
# Some of these import psycopg, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
def _get_varchar_column(data):
if data["max_length"] is None:
return "varchar"
return "varchar(%(max_length)s)" % data
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = "postgresql"
display_name = "PostgreSQL"
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
"AutoField": "integer",
"BigAutoField": "bigint",
"BinaryField": "bytea",
"BooleanField": "boolean",
"CharField": _get_varchar_column,
"DateField": "date",
"DateTimeField": "timestamp with time zone",
"DecimalField": "numeric(%(max_digits)s, %(decimal_places)s)",
"DurationField": "interval",
"FileField": "varchar(%(max_length)s)",
"FilePathField": "varchar(%(max_length)s)",
"FloatField": "double precision",
"IntegerField": "integer",
"BigIntegerField": "bigint",
"IPAddressField": "inet",
"GenericIPAddressField": "inet",
"JSONField": "jsonb",
"OneToOneField": "integer",
"PositiveBigIntegerField": "bigint",
"PositiveIntegerField": "integer",
"PositiveSmallIntegerField": "smallint",
"SlugField": "varchar(%(max_length)s)",
"SmallAutoField": "smallint",
"SmallIntegerField": "smallint",
"TextField": "text",
"TimeField": "time",
"UUIDField": "uuid",
}
data_type_check_constraints = {
"PositiveBigIntegerField": '"%(column)s" >= 0',
"PositiveIntegerField": '"%(column)s" >= 0',
"PositiveSmallIntegerField": '"%(column)s" >= 0',
}
data_types_suffix = {
"AutoField": "GENERATED BY DEFAULT AS IDENTITY",
"BigAutoField": "GENERATED BY DEFAULT AS IDENTITY",
"SmallAutoField": "GENERATED BY DEFAULT AS IDENTITY",
}
operators = {
"exact": "= %s",
"iexact": "= UPPER(%s)",
"contains": "LIKE %s",
"icontains": "LIKE UPPER(%s)",
"regex": "~ %s",
"iregex": "~* %s",
"gt": "> %s",
"gte": ">= %s",
"lt": "< %s",
"lte": "<= %s",
"startswith": "LIKE %s",
"endswith": "LIKE %s",
"istartswith": "LIKE UPPER(%s)",
"iendswith": "LIKE UPPER(%s)",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = (
r"REPLACE(REPLACE(REPLACE({}, E'\\', E'\\\\'), E'%%', E'\\%%'), E'_', E'\\_')"
)
pattern_ops = {
"contains": "LIKE '%%' || {} || '%%'",
"icontains": "LIKE '%%' || UPPER({}) || '%%'",
"startswith": "LIKE {} || '%%'",
"istartswith": "LIKE UPPER({}) || '%%'",
"endswith": "LIKE '%%' || {}",
"iendswith": "LIKE '%%' || UPPER({})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
# PostgreSQL backend-specific attributes.
_named_cursor_idx = 0
def get_database_version(self):
"""
Return a tuple of the database's version.
E.g. for pg_version 120004, return (12, 4).
"""
return divmod(self.pg_version, 10000)
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict["NAME"] == "" and not settings_dict.get("OPTIONS", {}).get(
"service"
):
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME or OPTIONS['service'] value."
)
if len(settings_dict["NAME"] or "") > self.ops.max_name_length():
raise ImproperlyConfigured(
"The database name '%s' (%d characters) is longer than "
"PostgreSQL's limit of %d characters. Supply a shorter NAME "
"in settings.DATABASES."
% (
settings_dict["NAME"],
len(settings_dict["NAME"]),
self.ops.max_name_length(),
)
)
if settings_dict["NAME"]:
conn_params = {
"dbname": settings_dict["NAME"],
**settings_dict["OPTIONS"],
}
elif settings_dict["NAME"] is None:
# Connect to the default 'postgres' db.
settings_dict.get("OPTIONS", {}).pop("service", None)
conn_params = {"dbname": "postgres", **settings_dict["OPTIONS"]}
else:
conn_params = {**settings_dict["OPTIONS"]}
conn_params["client_encoding"] = "UTF8"
conn_params.pop("assume_role", None)
conn_params.pop("isolation_level", None)
server_side_binding = conn_params.pop("server_side_binding", None)
conn_params.setdefault(
"cursor_factory",
ServerBindingCursor
if is_psycopg3 and server_side_binding is True
else Cursor,
)
if settings_dict["USER"]:
conn_params["user"] = settings_dict["USER"]
if settings_dict["PASSWORD"]:
conn_params["password"] = settings_dict["PASSWORD"]
if settings_dict["HOST"]:
conn_params["host"] = settings_dict["HOST"]
if settings_dict["PORT"]:
conn_params["port"] = settings_dict["PORT"]
if is_psycopg3:
conn_params["context"] = get_adapters_template(
settings.USE_TZ, self.timezone
)
# Disable prepared statements by default to keep connection poolers
# working. Can be reenabled via OPTIONS in the settings dict.
conn_params["prepare_threshold"] = conn_params.pop(
"prepare_threshold", None
)
return conn_params
@async_unsafe
def get_new_connection(self, conn_params):
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict["OPTIONS"]
set_isolation_level = False
try:
isolation_level_value = options["isolation_level"]
except KeyError:
self.isolation_level = IsolationLevel.READ_COMMITTED
else:
# Set the isolation level to the value from OPTIONS.
try:
self.isolation_level = IsolationLevel(isolation_level_value)
set_isolation_level = True
except ValueError:
raise ImproperlyConfigured(
f"Invalid transaction isolation level {isolation_level_value} "
f"specified. Use one of the psycopg.IsolationLevel values."
)
connection = self.Database.connect(**conn_params)
if set_isolation_level:
connection.isolation_level = self.isolation_level
if not is_psycopg3:
# Register dummy loads() to avoid a round trip from psycopg2's
# decode to json.dumps() to json.loads(), when using a custom
# decoder in JSONField.
psycopg2.extras.register_default_jsonb(
conn_or_curs=connection, loads=lambda x: x
)
return connection
def ensure_timezone(self):
if self.connection is None:
return False
conn_timezone_name = self.connection.info.parameter_status("TimeZone")
timezone_name = self.timezone_name
if timezone_name and conn_timezone_name != timezone_name:
with self.connection.cursor() as cursor:
cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
return True
return False
def ensure_role(self):
if self.connection is None:
return False
if new_role := self.settings_dict.get("OPTIONS", {}).get("assume_role"):
with self.connection.cursor() as cursor:
sql = self.ops.compose_sql("SET ROLE %s", [new_role])
cursor.execute(sql)
return True
return False
def init_connection_state(self):
super().init_connection_state()
# Commit after setting the time zone.
commit_tz = self.ensure_timezone()
# Set the role on the connection. This is useful if the credential used
# to login is not the same as the role that owns database resources. As
# can be the case when using temporary or ephemeral credentials.
commit_role = self.ensure_role()
if (commit_role or commit_tz) and not self.get_autocommit():
self.connection.commit()
@async_unsafe
def create_cursor(self, name=None):
if name:
# In autocommit mode, the cursor will be used outside of a
# transaction, hence use a holdable cursor.
cursor = self.connection.cursor(
name, scrollable=False, withhold=self.connection.autocommit
)
else:
cursor = self.connection.cursor()
if is_psycopg3:
# Register the cursor timezone only if the connection disagrees, to
# avoid copying the adapter map.
tzloader = self.connection.adapters.get_loader(TIMESTAMPTZ_OID, Format.TEXT)
if self.timezone != tzloader.timezone:
register_tzloader(self.timezone, cursor)
else:
cursor.tzinfo_factory = self.tzinfo_factory if settings.USE_TZ else None
return cursor
def tzinfo_factory(self, offset):
return self.timezone
@async_unsafe
def chunked_cursor(self):
self._named_cursor_idx += 1
# Get the current async task
# Note that right now this is behind @async_unsafe, so this is
# unreachable, but in future we'll start loosening this restriction.
# For now, it's here so that every use of "threading" is
# also async-compatible.
try:
current_task = asyncio.current_task()
except RuntimeError:
current_task = None
# Current task can be none even if the current_task call didn't error
if current_task:
task_ident = str(id(current_task))
else:
task_ident = "sync"
# Use that and the thread ident to get a unique name
return self._cursor(
name="_skailar_curs_%d_%s_%d"
% (
# Avoid reusing name in other threads / tasks
threading.current_thread().ident,
task_ident,
self._named_cursor_idx,
)
)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
with self.cursor() as cursor:
cursor.execute("SET CONSTRAINTS ALL IMMEDIATE")
cursor.execute("SET CONSTRAINTS ALL DEFERRED")
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Skailar's utilities.
with self.connection.cursor() as cursor:
cursor.execute("SELECT 1")
except Database.Error:
return False
else:
return True
@contextmanager
def _nodb_cursor(self):
cursor = None
try:
with super()._nodb_cursor() as cursor:
yield cursor
except (Database.DatabaseError, WrappedDatabaseError):
if cursor is not None:
raise
warnings.warn(
"Normally Skailar will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Skailar was unable to create a connection to the 'postgres' database "
"and will use the first PostgreSQL database instead.",
RuntimeWarning,
)
for connection in connections.all():
if (
connection.vendor == "postgresql"
and connection.settings_dict["NAME"] != "postgres"
):
conn = self.__class__(
{
**self.settings_dict,
"NAME": connection.settings_dict["NAME"],
},
alias=self.alias,
)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
break
else:
raise
@cached_property
def pg_version(self):
with self.temporary_connection():
return self.connection.info.server_version
def make_debug_cursor(self, cursor):
return CursorDebugWrapper(cursor, self)
if is_psycopg3:
class CursorMixin:
"""
A subclass of psycopg cursor implementing callproc.
"""
def callproc(self, name, args=None):
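            # Emulates DB-API callproc by composing and executing, roughly:
            #   SELECT * FROM "proc_name"(arg1, arg2, ...)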
if not isinstance(name, sql.Identifier):
name = sql.Identifier(name)
qparts = [sql.SQL("SELECT * FROM "), name, sql.SQL("(")]
if args:
for item in args:
qparts.append(sql.Literal(item))
qparts.append(sql.SQL(","))
del qparts[-1]
qparts.append(sql.SQL(")"))
stmt = sql.Composed(qparts)
self.execute(stmt)
return args
class ServerBindingCursor(CursorMixin, Database.Cursor):
pass
class Cursor(CursorMixin, Database.ClientCursor):
pass
class CursorDebugWrapper(BaseCursorDebugWrapper):
def copy(self, statement):
with self.debug_sql(statement):
return self.cursor.copy(statement)
else:
Cursor = psycopg2.extensions.cursor
class CursorDebugWrapper(BaseCursorDebugWrapper):
def copy_expert(self, sql, file, *args):
with self.debug_sql(sql):
return self.cursor.copy_expert(sql, file, *args)
def copy_to(self, file, table, *args, **kwargs):
with self.debug_sql(sql="COPY %s TO STDOUT" % table):
return self.cursor.copy_to(file, table, *args, **kwargs)
|
PypiClean
|
/pretix_banktool-1.0.0-py3-none-any.whl/pretix_banktool/main.py
|
import configparser
from urllib.parse import urljoin
import click
from pretix_banktool.upload import upload_transactions
from .config import validate_config
from .testing import test_fints, test_pretix
@click.group()
def main():
pass
@main.command()
@click.argument('configfile', type=click.Path(exists=True))
@click.option('--fints/--no-fints', default=True, help='Test FinTS connection')
@click.option('--pretix/--no-pretix', default=True, help='Test pretix connection')
def test(configfile, fints, pretix):
config = configparser.ConfigParser()
config.read(configfile)
validate_config(config)
if config['banktool']['type'] == 'fints' and fints:
test_fints(config)
if pretix:
test_pretix(config)
@main.command()
@click.argument('configfile', type=click.Path(exists=True))
@click.option('--days', default=30, help='Number of days to go back.')
@click.option('--ignore', help='Ignore all references that match the given regular expression. '
'Can be passed multiple times.', multiple=True)
def upload(configfile, days, ignore):
config = configparser.ConfigParser()
config.read(configfile)
validate_config(config)
upload_transactions(config, days, ignore)
@main.command()
@click.option('--type', type=click.Choice(['fints']), default='fints')
def setup(type):
click.echo(click.style('Welcome to the pretix-banktool setup!', fg='green'))
if type == 'fints':
click.echo('You will now be prompted for all information required to set up a FinTS account for pretix.')
click.echo('')
click.echo(click.style('Banking information', fg='blue'))
blz = click.prompt('Your bank\'s BLZ')
iban = click.prompt('Your account IBAN')
endpoint = click.prompt('Your bank\'s FinTS endpoint URL')
username = click.prompt('Your online-banking username')
click.echo(click.style('WARNING: If you enter your PIN here, it will be stored in clear text on your disk. '
'If you leave it empty, you will instead be asked for it every time.', fg='yellow'))
pin = click.prompt('Your online-banking PIN', hide_input=True, default='', show_default=False)
click.echo('')
click.echo(click.style('pretix information', fg='blue'))
api_server = click.prompt('pretix Server', default='https://pretix.eu/')
api_organizer = click.prompt('Short name of your organizer account', type=click.STRING)
click.echo('You will now need an API key. If you do not have one yet, you can create one as part of a team here:')
click.echo(urljoin(api_server, '/control/organizer/{}/teams'.format(api_organizer)))
click.echo('The key needs to be created for a team with the permissions "can view orders" and "can change orders" '
'for all events that you want to match orders with.')
api_key = click.prompt('API key')
click.echo('')
click.echo(click.style('Other information', fg='blue'))
filename = click.prompt('Configuration file', default=api_organizer + '.cfg', type=click.Path(exists=False))
config = configparser.ConfigParser()
config['banktool'] = {
'type': type
}
if type == 'fints':
config['fints'] = {
'blz': blz,
'endpoint': endpoint,
'username': username,
'iban': iban,
'pin': pin
}
config['pretix'] = {
'server': api_server,
'organizer': api_organizer,
'key': api_key
}
with open(filename, 'w') as configfile:
config.write(configfile)
click.echo('')
click.echo(click.style('Configuration file created!', fg='green'))
click.echo(click.style('Please note that your pin has been saved to the file in plain text. Make sure to secure '
'the file appropriately.',
fg='red'))
click.echo('')
click.echo('You can now run')
click.echo(' pretix-banktool test %s' % filename)
click.echo('to test the connection to your bank account.')
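# For reference, the setup command above writes an INI-style config file shaped
# like the following (all values are illustrative placeholders, not real
# credentials):
#
#   [banktool]
#   type = fints
#
#   [fints]
#   blz = 12345678
#   endpoint = https://fints.example-bank.test/fints
#   username = myuser
#   iban = DE00123456780000000000
#   pin =
#
#   [pretix]
#   server = https://pretix.eu/
#   organizer = demo-organizer
#   key = <api key>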
|
PypiClean
|
/melan-0.2.0.tar.gz/melan-0.2.0/src/nxp/io/line.py
|
import re
from .util import rstrip, rstripn, lstripn
from nxp.charset import white
# regular expressions used to parse lines of text
#_segline = re.compile( r'^(?P<pre>[' + white + r']*)(?P<txt>.*)(?P<pst>[' + white + r']*)$' )
_chkeol = re.compile( r'(\r?\n)?' )
# ------------------------------------------------------------------------
class Line:
"""
Line objects segment a line of text into:
indent leading whitespace
text main contents
post trailing whitespace
nl newline chars
"""
__slots__ = ('_raw','_num','_off','_bot','_eot','_nl')
def __init__(self, line, lnum=0, offset=0):
# tokenise input string
self._raw, self._nl = rstrip(line, '\r\n')
self._bot = lstripn(self._raw, white)
self._eot = rstripn(self._raw, white)
# assign properties
self._num = lnum
self._off = offset
# check invalid EOLs
if _chkeol.fullmatch(self._nl) is None:
raise ValueError('Bad end-of-line')
def __len__(self): return len(self._raw)
def __str__(self): return self._raw
def __repr__(self): return str({
'num': self._num,
'off': self._off,
'raw': self._raw,
'nl': self._nl
})
def __getitem__(self,key):
return self._raw[key]
# position within file
@property
def lnum(self): return self._num
@property
def offset(self): return self._off
# begin/end of text
@property
def bot(self): return self._bot
@property
def eot(self): return self._eot
# contents of segments
@property
def indent(self): return self._raw[0:self._bot]
@property
def text(self): return self._raw[self._bot:self._eot]
@property
def post(self): return self._raw[self._eot:]
@property
def nl(self): return self._nl
@property
def raw(self): return self._raw
@property
def full(self): return self._raw + self._nl
# lengths of segments
@property
def prelen(self): return self._bot
@property
def textlen(self): return self._eot - self._bot
@property
def postlen(self): return len(self) - self._eot
# properties
def is_empty(self): return len(self) == 0
def is_white(self): return self._eot == self._bot
def has_text(self): return self.textlen > 0
def uses_lf(self): return self._nl == '\n'
def uses_crlf(self): return self._nl == '\r\n'
|
PypiClean
|
/smartautomatic_server_frontend-20220907.2-py3-none-any.whl/sas_frontend/frontend_es5/4dd04d6c.js
|
"use strict";(self.webpackChunksmartautomatic_server_frontend=self.webpackChunksmartautomatic_server_frontend||[]).push([[75049],{75049:function(e,t,r){r.r(t);r(51187);var n,i,o,a,s,l,c,d=r(37500),u=r(33310),f=r(47181),p=r(34821),h=(r(3555),r(86630),r(11654)),m=r(93748),y=r(27322),v=r(44547),w=(r(44577),r(32594));function b(e){return b="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},b(e)}function g(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function k(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function E(e,t){return E=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},E(e,t)}function _(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var r,n=A(e);if(t){var i=A(this).constructor;r=Reflect.construct(n,arguments,i)}else r=n.apply(this,arguments);return x(this,r)}}function x(e,t){if(t&&("object"===b(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return P(e)}function P(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}function A(e){return A=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},A(e)}function C(){C=function(){return e};var e={elementsDefinitionOrder:[["method"],["field"]],initializeInstanceElements:function(e,t){["method","field"].forEach((function(r){t.forEach((function(t){t.kind===r&&"own"===t.placement&&this.defineClassElement(e,t)}),this)}),this)},initializeClassElements:function(e,t){var r=e.prototype;["method","field"].forEach((function(n){t.forEach((function(t){var i=t.placement;if(t.kind===n&&("static"===i||"prototype"===i)){var o="static"===i?e:r;this.defineClassElement(o,t)}}),this)}),this)},defineClassElement:function(e,t){var r=t.descriptor;if("field"===t.kind){var n=t.initializer;r={enumerable:r.enumerable,writable:r.writable,configurable:r.configurable,value:void 0===n?void 0:n.call(e)}}Object.defineProperty(e,t.key,r)},decorateClass:function(e,t){var r=[],n=[],i={static:[],prototype:[],own:[]};if(e.forEach((function(e){this.addElementPlacement(e,i)}),this),e.forEach((function(e){if(!O(e))return r.push(e);var t=this.decorateElement(e,i);r.push(t.element),r.push.apply(r,t.extras),n.push.apply(n,t.finishers)}),this),!t)return{elements:r,finishers:n};var o=this.decorateConstructor(r,t);return n.push.apply(n,o.finishers),o.finishers=n,o},addElementPlacement:function(e,t,r){var n=t[e.placement];if(!r&&-1!==n.indexOf(e.key))throw new TypeError("Duplicated element ("+e.key+")");n.push(e.key)},decorateElement:function(e,t){for(var r=[],n=[],i=e.decorators,o=i.length-1;o>=0;o--){var a=t[e.placement];a.splice(a.indexOf(e.key),1);var s=this.fromElementDescriptor(e),l=this.toElementFinisherExtras((0,i[o])(s)||s);e=l.element,this.addElementPlacement(e,t),l.finisher&&n.push(l.finisher);var c=l.extras;if(c){for(var d=0;d<c.length;d++)this.addElementPlacement(c[d],t);r.push.apply(r,c)}}return{element:e,finishers:n,extras:r}},decorateConstructor:function(e,t){for(var r=[],n=t.length-1;n>=0;n--){var 
i=this.fromClassDescriptor(e),o=this.toClassDescriptor((0,t[n])(i)||i);if(void 0!==o.finisher&&r.push(o.finisher),void 0!==o.elements){e=o.elements;for(var a=0;a<e.length-1;a++)for(var s=a+1;s<e.length;s++)if(e[a].key===e[s].key&&e[a].placement===e[s].placement)throw new TypeError("Duplicated element ("+e[a].key+")")}}return{elements:e,finishers:r}},fromElementDescriptor:function(e){var t={kind:e.kind,key:e.key,placement:e.placement,descriptor:e.descriptor};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),"field"===e.kind&&(t.initializer=e.initializer),t},toElementDescriptors:function(e){var t;if(void 0!==e)return(t=e,function(e){if(Array.isArray(e))return e}(t)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(t)||function(e,t){if(e){if("string"==typeof e)return T(e,t);var r=Object.prototype.toString.call(e).slice(8,-1);return"Object"===r&&e.constructor&&(r=e.constructor.name),"Map"===r||"Set"===r?Array.from(e):"Arguments"===r||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(r)?T(e,t):void 0}}(t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()).map((function(e){var t=this.toElementDescriptor(e);return this.disallowProperty(e,"finisher","An element descriptor"),this.disallowProperty(e,"extras","An element descriptor"),t}),this)},toElementDescriptor:function(e){var t=String(e.kind);if("method"!==t&&"field"!==t)throw new TypeError('An element descriptor\'s .kind property must be either "method" or "field", but a decorator created an element descriptor with .kind "'+t+'"');var r=M(e.key),n=String(e.placement);if("static"!==n&&"prototype"!==n&&"own"!==n)throw new TypeError('An element descriptor\'s .placement property must be one of "static", "prototype" or "own", but a decorator created an element descriptor with .placement "'+n+'"');var i=e.descriptor;this.disallowProperty(e,"elements","An element descriptor");var o={kind:t,key:r,placement:n,descriptor:Object.assign({},i)};return"field"!==t?this.disallowProperty(e,"initializer","A method descriptor"):(this.disallowProperty(i,"get","The property descriptor of a field descriptor"),this.disallowProperty(i,"set","The property descriptor of a field descriptor"),this.disallowProperty(i,"value","The property descriptor of a field descriptor"),o.initializer=e.initializer),o},toElementFinisherExtras:function(e){return{element:this.toElementDescriptor(e),finisher:j(e,"finisher"),extras:this.toElementDescriptors(e.extras)}},fromClassDescriptor:function(e){var t={kind:"class",elements:e.map(this.fromElementDescriptor,this)};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),t},toClassDescriptor:function(e){var t=String(e.kind);if("class"!==t)throw new TypeError('A class descriptor\'s .kind property must be "class", but a decorator created a class descriptor with .kind "'+t+'"');this.disallowProperty(e,"key","A class descriptor"),this.disallowProperty(e,"placement","A class descriptor"),this.disallowProperty(e,"descriptor","A class descriptor"),this.disallowProperty(e,"initializer","A class descriptor"),this.disallowProperty(e,"extras","A class descriptor");var r=j(e,"finisher");return{elements:this.toElementDescriptors(e.elements),finisher:r}},runClassFinishers:function(e,t){for(var r=0;r<t.length;r++){var n=(0,t[r])(e);if(void 0!==n){if("function"!=typeof n)throw new TypeError("Finishers 
must return a constructor.");e=n}}return e},disallowProperty:function(e,t,r){if(void 0!==e[t])throw new TypeError(r+" can't have a ."+t+" property.")}};return e}function D(e){var t,r=M(e.key);"method"===e.kind?t={value:e.value,writable:!0,configurable:!0,enumerable:!1}:"get"===e.kind?t={get:e.value,configurable:!0,enumerable:!1}:"set"===e.kind?t={set:e.value,configurable:!0,enumerable:!1}:"field"===e.kind&&(t={configurable:!0,writable:!0,enumerable:!0});var n={kind:"field"===e.kind?"field":"method",key:r,placement:e.static?"static":"field"===e.kind?"own":"prototype",descriptor:t};return e.decorators&&(n.decorators=e.decorators),"field"===e.kind&&(n.initializer=e.value),n}function S(e,t){void 0!==e.descriptor.get?t.descriptor.get=e.descriptor.get:t.descriptor.set=e.descriptor.set}function O(e){return e.decorators&&e.decorators.length}function z(e){return void 0!==e&&!(void 0===e.value&&void 0===e.writable)}function j(e,t){var r=e[t];if(void 0!==r&&"function"!=typeof r)throw new TypeError("Expected '"+t+"' to be a function");return r}function M(e){var t=function(e,t){if("object"!==b(e)||null===e)return e;var r=e[Symbol.toPrimitive];if(void 0!==r){var n=r.call(e,t||"default");if("object"!==b(n))return n;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"===b(t)?t:String(t)}function T(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new Array(t);r<t;r++)n[r]=e[r];return n}!function(e,t,r,n){var i=C();if(n)for(var o=0;o<n.length;o++)i=n[o](i);var a=t((function(e){i.initializeInstanceElements(e,s.elements)}),r),s=i.decorateClass(function(e){for(var t=[],r=function(e){return"method"===e.kind&&e.key===o.key&&e.placement===o.placement},n=0;n<e.length;n++){var i,o=e[n];if("method"===o.kind&&(i=t.find(r)))if(z(o.descriptor)||z(i.descriptor)){if(O(o)||O(i))throw new ReferenceError("Duplicated methods ("+o.key+") can't be decorated.");i.descriptor=o.descriptor}else{if(O(o)){if(O(i))throw new ReferenceError("Decorators can't be placed on different accessors with for the same property ("+o.key+").");i.decorators=o.decorators}S(o,i)}else t.push(o)}return t}(a.d.map(D)),e);i.initializeClassElements(a.F,s.elements),i.runClassFinishers(a.F,s.finishers)}([(0,u.Mo)("ha-dialog-automation-mode")],(function(e,t){var r=function(t){!function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&E(e,t)}(n,t);var r=_(n);function n(){var t;k(this,n);for(var i=arguments.length,o=new Array(i),a=0;a<i;a++)o[a]=arguments[a];return t=r.call.apply(r,[this].concat(o)),e(P(t)),t}return n}(t);return{F:r,d:[{kind:"field",decorators:[(0,u.Cb)({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[(0,u.SB)()],key:"_opened",value:function(){return!1}},{kind:"field",key:"_params",value:void 0},{kind:"field",decorators:[(0,u.SB)()],key:"_newMode",value:function(){return m.B$}},{kind:"field",decorators:[(0,u.SB)()],key:"_newMax",value:void 0},{kind:"method",key:"showDialog",value:function(e){this._opened=!0,this._params=e,this._newMode=e.config.mode||m.B$,this._newMax=(0,v.vA)(this._newMode)?e.config.max||m.Yc:void 0}},{kind:"method",key:"closeDialog",value:function(){this._params.onClose(),this._opened&&(0,f.B)(this,"dialog-closed",{dialog:this.localName}),this._opened=!1}},{kind:"method",key:"render",value:function(){var e,t,r=this;return this._opened?(0,d.dy)(i||(i=g(["\n 
<ha-dialog\n open\n scrimClickAction\n @closed=","\n .heading=","\n >\n <ha-select\n .label=","\n .value=","\n @selected=","\n @closed=","\n fixedMenuPosition\n .helper=","\n >\n ","\n </ha-select>\n ","\n\n <mwc-button @click=",' slot="secondaryAction">\n ',"\n </mwc-button>\n <mwc-button @click=",' slot="primaryAction">\n ',"\n </mwc-button>\n </ha-dialog>\n "])),this.closeDialog,(0,p.i)(this.hass,this.hass.localize("ui.panel.config.automation.editor.change_mode")),this.hass.localize("ui.panel.config.automation.editor.modes.label"),this._newMode,this._modeChanged,w.U,(0,d.dy)(o||(o=g(['\n <a\n style="color: var(--secondary-text-color)"\n href=','\n target="_blank"\n rel="noreferrer"\n >',"</a\n >\n "])),(0,y.R)(this.hass,"/docs/automation/modes/"),this.hass.localize("ui.panel.config.automation.editor.modes.learn_more")),v.EH.map((function(e){return(0,d.dy)(a||(a=g(["\n <mwc-list-item .value=",">\n ","\n </mwc-list-item>\n "])),e,r.hass.localize("ui.panel.config.automation.editor.modes.".concat(e))||e)})),(0,v.vA)(this._newMode)?(0,d.dy)(s||(s=g(["\n <br /><ha-textfield\n .label=",'\n type="number"\n name="max"\n .value=',"\n @change=",'\n class="max"\n >\n </ha-textfield>\n '])),this.hass.localize("ui.panel.config.automation.editor.max.".concat(this._newMode)),null!==(e=null===(t=this._newMax)||void 0===t?void 0:t.toString())&&void 0!==e?e:"",this._valueChanged):(0,d.dy)(l||(l=g([""]))),this.closeDialog,this.hass.localize("ui.dialogs.generic.cancel"),this._save,this.hass.localize("ui.panel.config.automation.editor.change_mode")):(0,d.dy)(n||(n=g([""])))}},{kind:"method",key:"_modeChanged",value:function(e){var t=e.target.value;this._newMode=t,(0,v.vA)(t)?this._newMax||(this._newMax=m.Yc):this._newMax=void 0}},{kind:"method",key:"_valueChanged",value:function(e){e.stopPropagation();var t=e.target;"max"===t.name&&(this._newMax=Number(t.value))}},{kind:"method",key:"_save",value:function(){this._params.updateAutomation(Object.assign({},this._params.config,{mode:this._newMode,max:this._newMax})),this.closeDialog()}},{kind:"get",static:!0,key:"styles",value:function(){return[h.Qx,h.yu,(0,d.iv)(c||(c=g(["\n ha-select,\n ha-textfield {\n display: block;\n }\n "])))]}}]}}),d.oi)}}]);
|
PypiClean
|
/gd_py-1.0.0-cp39-cp39-manylinux_2_35_x86_64.whl/gd/api/levels.py
|
from typing import Any, ClassVar, Optional, Type, TypeVar
from attrs import define, field
from pendulum import Duration, duration
from typing_aliases import StringDict, StringMapping
from gd.api.editor import Editor
from gd.api.recording import Recording
from gd.api.songs import SongReferenceAPI
from gd.binary import VERSION, Binary, BinaryReader, BinaryWriter
from gd.binary_utils import Reader, Writer
from gd.capacity import Capacity
from gd.constants import (
DEFAULT_ATTEMPTS,
DEFAULT_AUTO,
DEFAULT_CHECK,
DEFAULT_CLICKS,
DEFAULT_COINS,
DEFAULT_COLLECTED,
DEFAULT_CUSTOM,
DEFAULT_DEMON,
DEFAULT_DENOMINATOR,
DEFAULT_DOWNLOADS,
DEFAULT_ENCODING,
DEFAULT_ERRORS,
DEFAULT_FAVORITE,
DEFAULT_HIDDEN,
DEFAULT_HIGH_OBJECT_COUNT,
DEFAULT_ID,
DEFAULT_JUMPS,
DEFAULT_LEVEL_ORDER,
DEFAULT_LOW_DETAIL,
DEFAULT_LOW_DETAIL_TOGGLED,
DEFAULT_NUMERATOR,
DEFAULT_OBJECT_COUNT,
DEFAULT_ORB_PERCENTAGE,
DEFAULT_PASSWORD,
DEFAULT_RATING,
DEFAULT_RECORD,
DEFAULT_REVISION,
DEFAULT_ROUNDING,
DEFAULT_SCORE,
DEFAULT_STARS,
DEFAULT_TWO_PLAYER,
DEFAULT_UNLISTED,
DEFAULT_UPLOADED,
DEFAULT_VERIFIED,
DEFAULT_VERIFIED_COINS,
DEFAULT_VERSION,
EMPTY,
WEEKLY_ID_ADD,
)
from gd.decorators import cache_by
from gd.difficulty_parameters import DEFAULT_DEMON_DIFFICULTY_VALUE, DifficultyParameters
from gd.encoding import (
compress,
decode_base64_string_url_safe,
decompress,
encode_base64_string_url_safe,
generate_leaderboard_seed,
unzip_level_string,
zip_level_string,
)
from gd.enums import (
ByteOrder,
CollectedCoins,
Difficulty,
InternalType,
LevelLength,
LevelType,
RateType,
TimelyType,
)
from gd.models_constants import OBJECTS_SEPARATOR
from gd.password import Password
from gd.progress import Progress
from gd.users import User
from gd.versions import CURRENT_BINARY_VERSION, CURRENT_GAME_VERSION, GameVersion, RobTopVersion
INTERNAL_TYPE = "kCEK"
ID = "k1"
NAME = "k2"
DESCRIPTION = "k3"
DATA = "k4"
CREATOR_NAME = "k5"
CREATOR_ID = "k6"
CREATOR_ACCOUNT_ID = "k60"
OFFICIAL_SONG_ID = "k8"
SONG_ID = "k45"
DIRECT_DIFFICULTY = "k7"
DIFFICULTY_DENOMINATOR = "k9"
DIFFICULTY_NUMERATOR = "k10"
AUTO = "k33"
DEMON = "k25"
DEMON_DIFFICULTY = "k76"
DOWNLOADS = "k11"
VERIFIED = "k14"
UPLOADED = "k15"
LEVEL_VERSION = "k16"
REVISION = "k46"
GAME_VERSION = "k17"
BINARY_VERSION = "k50"
ATTEMPTS = "k18"
JUMPS = "k36"
NORMAL_RECORD = "k19"
PRACTICE_RECORD = "k20"
TYPE = "k21"
RATING = "k22"
LENGTH = "k23"
STARS = "k26"
SCORE = "k27"
EPIC = "k75"
RECORDING = "k34"
HIDDEN = "k35"
REQUIRED_COINS = "k37"
PASSWORD = "k41"
ORIGINAL_ID = "k42"
TWO_PLAYER = "k43"
OBJECT_COUNT = "k48"
FIRST_COIN = "k61"
SECOND_COIN = "k62"
THIRD_COIN = "k63"
COINS = "k64"
VERIFIED_COINS = "k65"
REQUESTED_STARS = "k66"
CAPACITY = "k67"
HIGH_OBJECT_COUNT = "k69"
ORB_PERCENTAGE = "k71"
LOW_DETAIL = "k72"
LOW_DETAIL_TOGGLED = "k73"
TIMELY_ID = "k74"
GAUNTLET = "k77"
NON_MAIN = "k78" # XXX: needs more research
UNLISTED = "k79"
EDITOR_SECONDS = "k80"
COPIES_SECONDS = "k81"
FAVORITE = "k82"
LEVEL_ORDER = "k83"
FOLDER_ID = "k84"
BEST_CLICKS = "k85"
BEST_SECONDS = "k86"
PROGRESS = "k88"
CHECK = "k89"
LEADERBOARD_RECORD = "k90"
LEADERBOARD_SEED = "k87"
DEFAULT_EPIC = False
CHECK_BIT = 0b00000001
UNPROCESSED_DATA = "unprocessed_data"
B = TypeVar("B", bound="BaseLevelAPI")
@define()
class BaseLevelAPI(Binary):
TYPE: ClassVar[LevelType] = LevelType.DEFAULT
id: int = field()
name: str = field()
song_reference: SongReferenceAPI = field()
creator: User = field()
version: int = field(default=DEFAULT_VERSION)
attempts: int = field(default=DEFAULT_ATTEMPTS)
normal_record: int = field(default=DEFAULT_RECORD)
practice_record: int = field(default=DEFAULT_RECORD)
stars: int = field(default=DEFAULT_STARS)
jumps: int = field(default=DEFAULT_JUMPS)
binary_version: RobTopVersion = field(default=CURRENT_BINARY_VERSION)
coins: int = field(default=DEFAULT_COINS)
capacity: Capacity = field(factory=Capacity)
orb_percentage: int = field(default=DEFAULT_ORB_PERCENTAGE)
best_clicks: int = field(default=DEFAULT_CLICKS)
best_time: Duration = field(factory=duration)
progress: Progress = field(factory=Progress)
check: bool = field(default=DEFAULT_CHECK)
leaderboard_record: int = field(default=DEFAULT_RECORD)
leaderboard_seed: int = field() # computed automatically
def __hash__(self) -> int:
return hash(type(self)) ^ self.id
@classmethod
def default(
cls: Type[B],
id: int = DEFAULT_ID,
song_id: int = DEFAULT_ID,
song_custom: bool = DEFAULT_CUSTOM,
creator_id: int = DEFAULT_ID,
creator_account_id: int = DEFAULT_ID,
) -> B:
return cls(
id=id,
name=EMPTY,
song_reference=SongReferenceAPI.default(song_id, song_custom),
creator=User.default(creator_id, creator_account_id),
)
def compute_leaderboard_seed(self) -> int:
return generate_leaderboard_seed(
self.best_clicks,
self.leaderboard_record,
int(self.best_time.total_seconds()), # type: ignore
)
@leaderboard_seed.default
def default_leaderboard_seed(self) -> int:
return self.compute_leaderboard_seed()
def refresh_leaderboard_seed(self: B) -> B:
self.leaderboard_seed = self.compute_leaderboard_seed()
return self
def is_check(self) -> bool:
return self.check
@classmethod
def from_robtop_data(cls: Type[B], data: StringMapping[Any]) -> B: # type: ignore
id = data.get(ID, DEFAULT_ID)
name = data.get(NAME, EMPTY)
official_song_id = data.get(OFFICIAL_SONG_ID, DEFAULT_ID)
if official_song_id:
song_reference = SongReferenceAPI(official_song_id, custom=False)
else:
song_id = data.get(SONG_ID, DEFAULT_ID)
song_reference = SongReferenceAPI(song_id, custom=True)
creator_id = data.get(CREATOR_ID, DEFAULT_ID)
creator_name = data.get(CREATOR_NAME, EMPTY)
creator_account_id = data.get(CREATOR_ACCOUNT_ID, DEFAULT_ID)
creator = User(creator_id, creator_name, creator_account_id)
level_version = data.get(LEVEL_VERSION, DEFAULT_VERSION)
attempts = data.get(ATTEMPTS, DEFAULT_ATTEMPTS)
normal_record = data.get(NORMAL_RECORD, DEFAULT_RECORD)
practice_record = data.get(PRACTICE_RECORD, DEFAULT_RECORD)
stars = data.get(STARS, DEFAULT_STARS)
jumps = data.get(JUMPS, DEFAULT_JUMPS)
binary_version_option = data.get(BINARY_VERSION)
if binary_version_option is None:
binary_version = CURRENT_BINARY_VERSION
else:
binary_version = RobTopVersion.from_value(binary_version_option)
coins = data.get(COINS, DEFAULT_COINS)
capacity_option = data.get(CAPACITY)
if capacity_option is None:
capacity = Capacity()
else:
capacity = Capacity.from_robtop(capacity_option)
orb_percentage = data.get(ORB_PERCENTAGE, DEFAULT_ORB_PERCENTAGE)
best_clicks = data.get(BEST_CLICKS, DEFAULT_CLICKS)
best_option = data.get(BEST_SECONDS)
if best_option is None:
best_time = duration()
else:
best_time = duration(seconds=best_option)
progress_option = data.get(PROGRESS)
if progress_option is None:
progress = Progress()
else:
progress = Progress.from_robtop(progress_option)
check = data.get(CHECK, DEFAULT_CHECK)
leaderboard_record = data.get(LEADERBOARD_RECORD, DEFAULT_RECORD)
return cls(
id=id,
name=name,
song_reference=song_reference,
creator=creator,
version=level_version,
attempts=attempts,
normal_record=normal_record,
practice_record=practice_record,
stars=stars,
jumps=jumps,
binary_version=binary_version,
coins=coins,
capacity=capacity,
orb_percentage=orb_percentage,
best_clicks=best_clicks,
best_time=best_time,
progress=progress,
check=check,
leaderboard_record=leaderboard_record,
)
def to_robtop_data(self) -> StringDict[Any]:
creator = self.creator
data = {
INTERNAL_TYPE: InternalType.LEVEL.value,
TYPE: self.TYPE.value,
ID: self.id,
NAME: self.name,
CREATOR_ID: creator.id,
CREATOR_NAME: creator.name,
CREATOR_ACCOUNT_ID: creator.account_id,
LEVEL_VERSION: self.version,
ATTEMPTS: self.attempts,
NORMAL_RECORD: self.normal_record,
PRACTICE_RECORD: self.practice_record,
STARS: self.stars,
JUMPS: self.jumps,
BINARY_VERSION: self.binary_version.to_value(),
COINS: self.coins,
CAPACITY: self.capacity.to_robtop(),
ORB_PERCENTAGE: self.orb_percentage,
BEST_CLICKS: self.best_clicks,
BEST_SECONDS: int(self.best_time.total_seconds()), # type: ignore
PROGRESS: self.progress.to_robtop(),
CHECK: self.check,
LEADERBOARD_RECORD: self.leaderboard_record,
LEADERBOARD_SEED: self.leaderboard_seed,
}
song_reference = self.song_reference
song_id = song_reference.id
if song_reference.is_custom():
data[SONG_ID] = song_id
else:
data[OFFICIAL_SONG_ID] = song_id
return data
@classmethod
def from_binary(
cls: Type[B],
binary: BinaryReader,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> B:
rounding = DEFAULT_ROUNDING
check_bit = CHECK_BIT
reader = Reader(binary, order)
id = reader.read_u32()
name_length = reader.read_u8()
name = reader.read(name_length).decode(encoding, errors)
song_reference = SongReferenceAPI.from_binary(binary, order, version)
creator = User.from_binary(binary, order, version, encoding, errors)
level_version = reader.read_u8()
attempts = reader.read_u32()
normal_record = reader.read_u8()
practice_record = reader.read_u8()
stars = reader.read_u8()
jumps = reader.read_u32()
binary_version = RobTopVersion.from_binary(binary, order, version)
coins = reader.read_u8()
capacity = Capacity.from_binary(binary, order, version)
orb_percentage = reader.read_u8()
best_clicks = reader.read_u16()
best_seconds = round(reader.read_f32(), rounding)
best_time = duration(seconds=best_seconds)
progress = Progress.from_binary(binary, order, version)
value = reader.read_u8()
check = value & check_bit == check_bit
leaderboard_record = reader.read_u8()
return cls(
id=id,
name=name,
song_reference=song_reference,
creator=creator,
version=level_version,
attempts=attempts,
normal_record=normal_record,
practice_record=practice_record,
stars=stars,
jumps=jumps,
binary_version=binary_version,
coins=coins,
capacity=capacity,
orb_percentage=orb_percentage,
best_clicks=best_clicks,
best_time=best_time,
progress=progress,
check=check,
leaderboard_record=leaderboard_record,
)
def to_binary(
self,
binary: BinaryWriter,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> None:
writer = Writer(binary, order)
writer.write_u32(self.id)
data = self.name.encode(encoding, errors)
writer.write_u8(len(data))
writer.write(data)
self.song_reference.to_binary(binary, order, version)
self.creator.to_binary(binary, order, version, encoding, errors)
writer.write_u8(self.version)
writer.write_u32(self.attempts)
writer.write_u8(self.normal_record)
writer.write_u8(self.practice_record)
writer.write_u8(self.stars)
writer.write_u32(self.jumps)
self.binary_version.to_binary(binary, order, version)
writer.write_u8(self.coins)
self.capacity.to_binary(binary, order, version)
writer.write_u8(self.orb_percentage)
writer.write_u16(self.best_clicks)
best_seconds = self.best_time.total_seconds() # type: ignore
writer.write_f32(best_seconds)
self.progress.to_binary(binary, order, version)
value = 0
if self.is_check():
value |= CHECK_BIT
writer.write_u8(value)
writer.write_u8(self.leaderboard_record)
O = TypeVar("O", bound="OfficialLevelAPI")
@define()
class OfficialLevelAPI(BaseLevelAPI):
TYPE: ClassVar[LevelType] = LevelType.OFFICIAL
difficulty: Difficulty = field(default=Difficulty.DEFAULT)
required_coins: int = field(default=DEFAULT_COINS)
def __hash__(self) -> int:
return hash(type(self)) ^ self.id
def is_demon(self) -> bool:
return self.difficulty.is_demon()
@classmethod
def from_robtop_data(cls: Type[O], data: StringMapping[Any]) -> O: # type: ignore
level = super().from_robtop_data(data)
direct_difficulty_option = data.get(DIRECT_DIFFICULTY)
if direct_difficulty_option is None:
difficulty = Difficulty.DEFAULT
else:
demon = data.get(DEMON, DEFAULT_DEMON)
if demon:
demon_difficulty_option = data.get(DEMON_DIFFICULTY)
if demon_difficulty_option is None:
difficulty = Difficulty.DEMON # unspecified demon
else:
difficulty = DifficultyParameters(
demon_difficulty_value=demon_difficulty_option, demon=demon
).into_difficulty()
else:
difficulty = Difficulty(direct_difficulty_option + 1) # funky way to convert
required_coins = data.get(REQUIRED_COINS, DEFAULT_COINS)
level.difficulty = difficulty
level.required_coins = required_coins
return level
def to_robtop_data(self) -> StringDict[Any]:
data = super().to_robtop_data()
difficulty = self.difficulty
difficulty_parameters = DifficultyParameters.from_difficulty(difficulty)
actual = {
# difficulty parameters
DIRECT_DIFFICULTY: difficulty.clamp_demon().value - 1, # convert back
AUTO: difficulty_parameters.is_auto(),
DEMON: difficulty_parameters.is_demon(),
DEMON_DIFFICULTY: difficulty_parameters.demon_difficulty_value,
# others
REQUIRED_COINS: self.required_coins,
}
data.update(actual)
return data
@classmethod
def from_binary(
cls: Type[O],
binary: BinaryReader,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> O:
reader = Reader(binary, order)
level = super().from_binary(binary, order, version, encoding, errors)
difficulty_value = reader.read_u8()
difficulty = Difficulty(difficulty_value)
required_coins = reader.read_u8()
level.difficulty = difficulty
level.required_coins = required_coins
return level
def to_binary(
self,
binary: BinaryWriter,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> None:
writer = Writer(binary, order)
super().to_binary(binary, order, version, encoding, errors)
writer.write_u8(self.difficulty.value)
writer.write_u8(self.required_coins)
TWO_PLAYER_BIT = 0b00000001
HIGH_OBJECT_COUNT_BIT = 0b00000010
LOW_DETAIL_BIT = 0b00000100
LOW_DETAIL_TOGGLED_BIT = 0b00001000
DEFAULT_LENGTH_VALUE = LevelLength.DEFAULT.value
C = TypeVar("C", bound="CustomLevelAPI")
@define()
class CustomLevelAPI(BaseLevelAPI):
description: str = field(default=EMPTY)
unprocessed_data: str = field(default=EMPTY, repr=False)
length: LevelLength = field(default=LevelLength.DEFAULT)
password_data: Password = field(factory=Password)
original_id: int = field(default=DEFAULT_ID)
two_player: bool = field(default=DEFAULT_TWO_PLAYER)
object_count: int = field(default=DEFAULT_OBJECT_COUNT)
high_object_count: bool = field(default=DEFAULT_HIGH_OBJECT_COUNT)
requested_stars: int = field(default=DEFAULT_STARS)
low_detail: bool = field(default=DEFAULT_LOW_DETAIL)
low_detail_toggled: bool = field(default=DEFAULT_LOW_DETAIL_TOGGLED)
editor_time: Duration = field(factory=duration)
copies_time: Duration = field(factory=duration)
level_order: int = field(default=DEFAULT_LEVEL_ORDER)
folder_id: int = field(default=DEFAULT_ID)
def __hash__(self) -> int:
return hash(type(self)) ^ self.id
@classmethod
def from_binary(
cls: Type[C],
binary: BinaryReader,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> C:
rounding = DEFAULT_ROUNDING
two_player_bit = TWO_PLAYER_BIT
high_object_count_bit = HIGH_OBJECT_COUNT_BIT
low_detail_bit = LOW_DETAIL_BIT
low_detail_toggled_bit = LOW_DETAIL_TOGGLED_BIT
level = super().from_binary(binary, order, version, encoding, errors)
reader = Reader(binary, order)
description_length = reader.read_u8()
description = reader.read(description_length).decode(encoding, errors)
data_length = reader.read_u32()
data = decompress(reader.read(data_length))
length_value = reader.read_u8()
length = LevelLength(length_value)
password_data = Password.from_binary(binary, order, version)
original_id = reader.read_u32()
value = reader.read_u8()
two_player = value & two_player_bit == two_player_bit
high_object_count = value & high_object_count_bit == high_object_count_bit
low_detail = value & low_detail_bit == low_detail_bit
low_detail_toggled = value & low_detail_toggled_bit == low_detail_toggled_bit
object_count = reader.read_u32()
requested_stars = reader.read_u8()
editor_seconds = round(reader.read_f32(), rounding)
copies_seconds = round(reader.read_f32(), rounding)
editor_time = duration(seconds=editor_seconds)
copies_time = duration(seconds=copies_seconds)
level_order = reader.read_u32()
folder_id = reader.read_u8()
level.description = description
level.data = data
level.length = length
level.password_data = password_data
level.original_id = original_id
level.two_player = two_player
level.object_count = object_count
level.high_object_count = high_object_count
level.requested_stars = requested_stars
level.low_detail = low_detail
level.low_detail_toggled = low_detail_toggled
level.editor_time = editor_time
level.copies_time = copies_time
level.level_order = level_order
level.folder_id = folder_id
return level
def to_binary(
self,
binary: BinaryWriter,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> None:
super().to_binary(binary, order, version, encoding, errors)
writer = Writer(binary, order)
data = self.description.encode(encoding, errors)
writer.write_u8(len(data))
writer.write(data)
data = compress(self.data)
writer.write_u32(len(data))
writer.write(data)
writer.write_u8(self.length.value)
self.password_data.to_binary(binary, order, version)
writer.write_u32(self.original_id)
value = 0
if self.is_two_player():
value |= TWO_PLAYER_BIT
if self.has_high_object_count():
value |= HIGH_OBJECT_COUNT_BIT
if self.has_low_detail():
value |= LOW_DETAIL_BIT
if self.has_low_detail_toggled():
value |= LOW_DETAIL_TOGGLED_BIT
writer.write_u8(value)
writer.write_u32(self.object_count)
writer.write_u8(self.requested_stars)
editor_seconds = self.editor_time.total_seconds() # type: ignore
copies_seconds = self.copies_time.total_seconds() # type: ignore
writer.write_f32(editor_seconds)
writer.write_f32(copies_seconds)
writer.write_u32(self.level_order)
writer.write_u8(self.folder_id)
@property
@cache_by(UNPROCESSED_DATA)
def processed_data(self) -> str:
return unzip_level_string(self.unprocessed_data)
@processed_data.setter
def processed_data(self, processed_data: str) -> None:
self.unprocessed_data = zip_level_string(processed_data)
@property
@cache_by(UNPROCESSED_DATA)
def data(self) -> bytes:
return self.open_editor().to_bytes()
@data.setter
def data(self, data: bytes) -> None:
self.processed_data = Editor.from_bytes(data).to_robtop()
def open_editor(self) -> Editor:
return Editor.from_robtop(self.processed_data)
@property
def password(self) -> Optional[int]:
return self.password_data.password
def is_copyable(self) -> bool:
return self.password_data.is_copyable()
def is_original(self) -> bool:
return not self.original_id
def is_two_player(self) -> bool:
return self.two_player
def has_high_object_count(self) -> bool:
return self.high_object_count
def has_low_detail(self) -> bool:
return self.low_detail
def has_low_detail_toggled(self) -> bool:
return self.low_detail_toggled
@classmethod
def from_robtop_data(cls: Type[C], data: StringMapping[Any]) -> C: # type: ignore
level = super().from_robtop_data(data)
description = decode_base64_string_url_safe(data.get(DESCRIPTION, EMPTY))
unprocessed_data = data.get(DATA, EMPTY)
if OBJECTS_SEPARATOR in unprocessed_data:
unprocessed_data = zip_level_string(unprocessed_data)
length_value = data.get(LENGTH, DEFAULT_LENGTH_VALUE)
length = LevelLength(length_value)
password_value = data.get(PASSWORD, DEFAULT_PASSWORD)
password_data = Password.from_robtop_value(password_value)
original_id = data.get(ORIGINAL_ID, DEFAULT_ID)
two_player = data.get(TWO_PLAYER, DEFAULT_TWO_PLAYER)
object_count = data.get(OBJECT_COUNT, DEFAULT_OBJECT_COUNT)
high_object_count = data.get(HIGH_OBJECT_COUNT, DEFAULT_HIGH_OBJECT_COUNT)
requested_stars = data.get(REQUESTED_STARS, DEFAULT_STARS)
low_detail = data.get(LOW_DETAIL, DEFAULT_LOW_DETAIL)
low_detail_toggled = data.get(LOW_DETAIL_TOGGLED, DEFAULT_LOW_DETAIL_TOGGLED)
editor_seconds = data.get(EDITOR_SECONDS)
if editor_seconds is None:
editor_time = duration()
else:
editor_time = duration(seconds=editor_seconds)
copies_seconds = data.get(COPIES_SECONDS)
if copies_seconds is None:
copies_time = duration()
else:
copies_time = duration(seconds=copies_seconds)
level_order = data.get(LEVEL_ORDER, DEFAULT_LEVEL_ORDER)
folder_id = data.get(FOLDER_ID, DEFAULT_ID)
level.description = description
level.unprocessed_data = unprocessed_data
level.length = length
level.password_data = password_data
level.original_id = original_id
level.two_player = two_player
level.object_count = object_count
level.high_object_count = high_object_count
level.requested_stars = requested_stars
level.low_detail = low_detail
level.low_detail_toggled = low_detail_toggled
level.editor_time = editor_time
level.copies_time = copies_time
level.level_order = level_order
level.folder_id = folder_id
return level
def to_robtop_data(self) -> StringDict[Any]:
data = super().to_robtop_data()
actual = {
DESCRIPTION: encode_base64_string_url_safe(self.description),
DATA: self.unprocessed_data,
LENGTH: self.length.value,
PASSWORD: self.password_data.to_robtop_value(),
ORIGINAL_ID: self.original_id,
TWO_PLAYER: self.is_two_player(),
OBJECT_COUNT: self.object_count,
HIGH_OBJECT_COUNT: self.has_high_object_count(),
REQUESTED_STARS: self.requested_stars,
LOW_DETAIL: self.has_low_detail(),
LOW_DETAIL_TOGGLED: self.has_low_detail_toggled(),
EDITOR_SECONDS: int(self.editor_time.total_seconds()), # type: ignore
COPIES_SECONDS: int(self.copies_time.total_seconds()), # type: ignore
LEVEL_ORDER: self.level_order,
FOLDER_ID: self.folder_id,
}
data.update(actual)
return data
VERIFIED_BIT = 0b00000001
UPLOADED_BIT = 0b00000010
UNLISTED_BIT = 0b00000100
COLLECTED_COINS_MASK = 0b00111000
COLLECTED_COINS_SHIFT = UNLISTED_BIT.bit_length()
CR = TypeVar("CR", bound="CreatedLevelAPI")
@define()
class CreatedLevelAPI(CustomLevelAPI):
TYPE: ClassVar[LevelType] = LevelType.CREATED
revision: int = field(default=DEFAULT_REVISION)
verified: bool = field(default=DEFAULT_VERIFIED)
uploaded: bool = field(default=DEFAULT_UPLOADED)
recording: Recording = field(factory=Recording, repr=False)
collected_coins: CollectedCoins = field(default=CollectedCoins.DEFAULT)
unlisted: bool = field(default=DEFAULT_UNLISTED)
def __hash__(self) -> int:
return hash(type(self)) ^ self.id
def is_verified(self) -> bool:
return self.verified
def is_uploaded(self) -> bool:
return self.uploaded
def is_unlisted(self) -> bool:
return self.unlisted
@classmethod
def from_robtop_data(cls: Type[CR], data: StringMapping[Any]) -> CR: # type: ignore
level = super().from_robtop_data(data)
revision = data.get(REVISION, DEFAULT_REVISION)
verified = data.get(VERIFIED, DEFAULT_VERIFIED)
uploaded = data.get(UPLOADED, DEFAULT_UPLOADED)
recording_string = data.get(RECORDING, EMPTY)
recording = Recording.from_robtop(recording_string)
first_coin = data.get(FIRST_COIN, DEFAULT_COLLECTED)
second_coin = data.get(SECOND_COIN, DEFAULT_COLLECTED)
third_coin = data.get(THIRD_COIN, DEFAULT_COLLECTED)
collected_coins = CollectedCoins.NONE
if first_coin:
collected_coins |= CollectedCoins.FIRST
if second_coin:
collected_coins |= CollectedCoins.SECOND
if third_coin:
collected_coins |= CollectedCoins.THIRD
unlisted = data.get(UNLISTED, DEFAULT_UNLISTED)
level.revision = revision
level.verified = verified
level.uploaded = uploaded
level.recording = recording
level.collected_coins = collected_coins
level.unlisted = unlisted
return level
def to_robtop_data(self) -> StringDict[Any]:
data = super().to_robtop_data()
collected_coins = self.collected_coins
actual = {
REVISION: self.revision,
VERIFIED: self.is_verified(),
UPLOADED: self.is_uploaded(),
RECORDING: self.recording.to_robtop(),
FIRST_COIN: collected_coins.first(),
SECOND_COIN: collected_coins.second(),
THIRD_COIN: collected_coins.third(),
UNLISTED: self.is_unlisted(),
}
data.update(actual)
return data
@classmethod
def from_binary(
cls: Type[CR],
binary: BinaryReader,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> CR:
verified_bit = VERIFIED_BIT
uploaded_bit = UPLOADED_BIT
unlisted_bit = UNLISTED_BIT
level = super().from_binary(binary, order, version, encoding, errors)
reader = Reader(binary, order)
revision = reader.read_u8()
value = reader.read_u8()
verified = value & verified_bit == verified_bit
uploaded = value & uploaded_bit == uploaded_bit
unlisted = value & unlisted_bit == unlisted_bit
collected_coins_value = (value & COLLECTED_COINS_MASK) >> COLLECTED_COINS_SHIFT
collected_coins = CollectedCoins(collected_coins_value)
recording = Recording.from_binary(binary, order, version)
level.revision = revision
level.verified = verified
level.uploaded = uploaded
level.recording = recording
level.collected_coins = collected_coins
level.unlisted = unlisted
return level
def to_binary(
self,
binary: BinaryWriter,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> None:
super().to_binary(binary, order, version, encoding, errors)
writer = Writer(binary, order)
writer.write_u8(self.revision)
value = 0
if self.is_verified():
value |= VERIFIED_BIT
if self.is_uploaded():
value |= UPLOADED_BIT
if self.is_unlisted():
value |= UNLISTED_BIT
value |= self.collected_coins.value << COLLECTED_COINS_SHIFT
writer.write_u8(value)
self.recording.to_binary(binary, order, version)
HIDDEN_BIT = 0b00000001
VERIFIED_COINS_BIT = 0b00000010
FAVORITE_BIT = 0b00000100
S = TypeVar("S", bound="SavedLevelAPI")
@define()
class SavedLevelAPI(CustomLevelAPI):
TYPE: ClassVar[LevelType] = LevelType.SAVED
difficulty: Difficulty = field(default=Difficulty.DEFAULT)
downloads: int = field(default=DEFAULT_DOWNLOADS)
game_version: GameVersion = field(default=CURRENT_GAME_VERSION)
rating: int = field(default=DEFAULT_RATING)
stars: int = field(default=DEFAULT_STARS)
score: int = field(default=DEFAULT_SCORE)
rate_type: RateType = field(default=RateType.DEFAULT)
hidden: bool = field(default=DEFAULT_HIDDEN)
verified_coins: bool = field(default=DEFAULT_VERIFIED_COINS)
favorite: bool = field(default=DEFAULT_FAVORITE)
@classmethod
def from_robtop_data(cls: Type[S], data: StringMapping[Any]) -> S: # type: ignore
level = super().from_robtop_data(data)
difficulty_numerator = data.get(DIFFICULTY_NUMERATOR, DEFAULT_NUMERATOR)
difficulty_denominator = data.get(DIFFICULTY_DENOMINATOR, DEFAULT_DENOMINATOR)
demon_difficulty_value = data.get(DEMON_DIFFICULTY, DEFAULT_DEMON_DIFFICULTY_VALUE)
auto = data.get(AUTO, DEFAULT_AUTO)
demon = data.get(DEMON, DEFAULT_DEMON)
difficulty_parameters = DifficultyParameters(
difficulty_numerator=difficulty_numerator,
difficulty_denominator=difficulty_denominator,
demon_difficulty_value=demon_difficulty_value,
auto=auto,
demon=demon,
)
difficulty = difficulty_parameters.into_difficulty()
downloads = data.get(DOWNLOADS, DEFAULT_DOWNLOADS)
game_version_value = data.get(GAME_VERSION)
if game_version_value is None:
game_version = CURRENT_GAME_VERSION
else:
game_version = GameVersion.from_robtop_value(game_version_value)
rating = data.get(RATING, DEFAULT_RATING)
stars = data.get(STARS, DEFAULT_STARS)
rate_type = RateType.NOT_RATED
if stars > 0:
rate_type = RateType.RATED
score = data.get(SCORE, DEFAULT_SCORE)
if score < 0:
score = 0
if score > 0:
rate_type = RateType.FEATURED
epic = data.get(EPIC, DEFAULT_EPIC)
if epic:
rate_type = RateType.EPIC
hidden = data.get(HIDDEN, DEFAULT_HIDDEN)
verified_coins = data.get(VERIFIED_COINS, DEFAULT_VERIFIED_COINS)
favorite = data.get(FAVORITE, DEFAULT_FAVORITE)
level.difficulty = difficulty
level.downloads = downloads
level.game_version = game_version
level.rating = rating
level.stars = stars
level.score = score
level.rate_type = rate_type
level.hidden = hidden
level.verified_coins = verified_coins
level.favorite = favorite
return level
def to_robtop_data(self) -> StringDict[Any]:
data = super().to_robtop_data()
difficulty_parameters = DifficultyParameters.from_difficulty(self.difficulty)
actual = {
# difficulty parameters
DIFFICULTY_NUMERATOR: difficulty_parameters.difficulty_numerator,
DIFFICULTY_DENOMINATOR: difficulty_parameters.difficulty_denominator,
DEMON_DIFFICULTY: difficulty_parameters.demon_difficulty_value,
AUTO: difficulty_parameters.auto,
DEMON: difficulty_parameters.demon,
# others
DOWNLOADS: self.downloads,
GAME_VERSION: self.game_version.to_robtop_value(),
RATING: self.rating,
STARS: self.stars,
SCORE: self.score,
EPIC: self.is_epic(),
HIDDEN: self.is_hidden(),
VERIFIED_COINS: self.has_verified_coins(),
FAVORITE: self.is_favorite(),
}
data.update(actual)
return data
@classmethod
def from_binary(
cls: Type[S],
binary: BinaryReader,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> S:
hidden_bit = HIDDEN_BIT
verified_coins_bit = VERIFIED_COINS_BIT
favorite_bit = FAVORITE_BIT
level = super().from_binary(binary, order, version, encoding, errors)
reader = Reader(binary, order)
difficulty_value = reader.read_u8()
difficulty = Difficulty(difficulty_value)
downloads = reader.read_u32()
game_version = GameVersion.from_binary(binary, order, version)
rating = reader.read_i32()
stars = reader.read_u8()
score = reader.read_u32()
rate_type_value = reader.read_u8()
rate_type = RateType(rate_type_value)
value = reader.read_u8()
hidden = value & hidden_bit == hidden_bit
verified_coins = value & verified_coins_bit == verified_coins_bit
favorite = value & favorite_bit == favorite_bit
level.difficulty = difficulty
level.downloads = downloads
level.game_version = game_version
level.rating = rating
level.stars = stars
level.score = score
level.rate_type = rate_type
level.hidden = hidden
level.verified_coins = verified_coins
level.favorite = favorite
return level
def to_binary(
self,
binary: BinaryWriter,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> None:
super().to_binary(binary, order, version, encoding, errors)
writer = Writer(binary, order)
writer.write_u8(self.difficulty.value)
writer.write_u32(self.downloads)
self.game_version.to_binary(binary, order, version)
writer.write_i32(self.rating)
writer.write_u8(self.stars)
writer.write_u32(self.score)
writer.write_u8(self.rate_type.value)
value = 0
if self.is_hidden():
value |= HIDDEN_BIT
if self.has_verified_coins():
value |= VERIFIED_COINS_BIT
if self.is_favorite():
value |= FAVORITE_BIT
writer.write_u8(value)
def __hash__(self) -> int:
return hash(type(self)) ^ self.id
def is_rated(self) -> bool:
return self.rate_type.is_rated()
def is_featured(self) -> bool:
return self.rate_type.is_featured()
def is_epic(self) -> bool:
return self.rate_type.is_epic()
def is_godlike(self) -> bool:
return self.rate_type.is_godlike()
def is_demon(self) -> bool:
return self.difficulty.is_demon()
def is_hidden(self) -> bool:
return self.hidden
def has_verified_coins(self) -> bool:
return self.verified_coins
def is_favorite(self) -> bool:
return self.favorite
T = TypeVar("T", bound="TimelyLevelAPI")
@define()
class TimelyLevelAPI(SavedLevelAPI):
timely_id: int = field(default=DEFAULT_ID)
timely_type: TimelyType = field(default=TimelyType.DEFAULT)
def __hash__(self) -> int:
return hash(type(self)) ^ self.id
@classmethod
def from_robtop_data(cls: Type[T], data: StringMapping[Any]) -> T: # type: ignore
level = super().from_robtop_data(data)
timely_id = data.get(TIMELY_ID, DEFAULT_ID)
result, timely_id = divmod(timely_id, WEEKLY_ID_ADD)
if result:
timely_type = TimelyType.WEEKLY
else:
timely_type = TimelyType.DAILY
level.timely_id = timely_id
level.timely_type = timely_type
return level
def to_robtop_data(self) -> StringDict[Any]:
data = super().to_robtop_data()
timely_id = self.timely_id
if self.is_weekly():
timely_id += WEEKLY_ID_ADD
data[TIMELY_ID] = timely_id
return data
@classmethod
def from_binary(
cls: Type[T],
binary: BinaryReader,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> T:
level = super().from_binary(binary, order, version, encoding, errors)
reader = Reader(binary, order)
timely_id = reader.read_u16()
timely_type_value = reader.read_u8()
timely_type = TimelyType(timely_type_value)
level.timely_id = timely_id
level.timely_type = timely_type
return level
def to_binary(
self,
binary: BinaryWriter,
order: ByteOrder = ByteOrder.DEFAULT,
version: int = VERSION,
encoding: str = DEFAULT_ENCODING,
errors: str = DEFAULT_ERRORS,
) -> None:
super().to_binary(binary, order, version, encoding, errors)
writer = Writer(binary, order)
writer.write_u16(self.timely_id)
writer.write_u8(self.timely_type.value)
def is_timely(self, timely_type: Optional[TimelyType] = None) -> bool:
if timely_type is None:
return self.timely_type.is_timely()
return self.timely_type is timely_type
def is_daily(self) -> bool:
return self.is_timely(TimelyType.DAILY)
def is_weekly(self) -> bool:
return self.is_timely(TimelyType.WEEKLY)
def is_event(self) -> bool:
return self.is_timely(TimelyType.EVENT)
@define()
class GauntletLevelAPI(SavedLevelAPI):
def __hash__(self) -> int:
return hash(type(self)) ^ self.id
def to_robtop_data(self) -> StringDict[Any]:
data = super().to_robtop_data()
data[GAUNTLET] = True
return data
|
PypiClean
|
/bibdesk2zotero-0.0.3.tar.gz/bibdesk2zotero-0.0.3/README.md
|
# bibdesk2zotero
This is a little utility for rewriting a BibDesk BibTeX file so that it can be
read by Zotero with the file references intact. BibDesk and Zotero mostly get
along except for the way BibDesk stores links to documents such as PDFs. This
utility Base64-decodes each Bdesk-File path, parses the serialized [plist],
extracts the relative path to the file, and adds it back to the bibliographic
entry as an absolute path.
[plist]: https://en.wikipedia.org/wiki/Property_list
## Install
pip install bibdesk2zotero
## Use
$ bibdesk2zotero citations.bib /path/to/pdf/files/ > new-citations.bib
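
## How it works (sketch)

The conversion hinges on decoding the Base64 payload stored in each Bdesk-File
field and pulling a relative path out of the embedded plist. The snippet below
is only a rough sketch of that step, assuming a flat plist with a
`relativePath` key (real BibDesk payloads are keyed archives, so the actual
tool has to dig a little deeper):

    import base64
    import os
    import plistlib

    def resolve_bdsk_file(value, base_dir):
        """Turn one Base64-encoded Bdesk-File value into an absolute path."""
        plist = plistlib.loads(base64.b64decode(value))
        rel = plist["relativePath"]  # assumed key; keyed-archive payloads nest it deeper
        return os.path.abspath(os.path.join(base_dir, rel))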
|
PypiClean
|
/lite_tools-0.4.10.9-py3-none-any.whl/lite_tools/tools/core/lite_try.py
|
import time
import asyncio
import traceback
from typing import Callable, TypeVar, List, Union, Sequence
from functools import wraps, partial
from asyncio import iscoroutinefunction
from lite_tools.logs import my_logger as try_log
from lite_tools.logs import logger, handle_exception
__ALL__ = ["try_catch"]
T = TypeVar('T')
class BaseRetryException(Exception):
def __init__(self):
pass
def combine_retry(exc: Union[List[Exception], Exception, type]) -> T:
if isinstance(exc, tuple):
not_retry_exception = exc
elif isinstance(exc, (list, set)):
not_retry_exception = tuple(exc)
elif issubclass(exc, Exception):
not_retry_exception = exc
else:
not_retry_exception = BaseRetryException
return not_retry_exception
def get_params_args(params):
if isinstance(params[0], int):  # handles the (1, 2) style
for item in params:
if not isinstance(item, int):
return []
return params
if isinstance(params[0], list):  # handles the ([1, 23], ["e"]) style
for item in params[0]:
if not isinstance(item, int):
return []
return params[0]
return []
def get_params_kwargs(params):
if isinstance(params[0], str):  # handles the ("data", "args") style
for item in params:
if not isinstance(item, str):
return []
return params
if isinstance(params[1], list):  # handles the ([1, 23], ["e"]) style
for item in params[1]:
if not isinstance(item, str):
return []
return params[1]
return []
def try_catch(
func=None, *,
retry: int = 1,
except_retry: Union[List[Exception], Exception, type] = BaseRetryException,
ignore_retry: Union[List[Exception], Exception, type] = BaseRetryException,
default: T = None, log: Union[bool, str] = True, catch: bool = False, timeout: Union[int, float] = None,
err_callback: Callable = None, err_args: Sequence = None, err_params: T = None):
"""
异常捕获装饰器
-->不加参数 就是把异常捕获了 返回None
-->加了参数==参数如下:
:param func :
:param retry : 重试次数
:param timeout : 重试的时候加的休眠时间
:param except_retry: 如果有这种异常那么就不重试 直接返回默认值 这里如果写Exception 那么就会忽略所有异常不进行重试直接返回默认值
:param ignore_retry: 如果有这种异常 就继续重试
:param default : 默认的返回值
:param log : 是否打印报错信息,默认是打印的(如果传入指定的内容 那么就会报错指定内容)
:param catch : 按栈方式捕获异常
:param err_callback: 当出现错误的时候调用的回调函数,只需要传入方法名即可
:param err_args : 如果有参数请用序列方式传入,要结合你自己的err_callback参数,无参数也可以 参考见demo
:param err_params : 这个和 err_args 二选一参数两者不同的区别在于 args是自己方法的自己的参数, params是装饰函数的参数
优先级是 err_args > err_params 也就是如果写了args参数那么将不会提取err_params的参数
> 如何取值:
"""
if func is None:
return partial(
try_catch, retry=retry, except_retry=except_retry, ignore_retry=ignore_retry, default=default, log=log, catch=catch, timeout=timeout,
err_callback=err_callback, err_args=err_args, err_params=err_params
)
not_retry_exception = combine_retry(except_retry)
continue_exception = combine_retry(ignore_retry)
def __log_true(last: bool = False, *args, **kwargs):
"""
:param last: the log is only emitted when this is the final retry attempt
"""
line, fl, exception_type, exception_detail = handle_exception(traceback.format_exc(), func.__name__)
if err_callback is not None and last is True:
try:
if isinstance(err_args, (tuple, list, set, dict)):
err_callback(*err_args)
else:
if err_params is None:
err_callback()
else:
# at this point err_params is guaranteed to be non-empty
function_args = args[0]
function_kwargs = args[1]
send_args = ()
if function_args:
send_args = get_params_args(err_params)  # returns something shaped like [1, 3]
send_kwargs = ()
if function_kwargs:
send_kwargs = get_params_kwargs(err_params)
cache_args = []
for a in send_args:
if a >= len(args[0]):
continue
cache_args.append(args[0][a])
cache_kwargs = {}
for k in send_kwargs:
cache_kwargs[k] = args[1].get(k)
try:
err_callback(*cache_args, **cache_kwargs)
except Exception as err:
if log is True:
logger.error(f"""[{err.__traceback__.tb_lineno}] 参数有问题,这里建议的参数设置方案如下:
def 被装饰函数(a, b, c=None, d=None): | def 回调函数(x, c=666):
pass | pass
首先被装饰函数 a, b 是位置传参 那么我们建议是:
@try_catch(..., err_params=([1], ["c"])) 这里意思就是
位置参数取 b -赋值给回调函数-> x
命名传递的参数 那么回调函数也得同名并且 参数位置第二个列表里面写的键是沟通两个函数的所以命名得一样
如果只需要取值位置参数 或者 命名传递,只需要传需要传的就行了 如:
@try_catch(..., err_params=(0, 1)) | @try_catch(..., err_params=("c",))
如果被装饰是命名传参,那么这个回调函数的参数也得设置为命名 如上c,d 调用被装饰函数的时候如果用位置传参那么命名将取不到值 如 被装饰函数(a, b, c) 没有用 被装饰函数(a, b, c=c)
如果被装饰函数全部传入的是位置传参,那么位置对应好即可 如 被装饰函数(a, b, c) 不要用c=c 这样子 回调函数哪里参数可以直接写 err_params=(0, 1, 2)
原理是根据你调用函数是怎么传参的 传入的参数示例: (('aaa', 'bbbb', 'ccccc'), {{'d': 888, 'e': 666}}) 位置传参映射第一部分 命名传参映射第二部分
""")
except Exception as err:
if log is True:
logger.error(f"[{err.__traceback__.tb_lineno}] 传入的回调函数不存在或者报错: {err}")
if catch is True:
logger.opt(exception=True, colors=True, capture=True).error("Information: ↓ ↓ ↓ ")
elif log and isinstance(log, str):
logger.error(log)
else:
try_log(fl, func.__name__, line, f"{exception_type} --> {exception_detail}")
@wraps(func)
def wrapper(*args, **kwargs):
for ind in range(retry):
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
print()
exit(0)
except not_retry_exception:
break
except continue_exception:
continue
except Exception as err:
_ = err
if log:
__log_true(True if retry-1 == ind else False, args, kwargs)
if timeout is not None and isinstance(timeout, (int, float)):
time.sleep(timeout)
continue
return default
@wraps(func)
async def async_wrapper(*args, **kwargs):
for ind in range(retry):
try:
return await func(*args, **kwargs)
except KeyboardInterrupt:
print()
exit(0)
except not_retry_exception:
break
except continue_exception:
continue
except Exception as err:
_ = err
if log:
__log_true(True if retry-1 == ind else False, args, kwargs)
if timeout is not None and isinstance(timeout, (int, float)):
await asyncio.sleep(timeout)
continue
return default
return async_wrapper if iscoroutinefunction(func) else wrapper
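# --- Usage sketch (illustrative only, not part of the library API) ---
# A minimal demonstration of the decorator above; the function names and
# values here are hypothetical and only show how err_callback / err_params
# map arguments of the decorated function onto the callback.
if __name__ == "__main__":

    def on_error(url, key=None):
        # receives positional argument 0 ("url") and keyword argument "key"
        # of the failed call, as selected by err_params below
        print("failed for", url, key)

    @try_catch(retry=3, timeout=0.5, default={}, err_callback=on_error,
               err_params=([0], ["key"]))
    def fetch(url, key=None):
        raise ValueError("boom")

    # retries 3 times (sleeping 0.5s between attempts), fires on_error on the
    # last attempt, then returns the default value {}
    print(fetch("http://example.com", key="token"))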
|
PypiClean
|
/js.extjs-4.2.1.883.tar.gz/js.extjs-4.2.1.883/js/extjs/resources/examples/ux/ToolbarDroppable.js
|
Ext.define('Ext.ux.ToolbarDroppable', {
/**
* Creates new ToolbarDroppable.
* @param {Object} config Config options.
*/
constructor: function(config) {
Ext.apply(this, config);
},
/**
* Initializes the plugin and saves a reference to the toolbar
* @param {Ext.toolbar.Toolbar} toolbar The toolbar instance
*/
init: function(toolbar) {
/**
* @property toolbar
* @type Ext.toolbar.Toolbar
* The toolbar instance that this plugin is tied to
*/
this.toolbar = toolbar;
this.toolbar.on({
scope : this,
render: this.createDropTarget
});
},
/**
* Creates a drop target on the toolbar
*/
createDropTarget: function() {
/**
* @property dropTarget
* @type Ext.dd.DropTarget
* The drop target attached to the toolbar instance
*/
this.dropTarget = Ext.create('Ext.dd.DropTarget', this.toolbar.getEl(), {
notifyOver: Ext.Function.bind(this.notifyOver, this),
notifyDrop: Ext.Function.bind(this.notifyDrop, this)
});
},
/**
* Adds the given DD Group to the drop target
* @param {String} ddGroup The DD Group
*/
addDDGroup: function(ddGroup) {
this.dropTarget.addToGroup(ddGroup);
},
/**
* Calculates the location on the toolbar to create the new sorter button based on the XY of the
* drag event
* @param {Ext.EventObject} e The event object
* @return {Number} The index at which to insert the new button
*/
calculateEntryIndex: function(e) {
var entryIndex = 0,
toolbar = this.toolbar,
items = toolbar.items.items,
count = items.length,
xHover = e.getXY()[0],
index = 0,
el, xTotal, width, midpoint;
for (; index < count; index++) {
el = items[index].getEl();
xTotal = el.getXY()[0];
width = el.getWidth();
midpoint = xTotal + width / 2;
if (xHover < midpoint) {
entryIndex = index;
break;
} else {
entryIndex = index + 1;
}
}
return entryIndex;
},
/**
* Returns true if the drop is allowed on the drop target. This function can be overridden
* and defaults to simply return true
* @param {Object} data Arbitrary data from the drag source
* @return {Boolean} True if the drop is allowed
*/
canDrop: function(data) {
return true;
},
/**
* Custom notifyOver method which will be used in the plugin's internal DropTarget
* @return {String} The CSS class to add
*/
notifyOver: function(dragSource, event, data) {
return this.canDrop.apply(this, arguments) ? this.dropTarget.dropAllowed : this.dropTarget.dropNotAllowed;
},
/**
* Called when the drop has been made. Creates the new toolbar item, places it at the correct location
* and calls the afterLayout callback.
*/
notifyDrop: function(dragSource, event, data) {
var canAdd = this.canDrop(dragSource, event, data),
tbar = this.toolbar;
if (canAdd) {
var entryIndex = this.calculateEntryIndex(event);
tbar.insert(entryIndex, this.createItem(data));
tbar.doLayout();
this.afterLayout();
}
return canAdd;
},
/**
* Creates the new toolbar item based on drop data. This method must be implemented by the plugin instance
* @param {Object} data Arbitrary data from the drop
* @return {Mixed} An item that can be added to a toolbar
*/
createItem: function(data) {
//<debug>
Ext.Error.raise("The createItem method must be implemented in the ToolbarDroppable plugin");
//</debug>
},
/**
* Called after a new button has been created and added to the toolbar. Add any required cleanup logic here
*/
afterLayout: Ext.emptyFn
});
|
PypiClean
|
/megrok.resourceviewlet-0.2.tar.gz/megrok.resourceviewlet-0.2/src/megrok/resourceviewlet/README.txt
|
======================
megrok.resourceviewlet
======================
`megrok.resourceviewlet` is a package meant to include resources
using layer, context and view discriminations.
Setup
=====
Let's import and init the necessary work environment::
>>> import grokcore.component as grok
>>> from grokcore import view, viewlet
>>> from zope.app.wsgi.testlayer import Browser
>>> browser = Browser()
>>> browser.handleErrors = False
Library
=======
We first declare a resource. We'll include it in our page::
>>> from fanstatic import Resource, Library
>>> myLibrary = Library('test_library', 'ftests/resources')
>>> Thing = Resource(myLibrary, 'thing.js')
This step is done by an entry point. For the testing, we do it by hand::
>>> from zope.fanstatic.zcml import create_factory
>>> from zope.component import getGlobalSiteManager
>>> from zope.publisher.interfaces.browser import IBrowserRequest
>>> from zope.interface import Interface
>>> resource_factory = create_factory(myLibrary)
>>> getGlobalSiteManager().registerAdapter(
... resource_factory, (IBrowserRequest,), Interface, myLibrary.name)
Components
==========
To demonstrate our resource viewlet, we first need a page to
render. This page contains a content provider named 'resources'::
>>> from zope.interface import Interface
>>> class Index(view.View):
... view.require("zope.Public")
... view.context(Interface)
...
... template = view.PageTemplate("""<html><head>
... <tal:resources replace='provider:resources' />
... </head></html>""")
>>> grok.testing.grok_component('index', Index)
True
Manager
-------
We now register a content provider named 'resources'. It will be a
ResourcesManager. A ResourcesManager is a component
dedicated to rendering ResourceViewlets::
>>> from megrok.resourceviewlet import ResourcesManager
>>> class Resources(ResourcesManager):
... viewlet.context(Interface)
>>> grok.testing.grok_component('resources', Resources)
True
Viewlet
-------
Now, we register a ResourceViewlet, including our resource. The
declaration is very straightforward::
>>> from megrok.resourceviewlet import ResourceViewlet
>>> class SomeViewlet(ResourceViewlet):
... viewlet.context(Interface)
... resources = [Thing]
>>> grok.testing.grok_component('viewlet', SomeViewlet)
True
By default, a ResourceViewlet is registered for an instance of
ResourcesManager. Most of the time, a page contains only one of
these content providers. If it's not the case, make sure to provide
your own `viewletmanager` directive value.
Rendering
=========
Rendering our page should render the ResourcesManager and
therefore, include our resource::
>>> browser.open('http://localhost/@@index')
>>> print browser.contents
<html><head>
<script type="text/javascript"
src="http://localhost/fanstatic/test_library/thing.js"></script>
</head></html>
It works! Enjoy.
|
PypiClean
|
/ThreeDVG/lib/solver.py
|
import os
import sys
import time
import torch
import numpy as np
from tqdm import tqdm
from tensorboardX import SummaryWriter
from torch.optim.lr_scheduler import StepLR, MultiStepLR, CosineAnnealingLR
from ..lib.config import CONF
from ..lib.loss_helper import get_loss
from ..lib.eval_helper import get_eval
from ..utils.eta import decode_eta
from ..lib.pointnet2.pytorch_utils import BNMomentumScheduler
ITER_REPORT_TEMPLATE = """
-------------------------------iter: [{epoch_id}: {iter_id}/{total_iter}]-------------------------------
[loss] train_loss: {train_loss}
[loss] train_ref_loss: {train_ref_loss}
[loss] train_lang_loss: {train_lang_loss}
[loss] train_objectness_loss: {train_objectness_loss}
[loss] train_vote_loss: {train_vote_loss}
[loss] train_box_loss: {train_box_loss}
[loss] train_lang_acc: {train_lang_acc}
[sco.] train_ref_acc: {train_ref_acc}
[sco.] train_obj_acc: {train_obj_acc}
[sco.] train_pos_ratio: {train_pos_ratio}, train_neg_ratio: {train_neg_ratio}
[sco.] train_iou_rate_0.25: {train_iou_rate_25}, train_iou_rate_0.5: {train_iou_rate_5}
[sco.] train_iou_max_rate_0.25: {train_iou_max_rate_25}, train_iou_max_rate_0.5: {train_iou_max_rate_5}
[info] mean_fetch_time: {mean_fetch_time}s
[info] mean_forward_time: {mean_forward_time}s
[info] mean_backward_time: {mean_backward_time}s
[info] mean_eval_time: {mean_eval_time}s
[info] mean_iter_time: {mean_iter_time}s
[info] ETA: {eta_h}h {eta_m}m {eta_s}s
"""
EPOCH_REPORT_TEMPLATE = """
---------------------------------summary---------------------------------
[train] train_loss: {train_loss}
[train] train_ref_loss: {train_ref_loss}
[train] train_lang_loss: {train_lang_loss}
[train] train_objectness_loss: {train_objectness_loss}
[train] train_vote_loss: {train_vote_loss}
[train] train_box_loss: {train_box_loss}
[train] train_lang_acc: {train_lang_acc}
[train] train_ref_acc: {train_ref_acc}
[train] train_obj_acc: {train_obj_acc}
[train] train_pos_ratio: {train_pos_ratio}, train_neg_ratio: {train_neg_ratio}
[train] train_iou_rate_0.25: {train_iou_rate_25}, train_iou_rate_0.5: {train_iou_rate_5}
[train] train_max_iou_rate_0.25: {train_max_iou_rate_25}, train_max_iou_rate_0.5: {train_max_iou_rate_5}
[val] val_loss: {val_loss}
[val] val_ref_loss: {val_ref_loss}
[val] val_lang_loss: {val_lang_loss}
[val] val_objectness_loss: {val_objectness_loss}
[val] val_vote_loss: {val_vote_loss}
[val] val_box_loss: {val_box_loss}
[val] val_lang_acc: {val_lang_acc}
[val] val_ref_acc: {val_ref_acc}
[val] val_obj_acc: {val_obj_acc}
[val] val_pos_ratio: {val_pos_ratio}, val_neg_ratio: {val_neg_ratio}
[val] val_iou_rate_0.25: {val_iou_rate_25}, val_iou_rate_0.5: {val_iou_rate_5}
[val] val_max_iou_rate_0.25: {val_max_iou_rate_25}, val_max_iou_rate_0.5: {val_max_iou_rate_5}
"""
BEST_REPORT_TEMPLATE = """
--------------------------------------best--------------------------------------
[best] epoch: {epoch}
[loss] loss: {loss}
[loss] ref_loss: {ref_loss}
[loss] lang_loss: {lang_loss}
[loss] objectness_loss: {objectness_loss}
[loss] vote_loss: {vote_loss}
[loss] box_loss: {box_loss}
[loss] lang_acc: {lang_acc}
[sco.] ref_acc: {ref_acc}
[sco.] obj_acc: {obj_acc}
[sco.] pos_ratio: {pos_ratio}, neg_ratio: {neg_ratio}
[sco.] iou_rate_0.25: {iou_rate_25}, iou_rate_0.5: {iou_rate_5}
"""
class Solver():
def __init__(self, model, config, dataloader, optimizer, stamp, val_step=10,
detection=True, reference=True, use_lang_classifier=True,
lr_decay_step=None, lr_decay_rate=None, bn_decay_step=None, bn_decay_rate=None):
self.epoch = 0 # set in __call__
self.verbose = 0 # set in __call__
self.model = model
self.config = config
self.dataloader = dataloader
self.optimizer = optimizer
self.stamp = stamp
self.val_step = val_step
self.detection = detection
self.reference = reference
self.use_lang_classifier = use_lang_classifier
self.lr_decay_step = lr_decay_step
self.lr_decay_rate = lr_decay_rate
self.bn_decay_step = bn_decay_step
self.bn_decay_rate = bn_decay_rate
self.best = {
"epoch": 0,
"loss": float("inf"),
"ref_loss": float("inf"),
"lang_loss": float("inf"),
"objectness_loss": float("inf"),
"vote_loss": float("inf"),
"box_loss": float("inf"),
"lang_acc": -float("inf"),
"ref_acc": -float("inf"),
"obj_acc": -float("inf"),
"pos_ratio": -float("inf"),
"neg_ratio": -float("inf"),
"iou_rate_0.25": -float("inf"),
"iou_rate_0.5": -float("inf"),
"max_iou_rate_0.25": -float("inf"),
"max_iou_rate_0.5": -float("inf")
}
# init log
# contains all necessary info for all phases
self.log = {
"train": {},
"val": {}
}
# tensorboard
os.makedirs(os.path.join(CONF.PATH.OUTPUT, stamp, "tensorboard/train"), exist_ok=True)
os.makedirs(os.path.join(CONF.PATH.OUTPUT, stamp, "tensorboard/val"), exist_ok=True)
self._log_writer = {
"train": SummaryWriter(os.path.join(CONF.PATH.OUTPUT, stamp, "tensorboard/train")),
"val": SummaryWriter(os.path.join(CONF.PATH.OUTPUT, stamp, "tensorboard/val"))
}
# training log
log_path = os.path.join(CONF.PATH.OUTPUT, stamp, "log.txt")
self.log_fout = open(log_path, "a")
eval_path = os.path.join(CONF.PATH.OUTPUT, stamp, "eval.txt")
self.eval_fout = open(eval_path, "a")
# private
# only for internal access and temporary results
self._running_log = {}
self._global_iter_id = 0
self._total_iter = {} # set in __call__
# templates
self.__iter_report_template = ITER_REPORT_TEMPLATE
self.__epoch_report_template = EPOCH_REPORT_TEMPLATE
self.__best_report_template = BEST_REPORT_TEMPLATE
# lr scheduler
if lr_decay_step:
if isinstance(lr_decay_step, list):
self.lr_scheduler = MultiStepLR(optimizer, lr_decay_step, lr_decay_rate)
elif isinstance(lr_decay_step, dict):
if lr_decay_step['type'] != 'cosine':
raise NotImplementedError('lr dict type should be cosine (other not implemented)')
print(lr_decay_step, '<< lr_decay_step dict', flush=True) # TODO
config = lr_decay_step
config['optimizer'] = optimizer
config.pop('type')
self.lr_scheduler = CosineAnnealingLR(**config)
else:
self.lr_scheduler = StepLR(optimizer, lr_decay_step, lr_decay_rate)
else:
self.lr_scheduler = None
# bn scheduler
if bn_decay_step and bn_decay_rate:
it = -1
start_epoch = 0
BN_MOMENTUM_INIT = 0.5
BN_MOMENTUM_MAX = 0.001
bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * bn_decay_rate**(int(it / bn_decay_step)), BN_MOMENTUM_MAX)
self.bn_scheduler = BNMomentumScheduler(model, bn_lambda=bn_lbmd, last_epoch=start_epoch-1)
else:
self.bn_scheduler = None
def __call__(self, epoch, verbose):
# setting
self.epoch = epoch
self.verbose = verbose
self._total_iter["train"] = len(self.dataloader["train"]) * epoch
self._total_iter["val"] = len(self.dataloader["val"]) * self.val_step
# base_lr = self.lr_scheduler.get_lr()[0]
# base_group_lr = [param['lr'] for param in self.optimizer.param_groups]
for epoch_id in range(epoch):
try:
self._log("epoch {} starting...".format(epoch_id + 1))
if self.lr_scheduler:
# self.lr_scheduler.step()
print("learning rate --> {}\n".format(self.lr_scheduler.get_lr()), flush=True)
# now_lr = self.lr_scheduler.get_lr()[0]
for (idx, param_group) in enumerate(self.optimizer.param_groups):
# print(param_group.keys(), '<< param key shape')
print('[LR Param Group]', param_group['Param_Name'], param_group['lr'], '<< should', flush=True)
# param_group['lr'] = base_group_lr[idx] / base_lr * now_lr
# feed
self.dataloader['train'].dataset.shuffle_data()
self._feed(self.dataloader["train"], "train", epoch_id)
# save model
self._log("saving last models...\n")
model_root = os.path.join(CONF.PATH.OUTPUT, self.stamp)
#torch.save(self.model.state_dict(), os.path.join(model_root, "model_last.pth"))
# update lr scheduler
if self.lr_scheduler:
print("update learning rate --> {}\n".format(self.lr_scheduler.get_lr()))
self.lr_scheduler.step()
# update bn scheduler
if self.bn_scheduler:
print("update batch normalization momentum --> {}\n".format(self.bn_scheduler.lmbd(self.bn_scheduler.last_epoch)))
self.bn_scheduler.step()
except KeyboardInterrupt:
# finish training
self._finish(epoch_id)
exit()
# finish training
self._finish(epoch_id)
def _log(self, info_str):
self.log_fout.write(info_str + "\n")
self.log_fout.flush()
print(info_str, flush=True)
def _log_eval(self, info_str):
self.eval_fout.write(info_str + "\n")
self.eval_fout.flush()
print(info_str, flush=True)
def _reset_log(self, phase):
self.log[phase] = {
# info
"forward": [],
"backward": [],
"eval": [],
"fetch": [],
"iter_time": [],
# loss (float, not torch.cuda.FloatTensor)
"loss": [],
"ref_loss": [],
"lang_loss": [],
"objectness_loss": [],
"vote_loss": [],
"box_loss": [],
# scores (float, not torch.cuda.FloatTensor)
"lang_acc": [],
"ref_acc": [],
"obj_acc": [],
"pos_ratio": [],
"neg_ratio": [],
"iou_rate_0.25": [],
"iou_rate_0.5": [],
"max_iou_rate_0.25": [],
"max_iou_rate_0.5": []
}
def _set_phase(self, phase):
if phase == "train":
self.model.train()
elif phase == "val":
self.model.eval()
else:
raise ValueError("invalid phase")
def _forward(self, data_dict):
data_dict = self.model(data_dict)
return data_dict
def _backward(self):
# optimize
self.optimizer.zero_grad()
self._running_log["loss"].backward()
self.optimizer.step()
def _compute_loss(self, data_dict):
_, data_dict = get_loss(
data_dict=data_dict,
config=self.config,
detection=self.detection,
reference=self.reference,
use_lang_classifier=self.use_lang_classifier
)
# dump
self._running_log["ref_loss"] = data_dict["ref_loss"]
self._running_log["lang_loss"] = data_dict["lang_loss"]
self._running_log["objectness_loss"] = data_dict["objectness_loss"]
self._running_log["vote_loss"] = data_dict["vote_loss"]
self._running_log["box_loss"] = data_dict["box_loss"]
self._running_log["loss"] = data_dict["loss"]
def _eval(self, data_dict):
data_dict = get_eval(
data_dict=data_dict,
config=self.config,
reference=self.reference,
use_lang_classifier=self.use_lang_classifier
)
# dump
self._running_log["lang_acc"] = data_dict["lang_acc"].item()
self._running_log["ref_acc"] = np.mean(data_dict["ref_acc"])
self._running_log["obj_acc"] = data_dict["obj_acc"].item()
self._running_log["pos_ratio"] = data_dict["pos_ratio"].item()
self._running_log["neg_ratio"] = data_dict["neg_ratio"].item()
self._running_log["iou_rate_0.25"] = np.mean(data_dict["ref_iou_rate_0.25"])
self._running_log["iou_rate_0.5"] = np.mean(data_dict["ref_iou_rate_0.5"])
self._running_log["max_iou_rate_0.25"] = np.mean(data_dict["max_iou_rate_0.25"])
self._running_log["max_iou_rate_0.5"] = np.mean(data_dict["max_iou_rate_0.5"])
def _feed(self, dataloader, phase, epoch_id):
# switch mode
self._set_phase(phase)
# re-init log
self._reset_log(phase)
# change dataloader
dataloader = dataloader if phase == "train" else tqdm(dataloader)
for data_dict in dataloader:
# move to cuda
for key in data_dict:
data_dict[key] = data_dict[key].cuda()
# initialize the running loss
self._running_log = {
# loss
"loss": 0,
"ref_loss": 0,
"lang_loss": 0,
"objectness_loss": 0,
"vote_loss": 0,
"box_loss": 0,
# acc
"lang_acc": 0,
"ref_acc": 0,
"obj_acc": 0,
"pos_ratio": 0,
"neg_ratio": 0,
"iou_rate_0.25": 0,
"iou_rate_0.5": 0,
"max_iou_rate_0.25": 0,
"max_iou_rate_0.5": 0
}
# load
self.log[phase]["fetch"].append(data_dict["load_time"].sum().item())
with torch.autograd.set_detect_anomaly(True):
# forward
data_dict["epoch_id"] = epoch_id
start = time.time()
data_dict = self._forward(data_dict)
self._compute_loss(data_dict)
self.log[phase]["forward"].append(time.time() - start)
# backward
if phase == "train":
start = time.time()
self._backward()
self.log[phase]["backward"].append(time.time() - start)
# eval
start = time.time()
self._eval(data_dict)
self.log[phase]["eval"].append(time.time() - start)
# record log
self.log[phase]["loss"].append(self._running_log["loss"].item())
self.log[phase]["ref_loss"].append(self._running_log["ref_loss"].item())
self.log[phase]["lang_loss"].append(self._running_log["lang_loss"].item())
self.log[phase]["objectness_loss"].append(self._running_log["objectness_loss"].item())
self.log[phase]["vote_loss"].append(self._running_log["vote_loss"].item())
self.log[phase]["box_loss"].append(self._running_log["box_loss"].item())
self.log[phase]["lang_acc"].append(self._running_log["lang_acc"])
self.log[phase]["ref_acc"].append(self._running_log["ref_acc"])
self.log[phase]["obj_acc"].append(self._running_log["obj_acc"])
self.log[phase]["pos_ratio"].append(self._running_log["pos_ratio"])
self.log[phase]["neg_ratio"].append(self._running_log["neg_ratio"])
self.log[phase]["iou_rate_0.25"].append(self._running_log["iou_rate_0.25"])
self.log[phase]["iou_rate_0.5"].append(self._running_log["iou_rate_0.5"])
self.log[phase]["max_iou_rate_0.25"].append(self._running_log["max_iou_rate_0.25"])
self.log[phase]["max_iou_rate_0.5"].append(self._running_log["max_iou_rate_0.5"])
# report
if phase == "train":
iter_time = self.log[phase]["fetch"][-1]
iter_time += self.log[phase]["forward"][-1]
iter_time += self.log[phase]["backward"][-1]
iter_time += self.log[phase]["eval"][-1]
self.log[phase]["iter_time"].append(iter_time)
if (self._global_iter_id + 1) % self.verbose == 0:
self._train_report(epoch_id)
# evaluation
if self._global_iter_id % self.val_step == 0 and self._global_iter_id != 0:
print("evaluating...")
# val
self._feed(self.dataloader["val"], "val", epoch_id)
self._dump_log("val")
self._set_phase("train")
self._epoch_report(epoch_id)
# dump log
if self._global_iter_id % 50 == 0:
self._dump_log("train")
self._global_iter_id += 1
# check best
if phase == "val":
cur_criterion = "iou_rate_0.5"
cur_criterion_25 = "iou_rate_0.25"
cur_best = np.mean(self.log[phase][cur_criterion])
cur_best_25 = np.mean(self.log[phase][cur_criterion_25])
if cur_best + cur_best_25 > self.best[cur_criterion] + self.best[cur_criterion_25]:
self._log("best {} achieved: {}".format(cur_criterion, cur_best))
self._log("best {} achieved: {}".format(cur_criterion_25, cur_best_25))
self._log("current train_loss: {}".format(np.mean(self.log["train"]["loss"])))
self._log("current val_loss: {}".format(np.mean(self.log["val"]["loss"])))
self.best["epoch"] = epoch_id + 1
self.best["loss"] = np.mean(self.log[phase]["loss"])
self.best["ref_loss"] = np.mean(self.log[phase]["ref_loss"])
self.best["lang_loss"] = np.mean(self.log[phase]["lang_loss"])
self.best["objectness_loss"] = np.mean(self.log[phase]["objectness_loss"])
self.best["vote_loss"] = np.mean(self.log[phase]["vote_loss"])
self.best["box_loss"] = np.mean(self.log[phase]["box_loss"])
self.best["lang_acc"] = np.mean(self.log[phase]["lang_acc"])
self.best["ref_acc"] = np.mean(self.log[phase]["ref_acc"])
self.best["obj_acc"] = np.mean(self.log[phase]["obj_acc"])
self.best["pos_ratio"] = np.mean(self.log[phase]["pos_ratio"])
self.best["neg_ratio"] = np.mean(self.log[phase]["neg_ratio"])
self.best["iou_rate_0.25"] = np.mean(self.log[phase]["iou_rate_0.25"])
self.best["iou_rate_0.5"] = np.mean(self.log[phase]["iou_rate_0.5"])
# save model
self._log("saving best models...\n")
model_root = os.path.join(CONF.PATH.OUTPUT, self.stamp)
torch.save(self.model.state_dict(), os.path.join(model_root, "model.pth"))
det_cur_criterion = "max_iou_rate_0.5"
det_cur_best = np.mean(self.log[phase][det_cur_criterion])
if det_cur_best > self.best[det_cur_criterion]:
self.best["max_iou_rate_0.25"] = np.mean(self.log[phase]["max_iou_rate_0.25"])
self.best["max_iou_rate_0.5"] = np.mean(self.log[phase]["max_iou_rate_0.5"])
model_root = os.path.join(CONF.PATH.OUTPUT, self.stamp)
torch.save(self.model.state_dict(), os.path.join(model_root, "model_last.pth"))
def _dump_log(self, phase):
log = {
"loss": ["loss", "ref_loss", "lang_loss", "objectness_loss", "vote_loss", "box_loss"],
"score": ["lang_acc", "ref_acc", "obj_acc", "pos_ratio", "neg_ratio", "iou_rate_0.25", "iou_rate_0.5", "max_iou_rate_0.25", "max_iou_rate_0.5"]
}
for key in log:
for item in log[key]:
self._log_writer[phase].add_scalar(
"{}/{}".format(key, item),
np.mean([v for v in self.log[phase][item]]),
self._global_iter_id
)
def _finish(self, epoch_id):
# print best
self._best_report()
# save check point
self._log("saving checkpoint...\n")
save_dict = {
"epoch": epoch_id,
"model_state_dict": self.model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict()
}
checkpoint_root = os.path.join(CONF.PATH.OUTPUT, self.stamp)
torch.save(save_dict, os.path.join(checkpoint_root, "checkpoint.tar"))
# save model
self._log("saving last models...\n")
model_root = os.path.join(CONF.PATH.OUTPUT, self.stamp)
torch.save(self.model.state_dict(), os.path.join(model_root, "model_last.pth"))
# export
for phase in ["train", "val"]:
self._log_writer[phase].export_scalars_to_json(os.path.join(CONF.PATH.OUTPUT, self.stamp, "tensorboard/{}".format(phase), "all_scalars.json"))
def _train_report(self, epoch_id):
# compute ETA
fetch_time = self.log["train"]["fetch"]
forward_time = self.log["train"]["forward"]
backward_time = self.log["train"]["backward"]
eval_time = self.log["train"]["eval"]
iter_time = self.log["train"]["iter_time"]
mean_train_time = np.mean(iter_time)
mean_est_val_time = np.mean([fetch + forward for fetch, forward in zip(fetch_time, forward_time)])
eta_sec = (self._total_iter["train"] - self._global_iter_id - 1) * mean_train_time
eta_sec += len(self.dataloader["val"]) * np.ceil(self._total_iter["train"] / self.val_step) * mean_est_val_time
eta = decode_eta(eta_sec)
# print report
iter_report = self.__iter_report_template.format(
epoch_id=epoch_id + 1,
iter_id=self._global_iter_id + 1,
total_iter=self._total_iter["train"],
train_loss=round(np.mean([v for v in self.log["train"]["loss"]]), 5),
train_ref_loss=round(np.mean([v for v in self.log["train"]["ref_loss"]]), 5),
train_lang_loss=round(np.mean([v for v in self.log["train"]["lang_loss"]]), 5),
train_objectness_loss=round(np.mean([v for v in self.log["train"]["objectness_loss"]]), 5),
train_vote_loss=round(np.mean([v for v in self.log["train"]["vote_loss"]]), 5),
train_box_loss=round(np.mean([v for v in self.log["train"]["box_loss"]]), 5),
train_lang_acc=round(np.mean([v for v in self.log["train"]["lang_acc"]]), 5),
train_ref_acc=round(np.mean([v for v in self.log["train"]["ref_acc"]]), 5),
train_obj_acc=round(np.mean([v for v in self.log["train"]["obj_acc"]]), 5),
train_pos_ratio=round(np.mean([v for v in self.log["train"]["pos_ratio"]]), 5),
train_neg_ratio=round(np.mean([v for v in self.log["train"]["neg_ratio"]]), 5),
train_iou_rate_25=round(np.mean([v for v in self.log["train"]["iou_rate_0.25"]]), 5),
train_iou_rate_5=round(np.mean([v for v in self.log["train"]["iou_rate_0.5"]]), 5),
train_iou_max_rate_25=round(np.mean([v for v in self.log["train"]["max_iou_rate_0.25"]]), 5),
train_iou_max_rate_5=round(np.mean([v for v in self.log["train"]["max_iou_rate_0.5"]]), 5),
mean_fetch_time=round(np.mean(fetch_time), 5),
mean_forward_time=round(np.mean(forward_time), 5),
mean_backward_time=round(np.mean(backward_time), 5),
mean_eval_time=round(np.mean(eval_time), 5),
mean_iter_time=round(np.mean(iter_time), 5),
eta_h=eta["h"],
eta_m=eta["m"],
eta_s=eta["s"]
)
self._log(iter_report)
def _epoch_report(self, epoch_id):
self._log("epoch [{}/{}] done...".format(epoch_id+1, self.epoch))
self._log_eval("epoch [{}/{}] done...".format(epoch_id + 1, self.epoch))
epoch_report = self.__epoch_report_template.format(
train_loss=round(np.mean([v for v in self.log["train"]["loss"]]), 5),
train_ref_loss=round(np.mean([v for v in self.log["train"]["ref_loss"]]), 5),
train_lang_loss=round(np.mean([v for v in self.log["train"]["lang_loss"]]), 5),
train_objectness_loss=round(np.mean([v for v in self.log["train"]["objectness_loss"]]), 5),
train_vote_loss=round(np.mean([v for v in self.log["train"]["vote_loss"]]), 5),
train_box_loss=round(np.mean([v for v in self.log["train"]["box_loss"]]), 5),
train_lang_acc=round(np.mean([v for v in self.log["train"]["lang_acc"]]), 5),
train_ref_acc=round(np.mean([v for v in self.log["train"]["ref_acc"]]), 5),
train_obj_acc=round(np.mean([v for v in self.log["train"]["obj_acc"]]), 5),
train_pos_ratio=round(np.mean([v for v in self.log["train"]["pos_ratio"]]), 5),
train_neg_ratio=round(np.mean([v for v in self.log["train"]["neg_ratio"]]), 5),
train_iou_rate_25=round(np.mean([v for v in self.log["train"]["iou_rate_0.25"]]), 5),
train_iou_rate_5=round(np.mean([v for v in self.log["train"]["iou_rate_0.5"]]), 5),
train_max_iou_rate_25=round(np.mean([v for v in self.log["train"]["max_iou_rate_0.25"]]), 5),
train_max_iou_rate_5=round(np.mean([v for v in self.log["train"]["max_iou_rate_0.5"]]), 5),
val_loss=round(np.mean([v for v in self.log["val"]["loss"]]), 5),
val_ref_loss=round(np.mean([v for v in self.log["val"]["ref_loss"]]), 5),
val_lang_loss=round(np.mean([v for v in self.log["val"]["lang_loss"]]), 5),
val_objectness_loss=round(np.mean([v for v in self.log["val"]["objectness_loss"]]), 5),
val_vote_loss=round(np.mean([v for v in self.log["val"]["vote_loss"]]), 5),
val_box_loss=round(np.mean([v for v in self.log["val"]["box_loss"]]), 5),
val_lang_acc=round(np.mean([v for v in self.log["val"]["lang_acc"]]), 5),
val_ref_acc=round(np.mean([v for v in self.log["val"]["ref_acc"]]), 5),
val_obj_acc=round(np.mean([v for v in self.log["val"]["obj_acc"]]), 5),
val_pos_ratio=round(np.mean([v for v in self.log["val"]["pos_ratio"]]), 5),
val_neg_ratio=round(np.mean([v for v in self.log["val"]["neg_ratio"]]), 5),
val_iou_rate_25=round(np.mean([v for v in self.log["val"]["iou_rate_0.25"]]), 5),
val_iou_rate_5=round(np.mean([v for v in self.log["val"]["iou_rate_0.5"]]), 5),
val_max_iou_rate_25=round(np.mean([v for v in self.log["val"]["max_iou_rate_0.25"]]), 5),
val_max_iou_rate_5=round(np.mean([v for v in self.log["val"]["max_iou_rate_0.5"]]), 5),
)
self._log(epoch_report)
self._log_eval(epoch_report)
def _best_report(self):
self._log("training completed...")
best_report = self.__best_report_template.format(
epoch=self.best["epoch"],
loss=round(self.best["loss"], 5),
ref_loss=round(self.best["ref_loss"], 5),
lang_loss=round(self.best["lang_loss"], 5),
objectness_loss=round(self.best["objectness_loss"], 5),
vote_loss=round(self.best["vote_loss"], 5),
box_loss=round(self.best["box_loss"], 5),
lang_acc=round(self.best["lang_acc"], 5),
ref_acc=round(self.best["ref_acc"], 5),
obj_acc=round(self.best["obj_acc"], 5),
pos_ratio=round(self.best["pos_ratio"], 5),
neg_ratio=round(self.best["neg_ratio"], 5),
iou_rate_25=round(self.best["iou_rate_0.25"], 5),
iou_rate_5=round(self.best["iou_rate_0.5"], 5),
)
self._log(best_report)
with open(os.path.join(CONF.PATH.OUTPUT, self.stamp, "best.txt"), "w") as f:
f.write(best_report)
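# --- Usage sketch (illustrative only, not part of the original file) ---
# How a training run might be wired up with the Solver above; the model,
# config and dataloader objects below are hypothetical placeholders that
# would normally be built elsewhere in the pipeline.
#
# solver = Solver(
#     model=model, config=dataset_config,
#     dataloader={"train": train_loader, "val": val_loader},
#     optimizer=torch.optim.Adam(model.parameters(), lr=1e-3),
#     stamp="2021-01-01_00-00-00", val_step=2000,
#     lr_decay_step=[80, 120], lr_decay_rate=0.1,
#     bn_decay_step=20, bn_decay_rate=0.5,
# )
# solver(epoch=150, verbose=10)   # __call__ runs the train / val loop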
|
PypiClean
|
/tensorflow_ascend-1.15.0-cp37-cp37m-manylinux2014_aarch64.whl/tensorflow_core/python/training/tensorboard_logging.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import tf_logging as logging
DEBUG = 'DEBUG'
INFO = 'INFO'
WARN = 'WARN'
ERROR = 'ERROR'
FATAL = 'FATAL'
# Messages with levels below this verbosity will not be logged.
_verbosity = WARN
# A value meaning 'not set yet' so we can use None to mean 'user actively told
# us they don't want a SummaryWriter'.
_sentinel_summary_writer = object()
# The SummaryWriter instance to use when logging, or None to not log, or
# _sentinel_summary_writer to indicate that the user hasn't called
# set_summary_writer yet.
_summary_writer = _sentinel_summary_writer
# Map from the tensorboard_logging logging enum values to the proto's enum
# values.
_LEVEL_PROTO_MAP = {
DEBUG: event_pb2.LogMessage.DEBUGGING,
INFO: event_pb2.LogMessage.INFO,
WARN: event_pb2.LogMessage.WARN,
ERROR: event_pb2.LogMessage.ERROR,
FATAL: event_pb2.LogMessage.FATAL,
}
# Map from the tensorboard_logging module levels to the logging module levels.
_PLATFORM_LOGGING_LEVEL_MAP = {
DEBUG: logging.DEBUG,
INFO: logging.INFO,
WARN: logging.WARN,
ERROR: logging.ERROR,
FATAL: logging.FATAL
}
def get_verbosity():
return _verbosity
def set_verbosity(verbosity):
_check_verbosity(verbosity)
global _verbosity
_verbosity = verbosity
def _check_verbosity(verbosity):
if verbosity not in _LEVEL_PROTO_MAP:
raise ValueError('Level %s is not a valid tensorboard_logging level' %
verbosity)
def set_summary_writer(summary_writer):
"""Sets the summary writer that events will be logged to.
Calling any logging methods inside this module without calling this method
will fail. If you don't want to log, call `set_summary_writer(None)`.
Args:
summary_writer: Either a SummaryWriter or None. None will cause messages not
to be logged to any SummaryWriter, but they will still be passed to the
platform logging module.
"""
global _summary_writer
_summary_writer = summary_writer
def _clear_summary_writer():
"""Makes all subsequent log invocations error.
This is only used for testing. If you want to disable TensorBoard logging,
call `set_summary_writer(None)` instead.
"""
global _summary_writer
_summary_writer = _sentinel_summary_writer
def log(level, message, *args):
"""Conditionally logs `message % args` at the level `level`.
Note that tensorboard_logging verbosity and logging verbosity are separate;
the message will always be passed through to the logging module regardless of
whether it passes the tensorboard_logging verbosity check.
Args:
level: The verbosity level to use. Must be one of
tensorboard_logging.{DEBUG, INFO, WARN, ERROR, FATAL}.
message: The message template to use.
*args: Arguments to interpolate to the message template, if any.
Raises:
ValueError: If `level` is not a valid logging level.
RuntimeError: If the `SummaryWriter` to use has not been set.
"""
if _summary_writer is _sentinel_summary_writer:
raise RuntimeError('Must call set_summary_writer before doing any '
'logging from tensorboard_logging')
_check_verbosity(level)
proto_level = _LEVEL_PROTO_MAP[level]
if proto_level >= _LEVEL_PROTO_MAP[_verbosity]:
log_message = event_pb2.LogMessage(level=proto_level,
message=message % args)
event = event_pb2.Event(wall_time=time.time(), log_message=log_message)
if _summary_writer:
_summary_writer.add_event(event)
logging.log(_PLATFORM_LOGGING_LEVEL_MAP[level], message, *args)
def debug(message, *args):
log(DEBUG, message, *args)
def info(message, *args):
log(INFO, message, *args)
def warn(message, *args):
log(WARN, message, *args)
def error(message, *args):
log(ERROR, message, *args)
def fatal(message, *args):
log(FATAL, message, *args)
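# --- Usage sketch (illustrative only, not part of TensorFlow) ---
# A SummaryWriter (or an explicit None) must be registered before any of the
# logging helpers above are called; the log directory below is hypothetical.
#
# import tensorflow as tf
# writer = tf.summary.FileWriter("/tmp/logdir")   # TF 1.x summary writer
# set_summary_writer(writer)
# set_verbosity(INFO)
# info("step %d finished", 100)   # written to the event file and to tf_logging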
|
PypiClean
|
/play3d-0.1.5.tar.gz/play3d-0.1.5/README.md
|
3D Playground - on Python from scratch.
=====================================

[](https://github.com/timabilov/python-play3d/actions/workflows/python-package.yml)
[](https://pypi.python.org/pypi/play3d/)
#### TL;DR: Basic 3D world playground with animations and a [camera](#camera-keys-example), completely from scratch (only 2D pixels).
This implementation / API is for demonstration and *playground* purposes only, based on [Perspective projection](https://en.wikipedia.org/wiki/3D_projection#Perspective_projection).
It can be used on top of **any** 2D graphics engine/lib (frame buffers, SDL, etc.).
Features not implemented, for performance reasons:
* Face clipping (vertex clipping is ignored too)
* Flat shading and Gouraud shading
* Z-buffering
The `models.Model` API is an open demonstration of the [MVP](https://stackoverflow.com/questions/5550620/the-purpose-of-model-view-projection-matrix) model and is definitely a good starting point/topic for 3D graphics.
You can also plot any function on the 3D scene.
* [Install](#install)
* [How to use](#how-to-use)
* [Model View Projection](#model-view-projection)
* [Projection](#projection)
* [Camera](#world-camera)
* [Camera scene example](#camera-keys-example)
* [Mesh and Wireframe](#mesh-and-wireframe)
* [Rasterization](#rasterization)
* [3D Plotting](#3d-plotting)
* [Basic Wavefront .obj format support](#obj-format)
* [Model API](#models-api)
* [Trajectory API](#trajectory-api)
* [Pygame Example](#pygame-example)
## Install
```
pip install play3d
```
## How to use
There is only one requirement - to provide a 2D pixel and line renderer (drawer).
As the current example uses `pygame`:
```
pip install pygame==2.0.1 # recommended version
# You have to install sdl lib separately
# for Mac OS:
brew install sdl2 sdl2_gfx sdl2_image sdl2_mixer sdl2_net sdl2_ttf
```
```python
from play3d.three_d import Device
import pygame
# our adapter will rely on pygame renderer
put_pixel = lambda x, y, color: pygame.draw.circle(screen, color, (x, y), 1)
# we certainly could draw lines ourselves with put_pixel and three_d.drawline,
# but the implementation below is much faster
x, y = 0, 1  # point index aliases used by the line adapter
line_adapter = lambda p1, p2, color: pygame.draw.line(screen, color, (p1[x], p1[y]), (p2[x], p2[y]), 1)
width, height = 1024, 768 # should be same as 2D provider
Device.viewport(width, height)
Device.set_renderer(put_pixel, line_adapter)
screen = pygame.display.set_mode(Device.get_resolution())
```
That's all we need to set up the environment.
Now we can create and render model objects by calling `Model.draw()` at each frame update (see [Example](#pygame-example)).\
To create a model you can simply pass the 3D world vertices as a 2-d list: `Model(data=data)`.
It is possible to provide faces as a 2-d array: `Model(data=data, faces=faces)`. Face indices start from 1. Only triangles are supported. For more information see below.
By providing a list of 3D (or 4D homogeneous, where w=1) `data` vertices, Model transforms these coordinates from 3D world space to projected screen space.
```python
from play3d.models import Model
# our 2D library renderer setup.. See above.
# Cube model. Already built-in `models.Cube`
cube = Model(position=(0, 0, 0),
data=[
[-1, 1, 1, 1],
[1, 1, 1, 1],
[-1, -1, 1, 1],
[1, -1, 1, 1],
[-1, 1, -1, 1],
[1, 1, -1, 1],
[1, -1, -1, 1],
[-1, -1, -1, 1]
])
while True: # your render lib/method
cube.draw()
```
## Model View Projection
`models.Model` and `three_d.Camera` implement the full MVP pipeline (see `Model.draw`).
### Projection
Here we use a perspective projection matrix.\
The Z axis of the clipped cube (from the frustum) is mapped to [-1, 1] and our camera looks down the -z axis (OpenGL convention).\
The projection matrix can be tuned there (aspect ratio, FOV, etc.): \
```python
Camera.near = 1
Camera.far = 10
Camera.fov = 60
Camera.aspect_ratio = 3/4
```
### World camera
Following the OpenGL convention, we effectively move the scene rather than the camera.
The facing direction is taken into account when the camera moves after rotations (the direction vector is transformed too).\
Camera can be moved through `three_d.Camera` API:
```python
from play3d.three_d import Camera
camera = Camera.get_instance()
# move camera to x, y, z with 0.5 step considering facing direction
camera['x'] += 0.5
camera['y'] += 0.5
camera['z'] += 0.5
camera.move(0.5, 0.5, 0.5) # identical above
# rotate camera to our left on XZ plane
camera.rotate('y', 2) #
```
#### Camera keys example

## Mesh and Wireframe
To use a mesh, provide both `data` and `faces`. Each face is a triple of vertex indices referencing `data`. Face indices start from 1.\
By default the object is rendered as a wireframe.
```python
from play3d.models import Model
triangle = Model(position=(-5, 3, -4),
data=[
[-3, 1, -7, 1],
[-2, 2, -7, 1],
[-1, 0, -7, 1],
], faces=[[1, 2, 3]])
```

## Rasterization
By default, if data and faces are provided, rasterization is enabled.\
For rasterization we use the standard slope algorithm with horizontal filling lines.
```python
from play3d.models import Model
white = (230, 230, 230)
suzanne = Model.load_OBJ('suzanne.obj.txt', position=(-4, 2, -6), color=white, rasterize=True)
suzanne_wireframe = Model.load_OBJ('suzanne.obj.txt', position=(-4, 2, -6), color=white)
suzanne.rotate(0, -14)
suzanne_wireframe.rotate(0, 14)
```

## 3D plotting
You can plot any function you want by providing a parametric equation as `func(*parameters) -> [x, y, z]`.
For example, a sphere and an awesome wave, via both polar and parametric equations (the sphere is built in as `Models.Sphere`):
```python
import math
from play3d.models import Plot
def fn(phi, theta):
return [
math.sin(phi * math.pi / 180) * math.cos(theta * math.pi / 180),
math.sin(theta * math.pi / 180) * math.sin(phi * math.pi / 180),
math.cos(phi * math.pi / 180)
]
sphere_model = Plot(func=fn, allrange=[0, 360], position=(-4, 2, 1), color=(0, 64, 255))
blow_your_head = Plot(
position=(-4, 2, 1), color=(0, 64, 255),
func=lambda x, t: [x, math.cos(x) * math.cos(t), math.cos(t)], allrange=[0, 2*math.pi], interpolate=75
)
```

## OBJ format
The Wavefront format is widely used as a standard in 3D graphics.
You can import your model here; only vertices and faces are supported.\
`Model.load_OBJ(cls, path_or_url, wireframe=False, **all_model_kwargs)`
You can find examples here [github.com/alecjacobson/common-3d-test-models](https://github.com/alecjacobson/common-3d-test-models)
You might have to normalize (scale, etc.) each `.obj` sample differently.
```python
Model.load_OBJ('beetle.obj.txt', wireframe=True, color=white, position=(-2, 2, -4), scale=3)
```

## Models API
`Models.Model`
| Fields | Description |
| ------------- | ------------- |
| `position` | `tuple=(0, 0, 0)` with x, y, z world coordinates |
| `scale` | `integer(=1)` |
| `color` | `tuple` `(255, 255, 255)` |
| `data` | `list[[x, y, z, [w=1]]]` - Model vertices(points) |
| `faces` | `list[[A, B, C]]` - Defines triangles See: [Mesh and Wireframe](#mesh-and-wireframe) |
| `rasterize` | `bool(=True)` - Rasterize - "fill" an object |
| `shimmering` | `bool(=False)` - color flickering/dancing |
```python
# Initial Model Matrix
model.matrix = Matrix([
[1 * scale, 0, 0, 0],
[0, 1 * scale, 0, 0],
[0, 0, 1 * scale, 0],
[*position, 1]
])
```
### Methods
* `model_obj @ translate(x, y, z)`
translates object's model matrix (in world space)
## Trajectory API
`Models.Trajectory`
| Fields | Description |
| ------------- | ------------- |
| `func` | `func` Parametrized math function which takes `*args` and returns world respective coordinates `tuple=(x, y, z)` |
To move our object along a defined path, we can build a Trajectory for it.
You can provide any parametric equation with args.\
World coordinates are defined by the `func(*args)` tuple output.
### Methods
* `rotate(self, angle_x, angle_y=0, angle_z=0)`
Rotates object relative to particular axis plane. First object translated from the world space back to local origin, then we rotate the object
* `route(self, trajectory: 'Trajectory', enable_trace=False)`
Set the function-based trajectory routing for the object.
- trajectory `Trajectory` - trajectory state
- enable_trace `bool` - Keep track of i.e. draw trajectory path (breadcrumbs)
#### Example
```python
import math
from play3d.models import Sphere, Trajectory
white = (230, 230, 230)
moving_sphere = Sphere(position=(1, 3, -5), color=white, interpolate=50)
moving_sphere.route(Trajectory.ToAxis.Z(speed=0.02).backwards())
whirling_sphere = Sphere(position=(1, 3, -5), color=white, interpolate=50)
# Already built-in as Trajectory.SineXY(speed=0.1)
whirling_sphere.route(Trajectory(lambda x: [x, math.sin(x)], speed=0.1))
while True: # inside your "render()"
moving_sphere.draw()
whirling_sphere.draw()
```
## Pygame example
```python
import logging
import sys
import pygame
from play3d.models import Model, Grid
from pygame_utils import handle_camera_with_keys # custom keyboard handling - moving camera
from play3d.three_d import Device, Camera
from play3d.utils import capture_fps
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
black, white = (20, 20, 20), (230, 230, 230)
Device.viewport(1024, 768)
pygame.init()
screen = pygame.display.set_mode(Device.get_resolution())
# just for simplicity - array access, we should avoid that
x, y, z = 0, 1, 2
# pygame sdl line is faster than default one
line_adapter = lambda p1, p2, color: pygame.draw.line(screen, color, (p1[x], p1[y]), (p2[x], p2[y]), 1)
put_pixel = lambda x, y, color: pygame.draw.circle(screen, color, (x, y), 1)
Device.set_renderer(put_pixel, line_renderer=line_adapter)
grid = Grid(color=(30, 140, 200), dimensions=(30, 30))
# be aware of different scaling of .obj samples. Only vertices and faces supported!
suzanne = Model.load_OBJ(
'https://raw.githubusercontent.com/OpenGLInsights/OpenGLInsightsCode/master/Chapter%2026%20Indexing%20Multiple%20Vertex%20Arrays/article/suzanne.obj',
position=(3, 2, -7), color=white, rasterize=True)
beetle = Model.load_OBJ(
'https://raw.githubusercontent.com/alecjacobson/common-3d-test-models/master/data/beetle.obj',
wireframe=False, color=white, position=(0, 2, -11), scale=3)
beetle.rotate(0, 45, 50)
camera = Camera.get_instance()
# move our camera up and back a bit, from origin
camera.move(y=1, z=2)
@capture_fps
def frame():
if pygame.event.get(pygame.QUIT):
sys.exit(0)
screen.fill(black)
handle_camera_with_keys() # to move our camera - walk, can be ignored
grid.draw()
beetle.draw()
suzanne.rotate(0, 1, 0).draw()
pygame.display.flip()
while True:
frame()
```
|
PypiClean
|
/jupyterhub_url_sharing-0.1.0.tar.gz/jupyterhub_url_sharing-0.1.0/node_modules/enquirer/lib/types/boolean.js
|
'use strict';
const Prompt = require('../prompt');
const { isPrimitive, hasColor } = require('../utils');
class BooleanPrompt extends Prompt {
constructor(options) {
super(options);
this.cursorHide();
}
async initialize() {
let initial = await this.resolve(this.initial, this.state);
this.input = await this.cast(initial);
await super.initialize();
}
dispatch(ch) {
if (!this.isValue(ch)) return this.alert();
this.input = ch;
return this.submit();
}
format(value) {
let { styles, state } = this;
return !state.submitted ? styles.primary(value) : styles.success(value);
}
cast(input) {
return this.isTrue(input);
}
isTrue(input) {
return /^[ty1]/i.test(input);
}
isFalse(input) {
return /^[fn0]/i.test(input);
}
isValue(value) {
return isPrimitive(value) && (this.isTrue(value) || this.isFalse(value));
}
async hint() {
if (this.state.status === 'pending') {
let hint = await this.element('hint');
if (!hasColor(hint)) {
return this.styles.muted(hint);
}
return hint;
}
}
async render() {
let { input, size } = this.state;
let prefix = await this.prefix();
let sep = await this.separator();
let msg = await this.message();
let hint = this.styles.muted(this.default);
let promptLine = [prefix, msg, hint, sep].filter(Boolean).join(' ');
this.state.prompt = promptLine;
let header = await this.header();
let value = this.value = this.cast(input);
let output = await this.format(value);
let help = (await this.error()) || (await this.hint());
let footer = await this.footer();
if (help && !promptLine.includes(help)) output += ' ' + help;
promptLine += ' ' + output;
this.clear(size);
this.write([header, promptLine, footer].filter(Boolean).join('\n'));
this.restore();
}
set value(value) {
super.value = value;
}
get value() {
return this.cast(super.value);
}
}
module.exports = BooleanPrompt;
|
PypiClean
|
/Photini-2023.7.1-py3-none-any.whl/photini/googlemap.py
|
import locale
import logging
import requests
from photini.configstore import key_store
from photini.photinimap import GeocoderBase, PhotiniMap
from photini.pyqt import Busy, Qt, QtCore, QtWidgets, scale_font
from photini.widgets import Label
logger = logging.getLogger(__name__)
translate = QtCore.QCoreApplication.translate
class GoogleGeocoder(GeocoderBase):
api_key = key_store.get('googlemap', 'api_key')
interval = 50
def query(self, params, url):
params['key'] = self.api_key
with Busy():
self.rate_limit()
try:
rsp = requests.get(url, params=params, timeout=5)
except Exception as ex:
logger.error(str(ex))
return []
rsp = rsp.json()
if rsp['status'] != 'OK':
if 'error_message' in rsp:
logger.error(
'Search error: %s: %s', rsp['status'], rsp['error_message'])
else:
logger.error('Search error: %s', rsp['status'])
return []
results = rsp['results']
if not results:
logger.error('No results found')
return []
return results
def get_altitude(self, coords):
params = {'locations': '{:.5f},{:.5f}'.format(*coords)}
results = self.cached_query(
params, 'https://maps.googleapis.com/maps/api/elevation/json')
if results:
return results[0]['elevation']
return None
def search(self, search_string, bounds=None):
params = {'address': search_string}
lang, encoding = locale.getdefaultlocale()
if lang:
params['language'] = lang
if bounds:
north, east, south, west = bounds
params['bounds'] = '{:.4f},{:.4f}|{:.4f},{:.4f}'.format(
south, west, north, east)
for result in self.cached_query(
params, 'https://maps.googleapis.com/maps/api/geocode/json'):
bounds = result['geometry']['viewport']
yield (bounds['northeast']['lat'], bounds['northeast']['lng'],
bounds['southwest']['lat'], bounds['southwest']['lng'],
result['formatted_address'])
def search_terms(self):
widget = Label(translate(
'MapTabGoogle', 'Search and altitude lookup powered by Google',
'Do not translate "powered by Google"'), lines=2)
widget.setAlignment(Qt.AlignmentFlag.AlignRight)
scale_font(widget, 80)
return [widget]
class TabWidget(PhotiniMap):
api_key = key_store.get('googlemap', 'api_key')
@staticmethod
def tab_name():
return translate('MapTabGoogle', 'Map (&Google)')
def get_geocoder(self):
return GoogleGeocoder(parent=self)
def get_head(self):
url = 'http://maps.googleapis.com/maps/api/js?callback=initialize'
if self.app.options.test:
url += '&v=beta'
url += '&key=' + self.api_key
lang, encoding = locale.getdefaultlocale()
if lang:
language, sep, region = lang.replace('_', '-').partition('-')
url += '&language=' + language
if region:
                url += '&region=' + region
return ''' <script type="text/javascript"
src="{}" async>
</script>'''.format(url)
|
PypiClean
|
/django-atc-demo-ui-0.1.6.tar.gz/django-atc-demo-ui-0.1.6/atc_demo_ui/static/js/atc-auth.js
|
/**
 * Copyright (c) 2014, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
var TokenFrame = React.createClass({
getInitialState: function() {
this.oos_notified = false;
return {
token: null,
valid_until: null,
};
},
componentDidMount: function() {
this.getToken();
this.interval = setInterval(this.getToken, 3000);
},
componentWillUnmount: function() {
if (this.interval != null) {
clearInterval(this.interval);
}
},
getToken: function() {
this.props.client.getToken(function (result) {
if (result.status >= 200 && result.status < 300) {
valid_until = new Date(result.json.valid_until*1000).toLocaleTimeString();
if (result.json.valid_until - Math.floor(new Date().getTime() / 1000) < 0) {
if (!this.oos_notified) {
this.props.notify("warn", "The time on the ATC server is out of sync.");
this.oos_notified = true;
}
}
this.setState({
token: result.json,
});
} else {
this.props.notify("error", "Could not fetch current token: " + result.json);
this.setState({
token: null,
});
}
}.bind(this));
},
render: function() {
if (this.state.token == null) {
return null;
}
return (
<div className="col-md-6">
<div>
<h4>This Machine's Token: <b>{this.state.token.token}</b></h4>
<b>Valid Until:</b> {valid_until}
          <h4>This Machine's Address: {this.state.token.address}</h4>
</div>
</div>
);
},
});
var AuthFrame = React.createClass({
getInitialState: function() {
return {
auth: null,
token: null,
address: null,
};
},
componentDidMount: function() {
this.getAuthInfo();
},
updateToken: function(event) {
this.setState({token: event.target.value});
},
updateAddress: function(event) {
this.setState({address: event.target.value});
},
getAuthInfo: function() {
this.props.client.getAuthInfo(function (result) {
if (result.status >= 200 && result.status < 300) {
this.setState({
auth: result.json,
address: result.json.address,
});
} else {
this.props.notify("error", "Could not fetch auth info: " + result.json);
this.setState({
auth: null,
address: null,
});
}
}.bind(this));
},
updateAuth: function() {
var failed = false;
if (this.state.address == null || this.state.address == "") {
this.props.notify("error", "You must enter an address");
failed = true;
}
if (this.state.token == null || this.state.token == "") {
this.props.notify("error", "You must enter a token");
failed = true;
}
if (failed) {
return;
}
this.props.client.updateAuthInfo(this.state.address, {token: Number(this.state.token)}, function(result) {
if (result.status >= 200 && result.status < 300) {
console.log("Authorizing:", result.json);
this.props.notify("success", "You can now shape " + result.json.controlled_ip);
} else {
this.props.notify("error", "Could not update auth info: ", result.json);
}
}.bind(this));
},
render: function() {
if (this.state.auth == null) {
return null;
}
var controlled_ips = null;
if (this.state.auth.controlled_ips.length > 0) {
controlled_ips = this.state.auth.controlled_ips.map(function (addr) {
return (
<li><pre><code>{addr}</code></pre></li>
);
});
controlled_ips = (
<ul>{controlled_ips}</ul>
);
} else {
controlled_ips = (
<i>No Controlled Machines</i>
);
}
return (
<div className="col-md-6">
<div>
<h4>Machines You Can Shape:</h4>
{controlled_ips}
<p>
<b>Note:</b> A machine is always allowed to shape itself.
</p>
<h4>Authorize a New Machine:</h4>
<label className="control-label">Address:</label>
<input type="text" className="form-control" placeholder="127.0.0.1" onChange={this.updateAddress}/>
<label className="control-label">Token:</label>
<input type="number" className="form-control" placeholder="12345" onChange={this.updateToken}/>
<button className="btn btn-success" onClick={this.updateAuth}>Authorize</button>
</div>
</div>
);
},
});
var AuthPanel = React.createClass({
render: function() {
return (
<div className="panel-group" id="accordion3" role="tablist" aria-multiselectable="false">
<div className="panel panel-default">
<div className="panel-heading" data-toggle="collapse" data-parent="#accordion3" href="#collapseAuth" aria-expanded="false" aria-controls="collapseAuth">
<h4 className="panel-title">
Authentication
</h4>
</div>
<div id="collapseAuth" className="panel-collapse collapse" role="tabpanel">
<div className="panel-body">
<div className="row">
<AuthFrame client={this.props.client} notify={this.props.notify} />
<TokenFrame client={this.props.client} notify={this.props.notify} />
</div>
</div>
</div>
</div>
</div>
);
}
})
|
PypiClean
|
/EModelRunner-1.1.16.tar.gz/EModelRunner-1.1.16/emodelrunner/create_recordings.py
|
# Copyright 2020-2022 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bluepyopt import ephys
from emodelrunner.synapses.recordings import SynapseRecordingCustom
def get_pairsim_recordings(
soma_loc, syn_locs, synrecs, presyn_prot_name, postsyn_prot_name
):
"""Return the precell and the postcell recordings for a pair simulation.
Args:
soma_loc (bluepyopt.ephys.locations.NrnSeclistCompLocation):
location of the soma of the pre-synaptic cell
syn_locs (list of bluepyopt.ephys.locations.NrnPointProcessLocation):
location of synapses of the post-synaptic cell
synrecs (list of str): the extra synapse variables to record
presyn_prot_name (str): presynaptic protocol name
postsyn_prot_name (str): postsynaptic protocol name
Returns:
a tuple containing
- list of recordings: presynaptic recordings
- list of recordings: postsynaptic recordings
"""
presyn_rec = ephys.recordings.CompRecording(
name=presyn_prot_name, location=soma_loc, variable="v"
)
presyn_recs = [presyn_rec]
postsyn_rec = ephys.recordings.CompRecording(
name=postsyn_prot_name, location=soma_loc, variable="v"
)
postsyn_recs = [postsyn_rec]
for syn_loc in syn_locs:
for synrec in synrecs:
postsyn_recs.append(
SynapseRecordingCustom(name=synrec, location=syn_loc, variable=synrec)
)
return (presyn_recs, postsyn_recs)
|
PypiClean
|
/ChemDataExtractor_c-1.0.0-py3-none-any.whl/chemdataextractor/parse/table.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import re
from lxml.builder import E
from .common import delim
from ..utils import first
from ..model import Compound, UvvisSpectrum, UvvisPeak, QuantumYield, FluorescenceLifetime, MeltingPoint, GlassTransition
from ..model import ElectrochemicalPotential, IrSpectrum, IrPeak
from .actions import join, merge, fix_whitespace
from .base import BaseParser
from .cem import chemical_label, label_before_name, chemical_name, chemical_label_phrase, solvent_name, lenient_chemical_label
from .elements import R, I, W, Optional, ZeroOrMore, Any, OneOrMore, Start, End, Group, Not
log = logging.getLogger(__name__)
delims = ZeroOrMore(delim)
minus = R('^[\-–−‒]$')
name_blacklist = R('^([\d\.]+)$')
#: Compound identifier column heading
compound_heading = R('(^|\b)(comp((oun)?d)?|molecule|ligand|oligomer|complex|dye|porphyrin|substance|sample|material|catalyst|acronym|isomer|(co)?polymer|chromophore|species|quinone|ether|diene|adduct|acid|radical|monomer|amine|analyte|product|system|(photo)?sensitiser|phthalocyanine|MPc)(e?s)?($|\b)', re.I)
solvent_heading = R('(^|\b)(solvent)s?($|\b)', re.I)
solvent_in_heading = Group(solvent_name)('cem')
solvent_cell = Group(solvent_name | chemical_name)('cem')
compound_cell = Group(
(Start() + chemical_label + End())('cem') |
(Start() + lenient_chemical_label + End())('cem') |
chemical_label_phrase('cem') |
(Not(Start() + OneOrMore(name_blacklist) + End()) + OneOrMore(Any())('name').add_action(join).add_action(fix_whitespace) + Optional(W('(').hide() + chemical_label + W(')').hide()))('cem') |
label_before_name
)('cem_phrase')
uvvis_emi_title = (
I('emission') + R('max(ima)?') |
W('λ') + Optional(I('max')) + Optional(W(',')) + R('em(i(ssion)?)?', re.I) |
R('em(i(ssion)?)?', re.I) + W('λ') + Optional(I('max')) + Optional(W(','))
)
uvvis_abs_title = (
I('absorption') + R('max(ima)?') |
W('λ') + OneOrMore(R('^(a|sol)?max$', re.I) | R('abs(or[bp]tion)?', re.I) | I('a') | W(',')) |
R('uv([-/]?vis)?', re.I)
)
extinction_title = Optional(R('^10\d$') | W('10') + minus + R('^\d$')).hide() + W('ε') + Optional(I('max'))
uvvis_units = (W('nm') | R('^eV[\-–−‒]1$') | W('eV') + minus + W('1'))('uvvis_units').add_action(merge)
multiplier = Optional(I('×')) + (R('^10–?[34]$') | (W('10') + minus + R('^[345]$')))
extinction_units = (
(Optional(multiplier + delims) + (
        I('M') + minus + I('1') + I('cm') + minus + I('1') |
I('dm3') + I('mol') + minus + I('1') + I('cm') + minus + I('1') |
I('l') + I('mol') + minus + I('1') + I('cm') + minus + I('1') |
I('l') + I('cm') + minus + I('1') + I('mol') + minus + I('1')
)) | multiplier
)('extinction_units').add_action(join)
ir_title = (
R('^(FT-?)?IR$') + Optional(I('absorption'))
)
ir_units = Optional(W('/')).hide() + (
R('^\[?cm[-–−]1\]?$') |
W('cm') + R('^[-–−]$') + W('1')
)('ir_units').add_action(merge)
ir_heading = (OneOrMore(ir_title.hide()) + ZeroOrMore(delims.hide() + ir_units))('ir_heading')
ir_value = (R('^\d{3,5}(\.\d{1,2})?$'))('value')
peak_strength = R('^(sh(oulder)?|br(oad)?)$')('strength')
ir_peak = (
ir_value + Optional(W('(').hide()) + Optional(peak_strength) + Optional(W(')').hide())
)('ir_peak')
ir_cell = (
ir_peak + ZeroOrMore(W(',').hide() + ir_peak)
)('ir_cell')
# TODO: (photoluminescence|fluorescence) quantum yield
quantum_yield_title = (R('^(Φ|ϕ)(fl?|pl|ze|t|l|lum)?$', re.I) + Optional(R('^(fl?|pl|ze|t|l|lum)$', re.I)))('quantum_yield_type').add_action(merge) # + ZeroOrMore(Any())
quantum_yield_units = W('%')('quantum_yield_units')
quantum_yield_heading = Group(Start() + quantum_yield_title + delims.hide() + Optional(quantum_yield_units) + delims.hide() + End())('quantum_yield_heading')
quantum_yield_value = (Optional(R('^[~∼\<\>]$')) + ((W('10') + minus + R('^\d$')) | R('^(100(\.0+)?|\d\d?(\.\d+)?)$')) + Optional(W('±') + R('^\d+(\.\d+)?$')))('quantum_yield_value').add_action(merge)
quantum_yield_cell = (quantum_yield_value + Optional(quantum_yield_units))('quantum_yield_cell')
def split_uvvis_shape(tokens, start, result):
""""""
if result[0].text.endswith('sh') or result[0].text.endswith('br'):
result.append(E('shape', result[0].text[-2:]))
result[0].text = result[0].text[:-2]
uvvis_emi_heading = (OneOrMore(uvvis_emi_title.hide()))('uvvis_emi_heading')
uvvis_abs_heading = (OneOrMore(uvvis_abs_title.hide()) + ZeroOrMore(delims.hide() + (uvvis_units | extinction_title.hide() | extinction_units)))('uvvis_abs_heading')
uvvis_abs_disallowed = I('emission')
extinction_heading = (extinction_title.hide() + delims.hide() + Optional(extinction_units))('extinction_heading')
uvvis_value = (R('^\d{3,4}(\.\d{1,2})?(sh|br)?$'))('value').add_action(split_uvvis_shape)
peak_shape = R('^(sh(oulder)?|br(oad)?)$')('shape')
extinction_value = (
R('^\d+\.\d+$') + Optional(W('±') + R('^\d+\.\d+$')) + Optional(W('×') + R('10\d+')) | # Scientific notation
R('^\d{1,3}$') + R('^\d\d\d$') | # RSC often inserts spaces within values instead of commas
R('^\d{1,2},?\d{3,3}$')
)('extinction').add_action(merge)
uvvis_abs_emi_quantum_yield_heading = (
OneOrMore(uvvis_abs_title.hide()) +
Optional(Optional(delims.hide()) + uvvis_units('uvvis_abs_units') + Optional(delims.hide())) +
OneOrMore(uvvis_emi_title.hide()) +
Optional(Optional(delims.hide()) + uvvis_units + Optional(delims.hide())) +
Optional(delims.hide()) + quantum_yield_title.hide() + Optional(delims.hide()) +
Optional(Optional(delims.hide()) + quantum_yield_units + Optional(delims.hide()))
)('uvvis_emi_quantum_yield_heading')
uvvis_abs_emi_quantum_yield_cell = (
uvvis_value('uvvis_abs_value') + delims.hide() + uvvis_value + delims.hide() + quantum_yield_value + Optional(quantum_yield_units)
)('uvvis_emi_quantum_yield_cell')
uvvis_emi_quantum_yield_heading = (
OneOrMore(uvvis_emi_title.hide()) +
Optional(Optional(delims.hide()) + uvvis_units + Optional(delims.hide())) +
Optional(delims.hide()) + quantum_yield_title.hide() + Optional(delims.hide()) +
Optional(Optional(delims.hide()) + quantum_yield_units + Optional(delims.hide()))
)('uvvis_emi_quantum_yield_heading')
uvvis_emi_quantum_yield_cell = (
uvvis_value + delims.hide() + quantum_yield_value + Optional(quantum_yield_units)
)('uvvis_emi_quantum_yield_cell')
uvvis_abs_peak = (
uvvis_value + Optional(peak_shape) + Optional(W('(').hide() + extinction_value + W(')').hide())
)('uvvis_abs_peak')
uvvis_abs_cell = (
uvvis_abs_peak + ZeroOrMore(W(',').hide() + uvvis_abs_peak)
)('uvvis_abs_cell')
extinction_cell = (
extinction_value + ZeroOrMore(W(',').hide() + extinction_value)
)('uvvis_abs_cell')
uvvis_emi_peak = (
uvvis_value + Optional(peak_shape)
)('uvvis_emi_peak')
uvvis_emi_cell = (
uvvis_emi_peak + ZeroOrMore(W(',').hide() + uvvis_emi_peak)
)('uvvis_emi_cell')
fluorescence_lifetime_title = W('τ') + R('^(e|f|ave|avg|0)$', re.I)
fluorescence_lifetime_units = (W('ns') | W('μ') + W('s'))('fluorescence_lifetime_units').add_action(merge)
fluorescence_lifetime_heading = (fluorescence_lifetime_title.hide() + delims.hide() + Optional(fluorescence_lifetime_units))('fluorescence_lifetime_heading')
fluorescence_lifetime_value = (Optional(R('^[~∼\<\>]$')) + R('^\d+(\.\d+)?$'))('fluorescence_lifetime_value').add_action(merge)
fluorescence_lifetime_cell = (
fluorescence_lifetime_value + ZeroOrMore(W(',').hide() + fluorescence_lifetime_value)
)('fluorescence_lifetime_cell')
electrochemical_potential_title = ((R('^E(ox|red)1?$', re.I) | W('E') + R('^(ox|red)1?$')) + Optional(W('/') + W('2')))('electrochemical_potential_type').add_action(merge)
electrochemical_potential_units = (W('V'))('electrochemical_potential_units').add_action(merge)
electrochemical_potential_heading = (electrochemical_potential_title + delims.hide() + Optional(electrochemical_potential_units))('electrochemical_potential_heading')
electrochemical_potential_value = (Optional(R('^[~∼\<\>]$')) + Optional(minus) + R('^\d+(\.\d+)?$'))('electrochemical_potential_value').add_action(merge)
electrochemical_potential_cell = (
electrochemical_potential_value + ZeroOrMore(delims.hide() + electrochemical_potential_value)
)('electrochemical_potential_cell')
subject_phrase = ((I('of') | I('for')) + chemical_name)('subject_phrase')
solvent_phrase = (I('in') + (solvent_name | chemical_name))('solvent_phrase')
temp_range = (Optional(R('^[\-–−]$')) + (R('^[\+\-–−]?\d+(\.\d+)?[\-–−]\d+(\.\d+)?$') | (R('^[\+\-–−]?\d+(\.\d+)?$') + R('^[\-–−]$') + R('^[\+\-–−]?\d+(\.\d+)?$'))))('temperature').add_action(merge)
temp_value = (Optional(R('^[\-–−]$')) + R('^[\+\-–−]?\d+(\.\d+)?$') + Optional(W('±') + R('^\d+(\.\d+)?$')))('temperature').add_action(merge)
temp_word = (I('room') + R('^temp(erature)?$') | R('^r\.?t\.?$', re.I))('temperature').add_action(merge)
temp = (temp_range | temp_value | temp_word)('value')
temp_units = (W('°') + R('[CFK]') | W('K'))('units').add_action(merge)
temp_with_units = (temp + temp_units)('temp')
temp_with_optional_units = (temp + Optional(temp_units))('temp')
temp_phrase = (I('at') + temp_with_units)('temp_phrase')
melting_point_title = R('^T(melt|m\.p|m)$', re.I) | W('T') + R('^(melt|m\.p|m)?$')
melting_point_heading = (melting_point_title.hide() + delims.hide() + Optional(temp_units))('melting_point_heading')
melting_point_cell = (
temp_with_optional_units + ZeroOrMore(delims.hide() + temp_with_optional_units)
)('melting_point_cell')
glass_transition_title = R('^T(g\.)$', re.I) | W('T') + R('^(g\.)?$')
glass_transition_heading = (glass_transition_title.hide() + delims.hide() + Optional(temp_units))('glass_transition_heading')
glass_transition_cell = (
temp_with_optional_units + ZeroOrMore(delims.hide() + temp_with_optional_units)
)('glass_transition_cell')
caption_context = Group(subject_phrase | solvent_phrase | temp_phrase)('caption_context')
class CompoundHeadingParser(BaseParser):
""""""
root = compound_heading
def interpret(self, result, start, end):
""""""
yield Compound()
class SolventHeadingParser(BaseParser):
""""""
root = solvent_heading
def interpret(self, result, start, end):
""""""
yield Compound()
class UvvisAbsDisallowedHeadingParser(BaseParser):
""""""
root = uvvis_abs_disallowed
def interpret(self, result, start, end):
""""""
yield Compound()
class SolventInHeadingParser(BaseParser):
""""""
root = solvent_in_heading
def interpret(self, result, start, end):
""""""
c = Compound()
solvent = first(result.xpath('./name/text()'))
if solvent is not None:
context = {'solvent': solvent}
c.melting_points = [MeltingPoint(**context)]
c.glass_transitions = [GlassTransition(**context)]
c.quantum_yields = [QuantumYield(**context)]
c.fluorescence_lifetimes = [FluorescenceLifetime(**context)]
c.electrochemical_potentials = [ElectrochemicalPotential(**context)]
c.uvvis_spectra = [UvvisSpectrum(**context)]
if c.serialize():
yield c
class TempInHeadingParser(BaseParser):
""""""
root = temp_with_units
def interpret(self, result, start, end):
""""""
c = Compound()
context = {
'temperature': first(result.xpath('./value/text()')),
'temperature_units': first(result.xpath('./units/text()'))
}
c.quantum_yields = [QuantumYield(**context)]
c.fluorescence_lifetimes = [FluorescenceLifetime(**context)]
c.electrochemical_potentials = [ElectrochemicalPotential(**context)]
c.uvvis_spectra = [UvvisSpectrum(**context)]
yield c
class SolventCellParser(BaseParser):
""""""
root = solvent_cell
def interpret(self, result, start, end):
""""""
c = Compound()
solvent = first(result.xpath('./name/text()'))
if solvent is not None:
context = {'solvent': solvent}
c.melting_points = [MeltingPoint(**context)]
c.glass_transitions = [GlassTransition(**context)]
c.quantum_yields = [QuantumYield(**context)]
c.fluorescence_lifetimes = [FluorescenceLifetime(**context)]
c.electrochemical_potentials = [ElectrochemicalPotential(**context)]
c.uvvis_spectra = [UvvisSpectrum(**context)]
if c.serialize():
yield c
class CompoundCellParser(BaseParser):
""""""
root = compound_cell
def interpret(self, result, start, end):
for cem_el in result.xpath('./cem'):
c = Compound(
names=cem_el.xpath('./name/text()'),
labels=cem_el.xpath('./label/text()')
)
yield c
class UvvisEmiHeadingParser(BaseParser):
""""""
root = uvvis_emi_heading
def interpret(self, result, start, end):
""""""
uvvis_units = first(result.xpath('./uvvis_units/text()'))
c = Compound()
# TODO: Emission peaks
yield c
class UvvisAbsHeadingParser(BaseParser):
""""""
root = uvvis_abs_heading
def interpret(self, result, start, end):
""""""
uvvis_units = first(result.xpath('./uvvis_units/text()'))
extinction_units = first(result.xpath('./extinction_units/text()'))
c = Compound()
if uvvis_units or extinction_units:
c.uvvis_spectra.append(
UvvisSpectrum(peaks=[UvvisPeak(units=uvvis_units, extinction_units=extinction_units)])
)
yield c
class ExtinctionHeadingParser(BaseParser):
""""""
root = extinction_heading
def interpret(self, result, start, end):
""""""
extinction_units = first(result.xpath('./extinction_units/text()'))
c = Compound()
if extinction_units:
c.uvvis_spectra.append(
UvvisSpectrum(peaks=[UvvisPeak(extinction_units=extinction_units)])
)
yield c
class IrHeadingParser(BaseParser):
""""""
root = ir_heading
def interpret(self, result, start, end):
""""""
ir_units = first(result.xpath('./ir_units/text()'))
c = Compound()
if ir_units:
c.ir_spectra.append(
IrSpectrum(peaks=[IrPeak(units=ir_units)])
)
yield c
class IrCellParser(BaseParser):
""""""
root = ir_cell
def interpret(self, result, start, end):
""""""
c = Compound()
ir = IrSpectrum()
for peak in result.xpath('./ir_peak'):
ir.peaks.append(
IrPeak(
value=first(peak.xpath('./value/text()')),
strength=first(peak.xpath('./strength/text()'))
)
)
if ir.peaks:
c.ir_spectra.append(ir)
yield c
class QuantumYieldHeadingParser(BaseParser):
""""""
root = quantum_yield_heading
def interpret(self, result, start, end):
""""""
c = Compound(
quantum_yields=[
QuantumYield(
type=first(result.xpath('./quantum_yield_type/text()')),
units=first(result.xpath('./quantum_yield_units/text()'))
)
]
)
yield c
class QuantumYieldCellParser(BaseParser):
""""""
root = quantum_yield_cell
def interpret(self, result, start, end):
""""""
c = Compound()
qy = QuantumYield(
value=first(result.xpath('./quantum_yield_value/text()')),
units=first(result.xpath('./quantum_yield_units/text()'))
)
if qy.value:
c.quantum_yields.append(qy)
yield c
class UvvisEmiCellParser(BaseParser):
""""""
root = uvvis_emi_cell
def interpret(self, result, start, end):
""""""
# TODO: Emission peaks
return
yield
class UvvisAbsCellParser(BaseParser):
""""""
root = uvvis_abs_cell
def interpret(self, result, start, end):
""""""
c = Compound()
uvvis = UvvisSpectrum()
for peak in result.xpath('./uvvis_abs_peak'):
uvvis.peaks.append(
UvvisPeak(
value=first(peak.xpath('./value/text()')),
extinction=first(peak.xpath('./extinction/text()')),
shape=first(peak.xpath('./shape/text()'))
)
)
if uvvis.peaks:
c.uvvis_spectra.append(uvvis)
yield c
class ExtinctionCellParser(BaseParser):
""""""
root = extinction_cell
def interpret(self, result, start, end):
""""""
c = Compound()
uvvis = UvvisSpectrum()
for value in result.xpath('./extinction/text()'):
uvvis.peaks.append(
UvvisPeak(
extinction=value,
)
)
if uvvis.peaks:
c.uvvis_spectra.append(uvvis)
yield c
class UvvisAbsEmiQuantumYieldHeadingParser(BaseParser):
""""""
root = uvvis_abs_emi_quantum_yield_heading
def interpret(self, result, start, end):
""""""
c = Compound()
abs_units = first(result.xpath('./uvvis_abs_units/text()'))
if abs_units:
c.uvvis_spectra.append(
UvvisSpectrum(peaks=[UvvisPeak(units=abs_units)])
)
qy_units = first(result.xpath('./quantum_yield_units/text()'))
if qy_units:
c.quantum_yields.append(
QuantumYield(units=qy_units)
)
yield c
class UvvisAbsEmiQuantumYieldCellParser(BaseParser):
""""""
root = uvvis_abs_emi_quantum_yield_cell
def interpret(self, result, start, end):
""""""
c = Compound()
uvvis = UvvisSpectrum()
for value in result.xpath('./uvvis_abs_value/text()'):
uvvis.peaks.append(
UvvisPeak(
value=value,
)
)
if uvvis.peaks:
c.uvvis_spectra.append(uvvis)
qy = QuantumYield(
value=first(result.xpath('./quantum_yield_value/text()'))
)
if qy.value:
c.quantum_yields.append(qy)
if c.quantum_yields or c.uvvis_spectra:
yield c
class UvvisEmiQuantumYieldHeadingParser(BaseParser):
""""""
root = uvvis_emi_quantum_yield_heading
def interpret(self, result, start, end):
""""""
# Yield an empty compound to signal that the Parser matched
yield Compound()
class UvvisEmiQuantumYieldCellParser(BaseParser):
""""""
root = uvvis_emi_quantum_yield_cell
def interpret(self, result, start, end):
""""""
c = Compound()
qy = QuantumYield(
value=first(result.xpath('./quantum_yield_value/text()'))
)
if qy.value:
c.quantum_yields.append(qy)
yield c
class FluorescenceLifetimeHeadingParser(BaseParser):
""""""
root = fluorescence_lifetime_heading
def interpret(self, result, start, end):
""""""
fluorescence_lifetime_units = first(result.xpath('./fluorescence_lifetime_units/text()'))
c = Compound()
if fluorescence_lifetime_units:
c.fluorescence_lifetimes.append(
FluorescenceLifetime(units=fluorescence_lifetime_units)
)
yield c
class FluorescenceLifetimeCellParser(BaseParser):
""""""
root = fluorescence_lifetime_cell
def interpret(self, result, start, end):
""""""
c = Compound()
fl = FluorescenceLifetime(
value=first(result.xpath('./fluorescence_lifetime_value/text()'))
)
if fl.value:
c.fluorescence_lifetimes.append(fl)
yield c
class MeltingPointHeadingParser(BaseParser):
""""""
root = melting_point_heading
def interpret(self, result, start, end):
""""""
melting_point_units = first(result.xpath('./units/text()'))
c = Compound()
if melting_point_units:
c.melting_points.append(
MeltingPoint(units=melting_point_units)
)
yield c
class MeltingPointCellParser(BaseParser):
""""""
root = melting_point_cell
def interpret(self, result, start, end):
""""""
c = Compound()
for mp in result.xpath('./temp'):
c.melting_points.append(
MeltingPoint(
value=first(mp.xpath('./value/text()')),
units=first(mp.xpath('./units/text()'))
)
)
if c.melting_points:
yield c
class GlassTransitionHeadingParser(BaseParser):
""""""
root = glass_transition_heading
def interpret(self, result, start, end):
""""""
glass_transition_units = first(result.xpath('./units/text()'))
c = Compound()
if glass_transition_units:
c.glass_transitions.append(
GlassTransition(units=glass_transition_units)
)
yield c
class GlassTransitionCellParser(BaseParser):
""""""
root = glass_transition_cell
def interpret(self, result, start, end):
""""""
c = Compound()
for tg in result.xpath('./temp'):
c.glass_transitions.append(
GlassTransition(
                    value=first(tg.xpath('./value/text()')),
                    units=first(tg.xpath('./units/text()'))
)
)
        if c.glass_transitions:
yield c
class ElectrochemicalPotentialHeadingParser(BaseParser):
""""""
root = electrochemical_potential_heading
def interpret(self, result, start, end):
""""""
c = Compound(
electrochemical_potentials=[
ElectrochemicalPotential(
type=first(result.xpath('./electrochemical_potential_type/text()')),
units=first(result.xpath('./electrochemical_potential_units/text()'))
)
]
)
yield c
class ElectrochemicalPotentialCellParser(BaseParser):
""""""
root = electrochemical_potential_cell
def interpret(self, result, start, end):
""""""
c = Compound()
for value in result.xpath('./electrochemical_potential_value/text()'):
c.electrochemical_potentials.append(
ElectrochemicalPotential(
value=value
)
)
if c.electrochemical_potentials:
yield c
class CaptionContextParser(BaseParser):
""""""
root = caption_context
def __init__(self):
pass
def interpret(self, result, start, end):
name = first(result.xpath('./subject_phrase/name/text()'))
c = Compound(names=[name]) if name else Compound()
context = {}
# print(etree.tostring(result[0]))
solvent = first(result.xpath('./solvent_phrase/name/text()'))
if solvent is not None:
context['solvent'] = solvent
# Melting point shouldn't have contextual temperature
if context:
c.melting_points = [MeltingPoint(**context)]
temp = first(result.xpath('./temp_phrase'))
if temp is not None:
context['temperature'] = first(temp.xpath('./temp/value/text()'))
context['temperature_units'] = first(temp.xpath('./temp/units/text()'))
# Glass transition temperature shouldn't have contextual temperature
if context:
c.glass_transitions = [GlassTransition(**context)]
temp = first(result.xpath('./temp_phrase'))
if temp is not None:
context['temperature'] = first(temp.xpath('./temp/value/text()'))
context['temperature_units'] = first(temp.xpath('./temp/units/text()'))
if context:
c.quantum_yields = [QuantumYield(**context)]
c.fluorescence_lifetimes = [FluorescenceLifetime(**context)]
c.electrochemical_potentials = [ElectrochemicalPotential(**context)]
c.uvvis_spectra = [UvvisSpectrum(**context)]
if c.serialize():
# print(c.to_primitive())
yield c
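# --- Illustrative usage sketch (not part of the original module) ---
# These heading/cell parsers are not normally called directly; they feed into
# ChemDataExtractor's table handling and surface through the high-level
# Document API. A minimal end-to-end sketch, assuming a local 'paper.html'
# file exists:
if __name__ == '__main__':
    from chemdataextractor import Document
    with open('paper.html', 'rb') as f:
        doc = Document.from_file(f)
    # Compound records (UV-vis peaks, quantum yields, melting points, ...)
    # extracted from tables and text, serialized to plain dicts.
    print(doc.records.serialize())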
|
PypiClean
|
/onshape-test-client-1.0.0.tar.gz/onshape-test-client-1.0.0/onshape_client/oas/model/bt_material_library_settings_info.py
|
import re # noqa: F401
import sys # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from onshape_client.oas.exceptions import ApiAttributeError
def lazy_import():
from onshape_client.oas.model.bt_material_library_metadata_info import BTMaterialLibraryMetadataInfo
globals()['BTMaterialLibraryMetadataInfo'] = BTMaterialLibraryMetadataInfo
class BTMaterialLibrarySettingsInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'company_libraries': ([BTMaterialLibraryMetadataInfo],), # noqa: E501
'libraries': ([BTMaterialLibraryMetadataInfo],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'company_libraries': 'companyLibraries', # noqa: E501
'libraries': 'libraries', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BTMaterialLibrarySettingsInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
company_libraries ([BTMaterialLibraryMetadataInfo]): [optional] # noqa: E501
libraries ([BTMaterialLibraryMetadataInfo]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BTMaterialLibrarySettingsInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
company_libraries ([BTMaterialLibraryMetadataInfo]): [optional] # noqa: E501
libraries ([BTMaterialLibraryMetadataInfo]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/standardnotes-fs-0.0.2.tar.gz/standardnotes-fs-0.0.2/standardnotes_fs/api.py
|
import requests
from standardnotes_fs.crypt import EncryptionHelper
class SNAPIException(Exception):
pass
class RESTAPI:
def __init__(self, base_url):
self.base_url = base_url
self.headers = {}
def get(self, route, params=None):
url = self.base_url + route
return requests.get(url, params, headers=self.headers).json()
def post(self, route, data=None):
url = self.base_url + route
return requests.post(url, json=data, headers=self.headers).json()
def add_header(self, header):
self.headers.update(header)
class StandardNotesAPI:
encryption_helper = EncryptionHelper()
sync_token = None
def get_auth_params_for_email(self):
return self.api.get('/auth/params', dict(email=self.username))
def gen_keys(self, password):
pw_info = self.get_auth_params_for_email()
if 'error' in pw_info:
raise SNAPIException(pw_info['error']['message'])
return self.encryption_helper.pure_generate_password_and_key(
password, pw_info['pw_salt'], pw_info['pw_cost'])
def sign_in(self, keys):
self.keys = keys
res = self.api.post('/auth/sign_in', dict(email=self.username,
password=self.keys['pw']))
if 'error' in res:
raise SNAPIException(res['error']['message'])
self.api.add_header(dict(Authorization='Bearer ' + res['token']))
def sync(self, dirty_items):
items = self.handle_dirty_items(dirty_items)
response = self.api.post('/items/sync', dict(sync_token=self.sync_token,
items=items))
self.sync_token = response['sync_token']
return self.handle_response_items(response)
def handle_dirty_items(self, dirty_items):
items = self.encryption_helper.encrypt_dirty_items(
dirty_items, self.keys)
return items
def handle_response_items(self, response):
response_items = self.encryption_helper.decrypt_response_items(
response['retrieved_items'], self.keys)
saved_items = self.encryption_helper.decrypt_response_items(
response['saved_items'], self.keys)
return dict(response_items=response_items, saved_items=saved_items)
def __init__(self, base_url, username):
self.api = RESTAPI(base_url)
self.username = username
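# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sign-in and sync flow. The server URL and credentials are
# placeholders; error handling and the interactive password prompt are omitted.
if __name__ == '__main__':
    napi = StandardNotesAPI('https://sync.standardnotes.org', 'user@example.com')
    keys = napi.gen_keys('correct horse battery staple')  # derive keys from the password
    napi.sign_in(keys)                                     # sets the Authorization header
    changes = napi.sync(dirty_items=[])                    # initial full sync, nothing to push
    print(len(changes['response_items']), 'items retrieved')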
|
PypiClean
|
/django-geoexplorer-worldmap-4.0.72.tar.gz/django-geoexplorer-worldmap-4.0.72/geoexplorer-worldmap/static/worldmap_client/externals/openlayers/lib/OpenLayers/Format/GPX.js
|
* @requires OpenLayers/Format/XML.js
* @requires OpenLayers/Feature/Vector.js
* @requires OpenLayers/Geometry/Point.js
* @requires OpenLayers/Geometry/LineString.js
* @requires OpenLayers/Projection.js
*/
/**
* Class: OpenLayers.Format.GPX
* Read/write GPX parser. Create a new instance with the
* <OpenLayers.Format.GPX> constructor.
*
* Inherits from:
* - <OpenLayers.Format.XML>
*/
OpenLayers.Format.GPX = OpenLayers.Class(OpenLayers.Format.XML, {
/**
* APIProperty: defaultDesc
* {String} Default description for the waypoints/tracks in the case
* where the feature has no "description" attribute.
* Default is "No description available".
*/
defaultDesc: "No description available",
/**
* APIProperty: extractWaypoints
* {Boolean} Extract waypoints from GPX. (default: true)
*/
extractWaypoints: true,
/**
* APIProperty: extractTracks
* {Boolean} Extract tracks from GPX. (default: true)
*/
extractTracks: true,
/**
* APIProperty: extractRoutes
* {Boolean} Extract routes from GPX. (default: true)
*/
extractRoutes: true,
/**
* APIProperty: extractAttributes
* {Boolean} Extract feature attributes from GPX. (default: true)
* NOTE: Attributes as part of extensions to the GPX standard may not
* be extracted.
*/
extractAttributes: true,
/**
* Property: namespaces
* {Object} Mapping of namespace aliases to namespace URIs.
*/
namespaces: {
gpx: "http://www.topografix.com/GPX/1/1",
xsi: "http://www.w3.org/2001/XMLSchema-instance"
},
/**
* Property: schemaLocation
* {String} Schema location. Defaults to
* "http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd"
*/
schemaLocation: "http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd",
/**
* APIProperty: creator
* {String} The creator attribute to be added to the written GPX files.
* Defaults to "OpenLayers"
*/
creator: "OpenLayers",
/**
* Constructor: OpenLayers.Format.GPX
* Create a new parser for GPX.
*
* Parameters:
* options - {Object} An optional object whose properties will be set on
* this instance.
*/
initialize: function(options) {
// GPX coordinates are always in longlat WGS84
this.externalProjection = new OpenLayers.Projection("EPSG:4326");
OpenLayers.Format.XML.prototype.initialize.apply(this, [options]);
},
/**
* APIMethod: read
* Return a list of features from a GPX doc
*
* Parameters:
* doc - {Element}
*
* Returns:
* Array({<OpenLayers.Feature.Vector>})
*/
read: function(doc) {
if (typeof doc == "string") {
doc = OpenLayers.Format.XML.prototype.read.apply(this, [doc]);
}
var features = [];
if(this.extractTracks) {
var tracks = doc.getElementsByTagName("trk");
for (var i=0, len=tracks.length; i<len; i++) {
// Attributes are only in trk nodes, not trkseg nodes
var attrs = {};
if(this.extractAttributes) {
attrs = this.parseAttributes(tracks[i]);
}
var segs = this.getElementsByTagNameNS(tracks[i], tracks[i].namespaceURI, "trkseg");
for (var j = 0, seglen = segs.length; j < seglen; j++) {
// We don't yet support extraction of trkpt attributes
// All trksegs of a trk get that trk's attributes
var track = this.extractSegment(segs[j], "trkpt");
features.push(new OpenLayers.Feature.Vector(track, attrs));
}
}
}
if(this.extractRoutes) {
var routes = doc.getElementsByTagName("rte");
for (var k=0, klen=routes.length; k<klen; k++) {
var attrs = {};
if(this.extractAttributes) {
attrs = this.parseAttributes(routes[k]);
}
var route = this.extractSegment(routes[k], "rtept");
features.push(new OpenLayers.Feature.Vector(route, attrs));
}
}
if(this.extractWaypoints) {
var waypoints = doc.getElementsByTagName("wpt");
for (var l = 0, len = waypoints.length; l < len; l++) {
var attrs = {};
if(this.extractAttributes) {
attrs = this.parseAttributes(waypoints[l]);
}
var wpt = new OpenLayers.Geometry.Point(waypoints[l].getAttribute("lon"), waypoints[l].getAttribute("lat"));
features.push(new OpenLayers.Feature.Vector(wpt, attrs));
}
}
if (this.internalProjection && this.externalProjection) {
for (var g = 0, featLength = features.length; g < featLength; g++) {
features[g].geometry.transform(this.externalProjection,
this.internalProjection);
}
}
return features;
},
/**
* Method: extractSegment
*
* Parameters:
* segment - {DOMElement} a trkseg or rte node to parse
* segmentType - {String} nodeName of waypoints that form the line
*
* Returns:
* {<OpenLayers.Geometry.LineString>} A linestring geometry
*/
extractSegment: function(segment, segmentType) {
var points = this.getElementsByTagNameNS(segment, segment.namespaceURI, segmentType);
var point_features = [];
for (var i = 0, len = points.length; i < len; i++) {
point_features.push(new OpenLayers.Geometry.Point(points[i].getAttribute("lon"), points[i].getAttribute("lat")));
}
return new OpenLayers.Geometry.LineString(point_features);
},
/**
* Method: parseAttributes
*
* Parameters:
* node - {<DOMElement>}
*
* Returns:
* {Object} An attributes object.
*/
parseAttributes: function(node) {
// node is either a wpt, trk or rte
// attributes are children of the form <attr>value</attr>
var attributes = {};
var attrNode = node.firstChild, value, name;
while(attrNode) {
if(attrNode.nodeType == 1 && attrNode.firstChild) {
value = attrNode.firstChild;
if(value.nodeType == 3 || value.nodeType == 4) {
name = (attrNode.prefix) ?
attrNode.nodeName.split(":")[1] :
attrNode.nodeName;
if(name != "trkseg" && name != "rtept") {
attributes[name] = value.nodeValue;
}
}
}
attrNode = attrNode.nextSibling;
}
return attributes;
},
/**
* APIMethod: write
* Accepts Feature Collection, and returns a string.
*
* Parameters:
* features - {Array(<OpenLayers.Feature.Vector>)} List of features to serialize into a string.
* metadata - {Object} A key/value pairs object to build a metadata node to
* add to the gpx. Supported keys are 'name', 'desc', 'author'.
*/
write: function(features, metadata) {
features = OpenLayers.Util.isArray(features) ?
features : [features];
var gpx = this.createElementNS(this.namespaces.gpx, "gpx");
gpx.setAttribute("version", "1.1");
gpx.setAttribute("creator", this.creator);
this.setAttributes(gpx, {
"xsi:schemaLocation": this.schemaLocation
});
if (metadata && typeof metadata == 'object') {
gpx.appendChild(this.buildMetadataNode(metadata));
}
for(var i=0, len=features.length; i<len; i++) {
gpx.appendChild(this.buildFeatureNode(features[i]));
}
return OpenLayers.Format.XML.prototype.write.apply(this, [gpx]);
},
/**
* Method: buildMetadataNode
* Creates a "metadata" node.
*
* Returns:
* {DOMElement}
*/
buildMetadataNode: function(metadata) {
var types = ['name', 'desc', 'author'],
node = this.createElementNSPlus('gpx:metadata');
for (var i=0; i < types.length; i++) {
var type = types[i];
if (metadata[type]) {
var n = this.createElementNSPlus("gpx:" + type);
n.appendChild(this.createTextNode(metadata[type]));
node.appendChild(n);
}
}
return node;
},
/**
* Method: buildFeatureNode
* Accepts an <OpenLayers.Feature.Vector>, and builds a node for it.
*
* Parameters:
* feature - {<OpenLayers.Feature.Vector>}
*
* Returns:
* {DOMElement} - The created node, either a 'wpt' or a 'trk'.
*/
buildFeatureNode: function(feature) {
var geometry = feature.geometry;
geometry = geometry.clone();
if (this.internalProjection && this.externalProjection) {
geometry.transform(this.internalProjection,
this.externalProjection);
}
if (geometry.CLASS_NAME == "OpenLayers.Geometry.Point") {
var wpt = this.buildWptNode(feature);
return wpt;
} else {
var trkNode = this.createElementNSPlus("gpx:trk");
this.appendAttributesNode(trkNode, feature);
var trkSegNodes = this.buildTrkSegNode(geometry);
trkSegNodes = OpenLayers.Util.isArray(trkSegNodes) ?
trkSegNodes : [trkSegNodes];
for (var i = 0, len = trkSegNodes.length; i < len; i++) {
trkNode.appendChild(trkSegNodes[i]);
}
return trkNode;
}
},
/**
* Method: buildTrkSegNode
* Builds trkseg node(s) given a geometry
*
* Parameters:
* trknode
* geometry - {<OpenLayers.Geometry>}
*/
buildTrkSegNode: function(geometry) {
var node,
i,
len,
point,
nodes;
if (geometry.CLASS_NAME == "OpenLayers.Geometry.LineString" ||
geometry.CLASS_NAME == "OpenLayers.Geometry.LinearRing") {
node = this.createElementNSPlus("gpx:trkseg");
for (i = 0, len=geometry.components.length; i < len; i++) {
point = geometry.components[i];
node.appendChild(this.buildTrkPtNode(point));
}
return node;
} else {
nodes = [];
for (i = 0, len = geometry.components.length; i < len; i++) {
nodes.push(this.buildTrkSegNode(geometry.components[i]));
}
return nodes;
}
},
/**
* Method: buildTrkPtNode
* Builds a trkpt node given a point
*
* Parameters:
* point - {<OpenLayers.Geometry.Point>}
*
* Returns:
* {DOMElement} A trkpt node
*/
buildTrkPtNode: function(point) {
var node = this.createElementNSPlus("gpx:trkpt");
node.setAttribute("lon", point.x);
node.setAttribute("lat", point.y);
return node;
},
/**
* Method: buildWptNode
* Builds a wpt node given a point
*
* Parameters:
* feature - {<OpenLayers.Feature.Vector>}
*
* Returns:
* {DOMElement} A wpt node
*/
buildWptNode: function(feature) {
var node = this.createElementNSPlus("gpx:wpt");
node.setAttribute("lon", feature.geometry.x);
node.setAttribute("lat", feature.geometry.y);
this.appendAttributesNode(node, feature);
return node;
},
/**
* Method: appendAttributesNode
* Adds some attributes node.
*
* Parameters:
* node - {DOMElement} the node to append the attribute nodes to.
* feature - {<OpenLayers.Feature.Vector>}
*/
appendAttributesNode: function(node, feature) {
var name = this.createElementNSPlus('gpx:name');
name.appendChild(this.createTextNode(
feature.attributes.name || feature.id));
node.appendChild(name);
var desc = this.createElementNSPlus('gpx:desc');
desc.appendChild(this.createTextNode(
feature.attributes.description || this.defaultDesc));
node.appendChild(desc);
// TBD - deal with remaining (non name/description) attributes.
},
CLASS_NAME: "OpenLayers.Format.GPX"
});
|
PypiClean
|
/bigdl_dllib-2.3.0b20230214-py3-none-macosx_10_11_x86_64.whl/bigdl/dllib/keras/layers/wrappers.py
|
import sys
from bigdl.dllib.keras.engine import ZooKerasLayer
if sys.version >= '3':
long = int
unicode = str
class TimeDistributed(ZooKerasLayer):
"""
TimeDistributed wrapper.
Apply a layer to every temporal slice of an input.
The input should be at least 3D.
The dimension of index one will be considered as the temporal dimension.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
layer: A layer instance.
input_shape: A shape tuple, not including batch.
name: String to set the name of the wrapper.
    If not specified, its name will default to a generated string.
>>> from bigdl.dllib.keras.layers import Dense
>>> timedistributed = TimeDistributed(Dense(8), input_shape=(10, 12))
creating: createZooKerasDense
creating: createZooKerasTimeDistributed
"""
def __init__(self, layer, input_shape=None, **kwargs):
super(TimeDistributed, self).__init__(None,
layer,
list(input_shape) if input_shape else None,
**kwargs)
class Bidirectional(ZooKerasLayer):
"""
Bidirectional wrapper for RNNs.
Bidirectional currently requires RNNs to return the full sequence, i.e. return_sequences = True.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
Example of creating a bidirectional LSTM:
    Bidirectional(LSTM(12, return_sequences=True), merge_mode="sum", input_shape=(32, 32))
# Arguments
layer: An instance of a recurrent layer.
merge_mode: Mode by which outputs of the forward and backward RNNs will be combined.
Must be one of: 'sum', 'mul', 'concat', 'ave'. Default is 'concat'.
input_shape: A shape tuple, not including batch.
name: String to set the name of the wrapper.
    If not specified, its name will default to a generated string.
>>> from bigdl.dllib.keras.layers import LSTM
    >>> bidirectional = Bidirectional(LSTM(10, return_sequences=True), input_shape=(12, 16))
creating: createZooKerasLSTM
creating: createZooKerasBidirectional
"""
def __init__(self, layer, merge_mode="concat", input_shape=None, **kwargs):
super(Bidirectional, self).__init__(None,
layer,
merge_mode,
list(input_shape) if input_shape else None,
**kwargs)
class KerasLayerWrapper(ZooKerasLayer):
"""
Wrap a torch style layer to keras style layer.
This layer can be built multiple times.
This layer will return a keras compatible layer
# Arguments
torch_layer: a torch style layer.
input_shape: A shape tuple, not including batch.
i.e If the input data is (2, 3, 4) and 2 is the batch size, you should input: (3, 4) here.
>>> from bigdl.dllib.keras.layers import KerasLayerWrapper
>>> from bigdl.dllib.nn.layer import Linear
>>> linear = Linear(100, 10, with_bias=True)
creating: createLinear
>>> kerasLayer = KerasLayerWrapper(linear, input_shape=(100, ))
creating: createZooKerasKerasLayerWrapper
"""
def __init__(self, torch_layer, input_shape=None, **kwargs):
super(KerasLayerWrapper, self).__init__(None,
torch_layer,
list(input_shape) if input_shape else None,
**kwargs)
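# --- Illustrative usage sketch (not part of the original module) ---
# A small model combining the wrappers above. The Dense/LSTM imports follow the
# doctests in this module; the Sequential import path and its add() method are
# assumptions based on the bigdl.dllib Keras-style API, and the shapes are
# arbitrary.
if __name__ == '__main__':
    from bigdl.dllib.keras.layers import Dense, LSTM
    from bigdl.dllib.keras.models import Sequential  # assumed import path
    model = Sequential()
    model.add(Bidirectional(LSTM(16, return_sequences=True), merge_mode="concat",
                            input_shape=(12, 32)))
    model.add(TimeDistributed(Dense(8)))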
|
PypiClean
|
/collective.fullcalendar-1.2.tar.gz/collective.fullcalendar-1.2/src/collective/fullcalendar/browser/fullcalendar.py
|
from collective.fullcalendar import _
from collective.fullcalendar.interfaces import IFullcalendarEnabled
from plone.app.contenttypes.behaviors.collection import ISyndicatableCollection
from plone.app.event.base import AnnotationAdapter
from plone.app.z3cform.widget import RelatedItemsFieldWidget
from plone.autoform import directives
from plone.dexterity.interfaces import IDexterityContainer
from plone.z3cform.layout import FormWrapper
from Products.Five.browser import BrowserView
from Products.statusmessages.interfaces import IStatusMessage
from z3c.form import button, field, form
from z3c.relationfield.schema import RelationChoice
from zope import schema
from zope.annotation.interfaces import IAnnotations
from zope.interface import alsoProvides
from zope.interface import implementer
from zope.interface import Interface
from zope.interface import noLongerProvides
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
class IIFullcalendarSettings(Interface):
slotMinutes = schema.Int(
title=_(u"label_slot_length", default=u"Slot length"),
description=_(u"help_slot_length", default=u"Slot length in minutes"),
required=True,
default=30,
)
allDay = schema.Bool(
title=_(u"label_allDay", default=u"All day"),
description=_(
u"help_allDay", default=u'Display "All day" option for timeGridWeek-View'
),
required=False,
default=True,
)
defaultCalendarView = schema.Choice(
title=_(u"label_defaultCalendarView", default=u"Standard View"),
description=_(u"help_defaultCalendarView", default=u"Standard View"),
required=True,
vocabulary=SimpleVocabulary.fromValues(
["dayGridMonth", "timeGridWeek", "listWeek", "dayGridWeek"]
),
default="dayGridMonth",
)
# Possible for headerLeft/headerRight: title, prev, next, prevYear, nextYear, today, dayGridMonth, timeGridWeek, listWeek, dayGridWeek
headerLeft = schema.TextLine(
title=_(u"label_headerLeft", default=u"Head area left"),
description=_(
u"help_headerLeft",
default=u"Possible values: title, prev, next, prevYear, nextYear, today, dayGridMonth, timeGridWeek, listWeek, dayGridWeek",
),
required=False,
default="prev,next today",
)
headerRight = schema.TextLine(
title=_(u"label_headerRight", default=u"Head area right"),
description=_(
u"help_headerRight",
default=u"Possible values: title, prev, next, prevYear, nextYear, today, dayGridMonth, timeGridWeek, listWeek, dayGridWeek",
),
required=False,
default="dayGridMonth timeGridWeek listWeek",
)
weekends = schema.Bool(
title=_(u"label_weekends", default=u"Show weekends"),
description=_(u"help_weekends", default=u"Show weekends"),
required=False,
default=True,
)
firstDay = schema.Choice(
title=_(u"label_firstDay", default=u"First day of the week"),
description=_(u"help_firstDay", default=u"Choose the first day of the week."),
required=True,
vocabulary=SimpleVocabulary(
[
SimpleTerm(value=pair[0], token=pair[0], title=pair[1])
for pair in [
(0, u"sunday"),
(1, u"monday"),
(2, u"tuesday"),
(3, u"wednesday"),
(4, u"thursday"),
(5, u"friday"),
(6, u"saturday"),
]
]
),
default=1,
)
firstHour = schema.TextLine(
title=_(u"label_firstHour", default=u"First visible hour"),
description=_(
u"help_firstHour",
default=u'Set the starting calendar day view scroll position (a number between 0 and 23). If there is a "+" or "-" in front of this number, the number is added or subtracted with the current time.',
),
required=True,
default="6",
)
minTime = schema.TextLine(
title=_(u"label_minTime", default=u"First visible hour"),
description=_(
u"help_minTime",
default=u"Select the first visible hour of the calendar (e.g. '5' or '5:30').",
),
required=True,
default="00:00:00",
)
maxTime = schema.TextLine(
title=_(u"label_maxTime", default=u"Last visible hour"),
description=_(
u"help_maxTime",
default=u"Select the last visible hour of the calendar (e.g. '5' oder '5:30').",
),
required=True,
default="24:00:00",
)
# Target for new events
target_folder = RelationChoice(
title=_(u"label_target_folder", default=u"Destination folder for new appointments"),
description=_(u"help_target_folder", default=u"Destination folder for new appointments"),
vocabulary="plone.app.vocabularies.Catalog",
required=False,
)
directives.widget(
"target_folder",
RelatedItemsFieldWidget,
pattern_options={
"selectableTypes": ["Folder"],
# "basePath": '/',
},
)
event_type = schema.Choice(
title=_(u"event_type", default=u"Event content_type to add"),
description=_(
u"help_event_type",
default=u"Or leave blank for default event type 'Event'.",
),
vocabulary="plone.app.vocabularies.PortalTypes",
required=False,
default="Event",
)
# Height of Calendar
calendarHeight = schema.Int(
title=_(u"label_calendarHeight", default=u"Calendar height"),
description=_(u"help_calendarHeight", default=u"Calendar height in pixels"),
required=False,
)
# Enable editing on events
caleditable = schema.Bool(
title=_(u"label_caleditable", default=u"Calendar editable"),
description=_(
u"help_caleditable",
default=u"Check this box if you want the events in the calendar to be editable.",
),
required=False,
default=False,
)
@implementer(IIFullcalendarSettings)
class IFullcalendarSettings(AnnotationAdapter):
"""Annotation Adapter for IIFullcalendarSettings."""
ANNOTATION_KEY = "fullcalendar_settings"
class FullcalendarSettingsForm(form.EditForm):
fields = field.Fields(IIFullcalendarSettings)
ignoreContext = False
label = _(u"Edit fullcalendar settings")
@button.buttonAndHandler(_(u"Save"), name="save")
def handleSave(self, action): # NOQA
data, errors = self.extractData()
if errors:
self.status = self.formErrorsMessage
return
self.applyChanges(data)
IStatusMessage(self.request).addStatusMessage(_(u"Changes saved."), "info")
self.request.response.redirect(self.context.absolute_url())
@button.buttonAndHandler(_(u"Cancel"), name="cancel")
def handleCancel(self, action):
IStatusMessage(self.request).addStatusMessage(_(u"Changes canceled."), "info")
self.request.response.redirect(
"%s/%s" % (self.context.absolute_url(), "@@fullcalendar_settings")
)
class IFullcalendarTool(BrowserView):
def available(self):
return IDexterityContainer.providedBy(self.context) or ISyndicatableCollection.providedBy(self.context)
def available_disabled(self):
return self.available() and not self.enabled()
def enabled(self):
return IFullcalendarEnabled.providedBy(self.context)
class FullcalendarSettingsFormView(FormWrapper):
form = FullcalendarSettingsForm
def enable(self):
"""Enable fullcalendar on this context."""
alsoProvides(self.context, IFullcalendarEnabled)
        self.context.reindexObject(idxs=["object_provides"])
annotations = IAnnotations(self.context)
if "fullcalendar_settings" not in annotations:
# get the default-setting from the schema
default_settings = {}
for key, field in IIFullcalendarSettings.namesAndDescriptions():
default_settings[key] = field.default
annotations["fullcalendar_settings"] = default_settings
self.context.setLayout("fullcalendar-view")
self.request.response.redirect(self.context.absolute_url())
def disable(self):
"""Disable fullcalendar on this context."""
noLongerProvides(self.context, IFullcalendarEnabled)
        self.context.reindexObject(idxs=["object_provides"])
annotations = IAnnotations(self.context)
del annotations["fullcalendar_settings"]
self.context.manage_delProperties(["layout"])
self.request.response.redirect(self.context.absolute_url())
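# --- Illustrative usage sketch (not part of the original module) ---
# Once fullcalendar has been enabled on a container via enable() above, its
# settings live in an annotation under the 'fullcalendar_settings' key. The
# helper below is only a sketch; 'folder' stands for any annotatable Plone
# container.
def dump_fullcalendar_settings(folder):
    """Return the stored fullcalendar settings for a container as a plain dict."""
    annotations = IAnnotations(folder)
    return dict(annotations.get("fullcalendar_settings", {}))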
|
PypiClean
|
/openreview_py-0.7.5-py3-none-any.whl/openreview/invitations.py
|
import openreview
class Submission(openreview.Invitation):
def __init__(self, name, conference_id, duedate = 0,
process = None, inv_params = {}, reply_params = {}, content_params = {}, mask = {}):
self.name = name
self.conference_id = conference_id
default_inv_params = {
'id': '/'.join([self.conference_id, '-', self.name]),
'readers': ['everyone'],
'writers': [self.conference_id],
'invitees': ['~'],
'signatures': [self.conference_id],
'duedate': duedate,
'process': process
}
default_reply_params = {
'forum': None,
'replyto': None,
'readers': {
'description': 'The users who will be allowed to read the above content.',
'values': ['everyone']
},
'signatures': {
'description': 'Your authorized identity to be associated with the above content.',
'values-regex': '~.*'
},
'writers': {
'values': [self.conference_id]
}
}
default_content_params = {
'title': {
'description': 'Title of paper.',
'order': 1,
'value-regex': '.{1,250}',
'required':True
},
'authors': {
'description': 'Comma separated list of author names. Please provide real names; identities will be anonymized.',
'order': 2,
'values-regex': "[^;,\\n]+(,[^,\\n]+)*",
'required':True
},
'authorids': {
'description': 'Comma separated list of author email addresses, lowercased, in the same order as above. For authors with existing OpenReview accounts, please make sure that the provided email address(es) match those listed in the author\'s profile. Please provide real emails; identities will be anonymized.',
'order': 3,
'values-regex': "([a-z0-9_\-\.]{2,}@[a-z0-9_\-\.]{2,}\.[a-z]{2,},){0,}([a-z0-9_\-\.]{2,}@[a-z0-9_\-\.]{2,}\.[a-z]{2,})",
'required':True
},
'keywords': {
'description': 'Comma separated list of keywords.',
'order': 6,
'values-regex': "(^$)|[^;,\\n]+(,[^,\\n]+)*"
},
'TL;DR': {
'description': '\"Too Long; Didn\'t Read\": a short sentence describing your paper',
'order': 7,
'value-regex': '[^\\n]{0,250}',
'required':False
},
'abstract': {
'description': 'Abstract of paper.',
'order': 8,
'value-regex': '[\\S\\s]{1,5000}',
'required':True
},
'pdf': {
'description': 'Upload a PDF file that ends with .pdf',
'order': 9,
'value-regex': 'upload',
'required':True
}
}
self.content_params = {}
self.content_params.update(default_content_params)
self.content_params.update(content_params)
if mask:
self.content_params = mask
self.reply_params = {}
self.reply_params.update(default_reply_params)
self.reply_params.update(reply_params)
self.reply_params['content'] = self.content_params
self.inv_params = {}
self.inv_params.update(default_inv_params)
self.inv_params.update(inv_params)
self.inv_params['reply'] = self.reply_params
super(Submission, self).__init__(**self.inv_params)
def add_process(self, process):
self.process = process.render()
class AddBid(openreview.Invitation):
def __init__(self, name, conference_id, duedate = 0,
completion_count = 50, inv_params = {}, reply_params = {},
content_params = {}, mask = {}):
default_inv_params = {
'id': conference_id + '/-/Add_Bid',
'readers': [conference_id, conference_id + '/Reviewers'],
'writers': [conference_id],
'invitees': [conference_id + '/Reviewers'],
'signatures': [conference_id],
            'duedate': duedate,
            'taskCompletionCount': completion_count,
'multiReply': False,
}
default_reply_params = {
'forum': None,
'replyto': None,
'invitation': conference_id + '/-/Blind_Submission',
'readers': {
'description': 'The users who will be allowed to read the above content.',
'values-copied': [conference_id, '{signatures}']
},
'signatures': {
'description': 'How your identity will be displayed with the above content.',
'values-regex': '~.*'
}
}
default_content_params = {
'tag': {
'description': 'Bid description',
'order': 1,
'value-radio': ['I want to review',
'I can review',
'I can probably review but am not an expert',
'I cannot review',
'No bid'],
'required':True
}
}
self.content_params = {}
self.content_params.update(default_content_params)
self.content_params.update(content_params)
self.reply_params = {}
self.reply_params.update(default_reply_params)
self.reply_params.update(reply_params)
self.reply_params['content'] = self.content_params
self.inv_params = {}
self.inv_params.update(default_inv_params)
self.inv_params.update(inv_params)
self.inv_params['reply'] = self.reply_params
super(AddBid, self).__init__(**self.inv_params)
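# --- Illustrative usage sketch (not part of the original module) ---
# Building a submission invitation for a hypothetical venue and posting it with
# the openreview client. The baseurl, credentials, conference id and duedate are
# placeholders.
if __name__ == '__main__':
    client = openreview.Client(
        baseurl='https://api.openreview.net',
        username='organizer@example.com',
        password='********')
    submission_inv = Submission(
        name='Submission',
        conference_id='Example.org/2024/Conference',
        duedate=1735689600000)  # due date in epoch milliseconds
    client.post_invitation(submission_inv)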
|
PypiClean
|
/ixnetwork_restpy-1.1.10.tar.gz/ixnetwork_restpy-1.1.10/uhd_restpy/testplatform/sessions/ixnetwork/traffic/statistics/latency/latency.py
|
import sys
from uhd_restpy.base import Base
from uhd_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class Latency(Base):
"""This object sets the latency mode to fetch related statistics for each mode.
The Latency class encapsulates a required latency resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'latency'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'Mode': 'mode',
}
_SDM_ENUM_MAP = {
'mode': ['cutThrough'],
}
def __init__(self, parent, list_op=False):
super(Latency, self).__init__(parent, list_op)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, latency statistics is enabled and if false, latency statistics is disabled.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def Mode(self):
# type: () -> str
"""
Returns
-------
- str(cutThrough): Latency statistics is generated according to the mode set if latency is enabled.
"""
return self._get_attribute(self._SDM_ATT_MAP['Mode'])
@Mode.setter
def Mode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
def update(self, Enabled=None, Mode=None):
# type: (bool, str) -> Latency
"""Updates latency resource on the server.
Args
----
- Enabled (bool): If true, latency statistics are enabled; if false, latency statistics are disabled.
- Mode (str(cutThrough)): Latency statistics are generated according to the mode set if latency is enabled.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Enabled=None, Mode=None):
# type: (bool, str) -> Latency
"""Finds and retrieves latency resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve latency resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all latency resources from the server.
Args
----
- Enabled (bool): If true, latency statistics are enabled; if false, latency statistics are disabled.
- Mode (str(cutThrough)): Latency statistics are generated according to the mode set if latency is enabled.
Returns
-------
- self: This instance with matching latency resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of latency data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the latency resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
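# Illustrative usage sketch (not part of this module). The SessionAssistant
# import and the Traffic.Statistics.Latency attribute path are assumptions
# derived from this file's location in the package; the address and
# credentials are placeholders.
if __name__ == '__main__':
    from uhd_restpy import SessionAssistant
    session = SessionAssistant(IpAddress='127.0.0.1',
                               UserName='admin', Password='admin')
    ixnetwork = session.Ixnetwork
    latency = ixnetwork.Traffic.Statistics.Latency
    latency.Enabled = True          # enable latency statistics
    latency.Mode = 'cutThrough'     # only mode listed in _SDM_ENUM_MAP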
|
PypiClean
|
/azure_mgmt_databox-2.0.0-py3-none-any.whl/azure/mgmt/databox/v2020_04_01/_configuration.py
|
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class DataBoxManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for DataBoxManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Subscription Id. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2020-04-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(DataBoxManagementClientConfiguration, self).__init__(**kwargs)
api_version: str = kwargs.pop("api_version", "2020-04-01")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-databox/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
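# Illustrative usage sketch (not part of the original module): building the
# configuration with a credential from the separate azure-identity package.
# The subscription id is a placeholder.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential

    config = DataBoxManagementClientConfiguration(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    # "2020-04-01" unless overridden via the api_version keyword argument
    print(config.api_version)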
|
PypiClean
|
/scikit_learn_tree-1.2.3-cp38-cp38-win_amd64.whl/sklearn_fork/linear_model/_glm/_newton_solver.py
|
import warnings
from abc import ABC, abstractmethod
import numpy as np
import scipy.linalg
import scipy.optimize
from ..._loss.loss import HalfSquaredError
from ...exceptions import ConvergenceWarning
from ...utils.optimize import _check_optimize_result
from .._linear_loss import LinearModelLoss
class NewtonSolver(ABC):
"""Newton solver for GLMs.
This class implements Newton/2nd-order optimization routines for GLMs. Each Newton
iteration aims at finding the Newton step which is done by the inner solver. With
Hessian H, gradient g and coefficients coef, one step solves:
H @ coef_newton = -g
For our GLM / LinearModelLoss, we have gradient g and Hessian H:
g = X.T @ loss.gradient + l2_reg_strength * coef
H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity
Backtracking line search updates coef = coef_old + t * coef_newton for some t in
(0, 1].
This is a base class, actual implementations (child classes) may deviate from the
above pattern and use structure specific tricks.
Usage pattern:
- initialize solver: sol = NewtonSolver(...)
- solve the problem: sol.solve(X, y, sample_weight)
References
----------
- Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization"
2nd edition
https://doi.org/10.1007/978-0-387-40065-5
- Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization."
Cambridge University Press, 2004.
https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Initial coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
linear_loss : LinearModelLoss
The loss to be minimized.
l2_reg_strength : float, default=0.0
L2 regularization strength.
tol : float, default=1e-4
The optimization problem is solved when each of the following condition is
fulfilled:
1. maximum |gradient| <= tol
2. Newton decrement d: 1/2 * d^2 <= tol
max_iter : int, default=100
Maximum number of Newton steps allowed.
n_threads : int, default=1
Number of OpenMP threads to use for the computation of the Hessian and gradient
of the loss function.
Attributes
----------
coef_old : ndarray of shape coef.shape
Coefficient of previous iteration.
coef_newton : ndarray of shape coef.shape
Newton step.
gradient : ndarray of shape coef.shape
Gradient of the loss w.r.t. the coefficients.
gradient_old : ndarray of shape coef.shape
Gradient of previous iteration.
loss_value : float
Value of objective function = loss + penalty.
loss_value_old : float
Value of objective function of previous iteration.
raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes)
converged : bool
Indicator for convergence of the solver.
iteration : int
Number of Newton steps, i.e. calls to inner_solve
use_fallback_lbfgs_solve : bool
If set to True, the solver will resort to calling LBFGS to finish the
optimization procedure in case of convergence issues.
gradient_times_newton : float
gradient @ coef_newton, set in inner_solve and used by line_search. If the
Newton step is a descent direction, this is negative.
"""
def __init__(
self,
*,
coef,
linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True),
l2_reg_strength=0.0,
tol=1e-4,
max_iter=100,
n_threads=1,
verbose=0,
):
self.coef = coef
self.linear_loss = linear_loss
self.l2_reg_strength = l2_reg_strength
self.tol = tol
self.max_iter = max_iter
self.n_threads = n_threads
self.verbose = verbose
def setup(self, X, y, sample_weight):
"""Precomputations
If None, initializes:
- self.coef
Sets:
- self.raw_prediction
- self.loss_value
"""
_, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X)
self.loss_value = self.linear_loss.loss(
coef=self.coef,
X=X,
y=y,
sample_weight=sample_weight,
l2_reg_strength=self.l2_reg_strength,
n_threads=self.n_threads,
raw_prediction=self.raw_prediction,
)
@abstractmethod
def update_gradient_hessian(self, X, y, sample_weight):
"""Update gradient and Hessian."""
@abstractmethod
def inner_solve(self, X, y, sample_weight):
"""Compute Newton step.
Sets:
- self.coef_newton
- self.gradient_times_newton
"""
def fallback_lbfgs_solve(self, X, y, sample_weight):
"""Fallback solver in case of emergency.
If a solver detects convergence problems, it may fall back to this methods in
the hope to exit with success instead of raising an error.
Sets:
- self.coef
- self.converged
"""
opt_res = scipy.optimize.minimize(
self.linear_loss.loss_gradient,
self.coef,
method="L-BFGS-B",
jac=True,
options={
"maxiter": self.max_iter,
"maxls": 50, # default is 20
"iprint": self.verbose - 1,
"gtol": self.tol,
"ftol": 64 * np.finfo(np.float64).eps,
},
args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
self.coef = opt_res.x
self.converged = opt_res.status == 0
def line_search(self, X, y, sample_weight):
"""Backtracking line search.
Sets:
- self.coef_old
- self.coef
- self.loss_value_old
- self.loss_value
- self.gradient_old
- self.gradient
- self.raw_prediction
"""
# line search parameters
beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11
eps = 16 * np.finfo(self.loss_value.dtype).eps
t = 1 # step size
# gradient_times_newton = self.gradient @ self.coef_newton
# was computed in inner_solve.
armijo_term = sigma * self.gradient_times_newton
_, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
self.coef_newton, X
)
self.coef_old = self.coef
self.loss_value_old = self.loss_value
self.gradient_old = self.gradient
# np.sum(np.abs(self.gradient_old))
sum_abs_grad_old = -1
is_verbose = self.verbose >= 2
if is_verbose:
print(" Backtracking Line Search")
print(f" eps=10 * finfo.eps={eps}")
for i in range(21): # until and including t = beta**20 ~ 1e-6
self.coef = self.coef_old + t * self.coef_newton
raw = self.raw_prediction + t * raw_prediction_newton
self.loss_value, self.gradient = self.linear_loss.loss_gradient(
coef=self.coef,
X=X,
y=y,
sample_weight=sample_weight,
l2_reg_strength=self.l2_reg_strength,
n_threads=self.n_threads,
raw_prediction=raw,
)
# Note: If coef_newton is too large, loss_gradient may produce inf values,
# potentially accompanied by a RuntimeWarning.
# This case will be captured by the Armijo condition.
# 1. Check Armijo / sufficient decrease condition.
# The smaller (more negative) the better.
loss_improvement = self.loss_value - self.loss_value_old
check = loss_improvement <= t * armijo_term
if is_verbose:
print(
f" line search iteration={i+1}, step size={t}\n"
f" check loss improvement <= armijo term: {loss_improvement} "
f"<= {t * armijo_term} {check}"
)
if check:
break
# 2. Deal with relative loss differences around machine precision.
tiny_loss = np.abs(self.loss_value_old * eps)
check = np.abs(loss_improvement) <= tiny_loss
if is_verbose:
print(
" check loss |improvement| <= eps * |loss_old|:"
f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
)
if check:
if sum_abs_grad_old < 0:
sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
# 2.1 Check sum of absolute gradients as alternative condition.
sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
check = sum_abs_grad < sum_abs_grad_old
if is_verbose:
print(
" check sum(|gradient|) < sum(|gradient_old|): "
f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
)
if check:
break
t *= beta
else:
warnings.warn(
(
f"Line search of Newton solver {self.__class__.__name__} at"
f" iteration #{self.iteration} did no converge after 21 line search"
" refinement iterations. It will now resort to lbfgs instead."
),
ConvergenceWarning,
)
if self.verbose:
print(" Line search did not converge and resorts to lbfgs instead.")
self.use_fallback_lbfgs_solve = True
return
self.raw_prediction = raw
def check_convergence(self, X, y, sample_weight):
"""Check for convergence.
Sets self.converged.
"""
if self.verbose:
print(" Check Convergence")
# Note: Checking maximum relative change of coefficient <= tol is a bad
# convergence criterion because even a large step could have brought us close
# to the true minimum.
# coef_step = self.coef - self.coef_old
# check = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old)))
# 1. Criterion: maximum |gradient| <= tol
# The gradient was already updated in line_search()
check = np.max(np.abs(self.gradient))
if self.verbose:
print(f" 1. max |gradient| {check} <= {self.tol}")
if check > self.tol:
return
# 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol
# d = sqrt(grad @ hessian^-1 @ grad)
# = sqrt(coef_newton @ hessian @ coef_newton)
# See Boyd, Vandenberghe (2004) "Convex Optimization", Chapter 9.5.1.
d2 = self.coef_newton @ self.hessian @ self.coef_newton
if self.verbose:
print(f" 2. Newton decrement {0.5 * d2} <= {self.tol}")
if 0.5 * d2 > self.tol:
return
if self.verbose:
loss_value = self.linear_loss.loss(
coef=self.coef,
X=X,
y=y,
sample_weight=sample_weight,
l2_reg_strength=self.l2_reg_strength,
n_threads=self.n_threads,
)
print(f" Solver did converge at loss = {loss_value}.")
self.converged = True
def finalize(self, X, y, sample_weight):
"""Finalize the solvers results.
Some solvers may need this, others not.
"""
pass
def solve(self, X, y, sample_weight):
"""Solve the optimization problem.
This is the main routine.
Order of calls:
self.setup()
while iteration:
self.update_gradient_hessian()
self.inner_solve()
self.line_search()
self.check_convergence()
self.finalize()
Returns
-------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Solution of the optimization problem.
"""
# setup usually:
# - initializes self.coef if needed
# - initializes and calculates self.raw_prediction, self.loss_value
self.setup(X=X, y=y, sample_weight=sample_weight)
self.iteration = 1
self.converged = False
while self.iteration <= self.max_iter and not self.converged:
if self.verbose:
print(f"Newton iter={self.iteration}")
self.use_fallback_lbfgs_solve = False # Fallback solver.
# 1. Update Hessian and gradient
self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight)
# TODO:
# if iteration == 1:
# We might stop early, e.g. we already are close to the optimum,
# usually detected by zero gradients at this stage.
# 2. Inner solver
# Calculate Newton step/direction
# This usually sets self.coef_newton and self.gradient_times_newton.
self.inner_solve(X=X, y=y, sample_weight=sample_weight)
if self.use_fallback_lbfgs_solve:
break
# 3. Backtracking line search
# This usually sets self.coef_old, self.coef, self.loss_value_old
# self.loss_value, self.gradient_old, self.gradient,
# self.raw_prediction.
self.line_search(X=X, y=y, sample_weight=sample_weight)
if self.use_fallback_lbfgs_solve:
break
# 4. Check convergence
# Sets self.converged.
self.check_convergence(X=X, y=y, sample_weight=sample_weight)
# 5. Next iteration
self.iteration += 1
if not self.converged:
if self.use_fallback_lbfgs_solve:
# Note: The fallback solver circumvents check_convergence and relies on
# the convergence checks of lbfgs instead. Enough warnings have been
# raised on the way.
self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight)
else:
warnings.warn(
(
f"Newton solver did not converge after {self.iteration - 1} "
"iterations."
),
ConvergenceWarning,
)
self.iteration -= 1
self.finalize(X=X, y=y, sample_weight=sample_weight)
return self.coef
class NewtonCholeskySolver(NewtonSolver):
"""Cholesky based Newton solver.
Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear
solver.
"""
def setup(self, X, y, sample_weight):
super().setup(X=X, y=y, sample_weight=sample_weight)
n_dof = X.shape[1]
if self.linear_loss.fit_intercept:
n_dof += 1
self.gradient = np.empty_like(self.coef)
self.hessian = np.empty_like(self.coef, shape=(n_dof, n_dof))
def update_gradient_hessian(self, X, y, sample_weight):
_, _, self.hessian_warning = self.linear_loss.gradient_hessian(
coef=self.coef,
X=X,
y=y,
sample_weight=sample_weight,
l2_reg_strength=self.l2_reg_strength,
n_threads=self.n_threads,
gradient_out=self.gradient,
hessian_out=self.hessian,
raw_prediction=self.raw_prediction, # this was updated in line_search
)
def inner_solve(self, X, y, sample_weight):
if self.hessian_warning:
warnings.warn(
(
f"The inner solver of {self.__class__.__name__} detected a "
"pointwise hessian with many negative values at iteration "
f"#{self.iteration}. It will now resort to lbfgs instead."
),
ConvergenceWarning,
)
if self.verbose:
print(
" The inner solver detected a pointwise Hessian with many "
"negative values and resorts to lbfgs instead."
)
self.use_fallback_lbfgs_solve = True
return
try:
with warnings.catch_warnings():
warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
self.coef_newton = scipy.linalg.solve(
self.hessian, -self.gradient, check_finite=False, assume_a="sym"
)
self.gradient_times_newton = self.gradient @ self.coef_newton
if self.gradient_times_newton > 0:
if self.verbose:
print(
" The inner solver found a Newton step that is not a "
"descent direction and resorts to LBFGS steps instead."
)
self.use_fallback_lbfgs_solve = True
return
except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e:
warnings.warn(
f"The inner solver of {self.__class__.__name__} stumbled upon a "
"singular or very ill-conditioned Hessian matrix at iteration "
f"#{self.iteration}. It will now resort to lbfgs instead.\n"
"Further options are to use another solver or to avoid such situation "
"in the first place. Possible remedies are removing collinear features"
" of X or increasing the penalization strengths.\n"
"The original Linear Algebra message was:\n"
+ str(e),
scipy.linalg.LinAlgWarning,
)
# Possible causes:
# 1. hess_pointwise is negative. But this is already taken care in
# LinearModelLoss.gradient_hessian.
# 2. X is singular or ill-conditioned
# This might be the most probable cause.
#
# There are many possible ways to deal with this situation. Most of them
# add, explicitly or implicitly, a matrix to the hessian to make it
# positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed.
# Instead, we resort to lbfgs.
if self.verbose:
print(
" The inner solver stumbled upon an singular or ill-conditioned "
"Hessian matrix and resorts to LBFGS instead."
)
self.use_fallback_lbfgs_solve = True
return
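# Illustrative usage sketch (not part of the original module), following the
# "Usage pattern" from the NewtonSolver docstring: initialize a solver, then
# call solve(X, y, sample_weight). The data below is synthetic and the
# regularization strength is arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.normal(size=(50, 3))
    y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.normal(size=50)
    # n_dof = n_features + 1 because the default LinearModelLoss uses an intercept.
    solver = NewtonCholeskySolver(coef=np.zeros(X.shape[1] + 1),
                                  l2_reg_strength=1e-3)
    coef = solver.solve(X, y, sample_weight=None)
    print(coef)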
|
PypiClean
|
/Transcrypt-3.7.16.tar.gz/Transcrypt-3.7.16/transcrypt/demos/parcel_demo/node_modules/js-beautify/js/lib/cli.js
|
/*jshint strict:false */
var debug = process.env.DEBUG_JSBEAUTIFY || process.env.JSBEAUTIFY_DEBUG ? function() {
console.error.apply(console, arguments);
} : function() {};
var fs = require('fs'),
cc = require('config-chain'),
beautify = require('../index'),
mkdirp = require('mkdirp'),
nopt = require('nopt'),
glob = require('glob');
nopt.invalidHandler = function(key, val) {
throw new Error(key + " was invalid with value \"" + val + "\"");
};
nopt.typeDefs.brace_style = {
type: "brace_style",
validate: function(data, key, val) {
data[key] = val;
// TODO: expand-strict is obsolete, now identical to expand. Remove in future version
// TODO: collapse-preserve-inline is obsolete, now identical to collapse,preserve-inline = true. Remove in future version
var validVals = ["collapse", "collapse-preserve-inline", "expand", "end-expand", "expand-strict", "none", "preserve-inline"];
var valSplit = val.split(/[^a-zA-Z0-9_\-]+/); //Split will always return at least one parameter
for (var i = 0; i < valSplit.length; i++) {
if (validVals.indexOf(valSplit[i]) === -1) {
return false;
}
}
return true;
}
};
var path = require('path'),
editorconfig = require('editorconfig'),
knownOpts = {
// Beautifier
"indent_size": Number,
"indent_char": String,
"eol": String,
"indent_level": Number,
"indent_with_tabs": Boolean,
"preserve_newlines": Boolean,
"max_preserve_newlines": Number,
"space_in_paren": Boolean,
"space_in_empty_paren": Boolean,
"jslint_happy": Boolean,
"space_after_anon_function": Boolean,
"space_after_named_function": Boolean,
"brace_style": "brace_style", //See above for validation
"unindent_chained_methods": Boolean,
"break_chained_methods": Boolean,
"keep_array_indentation": Boolean,
"unescape_strings": Boolean,
"wrap_line_length": Number,
"wrap_attributes": ["auto", "force", "force-aligned", "force-expand-multiline", "aligned-multiple", "preserve", "preserve-aligned"],
"wrap_attributes_indent_size": Number,
"e4x": Boolean,
"end_with_newline": Boolean,
"comma_first": Boolean,
"operator_position": ["before-newline", "after-newline", "preserve-newline"],
// CSS-only
"selector_separator_newline": Boolean,
"newline_between_rules": Boolean,
"space_around_combinator": Boolean,
//deprecated - replaced with space_around_combinator, remove in future version
"space_around_selector_separator": Boolean,
// HTML-only
"max_char": Number, // obsolete since 1.3.5
"inline": [String, Array],
"unformatted": [String, Array],
"content_unformatted": [String, Array],
"indent_inner_html": [Boolean],
"indent_handlebars": [Boolean],
"indent_scripts": ["keep", "separate", "normal"],
"extra_liners": [String, Array],
// CLI
"version": Boolean,
"help": Boolean,
"files": [path, Array],
"outfile": path,
"replace": Boolean,
"quiet": Boolean,
"type": ["js", "css", "html"],
"config": path,
"editorconfig": Boolean
},
// dasherizeShorthands provides { "indent-size": ["--indent_size"] }
// translation, allowing more convenient dashes in CLI arguments
shortHands = dasherizeShorthands({
// Beautifier
"s": ["--indent_size"],
"c": ["--indent_char"],
"e": ["--eol"],
"l": ["--indent_level"],
"t": ["--indent_with_tabs"],
"p": ["--preserve_newlines"],
"m": ["--max_preserve_newlines"],
"P": ["--space_in_paren"],
"Q": ["--space_in_empty_paren"],
"j": ["--jslint_happy"],
"a": ["--space_after_anon_function"],
"b": ["--brace_style"],
"u": ["--unindent_chained_methods"],
"B": ["--break_chained_methods"],
"k": ["--keep_array_indentation"],
"x": ["--unescape_strings"],
"w": ["--wrap_line_length"],
"X": ["--e4x"],
"n": ["--end_with_newline"],
"C": ["--comma_first"],
"O": ["--operator_position"],
// CSS-only
"L": ["--selector_separator_newline"],
"N": ["--newline_between_rules"],
// HTML-only
"A": ["--wrap_attributes"],
"i": ["--wrap_attributes_indent_size"],
"W": ["--max_char"], // obsolete since 1.3.5
"d": ["--inline"],
"U": ["--unformatted"],
"T": ["--content_unformatted"],
"I": ["--indent_inner_html"],
"H": ["--indent_handlebars"],
"S": ["--indent_scripts"],
"E": ["--extra_liners"],
// non-dasherized hybrid shortcuts
"good-stuff": [
"--keep_array_indentation",
"--keep_function_indentation",
"--jslint_happy"
],
"js": ["--type", "js"],
"css": ["--type", "css"],
"html": ["--type", "html"],
// CLI
"v": ["--version"],
"h": ["--help"],
"f": ["--files"],
"file": ["--files"],
"o": ["--outfile"],
"r": ["--replace"],
"q": ["--quiet"]
// no shorthand for "config"
// no shorthand for "editorconfig"
});
function verifyExists(fullPath) {
return fs.existsSync(fullPath) ? fullPath : null;
}
function findRecursive(dir, fileName) {
var fullPath = path.join(dir, fileName);
var nextDir = path.dirname(dir);
var result = verifyExists(fullPath);
if (!result && (nextDir !== dir)) {
result = findRecursive(nextDir, fileName);
}
return result;
}
function getUserHome() {
var user_home = '';
try {
user_home = process.env.USERPROFILE || process.env.HOME || '';
} catch (ex) {}
return user_home;
}
function set_file_editorconfig_opts(file, config) {
try {
var eConfigs = editorconfig.parseSync(file);
if (eConfigs.indent_style === "tab") {
config.indent_with_tabs = true;
} else if (eConfigs.indent_style === "space") {
config.indent_with_tabs = false;
}
if (eConfigs.indent_size) {
config.indent_size = eConfigs.indent_size;
}
if (eConfigs.max_line_length) {
if (eConfigs.max_line_length === "off") {
config.wrap_line_length = 0;
} else {
config.wrap_line_length = parseInt(eConfigs.max_line_length, 10);
}
}
if (eConfigs.insert_final_newline === true) {
config.end_with_newline = true;
} else if (eConfigs.insert_final_newline === false) {
config.end_with_newline = false;
}
if (eConfigs.end_of_line) {
if (eConfigs.end_of_line === 'cr') {
config.eol = '\r';
} else if (eConfigs.end_of_line === 'lf') {
config.eol = '\n';
} else if (eConfigs.end_of_line === 'crlf') {
config.eol = '\r\n';
}
}
} catch (e) {
debug(e);
}
}
// var cli = require('js-beautify/cli'); cli.interpret();
var interpret = exports.interpret = function(argv, slice) {
var parsed;
try {
parsed = nopt(knownOpts, shortHands, argv, slice);
} catch (ex) {
usage(ex);
// console.error(ex);
// console.error('Run `' + getScriptName() + ' -h` for help.');
process.exit(1);
}
if (parsed.version) {
console.log(require('../../package.json').version);
process.exit(0);
} else if (parsed.help) {
usage();
process.exit(0);
}
var cfg;
var configRecursive = findRecursive(process.cwd(), '.jsbeautifyrc');
var configHome = verifyExists(path.join(getUserHome() || "", ".jsbeautifyrc"));
var configDefault = __dirname + '/../config/defaults.json';
try {
cfg = cc(
parsed,
cleanOptions(cc.env('jsbeautify_'), knownOpts),
parsed.config,
configRecursive,
configHome,
configDefault
).snapshot;
} catch (ex) {
debug(cfg);
// usage(ex);
console.error(ex);
console.error('Error while loading beautifier configuration.');
console.error('Configuration file chain included:');
if (parsed.config) {
console.error(parsed.config);
}
if (configRecursive) {
console.error(configRecursive);
}
if (configHome) {
console.error(configHome);
}
console.error(configDefault);
console.error('Run `' + getScriptName() + ' -h` for help.');
process.exit(1);
}
try {
// Verify arguments
checkType(cfg);
checkFiles(cfg);
debug(cfg);
// Process files synchronously to avoid EMFILE error
cfg.files.forEach(processInputSync, {
cfg: cfg
});
} catch (ex) {
debug(cfg);
// usage(ex);
console.error(ex);
console.error('Run `' + getScriptName() + ' -h` for help.');
process.exit(1);
}
};
// interpret args immediately when called as executable
if (require.main === module) {
interpret();
}
function usage(err) {
var scriptName = getScriptName();
var msg = [
scriptName + '@' + require('../../package.json').version,
'',
'CLI Options:',
' -f, --file Input file(s) (Pass \'-\' for stdin)',
' -r, --replace Write output in-place, replacing input',
' -o, --outfile Write output to file (default stdout)',
' --config Path to config file',
' --type [js|css|html] ["js"]',
' -q, --quiet Suppress logging to stdout',
' -h, --help Show this help',
' -v, --version Show the version',
'',
'Beautifier Options:',
' -s, --indent-size Indentation size [4]',
' -c, --indent-char Indentation character [" "]',
' -t, --indent-with-tabs Indent with tabs, overrides -s and -c',
' -e, --eol Character(s) to use as line terminators.',
' [first newline in file, otherwise "\\n"]',
' -n, --end-with-newline End output with newline',
' --editorconfig Use EditorConfig to set up the options'
];
switch (scriptName.split('-').shift()) {
case "js":
msg.push(' -l, --indent-level Initial indentation level [0]');
msg.push(' -p, --preserve-newlines Preserve line-breaks (--no-preserve-newlines disables)');
msg.push(' -m, --max-preserve-newlines Number of line-breaks to be preserved in one chunk [10]');
msg.push(' -P, --space-in-paren Add padding spaces within paren, ie. f( a, b )');
msg.push(' -Q, --space-in-empty-paren Add a single space inside empty paren, ie. f( )');
msg.push(' -j, --jslint-happy Enable jslint-stricter mode');
msg.push(' -a, --space-after-anon-function Add a space before an anonymous function\'s parens, ie. function ()');
msg.push(' --space_after_named_function Add a space before a named function\'s parens, ie. function example ()');
msg.push(' -b, --brace-style [collapse|expand|end-expand|none][,preserve-inline] [collapse,preserve-inline]');
msg.push(' -u, --unindent-chained-methods Don\'t indent chained method calls');
msg.push(' -B, --break-chained-methods Break chained method calls across subsequent lines');
msg.push(' -k, --keep-array-indentation Preserve array indentation');
msg.push(' -x, --unescape-strings Decode printable characters encoded in xNN notation');
msg.push(' -w, --wrap-line-length Wrap lines at next opportunity after N characters [0]');
msg.push(' -X, --e4x Pass E4X xml literals through untouched');
msg.push(' --good-stuff Warm the cockles of Crockford\'s heart');
msg.push(' -C, --comma-first Put commas at the beginning of new line instead of end');
msg.push(' -O, --operator-position Set operator position (before-newline|after-newline|preserve-newline) [before-newline]');
break;
case "html":
msg.push(' -b, --brace-style [collapse|expand|end-expand] ["collapse"]');
msg.push(' -I, --indent-inner-html Indent body and head sections. Default is false.');
msg.push(' -H, --indent-handlebars Indent handlebars. Default is false.');
msg.push(' -S, --indent-scripts [keep|separate|normal] ["normal"]');
msg.push(' -w, --wrap-line-length Wrap lines at next opportunity after N characters [0]');
msg.push(' -A, --wrap-attributes Wrap html tag attributes to new lines [auto|force|force-aligned|force-expand-multiline|aligned-multiple|preserve|preserve-aligned] ["auto"]');
msg.push(' -i, --wrap-attributes-indent-size Indent wrapped tags to after N characters [indent-level]');
msg.push(' -p, --preserve-newlines Preserve line-breaks (--no-preserve-newlines disables)');
msg.push(' -m, --max-preserve-newlines Number of line-breaks to be preserved in one chunk [10]');
msg.push(' -U, --unformatted List of tags (defaults to inline) that should not be reformatted');
msg.push(' -T, --content_unformatted List of tags (defaults to pre) whose content should not be reformatted');
msg.push(' -E, --extra_liners List of tags (defaults to [head,body,/html] that should have an extra newline');
break;
case "css":
msg.push(' -L, --selector-separator-newline Add a newline between multiple selectors.');
msg.push(' -N, --newline-between-rules Add a newline between CSS rules.');
}
if (err) {
msg.push(err);
msg.push('');
console.error(msg.join('\n'));
} else {
console.log(msg.join('\n'));
}
}
// main iterator, {cfg} passed as thisArg of forEach call
function processInputSync(filepath) {
var data = null,
config = this.cfg,
outfile = config.outfile,
input;
// -o passed with no value overwrites
if (outfile === true || config.replace) {
outfile = filepath;
}
var fileType = getOutputType(outfile, filepath, config.type);
if (config.editorconfig) {
var editorconfig_filepath = filepath;
if (editorconfig_filepath === '-') {
if (outfile) {
editorconfig_filepath = outfile;
} else {
editorconfig_filepath = 'stdin.' + fileType;
}
}
debug("EditorConfig is enabled for ", editorconfig_filepath);
config = cc(config).snapshot;
set_file_editorconfig_opts(editorconfig_filepath, config);
debug(config);
}
if (filepath === '-') {
input = process.stdin;
input.setEncoding('utf8');
input.on('error', function() {
throw 'Must pipe input or define at least one file.';
});
input.on('data', function(chunk) {
data = data || '';
data += chunk;
});
input.on('end', function() {
if (data === null) {
throw 'Must pipe input or define at least one file.';
}
makePretty(fileType, data, config, outfile, writePretty); // Where things get beautified
});
input.resume();
} else {
data = fs.readFileSync(filepath, 'utf8');
makePretty(fileType, data, config, outfile, writePretty);
}
}
function makePretty(fileType, code, config, outfile, callback) {
try {
var pretty = beautify[fileType](code, config);
callback(null, pretty, outfile, config);
} catch (ex) {
callback(ex);
}
}
function writePretty(err, pretty, outfile, config) {
debug('writing ' + outfile);
if (err) {
console.error(err);
process.exit(1);
}
if (outfile) {
mkdirp.sync(path.dirname(outfile));
if (isFileDifferent(outfile, pretty)) {
try {
fs.writeFileSync(outfile, pretty, 'utf8');
logToStdout('beautified ' + path.relative(process.cwd(), outfile), config);
} catch (ex) {
onOutputError(ex);
}
} else {
logToStdout('beautified ' + path.relative(process.cwd(), outfile) + ' - unchanged', config);
}
} else {
process.stdout.write(pretty);
}
}
function isFileDifferent(filePath, expected) {
try {
return fs.readFileSync(filePath, 'utf8') !== expected;
} catch (ex) {
// failing to read is the same as different
return true;
}
}
// workaround the fact that nopt.clean doesn't return the object passed in :P
function cleanOptions(data, types) {
nopt.clean(data, types);
return data;
}
// error handler for output stream that swallows errors silently,
// allowing the loop to continue over unwritable files.
function onOutputError(err) {
if (err.code === 'EACCES') {
console.error(err.path + " is not writable. Skipping!");
} else {
console.error(err);
process.exit(0);
}
}
// turn "--foo_bar" into "foo-bar"
function dasherizeFlag(str) {
return str.replace(/^\-+/, '').replace(/_/g, '-');
}
// translate weird python underscored keys into dashed argv,
// avoiding single character aliases.
function dasherizeShorthands(hash) {
// operate in-place
Object.keys(hash).forEach(function(key) {
// each key value is an array
var val = hash[key][0];
// only dasherize one-character shorthands
if (key.length === 1 && val.indexOf('_') > -1) {
hash[dasherizeFlag(val)] = val;
}
});
return hash;
}
function getOutputType(outfile, filepath, configType) {
if (outfile && /\.(js|css|html)$/.test(outfile)) {
return outfile.split('.').pop();
} else if (filepath !== '-' && /\.(js|css|html)$/.test(filepath)) {
return filepath.split('.').pop();
} else if (configType) {
return configType;
} else {
throw 'Could not determine appropriate beautifier from file paths: ' + filepath;
}
}
function getScriptName() {
return path.basename(process.argv[1]);
}
function checkType(parsed) {
var scriptType = getScriptName().split('-').shift();
if (!/^(js|css|html)$/.test(scriptType)) {
scriptType = null;
}
debug("executable type:", scriptType);
var parsedType = parsed.type;
debug("parsed type:", parsedType);
if (!parsedType) {
debug("type defaulted:", scriptType);
parsed.type = scriptType;
}
}
function checkFiles(parsed) {
var argv = parsed.argv;
var isTTY = true;
var file_params = parsed.files || [];
var hadGlob = false;
try {
isTTY = process.stdin.isTTY;
} catch (ex) {
debug("error querying for isTTY:", ex);
}
debug('isTTY: ' + isTTY);
// assume any remaining args are files
file_params = file_params.concat(argv.remain);
parsed.files = [];
// resolve each file parameter (plain path or glob) into absolute paths
file_params.forEach(function(f) {
// strip stdin path eagerly added by nopt in '-f -' case
if (f === '-') {
return;
}
var foundFiles = [];
var isGlob = glob.hasMagic(f);
// Input was a glob
if (isGlob) {
hadGlob = true;
foundFiles = glob(f, {
sync: true,
absolute: true,
ignore: ['**/node_modules/**', '**/.git/**']
});
} else {
// Input was not a glob, add it to an array so we are able to handle it in the same loop below
testFilePath(f);
foundFiles = [f];
}
if (foundFiles && foundFiles.length) {
// Add files to the parsed.files if it didn't exist in the array yet
foundFiles.forEach(function(file) {
var filePath = path.resolve(file);
if (parsed.files.indexOf(filePath) === -1) {
parsed.files.push(filePath);
}
});
}
});
if ('string' === typeof parsed.outfile && isTTY && !parsed.files.length) {
testFilePath(parsed.outfile);
// use outfile as input when no other files passed in args
parsed.files.push(parsed.outfile);
// operation is now an implicit overwrite
parsed.replace = true;
}
if (hadGlob || parsed.files.length > 1) {
parsed.replace = true;
}
if (!parsed.files.length && !hadGlob) {
// read stdin by default
parsed.files.push('-');
}
debug('files.length ' + parsed.files.length);
if (parsed.files.indexOf('-') > -1 && isTTY && !hadGlob) {
throw 'Must pipe input or define at least one file.';
}
return parsed;
}
function testFilePath(filepath) {
try {
if (filepath !== "-") {
fs.statSync(filepath);
}
} catch (err) {
throw 'Unable to open path "' + filepath + '"';
}
}
function logToStdout(str, config) {
if (typeof config.quiet === "undefined" || !config.quiet) {
console.log(str);
}
}
|
PypiClean
|
/multi-vector-simulator-1.0.6rc12.tar.gz/multi-vector-simulator-1.0.6rc12/src/multi_vector_simulator/F0_output.py
|
import json
import logging
import os
import pandas as pd
from multi_vector_simulator.B0_data_input_json import convert_from_special_types_to_json
from multi_vector_simulator.E1_process_results import get_units_of_cost_matrix_entries
import multi_vector_simulator.F1_plotting as F1_plots
try:
import multi_vector_simulator.F2_autoreport as autoreport
AUTOREPORT = True
except ModuleNotFoundError:
logging.warning("The reporting feature is disabled")
AUTOREPORT = False
from multi_vector_simulator.utils.constants import (
SIMULATION_SETTINGS,
PATH_OUTPUT_FOLDER,
OUTPUT_FOLDER,
LOGFILE,
PATHS_TO_PLOTS,
)
from multi_vector_simulator.utils.constants import (
JSON_WITH_RESULTS,
JSON_FILE_EXTENSION,
)
from multi_vector_simulator.utils.constants_json_strings import (
UNIT,
KPI,
OPTIMIZED_FLOWS,
DEMANDS,
RESOURCES,
LES_ENERGY_VECTOR_S,
KPI_SCALARS_DICT,
KPI_SCALAR_MATRIX,
KPI_COST_MATRIX,
PROJECT_DATA,
ECONOMIC_DATA,
SIMULATION_RESULTS,
LOGS,
ERRORS,
WARNINGS,
FIX_COST,
ENERGY_BUSSES,
)
def evaluate_dict(dict_values, path_pdf_report=None, path_png_figs=None):
"""This is the main function of F0. It calls all functions that prepare the simulation output, ie. Storing all simulation output into excellent files, bar charts, and graphs.
Parameters
----------
dict_values :
dict Of all input and output parameters up to F0
path_pdf_report : (str)
if provided, generate a pdf report of the simulation to the given path
path_png_figs : (str)
if provided, generate png figures of the simulation's results to the given path
Returns
-------
type
NA
"""
logging.info(
"Summarizing simulation results to results_timeseries and results_scalars_assets."
)
parse_simulation_log(
path_log_file=os.path.join(
dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER], LOGFILE
),
dict_values=dict_values,
)
# storing all flows to Excel.
store_timeseries_all_busses_to_excel(dict_values)
# Write everything to file with multiple tabs
store_scalars_to_excel(dict_values)
store_as_json(
dict_values,
dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER],
JSON_WITH_RESULTS,
)
# generate png figures
if path_png_figs is not None:
# plot demand timeseries
F1_plots.plot_timeseries(
dict_values, data_type=DEMANDS, file_path=path_png_figs
)
# plot demand timeseries for the first 2 weeks only
F1_plots.plot_timeseries(
dict_values, data_type=DEMANDS, max_days=14, file_path=path_png_figs
)
# plot supply timeseries
F1_plots.plot_timeseries(
dict_values, data_type=RESOURCES, file_path=path_png_figs
)
# plot supply timeseries for the first 2 weeks only
F1_plots.plot_timeseries(
dict_values, data_type=RESOURCES, max_days=14, file_path=path_png_figs
)
# plot power flows in the energy system
F1_plots.plot_instant_power(dict_values, file_path=path_png_figs)
# plot optimal capacities if there are optimized assets
F1_plots.plot_optimized_capacities(dict_values, file_path=path_png_figs)
# plot annuity, first-investment and om costs
F1_plots.plot_piecharts_of_costs(dict_values, file_path=path_png_figs)
# generate a pdf report
if path_pdf_report is not None:
app = autoreport.create_app(dict_values)
autoreport.print_pdf(app, path_pdf_report=path_pdf_report)
logging.info(
"Generating PDF report of the simulation: {}".format(path_pdf_report)
)
def store_scalars_to_excel(dict_values):
"""All output data that is a scalar is storage to an excellent file tab. This could for example be economical data or technical data.
Parameters
----------
dict_values :
dict Of all input and output parameters up to F0
Returns
-------
type
Excel file with scalar data
"""
results_scalar_output_file = "/scalars" + ".xlsx"
with pd.ExcelWriter(
dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER]
+ results_scalar_output_file
) as open_file: # doctest: +SKIP
for kpi_set in dict_values[KPI]:
if isinstance(dict_values[KPI][kpi_set], dict):
data = pd.DataFrame([dict_values[KPI][kpi_set]])
else:
data = dict_values[KPI][kpi_set]
# Transpose results and add units to the entries
if kpi_set == KPI_SCALARS_DICT:
data = data.transpose()
units_cost_kpi = get_units_of_cost_matrix_entries(
dict_values[ECONOMIC_DATA], dict_values[KPI][kpi_set]
)
data[UNIT] = units_cost_kpi
data.to_excel(open_file, sheet_name=kpi_set)
logging.debug(
"Saved scalar results to: %s, tab %s.",
results_scalar_output_file,
kpi_set,
)
def store_timeseries_all_busses_to_excel(dict_values):
"""This function plots the energy flows of each single bus and the energy system and saves it as PNG and additionally as a tab and an Excel sheet.
Parameters
----------
dict_values :
dict Of all input and output parameters up to F0
Returns
-------
type
Excel file with all timeseries of each bus
"""
timeseries_output_file = "/timeseries_all_busses" + ".xlsx"
with pd.ExcelWriter(
dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER] + timeseries_output_file
) as open_file: # doctest: +SKIP
for bus in dict_values[OPTIMIZED_FLOWS]:
dict_values[OPTIMIZED_FLOWS][bus].to_excel(open_file, sheet_name=bus)
logging.debug("Saved flows at busses to: %s.", timeseries_output_file)
def parse_simulation_log(path_log_file, dict_values):
"""Gather a log file with several log messages, this function gathers them all and inputs them into the dict with
all input and output parameters up to F0
Parameters
----------
path_log_file: str/None
path to the mvs log file
Default: None
dict_values :
dict Of all input and output parameters up to F0
Returns
-------
Updates the results dictionary with the log messages of the simulation
Notes
-----
This function is tested with:
- test_F0_output.TestLogCreation.test_parse_simulation_log
"""
# Dictionaries to gather non-fatal warning and error messages that appear during the simulation
error_dict, warning_dict = {}, {}
if path_log_file is None:
path_log_file = os.path.join(OUTPUT_FOLDER, LOGFILE)
with open(path_log_file) as log_messages:
log_messages = log_messages.readlines()
i = j = 0
# Loop through the list of lines of the log file to check for the relevant log messages and gather them in dicts
for line in log_messages:
if "ERROR" in line:
i = i + 1
substrings = line.split(" - ")
message_string = substrings[-1]
error_dict.update({i: message_string})
elif "WARNING" in line:
j = j + 1
substrings = line.split(" - ")
message_string = substrings[-1]
warning_dict.update({j: message_string})
log_dict = {ERRORS: error_dict, WARNINGS: warning_dict}
dict_values[SIMULATION_RESULTS].update({LOGS: log_dict})
def store_as_json(dict_values, output_folder=None, file_name=None):
"""Converts dict_values to JSON format and saves dict_values as a JSON file or return json
Parameters
----------
dict_values : (dict)
dict to be stored as json
output_folder : (path)
Folder into which json should be stored
Default None
file_name : (str)
Name of the file the json should be stored as
Default None
Returns
-------
If file_name is provided, the json variable converted from the dict_values is saved under
this file_name, otherwise the json variable is returned
"""
json_data = json.dumps(
dict_values,
skipkeys=False,
sort_keys=True,
default=convert_from_special_types_to_json,
indent=4,
)
if file_name is not None:
file_path = os.path.abspath(os.path.join(output_folder, file_name + ".json"))
if os.path.exists(os.path.dirname(file_path)):
myfile = open(file_path, "w")
myfile.write(json_data)
myfile.close()
logging.info(
"Converted and stored processed simulation data to json: %s", file_path
)
answer = file_path
else:
answer = None
else:
answer = json_data
return answer
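# Illustrative usage sketch (not part of the original module): store_as_json
# either returns the serialized string or writes a file, depending on whether
# file_name is given. The dictionary below is a stand-in for the full
# simulation results dict.
if __name__ == "__main__":
    demo_results = {"project_data": {"project_name": "demo"}}
    # No file_name: the JSON string is returned.
    print(store_as_json(demo_results))
    # With output_folder and file_name: writes ./results.json and returns its path.
    print(store_as_json(demo_results, output_folder=".", file_name="results"))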
|
PypiClean
|
/realms-wiki-0.9.3.tar.gz/realms-wiki-0.9.3/realms/static/vendor/ace-builds/src-min-noconflict/mode-dockerfile.js
|
ace.define("ace/mode/sh_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text_highlight_rules").TextHighlightRules,s=t.reservedKeywords="!|{|}|case|do|done|elif|else|esac|fi|for|if|in|then|until|while|&|;|export|local|read|typeset|unset|elif|select|set|function|declare|readonly",o=t.languageConstructs="[|]|alias|bg|bind|break|builtin|cd|command|compgen|complete|continue|dirs|disown|echo|enable|eval|exec|exit|fc|fg|getopts|hash|help|history|jobs|kill|let|logout|popd|printf|pushd|pwd|return|set|shift|shopt|source|suspend|test|times|trap|type|ulimit|umask|unalias|wait",u=function(){var e=this.createKeywordMapper({keyword:s,"support.function.builtin":o,"invalid.deprecated":"debugger"},"identifier"),t="(?:(?:[1-9]\\d*)|(?:0))",n="(?:\\.\\d+)",r="(?:\\d+)",i="(?:(?:"+r+"?"+n+")|(?:"+r+"\\.))",u="(?:(?:"+i+"|"+r+")"+")",a="(?:"+u+"|"+i+")",f="(?:&"+r+")",l="[a-zA-Z_][a-zA-Z0-9_]*",c="(?:"+l+"=)",h="(?:\\$(?:SHLVL|\\$|\\!|\\?))",p="(?:"+l+"\\s*\\(\\))";this.$rules={start:[{token:"constant",regex:/\\./},{token:["text","comment"],regex:/(^|\s)(#.*)$/},{token:"string.start",regex:'"',push:[{token:"constant.language.escape",regex:/\\(?:[$`"\\]|$)/},{include:"variables"},{token:"keyword.operator",regex:/`/},{token:"string.end",regex:'"',next:"pop"},{defaultToken:"string"}]},{token:"string",regex:"\\$'",push:[{token:"constant.language.escape",regex:/\\(?:[abeEfnrtv\\'"]|x[a-fA-F\d]{1,2}|u[a-fA-F\d]{4}([a-fA-F\d]{4})?|c.|\d{1,3})/},{token:"string",regex:"'",next:"pop"},{defaultToken:"string"}]},{regex:"<<<",token:"keyword.operator"},{stateName:"heredoc",regex:"(<<-?)(\\s*)(['\"`]?)([\\w\\-]+)(['\"`]?)",onMatch:function(e,t,n){var r=e[2]=="-"?"indentedHeredoc":"heredoc",i=e.split(this.splitRegex);return n.push(r,i[4]),[{type:"constant",value:i[1]},{type:"text",value:i[2]},{type:"string",value:i[3]},{type:"support.class",value:i[4]},{type:"string",value:i[5]}]},rules:{heredoc:[{onMatch:function(e,t,n){return e===n[1]?(n.shift(),n.shift(),this.next=n[0]||"start","support.class"):(this.next="","string")},regex:".*$",next:"start"}],indentedHeredoc:[{token:"string",regex:"^ +"},{onMatch:function(e,t,n){return e===n[1]?(n.shift(),n.shift(),this.next=n[0]||"start","support.class"):(this.next="","string")},regex:".*$",next:"start"}]}},{regex:"$",token:"empty",next:function(e,t){return 
t[0]==="heredoc"||t[0]==="indentedHeredoc"?t[0]:e}},{token:["keyword","text","text","text","variable"],regex:/(declare|local|readonly)(\s+)(?:(-[fixar]+)(\s+))?([a-zA-Z_][a-zA-Z0-9_]*\b)/},{token:"variable.language",regex:h},{token:"variable",regex:c},{include:"variables"},{token:"support.function",regex:p},{token:"support.function",regex:f},{token:"string",start:"'",end:"'"},{token:"constant.numeric",regex:a},{token:"constant.numeric",regex:t+"\\b"},{token:e,regex:"[a-zA-Z_][a-zA-Z0-9_]*\\b"},{token:"keyword.operator",regex:"\\+|\\-|\\*|\\*\\*|\\/|\\/\\/|~|<|>|<=|=>|=|!=|[%&|`]"},{token:"punctuation.operator",regex:";"},{token:"paren.lparen",regex:"[\\[\\(\\{]"},{token:"paren.rparen",regex:"[\\]]"},{token:"paren.rparen",regex:"[\\)\\}]",next:"pop"}],variables:[{token:"variable",regex:/(\$)(\w+)/},{token:["variable","paren.lparen"],regex:/(\$)(\()/,push:"start"},{token:["variable","paren.lparen","keyword.operator","variable","keyword.operator"],regex:/(\$)(\{)([#!]?)(\w+|[*@#?\-$!0_])(:[?+\-=]?|##?|%%?|,,?\/|\^\^?)?/,push:"start"},{token:"variable",regex:/\$[*@#?\-$!0_]/},{token:["variable","paren.lparen"],regex:/(\$)(\{)/,push:"start"}]},this.normalizeRules()};r.inherits(u,i),t.ShHighlightRules=u}),ace.define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"],function(e,t,n){"use strict";var r=e("../../lib/oop"),i=e("../../range").Range,s=e("./fold_mode").FoldMode,o=t.FoldMode=function(e){e&&(this.foldingStartMarker=new RegExp(this.foldingStartMarker.source.replace(/\|[^|]*?$/,"|"+e.start)),this.foldingStopMarker=new RegExp(this.foldingStopMarker.source.replace(/\|[^|]*?$/,"|"+e.end)))};r.inherits(o,s),function(){this.foldingStartMarker=/(\{|\[)[^\}\]]*$|^\s*(\/\*)/,this.foldingStopMarker=/^[^\[\{]*(\}|\])|^[\s\*]*(\*\/)/,this.singleLineBlockCommentRe=/^\s*(\/\*).*\*\/\s*$/,this.tripleStarBlockCommentRe=/^\s*(\/\*\*\*).*\*\/\s*$/,this.startRegionRe=/^\s*(\/\*|\/\/)#?region\b/,this._getFoldWidgetBase=this.getFoldWidget,this.getFoldWidget=function(e,t,n){var r=e.getLine(n);if(this.singleLineBlockCommentRe.test(r)&&!this.startRegionRe.test(r)&&!this.tripleStarBlockCommentRe.test(r))return"";var i=this._getFoldWidgetBase(e,t,n);return!i&&this.startRegionRe.test(r)?"start":i},this.getFoldWidgetRange=function(e,t,n,r){var i=e.getLine(n);if(this.startRegionRe.test(i))return this.getCommentRegionBlock(e,i,n);var s=i.match(this.foldingStartMarker);if(s){var o=s.index;if(s[1])return this.openingBracketBlock(e,s[1],n,o);var u=e.getCommentFoldRange(n,o+s[0].length,1);return u&&!u.isMultiLine()&&(r?u=this.getSectionRange(e,n):t!="all"&&(u=null)),u}if(t==="markbegin")return;var s=i.match(this.foldingStopMarker);if(s){var o=s.index+s[0].length;return s[1]?this.closingBracketBlock(e,s[1],n,o):e.getCommentFoldRange(n,o,-1)}},this.getSectionRange=function(e,t){var n=e.getLine(t),r=n.search(/\S/),s=t,o=n.length;t+=1;var u=t,a=e.getLength();while(++t<a){n=e.getLine(t);var f=n.search(/\S/);if(f===-1)continue;if(r>f)break;var l=this.getFoldWidgetRange(e,"all",t);if(l){if(l.start.row<=s)break;if(l.isMultiLine())t=l.end.row;else if(r==f)break}u=t}return new i(s,o,u,e.getLine(u).length)},this.getCommentRegionBlock=function(e,t,n){var r=t.search(/\s*$/),s=e.getLength(),o=n,u=/^\s*(?:\/\*|\/\/|--)#?(end)?region\b/,a=1;while(++n<s){t=e.getLine(n);var f=u.exec(t);if(!f)continue;f[1]?a--:a++;if(!a)break}var l=n;if(l>o)return new 
i(o,r,l,t.length)}}.call(o.prototype)}),ace.define("ace/mode/sh",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/sh_highlight_rules","ace/range","ace/mode/folding/cstyle","ace/mode/behaviour/cstyle"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text").Mode,s=e("./sh_highlight_rules").ShHighlightRules,o=e("../range").Range,u=e("./folding/cstyle").FoldMode,a=e("./behaviour/cstyle").CstyleBehaviour,f=function(){this.HighlightRules=s,this.foldingRules=new u,this.$behaviour=new a};r.inherits(f,i),function(){this.lineCommentStart="#",this.getNextLineIndent=function(e,t,n){var r=this.$getIndent(t),i=this.getTokenizer().getLineTokens(t,e),s=i.tokens;if(s.length&&s[s.length-1].type=="comment")return r;if(e=="start"){var o=t.match(/^.*[\{\(\[:]\s*$/);o&&(r+=n)}return r};var e={pass:1,"return":1,raise:1,"break":1,"continue":1};this.checkOutdent=function(t,n,r){if(r!=="\r\n"&&r!=="\r"&&r!=="\n")return!1;var i=this.getTokenizer().getLineTokens(n.trim(),t).tokens;if(!i)return!1;do var s=i.pop();while(s&&(s.type=="comment"||s.type=="text"&&s.value.match(/^\s+$/)));return s?s.type=="keyword"&&e[s.value]:!1},this.autoOutdent=function(e,t,n){n+=1;var r=this.$getIndent(t.getLine(n)),i=t.getTabString();r.slice(-i.length)==i&&t.remove(new o(n,r.length-i.length,n,r.length))},this.$id="ace/mode/sh"}.call(f.prototype),t.Mode=f}),ace.define("ace/mode/dockerfile_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/sh_highlight_rules"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./sh_highlight_rules").ShHighlightRules,s=function(){i.call(this);var e=this.$rules.start;for(var t=0;t<e.length;t++)if(e[t].token=="variable.language"){e.splice(t,0,{token:"constant.language",regex:"(?:^(?:FROM|MAINTAINER|RUN|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|VOLUME|USER|WORKDIR|ONBUILD|COPY|LABEL)\\b)",caseInsensitive:!0});break}};r.inherits(s,i),t.DockerfileHighlightRules=s}),ace.define("ace/mode/dockerfile",["require","exports","module","ace/lib/oop","ace/mode/sh","ace/mode/dockerfile_highlight_rules","ace/mode/folding/cstyle"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./sh").Mode,s=e("./dockerfile_highlight_rules").DockerfileHighlightRules,o=e("./folding/cstyle").FoldMode,u=function(){i.call(this),this.HighlightRules=s,this.foldingRules=new o};r.inherits(u,i),function(){this.$id="ace/mode/dockerfile"}.call(u.prototype),t.Mode=u})
|
PypiClean
|
/ogdp_apis-0.1-py3-none-any.whl/ogdp_apis/aqi/aqi.py
|
import requests
import json
class aqiData:
def __init__(self, apiKey, *args):
self.apiPath = "https://api.data.gov.in/resource/3b01bcb8-0b14-4abf-b6f2-c1bfd384ba69"
self.session = requests.Session()
self.session.params = {
"api-key": apiKey,
"format": "json",
"limit": 10000,
"fields": ""
}
self.states = {
"AP": "Andhra_Pradesh",
"AS": "Assam",
"BR": "Bihar",
"CH": "Chandigarh",
"CT": "Chhattisgarh",
"DL": "Delhi",
"GA": "Goa",
"GJ": "Gujarat",
"HR": "Haryana",
"JH": "Jharkhand",
"KA": "Karnataka",
"KL": "Kerala",
"LK": "Lakshadweep",
"MP": "Madhya Pradesh",
"MH": "Maharashtra",
"MN": "Manipur",
"ML": "Meghalaya",
"MZ": "Mizoram",
"OR": "Odisha",
"PY": "Puducherry",
"PB": "Punjab",
"RJ": "Rajasthan",
"SK": "Sikkim",
"TN": "TamilNadu",
"TG": "Telangana",
"TP": "Tripura",
"UP": "Uttar_Pradesh",
"UK": "Uttarakhand",
"WB": "West_Bengal"
}
def get_data(self, fields=['state','city','station','last_update','pollutant_id','pollutant_min','pollutant_max','pollutant_avg']):
"""
Query API for data in raw form
"""
self.session.params["fields"] = ','.join(fields)
request = self.session.get(self.apiPath)
return json.loads(request.content)
def filter_data(self, data, filter={}):
"""
Fetch data that matches the values specified in the dict 'filter'
filter={'city': 'Mumbai', 'pollutant_id': 'NO2'}
"""
data = data['records']
filteredData = []
for x in range(len(data)):
for key in filter:
if data[x][key] == filter[key]:
filteredData.append(data[x])
return filteredData
def format_data(self, data):
"""
Condense data for each station into one list item in the
following format:
{'city': 'Aurangabad',
'last_update': '29-07-2020 09:00:00',
'pollutants': {'CO': {'avg': '38', 'max': '80', 'min': '3'},
'NH3': {'avg': '3', 'max': '3', 'min': '3'},
'NO2': {'avg': '30', 'max': '33', 'min': '27'},
'OZONE': {'avg': '10', 'max': '33', 'min': '7'},
'PM10': {'avg': '36', 'max': '49', 'min': '31'},
'SO2': {'avg': '15', 'max': '23', 'min': '8'}},
'state': 'Maharashtra',
'station': 'More Chowk Waluj, Aurangabad - MPCB'}
"""
# Create list of stations and organize pollutant values
station_list = []
for x in range(len(data)):
data[x][data[x]["pollutant_id"]] = {
"min": data[x].pop('pollutant_min'),
"max": data[x].pop('pollutant_max'),
"avg": data[x].pop('pollutant_avg')
}
if station_list.count(data[x]['station']) == 0:
station_list.append(data[x]['station'])
# Count number of pollutants listed under each station
station_map = {}
for x in range(len(station_list)):
station_map[station_list[x]] = 0
for x in range(len(data)):
if data[x]['station'] in station_list:
station_map[data[x]['station']] += 1
# Format to final form
formatted_data = []
pollutants = {}
for key in station_map:
for x in range(station_map[key]):
for pollutant in data[x]:
if pollutant in ['PM2.5', 'PM10', 'NO2', 'NH3', 'SO2', 'CO', 'OZONE']:
pollutants[pollutant] = data[x][pollutant]
data[0]['pollutants'] = pollutants
pollutants = {}
formatted_data.append(data[0])
for x in range(station_map[key]):
data.pop(0)
for x in range(len(station_list)):
formatted_data[x].pop(formatted_data[x]['pollutant_id'])
formatted_data[x].pop('pollutant_id')
# Fix state names
for x in range(len(formatted_data)):
formatted_data[x]['state'] = formatted_data[x]['state'].replace("_", " ")
formatted_data[x]['state'] = formatted_data[x]['state'].replace("TamilNadu", "Tamil Nadu")
# Return formatted data
return formatted_data
def state(self, state):
"""
Fetch a formatted list of data from stations in the specified state
"""
data = self.get_data()
data = self.filter_data(data, filter={'state': self.states[state]})
data = self.format_data(data)
return data
def city(self, city):
"""
Fetch a formatted list of data from stations in the specified city
"""
data = self.get_data()
data = self.filter_data(data, filter={'city': city})
data = self.format_data(data)
return data
def station(self, station):
"""
Fetch a formatted list of data from the specified station
"""
data = self.get_data()
data = self.filter_data(data, filter={'station': station})
data = self.format_data(data)
return data
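# Usage sketch (illustrative only, not part of the original module). `client` is a
# hypothetical instance of the wrapper class defined above, assumed to be constructed
# with an API key for the underlying government AQI endpoint:
#
#     client.state('MH')      # formatted readings for all stations in Maharashtra
#     client.city('Mumbai')   # formatted readings for all stations in Mumbai
#     client.station('More Chowk Waluj, Aurangabad - MPCB')   # a single station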
|
PypiClean
|
/applepy-ui-0.0.5.tar.gz/applepy-ui-0.0.5/README.md
|
# applepy
Applepy is a UI library inspired by SwiftUI that leverages AppKit and UIKit to create native macOS, iOS, and iPadOS user interfaces in Python with a declarative syntax.
*This project is at proof of concept stage and is not feature complete. Please do not use it in production.*
## Dependencies
* [rubicon-objc](https://github.com/beeware/rubicon-objc)
* [pydispatcher](https://github.com/mcfletch/pydispatcher)
## Installation
The latest version is available for installation from PyPI:
>pip install applepy-ui
## Usage
```python
class Sample(App):
def body(self) -> Scene:
with Window(title='Applepy example', size=Size(640, 100)) as w:
with VerticalStack():
with HorizontalStack():
Label(text='Hello')
Label(text='World')
return w.center()
Sample().run()
```

It also works on mobile:
```python
class MobileSample(App):
def body(self):
with SimpleScreen() as s:
with HorizontalStack():
with VerticalStack():
Label(text='Hello World')
return s
def main():
sys.exit(MobileSample().run())
```
<img src="https://github.com/eduardohleite/applepy/blob/master/screenshot-mobile.png" height="600">
Events can be handled synchronously and asynchronously in the same context:
```python
class AsyncSample(App):
def clicked(self) -> None:
time.sleep(2)
Alert.show_info(informative_text='Hello', message_text='Synchronous World')
async def clicked_async(self) -> None:
await asyncio.sleep(2)
Alert.show_info(informative_text='Hello', message_text='Asynchronous World')
def body(self) -> Scene:
with Window(title='Applepy async example', size=Size(640,480)) as w:
with HorizontalStack():
with VerticalStack():
Button(title='Sync action', action=self.clicked)
Button(title='Async action', action=self.clicked_async)
return w.center()
AsyncSample().run_async()
```
For a more complete example, please check [example.py](example.py)
|
PypiClean
|
/prueba_paquete-0.1.7-py3-none-any.whl/prueba_paquete/classification.py
|
import collections
from nltk import NaiveBayesClassifier, DecisionTreeClassifier
from nltk.metrics import precision, recall, f_measure
from nltk.classify import apply_features, accuracy
from nltk.classify.scikitlearn import SklearnClassifier
from prueba_paquete.utils import clean_html_tags, shuffled, tokenize_and_stem
from prueba_paquete.concept_extraction import ConceptExtractor
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
class DocumentClassifier():
'''
Train a classifier with labeled documents and classify new documents
into one of the labeled classes.
We call 'dev docs' the set of documents provided for training the
classifier. These 'dev docs' are split into two subsets: 'train docs'
and 'test docs', which are used to train and test the machine learning
model respectively.
Parameters
----------
train_p : float, 0.8 by default
The proportion of the 'dev docs' used as 'train docs'.
Use values greater than 0 and lower than 1.
The remaining docs will be used as 'test docs'.
eq_label_num : boolean, True by default
If True, 'train docs' will have an equal number of documents for each
class. This number will be the lowest label count.
complete_p : boolean, True by default
Used when eq_label_num is True, but the lowest label count is not
enough to reach the train_p proportion of 'train docs'. If this
attribute is True, more documents from 'test docs' will be moved
to 'train docs' until the train_p proportion is reached.
n_folds : integer, 10 by default
Number of folds to be used in k-fold cross validation technique for
choosing different sets as 'train docs'
vocab_size : integer, 250 by default
This is the size of the vocabulary set that will be used for extracting
features out of the docs
t_classifier : string, 'NB' by default
This is the type of classifier model used. Available types are 'NB'
(Naive Bayes), 'DT' (decision tree), 'RF' (Random Forest), and 'SVM'
(Support Vector Machine)
language : string, 'english' by default
Language in which the documents are written
stem : boolean, False by default
If True, vocabulary words are stemmed before being used as features
'''
def __init__(self, train_p=0.8, eq_label_num=True,
complete_p=True, n_folds=10,
vocab_size=250,
t_classifier="NB", language="english",
stem=False):
self.train_p = train_p
self.eq_label_num = eq_label_num
self.complete_p = complete_p
self.n_folds = n_folds
self.vocab_size = vocab_size
self.t_classifier = t_classifier
self.language = language
self.stem = stem
self._vocab = []
self._classified_docs = []
self._classifier = None
self._accuracy = 0
self._precision = {}
self._recall = {}
self._f_measure = {}
self._train_docs = []
self._test_docs = []
def split_train_and_test(self, docs):
'''
Split the 'dev docs' set into the 'train docs' and 'test docs' subsets
Parameters
----------
docs: iterable
An iterable which yields a list of strings
'''
categories_count = self.count_categories(docs)
label_limit = min([c for (k,c) in categories_count.items()])
labeled_docs = {}
train_docs = []
test_docs = []
# Split docs by label
for (cat,count) in categories_count.items():
labeled_docs[cat] = shuffled([t for (t,k) in docs if k == cat])
if self.eq_label_num:
# Select the same number of doc for all labels
for cat, cat_docs in labeled_docs.items():
cat_limit = label_limit
cat_train_docs = cat_docs[:cat_limit]
cat_test_docs = cat_docs[cat_limit:]
train_docs += [(doc, cat) for doc in cat_train_docs]
test_docs += [(doc, cat) for doc in cat_test_docs]
l_train = len(train_docs)
l_docs = len(docs)
l_test = len(test_docs)
actual_p = l_train / l_docs
# If the training proportion has not been reached, move extra docs from 'test docs' to 'train docs'
if self.complete_p == True and actual_p < self.train_p:
shuffled_extra = shuffled(test_docs)
extra_i = 0
while(actual_p < self.train_p and extra_i < l_test):
aux_l_train = l_train + extra_i
actual_p = aux_l_train / l_docs
extra_i += 1
train_docs += shuffled_extra[:extra_i]
test_docs = shuffled_extra[extra_i:]
else:
label_limit = int(self.train_p * len(docs))
shuffled_docs = shuffled(docs)
train_docs = shuffled_docs[:label_limit]
test_docs = shuffled_docs[label_limit:]
self._train_docs = train_docs
self._test_docs = test_docs
def cross_validation_train(self, dev_docs):
'''
Applies k-fold cross validation technique to split the docs into different
pairs of training and testing sets. For each pair, it trains and evaluates
a classifier, keeping the one with the best accuracy.
Parameters
----------
dev_docs: iterable
An iterable which yields a list of strings
'''
dev_docs = shuffled(dev_docs)
accuracies = []
best_accuracy = 0
subset_size = int(len(dev_docs)/self.n_folds)
for i in range(self.n_folds):
classifier_list = []
train_docs = (dev_docs[(i + 1) * subset_size:] + \
dev_docs[:i * subset_size])
test_docs = dev_docs[i * subset_size:(i + 1) * subset_size]
train_set = apply_features(self.get_doc_features, train_docs)
if self.t_classifier == "NB":
classifier = NaiveBayesClassifier.train(train_set)
elif self.t_classifier == "DT":
classifier = DecisionTreeClassifier.train(train_set)
elif self.t_classifier == "RF":
classifier = SklearnClassifier(RandomForestClassifier())\
.train(train_set)
elif self.t_classifier == "SVM":
classifier = SklearnClassifier(LinearSVC(), sparse=False)\
.train(train_set)
classifier_list.append(classifier)
test_set = apply_features(self.get_doc_features, test_docs, True)
accuracies.append((accuracy(classifier, test_set)) * 100)
if accuracies[-1] > best_accuracy:
best_accuracy = accuracies[-1]
self._classifier = classifier
self._train_docs = train_docs
self._test_docs = test_docs
def equitative_class_train(self, dev_docs):
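'''
Split 'dev docs' per class, keeping the train_p proportion of each class
as 'train docs', then train the classifier selected by t_classifier.
Parameters
----------
dev_docs: iterable
An iterable which yields a list of strings
'''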
categories_count = self.count_categories(dev_docs)
labeled_docs = {}
for (cat,count) in categories_count.items():
labeled_docs[cat] = shuffled([t for (t,k) in dev_docs if k == cat])
train_docs = []
test_docs = []
for cat, l in labeled_docs.items():
cat_limit = int(self.train_p * len(l))
train_docs += [(t, cat) for t in l[:cat_limit]]
test_docs += [(t, cat) for t in l[cat_limit:]]
self._train_docs = train_docs
self._test_docs = test_docs
# print("len dev docs", len(dev_docs))
# print("categories count", categories_count)
# print("count train", self.count_categories(train_docs))
# print("count test", self.count_categories(test_docs))
# split dev docs and create training and test set
# self.split_train_and_test(dev_docs)
train_set = apply_features(self.get_doc_features, self._train_docs)
# create and train the classification model according to t_classifier
if self.t_classifier == "NB":
self._classifier = NaiveBayesClassifier.train(train_set)
elif self.t_classifier == "DT":
self._classifier = DecisionTreeClassifier.train(train_set)
elif self.t_classifier == "RF":
self._classifier = SklearnClassifier(RandomForestClassifier())\
.train(train_set)
elif self.t_classifier == "SVM":
self._classifier = SklearnClassifier(LinearSVC(), sparse=False)\
.train(train_set)
def count_categories(self, docs):
'''
Count how many documents of each class are in the 'dev docs' set
Parameters
----------
docs: iterable
An iterable which yields a list of strings
Returns
-------
counters: dictionary
A dictionary where each item is the number of docs for a class
'''
categories = set([c for (t,c) in docs])
counters = {}
for cat in categories:
counters[cat] = 0
for (text, cat) in docs:
counters[cat] += 1
self._categories = sorted(categories)
return counters
def get_doc_features(self, doc):
'''
Extract features of a document, checking the presence of the words
in the vocabulary
Parameters
----------
doc: string
The doc from which features will be extracted
Returns
-------
features: dictionary
A dictionary where each item indicates the presence of a
word from the vocabulary in the input doc
'''
features = {}
for word in self._vocab:
features['contains({})'.format(word)] = (word in doc)
return features
def train_classifier(self, dev_docs):
'''
Create the features vocabulary from 'dev docs',
Split 'dev docs', train the classifier with 'train docs',
Evaluate accuracy with 'test docs'
Parameters
----------
dev_docs: iterable
An iterable which yields a list of strings
'''
# create vocabulary for feature extraction
ce = ConceptExtractor(num_concepts=self.vocab_size,
language=self.language)
ce.extract_concepts([t for (t,c) in dev_docs])
self._vocab = sorted([c for (c,f) in ce.common_concepts], key=str.lower)
if (self.stem):
self._vocab = [tokenize_and_stem(w, language=self.language)[0] \
for w in self._vocab]
# self.cross_validation_train(dev_docs)
self.equitative_class_train(dev_docs)
def eval_classifier(self):
'''
Test the model and calculate the metrics of accuracy, precision,
recall and f-measure
'''
test_set = apply_features(self.get_doc_features, self._test_docs, True)
self._accuracy = accuracy(self._classifier, test_set)
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)
for i, (feats, label) in enumerate(test_set):
refsets[label].add(i)
observed = self._classifier.classify(feats)
testsets[observed].add(i)
self.count_categories(self._train_docs)
for cat in self._categories:
self._precision[cat] = precision(refsets[cat], testsets[cat])
self._recall[cat] = recall(refsets[cat], testsets[cat])
self._f_measure[cat] = f_measure(refsets[cat], testsets[cat])
def classify_docs(self, docs):
'''
First trains the classifier with the labeled data,
then classifies the unlabeled data.
Parameters
----------
docs: iterable
An iterable which yields a list of strings
'''
dev_docs = [(t, c) for (t, c) in docs if c!=""]
unlabeled_docs = [t for (t, c) in docs if c==""]
self.train_classifier(dev_docs)
self.eval_classifier()
results = []
for doc in unlabeled_docs:
doc_feats = self.get_doc_features(doc)
result = self._classifier.classify(doc_feats)
results.append((doc, result))
self._classified_docs = results
self._final_cat_count = self.count_categories(dev_docs+results)
@property
def classified_docs(self):
return self._classified_docs
@property
def accuracy(self):
return self._accuracy
@property
def precision(self):
return self._precision
@property
def recall(self):
return self._recall
@property
def f_measure(self):
return self._f_measure
@property
def category_count(self):
return self._final_cat_count
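# Usage sketch (illustrative only, not part of the original module; the example
# documents below are hypothetical):
#
#     docs = [("goal scored in the final minute", "sports"),
#             ("parliament passed the new bill", "politics"),
#             ("text still waiting for a label", "")]
#     dc = DocumentClassifier(t_classifier="NB", vocab_size=250)
#     dc.classify_docs(docs)    # trains on the labeled docs, then labels the rest
#     print(dc.accuracy, dc.classified_docs)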
|
PypiClean
|
/sanic_security-1.11.7-py3-none-any.whl/sanic_security/exceptions.py
|
from sanic.exceptions import SanicException
from sanic_security.utils import json
"""
An effective, simple, and async security library for the Sanic framework.
Copyright (C) 2020-present Aidan Stewart
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
class SecurityError(SanicException):
"""
Sanic Security related error.
Attributes:
json (HTTPResponse): Security error json response.
Args:
message (str): Human readable error message.
code (int): HTTP error code.
"""
def __init__(self, message: str, code: int):
self.json = json(message, self.__class__.__name__, code)
super().__init__(message, code)
class NotFoundError(SecurityError):
"""
Raised when a resource cannot be found.
"""
def __init__(self, message):
super().__init__(message, 404)
class DeletedError(SecurityError):
"""
Raised when attempting to access a deleted resource.
"""
def __init__(self, message):
super().__init__(message, 410)
class AccountError(SecurityError):
"""
Base account error that all other account errors derive from.
"""
def __init__(self, message, code):
super().__init__(message, code)
class DisabledError(AccountError):
"""
Raised when account is disabled.
"""
def __init__(self, message: str = "Account is disabled.", code: int = 401):
super().__init__(message, code)
class UnverifiedError(AccountError):
"""
Raised when account is unverified.
"""
def __init__(self):
super().__init__("Account requires verification.", 401)
class VerifiedError(AccountError):
"""
Raised when account is already verified.
"""
def __init__(self):
super().__init__("Account already verified.", 403)
class SessionError(SecurityError):
"""
Base session error that all other session errors derive from.
"""
def __init__(self, message, code=401):
super().__init__(message, code)
class JWTDecodeError(SessionError):
"""
Raised when client JWT is invalid.
"""
def __init__(self, message, code=400):
super().__init__(message, code)
class DeactivatedError(SessionError):
"""
Raised when session is deactivated.
"""
def __init__(self, message: str = "Session is deactivated.", code: int = 401):
super().__init__(message, code)
class ExpiredError(SessionError):
"""
Raised when session has expired.
"""
def __init__(self):
super().__init__("Session has expired")
class SecondFactorRequiredError(SessionError):
"""
Raised when authentication session two-factor requirement isn't met.
"""
def __init__(self):
super().__init__("Session requires second factor for authentication.")
class SecondFactorFulfilledError(SessionError):
"""
Raised when authentication session two-factor requirement is already met.
"""
def __init__(self):
super().__init__("Session second factor requirement already met.", 403)
class ChallengeError(SessionError):
"""
Raised when a session challenge attempt is invalid.
"""
def __init__(self, message):
super().__init__(message)
class MaxedOutChallengeError(ChallengeError):
"""
Raised when a session's challenge attempt limit is reached.
"""
def __init__(self):
super().__init__("The maximum amount of attempts has been reached.")
class AuthorizationError(SecurityError):
"""
Raised when an account has insufficient permissions or roles for an action.
"""
def __init__(self, message):
super().__init__(message, 403)
class CredentialsError(SecurityError):
"""
Raised when credentials are invalid.
"""
def __init__(self, message, code=400):
super().__init__(message, code)
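# Usage sketch (illustrative only, not part of the original module): every error
# derived from SecurityError carries a ready-made JSON response in its `json`
# attribute, so a Sanic exception handler could simply return it. The `app` object
# below is an assumption, not something defined in this module.
#
#     @app.exception(SecurityError)
#     async def handle_security_error(request, exception):
#         return exception.json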
|
PypiClean
|
/RsCMPX_NrFr1Meas-4.0.185.tar.gz/RsCMPX_NrFr1Meas-4.0.185/RsCMPX_NrFr1Meas/Implementations/Configure/NrSubMeas/MultiEval/Limit/SeMask/Standard/Area/Caggregation.py
|
from .........Internal.Core import Core
from .........Internal.CommandsGroup import CommandsGroup
from .........Internal import Conversions
from ......... import repcap
# noinspection PyPep8Naming,PyAttributeOutsideInit,SpellCheckingInspection
class CaggregationCls:
"""Caggregation commands group definition. 1 total commands, 0 Subgroups, 1 group commands"""
def __init__(self, core: Core, parent):
self._core = core
self._cmd_group = CommandsGroup("caggregation", core, parent)
def set(self, enable: bool, areaReduced=repcap.AreaReduced.Default) -> None:
"""SCPI: CONFigure:NRSub:MEASurement<Instance>:MEValuation:LIMit:SEMask:STANdard:AREA<nr>:CAGGregation \n
Snippet: driver.configure.nrSubMeas.multiEval.limit.seMask.standard.area.caggregation.set(enable = False, areaReduced = repcap.AreaReduced.Default) \n
Configures the activation state of area number <no> of the standard emission mask for NR SA with carrier aggregation. \n
:param enable: OFF: disables the limit check for this area ON: enables the limit check for this area
:param areaReduced: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Area')
"""
param = Conversions.bool_to_str(enable)
areaReduced_cmd_val = self._cmd_group.get_repcap_cmd_value(areaReduced, repcap.AreaReduced)
self._core.io.write(f'CONFigure:NRSub:MEASurement<Instance>:MEValuation:LIMit:SEMask:STANdard:AREA{areaReduced_cmd_val}:CAGGregation {param}')
def get(self, areaReduced=repcap.AreaReduced.Default) -> bool:
"""SCPI: CONFigure:NRSub:MEASurement<Instance>:MEValuation:LIMit:SEMask:STANdard:AREA<nr>:CAGGregation \n
Snippet: value: bool = driver.configure.nrSubMeas.multiEval.limit.seMask.standard.area.caggregation.get(areaReduced = repcap.AreaReduced.Default) \n
Configures the activation state of area number <no> of the standard emission mask for NR SA with carrier aggregation. \n
:param areaReduced: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Area')
:return: enable: OFF: disables the limit check for this area ON: enables the limit check for this area"""
areaReduced_cmd_val = self._cmd_group.get_repcap_cmd_value(areaReduced, repcap.AreaReduced)
response = self._core.io.query_str(f'CONFigure:NRSub:MEASurement<Instance>:MEValuation:LIMit:SEMask:STANdard:AREA{areaReduced_cmd_val}:CAGGregation?')
return Conversions.str_to_bool(response)
|
PypiClean
|
/microservices_connector-0.3.7.tar.gz/microservices_connector-0.3.7/microservices_connector/minisocket.py
|
import asyncio
import os
import traceback
import threading
import websockets
import websocket
import uvloop
import time
from microservices_connector.url_parser.url_namespace import ArgsParse
def SocketClient(host='localhost:8765', url='/'):
return websocket.create_connection(f'ws://{host}{url}')
async def raw_middleware(websocket, handler, *args):
message = await websocket.recv()
await handler(websocket, message, *args)
class SocketServer(threading.Thread):
def __init__(self, name=__file__):
threading.Thread.__init__(self)
self.name = name
self.url = {}
self.url_args = {}
self.timeout = 1000*1000*60
def router(self, rule):
def response(handler):
self.add_route(rule, handler)
return handler
return response
route = router
def render(self, rule):
def response(handler):
self.add_route(rule, handler, middleware=raw_middleware)
return handler
return response
def add_route(self, rule, handler, middleware=None):
if middleware is None:
print(rule, 'middleware is None')
middleware = self.basic_middleware
args = ArgsParse(rule)
if args.is_hashable():
self.url[rule] = handler, middleware
else:
self.url_args[rule] = handler, middleware
async def basic_middleware(self, websocket, handler, *args):
message = '?'
while message != 'exit':
message = await websocket.recv()
reply = handler(message, *args)
if reply is not None:
await websocket.send(reply)
async def handle_immutalble_route(self, websocket, path, *args):
handler, middleware = self.url[path]
await middleware(websocket, handler, *args)
async def handle_mutalble_route(self, websocket, path, *args):
handler, middleware = self.url_args[path]
await middleware(websocket, handler, *args)
async def connect(self, websocket, path):
# check if url is immutalble or contain args
if path in self.url:
await self.handle_immutalble_route(websocket, path)
else:
matched_rule = None
for rule in self.url_args:
args = ArgsParse(rule)
if args.parse(path) is not None:
matched_rule = rule
break
if matched_rule:
await self.handle_mutalble_route(websocket, matched_rule, *args.parse(path))
else:
await websocket.send('Websocket close: path does not exist')
def server(self, host='127.0.0.1', port=8765):
print("Starting socket in %s:%s" % (host, port))
loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
start_server = websockets.serve(self.connect, host, port)
loop.run_until_complete(start_server)
loop.run_forever()
def run(self, host='127.0.0.1', port=8765):
s = threading.Thread(target=self.server, args=(host, port))
s.daemon = True
s.start()
def main():
sk = SocketServer(__name__)
@sk.router('/hello')
def test(message):
print(message)
return 'ok:'+message
sk.run()
if __name__ == '__main__':
main()
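# Client-side sketch (illustrative only, not part of the original module), using the
# SocketClient helper defined above against the '/hello' route registered in main():
#
#     ws = SocketClient('localhost:8765', '/hello')
#     ws.send('ping')
#     print(ws.recv())   # the handler above would reply with 'ok:ping'
#     ws.send('exit')    # ends the basic_middleware receive loop on the server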
|
PypiClean
|
/pYSOVAR-1.0.tar.gz/pYSOVAR-1.0/YSOVAR/autofuncs.py
|
import math
import numpy as np
import scipy.stats
import scipy.stats.mstats
import scipy.odr
import matplotlib.pyplot as plt
from astropy.utils.compat.odict import OrderedDict
from .registry import register
#def redvec_36_45():
# ''' Rieke & Lebofsky 1985 (bibcode 1985ApJ...288..618R)
# I take the extinctions from the L and M band (3.5, 5.0).'''
# A36 = 0.058
# A45 = 0.023
# R36 = - A36/(A45 - A36)
# return np.array([R36, A36])
redvecs = {'36_45_rieke_Lebofsky85_vsAV': np.array([1.66, 0.058]),
'36_45_Flaherty07_vsAK':np.array([-0.632/(0.53-0.632),0.632]),
'36_45_Indebetouw05_vsAK': np.array([-0.56/(0.43-0.56), .56]),
'36_45': np.array([-0.56/(0.43-0.56), .56])
}
'''Dictionary of default reddening vectors
The form of the vectors is as follows:
redvec[0] : slope of reddening law in CMD
redvec[1] : reddening value in first band (the delta_y in CMD)
'''
### simple function for one band ###
def mad(data):
'''Median absolute deviation.'''
return np.median(np.abs(data - np.median(data)))
def redchi2tomean(data, error):
'''reduced chi^2 to mean'''
return np.sum( (data - np.mean(data))**2/(error**2) )/(len(data)-1)
def delta(data):
'''width of distribution from 10% to 90%'''
return (scipy.stats.mstats.mquantiles(data, prob=0.9) - scipy.stats.mstats.mquantiles(data, prob=0.1))
def AMCM(data):
'''So-called M value from Ann-Marie Cody's 2014 paper. Light curve asymmetry across the median; specifically, average of top and bottom 10% minus median, divided by rms scatter.'''
return (np.mean([scipy.stats.mstats.mquantiles(data, prob=0.9), scipy.stats.mstats.mquantiles(data, prob=0.1)]) - np.median(data))/np.sqrt( ( (data - data.mean())**2).sum() / len(data) )
def wmean(data, error):
'''error weighted mean'''
return np.average(data, weights=1./error**2.)
def isnormal(data):
'p-value for a 2-sided chi^2 probability that the distribution is normal'
if len(data) >=20:
return scipy.stats.normaltest(data)[1]
else:
return np.nan
register(np.mean, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'],
default_coldescriptions=['mean magnitude'])
register(np.median, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'],
default_coldescriptions=['median magnitude'])
register(mad, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'])
register(delta, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'])
register(AMCM, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'])
register(len, n_bands = 1, error = False, time = False, name = 'n',
other_cols = OrderedDict([('n', int)]), force = True,
default_coldescriptions=['Number of datapoints'], default_colunits=['ct'])
register(np.min, n_bands = 1, error = False, time = False, name = 'min', force = True,
default_colunits=['mag'], default_coldescriptions=['minimum magnitude in lightcurve'])
register(np.max, n_bands = 1, error = False, time = False, name = 'max', force = True,
default_colunits=['mag'], default_coldescriptions=['maximum magnitude in lightcurve'])
register(np.std, n_bands = 1, time = False, error = False, name = 'stddev',
description = 'standard deviation calculated from non-biased variance',
kwargs = {'ddof': 1}, force = True, default_colunits=['mag'])
register(scipy.stats.skew, n_bands = 1, error = False, time = False,
description = 'biased (no correction for dof) skew', force = True, default_colunits=['mag'])
register(scipy.stats.kurtosis, n_bands = 1, error = False, time = False,
description = 'biased (no correction for dof) kurtosis', force = True, default_colunits=['mag'])
register(isnormal, n_bands = 1, error = False, time = False, force = True)
for func in [redchi2tomean, wmean]:
register(func, n_bands = 1, time = False, error = True, force = True)
### functions for two bands ###
def stetson(data1, data2, data1_error, data2_error):
'''Stetson index for a two-band lightcurve.
According to eqn (1) in Stetson 1996, PASP, 108, 851.
This procedure operates on the matched lightcurves
(not frames with one band only) and assigns a weight (g_i in
Stetson 1996) of 1 to each datapoint.
Parameters
----------
data1 : np.array
single lightcurve of band 1 in magnitudes
data2 : np.array
single lightcurve of band 2 in magnitudes
data1_error : np.array
error on data points of band 1 in magnitudes
data2_error : np.array
error on data points of band 2 in magnitudes
Returns
-------
stetson : float
Stetson value for the provided two-band lightcurve
'''
# number of datapoints:
N = float(len(data1))
if (len(data2) != N) or (len(data1_error) !=N) or (len(data2_error) !=N):
raise ValueError('All input arrays must have the same length')
if N > 1:
# weighted mean magnitudes in each passband:
wmean1 = wmean(data1, data1_error)
wmean2 = wmean(data2, data2_error)
# normalized residual from the weighted mean for each datapoint:
res_1 = (data1 - wmean1) / data1_error
res_2 = (data2 - wmean2) / data2_error
P_ik = res_1 * res_2
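# Stetson (1996) eq. 1 with all weights g_i = 1:
# S = sqrt(1 / (N*(N-1))) * sum( sign(P_ik) * sqrt(|P_ik|) )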
return np.sqrt(1./(N*(N-1))) * np.sum( np.sign(P_ik) * np.sqrt(np.abs(P_ik)) )
else:
return np.nan
register(stetson, n_bands = 2, error = True, time = False, force = True)
def cmd_slope_simple(data1, data2, data1_error, data2_error, redvec = redvecs['36_45']):
'''Slope of the data points in the color-magnitude diagram
This is just fitted with ordinary least squares, using the analytic formula.
This is then used as a first guess for an orthogonal least squares fit with simultaneous treatment of errors in x and y (see fit_twocolor_odr)
Parameters
----------
data1 : np.array
single lightcurve of band 1 in magnitudes
data2 : np.array
single lightcurve of band 2 in magnitudes
data1_error : np.array
error on data points of band 1 in magnitudes
data2_error : np.array
error on data points of band 2 in magnitudes
redvec : np.array with two elements
theoretical reddening vector for the two bands chosen
Returns
-------
m : float
slope of fit in color-magnitude diagram
b : float
axis intercept of fit
m2 : float
slope of the input theoretical reddening vector `redvec`
b2 : float
axis intercept of the fit when the slope is forced to `m2`
redchi2 : float
reduced chi^2 of fit of `[m,b]`
redchi2_2 : float
reduced chi^2 of fit of `b2`
'''
# number of datapoints:
N = float(len(data1))
if N < 3:
return np.nan
if (len(data2) != N) or (len(data1_error) !=N) or (len(data2_error) !=N):
raise ValueError('All input arrays must have the same length')
x = data1 - data2
y = data1
x_error = np.sqrt( data1_error**2 + data2_error**2 )
y_error = data1_error
# calculate the different sums:
sum_x = np.sum(x)
sum_y = np.sum(y)
sum_xx = np.sum(x**2)
sum_xy = np.sum(x*y)
# now get b and m from analytic formula:
m = (-sum_x*sum_y + N*sum_xy) / (N*sum_xx - sum_x*sum_x)
b = (-sum_x*sum_xy + sum_xx*sum_y) / (N*sum_xx - sum_x*sum_x)
# now calculate chisquared for this line:
redchi2 = np.sum( (y - (m*x+b))**2/ y_error**2)/(N-2)
# now fit theoretical reddening vector to data, for plotting purposes (i.e. just shifting it in y:)
m2 = redvec[0] # the sign is okay, because the y axis is inverted in the plots
b2 = 1/N * ( sum_y - m2 * sum_x )
redchi2_2 = np.sum( (y - (m2*x+b2))**2/y_error**2 )/(N-2)
return m,b,m2,b2,redchi2,redchi2_2
register(cmd_slope_simple, n_bands = 2, error = True, time = False, default_colnames = ['cmd_m_plain', 'cmd_b_plain', 'cmd_m_redvec', 'cmd_b_redvec'], name = 'cmdslopesimple', force = True)
def fit_twocolor_odr(band1, band2, band1_err, band2_err, outroot = None, n_bootstrap = None, xyswitch = False, p_guess = None, redvec = redvecs['36_45']):
'''Fits a straight line to a single CMD, using a weighted orthogonal least squares algorithm (ODR).
Parameters
----------
band1 : np.array
single lightcurve of band 1 in magnitudes
band2 : np.array
single lightcurve of band 2 in magnitudes
band1_err : np.array
error on data points of band 1 in magnitudes
band2_err : np.array
error on data points of band 2 in magnitudes
p_guess : tuple
initial fit parameters derived from fit_twocolor
outroot : string or None
dictionary where to save the plot, set to `None` for no plotting
n_bootstrap : integer or None
how many bootstrap trials, set to `None` for no bootstrapping
xyswitch : boolean
if the X and Y axis will be switched for the fit or not. This has nothing to do with bisector fitting! The fitting algorithm used here takes care of errors in x and y simultaneously; the xyswitch is only for taking care of pathological cases where a vertical fitted line would occur without coordinate switching.
redvec : np.array with two elements
theoretical reddening vector for the two bands chosen
Returns
-------
result : tuple
contains output = fit parameters, bootstrap_output = results from the bootstrap, bootstrap_raw = the actual bootstrapped data, alpha = the fitted slope angle, sd_alpha = the error on the fitted slope angle, x_spread = the spread of the data along the fitted line (90th percentile - 10th percentile)
'''
#define the fitting function (in this case a straight line)
def fitfunc(p, x):
return p[0]*x + p[1]
if p_guess is None:
p_guess = list(cmd_slope_simple(band1, band2, band1_err, band2_err))[0:2]
if ~np.isfinite(p_guess[0]): # pathological case
p_guess[0] = 0
if ~np.isfinite(p_guess[1]): # pathological case
p_guess[1] = np.mean(band1-band2)
# define what the x and y data is:
x_data = band1 - band2
y_data = band1
x_error = np.sqrt( band1_err**2 + band2_err**2 )
y_error = band1_err
if xyswitch:
y_data, x_data = (x_data, y_data)
y_error, x_error = (x_error, y_error)
# load data into ODR
data = scipy.odr.RealData(x=x_data, y=y_data, sx=x_error, sy=y_error)
# tell ODR what the fitting function is:
model = scipy.odr.Model(fitfunc)
# now do the fit:
fit = scipy.odr.ODR(data, model, p_guess, maxit=1000)
output = fit.run()
p = output.beta # the fitted function parameters
delta = output.delta # array of estimated errors in input variables
eps = output.eps # array of estimated errors in response variables
#print output.stopreason[0]
bootstrap_output = np.array([np.NaN, np.NaN, np.NaN, np.NaN])
bootstrap_raw = (np.NaN, np.NaN, np.NaN)
# calculate slope angle. This is vs. horizontal axis.
alpha = math.atan(output.beta[0])
# calculate error on slope angle by taking the mean difference of the angles derived from m+m_error and m-m_error.
alpha_plus = math.asin((output.beta[0]+output.sd_beta[0])/np.sqrt((output.beta[0]+output.sd_beta[0])**2 + 1**2))
alpha_minus = math.asin((output.beta[0]-output.sd_beta[0])/np.sqrt((output.beta[0]-output.sd_beta[0])**2 + 1**2))
sd_alpha = 0.5*( np.abs(alpha - alpha_plus) + np.abs(alpha - alpha_minus) )
# define the spread along the fitted line. Use 90th and 10th quantile.
# output.xplus and output.y are the x and y values of the projection of the original data onto the fit.
# okay, first transform coordinate system so that x axis is along fit. To do this, first shift everything by -p[1] (this is -b), then rotate by -alpha. New x and y coordinates are then:
#
# |x'| |cos(-alpha) -sin(-alpha)| | x |
# | | = | | | |
# |y'| |sin(-alpha) cos(-alpha)| |y-b|
#
x_new = math.cos(-alpha) * output.xplus - math.sin(-alpha)*(output.y - p[1])
y_new = math.sin(-alpha) * output.xplus + math.cos(-alpha)*(output.y - p[1])
# The y_new values are now essentially zero. (As they should.)
# Now sort x_new and get 90th and 10th quantile:
x_new.sort()
x_spread = scipy.stats.mstats.mquantiles(x_new, prob=0.9)[0] - scipy.stats.mstats.mquantiles(x_new, prob=0.1)[0]
#print x_spread
if outroot is not None:
# I got the following from a python script from http://www.physics.utoronto.ca/~phy326/python/odr_fit_to_data.py, I have to check this properly.
# This does a residual plot, and some bootstrapping if desired.
# error ellipses:
xstar = x_error*np.sqrt( ((y_error*delta)**2) / ( (y_error*delta)**2 + (x_error*eps)**2 ) )
ystar = y_error*np.sqrt( ((x_error*eps)**2) / ( (y_error*delta)**2 + (x_error*eps)**2 ) )
adjusted_err = np.sqrt(xstar**2 + ystar**2)
residual = np.sign(y_data - fitfunc(p,x_data))*np.sqrt(delta**2 + eps**2)
fig = plt.figure()
fit = fig.add_subplot(211)
fit.set_xticklabels( () )
plt.ylabel("[3.6]")
plt.title("Orthogonal Distance Regression Fit to Data")
# plot data as circles and model as line
x_model = np.arange(min(x_data),max(x_data),(max(x_data)-min(x_data))/1000.)
fit.plot(x_data,y_data,'ro', x_model, fitfunc(p,x_model))
fit.errorbar(x_data, y_data, xerr=x_error, yerr=y_error, fmt='r+')
fit.set_yscale('linear')
a = np.array([output.xplus,x_data]) # output.xplus: x-values of datapoints projected onto fit
b = np.array([output.y,y_data]) # output.y: y-values of datapoints projected onto fit
fit.plot(np.array([a[0][0],a[1][0]]), np.array([b[0][0],b[1][0]]), 'k-', label = 'Residuals')
print(np.array([a[0][0],a[1][0]]))
print(np.array([b[0][0],b[1][0]]))
for i in range(1,len(y_data)):
fit.plot(np.array([a[0][i],a[1][i]]), np.array([b[0][i],b[1][i]]),'k-')
fit.set_ylim([min(y_data)-0.05, max(y_data)+0.05])
fit.set_ylim(fit.get_ylim()[::-1])
fit.legend(loc='lower left')
# separate plot to show residuals
residuals = fig.add_subplot(212) # 3 rows, 1 column, subplot 2
residuals.errorbar(x=a[0][:],y=residual,yerr=adjusted_err, fmt="r+", label = "Residuals")
# make sure residual plot has same x axis as fit plot
residuals.set_xlim(fit.get_xlim())
residuals.set_ylim(residuals.get_ylim()[::-1])
# Draw a horizontal line at zero on residuals plot
plt.axhline(y=0, color='b')
# Label axes
plt.xlabel("[3.6] - [4.5]")
plt.ylabel("Residuals")
plt.savefig(outroot + '_odrfit.eps')
if n_bootstrap is not None:
print('bootstrapping...')
# take a random half of the data and do the fit (choosing without replacement, standard bootstrap). Do this a lot of times and construct a cumulative distribution function for the slope and the intercept of the fitted line.
# now what I actually want is the slope angle a, not m.
m = np.array([])
b = np.array([])
for i in np.arange(0, n_bootstrap):
indices = np.arange(0,len(x_data))
np.random.shuffle(indices)
ind = indices[0:len(x_data)/2] # dividing by integer on purpose.
dat = scipy.odr.RealData(x=x_data[ind], y=y_data[ind], sx=x_error[ind], sy=y_error[ind])
fit = scipy.odr.ODR(dat, model, p_guess, maxit=5000,job=10)
out = fit.run()
m = np.append(m, out.beta[0])
b = np.append(b, out.beta[1])
a = np.arctan(m) # in radian
# plot histograms for m and b:
plt.clf()
n_m, bins_m, patches_m = plt.hist(m, 100, normed=True )
plt.savefig('m_hist.eps')
plt.clf()
n_b, bins_b, patches_b = plt.hist(b, 100, normed=True)
plt.savefig('b_hist.eps')
plt.clf()
n_a, bins_a, patches_a = plt.hist(a, 100, normed=True)
plt.savefig('a_hist.eps')
plt.clf()
# get median and symmetric 68% interval for m, b and alpha:
m_median = np.median(m)
m_down = np.sort(m)[ int(round(0.16*len(m))) ]
m_up = np.sort(m)[ int(round(0.84*len(m))) ]
m_error = np.mean([abs(m_down-m_median), abs(m_up-m_median)])
#print (m_median, m_up, m_down, m_error)
b_median = np.median(b)
b_down = np.sort(b)[ int(round(0.16*len(b))) ]
b_up = np.sort(b)[ int(round(0.84*len(b))) ]
b_error = np.mean([abs(b_down-b_median), abs(b_up-b_median)])
#print (b_median, b_up, b_down, b_error)
a_median = np.median(a)
a_down = np.sort(a)[ int(round(0.16*len(a))) ]
a_up = np.sort(a)[ int(round(0.84*len(a))) ]
a_error = np.mean([abs(a_down-a_median), abs(a_up-a_median)])
#print (b_median, b_up, b_down, b_error)
bootstrap_output = np.array([m_median, m_error, b_median, b_error, a_median, a_error])
bootstrap_raw = (m, b, a)
result = (output, bootstrap_output, bootstrap_raw, alpha, sd_alpha, x_spread)
return result
def cmdslope_odr(band1, band2, band1_err, band2_err, p_guess = None, redvec = redvecs['36_45']):
'''Fits a straight line to a single CMD, using a weighted orthogonal least squares algorithm (ODR).
Parameters
----------
band1 : np.array
single lightcurve of band 1 in magnitudes
band2 : np.array
single lightcurve of band 2 in magnitudes
band1_err : np.array
error on data points of band 1 in magnitudes
band2_err : np.array
error on data points of band 2 in magnitudes
p_guess : tuple
initial fit parameters derived from fit_twocolor
redvec : np.array with two elements
theoretical reddening vector for the two bands chosen
Returns
-------
result : tuple
(alpha, alpha_error, cmd_m, cmd_b, cmd_m_error, cmd_b_error, AV, cmd_dominated, spread): the fitted slope angle and its error, the CMD slope and intercept with their errors, the A_V derived from the spread for extinction-dominated slopes, a crude classification of the slope, and the spread of the data along the fitted line
'''
if len(band1) < 10:
return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, '', np.nan
if p_guess is None:
p_guess = cmd_slope_simple(band1, band2, band1_err, band2_err, redvec = redvec)
(fit_output2, bootstrap_output2, bootstrap_raw2, alpha2, alpha_error2, spread2) = fit_twocolor_odr(band1, band2, band1_err, band2_err, xyswitch = True, p_guess = p_guess, redvec = redvec)
(fit_output, bootstrap_output, bootstrap_raw, alpha, alpha_error, spread) = fit_twocolor_odr(band1, band2, band1_err, band2_err, xyswitch = False, p_guess = p_guess, redvec = redvec)
# Checks if the ODR fit with switched X and Y axes yields a more
# constrained fit than the original axes. This basically catches the
# pathological cases with a (nearly) vertical fit with large nominal errors.
if alpha_error/alpha > alpha_error2/alpha2:
alpha, alpha_error = (alpha2, alpha_error2)
cmd_m = 1./fit_output2.beta[0]
cmd_b = -fit_output2.beta[1] / fit_output2.beta[0]
cmd_m_error = fit_output2.sd_beta[0] / cmd_m**2
cmd_b_error = np.sqrt((fit_output2.sd_beta[1]/cmd_m)**2 +
(cmd_b**2*cmd_m_error**2)**2)
spread = spread2
else:
cmd_m = fit_output.beta[0]
cmd_b = fit_output.beta[1]
cmd_m_error = fit_output.sd_beta[0]
cmd_b_error = fit_output.sd_beta[1]
# Make new alpha to avoid confusion in case of x/y switch
alpha = math.atan(cmd_m)
'''crude classification of CMD slope
This is some crude classification of the cmd slope.
anything that goes up and has a relative slope error of <40% is
"accretion-dominated", anything that is within some cone around
the theoretical reddening and has error <40% is "extinction-dominated",
anything else is "other".
If slope is classified as extinction, the spread in the CMD is converted
to AV and stored.
'''
# angle of standard reddening
alpha_red = math.atan(redvec[0])
cmd_dominated = 'bad'
AV = np.nan
if alpha_error/alpha <=0.4:
cmd_dominated = 'other'
if np.abs(alpha - alpha_red) < 0.3:
cmd_dominated = 'extinc.'
AV = spread/redvec[1]
if alpha < 0.:
cmd_dominated = 'accr.'
return alpha, alpha_error, cmd_m, cmd_b, cmd_m_error, cmd_b_error, AV, cmd_dominated, spread
register(cmdslope_odr, n_bands= 2, error = True, time = False, default_colnames = ['cmd_alpha', 'cmd_alpha_error', 'cmd_m', 'cmd_b', 'cmd_m_error', 'cmd_b_error', 'AV'], other_cols = OrderedDict([['cmd_dominated', 'S10'], ['CMD_length', 'float']]), name = 'cmdslopeodr', force = True, default_colunits=['rad','rad',None, None, None, None, 'mag',None, None, 'mag'], default_coldescriptions=['angle of best-fit line in CMD', 'uncertainty on angle', 'slope in CMD', 'offset of best-fits line', 'uncertainty on slope', 'uncertainty on angle', 'length of reddening vector', 'classification of slope in CMD', '90% spread in slope in CMD'])
|
PypiClean
|
/pcl_pangu-1.2.6.2.tar.gz/pcl_pangu-1.2.6.2/pcl_pangu/tokenizer/spm_13w/tokenizer.py
|
import sentencepiece as spm
import jieba
langs_ID = {'zh': 128301, 'ko': 128302, 'vi': 128303,
'de': 128317, 'en': 128318, 'nl': 128132,
'ms': 128109, 'id': 128110, 'tl': 128111,
'mn': 128103, 'my': 128104, 'th': 128105, 'lo': 128106, 'km': 128107,
'lt': 128112, 'et': 128113, 'lv': 128133, 'hu': 128115,
'pl': 128116, 'cs': 128117, 'sk': 128118, 'sl': 128119, 'hr': 128120, 'bs': 128121, 'sr': 128306, 'bg': 128304,
'mk': 128122, 'ru': 128305, 'uk': 128307, 'be': 128123,
'sq': 128124, 'el': 128125, 'ka': 128126, 'hy': 128127,
'ro': 128108, 'fr': 128100, 'es': 128102, 'pt': 128101,
'fa': 128310, 'he': 128311, 'ar': 128308, 'ps': 128309,
'tr': 128128, 'kk': 128129, 'uz': 128130, 'az': 128131,
'hi': 128315, 'ta': 128316, 'ur': 128313, 'bn': 128312, 'si': 128314, 'ne': 128114}
translate_ID = 128300
class SpmTokenizer(object):
def __init__(self, model_file):
self.sp = spm.SentencePieceProcessor(model_file=model_file)
self.specialIDNum = 300
self.eod_id = self.vocab_size - 1
self.eot_id = self.vocab_size - 2
self.pad_id = self.vocab_size - 3
# langsList=['ar','bg','bs','cs','de','el','en','es','et','fa',
# 'fr','he','hr','hu','id','it','nl','pl','pt','ru',
# 'sl','tr','ur']
# self.connectTxtToId = {}
# i = 1
# for lang in langsList:
# self.connectTxtToId[f'zh-{lang}'] = i
# i += 1
# self.connectTxtToId[f'{lang}-zh'] = i
# i += 1
@property
def vocab_size(self):
return self.sp.vocab_size() + self.specialIDNum
@property
def spmVocabSize(self):
return self.sp.vocab_size()
@property
def eod(self):
return self.eod_id
def tokenize(self, text):
""" Tokenize a string. """
return self.sp.encode(text)
# # Adapted parallel corpus
# texts = text.split(' _☪☣_ ')
# if len(texts) == 1:
# return self.sp.encode(text)
# if len(texts) == 3:
# ids1 = self.sp.encode(texts[0])
# connectId = self.sp.vocab_size() + self.connectTxtToId[texts[1]]
# ids2 = self.sp.encode(texts[2])
# return ids1 + [connectId] + ids2
# return []
def convert_tokens_to_ids(self, tokens):
return tokens
def convert_ids_to_tokens(self, ids):
ids = [id if id < self.sp.vocab_size() else 0 for id in ids]
return self.decode(ids)
def encode(self, text):
res = self.tokenize(text)
return res
def decode(self, tokens):
text = self.sp.decode(tokens)
text = text.replace('\u2583', '\n')
return text
if __name__ == '__main__':
Hindi_text = 'उत्साह बढ़ाने वाली कविताप्रेरणा दायक कविताप्रेरणा देने वाली कविताप्रेरणादायक कविता बच्चों के लिएप्रेरणादायक गजलप्रेरणादायक शायरी इन हिंदीप्रेरणादायक सुविचारप्रेरणादायक हिन्दी कविताप्रेरणादायी कविता हिंदीमनोबल बढ़ाने वाली कवितामनोबल बढ़ाने वाले विचारसकारात्मक कवितायेसकारात्मक सुविचारसकारात्मक सोच पर कविताहौसला पर कविताहौसला पर शायरीहौसला बढ़ाने वाली कविताहौसला बढ़ाने वाले विचारहौसला बढ़ाने वाले सुविचारज़िंदादिल ज़िन्दगी कविताज़िन्दगी पर कविता'
Chinese_text = '湖人 板\n凳双枪乔丹-法玛尔和萨沙-武贾西奇回访斯台普斯,面对旧东家他们肯定想有所表现。第二节的故事是香农-布朗PK武贾西奇,奥多姆PK汉弗里斯,都是有故事的人。汉弗里斯篮下自投自抢拿下2分,别看情场对付卡戴珊的本领不如人,可到了球场上可不输奥多姆。小布朗单挑萨沙得手一次就没了声息,但湖人的整体进攻却哑火,篮网打的没多少章法,但依然扩大了领先优势,奥多姆没人防守的快攻上篮都不进,湖人问题不小。'
Urdu_text = 'حماسهآفرینی جدید بسیجیان سلحشور در برگزاری رزمایش سراسری اقتدار عاشورایی بسیج تحت عنوان سپاهیان حضرت محمد رسول الله (ص) که یادآور اعزام غرورآفرین سپاهیان حضرت محمد رسولالله در دوران دفاع مقدس است، برگ زرین دیگری در کارنامه درخشان بسیج است که با حضور دهها هزار نیرو بسیجی فداکار و داوطلب در عرصههای مختلف دفاعی، امنیتی، خدماترسانی،محرومیتزدایی، بهداشتی و درمانی و غیره جلوهای از همدلی، ایمان، قدرت و عمل انقلابی را به نمایش گذاشت.'
Thai_text = 'ใน VDO ชุดนี้นะครับเราจะมาพูดถึง Amibroker Backtest นะ Backtest คืออะไร Backtest คือการทดสอบระบบของเรานะครับ ระบบซื้อ-ขาย ว่าระบบซื้อ-ขายของเราเนี่ยทำงานได้ดีขนาดไหน ทำกำไรให้เราขนาดไหน หรือว่าทำให้เราเจ๊งขนาดไหนนะครับ เพราะฉะนั้นเนี่ยเราจะมาดูในส่วนนี้กัน คราวนี้เดี๋ยวเปิด Amibroker ขึ้นมาก่อนนะครับโดยที่ในส่วนของ Backtest เนี่ย หลักๆมีส่วนประกอบอะไรบ้าง มาที่ปุ่มบวกนี่นะครับเพิ่ม windows นะครับ เห็นมั้ยครับแล้วเลื่อนลงมาในส่วนของ Backtest เนี่ยจะประกอบด้วย 2 ส่วนหลักๆคือส่วนของ Analysis document นี่นะครับเป็นการตั้งค่าว่าจะ test จากวันไหนถึงวันไหน จะ test หุ้นอะไรบ้าง ลองเปิดขึ้นมาดูแล้วกันนี่นะครับ Analysis ก็จะเห็นว่ามี Backtest อยู่ตรงนี้แล้วก็จะ test ตามอะไร ตามสูตรในนี้ อันนี้ก็เป็นสูตรที่ผมเขียนค้างไว้นะครับอันนี้ไม่เป็นไร เราก็จะ test กับหุ้นอะไรบ้างเนี่ย ก็บอกว่า test หุ้นทั้งหมดนะครับโดย test ในช่วงไหนบ้างนี่นะครับก็จะมีให้ set ได้ว่า test วันล่าสุดหรือ test bar ล่าสุดนี่นะครับ อันนี้คือ test จากวันไหน from ถึง to นะครับแล้วก็ตั้งค่าได้ในนี้ว่าจะเอาเป็นวันที่เท่าไร หรือพิมพ์ก็ได้นี่นะครับ พิมพ์เข้าไปของผมนี่เป็น format ของอังกฤษนะครับก็คือจะเป็น เดือน/วัน/ปีถ้าท่านใช้ format เป็นไทย จะขึ้นเป็น วัน/เดือน/ปี นะครับแล้วแต่ว่า windows คิดยังไงอันนี้เข้าใจตรงกันนะครับ คราวนี้สมมุติผมพิมพ์เข้าไปผมเปลี่ยนเป็น2012 เห็นมั้ยครับ ก็เปลี่ยนเดือนของผมตรงนี้แต่เดือนของท่านอาจอยู่ตรงนี้ก็ได้นะครับแล้วแต่ format ผมเปลี่ยนกลับแล้วกัน 2011 อันนี้ก็เป็นส่วนของ Analysis ต่อไปถ้าจะต้องมีส่วนของ windows ด้วย อ้าวส่วนของ formula ด้วยว่าท่านจะเขียนเงื่อนไขในการซื้อ ขายอย่างไรก็กด บวกนะครับแล้วก็บอก new formula คราวนี้จะมี windows โผล่ขึ้นมา มาโผล่อีกหน้าต่างนึง อ่ะอันนี้ก็เป็น windows ของผมเห็นมั้ยครับก็เป็น windows ที่เอาไว้เขียน formula โดยที่ formula ที่ท่านเขียนขึ้นมาเองเนี่ยมันจะไม่ไปรวมกับ default ของ Amibroker อันนี้เป็น default นะครับ เดี๋ยวปิดเข้าไปส่วน code formula ที่ผมเขียนขึ้นมาเนี่ย มันจะไปอยู่ในส่วนของ customs นี่นะครับ ผมก็มีเขียนอะไรทิ้งไว้นะครับก็ว่ากันไป อันนี้ก็ให้ทำความเข้าใจนะครับว่าในส่วนของ backtest เนี่ยประกอบด้วย 2 ส่วนหลักๆก็คือ new Analysis … Analysis document หรือ Formula 2 ส่วนนี้นะครับเดี๋ยวเราจะมาพูดถึง 2 ส่วนนี้กัน ว่าไปทีละส่วน ทีนี้ในส่วนของ backtest เนี่ยเป็นส่วนที่ยากที่สุดไม่ว่าจะเป็น level ของ introduction เป็น level basic advance หรือ inter media ก็ตามเพราะงั้นจะใช้เวลาในส่วนนี้เยอะสุด Ok นะครับ'
Malay_text = 'ചായ കുടിയും പത്രം വായനയും \n കഴിഞ്ഞ ഹൈദ്രോസിക്കായുടെ ഒപ്പം സുലമാനിക്കയും എഴുന്നേറ്റു. പതുക്കെ കൂടെ നടന്ന് അമേരിക്കന് വിശേഷങ്ങള് എടുത്തിട്ടങ്ങലക്കി. അതോടെ ഹൈദ്രോസിക്ക കോടീശ്വരനില് ചോദ്യം ചോദിക്കുന്ന സുരേഷ് ഗോപിയുടെ വീറും വാശിയോടും കൂടെ ചോദ്യങ്ങള് ഓപഷനോടു കൂടിയും ഇല്ലാതെയും വീശി തുടങ്ങി. മണിച്ചിത്രത്താഴില് നാഗവല്ലിയെ ഡാന്സ് ചെയ്ത് കൊണ്ടു പോകുന്നത് പോലെ ചോദ്യങ്ങള്ക്കുത്തരങ്ങളും കൊടുത്ത് കഥകളും പറഞ്ഞ് സുലൈമാനിക്ക മിഷന് സക്സസ് ആക്കി. പാടവരമ്പിലെത്തി പാടമെല്ലാം കണ്ടിട്ടും ഹൈദ്രോസിക്കാന്റെ കരിനാക്കില് നിന്നൊന്നും വരുന്നില്ല. അവസാനം സുലൈമാനിക്ക തന്നെ വിഷയം എടുത്തിട്ടു.'
Arabic_text = 'با توجه به آنچه گفته شد، میدان مطالعاتی مرزها در ایران با محدودیتهای اندیشگی، رشتهای و نهادی گستردهای همراه بوده است. بیشتر این مطالعات محصور در حوزهی جغرافیای سیاسی، انتظامی، بینالمللی و سیاسیِ صرف بوده است. این در حالی است که مرزهای سیاسی در ایران، بهلحاظ فرهنگی مرزهای گشودهای هستند که نشان از فضای گستردهی ایران فرهنگی دارند. بر این اساس، مرز بیشتر از آنکه بهمعنای امتناع باشد، فضایی حیاتی و زیستهی زیست-جهانها و فرهنگهایی است که حیات و چالشهای آنها موضوعاتی ملی است.'
tokenizer = SpmTokenizer('spm.128k.model.1')
tokens = tokenizer.tokenize(Chinese_text)
ids = tokenizer.convert_tokens_to_ids(tokens)
txt = tokenizer.convert_ids_to_tokens(ids)
line1 = '34'
line2 = '4434'
a = f"{line1} _☪☣_ {'zh'}-{'ar'} _☪☣_ {line2}"
b = tokenizer.tokenize(a)
aa = '使 妇女 更 容易 感染 艾滋病毒 的 原因 还 包括 受 教育 机会 不 平等 , 其中 包括 全面 的 性 教育 和 艾滋病毒 防治 教育 , 很难 获得 固定收入 和 就业 , 缺乏 经济 安全 , 以及 暴力 和 恐惧 。'
tokens2 = tokenizer.tokenize(aa)
tokens2 = [i for i in tokens2 if i != 119132]
tokens3 = tokenizer.tokenize(''.join(aa.split()))
tokens3 = [i for i in tokens3 if i != 119132]
for i in tokens2:
if i != 119132:
print(tokenizer.convert_ids_to_tokens([i]))
for i in tokens3:
print(tokenizer.convert_ids_to_tokens([i]))
aaa = ' '.join(jieba.cut(''.join(aa.split()).strip()))
print(txt)
pass
|
PypiClean
|
/alibabacloud_ons20190214-1.0.4-py3-none-any.whl/alibabacloud_ons20190214/client.py
|
from typing import Dict
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_endpoint_util.client import Client as EndpointUtilClient
from alibabacloud_ons20190214 import models as ons_20190214_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = 'regional'
self._endpoint_map = {
'ap-northeast-2-pop': 'ons.ap-northeast-1.aliyuncs.com',
'cn-beijing-finance-1': 'ons.aliyuncs.com',
'cn-beijing-finance-pop': 'ons.aliyuncs.com',
'cn-beijing-gov-1': 'ons.aliyuncs.com',
'cn-beijing-nu16-b01': 'ons.aliyuncs.com',
'cn-edge-1': 'ons.aliyuncs.com',
'cn-fujian': 'ons.aliyuncs.com',
'cn-haidian-cm12-c01': 'ons.aliyuncs.com',
'cn-hangzhou-bj-b01': 'ons.aliyuncs.com',
'cn-hangzhou-internal-prod-1': 'ons.aliyuncs.com',
'cn-hangzhou-internal-test-1': 'ons.aliyuncs.com',
'cn-hangzhou-internal-test-2': 'ons.aliyuncs.com',
'cn-hangzhou-internal-test-3': 'ons.aliyuncs.com',
'cn-hangzhou-test-306': 'ons.aliyuncs.com',
'cn-hongkong-finance-pop': 'ons.aliyuncs.com',
'cn-qingdao-nebula': 'ons.aliyuncs.com',
'cn-shanghai-et15-b01': 'ons.aliyuncs.com',
'cn-shanghai-et2-b01': 'ons.aliyuncs.com',
'cn-shanghai-inner': 'ons.aliyuncs.com',
'cn-shanghai-internal-test-1': 'ons.aliyuncs.com',
'cn-shenzhen-inner': 'ons.aliyuncs.com',
'cn-shenzhen-st4-d01': 'ons.aliyuncs.com',
'cn-shenzhen-su18-b01': 'ons.aliyuncs.com',
'cn-wuhan': 'ons.aliyuncs.com',
'cn-yushanfang': 'ons.aliyuncs.com',
'cn-zhangbei-na61-b01': 'ons.aliyuncs.com',
'cn-zhangjiakou-na62-a01': 'ons.aliyuncs.com',
'cn-zhengzhou-nebula-1': 'ons.aliyuncs.com',
'eu-west-1-oxs': 'ons.ap-northeast-1.aliyuncs.com',
'rus-west-1-pop': 'ons.ap-northeast-1.aliyuncs.com'
}
self.check_config(config)
self._endpoint = self.get_endpoint('ons', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint)
def get_endpoint(
self,
product_id: str,
region_id: str,
endpoint_rule: str,
network: str,
suffix: str,
endpoint_map: Dict[str, str],
endpoint: str,
) -> str:
if not UtilClient.empty(endpoint):
return endpoint
if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(endpoint_map.get(region_id)):
return endpoint_map.get(region_id)
return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix)
def list_tag_resources_with_options(
self,
request: ons_20190214_models.ListTagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.ListTagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you call the **ListTagResources** operation, specify at least one of the following parameters in the request: **Key** and **ResourceId**. You can specify a resource ID to query all tags that are attached to the specified resource. You can also specify a tag key to query the tag value and resource to which the tag is attached.
* If you include the **Key** parameter in a request, you can obtain the tag value and the ID of the resource to which the tag is attached.
* If you include the **ResourceId** parameter in a request, you can obtain the keys and values of all tags that are attached to the specified resource.
@param request: ListTagResourcesRequest
@param runtime: runtime options for this request RuntimeOptions
@return: ListTagResourcesResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.next_token):
query['NextToken'] = request.next_token
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='ListTagResources',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.ListTagResourcesResponse(),
self.call_api(params, req, runtime)
)
async def list_tag_resources_with_options_async(
self,
request: ons_20190214_models.ListTagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.ListTagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you call the **ListTagResources** operation, specify at least one of the following parameters in the request: **Key** and **ResourceId**. You can specify a resource ID to query all tags that are attached to the specified resource. You can also specify a tag key to query the tag value and resource to which the tag is attached.
* If you include the **Key** parameter in a request, you can obtain the tag value and the ID of the resource to which the tag is attached.
* If you include the **ResourceId** parameter in a request, you can obtain the keys and values of all tags that are attached to the specified resource.
@param request: ListTagResourcesRequest
@param runtime: runtime options for this request RuntimeOptions
@return: ListTagResourcesResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.next_token):
query['NextToken'] = request.next_token
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='ListTagResources',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.ListTagResourcesResponse(),
await self.call_api_async(params, req, runtime)
)
def list_tag_resources(
self,
request: ons_20190214_models.ListTagResourcesRequest,
) -> ons_20190214_models.ListTagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you call the **ListTagResources** operation, specify at least one of the following parameters in the request: **Key** and **ResourceId**. You can specify a resource ID to query all tags that are attached to the specified resource. You can also specify a tag key to query the tag value and resource to which the tag is attached.
* If you include the **Key** parameter in a request, you can obtain the tag value and the ID of the resource to which the tag is attached.
* If you include the **ResourceId** parameter in a request, you can obtain the keys and values of all tags that are attached to the specified resource.
@param request: ListTagResourcesRequest
@return: ListTagResourcesResponse
"""
runtime = util_models.RuntimeOptions()
return self.list_tag_resources_with_options(request, runtime)
async def list_tag_resources_async(
self,
request: ons_20190214_models.ListTagResourcesRequest,
) -> ons_20190214_models.ListTagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you call the **ListTagResources** operation, specify at least one of the following parameters in the request: **Key** and **ResourceId**. You can specify a resource ID to query all tags that are attached to the specified resource. You can also specify a tag key to query the tag value and resource to which the tag is attached.
        * If you include the **Key** parameter in a request, you can obtain the tag value and the ID of the resource to which the tag is attached.
* If you include the **ResourceId** parameter in a request, you can obtain the keys and values of all tags that are attached to the specified resource.
@param request: ListTagResourcesRequest
@return: ListTagResourcesResponse
"""
runtime = util_models.RuntimeOptions()
return await self.list_tag_resources_with_options_async(request, runtime)
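    # Usage sketch (illustrative only, not part of the generated SDK): list the tags attached
    # to an ApsaraMQ for RocketMQ group by passing ResourceId, as the ListTagResources
    # docstrings above describe. The field names mirror ListTagResourcesRequest as used in
    # this file; the endpoint, instance ID, group ID, and the 'GROUP' resource type value are
    # placeholders/assumptions, and resource_id is passed as a list on the assumption that
    # ResourceId is a repeated parameter. `Client` refers to this class.
    #
    #   config = open_api_models.Config(
    #       access_key_id='<access-key-id>',
    #       access_key_secret='<access-key-secret>',
    #       endpoint='ons.cn-hangzhou.aliyuncs.com',  # assumed regional endpoint
    #   )
    #   client = Client(config)
    #   request = ons_20190214_models.ListTagResourcesRequest(
    #       instance_id='MQ_INST_xxx',      # placeholder instance ID
    #       resource_type='GROUP',          # assumed resource type value
    #       resource_id=['GID_example'],    # query all tags attached to this group
    #   )
    #   response = client.list_tag_resources(request)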
def ons_consumer_accumulate_with_options(
self,
request: ons_20190214_models.OnsConsumerAccumulateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerAccumulateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation in scenarios in which you want to know the message consumption progress of a specified consumer group in production environments. You can obtain the information about message consumption and consumption latency based on the returned information. This operation returns the total number of accumulated messages in all topics to which the specified consumer group subscribes and the number of accumulated messages in each topic.
@param request: OnsConsumerAccumulateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerAccumulateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.detail):
query['Detail'] = request.detail
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerAccumulate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerAccumulateResponse(),
self.call_api(params, req, runtime)
)
async def ons_consumer_accumulate_with_options_async(
self,
request: ons_20190214_models.OnsConsumerAccumulateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerAccumulateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation in scenarios in which you want to know the message consumption progress of a specified consumer group in production environments. You can obtain the information about message consumption and consumption latency based on the returned information. This operation returns the total number of accumulated messages in all topics to which the specified consumer group subscribes and the number of accumulated messages in each topic.
@param request: OnsConsumerAccumulateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerAccumulateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.detail):
query['Detail'] = request.detail
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerAccumulate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerAccumulateResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_consumer_accumulate(
self,
request: ons_20190214_models.OnsConsumerAccumulateRequest,
) -> ons_20190214_models.OnsConsumerAccumulateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation in scenarios in which you want to know the message consumption progress of a specified consumer group in production environments. You can obtain the information about message consumption and consumption latency based on the returned information. This operation returns the total number of accumulated messages in all topics to which the specified consumer group subscribes and the number of accumulated messages in each topic.
@param request: OnsConsumerAccumulateRequest
@return: OnsConsumerAccumulateResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_consumer_accumulate_with_options(request, runtime)
async def ons_consumer_accumulate_async(
self,
request: ons_20190214_models.OnsConsumerAccumulateRequest,
) -> ons_20190214_models.OnsConsumerAccumulateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation in scenarios in which you want to know the message consumption progress of a specified consumer group in production environments. You can obtain the information about message consumption and consumption latency based on the returned information. This operation returns the total number of accumulated messages in all topics to which the specified consumer group subscribes and the number of accumulated messages in each topic.
@param request: OnsConsumerAccumulateRequest
@return: OnsConsumerAccumulateResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_consumer_accumulate_with_options_async(request, runtime)
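    # Usage sketch (illustrative only): check the consumption backlog of a consumer group, as
    # described in the OnsConsumerAccumulate docstrings above. Detail=True asks for the
    # per-topic breakdown in addition to the totals; the IDs below are placeholders and
    # `client` is assumed to be constructed as in the earlier ListTagResources sketch.
    #
    #   request = ons_20190214_models.OnsConsumerAccumulateRequest(
    #       instance_id='MQ_INST_xxx',
    #       group_id='GID_example',
    #       detail=True,
    #   )
    #   response = client.ons_consumer_accumulate(request)
    #   # response.body carries the accumulated-message counts returned by the API.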
def ons_consumer_get_connection_with_options(
self,
request: ons_20190214_models.OnsConsumerGetConnectionRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerGetConnectionResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When messages are accumulated in a topic, you can call this operation to check whether a consumer is online.
@param request: OnsConsumerGetConnectionRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerGetConnectionResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerGetConnection',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerGetConnectionResponse(),
self.call_api(params, req, runtime)
)
async def ons_consumer_get_connection_with_options_async(
self,
request: ons_20190214_models.OnsConsumerGetConnectionRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerGetConnectionResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When messages are accumulated in a topic, you can call this operation to check whether a consumer is online.
@param request: OnsConsumerGetConnectionRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerGetConnectionResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerGetConnection',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerGetConnectionResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_consumer_get_connection(
self,
request: ons_20190214_models.OnsConsumerGetConnectionRequest,
) -> ons_20190214_models.OnsConsumerGetConnectionResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When messages are accumulated in a topic, you can call this operation to check whether a consumer is online.
@param request: OnsConsumerGetConnectionRequest
@return: OnsConsumerGetConnectionResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_consumer_get_connection_with_options(request, runtime)
async def ons_consumer_get_connection_async(
self,
request: ons_20190214_models.OnsConsumerGetConnectionRequest,
) -> ons_20190214_models.OnsConsumerGetConnectionResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When messages are accumulated in a topic, you can call this operation to check whether a consumer is online.
@param request: OnsConsumerGetConnectionRequest
@return: OnsConsumerGetConnectionResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_consumer_get_connection_with_options_async(request, runtime)
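    # Usage sketch (illustrative only): when messages pile up, first check whether any consumer
    # in the group is online at all, as the OnsConsumerGetConnection docstrings above suggest.
    # IDs are placeholders; `client` is assumed from the earlier sketch.
    #
    #   request = ons_20190214_models.OnsConsumerGetConnectionRequest(
    #       instance_id='MQ_INST_xxx',
    #       group_id='GID_example',
    #   )
    #   response = client.ons_consumer_get_connection(request)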
def ons_consumer_reset_offset_with_options(
self,
request: ons_20190214_models.OnsConsumerResetOffsetRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerResetOffsetResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to clear accumulated messages or reset a consumer offset to a specified timestamp. You can use one of the following methods to clear accumulated messages:
* Clear all accumulated messages in a specified topic.
* Clear the messages that were published to the specified topic before a specified point in time.
@param request: OnsConsumerResetOffsetRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerResetOffsetResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.reset_timestamp):
query['ResetTimestamp'] = request.reset_timestamp
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
if not UtilClient.is_unset(request.type):
query['Type'] = request.type
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerResetOffset',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerResetOffsetResponse(),
self.call_api(params, req, runtime)
)
async def ons_consumer_reset_offset_with_options_async(
self,
request: ons_20190214_models.OnsConsumerResetOffsetRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerResetOffsetResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to clear accumulated messages or reset a consumer offset to a specified timestamp. You can use one of the following methods to clear accumulated messages:
* Clear all accumulated messages in a specified topic.
* Clear the messages that were published to the specified topic before a specified point in time.
@param request: OnsConsumerResetOffsetRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerResetOffsetResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.reset_timestamp):
query['ResetTimestamp'] = request.reset_timestamp
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
if not UtilClient.is_unset(request.type):
query['Type'] = request.type
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerResetOffset',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerResetOffsetResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_consumer_reset_offset(
self,
request: ons_20190214_models.OnsConsumerResetOffsetRequest,
) -> ons_20190214_models.OnsConsumerResetOffsetResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to clear accumulated messages or reset a consumer offset to a specified timestamp. You can use one of the following methods to clear accumulated messages:
* Clear all accumulated messages in a specified topic.
* Clear the messages that were published to the specified topic before a specified point in time.
@param request: OnsConsumerResetOffsetRequest
@return: OnsConsumerResetOffsetResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_consumer_reset_offset_with_options(request, runtime)
async def ons_consumer_reset_offset_async(
self,
request: ons_20190214_models.OnsConsumerResetOffsetRequest,
) -> ons_20190214_models.OnsConsumerResetOffsetResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to clear accumulated messages or reset a consumer offset to a specified timestamp. You can use one of the following methods to clear accumulated messages:
* Clear all accumulated messages in a specified topic.
* Clear the messages that were published to the specified topic before a specified point in time.
@param request: OnsConsumerResetOffsetRequest
@return: OnsConsumerResetOffsetResponse
"""
runtime = util_models.RuntimeOptions()
        return await self.ons_consumer_reset_offset_with_options_async(request, runtime)
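    # Usage sketch (illustrative only): reset the consumer offset of a group on one topic to a
    # point in time, the second of the two methods listed in the OnsConsumerResetOffset
    # docstrings above. The mapping of Type values (1 = reset to ResetTimestamp, 0 = clear all
    # accumulated messages) is an assumption based on the ONS API reference and should be
    # verified; IDs are placeholders and `client` is assumed from the earlier sketch.
    #
    #   request = ons_20190214_models.OnsConsumerResetOffsetRequest(
    #       instance_id='MQ_INST_xxx',
    #       group_id='GID_example',
    #       topic='example-topic',
    #       type=1,                          # assumed: reset to the timestamp below
    #       reset_timestamp=1700000000000,   # milliseconds since the epoch
    #   )
    #   client.ons_consumer_reset_offset(request)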
def ons_consumer_status_with_options(
self,
request: ons_20190214_models.OnsConsumerStatusRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerStatusResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation in scenarios in which consumers are online and messages are accumulated. You can troubleshoot errors based on the information that is returned by this operation. You can check whether all consumers in the consumer group subscribe to the same topics and tags, and whether load balancing is performed as expected. You can also obtain the information about thread stack traces of online consumers.
* This operation uses multiple backend operations to query and aggregate data. The system requires a long period of time to process a request. We recommend that you do not frequently call this operation.
@param request: OnsConsumerStatusRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerStatusResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.detail):
query['Detail'] = request.detail
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.need_jstack):
query['NeedJstack'] = request.need_jstack
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerStatus',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerStatusResponse(),
self.call_api(params, req, runtime)
)
async def ons_consumer_status_with_options_async(
self,
request: ons_20190214_models.OnsConsumerStatusRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerStatusResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation in scenarios in which consumers are online and messages are accumulated. You can troubleshoot errors based on the information that is returned by this operation. You can check whether all consumers in the consumer group subscribe to the same topics and tags, and whether load balancing is performed as expected. You can also obtain the information about thread stack traces of online consumers.
* This operation uses multiple backend operations to query and aggregate data. The system requires a long period of time to process a request. We recommend that you do not frequently call this operation.
@param request: OnsConsumerStatusRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerStatusResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.detail):
query['Detail'] = request.detail
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.need_jstack):
query['NeedJstack'] = request.need_jstack
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerStatus',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerStatusResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_consumer_status(
self,
request: ons_20190214_models.OnsConsumerStatusRequest,
) -> ons_20190214_models.OnsConsumerStatusResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation in scenarios in which consumers are online and messages are accumulated. You can troubleshoot errors based on the information that is returned by this operation. You can check whether all consumers in the consumer group subscribe to the same topics and tags, and whether load balancing is performed as expected. You can also obtain the information about thread stack traces of online consumers.
* This operation uses multiple backend operations to query and aggregate data. The system requires a long period of time to process a request. We recommend that you do not frequently call this operation.
@param request: OnsConsumerStatusRequest
@return: OnsConsumerStatusResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_consumer_status_with_options(request, runtime)
async def ons_consumer_status_async(
self,
request: ons_20190214_models.OnsConsumerStatusRequest,
) -> ons_20190214_models.OnsConsumerStatusResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation in scenarios in which consumers are online and messages are accumulated. You can troubleshoot errors based on the information that is returned by this operation. You can check whether all consumers in the consumer group subscribe to the same topics and tags, and whether load balancing is performed as expected. You can also obtain the information about thread stack traces of online consumers.
* This operation uses multiple backend operations to query and aggregate data. The system requires a long period of time to process a request. We recommend that you do not frequently call this operation.
@param request: OnsConsumerStatusRequest
@return: OnsConsumerStatusResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_consumer_status_with_options_async(request, runtime)
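    # Usage sketch (illustrative only): pull a detailed consumer-group status, including
    # subscription consistency and (optionally) thread stacks, as described in the
    # OnsConsumerStatus docstrings above. Because the call aggregates several backend queries,
    # it is intended for occasional troubleshooting rather than frequent polling. IDs are
    # placeholders; `client` is assumed from the earlier sketch.
    #
    #   request = ons_20190214_models.OnsConsumerStatusRequest(
    #       instance_id='MQ_INST_xxx',
    #       group_id='GID_example',
    #       detail=True,
    #       need_jstack=True,   # request thread stack traces of online consumers
    #   )
    #   response = client.ons_consumer_status(request)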
def ons_consumer_time_span_with_options(
self,
request: ons_20190214_models.OnsConsumerTimeSpanRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerTimeSpanResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        You can call this operation to query the point in time when the earliest stored message was published to a specified topic and the point in time when the most recently stored message was published to the specified topic. You can also call this operation to query the most recent point in time when a message in the topic was consumed. This operation is usually used with the **OnsConsumerAccumulate** operation to display an overview of the consumption progress.
@param request: OnsConsumerTimeSpanRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerTimeSpanResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerTimeSpan',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerTimeSpanResponse(),
self.call_api(params, req, runtime)
)
async def ons_consumer_time_span_with_options_async(
self,
request: ons_20190214_models.OnsConsumerTimeSpanRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsConsumerTimeSpanResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        You can call this operation to query the point in time when the earliest stored message was published to a specified topic and the point in time when the most recently stored message was published to the specified topic. You can also call this operation to query the most recent point in time when a message in the topic was consumed. This operation is usually used with the **OnsConsumerAccumulate** operation to display an overview of the consumption progress.
@param request: OnsConsumerTimeSpanRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsConsumerTimeSpanResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsConsumerTimeSpan',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsConsumerTimeSpanResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_consumer_time_span(
self,
request: ons_20190214_models.OnsConsumerTimeSpanRequest,
) -> ons_20190214_models.OnsConsumerTimeSpanResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        You can call this operation to query the point in time when the earliest stored message was published to a specified topic and the point in time when the most recently stored message was published to the specified topic. You can also call this operation to query the most recent point in time when a message in the topic was consumed. This operation is usually used with the **OnsConsumerAccumulate** operation to display an overview of the consumption progress.
@param request: OnsConsumerTimeSpanRequest
@return: OnsConsumerTimeSpanResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_consumer_time_span_with_options(request, runtime)
async def ons_consumer_time_span_async(
self,
request: ons_20190214_models.OnsConsumerTimeSpanRequest,
) -> ons_20190214_models.OnsConsumerTimeSpanResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        You can call this operation to query the point in time when the earliest stored message was published to a specified topic and the point in time when the most recently stored message was published to the specified topic. You can also call this operation to query the most recent point in time when a message in the topic was consumed. This operation is usually used with the **OnsConsumerAccumulate** operation to display an overview of the consumption progress.
@param request: OnsConsumerTimeSpanRequest
@return: OnsConsumerTimeSpanResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_consumer_time_span_with_options_async(request, runtime)
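    # Usage sketch (illustrative only): combine OnsConsumerTimeSpan with OnsConsumerAccumulate,
    # as the docstrings above suggest, to show both the stored-message time range of a topic
    # and the group's latest consumption time alongside its backlog. IDs are placeholders;
    # `client` is assumed from the earlier sketch.
    #
    #   span = client.ons_consumer_time_span(
    #       ons_20190214_models.OnsConsumerTimeSpanRequest(
    #           instance_id='MQ_INST_xxx',
    #           group_id='GID_example',
    #           topic='example-topic',
    #       ))
    #   backlog = client.ons_consumer_accumulate(
    #       ons_20190214_models.OnsConsumerAccumulateRequest(
    #           instance_id='MQ_INST_xxx',
    #           group_id='GID_example',
    #           detail=False,
    #       ))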
def ons_dlqmessage_get_by_id_with_options(
self,
request: ons_20190214_models.OnsDLQMessageGetByIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsDLQMessageGetByIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation uses the exact match method to query a dead-letter message based on the message ID. You can obtain the message ID that is required to query the information about a dead-letter message from the SendResult parameter that is returned after the message is sent. You can also obtain the message ID by calling the OnsDLQMessagePageQueryByGroupId operation to query multiple messages at a time. The queried information about the dead-letter message includes the point in time when the message is stored, the message body, and attributes such as the message tag and the message key.
@param request: OnsDLQMessageGetByIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsDLQMessageGetByIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsDLQMessageGetById',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsDLQMessageGetByIdResponse(),
self.call_api(params, req, runtime)
)
async def ons_dlqmessage_get_by_id_with_options_async(
self,
request: ons_20190214_models.OnsDLQMessageGetByIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsDLQMessageGetByIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation uses the exact match method to query a dead-letter message based on the message ID. You can obtain the message ID that is required to query the information about a dead-letter message from the SendResult parameter that is returned after the message is sent. You can also obtain the message ID by calling the OnsDLQMessagePageQueryByGroupId operation to query multiple messages at a time. The queried information about the dead-letter message includes the point in time when the message is stored, the message body, and attributes such as the message tag and the message key.
@param request: OnsDLQMessageGetByIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsDLQMessageGetByIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsDLQMessageGetById',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsDLQMessageGetByIdResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_dlqmessage_get_by_id(
self,
request: ons_20190214_models.OnsDLQMessageGetByIdRequest,
) -> ons_20190214_models.OnsDLQMessageGetByIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation uses the exact match method to query a dead-letter message based on the message ID. You can obtain the message ID that is required to query the information about a dead-letter message from the SendResult parameter that is returned after the message is sent. You can also obtain the message ID by calling the OnsDLQMessagePageQueryByGroupId operation to query multiple messages at a time. The queried information about the dead-letter message includes the point in time when the message is stored, the message body, and attributes such as the message tag and the message key.
@param request: OnsDLQMessageGetByIdRequest
@return: OnsDLQMessageGetByIdResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_dlqmessage_get_by_id_with_options(request, runtime)
async def ons_dlqmessage_get_by_id_async(
self,
request: ons_20190214_models.OnsDLQMessageGetByIdRequest,
) -> ons_20190214_models.OnsDLQMessageGetByIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation uses the exact match method to query a dead-letter message based on the message ID. You can obtain the message ID that is required to query the information about a dead-letter message from the SendResult parameter that is returned after the message is sent. You can also obtain the message ID by calling the OnsDLQMessagePageQueryByGroupId operation to query multiple messages at a time. The queried information about the dead-letter message includes the point in time when the message is stored, the message body, and attributes such as the message tag and the message key.
@param request: OnsDLQMessageGetByIdRequest
@return: OnsDLQMessageGetByIdResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_dlqmessage_get_by_id_with_options_async(request, runtime)
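    # Usage sketch (illustrative only): fetch a single dead-letter message by its message ID,
    # as described in the OnsDLQMessageGetById docstrings above. The message ID would normally
    # come from the producer's SendResult or from OnsDLQMessagePageQueryByGroupId; the value
    # below is a placeholder, and `client` is assumed from the earlier sketch.
    #
    #   request = ons_20190214_models.OnsDLQMessageGetByIdRequest(
    #       instance_id='MQ_INST_xxx',
    #       group_id='GID_example',
    #       msg_id='0BC1669963053CE7xxxxxxxxxxxx',  # placeholder message ID
    #   )
    #   response = client.ons_dlqmessage_get_by_id(request)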
def ons_dlqmessage_page_query_by_group_id_with_options(
self,
request: ons_20190214_models.OnsDLQMessagePageQueryByGroupIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsDLQMessagePageQueryByGroupIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If you do not know the ID of the dead-letter message that you want to query, you can call this operation to query all dead-letter messages that are sent to a specified consumer group within a specified time range. The results are returned by page.
* We recommend that you specify a short time range to query dead-letter messages in this method. If you specify a long time range, a large number of dead-letter messages are returned. In this case, you cannot find the dead-letter message that you want to query in an efficient manner. You can perform the following steps to query dead-letter messages:
1. Perform a paged query by specifying the group ID, start time, end time, and number of entries to return on each page. If matched messages are found, the information about the dead-letter messages on the first page, total number of pages, and task ID are returned by default.
2. Specify the task ID and a page number to call this operation again to query the dead-letter messages on the specified page. In this query, the BeginTime, EndTime, and PageSize parameters do not take effect. By default, the system uses the values of these parameters that you specified in the request when you created the specified query task.
@param request: OnsDLQMessagePageQueryByGroupIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsDLQMessagePageQueryByGroupIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.current_page):
query['CurrentPage'] = request.current_page
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.page_size):
query['PageSize'] = request.page_size
if not UtilClient.is_unset(request.task_id):
query['TaskId'] = request.task_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsDLQMessagePageQueryByGroupId',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsDLQMessagePageQueryByGroupIdResponse(),
self.call_api(params, req, runtime)
)
async def ons_dlqmessage_page_query_by_group_id_with_options_async(
self,
request: ons_20190214_models.OnsDLQMessagePageQueryByGroupIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsDLQMessagePageQueryByGroupIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If you do not know the ID of the dead-letter message that you want to query, you can call this operation to query all dead-letter messages that are sent to a specified consumer group within a specified time range. The results are returned by page.
* We recommend that you specify a short time range to query dead-letter messages in this method. If you specify a long time range, a large number of dead-letter messages are returned. In this case, you cannot find the dead-letter message that you want to query in an efficient manner. You can perform the following steps to query dead-letter messages:
1. Perform a paged query by specifying the group ID, start time, end time, and number of entries to return on each page. If matched messages are found, the information about the dead-letter messages on the first page, total number of pages, and task ID are returned by default.
2. Specify the task ID and a page number to call this operation again to query the dead-letter messages on the specified page. In this query, the BeginTime, EndTime, and PageSize parameters do not take effect. By default, the system uses the values of these parameters that you specified in the request when you created the specified query task.
@param request: OnsDLQMessagePageQueryByGroupIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsDLQMessagePageQueryByGroupIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.current_page):
query['CurrentPage'] = request.current_page
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.page_size):
query['PageSize'] = request.page_size
if not UtilClient.is_unset(request.task_id):
query['TaskId'] = request.task_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsDLQMessagePageQueryByGroupId',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsDLQMessagePageQueryByGroupIdResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_dlqmessage_page_query_by_group_id(
self,
request: ons_20190214_models.OnsDLQMessagePageQueryByGroupIdRequest,
) -> ons_20190214_models.OnsDLQMessagePageQueryByGroupIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If you do not know the ID of the dead-letter message that you want to query, you can call this operation to query all dead-letter messages that are sent to a specified consumer group within a specified time range. The results are returned by page.
* We recommend that you specify a short time range to query dead-letter messages in this method. If you specify a long time range, a large number of dead-letter messages are returned. In this case, you cannot find the dead-letter message that you want to query in an efficient manner. You can perform the following steps to query dead-letter messages:
1. Perform a paged query by specifying the group ID, start time, end time, and number of entries to return on each page. If matched messages are found, the information about the dead-letter messages on the first page, total number of pages, and task ID are returned by default.
2. Specify the task ID and a page number to call this operation again to query the dead-letter messages on the specified page. In this query, the BeginTime, EndTime, and PageSize parameters do not take effect. By default, the system uses the values of these parameters that you specified in the request when you created the specified query task.
@param request: OnsDLQMessagePageQueryByGroupIdRequest
@return: OnsDLQMessagePageQueryByGroupIdResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_dlqmessage_page_query_by_group_id_with_options(request, runtime)
async def ons_dlqmessage_page_query_by_group_id_async(
self,
request: ons_20190214_models.OnsDLQMessagePageQueryByGroupIdRequest,
) -> ons_20190214_models.OnsDLQMessagePageQueryByGroupIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If you do not know the ID of the dead-letter message that you want to query, you can call this operation to query all dead-letter messages that are sent to a specified consumer group within a specified time range. The results are returned by page.
* We recommend that you specify a short time range to query dead-letter messages in this method. If you specify a long time range, a large number of dead-letter messages are returned. In this case, you cannot find the dead-letter message that you want to query in an efficient manner. You can perform the following steps to query dead-letter messages:
1. Perform a paged query by specifying the group ID, start time, end time, and number of entries to return on each page. If matched messages are found, the information about the dead-letter messages on the first page, total number of pages, and task ID are returned by default.
2. Specify the task ID and a page number to call this operation again to query the dead-letter messages on the specified page. In this query, the BeginTime, EndTime, and PageSize parameters do not take effect. By default, the system uses the values of these parameters that you specified in the request when you created the specified query task.
@param request: OnsDLQMessagePageQueryByGroupIdRequest
@return: OnsDLQMessagePageQueryByGroupIdResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_dlqmessage_page_query_by_group_id_with_options_async(request, runtime)
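    # Usage sketch (illustrative only): the two-step paged query described in the
    # OnsDLQMessagePageQueryByGroupId docstrings above. The first call supplies the time range
    # and page size and returns a task ID; later calls reuse that task ID together with a page
    # number. The response attribute path used to read the task ID below is an assumption and
    # should be checked against the generated response model; IDs and timestamps are
    # placeholders, and `client` is assumed from the earlier sketch.
    #
    #   first_page = client.ons_dlqmessage_page_query_by_group_id(
    #       ons_20190214_models.OnsDLQMessagePageQueryByGroupIdRequest(
    #           instance_id='MQ_INST_xxx',
    #           group_id='GID_example',
    #           begin_time=1700000000000,   # placeholder start of the time range (ms)
    #           end_time=1700003600000,     # placeholder end of the time range (ms)
    #           page_size=20,
    #           current_page=1,
    #       ))
    #   task_id = first_page.body.msg_found_do.task_id   # assumed response field path
    #   second_page = client.ons_dlqmessage_page_query_by_group_id(
    #       ons_20190214_models.OnsDLQMessagePageQueryByGroupIdRequest(
    #           instance_id='MQ_INST_xxx',
    #           group_id='GID_example',
    #           task_id=task_id,
    #           current_page=2,
    #       ))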
def ons_dlqmessage_resend_by_id_with_options(
self,
request: ons_20190214_models.OnsDLQMessageResendByIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsDLQMessageResendByIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* A dead-letter message is a message that still fails to be consumed after the number of consumption retries reaches the upper limit. If the message still cannot be consumed after you re-send it, a message with the same message ID is added to the corresponding dead-letter queue. You can query the message ID on the Dead-letter Queues page in the ApsaraMQ for RocketMQ console or by calling API operations. You can obtain the number of consumption failures for a message based on the number of dead-letter messages with the same message ID in the dead-letter queue.
* A dead-letter message is a message that fails to be consumed after the number of consumption retries reaches the upper limit. Generally, dead-letter messages are produced because of incorrect consumption logic. We recommend that you troubleshoot the consumption failures and then call this operation to send the message to the consumer group for consumption again.
        * ApsaraMQ for RocketMQ does not manage the status of dead-letter messages based on the consumption status of the dead-letter messages. After you call this operation to send a dead-letter message to a consumer group and the message is consumed, ApsaraMQ for RocketMQ does not remove the dead-letter message from the dead-letter queue. You must track dead-letter messages yourself and decide whether a dead-letter message needs to be sent to a consumer group for consumption, so that messages that have already been consumed are not resent and consumed again.
@param request: OnsDLQMessageResendByIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsDLQMessageResendByIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsDLQMessageResendById',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsDLQMessageResendByIdResponse(),
self.call_api(params, req, runtime)
)
async def ons_dlqmessage_resend_by_id_with_options_async(
self,
request: ons_20190214_models.OnsDLQMessageResendByIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsDLQMessageResendByIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* A dead-letter message is a message that still fails to be consumed after the number of consumption retries reaches the upper limit. If the message still cannot be consumed after you re-send it, a message with the same message ID is added to the corresponding dead-letter queue. You can query the message ID on the Dead-letter Queues page in the ApsaraMQ for RocketMQ console or by calling API operations. You can obtain the number of consumption failures for a message based on the number of dead-letter messages with the same message ID in the dead-letter queue.
* A dead-letter message is a message that fails to be consumed after the number of consumption retries reaches the upper limit. Generally, dead-letter messages are produced because of incorrect consumption logic. We recommend that you troubleshoot the consumption failures and then call this operation to send the message to the consumer group for consumption again.
        * ApsaraMQ for RocketMQ does not manage the status of dead-letter messages based on the consumption status of the dead-letter messages. After you call this operation to send a dead-letter message to a consumer group and the message is consumed, ApsaraMQ for RocketMQ does not remove the dead-letter message from the dead-letter queue. You must track dead-letter messages yourself and decide whether a dead-letter message needs to be sent to a consumer group for consumption, so that messages that have already been consumed are not resent and consumed again.
@param request: OnsDLQMessageResendByIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsDLQMessageResendByIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsDLQMessageResendById',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsDLQMessageResendByIdResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_dlqmessage_resend_by_id(
self,
request: ons_20190214_models.OnsDLQMessageResendByIdRequest,
) -> ons_20190214_models.OnsDLQMessageResendByIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* A dead-letter message is a message that still fails to be consumed after the number of consumption retries reaches the upper limit. If the message still cannot be consumed after you re-send it, a message with the same message ID is added to the corresponding dead-letter queue. You can query the message ID on the Dead-letter Queues page in the ApsaraMQ for RocketMQ console or by calling API operations. You can obtain the number of consumption failures for a message based on the number of dead-letter messages with the same message ID in the dead-letter queue.
* A dead-letter message is a message that fails to be consumed after the number of consumption retries reaches the upper limit. Generally, dead-letter messages are produced because of incorrect consumption logic. We recommend that you troubleshoot the consumption failures and then call this operation to send the message to the consumer group for consumption again.
        * ApsaraMQ for RocketMQ does not manage the status of dead-letter messages based on the consumption status of the dead-letter messages. After you call this operation to send a dead-letter message to a consumer group and the message is consumed, ApsaraMQ for RocketMQ does not remove the dead-letter message from the dead-letter queue. You must track dead-letter messages yourself and decide whether a dead-letter message needs to be sent to a consumer group for consumption, so that messages that have already been consumed are not resent and consumed again.
@param request: OnsDLQMessageResendByIdRequest
@return: OnsDLQMessageResendByIdResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_dlqmessage_resend_by_id_with_options(request, runtime)
async def ons_dlqmessage_resend_by_id_async(
self,
request: ons_20190214_models.OnsDLQMessageResendByIdRequest,
) -> ons_20190214_models.OnsDLQMessageResendByIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* A dead-letter message is a message that still fails to be consumed after the number of consumption retries reaches the upper limit. If the message still cannot be consumed after you re-send it, a message with the same message ID is added to the corresponding dead-letter queue. You can query the message ID on the Dead-letter Queues page in the ApsaraMQ for RocketMQ console or by calling API operations. You can obtain the number of consumption failures for a message based on the number of dead-letter messages with the same message ID in the dead-letter queue.
* A dead-letter message is a message that fails to be consumed after the number of consumption retries reaches the upper limit. Generally, dead-letter messages are produced because of incorrect consumption logic. We recommend that you troubleshoot the consumption failures and then call this operation to send the message to the consumer group for consumption again.
        * ApsaraMQ for RocketMQ does not manage the status of dead-letter messages based on the consumption status of the dead-letter messages. After you call this operation to send a dead-letter message to a consumer group and the message is consumed, ApsaraMQ for RocketMQ does not remove the dead-letter message from the dead-letter queue. You must track dead-letter messages yourself and decide whether a dead-letter message needs to be sent to a consumer group for consumption, so that messages that have already been consumed are not resent and consumed again.
@param request: OnsDLQMessageResendByIdRequest
@return: OnsDLQMessageResendByIdResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_dlqmessage_resend_by_id_with_options_async(request, runtime)
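    # Usage sketch (illustrative only): after fixing the consumption logic, push a specific
    # dead-letter message back to its consumer group, as described in the
    # OnsDLQMessageResendById docstrings above. Note that the message stays in the dead-letter
    # queue even after it is consumed successfully; IDs below are placeholders and `client` is
    # assumed from the earlier sketch.
    #
    #   request = ons_20190214_models.OnsDLQMessageResendByIdRequest(
    #       instance_id='MQ_INST_xxx',
    #       group_id='GID_example',
    #       msg_id='0BC1669963053CE7xxxxxxxxxxxx',
    #   )
    #   client.ons_dlqmessage_resend_by_id(request)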
def ons_group_consumer_update_with_options(
self,
request: ons_20190214_models.OnsGroupConsumerUpdateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupConsumerUpdateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to configure the permissions for a consumer group to read messages based on a specified region of ApsaraMQ for RocketMQ and a specified group ID. You can call this operation in scenarios in which you want to forbid consumers in a specific group from reading messages.
@param request: OnsGroupConsumerUpdateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupConsumerUpdateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.read_enable):
query['ReadEnable'] = request.read_enable
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupConsumerUpdate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupConsumerUpdateResponse(),
self.call_api(params, req, runtime)
)
async def ons_group_consumer_update_with_options_async(
self,
request: ons_20190214_models.OnsGroupConsumerUpdateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupConsumerUpdateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to configure the permissions for a consumer group to read messages based on a specified region of ApsaraMQ for RocketMQ and a specified group ID. You can call this operation in scenarios in which you want to forbid consumers in a specific group from reading messages.
@param request: OnsGroupConsumerUpdateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupConsumerUpdateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.read_enable):
query['ReadEnable'] = request.read_enable
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupConsumerUpdate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupConsumerUpdateResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_group_consumer_update(
self,
request: ons_20190214_models.OnsGroupConsumerUpdateRequest,
) -> ons_20190214_models.OnsGroupConsumerUpdateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to configure the permissions for a consumer group to read messages based on a specified region of ApsaraMQ for RocketMQ and a specified group ID. You can call this operation in scenarios in which you want to forbid consumers in a specific group from reading messages.
@param request: OnsGroupConsumerUpdateRequest
@return: OnsGroupConsumerUpdateResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_group_consumer_update_with_options(request, runtime)
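    # Illustrative sketch (assumption-based, not generated code): with an initialized
    # ``Client`` as sketched above, read access for a consumer group could be disabled
    # like this, assuming the Tea request model accepts its snake_case fields as keyword
    # arguments (field names taken from the query built in *_with_options above):
    #   request = ons_20190214_models.OnsGroupConsumerUpdateRequest(
    #       instance_id='MQ_INST_xxx',  # placeholder
    #       group_id='GID_example',     # placeholder
    #       read_enable=False,          # False forbids consumers in the group from reading
    #   )
    #   response = client.ons_group_consumer_update(request)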
async def ons_group_consumer_update_async(
self,
request: ons_20190214_models.OnsGroupConsumerUpdateRequest,
) -> ons_20190214_models.OnsGroupConsumerUpdateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to configure the permissions for a consumer group to read messages based on a specified region of ApsaraMQ for RocketMQ and a specified group ID. You can call this operation in scenarios in which you want to forbid consumers in a specific group from reading messages.
@param request: OnsGroupConsumerUpdateRequest
@return: OnsGroupConsumerUpdateResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_group_consumer_update_with_options_async(request, runtime)
def ons_group_create_with_options(
self,
request: ons_20190214_models.OnsGroupCreateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you release a new application or implement new business logic, you need new consumer groups. You can call this operation to create a consumer group.
@param request: OnsGroupCreateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupCreateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.group_type):
query['GroupType'] = request.group_type
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.remark):
query['Remark'] = request.remark
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupCreate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupCreateResponse(),
self.call_api(params, req, runtime)
)
async def ons_group_create_with_options_async(
self,
request: ons_20190214_models.OnsGroupCreateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you release a new application or implement new business logic, you need new consumer groups. You can call this operation to create a consumer group.
@param request: OnsGroupCreateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupCreateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.group_type):
query['GroupType'] = request.group_type
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.remark):
query['Remark'] = request.remark
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupCreate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupCreateResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_group_create(
self,
request: ons_20190214_models.OnsGroupCreateRequest,
) -> ons_20190214_models.OnsGroupCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you release a new application or implement new business logic, you need new consumer groups. You can call this operation to create a consumer group.
@param request: OnsGroupCreateRequest
@return: OnsGroupCreateResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_group_create_with_options(request, runtime)
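    # Illustrative sketch (assumption-based): creating a consumer group with an
    # initialized ``Client``; field names mirror the query parameters assembled in
    # ons_group_create_with_options, values are placeholders:
    #   request = ons_20190214_models.OnsGroupCreateRequest(
    #       instance_id='MQ_INST_xxx',        # placeholder
    #       group_id='GID_new_app',           # placeholder group ID
    #       remark='group for the new app',   # optional description
    #   )
    #   response = client.ons_group_create(request)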
async def ons_group_create_async(
self,
request: ons_20190214_models.OnsGroupCreateRequest,
) -> ons_20190214_models.OnsGroupCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you release a new application or implement new business logic, you need new consumer groups. You can call this operation to create a consumer group.
@param request: OnsGroupCreateRequest
@return: OnsGroupCreateResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_group_create_with_options_async(request, runtime)
def ons_group_delete_with_options(
self,
request: ons_20190214_models.OnsGroupDeleteRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupDeleteResponse:
"""
>
* API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* After you delete a group, the consumers in the group immediately stop receiving messages. Exercise caution when you call this operation.
You can call this operation to delete a group when you need to reclaim the resources of the group. For example, after an application is brought offline, you can delete the groups that are used for the application. After you delete a group, the backend of ApsaraMQ for RocketMQ reclaims the resources of the group. The system requires a long period of time to reclaim the resources. We recommend that you do not create a group that uses the same name as a deleted group immediately after you delete the group. If the system fails to delete the specified group, troubleshoot the issue based on the error code.
@param request: OnsGroupDeleteRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupDeleteResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupDelete',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupDeleteResponse(),
self.call_api(params, req, runtime)
)
async def ons_group_delete_with_options_async(
self,
request: ons_20190214_models.OnsGroupDeleteRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupDeleteResponse:
"""
>
* API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* After you delete a group, the consumers in the group immediately stop receiving messages. Exercise caution when you call this operation.
You can call this operation to delete a group when you need to reclaim the resources of the group. For example, after an application is brought offline, you can delete the groups that are used for the application. After you delete a group, the backend of ApsaraMQ for RocketMQ reclaims the resources of the group. The system requires a long period of time to reclaim the resources. We recommend that you do not create a group that uses the same name as a deleted group immediately after you delete the group. If the system fails to delete the specified group, troubleshoot the issue based on the error code.
@param request: OnsGroupDeleteRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupDeleteResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupDelete',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupDeleteResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_group_delete(
self,
request: ons_20190214_models.OnsGroupDeleteRequest,
) -> ons_20190214_models.OnsGroupDeleteResponse:
"""
>
* API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* After you delete a group, the consumers in the group immediately stop receiving messages. Exercise caution when you call this operation.
You can call this operation to delete a group when you need to reclaim the resources of the group. For example, after an application is brought offline, you can delete the groups that are used for the application. After you delete a group, the backend of ApsaraMQ for RocketMQ reclaims the resources of the group. The system requires a long period of time to reclaim the resources. We recommend that you do not create a group that uses the same name as a deleted group immediately after you delete the group. If the system fails to delete the specified group, troubleshoot the issue based on the error code.
@param request: OnsGroupDeleteRequest
@return: OnsGroupDeleteResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_group_delete_with_options(request, runtime)
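    # Illustrative sketch (assumption-based): deleting a group once its consumers are
    # offline; the request carries only the group ID and instance ID, matching the query
    # built in ons_group_delete_with_options:
    #   request = ons_20190214_models.OnsGroupDeleteRequest(
    #       instance_id='MQ_INST_xxx',   # placeholder
    #       group_id='GID_retired_app',  # placeholder
    #   )
    #   response = client.ons_group_delete(request)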
async def ons_group_delete_async(
self,
request: ons_20190214_models.OnsGroupDeleteRequest,
) -> ons_20190214_models.OnsGroupDeleteResponse:
"""
>
* API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* After you delete a group, the consumers in the group immediately stop receiving messages. Exercise caution when you call this operation.
You can call this operation to delete a group when you need to reclaim the resources of the group. For example, after an application is brought offline, you can delete the groups that are used for the application. After you delete a group, the backend of ApsaraMQ for RocketMQ reclaims the resources of the group. The system requires a long period of time to reclaim the resources. We recommend that you do not create a group that uses the same name as a deleted group immediately after you delete the group. If the system fails to delete the specified group, troubleshoot the issue based on the error code.
@param request: OnsGroupDeleteRequest
@return: OnsGroupDeleteResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_group_delete_with_options_async(request, runtime)
def ons_group_list_with_options(
self,
request: ons_20190214_models.OnsGroupListRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsGroupListRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupListResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.group_type):
query['GroupType'] = request.group_type
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupList',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupListResponse(),
self.call_api(params, req, runtime)
)
async def ons_group_list_with_options_async(
self,
request: ons_20190214_models.OnsGroupListRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsGroupListRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupListResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.group_type):
query['GroupType'] = request.group_type
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupList',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupListResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_group_list(
self,
request: ons_20190214_models.OnsGroupListRequest,
) -> ons_20190214_models.OnsGroupListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsGroupListRequest
@return: OnsGroupListResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_group_list_with_options(request, runtime)
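    # Illustrative sketch (assumption-based): listing the groups of an instance;
    # GroupId, GroupType, and Tag are optional filters in the query above, so a minimal
    # request only needs the instance ID:
    #   request = ons_20190214_models.OnsGroupListRequest(
    #       instance_id='MQ_INST_xxx',  # placeholder
    #   )
    #   response = client.ons_group_list(request)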
async def ons_group_list_async(
self,
request: ons_20190214_models.OnsGroupListRequest,
) -> ons_20190214_models.OnsGroupListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsGroupListRequest
@return: OnsGroupListResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_group_list_with_options_async(request, runtime)
def ons_group_sub_detail_with_options(
self,
request: ons_20190214_models.OnsGroupSubDetailRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupSubDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsGroupSubDetailRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupSubDetailResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupSubDetail',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupSubDetailResponse(),
self.call_api(params, req, runtime)
)
async def ons_group_sub_detail_with_options_async(
self,
request: ons_20190214_models.OnsGroupSubDetailRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsGroupSubDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsGroupSubDetailRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsGroupSubDetailResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsGroupSubDetail',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsGroupSubDetailResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_group_sub_detail(
self,
request: ons_20190214_models.OnsGroupSubDetailRequest,
) -> ons_20190214_models.OnsGroupSubDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsGroupSubDetailRequest
@return: OnsGroupSubDetailResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_group_sub_detail_with_options(request, runtime)
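    # Illustrative sketch (assumption-based): querying the subscription details of a
    # single group, using the two fields that ons_group_sub_detail_with_options forwards
    # as query parameters:
    #   request = ons_20190214_models.OnsGroupSubDetailRequest(
    #       instance_id='MQ_INST_xxx',  # placeholder
    #       group_id='GID_example',     # placeholder
    #   )
    #   response = client.ons_group_sub_detail(request)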
async def ons_group_sub_detail_async(
self,
request: ons_20190214_models.OnsGroupSubDetailRequest,
) -> ons_20190214_models.OnsGroupSubDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsGroupSubDetailRequest
@return: OnsGroupSubDetailResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_group_sub_detail_with_options_async(request, runtime)
def ons_instance_base_info_with_options(
self,
request: ons_20190214_models.OnsInstanceBaseInfoRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceBaseInfoResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        To send and receive messages, a client must be connected to an ApsaraMQ for RocketMQ instance by using an endpoint. You can call this operation to query the endpoints of the instance.
@param request: OnsInstanceBaseInfoRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceBaseInfoResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceBaseInfo',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceBaseInfoResponse(),
self.call_api(params, req, runtime)
)
async def ons_instance_base_info_with_options_async(
self,
request: ons_20190214_models.OnsInstanceBaseInfoRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceBaseInfoResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        To send and receive messages, a client must be connected to an ApsaraMQ for RocketMQ instance by using an endpoint. You can call this operation to query the endpoints of the instance.
@param request: OnsInstanceBaseInfoRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceBaseInfoResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceBaseInfo',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceBaseInfoResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_instance_base_info(
self,
request: ons_20190214_models.OnsInstanceBaseInfoRequest,
) -> ons_20190214_models.OnsInstanceBaseInfoResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        To send and receive messages, a client must be connected to an ApsaraMQ for RocketMQ instance by using an endpoint. You can call this operation to query the endpoints of the instance.
@param request: OnsInstanceBaseInfoRequest
@return: OnsInstanceBaseInfoResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_instance_base_info_with_options(request, runtime)
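    # Illustrative sketch (assumption-based): looking up an instance's endpoints and
    # basic attributes; only the instance ID is forwarded as a query parameter:
    #   request = ons_20190214_models.OnsInstanceBaseInfoRequest(
    #       instance_id='MQ_INST_xxx',  # placeholder
    #   )
    #   response = client.ons_instance_base_info(request)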
async def ons_instance_base_info_async(
self,
request: ons_20190214_models.OnsInstanceBaseInfoRequest,
) -> ons_20190214_models.OnsInstanceBaseInfoResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        To send and receive messages, a client must be connected to an ApsaraMQ for RocketMQ instance by using an endpoint. You can call this operation to query the endpoints of the instance.
@param request: OnsInstanceBaseInfoRequest
@return: OnsInstanceBaseInfoResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_instance_base_info_with_options_async(request, runtime)
def ons_instance_create_with_options(
self,
request: ons_20190214_models.OnsInstanceCreateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
An instance is a virtual machine (VM) that can be used to store information about the topics and groups of ApsaraMQ for RocketMQ. You can call this operation when you need to create service resources for the business that you want to launch. Before you call this operation, take note of the following limits:
* A maximum of eight ApsaraMQ for RocketMQ instances can be deployed in each region.
* This operation can be called to create only a Standard Edition instance. You can use the ApsaraMQ for RocketMQ console to create Standard Edition instances and Enterprise Platinum Edition instances. For information about how to create ApsaraMQ for RocketMQ instances, see [Manage instances](~~200153~~).
@param request: OnsInstanceCreateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceCreateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_name):
query['InstanceName'] = request.instance_name
if not UtilClient.is_unset(request.remark):
query['Remark'] = request.remark
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceCreate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceCreateResponse(),
self.call_api(params, req, runtime)
)
async def ons_instance_create_with_options_async(
self,
request: ons_20190214_models.OnsInstanceCreateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
An instance is a virtual machine (VM) that can be used to store information about the topics and groups of ApsaraMQ for RocketMQ. You can call this operation when you need to create service resources for the business that you want to launch. Before you call this operation, take note of the following limits:
* A maximum of eight ApsaraMQ for RocketMQ instances can be deployed in each region.
* This operation can be called to create only a Standard Edition instance. You can use the ApsaraMQ for RocketMQ console to create Standard Edition instances and Enterprise Platinum Edition instances. For information about how to create ApsaraMQ for RocketMQ instances, see [Manage instances](~~200153~~).
@param request: OnsInstanceCreateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceCreateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_name):
query['InstanceName'] = request.instance_name
if not UtilClient.is_unset(request.remark):
query['Remark'] = request.remark
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceCreate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceCreateResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_instance_create(
self,
request: ons_20190214_models.OnsInstanceCreateRequest,
) -> ons_20190214_models.OnsInstanceCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
An instance is a virtual machine (VM) that can be used to store information about the topics and groups of ApsaraMQ for RocketMQ. You can call this operation when you need to create service resources for the business that you want to launch. Before you call this operation, take note of the following limits:
* A maximum of eight ApsaraMQ for RocketMQ instances can be deployed in each region.
* This operation can be called to create only a Standard Edition instance. You can use the ApsaraMQ for RocketMQ console to create Standard Edition instances and Enterprise Platinum Edition instances. For information about how to create ApsaraMQ for RocketMQ instances, see [Manage instances](~~200153~~).
@param request: OnsInstanceCreateRequest
@return: OnsInstanceCreateResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_instance_create_with_options(request, runtime)
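    # Illustrative sketch (assumption-based): creating a Standard Edition instance; the
    # operation accepts an instance name and an optional remark, as assembled in
    # ons_instance_create_with_options:
    #   request = ons_20190214_models.OnsInstanceCreateRequest(
    #       instance_name='my-standard-instance',  # placeholder name
    #       remark='created via OpenAPI',          # optional
    #   )
    #   response = client.ons_instance_create(request)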
async def ons_instance_create_async(
self,
request: ons_20190214_models.OnsInstanceCreateRequest,
) -> ons_20190214_models.OnsInstanceCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
An instance is a virtual machine (VM) that can be used to store information about the topics and groups of ApsaraMQ for RocketMQ. You can call this operation when you need to create service resources for the business that you want to launch. Before you call this operation, take note of the following limits:
* A maximum of eight ApsaraMQ for RocketMQ instances can be deployed in each region.
* This operation can be called to create only a Standard Edition instance. You can use the ApsaraMQ for RocketMQ console to create Standard Edition instances and Enterprise Platinum Edition instances. For information about how to create ApsaraMQ for RocketMQ instances, see [Manage instances](~~200153~~).
@param request: OnsInstanceCreateRequest
@return: OnsInstanceCreateResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_instance_create_with_options_async(request, runtime)
def ons_instance_delete_with_options(
self,
request: ons_20190214_models.OnsInstanceDeleteRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceDeleteResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation when you need to reclaim resources. For example, after you unpublish an application, you can reclaim the resources that were used for the application. An instance can be deleted only when the instance does not contain topics and groups.
* After an instance is deleted, the instance cannot be restored. Exercise caution when you call this operation.
@param request: OnsInstanceDeleteRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceDeleteResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceDelete',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceDeleteResponse(),
self.call_api(params, req, runtime)
)
async def ons_instance_delete_with_options_async(
self,
request: ons_20190214_models.OnsInstanceDeleteRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceDeleteResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation when you need to reclaim resources. For example, after you unpublish an application, you can reclaim the resources that were used for the application. An instance can be deleted only when the instance does not contain topics and groups.
* After an instance is deleted, the instance cannot be restored. Exercise caution when you call this operation.
@param request: OnsInstanceDeleteRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceDeleteResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceDelete',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceDeleteResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_instance_delete(
self,
request: ons_20190214_models.OnsInstanceDeleteRequest,
) -> ons_20190214_models.OnsInstanceDeleteResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation when you need to reclaim resources. For example, after you unpublish an application, you can reclaim the resources that were used for the application. An instance can be deleted only when the instance does not contain topics and groups.
* After an instance is deleted, the instance cannot be restored. Exercise caution when you call this operation.
@param request: OnsInstanceDeleteRequest
@return: OnsInstanceDeleteResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_instance_delete_with_options(request, runtime)
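    # Illustrative sketch (assumption-based): deleting an instance that no longer
    # contains topics or groups; the action is irreversible, so values are placeholders:
    #   request = ons_20190214_models.OnsInstanceDeleteRequest(
    #       instance_id='MQ_INST_xxx',  # placeholder
    #   )
    #   response = client.ons_instance_delete(request)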
async def ons_instance_delete_async(
self,
request: ons_20190214_models.OnsInstanceDeleteRequest,
) -> ons_20190214_models.OnsInstanceDeleteResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation when you need to reclaim resources. For example, after you unpublish an application, you can reclaim the resources that were used for the application. An instance can be deleted only when the instance does not contain topics and groups.
* After an instance is deleted, the instance cannot be restored. Exercise caution when you call this operation.
@param request: OnsInstanceDeleteRequest
@return: OnsInstanceDeleteResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_instance_delete_with_options_async(request, runtime)
def ons_instance_in_service_list_with_options(
self,
request: ons_20190214_models.OnsInstanceInServiceListRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceInServiceListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsInstanceInServiceListRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceInServiceListResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.need_resource_info):
query['NeedResourceInfo'] = request.need_resource_info
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceInServiceList',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceInServiceListResponse(),
self.call_api(params, req, runtime)
)
async def ons_instance_in_service_list_with_options_async(
self,
request: ons_20190214_models.OnsInstanceInServiceListRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceInServiceListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsInstanceInServiceListRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceInServiceListResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.need_resource_info):
query['NeedResourceInfo'] = request.need_resource_info
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceInServiceList',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceInServiceListResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_instance_in_service_list(
self,
request: ons_20190214_models.OnsInstanceInServiceListRequest,
) -> ons_20190214_models.OnsInstanceInServiceListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsInstanceInServiceListRequest
@return: OnsInstanceInServiceListResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_instance_in_service_list_with_options(request, runtime)
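    # Illustrative sketch (assumption-based): listing the in-service instances of the
    # current account and region; NeedResourceInfo is an optional flag in the query
    # above (its exact semantics are assumed here):
    #   request = ons_20190214_models.OnsInstanceInServiceListRequest(
    #       need_resource_info=True,  # assumed to also return resource details
    #   )
    #   response = client.ons_instance_in_service_list(request)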
async def ons_instance_in_service_list_async(
self,
request: ons_20190214_models.OnsInstanceInServiceListRequest,
) -> ons_20190214_models.OnsInstanceInServiceListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsInstanceInServiceListRequest
@return: OnsInstanceInServiceListResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_instance_in_service_list_with_options_async(request, runtime)
def ons_instance_update_with_options(
self,
request: ons_20190214_models.OnsInstanceUpdateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceUpdateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
A maximum of eight ApsaraMQ for RocketMQ instances can be deployed in each region.
@param request: OnsInstanceUpdateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceUpdateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
query['InstanceName'] = request.instance_name
if not UtilClient.is_unset(request.remark):
query['Remark'] = request.remark
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceUpdate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceUpdateResponse(),
self.call_api(params, req, runtime)
)
async def ons_instance_update_with_options_async(
self,
request: ons_20190214_models.OnsInstanceUpdateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsInstanceUpdateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
A maximum of eight ApsaraMQ for RocketMQ instances can be deployed in each region.
@param request: OnsInstanceUpdateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsInstanceUpdateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.instance_name):
query['InstanceName'] = request.instance_name
if not UtilClient.is_unset(request.remark):
query['Remark'] = request.remark
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsInstanceUpdate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsInstanceUpdateResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_instance_update(
self,
request: ons_20190214_models.OnsInstanceUpdateRequest,
) -> ons_20190214_models.OnsInstanceUpdateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
A maximum of eight ApsaraMQ for RocketMQ instances can be deployed in each region.
@param request: OnsInstanceUpdateRequest
@return: OnsInstanceUpdateResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_instance_update_with_options(request, runtime)
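    # Illustrative sketch (assumption-based): renaming an instance or updating its
    # remark; the three fields mirror the query built in ons_instance_update_with_options:
    #   request = ons_20190214_models.OnsInstanceUpdateRequest(
    #       instance_id='MQ_INST_xxx',         # placeholder
    #       instance_name='renamed-instance',  # placeholder
    #       remark='updated via OpenAPI',
    #   )
    #   response = client.ons_instance_update(request)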
async def ons_instance_update_async(
self,
request: ons_20190214_models.OnsInstanceUpdateRequest,
) -> ons_20190214_models.OnsInstanceUpdateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
A maximum of eight ApsaraMQ for RocketMQ instances can be deployed in each region.
@param request: OnsInstanceUpdateRequest
@return: OnsInstanceUpdateResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_instance_update_with_options_async(request, runtime)
def ons_message_detail_with_options(
self,
request: ons_20190214_models.OnsMessageDetailRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessageDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsMessageDetailRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessageDetailResponse
"""
UtilClient.validate_model(request)
query = OpenApiUtilClient.query(UtilClient.to_map(request))
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessageDetail',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='GET',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessageDetailResponse(),
self.call_api(params, req, runtime)
)
async def ons_message_detail_with_options_async(
self,
request: ons_20190214_models.OnsMessageDetailRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessageDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsMessageDetailRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessageDetailResponse
"""
UtilClient.validate_model(request)
query = OpenApiUtilClient.query(UtilClient.to_map(request))
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessageDetail',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='GET',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessageDetailResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_message_detail(
self,
request: ons_20190214_models.OnsMessageDetailRequest,
) -> ons_20190214_models.OnsMessageDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsMessageDetailRequest
@return: OnsMessageDetailResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_message_detail_with_options(request, runtime)
async def ons_message_detail_async(
self,
request: ons_20190214_models.OnsMessageDetailRequest,
) -> ons_20190214_models.OnsMessageDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: OnsMessageDetailRequest
@return: OnsMessageDetailResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_message_detail_with_options_async(request, runtime)
def ons_message_get_by_key_with_options(
self,
request: ons_20190214_models.OnsMessageGetByKeyRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessageGetByKeyResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* This operation uses the fuzzy match method to query messages based on a specified message key. The same message key may be used by multiple messages. Therefore, the returned result may contain information about multiple messages.
* This operation can be used in scenarios in which you cannot obtain the IDs of the messages that you want to query. You can perform the following steps to query the information about messages:
1. Call this operation to query message IDs.
2. Call the **OnsMessageGetByMsgId** operation that uses the exact match method to query the details of a specified message. For more information about the **OnsMessageGetByMsgId** operation, see [OnsMessageGetByMsgId](~~29607~~).
@param request: OnsMessageGetByKeyRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessageGetByKeyResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.key):
query['Key'] = request.key
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessageGetByKey',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessageGetByKeyResponse(),
self.call_api(params, req, runtime)
)
async def ons_message_get_by_key_with_options_async(
self,
request: ons_20190214_models.OnsMessageGetByKeyRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessageGetByKeyResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* This operation uses the fuzzy match method to query messages based on a specified message key. The same message key may be used by multiple messages. Therefore, the returned result may contain information about multiple messages.
* This operation can be used in scenarios in which you cannot obtain the IDs of the messages that you want to query. You can perform the following steps to query the information about messages:
1. Call this operation to query message IDs.
2. Call the **OnsMessageGetByMsgId** operation that uses the exact match method to query the details of a specified message. For more information about the **OnsMessageGetByMsgId** operation, see [OnsMessageGetByMsgId](~~29607~~).
@param request: OnsMessageGetByKeyRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessageGetByKeyResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.key):
query['Key'] = request.key
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessageGetByKey',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessageGetByKeyResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_message_get_by_key(
self,
request: ons_20190214_models.OnsMessageGetByKeyRequest,
) -> ons_20190214_models.OnsMessageGetByKeyResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* This operation uses the fuzzy match method to query messages based on a specified message key. The same message key may be used by multiple messages. Therefore, the returned result may contain information about multiple messages.
* This operation can be used in scenarios in which you cannot obtain the IDs of the messages that you want to query. You can perform the following steps to query the information about messages:
1. Call this operation to query message IDs.
2. Call the **OnsMessageGetByMsgId** operation that uses the exact match method to query the details of a specified message. For more information about the **OnsMessageGetByMsgId** operation, see [OnsMessageGetByMsgId](~~29607~~).
@param request: OnsMessageGetByKeyRequest
@return: OnsMessageGetByKeyResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_message_get_by_key_with_options(request, runtime)
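    # Illustrative sketch (assumption-based): fuzzy lookup of messages by message key,
    # typically followed by an exact query with OnsMessageGetByMsgId; the three request
    # fields match the query parameters assembled above:
    #   request = ons_20190214_models.OnsMessageGetByKeyRequest(
    #       instance_id='MQ_INST_xxx',  # placeholder
    #       topic='orders',             # placeholder topic
    #       key='order-20230101-001',   # placeholder message key
    #   )
    #   response = client.ons_message_get_by_key(request)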
async def ons_message_get_by_key_async(
self,
request: ons_20190214_models.OnsMessageGetByKeyRequest,
) -> ons_20190214_models.OnsMessageGetByKeyResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* This operation uses the fuzzy match method to query messages based on a specified message key. The same message key may be used by multiple messages. Therefore, the returned result may contain information about multiple messages.
* This operation can be used in scenarios in which you cannot obtain the IDs of the messages that you want to query. You can perform the following steps to query the information about messages:
1. Call this operation to query message IDs.
2. Call the **OnsMessageGetByMsgId** operation that uses the exact match method to query the details of a specified message. For more information about the **OnsMessageGetByMsgId** operation, see [OnsMessageGetByMsgId](~~29607~~).
@param request: OnsMessageGetByKeyRequest
@return: OnsMessageGetByKeyResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_message_get_by_key_with_options_async(request, runtime)
def ons_message_get_by_msg_id_with_options(
self,
request: ons_20190214_models.OnsMessageGetByMsgIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessageGetByMsgIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If a message is not consumed as expected, you can call this operation to query the information about the message for troubleshooting.
* This operation uses the exact match method to query a message based on the message ID. You can obtain the message ID from the SendResult parameter that is returned after the message is sent. You must store the returned information after each message is sent. The queried information about a message includes the point in time when the message was sent, the broker on which the message is stored, and the attributes of the message such as the message key and tag.
@param request: OnsMessageGetByMsgIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessageGetByMsgIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessageGetByMsgId',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessageGetByMsgIdResponse(),
self.call_api(params, req, runtime)
)
async def ons_message_get_by_msg_id_with_options_async(
self,
request: ons_20190214_models.OnsMessageGetByMsgIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessageGetByMsgIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If a message is not consumed as expected, you can call this operation to query the information about the message for troubleshooting.
* This operation uses the exact match method to query a message based on the message ID. You can obtain the message ID from the SendResult parameter that is returned after the message is sent. You must store the returned information after each message is sent. The queried information about a message includes the point in time when the message was sent, the broker on which the message is stored, and the attributes of the message such as the message key and tag.
@param request: OnsMessageGetByMsgIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessageGetByMsgIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessageGetByMsgId',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessageGetByMsgIdResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_message_get_by_msg_id(
self,
request: ons_20190214_models.OnsMessageGetByMsgIdRequest,
) -> ons_20190214_models.OnsMessageGetByMsgIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If a message is not consumed as expected, you can call this operation to query the information about the message for troubleshooting.
* This operation uses the exact match method to query a message based on the message ID. You can obtain the message ID from the SendResult parameter that is returned after the message is sent. You must store the returned information after each message is sent. The queried information about a message includes the point in time when the message was sent, the broker on which the message is stored, and the attributes of the message such as the message key and tag.
@param request: OnsMessageGetByMsgIdRequest
@return: OnsMessageGetByMsgIdResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_message_get_by_msg_id_with_options(request, runtime)
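    # Illustrative usage sketch (not part of the generated client). It assumes the
    # message ID was captured from the SendResult returned when the message was sent;
    # the instance ID, topic, and message ID below are hypothetical:
    #
    #     request = ons_20190214_models.OnsMessageGetByMsgIdRequest(
    #         instance_id='MQ_INST_123456789_xxxx',  # hypothetical instance ID
    #         topic='demo-topic',
    #         msg_id='0A1B2C3D4E5F0000000000000000',  # hypothetical message ID
    #     )
    #     response = client.ons_message_get_by_msg_id(request)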
async def ons_message_get_by_msg_id_async(
self,
request: ons_20190214_models.OnsMessageGetByMsgIdRequest,
) -> ons_20190214_models.OnsMessageGetByMsgIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If a message is not consumed as expected, you can call this operation to query the information about the message for troubleshooting.
* This operation uses the exact match method to query a message based on the message ID. You can obtain the message ID from the SendResult parameter that is returned after the message is sent. You must store the returned information after each message is sent. The queried information about a message includes the point in time when the message was sent, the broker on which the message is stored, and the attributes of the message such as the message key and tag.
@param request: OnsMessageGetByMsgIdRequest
@return: OnsMessageGetByMsgIdResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_message_get_by_msg_id_with_options_async(request, runtime)
def ons_message_page_query_by_topic_with_options(
self,
request: ons_20190214_models.OnsMessagePageQueryByTopicRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessagePageQueryByTopicResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If you do not know the ID or key of a message that you want to query, you can call this operation to query all messages that are stored in the topic within a specified time range. The results are displayed by page.
* We recommend that you specify a short time range to query messages. If you specify a long time range, a large number of messages are returned. In this case, you cannot find the message that you want to query in an efficient manner. You can perform the following steps to query messages:
1. Perform a paged query by specifying the topic, start time, end time, and number of entries to return on each page. If the topic contains messages, the information about the messages on the first page, total number of pages, and task ID are returned by default.
2. Specify the task ID and a page number to call this operation again to query the messages on the specified page. The BeginTime, EndTime, and PageSize parameters do not take effect. By default, the system uses the values of these parameters that you specified in the request when you created the specified query task.
@param request: OnsMessagePageQueryByTopicRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessagePageQueryByTopicResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.current_page):
query['CurrentPage'] = request.current_page
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.page_size):
query['PageSize'] = request.page_size
if not UtilClient.is_unset(request.task_id):
query['TaskId'] = request.task_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessagePageQueryByTopic',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessagePageQueryByTopicResponse(),
self.call_api(params, req, runtime)
)
async def ons_message_page_query_by_topic_with_options_async(
self,
request: ons_20190214_models.OnsMessagePageQueryByTopicRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessagePageQueryByTopicResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If you do not know the ID or key of a message that you want to query, you can call this operation to query all messages that are stored in the topic within a specified time range. The results are displayed by page.
* We recommend that you specify a short time range to query messages. If you specify a long time range, a large number of messages are returned. In this case, you cannot find the message that you want to query in an efficient manner. You can perform the following steps to query messages:
1. Perform a paged query by specifying the topic, start time, end time, and number of entries to return on each page. If the topic contains messages, the information about the messages on the first page, total number of pages, and task ID are returned by default.
2. Specify the task ID and a page number to call this operation again to query the messages on the specified page. The BeginTime, EndTime, and PageSize parameters do not take effect. By default, the system uses the values of these parameters that you specified in the request when you created the specified query task.
@param request: OnsMessagePageQueryByTopicRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessagePageQueryByTopicResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.current_page):
query['CurrentPage'] = request.current_page
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.page_size):
query['PageSize'] = request.page_size
if not UtilClient.is_unset(request.task_id):
query['TaskId'] = request.task_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessagePageQueryByTopic',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessagePageQueryByTopicResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_message_page_query_by_topic(
self,
request: ons_20190214_models.OnsMessagePageQueryByTopicRequest,
) -> ons_20190214_models.OnsMessagePageQueryByTopicResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If you do not know the ID or key of a message that you want to query, you can call this operation to query all messages that are stored in the topic within a specified time range. The results are displayed by page.
* We recommend that you specify a short time range to query messages. If you specify a long time range, a large number of messages are returned. In this case, you cannot find the message that you want to query in an efficient manner. You can perform the following steps to query messages:
1. Perform a paged query by specifying the topic, start time, end time, and number of entries to return on each page. If the topic contains messages, the information about the messages on the first page, total number of pages, and task ID are returned by default.
2. Specify the task ID and a page number to call this operation again to query the messages on the specified page. The BeginTime, EndTime, and PageSize parameters do not take effect. By default, the system uses the values of these parameters that you specified in the request when you created the specified query task.
@param request: OnsMessagePageQueryByTopicRequest
@return: OnsMessagePageQueryByTopicResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_message_page_query_by_topic_with_options(request, runtime)
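    # Illustrative usage sketch (not part of the generated client). The first call
    # creates a query task over a short time range; later calls reuse the returned
    # task ID to page through the results. The millisecond-timestamp unit and all
    # IDs below are assumptions:
    #
    #     first = client.ons_message_page_query_by_topic(
    #         ons_20190214_models.OnsMessagePageQueryByTopicRequest(
    #             instance_id='MQ_INST_123456789_xxxx',  # hypothetical instance ID
    #             topic='demo-topic',
    #             begin_time=1700000000000,  # assumed epoch milliseconds
    #             end_time=1700003600000,
    #             page_size=20,
    #             current_page=1,
    #         )
    #     )
    #     # Subsequent pages: pass task_id and current_page only; BeginTime, EndTime,
    #     # and PageSize are taken from the original query task.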
async def ons_message_page_query_by_topic_async(
self,
request: ons_20190214_models.OnsMessagePageQueryByTopicRequest,
) -> ons_20190214_models.OnsMessagePageQueryByTopicResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* If you do not know the ID or key of a message that you want to query, you can call this operation to query all messages that are stored in the topic within a specified time range. The results are displayed by page.
* We recommend that you specify a short time range to query messages. If you specify a long time range, a large number of messages are returned. In this case, you cannot find the message that you want to query in an efficient manner. You can perform the following steps to query messages:
1. Perform a paged query by specifying the topic, start time, end time, and number of entries to return on each page. If the topic contains messages, the information about the messages on the first page, total number of pages, and task ID are returned by default.
2. Specify the task ID and a page number to call this operation again to query the messages on the specified page. The BeginTime, EndTime, and PageSize parameters do not take effect. By default, the system uses the values of these parameters that you specified in the request when you created the specified query task.
@param request: OnsMessagePageQueryByTopicRequest
@return: OnsMessagePageQueryByTopicResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_message_page_query_by_topic_with_options_async(request, runtime)
def ons_message_push_with_options(
self,
request: ons_20190214_models.OnsMessagePushRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessagePushResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation can be used to check whether messages in a specified topic can be consumed by consumers in a specified consumer group. This operation obtains the body of the message that is specified by the MsgId parameter, re-encapsulates the message body to produce a new message, and then pushes the new message to a specified consumer. The content of the message that is sent to the consumer is the same as the content of the original message. They are not the same message because they use different message IDs.
@param request: OnsMessagePushRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessagePushResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.client_id):
query['ClientId'] = request.client_id
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessagePush',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessagePushResponse(),
self.call_api(params, req, runtime)
)
async def ons_message_push_with_options_async(
self,
request: ons_20190214_models.OnsMessagePushRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessagePushResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation can be used to check whether messages in a specified topic can be consumed by consumers in a specified consumer group. This operation obtains the body of the message that is specified by the MsgId parameter, re-encapsulates the message body to produce a new message, and then pushes the new message to a specified consumer. The content of the message that is sent to the consumer is the same as the content of the original message. They are not the same message because they use different message IDs.
@param request: OnsMessagePushRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessagePushResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.client_id):
query['ClientId'] = request.client_id
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessagePush',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessagePushResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_message_push(
self,
request: ons_20190214_models.OnsMessagePushRequest,
) -> ons_20190214_models.OnsMessagePushResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation can be used to check whether messages in a specified topic can be consumed by consumers in a specified consumer group. This operation obtains the body of the message that is specified by the MsgId parameter, re-encapsulates the message body to produce a new message, and then pushes the new message to a specified consumer. The content of the message that is sent to the consumer is the same as the content of the original message. They are not the same message because they use different message IDs.
@param request: OnsMessagePushRequest
@return: OnsMessagePushResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_message_push_with_options(request, runtime)
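    # Illustrative usage sketch (not part of the generated client). It pushes a copy
    # of an existing message to one online consumer to verify that the consumer group
    # can consume from the topic; all IDs below are hypothetical:
    #
    #     request = ons_20190214_models.OnsMessagePushRequest(
    #         instance_id='MQ_INST_123456789_xxxx',
    #         topic='demo-topic',
    #         group_id='GID_demo_group',
    #         msg_id='0A1B2C3D4E5F0000000000000000',
    #         client_id='192.168.0.1@12345',  # hypothetical online consumer client ID
    #     )
    #     response = client.ons_message_push(request)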
async def ons_message_push_async(
self,
request: ons_20190214_models.OnsMessagePushRequest,
) -> ons_20190214_models.OnsMessagePushResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation can be used to check whether messages in a specified topic can be consumed by consumers in a specified consumer group. This operation obtains the body of the message that is specified by the MsgId parameter, re-encapsulates the message body to produce a new message, and then pushes the new message to a specified consumer. The content of the message that is sent to the consumer is the same as the content of the original message. They are not the same message because they use different message IDs.
@param request: OnsMessagePushRequest
@return: OnsMessagePushResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_message_push_with_options_async(request, runtime)
def ons_message_trace_with_options(
self,
request: ons_20190214_models.OnsMessageTraceRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessageTraceResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation to check whether a specified message is consumed. If the message is not consumed, you can troubleshoot the issue based on the returned information.
* This operation queries information based on the built-in offset mechanism of ApsaraMQ for RocketMQ. In most cases, the results are correct. If you have reset the consumer offset or cleared accumulated messages, the results may not be correct.
@param request: OnsMessageTraceRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessageTraceResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessageTrace',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessageTraceResponse(),
self.call_api(params, req, runtime)
)
async def ons_message_trace_with_options_async(
self,
request: ons_20190214_models.OnsMessageTraceRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsMessageTraceResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation to check whether a specified message is consumed. If the message is not consumed, you can troubleshoot the issue based on the returned information.
* This operation queries information based on the built-in offset mechanism of ApsaraMQ for RocketMQ. In most cases, the results are correct. If you have reset the consumer offset or cleared accumulated messages, the results may not be correct.
@param request: OnsMessageTraceRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsMessageTraceResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsMessageTrace',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsMessageTraceResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_message_trace(
self,
request: ons_20190214_models.OnsMessageTraceRequest,
) -> ons_20190214_models.OnsMessageTraceResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation to check whether a specified message is consumed. If the message is not consumed, you can troubleshoot the issue based on the returned information.
* This operation queries information based on the built-in offset mechanism of ApsaraMQ for RocketMQ. In most cases, the results are correct. If you have reset the consumer offset or cleared accumulated messages, the results may not be correct.
@param request: OnsMessageTraceRequest
@return: OnsMessageTraceResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_message_trace_with_options(request, runtime)
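    # Illustrative usage sketch (not part of the generated client). It traces whether
    # a specific message has been consumed; the IDs below are hypothetical:
    #
    #     request = ons_20190214_models.OnsMessageTraceRequest(
    #         instance_id='MQ_INST_123456789_xxxx',
    #         topic='demo-topic',
    #         msg_id='0A1B2C3D4E5F0000000000000000',
    #     )
    #     response = client.ons_message_trace(request)
    #     # Results rely on the built-in offset mechanism; they may be inaccurate if
    #     # the consumer offset was reset or accumulated messages were cleared.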
async def ons_message_trace_async(
self,
request: ons_20190214_models.OnsMessageTraceRequest,
) -> ons_20190214_models.OnsMessageTraceResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* You can call this operation to check whether a specified message is consumed. If the message is not consumed, you can troubleshoot the issue based on the returned information.
* This operation queries information based on the built-in offset mechanism of ApsaraMQ for RocketMQ. In most cases, the results are correct. If you have reset the consumer offset or cleared accumulated messages, the results may not be correct.
@param request: OnsMessageTraceRequest
@return: OnsMessageTraceResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_message_trace_with_options_async(request, runtime)
def ons_region_list_with_options(
self,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsRegionListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        When you use an SDK to access and manage an ApsaraMQ for RocketMQ instance, you must specify the information about two regions in sequence. You can query the information about the second region by calling the OnsRegionList operation. You must apply for a public endpoint in the following scenarios:
* Connect your application to ApsaraMQ for RocketMQ: Select the nearest API gateway endpoint based on the region where your application is deployed, and enter the corresponding **region ID**. The **regionId** is used to access Alibaba Cloud API Gateway because ApsaraMQ for RocketMQ instances provide API services by using the OpenAPI Explorer platform, which is also called POP.
* Access a region to manage its resources: Specify a region where you want to manage ApsaraMQ for RocketMQ resources and enter the region ID. You can call the **OnsRegionList** operation to query a region ID.
@param runtime: runtime options for this request RuntimeOptions
@return: OnsRegionListResponse
"""
req = open_api_models.OpenApiRequest()
params = open_api_models.Params(
action='OnsRegionList',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsRegionListResponse(),
self.call_api(params, req, runtime)
)
async def ons_region_list_with_options_async(
self,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsRegionListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        When you use an SDK to access and manage an ApsaraMQ for RocketMQ instance, you must specify the information about two regions in sequence. You can query the information about the second region by calling the OnsRegionList operation. You must apply for a public endpoint in the following scenarios:
* Connect your application to ApsaraMQ for RocketMQ: Select the nearest API gateway endpoint based on the region where your application is deployed, and enter the corresponding **region ID**. The **regionId** is used to access Alibaba Cloud API Gateway because ApsaraMQ for RocketMQ instances provide API services by using the OpenAPI Explorer platform, which is also called POP.
* Access a region to manage its resources: Specify a region where you want to manage ApsaraMQ for RocketMQ resources and enter the region ID. You can call the **OnsRegionList** operation to query a region ID.
@param runtime: runtime options for this request RuntimeOptions
@return: OnsRegionListResponse
"""
req = open_api_models.OpenApiRequest()
params = open_api_models.Params(
action='OnsRegionList',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsRegionListResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_region_list(self) -> ons_20190214_models.OnsRegionListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        When you use an SDK to access and manage an ApsaraMQ for RocketMQ instance, you must specify the information about two regions in sequence. You can query the information about the second region by calling the OnsRegionList operation. You must apply for a public endpoint in the following scenarios:
* Connect your application to ApsaraMQ for RocketMQ: Select the nearest API gateway endpoint based on the region where your application is deployed, and enter the corresponding **region ID**. The **regionId** is used to access Alibaba Cloud API Gateway because ApsaraMQ for RocketMQ instances provide API services by using the OpenAPI Explorer platform, which is also called POP.
* Access a region to manage its resources: Specify a region where you want to manage ApsaraMQ for RocketMQ resources and enter the region ID. You can call the **OnsRegionList** operation to query a region ID.
@return: OnsRegionListResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_region_list_with_options(runtime)
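    # Illustrative usage sketch (not part of the generated client). OnsRegionList
    # takes no request object; it returns the regions available to the current
    # account, whose IDs are then used in subsequent management calls:
    #
    #     response = client.ons_region_list()
    #     # Inspect response.body for the region IDs supported by the service.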
async def ons_region_list_async(self) -> ons_20190214_models.OnsRegionListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        When you use an SDK to access and manage an ApsaraMQ for RocketMQ instance, you must specify the information about two regions in sequence. You can query the information about the second region by calling the OnsRegionList operation. You must apply for a public endpoint in the following scenarios:
* Connect your application to ApsaraMQ for RocketMQ: Select the nearest API gateway endpoint based on the region where your application is deployed, and enter the corresponding **region ID**. The **regionId** is used to access Alibaba Cloud API Gateway because ApsaraMQ for RocketMQ instances provide API services by using the OpenAPI Explorer platform, which is also called POP.
* Access a region to manage its resources: Specify a region where you want to manage ApsaraMQ for RocketMQ resources and enter the region ID. You can call the **OnsRegionList** operation to query a region ID.
@return: OnsRegionListResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_region_list_with_options_async(runtime)
def ons_topic_create_with_options(
self,
request: ons_20190214_models.OnsTopicCreateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you want to release a new application or expand your business, you can call this operation to create a topic based on your business requirements.
@param request: OnsTopicCreateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicCreateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.message_type):
query['MessageType'] = request.message_type
if not UtilClient.is_unset(request.remark):
query['Remark'] = request.remark
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicCreate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicCreateResponse(),
self.call_api(params, req, runtime)
)
async def ons_topic_create_with_options_async(
self,
request: ons_20190214_models.OnsTopicCreateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you want to release a new application or expand your business, you can call this operation to create a topic based on your business requirements.
@param request: OnsTopicCreateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicCreateResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.message_type):
query['MessageType'] = request.message_type
if not UtilClient.is_unset(request.remark):
query['Remark'] = request.remark
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicCreate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicCreateResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_topic_create(
self,
request: ons_20190214_models.OnsTopicCreateRequest,
) -> ons_20190214_models.OnsTopicCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you want to release a new application or expand your business, you can call this operation to create a topic based on your business requirements.
@param request: OnsTopicCreateRequest
@return: OnsTopicCreateResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_topic_create_with_options(request, runtime)
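    # Illustrative usage sketch (not part of the generated client). The message_type
    # value below (0 for normal messages) is an assumption based on the ApsaraMQ for
    # RocketMQ console; check the product documentation for the exact codes:
    #
    #     request = ons_20190214_models.OnsTopicCreateRequest(
    #         instance_id='MQ_INST_123456789_xxxx',  # hypothetical instance ID
    #         topic='demo-topic',
    #         message_type=0,
    #         remark='created via SDK',
    #     )
    #     response = client.ons_topic_create(request)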
async def ons_topic_create_async(
self,
request: ons_20190214_models.OnsTopicCreateRequest,
) -> ons_20190214_models.OnsTopicCreateResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
When you want to release a new application or expand your business, you can call this operation to create a topic based on your business requirements.
@param request: OnsTopicCreateRequest
@return: OnsTopicCreateResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_topic_create_with_options_async(request, runtime)
def ons_topic_delete_with_options(
self,
request: ons_20190214_models.OnsTopicDeleteRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicDeleteResponse:
"""
        > API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        * After you delete the topic, the publishing and subscription relationships that are constructed based on the topic are cleared. Exercise caution when you call this operation.
You can call this operation to delete a topic when you need to reclaim the resources from the topic. For example, after an application is brought offline, you can delete the topics that are used for the application. After you delete a topic, the backend of ApsaraMQ for RocketMQ reclaims the resources from the topic. The system requires a long period of time to reclaim the resources. After you delete a topic, we recommend that you do not create a topic that uses the same name as the deleted topic within a short period of time. If the system fails to delete the specified topic, troubleshoot the issue based on the error code.
@param request: OnsTopicDeleteRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicDeleteResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicDelete',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicDeleteResponse(),
self.call_api(params, req, runtime)
)
async def ons_topic_delete_with_options_async(
self,
request: ons_20190214_models.OnsTopicDeleteRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicDeleteResponse:
"""
        > API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        * After you delete the topic, the publishing and subscription relationships that are constructed based on the topic are cleared. Exercise caution when you call this operation.
You can call this operation to delete a topic when you need to reclaim the resources from the topic. For example, after an application is brought offline, you can delete the topics that are used for the application. After you delete a topic, the backend of ApsaraMQ for RocketMQ reclaims the resources from the topic. The system requires a long period of time to reclaim the resources. After you delete a topic, we recommend that you do not create a topic that uses the same name as the deleted topic within a short period of time. If the system fails to delete the specified topic, troubleshoot the issue based on the error code.
@param request: OnsTopicDeleteRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicDeleteResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicDelete',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicDeleteResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_topic_delete(
self,
request: ons_20190214_models.OnsTopicDeleteRequest,
) -> ons_20190214_models.OnsTopicDeleteResponse:
"""
        > API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        * After you delete the topic, the publishing and subscription relationships that are constructed based on the topic are cleared. Exercise caution when you call this operation.
You can call this operation to delete a topic when you need to reclaim the resources from the topic. For example, after an application is brought offline, you can delete the topics that are used for the application. After you delete a topic, the backend of ApsaraMQ for RocketMQ reclaims the resources from the topic. The system requires a long period of time to reclaim the resources. After you delete a topic, we recommend that you do not create a topic that uses the same name as the deleted topic within a short period of time. If the system fails to delete the specified topic, troubleshoot the issue based on the error code.
@param request: OnsTopicDeleteRequest
@return: OnsTopicDeleteResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_topic_delete_with_options(request, runtime)
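    # Illustrative usage sketch (not part of the generated client). Deleting a topic
    # clears its publish/subscribe relationships, so gate this behind a confirmation
    # step in real tooling; the IDs below are hypothetical:
    #
    #     request = ons_20190214_models.OnsTopicDeleteRequest(
    #         instance_id='MQ_INST_123456789_xxxx',
    #         topic='obsolete-topic',
    #     )
    #     response = client.ons_topic_delete(request)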
async def ons_topic_delete_async(
self,
request: ons_20190214_models.OnsTopicDeleteRequest,
) -> ons_20190214_models.OnsTopicDeleteResponse:
"""
        > API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        * After you delete the topic, the publishing and subscription relationships that are constructed based on the topic are cleared. Exercise caution when you call this operation.
You can call this operation to delete a topic when you need to reclaim the resources from the topic. For example, after an application is brought offline, you can delete the topics that are used for the application. After you delete a topic, the backend of ApsaraMQ for RocketMQ reclaims the resources from the topic. The system requires a long period of time to reclaim the resources. After you delete a topic, we recommend that you do not create a topic that uses the same name as the deleted topic within a short period of time. If the system fails to delete the specified topic, troubleshoot the issue based on the error code.
@param request: OnsTopicDeleteRequest
@return: OnsTopicDeleteResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_topic_delete_with_options_async(request, runtime)
def ons_topic_list_with_options(
self,
request: ons_20190214_models.OnsTopicListRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation returns the basic information about topics and does not return the details of topics.
@param request: OnsTopicListRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicListResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
if not UtilClient.is_unset(request.user_id):
query['UserId'] = request.user_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicList',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicListResponse(),
self.call_api(params, req, runtime)
)
async def ons_topic_list_with_options_async(
self,
request: ons_20190214_models.OnsTopicListRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation returns the basic information about topics and does not return the details of topics.
@param request: OnsTopicListRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicListResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
if not UtilClient.is_unset(request.user_id):
query['UserId'] = request.user_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicList',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicListResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_topic_list(
self,
request: ons_20190214_models.OnsTopicListRequest,
) -> ons_20190214_models.OnsTopicListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation returns the basic information about topics and does not return the details of topics.
@param request: OnsTopicListRequest
@return: OnsTopicListResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_topic_list_with_options(request, runtime)
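    # Illustrative usage sketch (not part of the generated client). Leaving `topic`
    # unset should list every topic in the instance (an assumption, since Topic is an
    # optional filter here); the instance ID below is hypothetical:
    #
    #     request = ons_20190214_models.OnsTopicListRequest(
    #         instance_id='MQ_INST_123456789_xxxx',
    #     )
    #     response = client.ons_topic_list(request)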
async def ons_topic_list_async(
self,
request: ons_20190214_models.OnsTopicListRequest,
) -> ons_20190214_models.OnsTopicListResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
This operation returns the basic information about topics and does not return the details of topics.
@param request: OnsTopicListRequest
@return: OnsTopicListResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_topic_list_with_options_async(request, runtime)
def ons_topic_status_with_options(
self,
request: ons_20190214_models.OnsTopicStatusRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicStatusResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can determine the resource usage of a topic based on the information that is returned by this operation. The returned information includes the total number of messages in the topic and the most recent point in time when a message was published to the topic.
@param request: OnsTopicStatusRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicStatusResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicStatus',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicStatusResponse(),
self.call_api(params, req, runtime)
)
async def ons_topic_status_with_options_async(
self,
request: ons_20190214_models.OnsTopicStatusRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicStatusResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can determine the resource usage of a topic based on the information that is returned by this operation. The returned information includes the total number of messages in the topic and the most recent point in time when a message was published to the topic.
@param request: OnsTopicStatusRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicStatusResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicStatus',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicStatusResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_topic_status(
self,
request: ons_20190214_models.OnsTopicStatusRequest,
) -> ons_20190214_models.OnsTopicStatusResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can determine the resource usage of a topic based on the information that is returned by this operation. The returned information includes the total number of messages in the topic and the most recent point in time when a message was published to the topic.
@param request: OnsTopicStatusRequest
@return: OnsTopicStatusResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_topic_status_with_options(request, runtime)
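    # Illustrative usage sketch (not part of the generated client). It reads the
    # total message count and last-publish time for one topic; IDs are hypothetical:
    #
    #     request = ons_20190214_models.OnsTopicStatusRequest(
    #         instance_id='MQ_INST_123456789_xxxx',
    #         topic='demo-topic',
    #     )
    #     response = client.ons_topic_status(request)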
async def ons_topic_status_async(
self,
request: ons_20190214_models.OnsTopicStatusRequest,
) -> ons_20190214_models.OnsTopicStatusResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can determine the resource usage of a topic based on the information that is returned by this operation. The returned information includes the total number of messages in the topic and the most recent point in time when a message was published to the topic.
@param request: OnsTopicStatusRequest
@return: OnsTopicStatusResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_topic_status_with_options_async(request, runtime)
def ons_topic_sub_detail_with_options(
self,
request: ons_20190214_models.OnsTopicSubDetailRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicSubDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the online consumer groups that subscribe to a specified topic. If all consumers in a group are offline, the information about the group is not returned.
@param request: OnsTopicSubDetailRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicSubDetailResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicSubDetail',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicSubDetailResponse(),
self.call_api(params, req, runtime)
)
async def ons_topic_sub_detail_with_options_async(
self,
request: ons_20190214_models.OnsTopicSubDetailRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicSubDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the online consumer groups that subscribe to a specified topic. If all consumers in a group are offline, the information about the group is not returned.
@param request: OnsTopicSubDetailRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicSubDetailResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicSubDetail',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicSubDetailResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_topic_sub_detail(
self,
request: ons_20190214_models.OnsTopicSubDetailRequest,
) -> ons_20190214_models.OnsTopicSubDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the online consumer groups that subscribe to a specified topic. If all consumers in a group are offline, the information about the group is not returned.
@param request: OnsTopicSubDetailRequest
@return: OnsTopicSubDetailResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_topic_sub_detail_with_options(request, runtime)
async def ons_topic_sub_detail_async(
self,
request: ons_20190214_models.OnsTopicSubDetailRequest,
) -> ons_20190214_models.OnsTopicSubDetailResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the online consumer groups that subscribe to a specified topic. If all consumers in a group are offline, the information about the group is not returned.
@param request: OnsTopicSubDetailRequest
@return: OnsTopicSubDetailResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_topic_sub_detail_with_options_async(request, runtime)
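    # A minimal usage sketch for OnsTopicSubDetail, assuming `client` is an
    # already-initialized instance of this module's client class (see the
    # sketch after ons_topic_status_async); identifiers are placeholders.
    #
    #     request = ons_20190214_models.OnsTopicSubDetailRequest(
    #         instance_id='<instance-id>',
    #         topic='<topic-name>',
    #     )
    #     response = client.ons_topic_sub_detail(request)
    #     print(response.body)  # only groups with online consumers are returned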
def ons_topic_update_with_options(
self,
request: ons_20190214_models.OnsTopicUpdateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicUpdateResponse:
"""
@deprecated
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to forbid read or write operations on a specific topic.
@param request: OnsTopicUpdateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicUpdateResponse
Deprecated
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.perm):
query['Perm'] = request.perm
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicUpdate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicUpdateResponse(),
self.call_api(params, req, runtime)
)
async def ons_topic_update_with_options_async(
self,
request: ons_20190214_models.OnsTopicUpdateRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTopicUpdateResponse:
"""
@deprecated
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to forbid read or write operations on a specific topic.
@param request: OnsTopicUpdateRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTopicUpdateResponse
Deprecated
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.perm):
query['Perm'] = request.perm
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTopicUpdate',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTopicUpdateResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_topic_update(
self,
request: ons_20190214_models.OnsTopicUpdateRequest,
) -> ons_20190214_models.OnsTopicUpdateResponse:
"""
@deprecated
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to forbid read or write operations on a specific topic.
@param request: OnsTopicUpdateRequest
@return: OnsTopicUpdateResponse
Deprecated
"""
runtime = util_models.RuntimeOptions()
return self.ons_topic_update_with_options(request, runtime)
async def ons_topic_update_async(
self,
request: ons_20190214_models.OnsTopicUpdateRequest,
) -> ons_20190214_models.OnsTopicUpdateResponse:
"""
@deprecated
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to forbid read or write operations on a specific topic.
@param request: OnsTopicUpdateRequest
@return: OnsTopicUpdateResponse
Deprecated
"""
runtime = util_models.RuntimeOptions()
return await self.ons_topic_update_with_options_async(request, runtime)
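    # A hedged sketch for the deprecated OnsTopicUpdate call. The `perm` values
    # below follow the common RocketMQ convention (6 = read/write, 4 = read
    # only, 2 = write only) but should be verified against the current API
    # reference; `client` and the identifiers are assumptions.
    #
    #     request = ons_20190214_models.OnsTopicUpdateRequest(
    #         instance_id='<instance-id>',
    #         topic='<topic-name>',
    #         perm=4,  # assumed: allow reads, forbid writes
    #     )
    #     client.ons_topic_update(request)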
def ons_trace_get_result_with_options(
self,
request: ons_20190214_models.OnsTraceGetResultRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTraceGetResultResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* Before you call this operation to query the details of the trace of a message, you must create a task to query the trace of the message based on the message ID or message key and obtain the task ID. Then, you can call this operation to query the details of the message trace based on the task ID. You can call the [OnsTraceQueryByMsgId](~~445322~~) operation or the [OnsTraceQueryByMsgKey](~~445324~~) operation to create a task to query the trace of the message and obtain the task ID from the **QueryId** response parameter.
* A trace query task is time-consuming. If you call this operation to query the details immediately after you create a trace query task, the results may be empty. In this case, we recommend that you try again later.
@param request: OnsTraceGetResultRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTraceGetResultResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.query_id):
query['QueryId'] = request.query_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTraceGetResult',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTraceGetResultResponse(),
self.call_api(params, req, runtime)
)
async def ons_trace_get_result_with_options_async(
self,
request: ons_20190214_models.OnsTraceGetResultRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTraceGetResultResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* Before you call this operation to query the details of the trace of a message, you must create a task to query the trace of the message based on the message ID or message key and obtain the task ID. Then, you can call this operation to query the details of the message trace based on the task ID. You can call the [OnsTraceQueryByMsgId](~~445322~~) operation or the [OnsTraceQueryByMsgKey](~~445324~~) operation to create a task to query the trace of the message and obtain the task ID from the **QueryId** response parameter.
* A trace query task is time-consuming. If you call this operation to query the details immediately after you create a trace query task, the results may be empty. In this case, we recommend that you try again later.
@param request: OnsTraceGetResultRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTraceGetResultResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.query_id):
query['QueryId'] = request.query_id
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTraceGetResult',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTraceGetResultResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_trace_get_result(
self,
request: ons_20190214_models.OnsTraceGetResultRequest,
) -> ons_20190214_models.OnsTraceGetResultResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* Before you call this operation to query the details of the trace of a message, you must create a task to query the trace of the message based on the message ID or message key and obtain the task ID. Then, you can call this operation to query the details of the message trace based on the task ID. You can call the [OnsTraceQueryByMsgId](~~445322~~) operation or the [OnsTraceQueryByMsgKey](~~445324~~) operation to create a task to query the trace of the message and obtain the task ID from the **QueryId** response parameter.
* A trace query task is time-consuming. If you call this operation to query the details immediately after you create a trace query task, the results may be empty. In this case, we recommend that you try again later.
@param request: OnsTraceGetResultRequest
@return: OnsTraceGetResultResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_trace_get_result_with_options(request, runtime)
async def ons_trace_get_result_async(
self,
request: ons_20190214_models.OnsTraceGetResultRequest,
) -> ons_20190214_models.OnsTraceGetResultResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
* Before you call this operation to query the details of the trace of a message, you must create a task to query the trace of the message based on the message ID or message key and obtain the task ID. Then, you can call this operation to query the details of the message trace based on the task ID. You can call the [OnsTraceQueryByMsgId](~~445322~~) operation or the [OnsTraceQueryByMsgKey](~~445324~~) operation to create a task to query the trace of the message and obtain the task ID from the **QueryId** response parameter.
* A trace query task is time-consuming. If you call this operation to query the details immediately after you create a trace query task, the results may be empty. In this case, we recommend that you try again later.
@param request: OnsTraceGetResultRequest
@return: OnsTraceGetResultResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_trace_get_result_with_options_async(request, runtime)
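    # A minimal sketch of the second half of the two-step trace flow described
    # above: poll OnsTraceGetResult with the QueryId returned by a trace-query
    # call. `client`, the fixed sleep, and the placeholder QueryId are
    # assumptions for illustration.
    #
    #     import time
    #
    #     time.sleep(5)  # trace query tasks are asynchronous; early results may be empty
    #     result_request = ons_20190214_models.OnsTraceGetResultRequest(
    #         query_id='<query-id-from-OnsTraceQueryByMsgId-or-MsgKey>',
    #     )
    #     result = client.ons_trace_get_result(result_request)
    #     print(result.body)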
def ons_trace_query_by_msg_id_with_options(
self,
request: ons_20190214_models.OnsTraceQueryByMsgIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTraceQueryByMsgIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
If you want to query the trace of a message based on the message ID, you can call this operation to create a query task. After you obtain the task ID, you can call the [OnsTraceGetResult](~~59832~~) operation to query the details of the message trace based on the task ID.
@param request: OnsTraceQueryByMsgIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTraceQueryByMsgIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTraceQueryByMsgId',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTraceQueryByMsgIdResponse(),
self.call_api(params, req, runtime)
)
async def ons_trace_query_by_msg_id_with_options_async(
self,
request: ons_20190214_models.OnsTraceQueryByMsgIdRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTraceQueryByMsgIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
If you want to query the trace of a message based on the message ID, you can call this operation to create a query task. After you obtain the task ID, you can call the [OnsTraceGetResult](~~59832~~) operation to query the details of the message trace based on the task ID.
@param request: OnsTraceQueryByMsgIdRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTraceQueryByMsgIdResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_id):
query['MsgId'] = request.msg_id
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTraceQueryByMsgId',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTraceQueryByMsgIdResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_trace_query_by_msg_id(
self,
request: ons_20190214_models.OnsTraceQueryByMsgIdRequest,
) -> ons_20190214_models.OnsTraceQueryByMsgIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
If you want to query the trace of a message based on the message ID, you can call this operation to create a query task. After you obtain the task ID, you can call the [OnsTraceGetResult](~~59832~~) operation to query the details of the message trace based on the task ID.
@param request: OnsTraceQueryByMsgIdRequest
@return: OnsTraceQueryByMsgIdResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_trace_query_by_msg_id_with_options(request, runtime)
async def ons_trace_query_by_msg_id_async(
self,
request: ons_20190214_models.OnsTraceQueryByMsgIdRequest,
) -> ons_20190214_models.OnsTraceQueryByMsgIdResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
If you want to query the trace of a message based on the message ID, you can call this operation to create a query task. After you obtain the task ID, you can call the [OnsTraceGetResult](~~59832~~) operation to query the details of the message trace based on the task ID.
@param request: OnsTraceQueryByMsgIdRequest
@return: OnsTraceQueryByMsgIdResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_trace_query_by_msg_id_with_options_async(request, runtime)
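    # A minimal sketch for creating a trace-query task by message ID. The time
    # range is assumed to be epoch milliseconds; `client` and all identifiers
    # are placeholders.
    #
    #     request = ons_20190214_models.OnsTraceQueryByMsgIdRequest(
    #         instance_id='<instance-id>',
    #         topic='<topic-name>',
    #         msg_id='<message-id>',
    #         begin_time=1700000000000,  # assumed epoch milliseconds
    #         end_time=1700003600000,
    #     )
    #     response = client.ons_trace_query_by_msg_id(request)
    #     # The returned QueryId is then passed to OnsTraceGetResult (see above).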
def ons_trace_query_by_msg_key_with_options(
self,
request: ons_20190214_models.OnsTraceQueryByMsgKeyRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTraceQueryByMsgKeyResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
If you want to query the trace of a message based on the message key that you obtained, you can call this operation to create a query task. After you obtain the task ID, you can call the OnsTraceGetResult operation to query the details of the message trace based on the task ID.
@param request: OnsTraceQueryByMsgKeyRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTraceQueryByMsgKeyResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_key):
query['MsgKey'] = request.msg_key
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTraceQueryByMsgKey',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTraceQueryByMsgKeyResponse(),
self.call_api(params, req, runtime)
)
async def ons_trace_query_by_msg_key_with_options_async(
self,
request: ons_20190214_models.OnsTraceQueryByMsgKeyRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTraceQueryByMsgKeyResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
If you want to query the trace of a message based on the message key that you obtained, you can call this operation to create a query task. After you obtain the task ID, you can call the OnsTraceGetResult operation to query the details of the message trace based on the task ID.
@param request: OnsTraceQueryByMsgKeyRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTraceQueryByMsgKeyResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.msg_key):
query['MsgKey'] = request.msg_key
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTraceQueryByMsgKey',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTraceQueryByMsgKeyResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_trace_query_by_msg_key(
self,
request: ons_20190214_models.OnsTraceQueryByMsgKeyRequest,
) -> ons_20190214_models.OnsTraceQueryByMsgKeyResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
If you want to query the trace of a message based on the message key that you obtained, you can call this operation to create a query task. After you obtain the task ID, you can call the OnsTraceGetResult operation to query the details of the message trace based on the task ID.
@param request: OnsTraceQueryByMsgKeyRequest
@return: OnsTraceQueryByMsgKeyResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_trace_query_by_msg_key_with_options(request, runtime)
async def ons_trace_query_by_msg_key_async(
self,
request: ons_20190214_models.OnsTraceQueryByMsgKeyRequest,
) -> ons_20190214_models.OnsTraceQueryByMsgKeyResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
If you want to query the trace of a message based on the message key that you obtained, you can call this operation to create a query task. After you obtain the task ID, you can call the OnsTraceGetResult operation to query the details of the message trace based on the task ID.
@param request: OnsTraceQueryByMsgKeyRequest
@return: OnsTraceQueryByMsgKeyResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_trace_query_by_msg_key_with_options_async(request, runtime)
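    # A minimal sketch for the message-key variant; it mirrors the message-ID
    # sketch above except for the `msg_key` field. Values are placeholders.
    #
    #     request = ons_20190214_models.OnsTraceQueryByMsgKeyRequest(
    #         instance_id='<instance-id>',
    #         topic='<topic-name>',
    #         msg_key='<message-key>',
    #         begin_time=1700000000000,  # assumed epoch milliseconds
    #         end_time=1700003600000,
    #     )
    #     response = client.ons_trace_query_by_msg_key(request)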
def ons_trend_group_output_tps_with_options(
self,
request: ons_20190214_models.OnsTrendGroupOutputTpsRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTrendGroupOutputTpsResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the following statistics that are collected in a production environment:
* The number of messages that are consumed during each sampling period
* The transactions per second (TPS) for message consumption during each sampling period
If your application consumes a small number of messages and does not consume messages at specific intervals, we recommend that you query the number of messages that are consumed during each sampling period because the statistics of TPS may not show a clear change trend.
@param request: OnsTrendGroupOutputTpsRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTrendGroupOutputTpsResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.period):
query['Period'] = request.period
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
if not UtilClient.is_unset(request.type):
query['Type'] = request.type
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTrendGroupOutputTps',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTrendGroupOutputTpsResponse(),
self.call_api(params, req, runtime)
)
async def ons_trend_group_output_tps_with_options_async(
self,
request: ons_20190214_models.OnsTrendGroupOutputTpsRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTrendGroupOutputTpsResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the following statistics that are collected in a production environment:
* The number of messages that are consumed during each sampling period
* The transactions per second (TPS) for message consumption during each sampling period
If your application consumes a small number of messages and does not consume messages at specific intervals, we recommend that you query the number of messages that are consumed during each sampling period because the statistics of TPS may not show a clear change trend.
@param request: OnsTrendGroupOutputTpsRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTrendGroupOutputTpsResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.group_id):
query['GroupId'] = request.group_id
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.period):
query['Period'] = request.period
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
if not UtilClient.is_unset(request.type):
query['Type'] = request.type
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTrendGroupOutputTps',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTrendGroupOutputTpsResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_trend_group_output_tps(
self,
request: ons_20190214_models.OnsTrendGroupOutputTpsRequest,
) -> ons_20190214_models.OnsTrendGroupOutputTpsResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the following statistics that are collected in a production environment:
* The number of messages that are consumed during each sampling period
* The transactions per second (TPS) for message consumption during each sampling period
If your application consumes a small number of messages and does not consume messages at specific intervals, we recommend that you query the number of messages that are consumed during each sampling period because the statistics of TPS may not show a clear change trend.
@param request: OnsTrendGroupOutputTpsRequest
@return: OnsTrendGroupOutputTpsResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_trend_group_output_tps_with_options(request, runtime)
async def ons_trend_group_output_tps_async(
self,
request: ons_20190214_models.OnsTrendGroupOutputTpsRequest,
) -> ons_20190214_models.OnsTrendGroupOutputTpsResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the following statistics that are collected in a production environment:
* The number of messages that are consumed during each sampling period
* The transactions per second (TPS) for message consumption during each sampling period
If your application consumes a small number of messages and does not consume messages at specific intervals, we recommend that you query the number of messages that are consumed during each sampling period because the statistics of TPS may not show a clear change trend.
@param request: OnsTrendGroupOutputTpsRequest
@return: OnsTrendGroupOutputTpsResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_trend_group_output_tps_with_options_async(request, runtime)
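    # A hedged sketch for querying a group's consumption trend. The `period`
    # (sampling interval) and `type` (message count vs. TPS) values are
    # assumptions to be checked against the API reference; `client` and the
    # identifiers are placeholders.
    #
    #     request = ons_20190214_models.OnsTrendGroupOutputTpsRequest(
    #         instance_id='<instance-id>',
    #         group_id='GID_example',
    #         topic='<topic-name>',
    #         begin_time=1700000000000,  # assumed epoch milliseconds
    #         end_time=1700003600000,
    #         period=1,   # assumed sampling period
    #         type=0,     # assumed statistic selector (count vs. TPS)
    #     )
    #     response = client.ons_trend_group_output_tps(request)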
def ons_trend_topic_input_tps_with_options(
self,
request: ons_20190214_models.OnsTrendTopicInputTpsRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTrendTopicInputTpsResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the statistics of messages that are published to a specific topic in a production environment. You can query the number of messages that are published to the topic or the transactions per second (TPS) for message publishing within a specified time range based on your business requirements.
If your application publishes a small number of messages and does not publish messages at specific intervals, we recommend that you query the number of messages that are published to the topic during each sampling period because the statistics of TPS may not show a clear change trend.
@param request: OnsTrendTopicInputTpsRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTrendTopicInputTpsResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.period):
query['Period'] = request.period
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
if not UtilClient.is_unset(request.type):
query['Type'] = request.type
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTrendTopicInputTps',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTrendTopicInputTpsResponse(),
self.call_api(params, req, runtime)
)
async def ons_trend_topic_input_tps_with_options_async(
self,
request: ons_20190214_models.OnsTrendTopicInputTpsRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OnsTrendTopicInputTpsResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the statistics of messages that are published to a specific topic in a production environment. You can query the number of messages that are published to the topic or the transactions per second (TPS) for message publishing within a specified time range based on your business requirements.
If your application publishes a small number of messages and does not publish messages at specific intervals, we recommend that you query the number of messages that are published to the topic during each sampling period because the statistics of TPS may not show a clear change trend.
@param request: OnsTrendTopicInputTpsRequest
@param runtime: runtime options for this request RuntimeOptions
@return: OnsTrendTopicInputTpsResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.begin_time):
query['BeginTime'] = request.begin_time
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.period):
query['Period'] = request.period
if not UtilClient.is_unset(request.topic):
query['Topic'] = request.topic
if not UtilClient.is_unset(request.type):
query['Type'] = request.type
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OnsTrendTopicInputTps',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OnsTrendTopicInputTpsResponse(),
await self.call_api_async(params, req, runtime)
)
def ons_trend_topic_input_tps(
self,
request: ons_20190214_models.OnsTrendTopicInputTpsRequest,
) -> ons_20190214_models.OnsTrendTopicInputTpsResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the statistics of messages that are published to a specific topic in a production environment. You can query the number of messages that are published to the topic or the transactions per second (TPS) for message publishing within a specified time range based on your business requirements.
If your application publishes a small number of messages and does not publish messages at specific intervals, we recommend that you query the number of messages that are published to the topic during each sampling period because the statistics of TPS may not show a clear change trend.
@param request: OnsTrendTopicInputTpsRequest
@return: OnsTrendTopicInputTpsResponse
"""
runtime = util_models.RuntimeOptions()
return self.ons_trend_topic_input_tps_with_options(request, runtime)
async def ons_trend_topic_input_tps_async(
self,
request: ons_20190214_models.OnsTrendTopicInputTpsRequest,
) -> ons_20190214_models.OnsTrendTopicInputTpsResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
You can call this operation to query the statistics of messages that are published to a specific topic in a production environment. You can query the number of messages that are published to the topic or the transactions per second (TPS) for message publishing within a specified time range based on your business requirements.
If your application publishes a small number of messages and does not publish messages at specific intervals, we recommend that you query the number of messages that are published to the topic during each sampling period because the statistics of TPS may not show a clear change trend.
@param request: OnsTrendTopicInputTpsRequest
@return: OnsTrendTopicInputTpsResponse
"""
runtime = util_models.RuntimeOptions()
return await self.ons_trend_topic_input_tps_with_options_async(request, runtime)
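    # A hedged sketch for the topic-publishing trend; it is queried like the
    # group-consumption trend above, without the group_id field. Values are
    # placeholders/assumptions.
    #
    #     request = ons_20190214_models.OnsTrendTopicInputTpsRequest(
    #         instance_id='<instance-id>',
    #         topic='<topic-name>',
    #         begin_time=1700000000000,
    #         end_time=1700003600000,
    #         period=1,
    #         type=0,
    #     )
    #     response = client.ons_trend_topic_input_tps(request)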
def open_ons_service_with_options(
self,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OpenOnsServiceResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        Call this operation to activate ApsaraMQ for RocketMQ the first time you use the service. ApsaraMQ for RocketMQ can be used only after the service is activated.
The ApsaraMQ for RocketMQ service can be activated only in the China (Hangzhou) region. Service activation is not billed.
@param runtime: runtime options for this request RuntimeOptions
@return: OpenOnsServiceResponse
"""
req = open_api_models.OpenApiRequest()
params = open_api_models.Params(
action='OpenOnsService',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OpenOnsServiceResponse(),
self.call_api(params, req, runtime)
)
async def open_ons_service_with_options_async(
self,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.OpenOnsServiceResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        Call this operation to activate ApsaraMQ for RocketMQ the first time you use the service. ApsaraMQ for RocketMQ can be used only after the service is activated.
The ApsaraMQ for RocketMQ service can be activated only in the China (Hangzhou) region. Service activation is not billed.
@param runtime: runtime options for this request RuntimeOptions
@return: OpenOnsServiceResponse
"""
req = open_api_models.OpenApiRequest()
params = open_api_models.Params(
action='OpenOnsService',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.OpenOnsServiceResponse(),
await self.call_api_async(params, req, runtime)
)
def open_ons_service(self) -> ons_20190214_models.OpenOnsServiceResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        Call this operation to activate ApsaraMQ for RocketMQ the first time you use the service. ApsaraMQ for RocketMQ can be used only after the service is activated.
The ApsaraMQ for RocketMQ service can be activated only in the China (Hangzhou) region. Service activation is not billed.
@return: OpenOnsServiceResponse
"""
runtime = util_models.RuntimeOptions()
return self.open_ons_service_with_options(runtime)
async def open_ons_service_async(self) -> ons_20190214_models.OpenOnsServiceResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        Call this operation to activate ApsaraMQ for RocketMQ the first time you use the service. ApsaraMQ for RocketMQ can be used only after the service is activated.
The ApsaraMQ for RocketMQ service can be activated only in the China (Hangzhou) region. Service activation is not billed.
@return: OpenOnsServiceResponse
"""
runtime = util_models.RuntimeOptions()
return await self.open_ons_service_with_options_async(runtime)
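    # A minimal sketch: service activation takes no request model, so the call
    # is a bare method invocation on an initialized client (`client` is an
    # assumption, as in the earlier sketches).
    #
    #     response = client.open_ons_service()
    #     print(response.body)  # activation result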
def tag_resources_with_options(
self,
request: ons_20190214_models.TagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.TagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        You can call this operation to attach tags to a resource. You can use tags to classify resources in ApsaraMQ for RocketMQ, which helps you aggregate and search resources efficiently.
@param request: TagResourcesRequest
@param runtime: runtime options for this request RuntimeOptions
@return: TagResourcesResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='TagResources',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.TagResourcesResponse(),
self.call_api(params, req, runtime)
)
async def tag_resources_with_options_async(
self,
request: ons_20190214_models.TagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.TagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        You can call this operation to attach tags to a resource. You can use tags to classify resources in ApsaraMQ for RocketMQ, which helps you aggregate and search resources efficiently.
@param request: TagResourcesRequest
@param runtime: runtime options for this request RuntimeOptions
@return: TagResourcesResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='TagResources',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.TagResourcesResponse(),
await self.call_api_async(params, req, runtime)
)
def tag_resources(
self,
request: ons_20190214_models.TagResourcesRequest,
) -> ons_20190214_models.TagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        You can call this operation to attach tags to a resource. You can use tags to classify resources in ApsaraMQ for RocketMQ, which helps you aggregate and search resources efficiently.
@param request: TagResourcesRequest
@return: TagResourcesResponse
"""
runtime = util_models.RuntimeOptions()
return self.tag_resources_with_options(request, runtime)
async def tag_resources_async(
self,
request: ons_20190214_models.TagResourcesRequest,
) -> ons_20190214_models.TagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
        You can call this operation to attach tags to a resource. You can use tags to classify resources in ApsaraMQ for RocketMQ, which helps you aggregate and search resources efficiently.
@param request: TagResourcesRequest
@return: TagResourcesResponse
"""
runtime = util_models.RuntimeOptions()
return await self.tag_resources_with_options_async(request, runtime)
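    # A hedged sketch for attaching a tag to a topic. The nested tag model name
    # (TagResourcesRequestTag), the resource_type value, and the list-typed
    # resource_id follow the usual conventions of these generated SDKs but are
    # assumptions here; `client` is a placeholder.
    #
    #     request = ons_20190214_models.TagResourcesRequest(
    #         instance_id='<instance-id>',
    #         resource_type='TOPIC',      # assumed resource type value
    #         resource_id=['<topic-name>'],
    #         tag=[ons_20190214_models.TagResourcesRequestTag(key='env', value='prod')],
    #     )
    #     client.tag_resources(request)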
def untag_resources_with_options(
self,
request: ons_20190214_models.UntagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.UntagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: UntagResourcesRequest
@param runtime: runtime options for this request RuntimeOptions
@return: UntagResourcesResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.all):
query['All'] = request.all
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag_key):
query['TagKey'] = request.tag_key
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='UntagResources',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.UntagResourcesResponse(),
self.call_api(params, req, runtime)
)
async def untag_resources_with_options_async(
self,
request: ons_20190214_models.UntagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> ons_20190214_models.UntagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: UntagResourcesRequest
@param runtime: runtime options for this request RuntimeOptions
@return: UntagResourcesResponse
"""
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.all):
query['All'] = request.all
if not UtilClient.is_unset(request.instance_id):
query['InstanceId'] = request.instance_id
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag_key):
query['TagKey'] = request.tag_key
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='UntagResources',
version='2019-02-14',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
ons_20190214_models.UntagResourcesResponse(),
await self.call_api_async(params, req, runtime)
)
def untag_resources(
self,
request: ons_20190214_models.UntagResourcesRequest,
) -> ons_20190214_models.UntagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: UntagResourcesRequest
@return: UntagResourcesResponse
"""
runtime = util_models.RuntimeOptions()
return self.untag_resources_with_options(request, runtime)
async def untag_resources_async(
self,
request: ons_20190214_models.UntagResourcesRequest,
) -> ons_20190214_models.UntagResourcesResponse:
"""
> API operations provided by Alibaba Cloud are used to manage and query resources of Alibaba Cloud services. We recommend that you integrate these API operations only in management systems. Do not use these API operations in the core system of messaging services. Otherwise, system risks may occur.
@param request: UntagResourcesRequest
@return: UntagResourcesResponse
"""
runtime = util_models.RuntimeOptions()
return await self.untag_resources_with_options_async(request, runtime)
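    # A hedged sketch for detaching tags; it mirrors tag_resources above. Based
    # on the request fields, `tag_key` lists the keys to remove and `all` is
    # assumed to remove every tag when set; values are placeholders.
    #
    #     request = ons_20190214_models.UntagResourcesRequest(
    #         instance_id='<instance-id>',
    #         resource_type='TOPIC',        # assumed resource type value
    #         resource_id=['<topic-name>'],
    #         tag_key=['env'],
    #     )
    #     client.untag_resources(request)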
|
PypiClean
|
/dl1_data_handler-0.10.11-py3-none-any.whl/dl1_data_handler/write_data.py
|
from dl1_data_handler.writer import DL1DataDumper, DL1DataWriter
import argparse
import logging
import sys
import warnings
import os
import yaml
logger = logging.getLogger(__name__)
# Disable warnings by default
if not sys.warnoptions:
warnings.simplefilter("ignore")
def main():
parser = argparse.ArgumentParser(
description=(
"Read DL1 data via event source into ctapipe containers, \
then write to a specified output file format."
)
)
parser.add_argument(
"runlist",
help="YAML file containing matched groups of input filenames and \
output filenames.",
)
parser.add_argument(
"--output_dir",
"-o",
        help="Path to the directory where output files are created. Overrides the output paths found in the runlist.",
)
parser.add_argument(
"--config_file", "-c", help="YAML configuration file for settings."
)
parser.add_argument(
"--debug", help="Print all debug logger messages", action="store_true"
)
args = parser.parse_args()
if args.debug:
        logger.setLevel(logging.DEBUG)
        logging.getLogger("dl1_data_handler.writer").setLevel(logging.DEBUG)
    with open(args.runlist, "r") as runlist_file:
        runlist = yaml.safe_load(runlist_file)
for run in runlist:
if args.output_dir:
run["target"] = os.path.join(
args.output_dir, os.path.basename(run["target"])
)
logger.info(
"Number of input files in runlist: {}".format(
len([input_file for run in runlist for input_file in run["inputs"]])
)
)
logger.info("Number of output files requested: {}".format(len(runlist)))
# load options from config file and create DL1 Data Writer object
if args.config_file:
logger.info("Reading config file {}...".format(args.config_file))
        with open(args.config_file, "r") as config_file:
            config = yaml.safe_load(config_file)
logger.info("Config file {} loaded.".format(args.config_file))
logger.info(yaml.dump(config, default_flow_style=False, default_style=""))
writer_settings = config["Data Writer"]["Settings"]
event_src_settings = config["Event Source"]["Settings"]
dumper_name = config["Data Dumper"]["Type"]
dumper_settings = config["Data Dumper"]["Settings"]
# Locate DL1DataDumper subclass
dumpers = {i.__name__: i for i in DL1DataDumper.__subclasses__()}
if dumper_name in dumpers:
data_dumper_class = dumpers[dumper_name]
else:
raise ValueError("No subclass of DL1DataDumper: {}".format(dumper_name))
data_writer = DL1DataWriter(
event_source_class=None,
event_source_settings=event_src_settings,
data_dumper_class=data_dumper_class,
data_dumper_settings=dumper_settings,
**writer_settings
)
else:
logger.info("No config file provided, using default settings")
data_writer = DL1DataWriter()
data_writer.process_data(runlist)
if __name__ == "__main__":
main()
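# A hedged sketch of the two YAML inputs this script expects, inferred from the
# keys read in main() above. File names, settings, and the dumper type are
# illustrative placeholders, not a documented schema.
#
# runlist.yml:
#     - inputs:
#         - /data/run1_dl1.simtel.gz
#         - /data/run2_dl1.simtel.gz
#       target: /output/run1_run2.h5
#
# config.yml:
#     Data Writer:
#       Settings: {}
#     Event Source:
#       Settings: {}
#     Data Dumper:
#       Type: CTAMLDataDumper   # placeholder; must name a DL1DataDumper subclass
#       Settings: {}
#
# Assumed invocation:
#     python write_data.py runlist.yml -c config.yml --debug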
|
PypiClean
|
/virtual-storage-manager-2.0.2.tar.gz/virtual-storage-manager-2.0.2/vsm/agent/rpcapi.py
|
"""Client side of the agent RPC API."""
import logging
from oslo.config import cfg
from vsm.openstack.common import jsonutils
from vsm.openstack.common import rpc
import vsm.openstack.common.rpc.proxy
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class AgentAPI(vsm.openstack.common.rpc.proxy.RpcProxy):
"""Client side of the agent RPC API"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
super(AgentAPI, self).__init__(
            topic=topic or CONF.agent_topic,
default_version=self.BASE_RPC_API_VERSION)
def ping(self, context, arg, timeout=None):
arg_p = jsonutils.to_primitive(arg)
msg = self.make_msg('ping', arg=arg_p)
return self.call(context, msg, version='1.0', timeout=timeout)
def test_service(self, ctxt, topic, host=None):
if host:
topic = rpc.queue_get_for(ctxt, self.topic, host)
ret = self.call(ctxt, self.make_msg('test_service'), topic, timeout=30, need_try=False)
return ret
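    # Descriptive note on the RPC pattern used throughout this class:
    # `self.call()` is a blocking RPC that waits for and returns the remote
    # result, while `self.cast()` is fire-and-forget. `rpc.queue_get_for(ctxt,
    # self.topic, host)` derives a host-specific topic so a request can be
    # routed to the agent on a particular node, as test_service() does above;
    # the long timeouts (timeout=6000) on the Ceph-related calls below
    # presumably allow for slow cluster operations.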
    def update_pool_info(self, ctxt, body=None):
        return self.cast(ctxt, self.make_msg('update_pool_info', body=body))
def update_recipe_info(self, ctxt, body=None):
res = self.call(ctxt, self.make_msg('update_recipe_info', body=body))
return res
def present_storage_pools(self, context, body=None):
return self.cast(context, self.make_msg('present_storage_pools', body=body))
def revoke_storage_pool(self, context, id):
return self.call(context, self.make_msg('revoke_storage_pool', id=id))
def update_keyring_admin_from_db(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('update_keyring_admin_from_db'),
topic)
def upload_keyring_admin_into_db(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('upload_keyring_admin_into_db'),
topic, version='1.0', timeout=6000)
def init_ceph(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('init_ceph',
body=body),
topic,
version='1.0', timeout=6000)
def add_osd(self, context, host_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('add_osd',
host_id=host_id),
topic,
version='1.0', timeout=6000)
def add_monitor(self, context, host_id, mon_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('add_monitor',
mon_id=mon_id,
host_id=host_id),
topic,
version='1.0', timeout=6000)
def remove_osd(self, context, host_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('remove_osd',
host_id=host_id),
topic,
version='1.0', timeout=6000)
def remove_monitor(self, context, host_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('remove_monitor',
host_id=host_id),
topic,
version='1.0', timeout=6000)
def remove_mds(self, context, host_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('remove_mds',
host_id=host_id),
topic,
version='1.0', timeout=6000)
def add_mds(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('add_mds'),
topic,
version='1.0', timeout=6000)
def get_ceph_disk_list(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('get_ceph_disk_list',),
topic, version='1.0', timeout=6000)
def get_ceph_config(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('get_ceph_config',),
topic, version='1.0', timeout=6000)
def save_ceph_config(self, context, config, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('save_ceph_config', config=config),
topic, version='1.0', timeout=6000)
def get_ceph_admin_keyring(self, context, host,):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('get_ceph_admin_keyring',),
topic, version='1.0', timeout=6000)
def save_ceph_admin_keyring(self, context, keyring_str, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('save_ceph_admin_keyring',
keyring_str=keyring_str),
topic, version='1.0', timeout=6000)
def clean_ceph_data(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('clean_ceph_data'),
topic,
version='1.0', timeout=6000)
def mount_disks(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('mount_disks'),
topic,
version='1.0', timeout=6000)
def start_osd_daemon(self, context, number, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('start_osd_daemon',
num=number),
topic,
version='1.0', timeout=6000)
def get_ceph_health(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context,
self.make_msg('get_ceph_health'),
topic, version='1.0', timeout=6000)
def get_ceph_health_list(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('get_ceph_health_list'),
topic, version='1.0', timeout=6000)
def get_osds_total_num(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context, self.make_msg('get_osds_total_num'), topic,
version='1.0', timeout=6000)
def start_ceph(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
return self.call(context, self.make_msg('start_ceph'), topic,
version='1.0', timeout=6000)
def create_storage_pool(self, ctxt, body=None):
return self.call(ctxt, self.make_msg('create_storage_pool', body=body))
def get_pool_id_by_name(self, context, name, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('get_pool_id_by_name',
name=name),
topic, version='1.0', timeout=6000)
return res
def set_crushmap(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('set_crushmap'),
topic,
version='1.0', timeout=6000)
return res
def update_ssh_keys(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('update_ssh_keys'),
topic, version='1.0', timeout=6000)
return res
def get_smart_info(self, context, host, device):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('get_smart_info', device=device),
topic, version='1.0', timeout=6000)
return res
def create_crushmap(self, context, server_list, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('create_crushmap',
server_list=server_list),
topic, version='1.0', timeout=6000)
return res
def refresh_osd_num(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.cast(context,
self.make_msg('refresh_osd_number'),
topic, version='1.0', timeout=6000)
return res
def add_new_zone(self, context, zone_name, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('add_new_zone',
zone_name=zone_name),
topic,
version='1.0', timeout=6000)
return res
def start_server(self, context, node_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('start_server',
node_id=node_id),
topic,
version='1.0', timeout=6000)
return res
def stop_server(self, context, node_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('stop_server',
node_id=node_id),
topic,
version='1.0', timeout=6000)
return res
    def ceph_upgrade(self, context, node_id, host, key_url, pkg_url, restart):
        topic = rpc.queue_get_for(context, self.topic, host)
        self.test_service(context, topic)
        res = self.call(context,
                        self.make_msg('ceph_upgrade',
                                      node_id=node_id,
                                      key_url=key_url,
                                      pkg_url=pkg_url,
                                      restart=restart),
                        topic,
                        version='1.0', timeout=6000)
        return res
def osd_remove(self, context, osd_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
#self.test_service(context, topic)
res = self.call(context,
self.make_msg('osd_remove',
osd_id=osd_id),
topic, version='1.0', timeout=6000)
return res
def osd_restart(self, context, osd_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
#self.test_service(context, topic)
res = self.call(context,
self.make_msg('osd_restart',
osd_id=osd_id),
topic, version='1.0', timeout=6000)
return res
def osd_add(self, context, osd_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
#self.test_service(context, topic)
res = self.call(context,
self.make_msg('osd_add',
osd_id=osd_id),
topic, version='1.0', timeout=6000)
return res
def osd_restore(self, context, osd_id, host):
topic = rpc.queue_get_for(context, self.topic, host)
#self.test_service(context, topic)
res = self.call(context,
self.make_msg('osd_restore',
osd_id=osd_id),
topic, version='1.0', timeout=6000)
return res
def osd_refresh(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('osd_refresh'),
topic, version='1.0', timeout=6000)
return res
def cluster_refresh(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('cluster_refresh'),
topic, version='1.0', timeout=6000)
return res
def integrate_cluster_update_status(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('integrate_cluster_update_status'),
topic, version='1.0', timeout=6000)
return res
def integrate_cluster_sync_osd_states(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('integrate_cluster_sync_osd_states'),
topic, version='1.0', timeout=6000)
return res
def integrate_cluster_from_ceph(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('integrate_cluster_from_ceph'),
topic, version='1.0', timeout=6000)
return res
def cluster_id(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('cluster_id'),
topic, version='1.0', timeout=6000)
return res
def update_osd_state(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.cast(context, self.make_msg('update_osd_state'), topic)
def update_pool_state(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context, self.make_msg('update_pool_state'), topic)
def update_mon_state(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.cast(context, self.make_msg('update_mon_health'), topic)
def set_pool_pg_pgp_num(self, context, host, pool, pg_num, pgp_num):
topic = rpc.queue_get_for(context, self.topic, host)
self.cast(context, self.make_msg('set_pool_pg_pgp_num',
pool=pool, pg_num=pg_num, pgp_num=pgp_num), topic)
def update_all_status(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.cast(context, self.make_msg('update_all_status'), topic)
def update_ceph_conf(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.cast(context, self.make_msg('update_ceph_conf'), topic)
def start_monitor(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context, self.make_msg('start_monitor'), topic,
version='1.0', timeout=6000)
def start_mds(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context, self.make_msg('start_mds'), topic,
version='1.0', timeout=6000)
def start_osd(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context, self.make_msg('start_osd'), topic,
version='1.0', timeout=6000)
    def inital_ceph_osd_db_conf(self, context, server_list, ceph_conf_in_cluster_manifest, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('inital_ceph_osd_db_conf',
server_list=server_list,
ceph_conf_in_cluster_manifest=ceph_conf_in_cluster_manifest),
topic,
version='1.0',
timeout=6000)
def mkcephfs(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('mkcephfs'),
topic,
version='1.0',
timeout=6000)
def stop_mds(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context, self.make_msg('stop_mds'), topic,
version='1.0', timeout=6000)
def health_status(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context, self.make_msg('health_status'), topic)
def write_monitor_keyring(self, context, monitor_keyring, host):
topic = rpc.queue_get_for(context, self.topic, host)
return self.call(context,
self.make_msg('write_monitor_keyring',
monitor_keyring=monitor_keyring),
topic,
version='1.0', timeout=6000)
def track_monitors(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('track_monitors'),
topic,
version='1.0', timeout=6000)
return res
def create_keyring(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('create_keyring'),
topic,
version='1.0', timeout=6000)
return res
def prepare_osds(self, context, server_list, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('prepare_osds',
server_list=server_list),
topic,
version='1.0', timeout=6000)
return res
def add_cache_tier(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('add_cache_tier',
body=body),
topic,
version='1.0', timeout=6000)
def remove_cache_tier(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('remove_cache_tier',
body=body),
topic,
version='1.0', timeout=6000)
def start_cluster(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('start_cluster'),
topic,
version='1.0', timeout=6000)
return res
def stop_cluster(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
self.test_service(context, topic)
res = self.call(context,
self.make_msg('stop_cluster'),
topic,
version='1.0', timeout=6000)
return res
def monitor_restart(self, context, monitor_num, host):
topic = rpc.queue_get_for(context, self.topic, host)
#self.test_service(context, topic)
res = self.call(context,
self.make_msg('monitor_restart',
monitor_num=monitor_num),
topic, version='1.0', timeout=6000)
return res
def get_available_disks(self, context, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('get_available_disks'),
topic, version='1.0', timeout=6000)
return res
def add_new_disks_to_cluster(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('add_new_disks_to_cluster',
body=body),
topic,
version='1.0', timeout=6000)
def reconfig_diamond(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.cast(context,
self.make_msg('reconfig_diamond',
body=body),
topic)
def check_pre_existing_cluster(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('check_pre_existing_cluster',
body=body),
topic,
version='1.0', timeout=6000)
return res
def import_cluster(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('import_cluster',
body=body),
topic,
version='1.0', timeout=6000)
return res
def detect_cephconf(self, context, keyring, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('detect_cephconf',
keyring=keyring),
topic,
version='1.0', timeout=6000)
return res
def detect_crushmap(self, context, keyring, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('detect_crushmap',
keyring=keyring),
topic,
version='1.0', timeout=6000)
return res
def add_rule_to_crushmap(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('add_rule_to_crushmap',
body=body),
topic,
version='1.0', timeout=6000)
return res
def modify_rule_in_crushmap(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('modify_rule_in_crushmap',
body=body),
topic,
version='1.0', timeout=6000)
return res
def update_zones_from_crushmap_to_db(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('update_zones_from_crushmap_to_db',
body=body),
topic,
version='1.0', timeout=6000)
return res
def update_storage_groups_from_crushmap_to_db(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('update_storage_groups_from_crushmap_to_db',
body=body),
topic,
version='1.0', timeout=6000)
return res
def add_zone_to_crushmap_and_db(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('add_zone_to_crushmap_and_db',
body=body),
topic,
version='1.0', timeout=6000)
return res
def get_default_pg_num_by_storage_group(self, context, body, host):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context,
self.make_msg('get_default_pg_num_by_storage_group',
body=body),
topic,
version='1.0', timeout=6000)
return res
def rgw_create(self, context, name, host, keyring, log_file, rgw_frontends,
is_ssl, s3_user_uid, s3_user_display_name, s3_user_email,
swift_user_subuser, swift_user_access, swift_user_key_type):
topic = rpc.queue_get_for(context, self.topic, host)
res = self.call(context, self.make_msg('rgw_create',
name=name,
host=host,
keyring=keyring,
log_file=log_file,
rgw_frontends=rgw_frontends,
is_ssl=is_ssl,
s3_user_uid=s3_user_uid,
s3_user_display_name=s3_user_display_name,
s3_user_email=s3_user_email,
swift_user_subuser=swift_user_subuser,
swift_user_access=swift_user_access,
swift_user_key_type=swift_user_key_type),
topic, version='1.0', timeout=6000)
return res
|
PypiClean
|
/tigerasi-0.0.19.tar.gz/tigerasi-0.0.19/README.md
|
# TigerASI
A feature-rich Python interface for ASI Tiger Controllers.
This driver was written to simplify the serial API of ASI's [Tiger Controllers](https://www.asiimaging.com/controllers/tiger-controller/) while reducing reliance on the full [documentation](https://asiimaging.com/docs/products/serial_commands) for most users.
Many (but not all!) commands have been exposed and wrapped in a simplified, self-consistent interface and documented for easy usage.
## Installation
To install this package from [PyPI](https://pypi.org/project/TigerASI/0.0.2/), invoke: `pip install TigerASI`.
To install this package from GitHub in editable mode, invoke from this directory: `pip install -e .`
To install this package in editable mode and build the docs locally, invoke: `pip install -e .[dev]`
## Intro and Basic Usage
````python
from tigerasi.tiger_controller import TigerController
box = TigerController("COM4")
````
The basic command syntax looks like this:
````python
box.zero_in_place('x', 'y') # Zero out the specified axes at their current location.
box.move_absolute(x=1000, y=25) # Move to an absolute location in "stage units" (tenths of microns).
box.move_relative(z=100) # Move z +100 stage units in the positive z direction.
````
### Syntax Basics
All commands that reference stage axes accept a variable number of optional axis arguments.
````python
box.zero_in_place('x') # only zeros the x axis. Other axes are ignored.
````
Stage axes are also case-insensitive,
````python
box.zero_in_place('X', 'y', 'Z') # also ok
````
and the order doesn't matter.
````python
box.zero_in_place('y', 'z', 'x') # also ok
````
All commands that query stage axes return a dict, keyed by *upper-case* stage axis.
````python
box.get_position('x', 'z', 'y')
# {'X': 100.0, 'Y': 305.0, 'Z': 10000.0}
````
Some commands can take an axis setting to be "current value" and another axis setting to be a specified value.
The syntax for these commands looks like this:
````python
box.set_home('x', 'z', y=100.0) # Set x and z axes homing location to current spot. Set y axis to specific spot.
box.set_home('z', 'y', 'x', m=100.0, n=200.0) # variable number of arguments ok! order and case don't matter.
````
Some commands assume *all* axes if none are specified.
````python
box.zero_in_place() # will zero ALL lettered axes.
box.reset_lower_travel_limits() # will reset ALL lettered axes.
box.get_home() # will get ALL lettered axis home positions.
box.get_lower_travel_limits() # will get ALL lettered axis lower travel limits.
````
For setting values, this might not be your desired behavior, so it is safer to default to passing in axes explicitly.
````python
box.zero_in_place('x', 'y', 'z') # will zero only x, y, and z axes.
box.reset_lower_travel_limits('x', 'y', 'z') # will reset only x, y, and z axes.
````
When in doubt, check the docs.
## Simulation
This package also features a simulated version of the TigerController:
````python
from tigerasi.sim_tiger_controller import SimTigerController
box = SimTigerController() # OR
box = SimTigerController('COM4') # com port is ignored. # OR
box = SimTigerController(build_config={'Motor Axes': ['X', 'Y', 'Z']})
# This object tracks its internal state for position and speed.
box.home_in_place('x', 'y', 'z') # home mocked axes.
box.move_absolute(z=10) # move mocked axis.
````
This feature can be useful for testing higher-level code against the current API without needing to interact with real hardware.
## Advanced Usage
Many (but not all!) of ASI's more advanced features have been made available via this simplified API.
This list includes joystick enabling/disabling and remapping, setting stage travel limits, queuing moves into the hardware buffer, and many other more nuanced features.
For a breakdown of what commands have been exposed, have a look at the [examples folder](https://github.com/AllenNeuralDynamics/TigerASI/tree/main/examples) and the docs.
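As a minimal sketch of the travel-limit helpers mentioned above (using only calls that already appear in this README; see the examples folder and docs for the joystick and move-queuing APIs):
````python
from tigerasi.tiger_controller import TigerController

box = TigerController("COM4")

# Query the lower travel limits for all lettered axes (queries return a dict
# keyed by upper-case axis).
limits = box.get_lower_travel_limits()
print(limits)

# Reset only the axes you care about rather than all lettered axes.
box.reset_lower_travel_limits('x', 'y', 'z')
````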
## Documentation
Docs can be generated via Sphinx but are also available on [readthedocs](https://tigerasi.readthedocs.io/en/latest/).
## Implementation Details
### Blocking or Non-Blocking?
All commands to the Tigerbox return a reply.
Commands that query the Tigerbox state will also return data with that reply.
Waiting for a reply introduces 10-20 [ms] of execution time before the function returns an acknowledgement ('ACK').
By default, methods *will block* until receiving this acknowledgement unless otherwise specified, like this:
````python
box.move_absolute(x=1000, y=25, wait=False) # will not block.
````
This non-blocking behavior is only available for commands that change the Tigerbox state.
Commands that query the Tigerbox state will always block until they receive a hardware reply.
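For example, combining commands shown earlier in this README, a state-change command can skip the wait while a query cannot:
````python
box.move_absolute(x=1000, wait=False)  # state change; returns without waiting for the 'ACK'.
position = box.get_position('x')       # query; always blocks until the reply (and its data) arrive.
````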
|
PypiClean
|
/django-dmcadmin-0.1.1.tar.gz/django-dmcadmin-0.1.1/dmcadmin/static/AdminLTE/plugins/popper/esm/popper.js
|
var isBrowser = typeof window !== 'undefined' && typeof document !== 'undefined' && typeof navigator !== 'undefined';
var timeoutDuration = function () {
var longerTimeoutBrowsers = ['Edge', 'Trident', 'Firefox'];
for (var i = 0; i < longerTimeoutBrowsers.length; i += 1) {
if (isBrowser && navigator.userAgent.indexOf(longerTimeoutBrowsers[i]) >= 0) {
return 1;
}
}
return 0;
}();
function microtaskDebounce(fn) {
var called = false;
return function () {
if (called) {
return;
}
called = true;
window.Promise.resolve().then(function () {
called = false;
fn();
});
};
}
function taskDebounce(fn) {
var scheduled = false;
return function () {
if (!scheduled) {
scheduled = true;
setTimeout(function () {
scheduled = false;
fn();
}, timeoutDuration);
}
};
}
var supportsMicroTasks = isBrowser && window.Promise;
/**
* Create a debounced version of a method, that's asynchronously deferred
* but called in the minimum time possible.
*
* @method
* @memberof Popper.Utils
* @argument {Function} fn
* @returns {Function}
*/
var debounce = supportsMicroTasks ? microtaskDebounce : taskDebounce;
/**
* Check if the given variable is a function
* @method
* @memberof Popper.Utils
* @argument {Any} functionToCheck - variable to check
* @returns {Boolean} answer to: is a function?
*/
function isFunction(functionToCheck) {
var getType = {};
return functionToCheck && getType.toString.call(functionToCheck) === '[object Function]';
}
/**
* Get CSS computed property of the given element
* @method
* @memberof Popper.Utils
 * @argument {Element} element
* @argument {String} property
*/
function getStyleComputedProperty(element, property) {
if (element.nodeType !== 1) {
return [];
}
// NOTE: 1 DOM access here
var window = element.ownerDocument.defaultView;
var css = window.getComputedStyle(element, null);
return property ? css[property] : css;
}
/**
* Returns the parentNode or the host of the element
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Element} parent
*/
function getParentNode(element) {
if (element.nodeName === 'HTML') {
return element;
}
return element.parentNode || element.host;
}
/**
* Returns the scrolling parent of the given element
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Element} scroll parent
*/
function getScrollParent(element) {
// Return body, `getScroll` will take care to get the correct `scrollTop` from it
if (!element) {
return document.body;
}
switch (element.nodeName) {
case 'HTML':
case 'BODY':
return element.ownerDocument.body;
case '#document':
return element.body;
}
  // Firefox wants us to check `-x` and `-y` variations as well
var _getStyleComputedProp = getStyleComputedProperty(element),
overflow = _getStyleComputedProp.overflow,
overflowX = _getStyleComputedProp.overflowX,
overflowY = _getStyleComputedProp.overflowY;
if (/(auto|scroll|overlay)/.test(overflow + overflowY + overflowX)) {
return element;
}
return getScrollParent(getParentNode(element));
}
/**
* Returns the reference node of the reference object, or the reference object itself.
* @method
* @memberof Popper.Utils
* @param {Element|Object} reference - the reference element (the popper will be relative to this)
* @returns {Element} parent
*/
function getReferenceNode(reference) {
return reference && reference.referenceNode ? reference.referenceNode : reference;
}
var isIE11 = isBrowser && !!(window.MSInputMethodContext && document.documentMode);
var isIE10 = isBrowser && /MSIE 10/.test(navigator.userAgent);
/**
* Determines if the browser is Internet Explorer
* @method
* @memberof Popper.Utils
* @param {Number} version to check
* @returns {Boolean} isIE
*/
function isIE(version) {
if (version === 11) {
return isIE11;
}
if (version === 10) {
return isIE10;
}
return isIE11 || isIE10;
}
/**
* Returns the offset parent of the given element
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Element} offset parent
*/
function getOffsetParent(element) {
if (!element) {
return document.documentElement;
}
var noOffsetParent = isIE(10) ? document.body : null;
// NOTE: 1 DOM access here
var offsetParent = element.offsetParent || null;
// Skip hidden elements which don't have an offsetParent
while (offsetParent === noOffsetParent && element.nextElementSibling) {
offsetParent = (element = element.nextElementSibling).offsetParent;
}
var nodeName = offsetParent && offsetParent.nodeName;
if (!nodeName || nodeName === 'BODY' || nodeName === 'HTML') {
return element ? element.ownerDocument.documentElement : document.documentElement;
}
// .offsetParent will return the closest TH, TD or TABLE in case
// no offsetParent is present, I hate this job...
if (['TH', 'TD', 'TABLE'].indexOf(offsetParent.nodeName) !== -1 && getStyleComputedProperty(offsetParent, 'position') === 'static') {
return getOffsetParent(offsetParent);
}
return offsetParent;
}
function isOffsetContainer(element) {
var nodeName = element.nodeName;
if (nodeName === 'BODY') {
return false;
}
return nodeName === 'HTML' || getOffsetParent(element.firstElementChild) === element;
}
/**
* Finds the root node (document, shadowDOM root) of the given element
* @method
* @memberof Popper.Utils
* @argument {Element} node
* @returns {Element} root node
*/
function getRoot(node) {
if (node.parentNode !== null) {
return getRoot(node.parentNode);
}
return node;
}
/**
* Finds the offset parent common to the two provided nodes
* @method
* @memberof Popper.Utils
* @argument {Element} element1
* @argument {Element} element2
* @returns {Element} common offset parent
*/
function findCommonOffsetParent(element1, element2) {
// This check is needed to avoid errors in case one of the elements isn't defined for any reason
if (!element1 || !element1.nodeType || !element2 || !element2.nodeType) {
return document.documentElement;
}
// Here we make sure to give as "start" the element that comes first in the DOM
var order = element1.compareDocumentPosition(element2) & Node.DOCUMENT_POSITION_FOLLOWING;
var start = order ? element1 : element2;
var end = order ? element2 : element1;
// Get common ancestor container
var range = document.createRange();
range.setStart(start, 0);
range.setEnd(end, 0);
var commonAncestorContainer = range.commonAncestorContainer;
// Both nodes are inside #document
if (element1 !== commonAncestorContainer && element2 !== commonAncestorContainer || start.contains(end)) {
if (isOffsetContainer(commonAncestorContainer)) {
return commonAncestorContainer;
}
return getOffsetParent(commonAncestorContainer);
}
// one of the nodes is inside shadowDOM, find which one
var element1root = getRoot(element1);
if (element1root.host) {
return findCommonOffsetParent(element1root.host, element2);
} else {
return findCommonOffsetParent(element1, getRoot(element2).host);
}
}
/**
* Gets the scroll value of the given element in the given side (top and left)
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @argument {String} side `top` or `left`
* @returns {number} amount of scrolled pixels
*/
function getScroll(element) {
var side = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'top';
var upperSide = side === 'top' ? 'scrollTop' : 'scrollLeft';
var nodeName = element.nodeName;
if (nodeName === 'BODY' || nodeName === 'HTML') {
var html = element.ownerDocument.documentElement;
var scrollingElement = element.ownerDocument.scrollingElement || html;
return scrollingElement[upperSide];
}
return element[upperSide];
}
/*
* Sum or subtract the element scroll values (left and top) from a given rect object
* @method
* @memberof Popper.Utils
* @param {Object} rect - Rect object you want to change
 * @param {HTMLElement} element - The element from which the function reads the scroll values
 * @param {Boolean} subtract - set to true if you want to subtract the scroll values
 * @return {Object} rect - The modified rect object
*/
function includeScroll(rect, element) {
var subtract = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
var scrollTop = getScroll(element, 'top');
var scrollLeft = getScroll(element, 'left');
var modifier = subtract ? -1 : 1;
rect.top += scrollTop * modifier;
rect.bottom += scrollTop * modifier;
rect.left += scrollLeft * modifier;
rect.right += scrollLeft * modifier;
return rect;
}
/*
* Helper to detect borders of a given element
* @method
* @memberof Popper.Utils
* @param {CSSStyleDeclaration} styles
* Result of `getStyleComputedProperty` on the given element
* @param {String} axis - `x` or `y`
* @return {number} borders - The borders size of the given axis
*/
function getBordersSize(styles, axis) {
var sideA = axis === 'x' ? 'Left' : 'Top';
var sideB = sideA === 'Left' ? 'Right' : 'Bottom';
return parseFloat(styles['border' + sideA + 'Width']) + parseFloat(styles['border' + sideB + 'Width']);
}
function getSize(axis, body, html, computedStyle) {
return Math.max(body['offset' + axis], body['scroll' + axis], html['client' + axis], html['offset' + axis], html['scroll' + axis], isIE(10) ? parseInt(html['offset' + axis]) + parseInt(computedStyle['margin' + (axis === 'Height' ? 'Top' : 'Left')]) + parseInt(computedStyle['margin' + (axis === 'Height' ? 'Bottom' : 'Right')]) : 0);
}
function getWindowSizes(document) {
var body = document.body;
var html = document.documentElement;
var computedStyle = isIE(10) && getComputedStyle(html);
return {
height: getSize('Height', body, html, computedStyle),
width: getSize('Width', body, html, computedStyle)
};
}
var classCallCheck = function (instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
};
var createClass = function () {
function defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
return function (Constructor, protoProps, staticProps) {
if (protoProps) defineProperties(Constructor.prototype, protoProps);
if (staticProps) defineProperties(Constructor, staticProps);
return Constructor;
};
}();
var defineProperty = function (obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true
});
} else {
obj[key] = value;
}
return obj;
};
var _extends = Object.assign || function (target) {
for (var i = 1; i < arguments.length; i++) {
var source = arguments[i];
for (var key in source) {
if (Object.prototype.hasOwnProperty.call(source, key)) {
target[key] = source[key];
}
}
}
return target;
};
/**
* Given element offsets, generate an output similar to getBoundingClientRect
* @method
* @memberof Popper.Utils
* @argument {Object} offsets
* @returns {Object} ClientRect like output
*/
function getClientRect(offsets) {
return _extends({}, offsets, {
right: offsets.left + offsets.width,
bottom: offsets.top + offsets.height
});
}
/**
* Get bounding client rect of given element
* @method
* @memberof Popper.Utils
* @param {HTMLElement} element
* @return {Object} client rect
*/
function getBoundingClientRect(element) {
var rect = {};
  // IE10 FIX: Please, don't ask, the element isn't
// considered in DOM in some circumstances...
// This isn't reproducible in IE10 compatibility mode of IE11
try {
if (isIE(10)) {
rect = element.getBoundingClientRect();
var scrollTop = getScroll(element, 'top');
var scrollLeft = getScroll(element, 'left');
rect.top += scrollTop;
rect.left += scrollLeft;
rect.bottom += scrollTop;
rect.right += scrollLeft;
} else {
rect = element.getBoundingClientRect();
}
} catch (e) {}
var result = {
left: rect.left,
top: rect.top,
width: rect.right - rect.left,
height: rect.bottom - rect.top
};
// subtract scrollbar size from sizes
var sizes = element.nodeName === 'HTML' ? getWindowSizes(element.ownerDocument) : {};
var width = sizes.width || element.clientWidth || result.width;
var height = sizes.height || element.clientHeight || result.height;
var horizScrollbar = element.offsetWidth - width;
var vertScrollbar = element.offsetHeight - height;
  // if a hypothetical scrollbar is detected, we must be sure it's not a `border`
// we make this check conditional for performance reasons
if (horizScrollbar || vertScrollbar) {
var styles = getStyleComputedProperty(element);
horizScrollbar -= getBordersSize(styles, 'x');
vertScrollbar -= getBordersSize(styles, 'y');
result.width -= horizScrollbar;
result.height -= vertScrollbar;
}
return getClientRect(result);
}
function getOffsetRectRelativeToArbitraryNode(children, parent) {
var fixedPosition = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
var isIE10 = isIE(10);
var isHTML = parent.nodeName === 'HTML';
var childrenRect = getBoundingClientRect(children);
var parentRect = getBoundingClientRect(parent);
var scrollParent = getScrollParent(children);
var styles = getStyleComputedProperty(parent);
var borderTopWidth = parseFloat(styles.borderTopWidth);
var borderLeftWidth = parseFloat(styles.borderLeftWidth);
// In cases where the parent is fixed, we must ignore negative scroll in offset calc
if (fixedPosition && isHTML) {
parentRect.top = Math.max(parentRect.top, 0);
parentRect.left = Math.max(parentRect.left, 0);
}
var offsets = getClientRect({
top: childrenRect.top - parentRect.top - borderTopWidth,
left: childrenRect.left - parentRect.left - borderLeftWidth,
width: childrenRect.width,
height: childrenRect.height
});
offsets.marginTop = 0;
offsets.marginLeft = 0;
// Subtract margins of documentElement in case it's being used as parent
// we do this only on HTML because it's the only element that behaves
// differently when margins are applied to it. The margins are included in
// the box of the documentElement, in the other cases not.
if (!isIE10 && isHTML) {
var marginTop = parseFloat(styles.marginTop);
var marginLeft = parseFloat(styles.marginLeft);
offsets.top -= borderTopWidth - marginTop;
offsets.bottom -= borderTopWidth - marginTop;
offsets.left -= borderLeftWidth - marginLeft;
offsets.right -= borderLeftWidth - marginLeft;
// Attach marginTop and marginLeft because in some circumstances we may need them
offsets.marginTop = marginTop;
offsets.marginLeft = marginLeft;
}
if (isIE10 && !fixedPosition ? parent.contains(scrollParent) : parent === scrollParent && scrollParent.nodeName !== 'BODY') {
offsets = includeScroll(offsets, parent);
}
return offsets;
}
function getViewportOffsetRectRelativeToArtbitraryNode(element) {
var excludeScroll = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
var html = element.ownerDocument.documentElement;
var relativeOffset = getOffsetRectRelativeToArbitraryNode(element, html);
var width = Math.max(html.clientWidth, window.innerWidth || 0);
var height = Math.max(html.clientHeight, window.innerHeight || 0);
var scrollTop = !excludeScroll ? getScroll(html) : 0;
var scrollLeft = !excludeScroll ? getScroll(html, 'left') : 0;
var offset = {
top: scrollTop - relativeOffset.top + relativeOffset.marginTop,
left: scrollLeft - relativeOffset.left + relativeOffset.marginLeft,
width: width,
height: height
};
return getClientRect(offset);
}
/**
* Check if the given element is fixed or is inside a fixed parent
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @argument {Element} customContainer
* @returns {Boolean} answer to "isFixed?"
*/
function isFixed(element) {
var nodeName = element.nodeName;
if (nodeName === 'BODY' || nodeName === 'HTML') {
return false;
}
if (getStyleComputedProperty(element, 'position') === 'fixed') {
return true;
}
var parentNode = getParentNode(element);
if (!parentNode) {
return false;
}
return isFixed(parentNode);
}
/**
* Finds the first parent of an element that has a transformed property defined
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Element} first transformed parent or documentElement
*/
function getFixedPositionOffsetParent(element) {
// This check is needed to avoid errors in case one of the elements isn't defined for any reason
if (!element || !element.parentElement || isIE()) {
return document.documentElement;
}
var el = element.parentElement;
while (el && getStyleComputedProperty(el, 'transform') === 'none') {
el = el.parentElement;
}
return el || document.documentElement;
}
/**
* Computed the boundaries limits and return them
* @method
* @memberof Popper.Utils
* @param {HTMLElement} popper
* @param {HTMLElement} reference
* @param {number} padding
* @param {HTMLElement} boundariesElement - Element used to define the boundaries
* @param {Boolean} fixedPosition - Is in fixed position mode
* @returns {Object} Coordinates of the boundaries
*/
function getBoundaries(popper, reference, padding, boundariesElement) {
var fixedPosition = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : false;
// NOTE: 1 DOM access here
var boundaries = { top: 0, left: 0 };
var offsetParent = fixedPosition ? getFixedPositionOffsetParent(popper) : findCommonOffsetParent(popper, getReferenceNode(reference));
// Handle viewport case
if (boundariesElement === 'viewport') {
boundaries = getViewportOffsetRectRelativeToArtbitraryNode(offsetParent, fixedPosition);
} else {
// Handle other cases based on DOM element used as boundaries
var boundariesNode = void 0;
if (boundariesElement === 'scrollParent') {
boundariesNode = getScrollParent(getParentNode(reference));
if (boundariesNode.nodeName === 'BODY') {
boundariesNode = popper.ownerDocument.documentElement;
}
} else if (boundariesElement === 'window') {
boundariesNode = popper.ownerDocument.documentElement;
} else {
boundariesNode = boundariesElement;
}
var offsets = getOffsetRectRelativeToArbitraryNode(boundariesNode, offsetParent, fixedPosition);
// In case of HTML, we need a different computation
if (boundariesNode.nodeName === 'HTML' && !isFixed(offsetParent)) {
var _getWindowSizes = getWindowSizes(popper.ownerDocument),
height = _getWindowSizes.height,
width = _getWindowSizes.width;
boundaries.top += offsets.top - offsets.marginTop;
boundaries.bottom = height + offsets.top;
boundaries.left += offsets.left - offsets.marginLeft;
boundaries.right = width + offsets.left;
} else {
// for all the other DOM elements, this one is good
boundaries = offsets;
}
}
// Add paddings
padding = padding || 0;
var isPaddingNumber = typeof padding === 'number';
boundaries.left += isPaddingNumber ? padding : padding.left || 0;
boundaries.top += isPaddingNumber ? padding : padding.top || 0;
boundaries.right -= isPaddingNumber ? padding : padding.right || 0;
boundaries.bottom -= isPaddingNumber ? padding : padding.bottom || 0;
return boundaries;
}
function getArea(_ref) {
var width = _ref.width,
height = _ref.height;
return width * height;
}
/**
* Utility used to transform the `auto` placement to the placement with more
* available space.
* @method
* @memberof Popper.Utils
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function computeAutoPlacement(placement, refRect, popper, reference, boundariesElement) {
var padding = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : 0;
if (placement.indexOf('auto') === -1) {
return placement;
}
var boundaries = getBoundaries(popper, reference, padding, boundariesElement);
var rects = {
top: {
width: boundaries.width,
height: refRect.top - boundaries.top
},
right: {
width: boundaries.right - refRect.right,
height: boundaries.height
},
bottom: {
width: boundaries.width,
height: boundaries.bottom - refRect.bottom
},
left: {
width: refRect.left - boundaries.left,
height: boundaries.height
}
};
var sortedAreas = Object.keys(rects).map(function (key) {
return _extends({
key: key
}, rects[key], {
area: getArea(rects[key])
});
}).sort(function (a, b) {
return b.area - a.area;
});
var filteredAreas = sortedAreas.filter(function (_ref2) {
var width = _ref2.width,
height = _ref2.height;
return width >= popper.clientWidth && height >= popper.clientHeight;
});
var computedPlacement = filteredAreas.length > 0 ? filteredAreas[0].key : sortedAreas[0].key;
var variation = placement.split('-')[1];
return computedPlacement + (variation ? '-' + variation : '');
}
/**
* Get offsets to the reference element
* @method
* @memberof Popper.Utils
* @param {Object} state
* @param {Element} popper - the popper element
* @param {Element} reference - the reference element (the popper will be relative to this)
* @param {Element} fixedPosition - is in fixed position mode
* @returns {Object} An object containing the offsets which will be applied to the popper
*/
function getReferenceOffsets(state, popper, reference) {
var fixedPosition = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : null;
var commonOffsetParent = fixedPosition ? getFixedPositionOffsetParent(popper) : findCommonOffsetParent(popper, getReferenceNode(reference));
return getOffsetRectRelativeToArbitraryNode(reference, commonOffsetParent, fixedPosition);
}
/**
* Get the outer sizes of the given element (offset size + margins)
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Object} object containing width and height properties
*/
function getOuterSizes(element) {
var window = element.ownerDocument.defaultView;
var styles = window.getComputedStyle(element);
var x = parseFloat(styles.marginTop || 0) + parseFloat(styles.marginBottom || 0);
var y = parseFloat(styles.marginLeft || 0) + parseFloat(styles.marginRight || 0);
var result = {
width: element.offsetWidth + y,
height: element.offsetHeight + x
};
return result;
}
/**
* Get the opposite placement of the given one
* @method
* @memberof Popper.Utils
* @argument {String} placement
* @returns {String} flipped placement
*/
function getOppositePlacement(placement) {
var hash = { left: 'right', right: 'left', bottom: 'top', top: 'bottom' };
return placement.replace(/left|right|bottom|top/g, function (matched) {
return hash[matched];
});
}
/**
* Get offsets to the popper
* @method
* @memberof Popper.Utils
* @param {Object} position - CSS position the Popper will get applied
* @param {HTMLElement} popper - the popper element
* @param {Object} referenceOffsets - the reference offsets (the popper will be relative to this)
* @param {String} placement - one of the valid placement options
* @returns {Object} popperOffsets - An object containing the offsets which will be applied to the popper
*/
function getPopperOffsets(popper, referenceOffsets, placement) {
placement = placement.split('-')[0];
// Get popper node sizes
var popperRect = getOuterSizes(popper);
// Add position, width and height to our offsets object
var popperOffsets = {
width: popperRect.width,
height: popperRect.height
};
// depending by the popper placement we have to compute its offsets slightly differently
var isHoriz = ['right', 'left'].indexOf(placement) !== -1;
var mainSide = isHoriz ? 'top' : 'left';
var secondarySide = isHoriz ? 'left' : 'top';
var measurement = isHoriz ? 'height' : 'width';
var secondaryMeasurement = !isHoriz ? 'height' : 'width';
popperOffsets[mainSide] = referenceOffsets[mainSide] + referenceOffsets[measurement] / 2 - popperRect[measurement] / 2;
if (placement === secondarySide) {
popperOffsets[secondarySide] = referenceOffsets[secondarySide] - popperRect[secondaryMeasurement];
} else {
popperOffsets[secondarySide] = referenceOffsets[getOppositePlacement(secondarySide)];
}
return popperOffsets;
}
/**
* Mimics the `find` method of Array
* @method
* @memberof Popper.Utils
* @argument {Array} arr
* @argument prop
* @argument value
* @returns index or -1
*/
function find(arr, check) {
// use native find if supported
if (Array.prototype.find) {
return arr.find(check);
}
// use `filter` to obtain the same behavior of `find`
return arr.filter(check)[0];
}
/**
* Return the index of the matching object
* @method
* @memberof Popper.Utils
* @argument {Array} arr
* @argument prop
* @argument value
* @returns index or -1
*/
function findIndex(arr, prop, value) {
// use native findIndex if supported
if (Array.prototype.findIndex) {
return arr.findIndex(function (cur) {
return cur[prop] === value;
});
}
// use `find` + `indexOf` if `findIndex` isn't supported
var match = find(arr, function (obj) {
return obj[prop] === value;
});
return arr.indexOf(match);
}
/**
 * Loop through the list of modifiers and run them in order,
* each of them will then edit the data object.
* @method
* @memberof Popper.Utils
* @param {dataObject} data
* @param {Array} modifiers
* @param {String} ends - Optional modifier name used as stopper
* @returns {dataObject}
*/
function runModifiers(modifiers, data, ends) {
var modifiersToRun = ends === undefined ? modifiers : modifiers.slice(0, findIndex(modifiers, 'name', ends));
modifiersToRun.forEach(function (modifier) {
if (modifier['function']) {
// eslint-disable-line dot-notation
console.warn('`modifier.function` is deprecated, use `modifier.fn`!');
}
var fn = modifier['function'] || modifier.fn; // eslint-disable-line dot-notation
if (modifier.enabled && isFunction(fn)) {
// Add properties to offsets to make them a complete clientRect object
// we do this before each modifier to make sure the previous one doesn't
// mess with these values
data.offsets.popper = getClientRect(data.offsets.popper);
data.offsets.reference = getClientRect(data.offsets.reference);
data = fn(data, modifier);
}
});
return data;
}
/**
* Updates the position of the popper, computing the new offsets and applying
* the new style.<br />
* Prefer `scheduleUpdate` over `update` because of performance reasons.
* @method
* @memberof Popper
*/
function update() {
// if popper is destroyed, don't perform any further update
if (this.state.isDestroyed) {
return;
}
var data = {
instance: this,
styles: {},
arrowStyles: {},
attributes: {},
flipped: false,
offsets: {}
};
// compute reference element offsets
data.offsets.reference = getReferenceOffsets(this.state, this.popper, this.reference, this.options.positionFixed);
// compute auto placement, store placement inside the data object,
// modifiers will be able to edit `placement` if needed
// and refer to originalPlacement to know the original value
data.placement = computeAutoPlacement(this.options.placement, data.offsets.reference, this.popper, this.reference, this.options.modifiers.flip.boundariesElement, this.options.modifiers.flip.padding);
// store the computed placement inside `originalPlacement`
data.originalPlacement = data.placement;
data.positionFixed = this.options.positionFixed;
// compute the popper offsets
data.offsets.popper = getPopperOffsets(this.popper, data.offsets.reference, data.placement);
data.offsets.popper.position = this.options.positionFixed ? 'fixed' : 'absolute';
// run the modifiers
data = runModifiers(this.modifiers, data);
// the first `update` will call `onCreate` callback
// the other ones will call `onUpdate` callback
if (!this.state.isCreated) {
this.state.isCreated = true;
this.options.onCreate(data);
} else {
this.options.onUpdate(data);
}
}
/**
* Helper used to know if the given modifier is enabled.
* @method
* @memberof Popper.Utils
* @returns {Boolean}
*/
function isModifierEnabled(modifiers, modifierName) {
return modifiers.some(function (_ref) {
var name = _ref.name,
enabled = _ref.enabled;
return enabled && name === modifierName;
});
}
/**
* Get the prefixed supported property name
* @method
* @memberof Popper.Utils
* @argument {String} property (camelCase)
* @returns {String} prefixed property (camelCase or PascalCase, depending on the vendor prefix)
*/
function getSupportedPropertyName(property) {
var prefixes = [false, 'ms', 'Webkit', 'Moz', 'O'];
var upperProp = property.charAt(0).toUpperCase() + property.slice(1);
for (var i = 0; i < prefixes.length; i++) {
var prefix = prefixes[i];
var toCheck = prefix ? '' + prefix + upperProp : property;
if (typeof document.body.style[toCheck] !== 'undefined') {
return toCheck;
}
}
return null;
}
/**
* Destroys the popper.
* @method
* @memberof Popper
*/
function destroy() {
this.state.isDestroyed = true;
// touch DOM only if `applyStyle` modifier is enabled
if (isModifierEnabled(this.modifiers, 'applyStyle')) {
this.popper.removeAttribute('x-placement');
this.popper.style.position = '';
this.popper.style.top = '';
this.popper.style.left = '';
this.popper.style.right = '';
this.popper.style.bottom = '';
this.popper.style.willChange = '';
this.popper.style[getSupportedPropertyName('transform')] = '';
}
this.disableEventListeners();
// remove the popper if user explicitly asked for the deletion on destroy
// do not use `remove` because IE11 doesn't support it
if (this.options.removeOnDestroy) {
this.popper.parentNode.removeChild(this.popper);
}
return this;
}
/**
* Get the window associated with the element
* @argument {Element} element
* @returns {Window}
*/
function getWindow(element) {
var ownerDocument = element.ownerDocument;
return ownerDocument ? ownerDocument.defaultView : window;
}
function attachToScrollParents(scrollParent, event, callback, scrollParents) {
var isBody = scrollParent.nodeName === 'BODY';
var target = isBody ? scrollParent.ownerDocument.defaultView : scrollParent;
target.addEventListener(event, callback, { passive: true });
if (!isBody) {
attachToScrollParents(getScrollParent(target.parentNode), event, callback, scrollParents);
}
scrollParents.push(target);
}
/**
* Setup needed event listeners used to update the popper position
* @method
* @memberof Popper.Utils
* @private
*/
function setupEventListeners(reference, options, state, updateBound) {
// Resize event listener on window
state.updateBound = updateBound;
getWindow(reference).addEventListener('resize', state.updateBound, { passive: true });
// Scroll event listener on scroll parents
var scrollElement = getScrollParent(reference);
attachToScrollParents(scrollElement, 'scroll', state.updateBound, state.scrollParents);
state.scrollElement = scrollElement;
state.eventsEnabled = true;
return state;
}
/**
* It will add resize/scroll events and start recalculating
* position of the popper element when they are triggered.
* @method
* @memberof Popper
*/
function enableEventListeners() {
if (!this.state.eventsEnabled) {
this.state = setupEventListeners(this.reference, this.options, this.state, this.scheduleUpdate);
}
}
/**
* Remove event listeners used to update the popper position
* @method
* @memberof Popper.Utils
* @private
*/
function removeEventListeners(reference, state) {
// Remove resize event listener on window
getWindow(reference).removeEventListener('resize', state.updateBound);
// Remove scroll event listener on scroll parents
state.scrollParents.forEach(function (target) {
target.removeEventListener('scroll', state.updateBound);
});
// Reset state
state.updateBound = null;
state.scrollParents = [];
state.scrollElement = null;
state.eventsEnabled = false;
return state;
}
/**
* It will remove resize/scroll events and won't recalculate popper position
* when they are triggered. It also won't trigger `onUpdate` callback anymore,
* unless you call `update` method manually.
* @method
* @memberof Popper
*/
function disableEventListeners() {
if (this.state.eventsEnabled) {
cancelAnimationFrame(this.scheduleUpdate);
this.state = removeEventListeners(this.reference, this.state);
}
}
/**
* Tells if a given input is a number
* @method
* @memberof Popper.Utils
* @param {*} input to check
* @return {Boolean}
*/
function isNumeric(n) {
return n !== '' && !isNaN(parseFloat(n)) && isFinite(n);
}
/**
* Set the style to the given popper
* @method
* @memberof Popper.Utils
* @argument {Element} element - Element to apply the style to
* @argument {Object} styles
* Object with a list of properties and values which will be applied to the element
*/
function setStyles(element, styles) {
Object.keys(styles).forEach(function (prop) {
var unit = '';
// add unit if the value is numeric and is one of the following
if (['width', 'height', 'top', 'right', 'bottom', 'left'].indexOf(prop) !== -1 && isNumeric(styles[prop])) {
unit = 'px';
}
element.style[prop] = styles[prop] + unit;
});
}
/**
* Set the attributes to the given popper
* @method
* @memberof Popper.Utils
* @argument {Element} element - Element to apply the attributes to
* @argument {Object} styles
* Object with a list of properties and values which will be applied to the element
*/
function setAttributes(element, attributes) {
Object.keys(attributes).forEach(function (prop) {
var value = attributes[prop];
if (value !== false) {
element.setAttribute(prop, attributes[prop]);
} else {
element.removeAttribute(prop);
}
});
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} data.styles - List of style properties - values to apply to popper element
* @argument {Object} data.attributes - List of attribute properties - values to apply to popper element
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The same data object
*/
function applyStyle(data) {
// any property present in `data.styles` will be applied to the popper,
// in this way we can make the 3rd party modifiers add custom styles to it
// Be aware, modifiers could override the properties defined in the previous
// lines of this modifier!
setStyles(data.instance.popper, data.styles);
// any property present in `data.attributes` will be applied to the popper,
// they will be set as HTML attributes of the element
setAttributes(data.instance.popper, data.attributes);
// if arrowElement is defined and arrowStyles has some properties
if (data.arrowElement && Object.keys(data.arrowStyles).length) {
setStyles(data.arrowElement, data.arrowStyles);
}
return data;
}
/**
* Set the x-placement attribute before everything else because it could be used
 * to add margins to the popper; margins need to be calculated to get the
 * correct popper offsets.
* @method
* @memberof Popper.modifiers
* @param {HTMLElement} reference - The reference element used to position the popper
* @param {HTMLElement} popper - The HTML element used as popper
* @param {Object} options - Popper.js options
*/
function applyStyleOnLoad(reference, popper, options, modifierOptions, state) {
// compute reference element offsets
var referenceOffsets = getReferenceOffsets(state, popper, reference, options.positionFixed);
// compute auto placement, store placement inside the data object,
// modifiers will be able to edit `placement` if needed
// and refer to originalPlacement to know the original value
var placement = computeAutoPlacement(options.placement, referenceOffsets, popper, reference, options.modifiers.flip.boundariesElement, options.modifiers.flip.padding);
popper.setAttribute('x-placement', placement);
// Apply `position` to popper before anything else because
// without the position applied we can't guarantee correct computations
setStyles(popper, { position: options.positionFixed ? 'fixed' : 'absolute' });
return options;
}
/**
* @function
* @memberof Popper.Utils
* @argument {Object} data - The data object generated by `update` method
* @argument {Boolean} shouldRound - If the offsets should be rounded at all
* @returns {Object} The popper's position offsets rounded
*
* The tale of pixel-perfect positioning. It's still not 100% perfect, but as
* good as it can be within reason.
* Discussion here: https://github.com/FezVrasta/popper.js/pull/715
*
* Low DPI screens cause a popper to be blurry if not using full pixels (Safari
* as well on High DPI screens).
*
* Firefox prefers no rounding for positioning and does not have blurriness on
* high DPI screens.
*
* Only horizontal placement and left/right values need to be considered.
*/
function getRoundedOffsets(data, shouldRound) {
var _data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var round = Math.round,
floor = Math.floor;
var noRound = function noRound(v) {
return v;
};
var referenceWidth = round(reference.width);
var popperWidth = round(popper.width);
var isVertical = ['left', 'right'].indexOf(data.placement) !== -1;
var isVariation = data.placement.indexOf('-') !== -1;
var sameWidthParity = referenceWidth % 2 === popperWidth % 2;
var bothOddWidth = referenceWidth % 2 === 1 && popperWidth % 2 === 1;
var horizontalToInteger = !shouldRound ? noRound : isVertical || isVariation || sameWidthParity ? round : floor;
var verticalToInteger = !shouldRound ? noRound : round;
return {
left: horizontalToInteger(bothOddWidth && !isVariation && shouldRound ? popper.left - 1 : popper.left),
top: verticalToInteger(popper.top),
bottom: verticalToInteger(popper.bottom),
right: horizontalToInteger(popper.right)
};
}
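// Illustrative only (not part of the upstream source): a worked example of the
// rounding rules above. With shouldRound = true, a 101px-wide reference and a
// 25px-wide popper (both widths odd, no '-start'/'-end' variation, placement
// 'bottom'), the left value is shifted by 1px before rounding, the right value
// is rounded directly, and vertical values are simply rounded:
//   popper offsets { left: 10.6, top: 4.4, right: 35.6, bottom: 29.4 }
//   // left   -> round(10.6 - 1) = 10
//   // right  -> round(35.6)     = 36
//   // top    -> round(4.4)      = 4
//   // bottom -> round(29.4)     = 29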
var isFirefox = isBrowser && /Firefox/i.test(navigator.userAgent);
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function computeStyle(data, options) {
var x = options.x,
y = options.y;
var popper = data.offsets.popper;
// Remove this legacy support in Popper.js v2
var legacyGpuAccelerationOption = find(data.instance.modifiers, function (modifier) {
return modifier.name === 'applyStyle';
}).gpuAcceleration;
if (legacyGpuAccelerationOption !== undefined) {
console.warn('WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!');
}
var gpuAcceleration = legacyGpuAccelerationOption !== undefined ? legacyGpuAccelerationOption : options.gpuAcceleration;
var offsetParent = getOffsetParent(data.instance.popper);
var offsetParentRect = getBoundingClientRect(offsetParent);
// Styles
var styles = {
position: popper.position
};
var offsets = getRoundedOffsets(data, window.devicePixelRatio < 2 || !isFirefox);
var sideA = x === 'bottom' ? 'top' : 'bottom';
var sideB = y === 'right' ? 'left' : 'right';
// if gpuAcceleration is set to `true` and transform is supported,
// we use `translate3d` to apply the position to the popper we
// automatically use the supported prefixed version if needed
var prefixedProperty = getSupportedPropertyName('transform');
// now, let's make a step back and look at this code closely (wtf?)
// If the content of the popper grows once it's been positioned, it
// may happen that the popper gets misplaced because of the new content
// overflowing its reference element
// To avoid this problem, we provide two options (x and y), which allow
// the consumer to define the offset origin.
// If we position a popper on top of a reference element, we can set
// `x` to `top` to make the popper grow towards its top instead of
// its bottom.
var left = void 0,
top = void 0;
if (sideA === 'bottom') {
// when offsetParent is <html> the positioning is relative to the bottom of the screen (excluding the scrollbar)
// and not the bottom of the html element
if (offsetParent.nodeName === 'HTML') {
top = -offsetParent.clientHeight + offsets.bottom;
} else {
top = -offsetParentRect.height + offsets.bottom;
}
} else {
top = offsets.top;
}
if (sideB === 'right') {
if (offsetParent.nodeName === 'HTML') {
left = -offsetParent.clientWidth + offsets.right;
} else {
left = -offsetParentRect.width + offsets.right;
}
} else {
left = offsets.left;
}
if (gpuAcceleration && prefixedProperty) {
styles[prefixedProperty] = 'translate3d(' + left + 'px, ' + top + 'px, 0)';
styles[sideA] = 0;
styles[sideB] = 0;
styles.willChange = 'transform';
} else {
// otherwise, we use the standard `top`, `left`, `bottom` and `right` properties
var invertTop = sideA === 'bottom' ? -1 : 1;
var invertLeft = sideB === 'right' ? -1 : 1;
styles[sideA] = top * invertTop;
styles[sideB] = left * invertLeft;
styles.willChange = sideA + ', ' + sideB;
}
// Attributes
var attributes = {
'x-placement': data.placement
};
// Update `data` attributes, styles and arrowStyles
data.attributes = _extends({}, attributes, data.attributes);
data.styles = _extends({}, styles, data.styles);
data.arrowStyles = _extends({}, data.offsets.arrow, data.arrowStyles);
return data;
}
/**
* Helper used to know if the given modifier depends on another one.<br />
* It checks if the needed modifier is listed and enabled.
* @method
* @memberof Popper.Utils
* @param {Array} modifiers - list of modifiers
* @param {String} requestingName - name of requesting modifier
* @param {String} requestedName - name of requested modifier
* @returns {Boolean}
*/
function isModifierRequired(modifiers, requestingName, requestedName) {
var requesting = find(modifiers, function (_ref) {
var name = _ref.name;
return name === requestingName;
});
var isRequired = !!requesting && modifiers.some(function (modifier) {
return modifier.name === requestedName && modifier.enabled && modifier.order < requesting.order;
});
if (!isRequired) {
var _requesting = '`' + requestingName + '`';
var requested = '`' + requestedName + '`';
console.warn(requested + ' modifier is required by ' + _requesting + ' modifier in order to work, be sure to include it before ' + _requesting + '!');
}
return isRequired;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function arrow(data, options) {
var _data$offsets$arrow;
// arrow depends on keepTogether in order to work
if (!isModifierRequired(data.instance.modifiers, 'arrow', 'keepTogether')) {
return data;
}
var arrowElement = options.element;
// if arrowElement is a string, suppose it's a CSS selector
if (typeof arrowElement === 'string') {
arrowElement = data.instance.popper.querySelector(arrowElement);
// if arrowElement is not found, don't run the modifier
if (!arrowElement) {
return data;
}
} else {
// if the arrowElement isn't a query selector we must check that the
// provided DOM node is child of its popper node
if (!data.instance.popper.contains(arrowElement)) {
console.warn('WARNING: `arrow.element` must be child of its popper element!');
return data;
}
}
var placement = data.placement.split('-')[0];
var _data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var isVertical = ['left', 'right'].indexOf(placement) !== -1;
var len = isVertical ? 'height' : 'width';
var sideCapitalized = isVertical ? 'Top' : 'Left';
var side = sideCapitalized.toLowerCase();
var altSide = isVertical ? 'left' : 'top';
var opSide = isVertical ? 'bottom' : 'right';
var arrowElementSize = getOuterSizes(arrowElement)[len];
//
// extends keepTogether behavior making sure the popper and its
// reference have enough pixels in conjunction
//
// top/left side
if (reference[opSide] - arrowElementSize < popper[side]) {
data.offsets.popper[side] -= popper[side] - (reference[opSide] - arrowElementSize);
}
// bottom/right side
if (reference[side] + arrowElementSize > popper[opSide]) {
data.offsets.popper[side] += reference[side] + arrowElementSize - popper[opSide];
}
data.offsets.popper = getClientRect(data.offsets.popper);
// compute center of the popper
var center = reference[side] + reference[len] / 2 - arrowElementSize / 2;
// Compute the sideValue using the updated popper offsets
// take popper margin in account because we don't have this info available
var css = getStyleComputedProperty(data.instance.popper);
var popperMarginSide = parseFloat(css['margin' + sideCapitalized]);
var popperBorderSide = parseFloat(css['border' + sideCapitalized + 'Width']);
var sideValue = center - data.offsets.popper[side] - popperMarginSide - popperBorderSide;
// prevent arrowElement from being placed not contiguously to its popper
sideValue = Math.max(Math.min(popper[len] - arrowElementSize, sideValue), 0);
data.arrowElement = arrowElement;
data.offsets.arrow = (_data$offsets$arrow = {}, defineProperty(_data$offsets$arrow, side, Math.round(sideValue)), defineProperty(_data$offsets$arrow, altSide, ''), _data$offsets$arrow);
return data;
}
/**
* Get the opposite placement variation of the given one
* @method
* @memberof Popper.Utils
* @argument {String} placement variation
* @returns {String} flipped placement variation
*/
function getOppositeVariation(variation) {
if (variation === 'end') {
return 'start';
} else if (variation === 'start') {
return 'end';
}
return variation;
}
/**
* List of accepted placements to use as values of the `placement` option.<br />
* Valid placements are:
* - `auto`
* - `top`
* - `right`
* - `bottom`
* - `left`
*
* Each placement can have a variation from this list:
* - `-start`
* - `-end`
*
* Variations are easiest to interpret if you think in terms of left-to-right
* written languages. Horizontally (`top` and `bottom`), `start` is left and `end`
* is right.<br />
* Vertically (`left` and `right`), `start` is top and `end` is bottom.
*
* Some valid examples are:
* - `top-end` (on top of reference, right aligned)
* - `right-start` (on right of reference, top aligned)
* - `bottom` (on bottom, centered)
* - `auto-end` (on the side with more space available, alignment depends on placement)
*
* @static
* @type {Array}
* @enum {String}
* @readonly
* @method placements
* @memberof Popper
*/
var placements = ['auto-start', 'auto', 'auto-end', 'top-start', 'top', 'top-end', 'right-start', 'right', 'right-end', 'bottom-end', 'bottom', 'bottom-start', 'left-end', 'left', 'left-start'];
// Get rid of `auto` `auto-start` and `auto-end`
var validPlacements = placements.slice(3);
/**
* Given an initial placement, returns all the subsequent placements
* clockwise (or counter-clockwise).
*
* @method
* @memberof Popper.Utils
* @argument {String} placement - A valid placement (it accepts variations)
* @argument {Boolean} counter - Set to true to walk the placements counterclockwise
* @returns {Array} placements including their variations
*/
function clockwise(placement) {
var counter = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
var index = validPlacements.indexOf(placement);
var arr = validPlacements.slice(index + 1).concat(validPlacements.slice(0, index));
return counter ? arr.reverse() : arr;
}
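// Illustrative only (not part of the upstream source): assuming the
// validPlacements order defined above, walking clockwise from 'top' yields
//   clockwise('top')
//   // => ['top-end', 'right-start', 'right', 'right-end', 'bottom-end', 'bottom',
//   //     'bottom-start', 'left-end', 'left', 'left-start', 'top-start']
// and clockwise('top', true) returns the same list reversed.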
var BEHAVIORS = {
FLIP: 'flip',
CLOCKWISE: 'clockwise',
COUNTERCLOCKWISE: 'counterclockwise'
};
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function flip(data, options) {
// if `inner` modifier is enabled, we can't use the `flip` modifier
if (isModifierEnabled(data.instance.modifiers, 'inner')) {
return data;
}
if (data.flipped && data.placement === data.originalPlacement) {
// seems like flip is trying to loop, probably there's not enough space on any of the flippable sides
return data;
}
var boundaries = getBoundaries(data.instance.popper, data.instance.reference, options.padding, options.boundariesElement, data.positionFixed);
var placement = data.placement.split('-')[0];
var placementOpposite = getOppositePlacement(placement);
var variation = data.placement.split('-')[1] || '';
var flipOrder = [];
switch (options.behavior) {
case BEHAVIORS.FLIP:
flipOrder = [placement, placementOpposite];
break;
case BEHAVIORS.CLOCKWISE:
flipOrder = clockwise(placement);
break;
case BEHAVIORS.COUNTERCLOCKWISE:
flipOrder = clockwise(placement, true);
break;
default:
flipOrder = options.behavior;
}
flipOrder.forEach(function (step, index) {
if (placement !== step || flipOrder.length === index + 1) {
return data;
}
placement = data.placement.split('-')[0];
placementOpposite = getOppositePlacement(placement);
var popperOffsets = data.offsets.popper;
var refOffsets = data.offsets.reference;
// using floor because the reference offsets may contain decimals we are not going to consider here
var floor = Math.floor;
var overlapsRef = placement === 'left' && floor(popperOffsets.right) > floor(refOffsets.left) || placement === 'right' && floor(popperOffsets.left) < floor(refOffsets.right) || placement === 'top' && floor(popperOffsets.bottom) > floor(refOffsets.top) || placement === 'bottom' && floor(popperOffsets.top) < floor(refOffsets.bottom);
var overflowsLeft = floor(popperOffsets.left) < floor(boundaries.left);
var overflowsRight = floor(popperOffsets.right) > floor(boundaries.right);
var overflowsTop = floor(popperOffsets.top) < floor(boundaries.top);
var overflowsBottom = floor(popperOffsets.bottom) > floor(boundaries.bottom);
var overflowsBoundaries = placement === 'left' && overflowsLeft || placement === 'right' && overflowsRight || placement === 'top' && overflowsTop || placement === 'bottom' && overflowsBottom;
// flip the variation if required
var isVertical = ['top', 'bottom'].indexOf(placement) !== -1;
// flips variation if reference element overflows boundaries
var flippedVariationByRef = !!options.flipVariations && (isVertical && variation === 'start' && overflowsLeft || isVertical && variation === 'end' && overflowsRight || !isVertical && variation === 'start' && overflowsTop || !isVertical && variation === 'end' && overflowsBottom);
// flips variation if popper content overflows boundaries
var flippedVariationByContent = !!options.flipVariationsByContent && (isVertical && variation === 'start' && overflowsRight || isVertical && variation === 'end' && overflowsLeft || !isVertical && variation === 'start' && overflowsBottom || !isVertical && variation === 'end' && overflowsTop);
var flippedVariation = flippedVariationByRef || flippedVariationByContent;
if (overlapsRef || overflowsBoundaries || flippedVariation) {
// this boolean to detect any flip loop
data.flipped = true;
if (overlapsRef || overflowsBoundaries) {
placement = flipOrder[index + 1];
}
if (flippedVariation) {
variation = getOppositeVariation(variation);
}
data.placement = placement + (variation ? '-' + variation : '');
// this object contains `position`, we want to preserve it along with
// any additional property we may add in the future
data.offsets.popper = _extends({}, data.offsets.popper, getPopperOffsets(data.instance.popper, data.offsets.reference, data.placement));
data = runModifiers(data.instance.modifiers, data, 'flip');
}
});
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function keepTogether(data) {
var _data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var placement = data.placement.split('-')[0];
var floor = Math.floor;
var isVertical = ['top', 'bottom'].indexOf(placement) !== -1;
var side = isVertical ? 'right' : 'bottom';
var opSide = isVertical ? 'left' : 'top';
var measurement = isVertical ? 'width' : 'height';
if (popper[side] < floor(reference[opSide])) {
data.offsets.popper[opSide] = floor(reference[opSide]) - popper[measurement];
}
if (popper[opSide] > floor(reference[side])) {
data.offsets.popper[opSide] = floor(reference[side]);
}
return data;
}
/**
* Converts a string containing value + unit into a px value number
* @function
* @memberof {modifiers~offset}
* @private
* @argument {String} str - Value + unit string
* @argument {String} measurement - `height` or `width`
* @argument {Object} popperOffsets
* @argument {Object} referenceOffsets
* @returns {Number|String}
* Value in pixels, or original string if no values were extracted
*/
function toValue(str, measurement, popperOffsets, referenceOffsets) {
// separate value from unit
var split = str.match(/((?:\-|\+)?\d*\.?\d*)(.*)/);
var value = +split[1];
var unit = split[2];
// If it's not a number it's an operator, I guess
if (!value) {
return str;
}
if (unit.indexOf('%') === 0) {
var element = void 0;
switch (unit) {
case '%p':
element = popperOffsets;
break;
case '%':
case '%r':
default:
element = referenceOffsets;
}
var rect = getClientRect(element);
return rect[measurement] / 100 * value;
} else if (unit === 'vh' || unit === 'vw') {
// if is a vh or vw, we calculate the size based on the viewport
var size = void 0;
if (unit === 'vh') {
size = Math.max(document.documentElement.clientHeight, window.innerHeight || 0);
} else {
size = Math.max(document.documentElement.clientWidth, window.innerWidth || 0);
}
return size / 100 * value;
} else {
// if is an explicit pixel unit, we get rid of the unit and keep the value
// if is an implicit unit, it's px, and we return just the value
return value;
}
}
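// Illustrative only (not part of the upstream source): a few conversions this
// helper performs, assuming a 200px-wide reference rect and a 1000px-wide viewport:
//   toValue('10%', 'width', popperOffsets, referenceOffsets)  // => 20  (10% of reference width)
//   toValue('25vw', 'width', popperOffsets, referenceOffsets) // => 250 (25% of viewport width)
//   toValue('15px', 'width', popperOffsets, referenceOffsets) // => 15
//   toValue('+', 'width', popperOffsets, referenceOffsets)    // => '+' (operators pass through)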
/**
* Parse an `offset` string to extrapolate `x` and `y` numeric offsets.
* @function
* @memberof {modifiers~offset}
* @private
* @argument {String} offset
* @argument {Object} popperOffsets
* @argument {Object} referenceOffsets
* @argument {String} basePlacement
* @returns {Array} a two cells array with x and y offsets in numbers
*/
function parseOffset(offset, popperOffsets, referenceOffsets, basePlacement) {
var offsets = [0, 0];
// Use height if placement is left or right and index is 0 otherwise use width
// in this way the first offset will use an axis and the second one
// will use the other one
var useHeight = ['right', 'left'].indexOf(basePlacement) !== -1;
// Split the offset string to obtain a list of values and operands
// The regex addresses values with the plus or minus sign in front (+10, -20, etc)
var fragments = offset.split(/(\+|\-)/).map(function (frag) {
return frag.trim();
});
// Detect if the offset string contains a pair of values or a single one
// they could be separated by comma or space
var divider = fragments.indexOf(find(fragments, function (frag) {
return frag.search(/,|\s/) !== -1;
}));
if (fragments[divider] && fragments[divider].indexOf(',') === -1) {
console.warn('Offsets separated by white space(s) are deprecated, use a comma (,) instead.');
}
// If a divider is found, we split the list of values and operands
// between the X and Y offsets.
var splitRegex = /\s*,\s*|\s+/;
var ops = divider !== -1 ? [fragments.slice(0, divider).concat([fragments[divider].split(splitRegex)[0]]), [fragments[divider].split(splitRegex)[1]].concat(fragments.slice(divider + 1))] : [fragments];
// Convert the values with units to absolute pixels to allow our computations
ops = ops.map(function (op, index) {
// Most of the units rely on the orientation of the popper
var measurement = (index === 1 ? !useHeight : useHeight) ? 'height' : 'width';
var mergeWithPrevious = false;
return op
// This aggregates any `+` or `-` sign that aren't considered operators
// e.g.: 10 + +5 => [10, +, +5]
.reduce(function (a, b) {
if (a[a.length - 1] === '' && ['+', '-'].indexOf(b) !== -1) {
a[a.length - 1] = b;
mergeWithPrevious = true;
return a;
} else if (mergeWithPrevious) {
a[a.length - 1] += b;
mergeWithPrevious = false;
return a;
} else {
return a.concat(b);
}
}, [])
// Here we convert the string values into number values (in px)
.map(function (str) {
return toValue(str, measurement, popperOffsets, referenceOffsets);
});
});
// Loop through the offsets arrays and execute the operations
ops.forEach(function (op, index) {
op.forEach(function (frag, index2) {
if (isNumeric(frag)) {
offsets[index] += frag * (op[index2 - 1] === '-' ? -1 : 1);
}
});
});
return offsets;
}
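// Illustrative only (not part of the upstream source): for a 'bottom' placement
// with a 200px-wide reference, the two comma-separated fragments are resolved
// against width and height respectively:
//   parseOffset('10%, 5', popperOffsets, referenceOffsets, 'bottom')
//   // => [20, 5]   (x offset = 10% of the 200px reference width, y offset = 5px)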
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @argument {Number|String} options.offset=0
* The offset value as described in the modifier description
* @returns {Object} The data object, properly modified
*/
function offset(data, _ref) {
var offset = _ref.offset;
var placement = data.placement,
_data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var basePlacement = placement.split('-')[0];
var offsets = void 0;
if (isNumeric(+offset)) {
offsets = [+offset, 0];
} else {
offsets = parseOffset(offset, popper, reference, basePlacement);
}
if (basePlacement === 'left') {
popper.top += offsets[0];
popper.left -= offsets[1];
} else if (basePlacement === 'right') {
popper.top += offsets[0];
popper.left += offsets[1];
} else if (basePlacement === 'top') {
popper.left += offsets[0];
popper.top -= offsets[1];
} else if (basePlacement === 'bottom') {
popper.left += offsets[0];
popper.top += offsets[1];
}
data.popper = popper;
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function preventOverflow(data, options) {
var boundariesElement = options.boundariesElement || getOffsetParent(data.instance.popper);
// If offsetParent is the reference element, we really want to
// go one step up and use the next offsetParent as reference to
// avoid to make this modifier completely useless and look like broken
if (data.instance.reference === boundariesElement) {
boundariesElement = getOffsetParent(boundariesElement);
}
// NOTE: DOM access here
// resets the popper's position so that the document size can be calculated excluding
// the size of the popper element itself
var transformProp = getSupportedPropertyName('transform');
var popperStyles = data.instance.popper.style; // assignment to help minification
var top = popperStyles.top,
left = popperStyles.left,
transform = popperStyles[transformProp];
popperStyles.top = '';
popperStyles.left = '';
popperStyles[transformProp] = '';
var boundaries = getBoundaries(data.instance.popper, data.instance.reference, options.padding, boundariesElement, data.positionFixed);
// NOTE: DOM access here
// restores the original style properties after the offsets have been computed
popperStyles.top = top;
popperStyles.left = left;
popperStyles[transformProp] = transform;
options.boundaries = boundaries;
var order = options.priority;
var popper = data.offsets.popper;
var check = {
primary: function primary(placement) {
var value = popper[placement];
if (popper[placement] < boundaries[placement] && !options.escapeWithReference) {
value = Math.max(popper[placement], boundaries[placement]);
}
return defineProperty({}, placement, value);
},
secondary: function secondary(placement) {
var mainSide = placement === 'right' ? 'left' : 'top';
var value = popper[mainSide];
if (popper[placement] > boundaries[placement] && !options.escapeWithReference) {
value = Math.min(popper[mainSide], boundaries[placement] - (placement === 'right' ? popper.width : popper.height));
}
return defineProperty({}, mainSide, value);
}
};
order.forEach(function (placement) {
var side = ['left', 'top'].indexOf(placement) !== -1 ? 'primary' : 'secondary';
popper = _extends({}, popper, check[side](placement));
});
data.offsets.popper = popper;
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function shift(data) {
var placement = data.placement;
var basePlacement = placement.split('-')[0];
var shiftvariation = placement.split('-')[1];
// if a shift variation is specified, run the modifier
if (shiftvariation) {
var _data$offsets = data.offsets,
reference = _data$offsets.reference,
popper = _data$offsets.popper;
var isVertical = ['bottom', 'top'].indexOf(basePlacement) !== -1;
var side = isVertical ? 'left' : 'top';
var measurement = isVertical ? 'width' : 'height';
var shiftOffsets = {
start: defineProperty({}, side, reference[side]),
end: defineProperty({}, side, reference[side] + reference[measurement] - popper[measurement])
};
data.offsets.popper = _extends({}, popper, shiftOffsets[shiftvariation]);
}
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function hide(data) {
if (!isModifierRequired(data.instance.modifiers, 'hide', 'preventOverflow')) {
return data;
}
var refRect = data.offsets.reference;
var bound = find(data.instance.modifiers, function (modifier) {
return modifier.name === 'preventOverflow';
}).boundaries;
if (refRect.bottom < bound.top || refRect.left > bound.right || refRect.top > bound.bottom || refRect.right < bound.left) {
// Avoid unnecessary DOM access if visibility hasn't changed
if (data.hide === true) {
return data;
}
data.hide = true;
data.attributes['x-out-of-boundaries'] = '';
} else {
// Avoid unnecessary DOM access if visibility hasn't changed
if (data.hide === false) {
return data;
}
data.hide = false;
data.attributes['x-out-of-boundaries'] = false;
}
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function inner(data) {
var placement = data.placement;
var basePlacement = placement.split('-')[0];
var _data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var isHoriz = ['left', 'right'].indexOf(basePlacement) !== -1;
var subtractLength = ['top', 'left'].indexOf(basePlacement) === -1;
popper[isHoriz ? 'left' : 'top'] = reference[basePlacement] - (subtractLength ? popper[isHoriz ? 'width' : 'height'] : 0);
data.placement = getOppositePlacement(placement);
data.offsets.popper = getClientRect(popper);
return data;
}
/**
* Modifier function, each modifier can have a function of this type assigned
* to its `fn` property.<br />
* These functions will be called on each update, this means that you must
* make sure they are performant enough to avoid performance bottlenecks.
*
* @function ModifierFn
* @argument {dataObject} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {dataObject} The data object, properly modified
*/
/**
* Modifiers are plugins used to alter the behavior of your poppers.<br />
* Popper.js uses a set of 9 modifiers to provide all the basic functionalities
* needed by the library.
*
* Usually you don't want to override the `order`, `fn` and `onLoad` props.
* All the other properties are configurations that could be tweaked.
* @namespace modifiers
*/
var modifiers = {
/**
* Modifier used to shift the popper on the start or end of its reference
* element.<br />
* It will read the variation of the `placement` property.<br />
* It can be either `-end` or `-start`.
* @memberof modifiers
* @inner
*/
shift: {
/** @prop {number} order=100 - Index used to define the order of execution */
order: 100,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: shift
},
/**
* The `offset` modifier can shift your popper on both its axis.
*
* It accepts the following units:
* - `px` or unit-less, interpreted as pixels
* - `%` or `%r`, percentage relative to the length of the reference element
* - `%p`, percentage relative to the length of the popper element
* - `vw`, CSS viewport width unit
* - `vh`, CSS viewport height unit
*
* Here "length" refers to the main axis relative to the placement of the popper.<br />
* This means that if the placement is `top` or `bottom`, the length will be the
* `width`. In case of `left` or `right`, it will be the `height`.
*
* You can provide a single value (as `Number` or `String`), or a pair of values
* as `String` divided by a comma or one (or more) white spaces.<br />
* The latter is a deprecated method because it leads to confusion and will be
* removed in v2.<br />
* Additionally, it accepts additions and subtractions between different units.
* Note that multiplications and divisions aren't supported.
*
* Valid examples are:
* ```
* 10
* '10%'
* '10, 10'
* '10%, 10'
* '10 + 10%'
* '10 - 5vh + 3%'
* '-10px + 5vh, 5px - 6%'
* ```
* > **NB**: If you desire to apply offsets to your poppers in a way that may make them overlap
* > with their reference element, unfortunately, you will have to disable the `flip` modifier.
* > You can read more on this at this [issue](https://github.com/FezVrasta/popper.js/issues/373).
*
* @memberof modifiers
* @inner
*/
offset: {
/** @prop {number} order=200 - Index used to define the order of execution */
order: 200,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: offset,
/** @prop {Number|String} offset=0
* The offset value as described in the modifier description
*/
offset: 0
},
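//
// Illustrative only (not part of the upstream source): the offset described above
// is usually configured through the constructor options, e.g.
//   new Popper(reference, popper, {
//     modifiers: { offset: { offset: '10%, -5px' } }
//   });
// where the first value shifts the popper along its main axis and the second
// along the secondary axis, using the units documented above.
//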
/**
* Modifier used to prevent the popper from being positioned outside the boundary.
*
* A scenario exists where the reference itself is not within the boundaries.<br />
* We can say it has "escaped the boundaries" — or just "escaped".<br />
* In this case we need to decide whether the popper should either:
*
* - detach from the reference and remain "trapped" in the boundaries, or
* - if it should ignore the boundary and "escape with its reference"
*
* When `escapeWithReference` is set to `true` and the reference is completely
* outside its boundaries, the popper will overflow (or completely leave)
* the boundaries in order to remain attached to the edge of the reference.
*
* @memberof modifiers
* @inner
*/
preventOverflow: {
/** @prop {number} order=300 - Index used to define the order of execution */
order: 300,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: preventOverflow,
/**
* @prop {Array} [priority=['left','right','top','bottom']]
* Popper will try to prevent overflow following these priorities by default,
* then, it could overflow on the left and on top of the `boundariesElement`
*/
priority: ['left', 'right', 'top', 'bottom'],
/**
* @prop {number} padding=5
* Amount of pixel used to define a minimum distance between the boundaries
* and the popper. This makes sure the popper always has a little padding
* between the edges of its container
*/
padding: 5,
/**
* @prop {String|HTMLElement} boundariesElement='scrollParent'
* Boundaries used by the modifier. Can be `scrollParent`, `window`,
* `viewport` or any DOM element.
*/
boundariesElement: 'scrollParent'
},
/**
* Modifier used to make sure the reference and its popper stay near each other
* without leaving any gap between the two. Especially useful when the arrow is
* enabled and you want to ensure that it points to its reference element.
* It cares only about the first axis. You can still have poppers with margin
* between the popper and its reference element.
* @memberof modifiers
* @inner
*/
keepTogether: {
/** @prop {number} order=400 - Index used to define the order of execution */
order: 400,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: keepTogether
},
/**
* This modifier is used to move the `arrowElement` of the popper to make
* sure it is positioned between the reference element and its popper element.
* It will read the outer size of the `arrowElement` node to detect how many
* pixels of conjunction are needed.
*
* It has no effect if no `arrowElement` is provided.
* @memberof modifiers
* @inner
*/
arrow: {
/** @prop {number} order=500 - Index used to define the order of execution */
order: 500,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: arrow,
/** @prop {String|HTMLElement} element='[x-arrow]' - Selector or node used as arrow */
element: '[x-arrow]'
},
/**
* Modifier used to flip the popper's placement when it starts to overlap its
* reference element.
*
* Requires the `preventOverflow` modifier before it in order to work.
*
* **NOTE:** this modifier will interrupt the current update cycle and will
* restart it if it detects the need to flip the placement.
* @memberof modifiers
* @inner
*/
flip: {
/** @prop {number} order=600 - Index used to define the order of execution */
order: 600,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: flip,
/**
* @prop {String|Array} behavior='flip'
* The behavior used to change the popper's placement. It can be one of
* `flip`, `clockwise`, `counterclockwise` or an array with a list of valid
* placements (with optional variations)
*/
behavior: 'flip',
/**
* @prop {number} padding=5
* The popper will flip if it hits the edges of the `boundariesElement`
*/
padding: 5,
/**
* @prop {String|HTMLElement} boundariesElement='viewport'
* The element which will define the boundaries of the popper position.
* The popper will never be placed outside of the defined boundaries
* (except if `keepTogether` is enabled)
*/
boundariesElement: 'viewport',
/**
* @prop {Boolean} flipVariations=false
* The popper will switch placement variation between `-start` and `-end` when
* the reference element overlaps its boundaries.
*
* The original placement should have a set variation.
*/
flipVariations: false,
/**
* @prop {Boolean} flipVariationsByContent=false
* The popper will switch placement variation between `-start` and `-end` when
* the popper element overlaps its reference boundaries.
*
* The original placement should have a set variation.
*/
flipVariationsByContent: false
},
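//
// Illustrative only (not part of the upstream source): besides the default 'flip'
// behavior documented above, a custom list of fallback placements can be supplied, e.g.
//   new Popper(reference, popper, {
//     modifiers: { flip: { behavior: ['left', 'bottom', 'top'] } }
//   });
// Popper.js will then try each placement in order until one fits the boundaries.
//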
/**
* Modifier used to make the popper flow toward the inside of the reference element.
* By default, when this modifier is disabled, the popper will be placed outside
* the reference element.
* @memberof modifiers
* @inner
*/
inner: {
/** @prop {number} order=700 - Index used to define the order of execution */
order: 700,
/** @prop {Boolean} enabled=false - Whether the modifier is enabled or not */
enabled: false,
/** @prop {ModifierFn} */
fn: inner
},
/**
* Modifier used to hide the popper when its reference element is outside of the
* popper boundaries. It will set an `x-out-of-boundaries` attribute which can
* be used to hide the popper with a CSS selector when its reference is
* out of boundaries.
*
* Requires the `preventOverflow` modifier before it in order to work.
* @memberof modifiers
* @inner
*/
hide: {
/** @prop {number} order=800 - Index used to define the order of execution */
order: 800,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: hide
},
/**
* Computes the style that will be applied to the popper element to get it
* properly positioned.
*
* Note that this modifier will not touch the DOM, it just prepares the styles
* so that `applyStyle` modifier can apply it. This separation is useful
* in case you need to replace `applyStyle` with a custom implementation.
*
* This modifier has `850` as `order` value to maintain backward compatibility
* with previous versions of Popper.js. Expect the modifiers ordering method
* to change in future major versions of the library.
*
* @memberof modifiers
* @inner
*/
computeStyle: {
/** @prop {number} order=850 - Index used to define the order of execution */
order: 850,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: computeStyle,
/**
* @prop {Boolean} gpuAcceleration=true
* If true, it uses the CSS 3D transformation to position the popper.
* Otherwise, it will use the `top` and `left` properties
*/
gpuAcceleration: true,
/**
* @prop {string} [x='bottom']
* Where to anchor the X axis (`bottom` or `top`). AKA X offset origin.
* Change this if your popper should grow in a direction different from `bottom`
*/
x: 'bottom',
/**
* @prop {string} [y='right']
* Where to anchor the Y axis (`left` or `right`). AKA Y offset origin.
* Change this if your popper should grow in a direction different from `right`
*/
y: 'right'
},
/**
* Applies the computed styles to the popper element.
*
* All the DOM manipulations are limited to this modifier. This is useful in case
* you want to integrate Popper.js inside a framework or view library and you
* want to delegate all the DOM manipulations to it.
*
* Note that if you disable this modifier, you must make sure the popper element
* has its position set to `absolute` before Popper.js can do its work!
*
* Just disable this modifier and define your own to achieve the desired effect.
*
* @memberof modifiers
* @inner
*/
applyStyle: {
/** @prop {number} order=900 - Index used to define the order of execution */
order: 900,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: applyStyle,
/** @prop {Function} */
onLoad: applyStyleOnLoad,
/**
* @deprecated since version 1.10.0, the property moved to `computeStyle` modifier
* @prop {Boolean} gpuAcceleration=true
* If true, it uses the CSS 3D transformation to position the popper.
* Otherwise, it will use the `top` and `left` properties
*/
gpuAcceleration: undefined
}
};
/**
* The `dataObject` is an object containing all the information used by Popper.js.
* This object is passed to modifiers and to the `onCreate` and `onUpdate` callbacks.
* @name dataObject
* @property {Object} data.instance The Popper.js instance
* @property {String} data.placement Placement applied to popper
* @property {String} data.originalPlacement Placement originally defined on init
* @property {Boolean} data.flipped True if popper has been flipped by flip modifier
* @property {Boolean} data.hide True if the reference element is out of boundaries, useful to know when to hide the popper
* @property {HTMLElement} data.arrowElement Node used as arrow by arrow modifier
* @property {Object} data.styles Any CSS property defined here will be applied to the popper. It expects the JavaScript nomenclature (eg. `marginBottom`)
* @property {Object} data.arrowStyles Any CSS property defined here will be applied to the popper arrow. It expects the JavaScript nomenclature (eg. `marginBottom`)
* @property {Object} data.boundaries Offsets of the popper boundaries
* @property {Object} data.offsets The measurements of popper, reference and arrow elements
* @property {Object} data.offsets.popper `top`, `left`, `width`, `height` values
* @property {Object} data.offsets.reference `top`, `left`, `width`, `height` values
* @property {Object} data.offsets.arrow `top` and `left` offsets; only one of them will be different from 0
*/
/**
* Default options provided to Popper.js constructor.<br />
* These can be overridden using the `options` argument of Popper.js.<br />
* To override an option, simply pass an object with the same
* structure of the `options` object, as the 3rd argument. For example:
* ```
* new Popper(ref, pop, {
* modifiers: {
* preventOverflow: { enabled: false }
* }
* })
* ```
* @type {Object}
* @static
* @memberof Popper
*/
var Defaults = {
/**
* Popper's placement.
* @prop {Popper.placements} placement='bottom'
*/
placement: 'bottom',
/**
* Set this to true if you want the popper to position itself in 'fixed' mode
* @prop {Boolean} positionFixed=false
*/
positionFixed: false,
/**
* Whether events (resize, scroll) are initially enabled.
* @prop {Boolean} eventsEnabled=true
*/
eventsEnabled: true,
/**
* Set to true if you want to automatically remove the popper when
* you call the `destroy` method.
* @prop {Boolean} removeOnDestroy=false
*/
removeOnDestroy: false,
/**
* Callback called when the popper is created.<br />
* By default, it is set to no-op.<br />
* Access Popper.js instance with `data.instance`.
* @prop {onCreate}
*/
onCreate: function onCreate() {},
/**
* Callback called when the popper is updated. This callback is not called
* on the initialization/creation of the popper, but only on subsequent
* updates.<br />
* By default, it is set to no-op.<br />
* Access Popper.js instance with `data.instance`.
* @prop {onUpdate}
*/
onUpdate: function onUpdate() {},
/**
* List of modifiers used to modify the offsets before they are applied to the popper.
* They provide most of the functionalities of Popper.js.
* @prop {modifiers}
*/
modifiers: modifiers
};
/**
* @callback onCreate
* @param {dataObject} data
*/
/**
* @callback onUpdate
* @param {dataObject} data
*/
// Utils
// Methods
var Popper = function () {
/**
* Creates a new Popper.js instance.
* @class Popper
* @param {Element|referenceObject} reference - The reference element used to position the popper
* @param {Element} popper - The HTML / XML element used as the popper
* @param {Object} options - Your custom options to override the ones defined in [Defaults](#defaults)
* @return {Object} instance - The generated Popper.js instance
*/
function Popper(reference, popper) {
var _this = this;
var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
classCallCheck(this, Popper);
this.scheduleUpdate = function () {
return requestAnimationFrame(_this.update);
};
// make update() debounced, so that it only runs at most once-per-tick
this.update = debounce(this.update.bind(this));
// with {} we create a new object with the options inside it
this.options = _extends({}, Popper.Defaults, options);
// init state
this.state = {
isDestroyed: false,
isCreated: false,
scrollParents: []
};
// get reference and popper elements (allow jQuery wrappers)
this.reference = reference && reference.jquery ? reference[0] : reference;
this.popper = popper && popper.jquery ? popper[0] : popper;
// Deep merge modifiers options
this.options.modifiers = {};
Object.keys(_extends({}, Popper.Defaults.modifiers, options.modifiers)).forEach(function (name) {
_this.options.modifiers[name] = _extends({}, Popper.Defaults.modifiers[name] || {}, options.modifiers ? options.modifiers[name] : {});
});
// Refactoring modifiers' list (Object => Array)
this.modifiers = Object.keys(this.options.modifiers).map(function (name) {
return _extends({
name: name
}, _this.options.modifiers[name]);
})
// sort the modifiers by order
.sort(function (a, b) {
return a.order - b.order;
});
// modifiers have the ability to execute arbitrary code when Popper.js gets initialized
// such code is executed in the same order as its modifier
// they can add new properties to their options configuration
// BE AWARE: don't add options to `options.modifiers.name` but to `modifierOptions`!
this.modifiers.forEach(function (modifierOptions) {
if (modifierOptions.enabled && isFunction(modifierOptions.onLoad)) {
modifierOptions.onLoad(_this.reference, _this.popper, _this.options, modifierOptions, _this.state);
}
});
// fire the first update to position the popper in the right place
this.update();
var eventsEnabled = this.options.eventsEnabled;
if (eventsEnabled) {
// set up event listeners; they will take care of updating the position in specific situations
this.enableEventListeners();
}
this.state.eventsEnabled = eventsEnabled;
}
// We can't use class properties because they don't get listed in the
// class prototype and break stuff like Sinon stubs
createClass(Popper, [{
key: 'update',
value: function update$$1() {
return update.call(this);
}
}, {
key: 'destroy',
value: function destroy$$1() {
return destroy.call(this);
}
}, {
key: 'enableEventListeners',
value: function enableEventListeners$$1() {
return enableEventListeners.call(this);
}
}, {
key: 'disableEventListeners',
value: function disableEventListeners$$1() {
return disableEventListeners.call(this);
}
/**
* Schedules an update. It will run on the next UI update available.
* @method scheduleUpdate
* @memberof Popper
*/
/**
* Collection of utilities useful when writing custom modifiers.
* Starting from version 1.7, this method is available only if you
* include `popper-utils.js` before `popper.js`.
*
* **DEPRECATION**: This way to access PopperUtils is deprecated
* and will be removed in v2! Use the PopperUtils module directly instead.
* Due to the high instability of the methods contained in Utils, we can't
* guarantee them to follow semver. Use them at your own risk!
* @static
* @private
* @type {Object}
* @deprecated since version 1.8
* @member Utils
* @memberof Popper
*/
}]);
return Popper;
}();
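// Illustrative only (not part of the upstream source): minimal usage of the class
// defined above, assuming `ref` and `pop` are elements already in the document:
//   var instance = new Popper(ref, pop, { placement: 'right-start' });
//   // ...later, when the popper is no longer needed:
//   instance.destroy();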
/**
* The `referenceObject` is an object that provides an interface compatible with Popper.js
* and lets you use it as a replacement for a real DOM node.<br />
* You can use this approach to position a popper relative to a set of coordinates
* in case you don't have a DOM node to use as reference.
*
* ```
* new Popper(referenceObject, popperNode);
* ```
*
* NB: This feature isn't supported in Internet Explorer 10.
* @name referenceObject
* @property {Function} data.getBoundingClientRect
* A function that returns a set of coordinates compatible with the native `getBoundingClientRect` method.
* @property {number} data.clientWidth
* An ES6 getter that will return the width of the virtual reference element.
* @property {number} data.clientHeight
* An ES6 getter that will return the height of the virtual reference element.
*/
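// Illustrative only (not part of the upstream source): a minimal referenceObject
// that positions a popper against fixed viewport coordinates (plain properties
// behave like the ES6 getters described above when they are only read):
//   var virtualReference = {
//     getBoundingClientRect: function () {
//       return { top: 100, right: 250, bottom: 120, left: 200, width: 50, height: 20 };
//     },
//     clientWidth: 50,
//     clientHeight: 20
//   };
//   new Popper(virtualReference, popperNode);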
Popper.Utils = (typeof window !== 'undefined' ? window : global).PopperUtils;
Popper.placements = placements;
Popper.Defaults = Defaults;
export default Popper;
//# sourceMappingURL=popper.js.map
|
PypiClean
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/django_channel.py
|
import types
import threading
from django import http
import amfast
from amfast.remoting import Packet
import amfast.remoting.flex_messages as messaging
from amfast.remoting.channel import HttpChannel, ChannelError
def django_response_wrapper(func):
'''
A decorator which wraps a bare response in a Django HttpResponse
'''
def _(channel, django_request):
response_packet = func(channel, django_request)
if response_packet is None:
return http.HttpResponse(mimetype = channel.CONTENT_TYPE)
elif type(response_packet) is types.GeneratorType:
http_response = http.HttpResponse(content=response_packet, mimetype=channel.CONTENT_TYPE)
return http_response
else:
raise ChannelError('Invalid response type.')
return _
class DjangoChannel(HttpChannel):
"""A channel that works with Django."""
# Attribute that holds Django's
# request object, so that it can
# be accessed from a target.
DJANGO_REQUEST = '_django_request'
def __call__(self, http_request):
if http_request.method != 'POST':
return http.HttpResponseNotAllowed(['POST'])
try:
request_packet = self.decode(http_request.raw_post_data)
setattr(request_packet, self.DJANGO_REQUEST, http_request)
except amfast.AmFastError, exc:
return http.HttpResponseBadRequest(mimetype='text/plain', content=self.getBadEncodingMsg())
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())
try:
response_packet = self.invoke(request_packet)
raw_response = self.encode(response_packet)
http_response = http.HttpResponse(mimetype=self.CONTENT_TYPE)
http_response['Content-Length'] = str(len(raw_response))
http_response.write(raw_response)
return http_response
except amfast.AmFastError, exc:
return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())
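# Illustrative only (not part of the original package): because DjangoChannel
# implements __call__(http_request), an instance can be mapped directly in
# Django's URLconf as a view. The names below are hypothetical, and the channel
# is normally also registered with an AmFast channel set before use (the
# handlers in this module rely on self.channel_set):
#
#     channel = DjangoChannel('django-amf-channel')
#     # urls.py:
#     #     (r'^amf/$', channel),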
class StreamingDjangoChannel(DjangoChannel):
"""Experimental support for streaming with Django."""
def __init__(self, name, max_connections=-1, endpoint=None,
wait_interval=0, heart_interval=30000):
DjangoChannel.__init__(self, name, max_connections=max_connections,
endpoint=endpoint, wait_interval=wait_interval)
self.heart_interval = heart_interval
def __call__(self, http_request):
if http_request.META['CONTENT_TYPE'] == self.CONTENT_TYPE:
return DjangoChannel.__call__(self, http_request)
try:
body = http_request.raw_post_data
msg = messaging.StreamingMessage()
msg.parseBody(body)
# Django's http_request object is well wrapped and contains all the WSGI options
msg.parseParams(http_request.META['QUERY_STRING'])
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
if msg.operation == msg.OPEN_COMMAND:
return self.startStream(msg)
elif msg.operation == msg.CLOSE_COMMAND:
return self.stopStream(msg)
raise ChannelError('Http streaming operation unknown: %s' % msg.operation)
@django_response_wrapper
def startStream(self, msg):
try:
connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
raise ChannelError('Failed to retrieve connection for streaming client.')
try:
timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
timer.daemon = True
timer.start()
inited = False
event = threading.Event()
connection.setNotifyFunc(event.set)
poll_secs = float(self.poll_interval) / 1000
while True:
if connection.connected is False:
msg = messaging.StreamingMessage.getDisconnectMsg()
try:
yield messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
finally:
# Client may have already disconnected
return
if inited is False:
# Send acknowledge message
response = msg.acknowledge()
response.body = connection.id
bytes = messaging.StreamingMessage.prepareMsg(response, self.endpoint)
inited = True
bytes += chr(messaging.StreamingMessage.NULL_BYTE) * self.KICKSTART_BYTES
yield bytes
if self.channel_set.notify_connections is True:
# Block until notification of new message
event.wait()
else:
# Block until poll_interval is reached
event.wait(poll_secs)
# Message has been published,
# or it's time for a heart beat
# Remove notify_func so that
# New messages don't trigger event.
connection.unSetNotifyFunc()
msgs = self.channel_set.subscription_manager.pollConnection(connection)
if len(msgs) > 0:
while len(msgs) > 0:
# Dispatch all messages to client
for msg in msgs:
try:
bytes = messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
self.channel_set.disconnect(connection)
break
try:
yield bytes
# return bytes
except (KeyboardInterrupt, SystemExit):
raise
except:
# Client has disconnected
self.channel_set.disconnect(connection)
return
msgs = self.channel_set.subscription_manager.pollConnection(connection)
else:
# Send heart beat
try:
yield chr(messaging.StreamingMessage.NULL_BYTE)
except (KeyboardInterrupt, SystemExit):
raise
except:
# Client has disconnected
self.channel_set.disconnect(connection)
return
# Create new event to trigger new messages or heart beats
event = threading.Event()
connection.setNotifyFunc(event.set)
except (KeyboardInterrupt, SystemExit):
raise
except Exception, exc:
amfast.log_exc(exc)
self.channel_set.disconnect(connection)
return
@django_response_wrapper
def stopStream(self, msg):
"""Stop a streaming connection."""
connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
connection.disconnect()
if hasattr(connection, "notify_func") and connection.notify_func is not None:
connection.notify_func()
@django_response_wrapper
def beat(self, connection):
"""Send a heart beat."""
if hasattr(connection, "notify_func") and connection.notify_func is not None:
connection.notify_func()
else:
return
# Create timer for next beat
timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
timer.daemon = True
timer.start()
|
PypiClean
|
/fastybird_metadata-0.77.0-py3-none-any.whl/fastybird_metadata/devices_module.py
|
# Copyright 2021. FastyBird s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sets of enums for Devices Module
"""
# Python base dependencies
from enum import unique
# Library libs
from fastybird_metadata.enum import ExtendedEnum
@unique
class PropertyType(ExtendedEnum):
"""
Property entity type
@package FastyBird:Metadata!
@module devices_module
@author Adam Kadlec <[email protected]>
"""
DYNAMIC: str = "dynamic"
VARIABLE: str = "variable"
MAPPED: str = "mapped"
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash(self._name_) # pylint: disable=no-member
@unique
class ConnectionState(ExtendedEnum):
"""
Device connection state
@package FastyBird:Metadata!
@module devices_module
@author Adam Kadlec <[email protected]>
"""
# Device is connected to gateway
CONNECTED: str = "connected"
# Device is disconnected from gateway
DISCONNECTED: str = "disconnected"
# Device is in initialization process
INIT: str = "init"
# Device is ready to operate
READY: str = "ready"
# Device is in operating mode
RUNNING: str = "running"
# Device is in sleep mode - support fow low power devices
SLEEPING: str = "sleeping"
# Device is not ready for receiving commands
STOPPED: str = "stopped"
# Connection with device is lost
LOST: str = "lost"
# Device has some error
ALERT: str = "alert"
# Device is in unknown state
UNKNOWN: str = "unknown"
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash(self._name_) # pylint: disable=no-member
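# Illustrative only (not part of the original package): members behave like any
# other Python Enum with string values, for example:
#
#     ConnectionState.CONNECTED.value                        # "connected"
#     ConnectionState("lost")                                # ConnectionState.LOST
#     ConnectionState.UNKNOWN is ConnectionState("unknown")  # True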
@unique
class DeviceModel(ExtendedEnum):
"""
Device known models
@package FastyBird:Metadata!
@module devices_module
@author Adam Kadlec <[email protected]>
"""
CUSTOM: str = "custom"
SONOFF_BASIC: str = "sonoff_basic"
SONOFF_RF: str = "sonoff_rf"
SONOFF_TH: str = "sonoff_th"
SONOFF_SV: str = "sonoff_sv"
SONOFF_SLAMPHER: str = "sonoff_slampher"
SONOFF_S20: str = "sonoff_s20"
SONOFF_TOUCH: str = "sonoff_touch"
SONOFF_POW: str = "sonoff_pow"
SONOFF_POW_R2: str = "sonoff_pow_r2"
SONOFF_DUAL: str = "sonoff_dual"
SONOFF_DUAL_R2: str = "sonoff_dual_r2"
SONOFF_4CH: str = "sonoff_4ch"
SONOFF_4CH_PRO: str = "sonoff_4ch_pro"
SONOFF_RF_BRIDGE: str = "sonoff_rf_bridge"
SONOFF_B1: str = "sonoff_b1"
SONOFF_LED: str = "sonoff_led"
SONOFF_T1_1CH: str = "sonoff_t1_1ch"
SONOFF_T1_2CH: str = "sonoff_t1_2ch"
SONOFF_T1_3CH: str = "sonoff_t1_3ch"
SONOFF_S31: str = "sonoff_s31"
SONOFF_SC: str = "sonoff_sc"
SONOFF_SC_PRO: str = "sonoff_sc_pro"
SONOFF_PS_15: str = "sonoff_ps_15"
AI_THINKER_AI_LIGHT: str = "ai_thinker_ai_light"
FASTYBIRD_WIFI_GW: str = "fastybird_wifi_gw"
FASTYBIRD_3CH_POWER_STRIP_R1: str = "fastybird_3ch_power_strip_r1"
FASTYBIRD_8CH_BUTTONS: str = "8ch_buttons"
FASTYBIRD_16CH_BUTTONS: str = "16ch_buttons"
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash(self._name_) # pylint: disable=no-member
@unique
class FirmwareManufacturer(ExtendedEnum):
"""
Device firmware manufacturer
@package FastyBird:Metadata!
@module devices_module
@author Adam Kadlec <[email protected]>
"""
GENERIC: str = "generic"
FASTYBIRD: str = "fastybird"
ITEAD = "itead"
SHELLY: str = "shelly"
TUYA: str = "tuya"
SONOFF: str = "sonoff"
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash(self._name_) # pylint: disable=no-member
@unique
class HardwareManufacturer(ExtendedEnum):
"""
Device hardware manufacturer
@package FastyBird:Metadata!
@module devices_module
@author Adam Kadlec <[email protected]>
"""
GENERIC = "generic"
FASTYBIRD = "fastybird"
ITEAD = "itead"
AI_THINKER = "ai_thinker"
SHELLY: str = "shelly"
TUYA: str = "tuya"
SONOFF: str = "sonoff"
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash(self._name_) # pylint: disable=no-member
@unique
class DevicePropertyIdentifier(ExtendedEnum):
"""
Device known property identifier
@package FastyBird:Metadata!
@module devices_module
@author Adam Kadlec <[email protected]>
"""
STATE: str = "state"
BATTERY: str = "battery"
WIFI: str = "wifi"
SIGNAL: str = "signal"
RSSI: str = "rssi"
SSID: str = "ssid"
VCC: str = "vcc"
CPU_LOAD: str = "cpu_load"
UPTIME: str = "uptime"
IP_ADDRESS: str = "ip_address"
ADDRESS: str = "address"
STATUS_LED: str = "status_led"
FREE_HEAP: str = "free_heap"
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash(self._name_) # pylint: disable=no-member
@unique
class DeviceAttributeIdentifier(ExtendedEnum):
"""
Device known attribute identifier
@package FastyBird:Metadata!
@module devices_module
@author Adam Kadlec <[email protected]>
"""
HARDWARE_MANUFACTURER: str = "hardware_manufacturer"
HARDWARE_MODEL: str = "hardware_model"
HARDWARE_VERSION: str = "hardware_version"
HARDWARE_MAC_ADDRESS: str = "hardware_mac_address"
FIRMWARE_MANUFACTURER: str = "firmware_manufacturer"
FIRMWARE_NAME: str = "firmware_name"
FIRMWARE_VERSION: str = "firmware_version"
@unique
class ConnectorPropertyIdentifier(ExtendedEnum):
"""
Connector known property identifier
@package FastyBird:Metadata!
@module devices_module
@author Adam Kadlec <[email protected]>
"""
STATE: str = "state"
SERVER: str = "server"
PORT: str = "port"
SECURED_PORT: str = "secured_port"
BAUD_RATE: str = "baud_rate"
INTERFACE: str = "interface"
ADDRESS: str = "address"
# -----------------------------------------------------------------------------
def __hash__(self) -> int:
return hash(self._name_) # pylint: disable=no-member
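# -----------------------------------------------------------------------------
# A minimal usage sketch (not part of the original module; the helper name is
# hypothetical). It assumes ExtendedEnum behaves like a standard enum.Enum
# subclass with string values, which the definitions above suggest.
def _example_connection_state_lookup(reported: str) -> bool:
    """Return True when a reported state string maps to the CONNECTED member."""
    state = ConnectionState(reported)           # look up the member by its value
    return state == ConnectionState.CONNECTED   # enum members compare by identity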
|
PypiClean
|
/BEATluzgool-1.0.1-py3-none-any.whl/econml/causal_forest.py
|
from .utilities import LassoCVWrapper, deprecated
from sklearn.linear_model import LogisticRegressionCV
from .dml import CausalForestDML
@deprecated("The CausalForest class has been deprecated by the econml.dml.CausalForestDML; "
"an upcoming release will remove support for the old class")
def CausalForest(n_trees=500,
min_leaf_size=10,
max_depth=10,
subsample_ratio=0.7,
lambda_reg=0.01,
model_T='auto',
model_Y=LassoCVWrapper(cv=3),
cv=2,
discrete_treatment=False,
categories='auto',
n_jobs=-1,
backend='threading',
verbose=0,
batch_size='auto',
random_state=None):
"""CausalForest for continuous treatments. To apply to discrete
treatments, first one-hot-encode your treatments and then pass the one-hot-encoding.
Parameters
----------
n_trees : integer, optional (default=500)
Number of causal estimators in the forest.
min_leaf_size : integer, optional (default=10)
The minimum number of samples in a leaf.
max_depth : integer, optional (default=10)
The maximum number of splits to be performed when expanding the tree.
subsample_ratio : float, optional (default=0.7)
The ratio of the total sample to be used when training a causal tree.
Values greater than 1.0 will be considered equal to 1.0.
lambda_reg : float, optional (default=0.01)
The regularization coefficient in the ell_2 penalty imposed on the
locally linear part of the second stage fit. This is not applied to
the local intercept, only to the coefficient of the linear component.
model_T : estimator, optional (default=sklearn.linear_model.LassoCV(cv=3))
The estimator for residualizing the continuous treatment.
Must implement `fit` and `predict` methods.
    model_Y : estimator, optional (default=sklearn.linear_model.LassoCV(cv=3))
The estimator for residualizing the outcome. Must implement
`fit` and `predict` methods.
cv : int, cross-validation generator or an iterable, optional (default=2)
The specification of the CV splitter to be used for cross-fitting, when constructing
the global residuals of Y and T.
discrete_treatment : bool, optional (default=False)
Whether the treatment should be treated as categorical. If True, then the treatment T is
one-hot-encoded and the model_T is treated as a classifier that must have a predict_proba
method.
categories : array like or 'auto', optional (default='auto')
A list of pre-specified treatment categories. If 'auto' then categories are automatically
recognized at fit time.
n_jobs : int, optional (default=-1)
The number of jobs to run in parallel for both :meth:`fit` and :meth:`effect`.
``-1`` means using all processors. Since OrthoForest methods are
computationally heavy, it is recommended to set `n_jobs` to -1.
backend : 'threading' or 'multiprocessing'
What backend should be used for parallelization with the joblib library.
random_state : int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
"""
return CausalForestDML(
model_t=model_T,
model_y=model_Y,
cv=cv,
discrete_treatment=discrete_treatment,
categories=categories,
n_estimators=n_trees,
criterion='het',
min_samples_leaf=min_leaf_size,
max_depth=max_depth,
max_samples=subsample_ratio / 2,
min_balancedness_tol=.3,
n_jobs=n_jobs,
verbose=verbose,
random_state=random_state
)
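# A minimal usage sketch (not part of the original module; the data below is
# synthetic and the helper name is hypothetical). It assumes numpy is available
# and only illustrates that the deprecated factory returns a configured
# CausalForestDML estimator exposing the usual fit/effect API.
def _example_causal_forest_usage():
    import numpy as np
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 3))                   # covariates
    T = rng.normal(size=200)                        # continuous treatment
    Y = 2.0 * T + X[:, 0] + rng.normal(size=200)    # outcome, true effect ~2
    est = CausalForest(n_trees=100, min_leaf_size=5)
    est.fit(Y, T, X=X)                              # residualize, then fit the forest
    return est.effect(X[:5])                        # heterogeneous effect estimates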
|
PypiClean
|
/decide_exchange_model-2022.1.18.tar.gz/decide_exchange_model-2022.1.18/decide/qt/mainwindow/errorgrid.py
|
import logging
import os
import sys
from typing import List
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QDialog
from decide import log_filename, input_folder
from decide.data.reader import InputDataFile
from decide.qt.utils import exception_hook
class ErrorGrid(QDialog):
"""
    Visualisation of the input file in a dialog to inform the user of the errors an input file has
"""
def __init__(self, data_file: InputDataFile, *args, **kwargs):
super(ErrorGrid, self).__init__(*args, **kwargs)
self.data_file = data_file
self.row_pointer = 0
self.main = QtWidgets.QGridLayout()
self.init_window()
def init_window(self):
central_widget = QtWidgets.QWidget()
central_widget.setLayout(self.main)
layout = QtWidgets.QHBoxLayout(self)
scroll_area = QtWidgets.QScrollArea(self)
scroll_area.setWidgetResizable(True)
scroll_area_widget_contents = QtWidgets.QWidget()
self.main = QtWidgets.QGridLayout(scroll_area_widget_contents)
scroll_area.setWidget(scroll_area_widget_contents)
layout.addWidget(scroll_area)
self.setLayout(layout)
self.showMaximized()
# self.setGeometry(300, 300, 400, 400)
self.setWindowTitle("Error reporting tool")
self.init_grid()
self.show()
def init_grid(self):
for row, columns in self.data_file.rows.items():
self.add_row(row, columns)
def add_row(self, row, columns: List[str]):
for column, content in enumerate(columns):
label = QtWidgets.QLabel(str(content))
if row in self.data_file.errors:
label.setStyleSheet("color: red")
error = self.data_file.errors[row]
label.setToolTip(str(error))
self.main.addWidget(label, row, column)
def main():
logging.basicConfig(
filename=log_filename,
filemode="w",
level=logging.DEBUG,
format=" %(asctime)s - %(levelname)s - %(message)s",
)
sys.excepthook = exception_hook
app = QtWidgets.QApplication(sys.argv)
app.setQuitOnLastWindowClosed(True)
data_file = InputDataFile.open(
os.path.join(input_folder, "kopenhagen_with_errors.csv")
)
error_dialog = ErrorGrid(data_file)
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
PypiClean
|
/django-fiber-1.10.tar.gz/django-fiber-1.10/fiber/static/fiber/js/ckeditor_4.6.2_b52fb43f6f3e/plugins/a11yhelp/dialogs/lang/ku.js
|
/*
Copyright (c) 2003-2017, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang("a11yhelp","ku",{title:"ڕێنمای لەبەردەستدابوون",contents:"پێکهاتەی یارمەتی. کلیك ESC بۆ داخستنی ئەم دیالۆگه.",legend:[{name:"گشتی",items:[{name:"تووڵامرازی دەستكاریكەر",legend:"کلیك ${toolbarFocus} بۆ ڕابەری تووڵامراز. بۆ گواستنەوەی پێشوو داهاتووی گرووپی تووڵامرازی داگرتنی کلیلی TAB لەگەڵ SHIFT+TAB. بۆ گواستنەوەی پێشوو داهاتووی دووگمەی تووڵامرازی لەڕێی کلیلی تیری دەستی ڕاست یان کلیلی تیری دەستی چەپ. کلیکی کلیلی SPACE یان ENTER بۆ چالاککردنی دووگمەی تووڵامراز."},{name:"دیالۆگی دەستكاریكەر",
legend:"Inside a dialog, press TAB to navigate to the next dialog element, press SHIFT+TAB to move to the previous dialog element, press ENTER to submit the dialog, press ESC to cancel the dialog. When a dialog has multiple tabs, the tab list can be reached either with ALT+F10 or with TAB as part of the dialog tabbing order. With tab list focused, move to the next and previous tab with RIGHT and LEFT ARROW, respectively."},{name:"پێڕستی سەرنووسەر",legend:"کلیك ${contextMenu} یان دوگمەی لیسته(Menu) بۆ کردنەوەی لیستەی دەق. بۆ چوونە هەڵبژاردەیەکی تر له لیسته کلیکی کلیلی TAB یان کلیلی تیری ڕوو لەخوارەوه بۆ چوون بۆ هەڵبژاردەی پێشوو کلیکی کلیلی SHIFT+TAB یان کلیلی تیری ڕوو له سەرەوە. داگرتنی کلیلی SPACE یان ENTER بۆ هەڵبژاردنی هەڵبژاردەی لیسته. بۆ کردنەوەی لقی ژێر لیسته لەهەڵبژاردەی لیستە کلیکی کلیلی SPACE یان ENTER یان کلیلی تیری دەستی ڕاست. بۆ گەڕانەوه بۆ سەرەوەی لیسته کلیکی کلیلی ESC یان کلیلی تیری دەستی چەپ. بۆ داخستنی لیستە کلیكی کلیلی ESC بکە."},
{name:"لیستی سنووقی سەرنووسەر",legend:"لەناو سنوقی لیست, چۆن بۆ هەڵنبژاردەی لیستێکی تر کلیکی کلیلی TAB یان کلیلی تیری ڕوو لەخوار. چوون بۆ هەڵبژاردەی لیستی پێشوو کلیکی کلیلی SHIFT+TAB یان کلیلی تیری ڕوو لەسەرەوه. کلیکی کلیلی SPACE یان ENTER بۆ دیاریکردنی هەڵبژاردەی لیست. کلیکی کلیلی ESC بۆ داخستنی سنوقی لیست."},{name:"تووڵامرازی توخم",legend:"کلیك ${elementsPathFocus} بۆ ڕابەری تووڵامرازی توخمەکان. چوون بۆ دوگمەی توخمێکی تر کلیکی کلیلی TAB یان کلیلی تیری دەستی ڕاست. چوون بۆ دوگمەی توخمی پێشوو کلیلی SHIFT+TAB یان کلیکی کلیلی تیری دەستی چەپ. داگرتنی کلیلی SPACE یان ENTER بۆ دیاریکردنی توخمەکه لەسەرنووسه."}]},
{name:"فەرمانەکان",items:[{name:"پووچکردنەوەی فەرمان",legend:"کلیك ${undo}"},{name:"هەڵگەڕانەوەی فەرمان",legend:"کلیك ${redo}"},{name:"فەرمانی دەقی قەڵەو",legend:"کلیك ${bold}"},{name:"فەرمانی دەقی لار",legend:"کلیك ${italic}"},{name:"فەرمانی ژێرهێڵ",legend:"کلیك ${underline}"},{name:"فەرمانی بهستەر",legend:"کلیك ${link}"},{name:"شاردەنەوەی تووڵامراز",legend:"کلیك ${toolbarCollapse}"},{name:"چوونەناو سەرنجدانی پێشوی فەرمانی بۆشایی",legend:"کلیک ${accessPreviousSpace} to access the closest unreachable focus space before the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},
{name:"چوونەناو سەرنجدانی داهاتووی فەرمانی بۆشایی",legend:"کلیک ${accessNextSpace} to access the closest unreachable focus space after the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},{name:"دەستپێگەیشتنی یارمەتی",legend:"کلیك ${a11yHelp}"}]}],tab:"Tab",pause:"Pause",capslock:"Caps Lock",escape:"Escape",pageUp:"Page Up",pageDown:"Page Down",leftArrow:"Left Arrow",upArrow:"Up Arrow",rightArrow:"Right Arrow",downArrow:"Down Arrow",insert:"Insert",
leftWindowKey:"پەنجەرەی چەپ",rightWindowKey:"پەنجەرەی ڕاست",selectKey:"Select",numpad0:"Numpad 0",numpad1:"1",numpad2:"2",numpad3:"3",numpad4:"4",numpad5:"5",numpad6:"6",numpad7:"7",numpad8:"8",numpad9:"9",multiply:"*",add:"+",subtract:"-",decimalPoint:".",divide:"/",f1:"F1",f2:"F2",f3:"F3",f4:"F4",f5:"F5",f6:"F6",f7:"F7",f8:"F8",f9:"F9",f10:"F10",f11:"F11",f12:"F12",numLock:"Num Lock",scrollLock:"Scroll Lock",semiColon:";",equalSign:"\x3d",comma:",",dash:"-",period:".",forwardSlash:"/",graveAccent:"`",
openBracket:"[",backSlash:"\\\\",closeBracket:"}",singleQuote:"'"});
|
PypiClean
|
/segmentation_models_pytorch_deepflash2-0.3.0-py3-none-any.whl/segmentation_models_pytorch_deepflash2/encoders/efficientnet.py
|
import torch.nn as nn
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch.utils import url_map, url_map_advprop, get_model_params
from ._base import EncoderMixin
class EfficientNetEncoder(EfficientNet, EncoderMixin):
def __init__(self, stage_idxs, out_channels, model_name, depth=5):
blocks_args, global_params = get_model_params(model_name, override_params=None)
super().__init__(blocks_args, global_params)
self._stage_idxs = stage_idxs
self._out_channels = out_channels
self._depth = depth
self._in_channels = 3
del self._fc
def get_stages(self):
return [
nn.Identity(),
nn.Sequential(self._conv_stem, self._bn0, self._swish),
self._blocks[: self._stage_idxs[0]],
self._blocks[self._stage_idxs[0] : self._stage_idxs[1]],
self._blocks[self._stage_idxs[1] : self._stage_idxs[2]],
self._blocks[self._stage_idxs[2] :],
]
def forward(self, x):
stages = self.get_stages()
block_number = 0.0
drop_connect_rate = self._global_params.drop_connect_rate
features = []
for i in range(self._depth + 1):
# Identity and Sequential stages
if i < 2:
x = stages[i](x)
# Block stages need drop_connect rate
else:
for module in stages[i]:
drop_connect = drop_connect_rate * block_number / len(self._blocks)
block_number += 1.0
x = module(x, drop_connect)
features.append(x)
return features
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop("_fc.bias", None)
state_dict.pop("_fc.weight", None)
super().load_state_dict(state_dict, **kwargs)
def _get_pretrained_settings(encoder):
pretrained_settings = {
"imagenet": {
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225],
"url": url_map[encoder],
"input_space": "RGB",
"input_range": [0, 1],
},
"advprop": {
"mean": [0.5, 0.5, 0.5],
"std": [0.5, 0.5, 0.5],
"url": url_map_advprop[encoder],
"input_space": "RGB",
"input_range": [0, 1],
},
}
return pretrained_settings
efficient_net_encoders = {
"efficientnet-b0": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b0"),
"params": {
"out_channels": (3, 32, 24, 40, 112, 320),
"stage_idxs": (3, 5, 9, 16),
"model_name": "efficientnet-b0",
},
},
"efficientnet-b1": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b1"),
"params": {
"out_channels": (3, 32, 24, 40, 112, 320),
"stage_idxs": (5, 8, 16, 23),
"model_name": "efficientnet-b1",
},
},
"efficientnet-b2": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b2"),
"params": {
"out_channels": (3, 32, 24, 48, 120, 352),
"stage_idxs": (5, 8, 16, 23),
"model_name": "efficientnet-b2",
},
},
"efficientnet-b3": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b3"),
"params": {
"out_channels": (3, 40, 32, 48, 136, 384),
"stage_idxs": (5, 8, 18, 26),
"model_name": "efficientnet-b3",
},
},
"efficientnet-b4": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b4"),
"params": {
"out_channels": (3, 48, 32, 56, 160, 448),
"stage_idxs": (6, 10, 22, 32),
"model_name": "efficientnet-b4",
},
},
"efficientnet-b5": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b5"),
"params": {
"out_channels": (3, 48, 40, 64, 176, 512),
"stage_idxs": (8, 13, 27, 39),
"model_name": "efficientnet-b5",
},
},
"efficientnet-b6": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b6"),
"params": {
"out_channels": (3, 56, 40, 72, 200, 576),
"stage_idxs": (9, 15, 31, 45),
"model_name": "efficientnet-b6",
},
},
"efficientnet-b7": {
"encoder": EfficientNetEncoder,
"pretrained_settings": _get_pretrained_settings("efficientnet-b7"),
"params": {
"out_channels": (3, 64, 48, 80, 224, 640),
"stage_idxs": (11, 18, 38, 55),
"model_name": "efficientnet-b7",
},
},
}
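# A minimal usage sketch (not part of the original module; the helper name is
# hypothetical). It mirrors how an encoder could be built directly from the
# registry above; loading the ImageNet weights listed in `pretrained_settings`
# is intentionally omitted.
def _example_build_encoder(name: str = "efficientnet-b0"):
    entry = efficient_net_encoders[name]
    encoder = entry["encoder"](depth=5, **entry["params"])
    return encoder  # encoder.forward(x) yields one feature map per stage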
|
PypiClean
|
/wasp-launcher-0.0.2.tar.gz/wasp-launcher-0.0.2/wasp_launcher/static/angular/angular-1.6.1/i18n/angular-locale_lu-cd.js
|
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"Dinda",
"Dilolo"
],
"DAY": [
"Lumingu",
"Nkodya",
"Nd\u00e0ay\u00e0",
"Ndang\u00f9",
"Nj\u00f2wa",
"Ng\u00f2vya",
"Lubingu"
],
"ERANAMES": [
"Kumpala kwa Yezu Kli",
"Kunyima kwa Yezu Kli"
],
"ERAS": [
"kmp. Y.K.",
"kny. Y. K."
],
"FIRSTDAYOFWEEK": 0,
"MONTH": [
"Ciongo",
"L\u00f9ishi",
"Lus\u00f2lo",
"M\u00f9uy\u00e0",
"Lum\u00f9ng\u00f9l\u00f9",
"Lufuimi",
"Kab\u00e0l\u00e0sh\u00ecp\u00f9",
"L\u00f9sh\u00eck\u00e0",
"Lutongolo",
"Lung\u00f9di",
"Kasw\u00e8k\u00e8s\u00e8",
"Cisw\u00e0"
],
"SHORTDAY": [
"Lum",
"Nko",
"Ndy",
"Ndg",
"Njw",
"Ngv",
"Lub"
],
"SHORTMONTH": [
"Cio",
"Lui",
"Lus",
"Muu",
"Lum",
"Luf",
"Kab",
"Lush",
"Lut",
"Lun",
"Kas",
"Cis"
],
"STANDALONEMONTH": [
"Ciongo",
"L\u00f9ishi",
"Lus\u00f2lo",
"M\u00f9uy\u00e0",
"Lum\u00f9ng\u00f9l\u00f9",
"Lufuimi",
"Kab\u00e0l\u00e0sh\u00ecp\u00f9",
"L\u00f9sh\u00eck\u00e0",
"Lutongolo",
"Lung\u00f9di",
"Kasw\u00e8k\u00e8s\u00e8",
"Cisw\u00e0"
],
"WEEKENDRANGE": [
5,
6
],
"fullDate": "EEEE d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y HH:mm:ss",
"mediumDate": "d MMM y",
"mediumTime": "HH:mm:ss",
"short": "d/M/y HH:mm",
"shortDate": "d/M/y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "FrCD",
"DECIMAL_SEP": ",",
"GROUP_SEP": ".",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a4",
"posPre": "",
"posSuf": "\u00a4"
}
]
},
"id": "lu-cd",
"localeID": "lu_CD",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
|
PypiClean
|
/deepNets-0.1.8.tar.gz/deepNets-0.1.8/.eggs/py-1.10.0-py3.8.egg/py/_code/_assertionnew.py
|
import sys
import ast
import py
from py._code.assertion import _format_explanation, BuiltinAssertionError
def _is_ast_expr(node):
return isinstance(node, ast.expr)
def _is_ast_stmt(node):
return isinstance(node, ast.stmt)
class Failure(Exception):
"""Error found while interpreting AST."""
def __init__(self, explanation=""):
self.cause = sys.exc_info()
self.explanation = explanation
def interpret(source, frame, should_fail=False):
mod = ast.parse(source)
visitor = DebugInterpreter(frame)
try:
visitor.visit(mod)
except Failure:
failure = sys.exc_info()[1]
return getfailure(failure)
if should_fail:
return ("(assertion failed, but when it was re-run for "
"printing intermediate values, it did not fail. Suggestions: "
"compute assert expression before the assert or use --no-assert)")
def run(offending_line, frame=None):
if frame is None:
frame = py.code.Frame(sys._getframe(1))
return interpret(offending_line, frame)
def getfailure(failure):
explanation = _format_explanation(failure.explanation)
value = failure.cause[1]
if str(value):
lines = explanation.splitlines()
if not lines:
lines.append("")
lines[0] += " << %s" % (value,)
explanation = "\n".join(lines)
text = "%s: %s" % (failure.cause[0].__name__, explanation)
if text.startswith("AssertionError: assert "):
text = text[16:]
return text
operator_map = {
ast.BitOr : "|",
ast.BitXor : "^",
ast.BitAnd : "&",
ast.LShift : "<<",
ast.RShift : ">>",
ast.Add : "+",
ast.Sub : "-",
ast.Mult : "*",
ast.Div : "/",
ast.FloorDiv : "//",
ast.Mod : "%",
ast.Eq : "==",
ast.NotEq : "!=",
ast.Lt : "<",
ast.LtE : "<=",
ast.Gt : ">",
ast.GtE : ">=",
ast.Pow : "**",
ast.Is : "is",
ast.IsNot : "is not",
ast.In : "in",
ast.NotIn : "not in"
}
unary_map = {
ast.Not : "not %s",
ast.Invert : "~%s",
ast.USub : "-%s",
ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
"""Interpret AST nodes to gleam useful debugging information. """
def __init__(self, frame):
self.frame = frame
def generic_visit(self, node):
# Fallback when we don't have a special implementation.
if _is_ast_expr(node):
mod = ast.Expression(node)
co = self._compile(mod)
try:
result = self.frame.eval(co)
except Exception:
raise Failure()
explanation = self.frame.repr(result)
return explanation, result
elif _is_ast_stmt(node):
mod = ast.Module([node])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co)
except Exception:
raise Failure()
return None, None
else:
raise AssertionError("can't handle %s" %(node,))
def _compile(self, source, mode="eval"):
return compile(source, "<assertion interpretation>", mode)
def visit_Expr(self, expr):
return self.visit(expr.value)
def visit_Module(self, mod):
for stmt in mod.body:
self.visit(stmt)
def visit_Name(self, name):
explanation, result = self.generic_visit(name)
# See if the name is local.
source = "%r in locals() is not globals()" % (name.id,)
co = self._compile(source)
try:
local = self.frame.eval(co)
except Exception:
# have to assume it isn't
local = False
if not local:
return name.id, result
return explanation, result
def visit_Compare(self, comp):
left = comp.left
left_explanation, left_result = self.visit(left)
for op, next_op in zip(comp.ops, comp.comparators):
next_explanation, next_result = self.visit(next_op)
op_symbol = operator_map[op.__class__]
explanation = "%s %s %s" % (left_explanation, op_symbol,
next_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=next_result)
except Exception:
raise Failure(explanation)
try:
if not result:
break
except KeyboardInterrupt:
raise
except:
break
left_explanation, left_result = next_explanation, next_result
rcomp = py.code._reprcompare
if rcomp:
res = rcomp(op_symbol, left_result, next_result)
if res:
explanation = res
return explanation, result
def visit_BoolOp(self, boolop):
is_or = isinstance(boolop.op, ast.Or)
explanations = []
for operand in boolop.values:
explanation, result = self.visit(operand)
explanations.append(explanation)
if result == is_or:
break
name = is_or and " or " or " and "
explanation = "(" + name.join(explanations) + ")"
return explanation, result
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_explanation, operand_result = self.visit(unary.operand)
explanation = pattern % (operand_explanation,)
co = self._compile(pattern % ("__exprinfo_expr",))
try:
result = self.frame.eval(co, __exprinfo_expr=operand_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_BinOp(self, binop):
left_explanation, left_result = self.visit(binop.left)
right_explanation, right_result = self.visit(binop.right)
symbol = operator_map[binop.op.__class__]
explanation = "(%s %s %s)" % (left_explanation, symbol,
right_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=right_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_Call(self, call):
func_explanation, func = self.visit(call.func)
arg_explanations = []
ns = {"__exprinfo_func" : func}
arguments = []
for arg in call.args:
arg_explanation, arg_result = self.visit(arg)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
arguments.append(arg_name)
arg_explanations.append(arg_explanation)
for keyword in call.keywords:
arg_explanation, arg_result = self.visit(keyword.value)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
keyword_source = "%s=%%s" % (keyword.arg)
arguments.append(keyword_source % (arg_name,))
arg_explanations.append(keyword_source % (arg_explanation,))
if call.starargs:
arg_explanation, arg_result = self.visit(call.starargs)
arg_name = "__exprinfo_star"
ns[arg_name] = arg_result
arguments.append("*%s" % (arg_name,))
arg_explanations.append("*%s" % (arg_explanation,))
if call.kwargs:
arg_explanation, arg_result = self.visit(call.kwargs)
arg_name = "__exprinfo_kwds"
ns[arg_name] = arg_result
arguments.append("**%s" % (arg_name,))
arg_explanations.append("**%s" % (arg_explanation,))
args_explained = ", ".join(arg_explanations)
explanation = "%s(%s)" % (func_explanation, args_explained)
args = ", ".join(arguments)
source = "__exprinfo_func(%s)" % (args,)
co = self._compile(source)
try:
result = self.frame.eval(co, **ns)
except Exception:
raise Failure(explanation)
pattern = "%s\n{%s = %s\n}"
rep = self.frame.repr(result)
explanation = pattern % (rep, rep, explanation)
return explanation, result
def _is_builtin_name(self, name):
pattern = "%r not in globals() and %r not in locals()"
source = pattern % (name.id, name.id)
co = self._compile(source)
try:
return self.frame.eval(co)
except Exception:
return False
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
source_explanation, source_result = self.visit(attr.value)
explanation = "%s.%s" % (source_explanation, attr.attr)
source = "__exprinfo_expr.%s" % (attr.attr,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
raise Failure(explanation)
explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
self.frame.repr(result),
source_explanation, attr.attr)
# Check if the attr is from an instance.
source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
source = source % (attr.attr,)
co = self._compile(source)
try:
from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
from_instance = True
if from_instance:
rep = self.frame.repr(result)
pattern = "%s\n{%s = %s\n}"
explanation = pattern % (rep, rep, explanation)
return explanation, result
def visit_Assert(self, assrt):
test_explanation, test_result = self.visit(assrt.test)
if test_explanation.startswith("False\n{False =") and \
test_explanation.endswith("\n"):
test_explanation = test_explanation[15:-2]
explanation = "assert %s" % (test_explanation,)
if not test_result:
try:
raise BuiltinAssertionError
except Exception:
raise Failure(explanation)
return explanation, test_result
def visit_Assign(self, assign):
value_explanation, value_result = self.visit(assign.value)
explanation = "... = %s" % (value_explanation,)
name = ast.Name("__exprinfo_expr", ast.Load(),
lineno=assign.value.lineno,
col_offset=assign.value.col_offset)
new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
col_offset=assign.col_offset)
mod = ast.Module([new_assign])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co, __exprinfo_expr=value_result)
except Exception:
raise Failure(explanation)
return explanation, value_result
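# A minimal usage sketch (not part of the original module; the helper name is
# hypothetical). It re-interprets a failing assertion in the caller's frame and
# returns the formatted explanation built by DebugInterpreter.
def _example_reinterpret_assertion():
    x = 1
    y = 2
    # `run` grabs this function's frame by default, re-evaluates the
    # sub-expressions and returns a message along the lines of "assert 1 == 2".
    return run("assert x == y")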
|
PypiClean
|
/django-projector-0.2.0.tar.gz/django-projector-0.2.0/example_project/settings.py
|
import os
import sys
from django.conf import global_settings
abspath = lambda *p: os.path.abspath(os.path.join(*p))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECTOR_HG_PUSH_SSL = False
PROJECT_ROOT = abspath(os.path.dirname(__file__))
PROJECTOR_MODULE_PATH = abspath(PROJECT_ROOT, '..')
sys.path.insert(0, PROJECTOR_MODULE_PATH)
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': abspath(PROJECT_ROOT, '.hidden.db'),
'TEST_NAME': ':memory:',
},
}
# Make sqlite3 files relative to project's directory
for db, conf in DATABASES.items():
    if conf['ENGINE'].endswith('sqlite3') and not conf['NAME'].startswith(':'):
conf['NAME'] = abspath(PROJECT_ROOT, conf['NAME'])
INSTALLED_APPS = (
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'native_tags',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.comments',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.webdesign',
# External
'djalog',
'django_extensions',
'django_sorting',
'djcelery',
'djcelery_email',
'ghettoq',
'gravatar',
'guardian',
'pagination',
'registration',
'richtemplates',
'projector',
'vcs.web.simplevcs',
'sss',
'example_project',
)
ADMINBROWSE_MEDIA_URL = '/media/adminbrowse/'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'djalog.middleware.SQLLoggingMiddleware',
'richtemplates.middleware.Http403Middleware',
'django_sorting.middleware.SortingMiddleware',
'vcs.web.simplevcs.middleware.PaginationMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
MEDIA_ROOT = abspath(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/admin-media/'
ROOT_URLCONF = 'example_project.urls'
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'richtemplates.context_processors.media',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
'django.template.loaders.eggs.load_template_source',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
SITE_ID = 1
USE_I18N = True
USE_L10N = True
CACHE_PREFIX = 'projector-example-project'
#CACHE_TIMEOUT = 1 # For dev server
LOGIN_REDIRECT_URL = '/'
AUTH_PROFILE_MODULE = 'projector.UserProfile'
# ================== #
# PROJECTOR SETTINGS #
# ================== #
PROJECTOR_PROJECTS_ROOT_DIR = abspath(
PROJECT_ROOT, 'projects')
PROJECTOR_BANNED_PROJECT_NAMES = ('barfoo',)
PROJECTOR_SEND_MAIL_ASYNCHRONOUSELY = True
PROJECTOR_CREATE_PROJECT_ASYNCHRONOUSLY = True
PROJECTOR_FORK_EXTERNAL_ENABLED = True
# =============== #
# DJALOG SETTINGS #
# =============== #
DJALOG_SQL = True
DJALOG_SQL_SUMMARY_ONLY = True
DJALOG_LEVEL = 5
DJALOG_USE_COLORS = True
DJALOG_FORMAT = "[%(levelname)s] %(message)s"
# ====================== #
# RICHTEMPLATES SETTINGS #
# ====================== #
RICHTEMPLATES_RESTRUCTUREDTEXT_DIRECTIVES = {
'code-block': 'richtemplates.rstdirectives.CodeBlock',
}
RICHTEMPLATES_DEFAULT_SKIN = 'ruby'
RICHTEMPLATES_PYGMENTS_STYLES = {
'irblack': 'richtemplates.pygstyles.irblack.IrBlackStyle',
}
# ==================== #
# NATIVE_TAGS SETTINGS #
# ==================== #
NATIVE_TAGS = (
'richtemplates.templatetags.native',
'projector.templatetags.native',
)
# ====================== #
# DEBUG TOOLBAR SETTINGS #
# ====================== #
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
# ================ #
# REQUEST SETTINGS #
# ================ #
REQUEST_IGNORE_PATHS = (
r'^%s' % MEDIA_URL.lstrip('/'),
r'^%s' % ADMIN_MEDIA_PREFIX.lstrip('/'),
)
# ============== #
# EMAIL SETTINGS #
# ============== #
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_SUBJECT_PREFIX = '[Django] '
EMAIL_USE_TLS = False
EMAIL_BACKEND = 'djcelery_email.backends.CeleryEmailBackend'
CELERY_EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
CELERY_EMAIL_TASK_CONFIG = {
'queue' : 'email',
'rate_limit' : '60/m', # 60 emails per minute
}
# ======================= #
# AUTHENTICATION SETTINGS #
# ======================= #
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # this is default
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
ACCOUNT_ACTIVATION_DAYS = 7
GRAVATAR_DEFAULT_IMAGE = 'mm'
try:
from conf.local_settings import *
try:
for app in LOCAL_INSTALLED_APPS:
if app not in INSTALLED_APPS:
INSTALLED_APPS += (app,)
for middleware in LOCAL_MIDDLEWARE_CLASSES:
if middleware not in MIDDLEWARE_CLASSES:
MIDDLEWARE_CLASSES += (middleware,)
except NameError:
pass
except ImportError:
pass
# =============== #
# CELERY SETTINGS #
# =============== #
CARROT_BACKEND = "ghettoq.taproot.Database"
BROKER_CONNECTION_MAX_RETRIES = 0
CELERY_ALWAYS_EAGER = False
CELERYD_MAX_TASKS_PER_CHILD = 100
CELERYD_LOG_LEVEL = 'DEBUG'
|
PypiClean
|
/ddbc-0.0.4.tar.gz/ddbc-0.0.4/README.txt
|
ddbc
====
|Version| |Build Status| |Coverage|
Description
===========
Amazon DynamoDB as a cache store.
Requirements
============
- Python2.7
- pip
Installation
============
PyPI
----
.. code:: sh
pip install ddbc
Setup
=====
- Create IAM Role or User
Policy example:
.. code:: json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"dynamodb:CreateTable",
"dynamodb:DeleteItem",
"dynamodb:GetItem",
"dynamodb:PutItem"
],
"Resource": "arn:aws:dynamodb:<region>:<account-id>:table/<cache-table>"
}
]
}
- Create the DynamoDB table for cache
Script Example:
.. code:: python
#!/usr/bin/env python
import ddbc.utils
ddbc.utils.create_table(
table_name='cache_table',
region='us-east-1', # optional
read_units=10, # default: 5
write_units=10 # default: 5
)
Usage
=====
.. code:: python
import ddbc.cache
import time
cache = ddbc.cache.Client(
table_name='cache_table',
region='us-east-1', # optional
default_ttl=100, # default: -1 (Infinity)
report_error=True # default: False
)
cache['foo'] = 'bar'
print(cache['foo']) # => 'bar'
time.sleep(100)
print(cache['foo']) # => None
cache.set('foo', 'bar', 1000)
time.sleep(100)
print(cache['foo']) # => 'bar'
del cache['foo']
print(cache.get('foo', 'buz')) # => 'buz'
Development
-----------
- Source hosted at `GitHub <https://github.com/marcy-terui/ddbc>`__
- Report issues/questions/feature requests on `GitHub
Issues <https://github.com/marcy-terui/ddbc/issues>`__
Pull requests are very welcome! Make sure your patches are well tested.
Ideally create a topic branch for every separate change you make. For
example:
1. Fork the repo
2. Create your feature branch (``git checkout -b my-new-feature``)
3. Commit your changes (``git commit -am 'Added some feature'``)
4. Push to the branch (``git push origin my-new-feature``)
5. Create new Pull Request
Authors
-------
Created and maintained by `Masashi
Terui <https://github.com/marcy-terui>`__ ([email protected])
License
-------
MIT License (see
`LICENSE <https://github.com/marcy-terui/ddbc/blob/master/LICENSE>`__)
.. |Version| image:: https://img.shields.io/pypi/v/ddbc.svg
:target: https://pypi.python.org/pypi/ddbc
.. |Build Status| image:: https://img.shields.io/travis/marcy-terui/ddbc/master.svg
:target: http://travis-ci.org/marcy-terui/ddbc
.. |Coverage| image:: https://img.shields.io/coveralls/marcy-terui/ddbc.svg
:target: https://coveralls.io/github/marcy-terui/ddbc
|
PypiClean
|
/graphql-example-0.4.4.tar.gz/graphql-example-0.4.4/vendor/pipenv/vendor/jinja2/nodes.py
|
import types
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import izip, with_metaclass, text_type, PY2
#: the types we support for context functions
_context_function_types = (types.FunctionType, types.MethodType)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will all generate nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
def dump(self):
def _dump(node):
if not isinstance(node, Node):
buf.append(repr(node))
return
buf.append('nodes.%s(' % node.__class__.__name__)
if not node.fields:
buf.append(')')
return
for idx, field in enumerate(node.fields):
if idx:
buf.append(', ')
value = getattr(node, field)
if isinstance(value, list):
buf.append('[')
for idx, item in enumerate(value):
if idx:
buf.append(', ')
_dump(item)
buf.append(']')
else:
_dump(value)
buf.append(')')
buf = []
_dump(self)
return ''.join(buf)
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class With(Stmt):
"""Specific node for with statements. In older versions of Jinja the
with statement was implemented on the base of the `Scope` node instead.
.. versionadded:: 2.9.3
"""
fields = ('targets', 'values', 'body')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class AssignBlock(Stmt):
"""Assigns a block to a target."""
fields = ('target', 'body')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) are supported.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
rv = self.value
if PY2 and type(rv) is text_type and \
self.environment.policies['compiler.ascii_str']:
try:
rv = rv.encode('ascii')
except UnicodeError:
pass
return rv
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
        # builtin filter function here which no longer returns a list in
        # Python 3. Because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
# We cannot constant handle async filters, so we need to make sure
# to not go down this path.
if eval_ctx.environment.is_async and \
getattr(filter_, 'asyncfiltervariant', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [obj] + [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(*args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Subtract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here is an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
|
PypiClean
|
/theprometheus-1.0.19.tar.gz/theprometheus-1.0.19/prometheus/prometheus.py
|
__authors__ = 'David Nidever <[email protected]>'
__version__ = '20210915' # yyyymmdd
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.table import Table,vstack
import logging
import time
from dlnpyutils import utils as dln
from . import detection, aperture, models, getpsf, allfit, utils
from .ccddata import CCDData
try:
import __builtin__ as builtins # Python 2
except ImportError:
import builtins # Python 3
# run PSF fitting on an image
def run(image,psfname='gaussian',iterdet=0,ndetsigma=1.5,snrthresh=5,psfsubnei=False,psffitradius=None,
fitradius=None,npsfpix=51,binned=False,lookup=False,lorder=0,psftrim=None,recenter=True,
reject=False,apcorr=False,timestamp=False,verbose=False):
"""
Run PSF photometry on an image.
Parameters
----------
image : string or CCDData object
The input image to fit. This can be the filename or CCDData object.
psfname : string, optional
The name of the PSF type to use. The options are "gaussian", "moffat",
"penny" and "gausspow". Default is "gaussian".
iterdet : int, optional
Number of iterations to use for detection. Default is iterdet=0, meaning
detection is only performed once.
ndetsigma : float, optional
Detection threshold in units of sigma. Default is 1.5.
snrthresh : float, optional
Signal-to-Noise threshold for detections. Default is 5.
psfsubnei : boolean, optional
Subtract neighboring stars to PSF stars when generating the PSF. Default is False.
psffitradius : float, optional
The fitting radius when constructing the PSF (in pixels). By default
the FWHM is used.
fitradius: float, optional
The fitting radius when fitting the PSF to the stars in the image (in pixels).
By default the PSF FWHM is used.
npsfpix : int, optional
The size of the PSF footprint. Default is 51.
binned : boolean, optional
Use a binned model that integrates the analytical function across a pixel.
Default is false.
lookup : boolean, optional
Use an empirical lookup table. Default is False.
lorder : int, optional
The order of the spatial variations (0=constant, 1=linear). Default is 0.
psftrim: float, optional
Trim the PSF size to a radius where "psftrim" fraction of flux is removed. Default is None.
recenter : boolean, optional
Allow the centroids to be fit. Default is True.
reject : boolean, optional
When constructing the PSF, reject PSF stars with high RMS values. Default is False.
apcorr : boolean, optional
Apply aperture correction. Default is False.
timestamp : boolean, optional
Add timestamp in verbose output (if verbose=True). Default is False.
verbose : boolean, optional
Verbose output to the screen. Default is False.
Returns
-------
cat : table
The output table of best-fit PSF values for all of the sources.
model : CCDData object
The best-fitting model for the stars (without sky).
sky : CCDData object
The background sky image used for the image.
psf : PSF object
The best-fitting PSF model.
Example
-------
cat,model,sky,psf = prometheus.run(image,psfname='gaussian',verbose=True)
"""
# Set up the logger
if timestamp and verbose:
logger = dln.basiclogger()
logger.handlers[0].setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
logger.handlers[0].setStream(sys.stdout)
builtins.logger = logger # make it available globally across all modules
start = time.time()
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Load the file
if isinstance(image,str):
filename = image
if verbose:
print('Loading image from "'+filename+'"')
image = CCDData.read(filename)
if isinstance(image,CCDData) is False:
raise ValueError('Input image must be a filename or CCDData object')
if verbose:
print('Image shape ',image.shape)
residim = image.copy()
# Processing steps
#-----------------
for niter in range(iterdet+1):
if verbose and iterdet>0:
print('--- Iteration = '+str(niter+1)+' ---')
# 1) Detection
#-------------
if verbose:
print('Step 1: Detection')
objects = detection.detect(residim,nsigma=ndetsigma,verbose=verbose)
objects['ndetiter'] = niter+1
if verbose:
print(str(len(objects))+' objects detected')
# 2) Aperture photometry
#-----------------------
if verbose:
print('Step 2: Aperture photometry')
objects = aperture.aperphot(residim,objects)
nobjects = len(objects)
# Bright and faint limit, use 5th and 95th percentile
if niter==0:
minmag, maxmag = np.nanpercentile(objects['mag_auto'],(5,95))
if verbose:
print('Min/Max mag: %5.2f, %5.2f' % (minmag,maxmag))
# Imposing S/N cut
gd, = np.where((objects['snr'] >= snrthresh) & np.isfinite(objects['mag_auto']))
if len(gd)==0:
print('No objects passed S/N cut')
return None,None,None,None
objects = objects[gd]
objects['id'] = np.arange(len(objects))+1 # renumber
if verbose:
print('%d objects left after S/N=%5.1f cut' % (len(objects),snrthresh))
# 3) Construct the PSF
#---------------------
# only on first iteration
if niter==0:
if verbose:
print('Step 3: Construct the PSF')
# 3a) Estimate FWHM
#------------------
fwhm = utils.estimatefwhm(objects,verbose=verbose)
# 3b) Pick PSF stars
#------------------
psfobj = utils.pickpsfstars(objects,fwhm,verbose=verbose)
# 3c) Construct the PSF iteratively
#---------------------------------
# Make the initial PSF slightly elliptical so it's easier to fit the orientation
if psfname.lower() != 'empirical':
initpsf = models.psfmodel(psfname,[fwhm/2.35,0.9*fwhm/2.35,0.0],binned=binned,npix=npsfpix)
else:
initpsf = models.psfmodel(psfname,npix=npsfpix,imshape=image.shape,order=lorder)
# run getpsf
psf,psfpars,psfperror,psfcat = getpsf.getpsf(initpsf,image,psfobj,fitradius=psffitradius,
lookup=lookup,lorder=lorder,subnei=psfsubnei,
allcat=objects,reject=reject,verbose=(verbose>=2))
# Trim the PSF
if psftrim is not None:
oldnpix = psf.npix
psf.trim(psftrim)
if verbose:
print('Trimming PSF size from '+str(oldnpix)+' to '+str(psf.npix))
if verbose:
print('Final PSF: '+str(psf))
gd, = np.where(psfcat['reject']==0)
print('Median RMS: %.4f' % np.median(psfcat['rms'][gd]))
# 4) Run on all sources
#----------------------
# If niter>0, then use combined object catalog
if iterdet>0:
# Add detection
# Combine objects catalogs
if niter==0:
allobjects = objects.copy()
else:
objects['id'] = np.arange(len(objects))+1+np.max(allobjects['id'])
allobjects = vstack((allobjects,objects))
if 'group_id' in allobjects.keys():
allobjects.remove_column('group_id')
else:
allobjects = objects
if verbose:
print('Step 4: Get PSF photometry for all '+str(len(allobjects))+' objects')
psfout,model,sky = allfit.fit(psf,image,allobjects,fitradius=fitradius,
recenter=recenter,verbose=(verbose>=2))
# Construct residual image
if iterdet>0:
residim = image.copy()
residim.data -= model.data
# Combine aperture and PSF columns
outobj = allobjects.copy()
# rename some columns for clarity
outobj['x'].name = 'xc'
outobj['y'].name = 'yc'
outobj['a'].name = 'asemi'
outobj['b'].name = 'bsemi'
outobj['flux'].name = 'sumflux'
outobj.remove_columns(['cxx','cyy','cxy'])
# copy over PSF output columns
for n in psfout.columns:
outobj[n] = psfout[n]
outobj['psfamp'] = outobj['amp'].copy()
outobj['amp_error'].name = 'psfamp_error'
outobj['flux'].name = 'psfflux'
outobj['flux_error'].name = 'psfflux_error'
# change mag, magerr to psfmag, psfmag_error
outobj['mag'].name = 'psfmag'
outobj['mag_error'].name = 'psfmag_error'
# put ID at the beginning
cols = np.char.array(list(outobj.columns))
newcols = ['id']+list(cols[cols!='id'])
outobj = outobj[newcols]
# 5) Apply aperture correction
#-----------------------------
if apcorr:
if verbose:
print('Step 5: Applying aperture correction')
outobj,grow,cgrow = aperture.apercorr(psf,image,outobj,psfcat,verbose=verbose)
# Add exposure time correction
exptime = image.header.get('exptime')
if exptime is not None:
if verbose:
print('Applying correction for exposure time %.2f s' % exptime)
outobj['psfmag'] += 2.5*np.log10(exptime)
# Add coordinates if there's a WCS
if image.wcs is not None:
if image.wcs.has_celestial:
if verbose:
print('Adding RA/DEC coordinates to catalog')
skyc = image.wcs.pixel_to_world(outobj['x'],outobj['y'])
outobj['ra'] = skyc.ra
outobj['dec'] = skyc.dec
if verbose:
print('dt = %.2f sec' % (time.time()-start))
# Breakdown logger
if timestamp and verbose:
del builtins.logger
return outobj,model,sky,psf
|
PypiClean
|
/cdk-stacksets-0.0.148.tar.gz/cdk-stacksets-0.0.148/README.md
|
# CDK StackSets Construct Library
<!--BEGIN STABILITY BANNER-->---

> The APIs of higher level constructs in this module are experimental and under active development.
> They are subject to non-backward compatible changes or removal in any future version. These are
> not subject to the [Semantic Versioning](https://semver.org/) model and breaking changes will be
> announced in the release notes. This means that while you may use them, you may need to update
> your source code when upgrading to a newer version of this package.
---
<!--END STABILITY BANNER-->
This construct library allows you to define AWS CloudFormation StackSets.
```python
stack = Stack()
stack_set_stack = StackSetStack(stack, "MyStackSet")
StackSet(stack, "StackSet",
target=StackSetTarget.from_accounts(
regions=["us-east-1"],
accounts=["11111111111"],
parameter_overrides={
"SomeParam": "overrideValue"
}
),
template=StackSetTemplate.from_stack_set_stack(stack_set_stack)
)
```
## Installing
### TypeScript/JavaScript
```bash
npm install cdk-stacksets
```
### Python
```bash
pip install cdk-stacksets
```
### Java
```xml
// add this to your pom.xml
<dependency>
<groupId>io.github.cdklabs</groupId>
<artifactId>cdk-stacksets</artifactId>
<version>0.0.0</version> // replace with version
</dependency>
```
### .NET
```bash
dotnet add package CdklabsCdkStacksets --version X.X.X
```
### Go
```bash
go get cdk-stacksets-go
```
## Creating a StackSet Stack
StackSets allow you to deploy a single CloudFormation template across multiple AWS accounts and regions.
Typically when creating a CDK Stack that will be deployed across multiple environments, the CDK will
synthesize separate Stack templates for each environment (account/region combination). Because of the
way that StackSets work, StackSet Stacks behave differently. For Stacks that will be deployed via StackSets
a single Stack is defined and synthesized. Any environmental differences must be encoded using Parameters.
A special class was created to handle the uniqueness of the StackSet Stack.
You declare a `StackSetStack` the same way that you declare a normal `Stack`, but there
are a couple of differences: `StackSetStack`s come with some special requirements and limitations
compared to regular Stacks.
*Requirements*
* Must be created in the scope of a `Stack`
* Must be environment agnostic
*Limitations*
* Does not support Docker container assets
Once you create a `StackSetStack` you can create resources within the stack.
```python
stack = Stack()
stack_set_stack = StackSetStack(stack, "StackSet")
iam.Role(stack_set_stack, "MyRole",
assumed_by=iam.ServicePrincipal("myservice.amazonaws.com")
)
```
Or
```python
class MyStackSet(StackSetStack):
def __init__(self, scope, id):
super().__init__(scope, id)
iam.Role(self, "MyRole",
assumed_by=iam.ServicePrincipal("myservice.amazonaws.com")
)
```
## Creating a StackSet
AWS CloudFormation StackSets enable you to create, update, or delete stacks across multiple accounts and AWS Regions
with a single operation. Using an administrator account, you define and manage an AWS CloudFormation template, and use
the template as the basis for provisioning stacks into selected target accounts across specific AWS Regions.
There are two methods for defining *where* the StackSet should be deployed. You can either define individual accounts, or
you can define AWS Organizations organizational units.
### Deploying to individual accounts
Deploying to individual accounts requires you to specify the account ids. If you want to later deploy to additional accounts,
or remove the stackset from accounts, this has to be done by adding/removing the account id from the list.
```python
stack = Stack()
stack_set_stack = StackSetStack(stack, "MyStackSet")
StackSet(stack, "StackSet",
target=StackSetTarget.from_accounts(
regions=["us-east-1"],
accounts=["11111111111"]
),
template=StackSetTemplate.from_stack_set_stack(stack_set_stack)
)
```
### Deploying to organizational units
AWS Organizations is an AWS service that enables you to centrally manage and govern multiple accounts.
AWS Organizations allows you to define organizational units (OUs) which are logical groupings of AWS accounts.
OUs enable you to organize your accounts into a hierarchy and make it easier for you to apply management controls.
For a deep dive on OU best practices you can read the [Best Practices for Organizational Units with AWS Organizations](https://aws.amazon.com/blogs/mt/best-practices-for-organizational-units-with-aws-organizations/) blog post.
You can either specify the organization itself, or individual OUs. By default the StackSet will be deployed
to all AWS accounts that are part of the OU. If the OU is nested it will also deploy to all accounts
that are part of any nested OUs.
For example, given the following org hierarchy
```mermaid
graph TD
root-->ou-1;
root-->ou-2;
ou-1-->ou-3;
ou-1-->ou-4;
ou-3-->account-1;
ou-3-->account-2;
ou-4-->account-4;
ou-2-->account-3;
ou-2-->account-5;
```
You could deploy to all AWS accounts under OUs `ou-1`, `ou-3`, `ou-4` by specifying the following:
```python
stack = Stack()
stack_set_stack = StackSetStack(stack, "MyStackSet")
StackSet(stack, "StackSet",
target=StackSetTarget.from_organizational_units(
regions=["us-east-1"],
organizational_units=["ou-1"]
),
template=StackSetTemplate.from_stack_set_stack(stack_set_stack)
)
```
This would deploy the StackSet to `account-1`, `account-2`, `account-4`.
If there are specific AWS accounts that are part of the specified OU hierarchy that you would like
to exclude, this can be done by specifying `excludeAccounts`.
```python
stack = Stack()
stack_set_stack = StackSetStack(stack, "MyStackSet")
StackSet(stack, "StackSet",
target=StackSetTarget.from_organizational_units(
regions=["us-east-1"],
organizational_units=["ou-1"],
exclude_accounts=["account-2"]
),
template=StackSetTemplate.from_stack_set_stack(stack_set_stack)
)
```
This would deploy only to `account-1` & `account-4`, and would exclude `account-2`.
Sometimes you might have individual accounts that you would like to deploy the StackSet to, but
you do not want to include the entire OU. To do that you can specify `additionalAccounts`.
```python
stack = Stack()
stack_set_stack = StackSetStack(stack, "MyStackSet")
StackSet(stack, "StackSet",
target=StackSetTarget.from_organizational_units(
regions=["us-east-1"],
organizational_units=["ou-1"],
additional_accounts=["account-5"]
),
template=StackSetTemplate.from_stack_set_stack(stack_set_stack)
)
```
This would deploy the StackSet to `account-1`, `account-2`, `account-4` & `account-5`.
### StackSet permissions
There are two modes for managing StackSet permissions (i.e. *where* StackSets can deploy & *what* resources they can create).
A StackSet can either be `Service Managed` or `Self Managed`.
You can control this through the `deploymentType` parameter.
#### Service Managed
When a StackSet is service managed, the permissions are managed by AWS Organizations. This allows the StackSet to deploy the Stack to *any*
account within the organization. In addition, the StackSet will be able to create *any* type of resource.
```python
stack = Stack()
stack_set_stack = StackSetStack(stack, "MyStackSet")
StackSet(stack, "StackSet",
target=StackSetTarget.from_organizational_units(
regions=["us-east-1"],
organizational_units=["ou-1"]
),
deployment_type=DeploymentType.service_managed(),
template=StackSetTemplate.from_stack_set_stack(stack_set_stack)
)
```
When you specify `serviceManaged` deployment type, automatic deployments are enabled by default.
Automatic deployments allow the StackSet to be automatically deployed to or deleted from
AWS accounts when they are added or removed from the specified organizational units.
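If you need to tune this behaviour, the same `service_managed()` options shown in the pipeline example further below can be used; a minimal sketch (the option values here are illustrative):
```python
stack = Stack()
stack_set_stack = StackSetStack(stack, "MyStackSet")
StackSet(stack, "StackSet",
target=StackSetTarget.from_organizational_units(
regions=["us-east-1"],
organizational_units=["ou-1"]
),
deployment_type=DeploymentType.service_managed(
auto_deploy_enabled=True,
auto_deploy_retain_stacks=False
),
template=StackSetTemplate.from_stack_set_stack(stack_set_stack)
)
```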
### Using File Assets
You can use the StackSet's parent stack to facilitate file assets. Behind the scenes,
this is accomplished using the `BucketDeployment` construct from the
`aws_s3_deployment` module. You need to provide a bucket outside the scope of the CDK
managed asset buckets and ensure you have permissions for the target accounts to pull
the artifacts from the supplied bucket.
As a basic example, if using a `serviceManaged` deployment, you just need to give read
access to the Organization. You can create the asset bucket in the parent stack, or another
stack in the same app and pass the object as a prop. Or, import an existing bucket as needed.
If creating in the parent or sibling stack you could create and export similar to this:
```python
bucket = s3.Bucket(self, "Assets",
bucket_name="cdkstacket-asset-bucket-xyz"
)
bucket.add_to_resource_policy(
iam.PolicyStatement(
actions=["s3:Get*", "s3:List*"],
resources=[bucket.arn_for_objects("*"), bucket.bucket_arn],
principals=[iam.OrganizationPrincipal("o-xyz")]
))
```
Then pass as a prop to the StackSet stack:
```python
# bucket: s3.Bucket
stack = Stack()
stack_set_stack = StackSetStack(stack, "MyStackSet",
asset_bucket=bucket
)
```
Then call `new StackSet` as described in the sections above.
You can use self-managed StackSet deployments with file assets too but will
need to ensure all target account roles have access to the central asset
bucket you pass as the property.
## Deploying StackSets using CDK Pipelines
You can also deploy StackSets using [CDK Pipelines](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.pipelines-readme.html)
Below is an example of a Pipeline that deploys from a central account. It also
defines separate stages for each "environment" so that you can first test out
the stackset in pre-prod environments.
This would be an automated way of deploying the bootstrap stack described in
[this blog
post](https://aws.amazon.com/blogs/mt/bootstrapping-multiple-aws-accounts-for-aws-cdk-using-cloudformation-stacksets/).
```python
# app: App
class BootstrapStage(Stage):
def __init__(self, scope, id, *, initialBootstrapTarget, stacksetName=None, env=None, outdir=None, stageName=None, permissionsBoundary=None, policyValidationBeta1=None):
super().__init__(scope, id, initialBootstrapTarget=initialBootstrapTarget, stacksetName=stacksetName, env=env, outdir=outdir, stageName=stageName, permissionsBoundary=permissionsBoundary, policyValidationBeta1=policyValidationBeta1)
stack = Stack(self, "BootstrapStackSet")
bootstrap = Bootstrap(stack, "CDKToolkit")
stack_set = StackSet(stack, "StackSet",
template=StackSetTemplate.from_stack_set_stack(bootstrap),
target=initial_bootstrap_target,
capabilities=[Capability.NAMED_IAM],
managed_execution=True,
stack_set_name=stackset_name,
deployment_type=DeploymentType.service_managed(
delegated_admin=True,
auto_deploy_enabled=True,
auto_deploy_retain_stacks=False
),
operation_preferences=OperationPreferences(
region_concurrency_type=RegionConcurrencyType.PARALLEL,
max_concurrent_percentage=100,
failure_tolerance_percentage=99
)
)
pipeline = pipelines.CodePipeline(self, "BootstrapPipeline",
synth=pipelines.ShellStep("Synth",
commands=["yarn install --frozen-lockfile", "npx cdk synth"
],
input=pipelines.CodePipelineSource.connection("myorg/myrepo", "main",
connection_arn="arn:aws:codestar-connections:us-east-2:111111111111:connection/ca65d487-ca6e-41cc-aab2-645db37fdb2b"
)
),
self_mutation=True
)
regions = ["us-east-1", "us-east-2", "us-west-2", "eu-west-2", "eu-west-1", "ap-south-1", "ap-southeast-1"
]
pipeline.add_stage(
BootstrapStage(app, "DevBootstrap",
env=Environment(
region="us-east-1",
account="111111111111"
),
stackset_name="CDKToolkit-dev",
initial_bootstrap_target=StackSetTarget.from_organizational_units(
regions=regions,
organizational_units=["ou-hrza-ar333427"]
)
))
pipeline.add_stage(
BootstrapStage(app, "ProdBootstrap",
env=Environment(
region="us-east-1",
account="111111111111"
),
stackset_name="CDKToolkit-prd",
initial_bootstrap_target=StackSetTarget.from_organizational_units(
regions=regions,
organizational_units=["ou-hrza-bb999427", "ou-hraa-ar111127"]
)
))
```
|
PypiClean
|
/djangoteams_community-1.4.9-py3-none-any.whl/django_teams/views.py
|
from django.core.exceptions import PermissionDenied
# This is where your views go :)
from django.http import HttpResponseRedirect
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.urls import reverse
from django.shortcuts import get_object_or_404
from django.db.models import Count, F
from django_teams.models import Team, TeamStatus, Ownership
from django_teams.forms import (TeamEditForm,
TeamStatusCreateForm,
action_formset)
from django.db.models import Case, When
from django.db import models
from django.contrib.contenttypes.models import ContentType
def loadGenericKeyRelations(queryset):
distinct_contents = queryset.values_list('content_type', flat=True).distinct()
object_items = []
for object in distinct_contents:
content_type = ContentType.objects.get(id=object).model_class()
set = queryset.filter(content_type=object).values()
objects = content_type.objects.filter(pk__in=[object['object_id'] for object in set])
for relation in content_type._meta.get_fields():
if relation.get_internal_type() == 'ForeignKey':
objects.select_related(relation.name)
object_items.append(objects.all())
return object_items
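# Example (sketch): for an Ownership queryset spanning two content types,
# loadGenericKeyRelations returns one queryset per distinct content type, e.g.
#   loadGenericKeyRelations(Ownership.objects.filter(team=team, approved=True))
#   -> [<QuerySet [<Project: p1>]>, <QuerySet [<Repo: r1>, <Repo: r2>]>]
# (Project and Repo are hypothetical owned models, not part of django_teams.)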
class TeamListView(ListView):
model = Team
def render_to_response(self, context, **response_kwargs):
queryset = Team.objects.all().annotate(member_count=Count('users'))
queryset = queryset.annotate(owner=Case(When(teamstatus__role=20, then=F('users__username')), default=None))
if not self.request.user.is_anonymous:
queryset = queryset.annotate(role=Case(When(teamstatus__user=self.request.user,
then=F('teamstatus__role')), default=0, output_field=models.IntegerField()))
queryset = queryset.order_by('-role', 'name')
else:
queryset = queryset.order_by('-id')
team_names = []
team_list = []
# combine teams with the same name
for q in queryset:
if q.name not in team_names:
team_names.append(q.name)
tmp = {'name': q.name, 'id': q.id, 'pk': q.pk, 'description': q.description,
'member_count': q.member_count, 'owner': q.owner}
try:
tmp['role'] = q.role
except Exception:
tmp['role'] = None
team_list.append(tmp)
else:
t = team_list[team_names.index(q.name)]
t['member_count'] += q.member_count
if q.owner is not None:
t['owner'] = q.owner
return super(TeamListView, self).render_to_response({'list': team_list}, **response_kwargs)
class UserTeamListView(ListView):
template_name = 'django_teams/user_team_list.html'
def get_queryset(self):
statuses = TeamStatus.objects.select_related('user').filter(user=self.request.user,
role=20).values_list('team', flat=True)
return statuses
class TeamCreateView(CreateView):
model = Team
template_name = 'django_teams/team_create.html'
fields = ['name', 'description', 'private']
def dispatch(self, request, *args, **kwargs):
self.request = request
return super(TeamCreateView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
ret = super(TeamCreateView, self).form_valid(form)
self.object.add_user(self.request.user, team_role=20)
return ret
class TeamDetailView(DetailView):
model = Team
def dispatch(self, request, *args, **kwargs):
self.request = request
return super(TeamDetailView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
object = super(TeamDetailView, self).get_object(queryset)
if object.private and self.request.user not in object.users.filter(teamstatus__role__gte=10):
raise PermissionDenied()
return object
def render_to_response(self, context, **response_kwargs):
team = self.object
# context['owner'] = team.users.filter(teamstatus__role=20)
# context['members'] = team.users.filter(teamstatus__role=10)
context['owners'] = []
context['members'] = []
statuses = TeamStatus.objects.select_related('user', 'team').filter(team=team)
for s in statuses:
if s.role == 10:
context['members'].append(s.user)
elif s.role == 20:
context['owners'].append(s.user)
owned = Ownership.objects.filter(team=team, approved=True)
context['approved_objects_types'] = loadGenericKeyRelations(owned)
return super(TeamDetailView, self).render_to_response(context, **response_kwargs)
class TeamInfoEditView(UpdateView):
model = Team
fields = ['name', 'description', 'private']
template_name = 'django_teams/teaminfo_form.html'
def get_object(self, queryset=None):
object = super(TeamInfoEditView, self).get_object(queryset)
if self.request.user not in object.users.filter(teamstatus__role__gte=20):
raise PermissionDenied()
return object
class TeamEditView(UpdateView):
model = Team
form_class = TeamEditForm
def dispatch(self, request, *args, **kwargs):
self.request = request
return super(TeamEditView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
object = super(TeamEditView, self).get_object(queryset)
# User must be admin of the object to get into this view
if self.request.user not in object.users.filter(teamstatus__role__gte=20):
raise PermissionDenied()
return object
def get_form_class(self):
# get forms for team leaders, team members, team requests
ret = []
users = self.object.users
ret += [action_formset(prefix_name='teachers', qset=users.filter(teamstatus__role=20),
actions=('---', 'Demote', 'Remove'))]
ret += [action_formset(prefix_name='students', qset=users.filter(teamstatus__role=10),
actions=('---', 'Promote', 'Remove'))]
ret += [action_formset(prefix_name='member-requests', qset=users.filter(teamstatus__role=1),
actions=('---', 'Approve', 'Reject'))]
owned_objects = Ownership.objects.filter(team=self.object)
approved = loadGenericKeyRelations(owned_objects.filter(approved=True))
for set in approved:
if set:
prefix_name = 'approved-' + str(set.model.__name__)
ret += [action_formset(prefix_name=prefix_name, qset=set, actions=('---', 'Remove'), link=True)]
pending_approval = loadGenericKeyRelations(owned_objects.filter(approved=False))
for set in pending_approval:
if set:
prefix_name = str(set.model.__name__) + 's-pending-approval'
ret += [action_formset(prefix_name=prefix_name, qset=set,
actions=('---', 'Approve', 'Remove'), link=True)]
return ret
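# Example (sketch): for a team with owners, members, pending member requests and
# one approved owned model called "Project" (a hypothetical name), get_form_class()
# returns a list of formset classes roughly like
#   [teachers_formset, students_formset, member_requests_formset, approved_Project_formset]
# get_form() below instantiates each one with its matching prefix so the POST
# handler can read the chosen action and items back out per section.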
def get_form(self, form_class=TeamEditForm):
kwargs = self.get_form_kwargs()
form_class = self.get_form_class()
if 'data' in kwargs:
ret = [form_class[num](kwargs['data'],
prefix=form_class[num].name) for num in range(len(form_class))]
else:
ret = [form_class[num](prefix=form_class[num].name) for num in range(len(form_class))]
return ret
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
POST variables and then checked for validity.
"""
self.object = get_object_or_404(Team, pk=kwargs['pk'])
form_class = self.get_form_class()
form = self.get_form(form_class)
for f in form:
if not f.is_valid():
return self.form_invalid(form)
# Go through each form and perform the action
# Owners
owner_action = form[0].cleaned_data['action']
owner_items = form[0].cleaned_data['items']
statuses = TeamStatus.objects.filter(team=self.object, user__in=[object for object in owner_items])
if owner_action == 'Demote':
for status in statuses:
status.role = 10
status.save()
if owner_action == 'Remove':
for status in statuses:
status.delete()
# Members
member_action = form[1].cleaned_data['action']
member_items = form[1].cleaned_data['items']
statuses = TeamStatus.objects.filter(team=self.object, user__in=[object for object in member_items])
if member_action == 'Promote':
for status in statuses:
status.role = 20
status.save()
if member_action == 'Remove':
for status in statuses:
status.delete()
# Member Requests
request_action = form[2].cleaned_data['action']
request_items = form[2].cleaned_data['items']
statuses = TeamStatus.objects.filter(team=self.object, user__in=[object for object in request_items])
if request_action == 'Approve':
for status in statuses:
status.role = 10
status.save()
if request_action == 'Reject':
for status in statuses:
status.delete()
for num in range(3, len(form)):
current_action = form[num].cleaned_data['action']
current_items = form[num].cleaned_data['items']
objects = []
if current_items:
content_type = ContentType.objects.get_for_model(current_items[0])
objects = Ownership.objects.filter(content_type=content_type,
team=self.object,
object_id__in=[object.id for object in current_items])
if current_action == 'Approve':
for object in objects:
object.approved = True
object.save()
if current_action == 'Remove':
for object in objects:
object.delete()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('team-edit', kwargs={'pk': self.object.pk})
class TeamStatusCreateView(CreateView):
model = TeamStatus
form_class = TeamStatusCreateForm
def get_success_url(self):
return reverse('team-list')
def dispatch(self, request, *args, **kwargs):
self.team = Team.objects.get(pk=kwargs['team_pk'])
self.request = request
return super(TeamStatusCreateView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
form.instance.team = self.team
form.instance.user = self.request.user
form.instance.role = 1
return super(TeamStatusCreateView, self).form_valid(form)
def render_to_response(self, context, **response_kwargs):
context['team'] = self.team
return super(TeamStatusCreateView, self).render_to_response(context, **response_kwargs)
|
PypiClean
|
/articlequality-0.4.4.tar.gz/articlequality-0.4.4/README.md
|
# Wikipedia article quality classification
This library provides a set of utilities for performing automatic detection of
assessment classes of Wikipedia articles. For more information, see the full
documentation at https://articlequality.readthedocs.io .
**Compatible with Python 3.x only.** Sorry.
* **Install:** ``pip install articlequality``
* **Models:** https://github.com/wikimedia/articlequality/tree/master/models
* **Documentation:** https://articlequality.readthedocs.io
## Basic usage
>>> import articlequality
>>> from revscoring import Model
>>>
>>> scorer_model = Model.load(open("models/enwiki.nettrom_wp10.gradient_boosting.model", "rb"))
>>>
>>> text = "I am the text of a page. I have a <ref>word</ref>"
>>> articlequality.score(scorer_model, text)
{'prediction': 'stub',
'probability': {'stub': 0.27156163795807853,
'b': 0.14707452309674252,
'fa': 0.16844898943510833,
'c': 0.057668704007171959,
'ga': 0.21617801281707663,
'start': 0.13906813268582238}}
## Install
### Requirements
* Python 3.5, 3.6 or 3.7
* All the system requirements of [revscoring](https://github.com/wikimedia/revscoring)
### Installation steps
1. clone this repository
2. install the package itself and its dependencies `python setup.py install`
3. You can verify that your installation worked by running `make enwiki_models` to build the English Wikipedia article quality model or `make wikidatawiki_models` to build the item quality model for Wikidata
### Retraining the models
To retrain a model, run `make -B MODEL` e.g. `make -B wikidatawiki_models`. This will redownload the labels, re-extract the features from the revisions, and then retrain and rescore the model.
To skip re-downloading the training labels and re-extracting the features, it is enough to `touch` the files in the `datasets/` directory and run the `make` command without the `-B` flag.
### Running tests
Example:
```
pytest -vv tests/feature_lists/test_wikidatawiki.py
```
## Authors
* Aaron Halfaker -- https://github.com/halfak
* Morten Warncke-Wang -- https://github.com/nettrom
|
PypiClean
|
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/cnes.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def cnes(path):
"""Variables from the 1997 Canadian National Election Study
These variables are from the mailback questionnaire to the 1997 Canadian
National Election Study, and are intended to tap attitude towards
“traditional values.”
A data frame with 1529 observations on the following 4 variables.
`MBSA2`
an ordered factor with levels `StronglyDisagree`, `Disagree`,
`Agree`, and `StronglyAgree`, in response to the statement, “We
should be more tolerant of people who choose to live according to
their own standards, even if they are very different from our own.”
`MBSA7`
an ordered factor with levels `StronglyDisagree`, `Disagree`,
`Agree`, and `StronglyAgree`, in response to the statement,
“Newer lifestyles are contributing to the breakdown of our society.”
`MBSA8`
an ordered factor with levels `StronglyDisagree`, `Disagree`,
`Agree`, and `StronglyAgree`, in response to the statement, “The
world is always changing and we should adapt our view of moral
behaviour to these changes.”
`MBSA9`
an ordered factor with levels `StronglyDisagree`, `Disagree`,
`Agree`, and `StronglyAgree`, in response to the statement,
“This country would have many fewer problems if there were more
emphasis on traditional family values.”
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `cnes.csv`.
Returns:
Tuple of np.ndarray `x_train` with 1529 rows and 4 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'cnes.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/sem/CNES.csv'
maybe_download_and_extract(path, url,
save_file_name='cnes.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
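# Example (sketch; "~/observations_data" is an illustrative path):
#   x_train, metadata = cnes("~/observations_data")
#   x_train.shape        # (1529, 4)
#   metadata['columns']  # Index(['MBSA2', 'MBSA7', 'MBSA8', 'MBSA9'], ...)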
|
PypiClean
|
/holehebetter-1.61.tar.gz/holehebetter-1.61/holehe/modules/mails/yahoo.py
|
from holehe.core import *
from holehe.localuseragent import *
async def yahoo(email, client, out):
name = "yahoo"
domain = "yahoo.com"
method= "login"
frequent_rate_limit=True
headers = {
'User-Agent': random.choice(ua["browsers"]["firefox"]),
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin': 'https://login.yahoo.com',
'DNT': '1',
'Connection': 'keep-alive',
}
req = await client.get("https://login.yahoo.com", headers=headers)
headers = {
'User-Agent': random.choice(ua["browsers"]["firefox"]),
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'bucket': 'mbr-fe-merge-manage-account',
'X-Requested-With': 'XMLHttpRequest',
'Origin': 'https://login.yahoo.com',
'DNT': '1',
'Connection': 'keep-alive',
}
params = {
'.src': 'fpctx',
'.intl': 'ca',
'.lang': 'en-CA',
'.done': 'https://ca.yahoo.com',
}
try:
data = {
'acrumb': req.text.split('<input type="hidden" name="acrumb" value="')[1].split('"')[0],
'sessionIndex': req.text.split('<input type="hidden" name="sessionIndex" value="')[1].split('"')[0],
'username': email,
'passwd': '',
'signin': 'Next',
'persistent': 'y'}
response = await client.post(
'https://login.yahoo.com/',
headers=headers,
params=params,
data=data)
response = response.json()
if "error" in response.keys():
if not response["error"]:
out.append({"name": name,"domain":domain,"method":method,"frequent_rate_limit":frequent_rate_limit,
"rateLimit": False,
"exists": True,
"emailrecovery": None,
"phoneNumber": None,
"others": None})
else:
out.append({"name": name,"domain":domain,"method":method,"frequent_rate_limit":frequent_rate_limit,
"rateLimit": True,
"exists": False,
"emailrecovery": None,
"phoneNumber": None,
"others": None})
elif "render" in response.keys():
if response["render"]["error"] == "messages.ERROR_INVALID_USERNAME":
out.append({"name": name,"domain":domain,"method":method,"frequent_rate_limit":frequent_rate_limit,
"rateLimit": False,
"exists": False,
"emailrecovery": None,
"phoneNumber": None,
"others": None})
else:
out.append({"name": name,"domain":domain,"method":method,"frequent_rate_limit":frequent_rate_limit,
"rateLimit": True,
"exists": False,
"emailrecovery": None,
"phoneNumber": None,
"others": None})
elif "location" in response.keys():
out.append({"name": name,"domain":domain,"method":method,"frequent_rate_limit":frequent_rate_limit,
"rateLimit": False,
"exists": False,
"emailrecovery": None,
"phoneNumber": None,
"others": None})
else:
out.append({"name": name,"domain":domain,"method":method,"frequent_rate_limit":frequent_rate_limit,
"rateLimit": True,
"exists": False,
"emailrecovery": None,
"phoneNumber": None,
"others": None})
except Exception:
out.append({"name": name,"domain":domain,"method":method,"frequent_rate_limit":frequent_rate_limit,
"rateLimit": True,
"exists": False,
"emailrecovery": None,
"phoneNumber": None,
"others": None})
|
PypiClean
|
/dltk_ai-1.1.9.tar.gz/dltk_ai-1.1.9/dltk_ai/visualization.py
|
# Imports
import pandas as pd
import plotly
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.express as px
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Bar graph
def bar_graph(library, dataset_path, datasource_type, values_list_column, target_column, main_title, title_x_axis, title_y_axis):
"""
Parameters:
library : plotly/matplotlib/seaborn
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
values_list_column : list of independent coumns mention
target_column : dependent column mention
main_title : Main title of graph
title_x_axis : X_axis title of graph
title_y_axis : Y_axis title of graph
"""
if library == "plotly":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
new_data = pd.pivot_table(dataset, values=values_list_column, columns= target_column)
traces = [go.Bar(
x = new_data.columns,
y = new_data.loc[rowname],
name = rowname
)for rowname in new_data.index]
layout = go.Layout(title = main_title)
fig = plotly.graph_objs.Figure(data = traces,layout = layout)
fig.update_layout(
xaxis_title=title_x_axis,
yaxis_title=title_y_axis)
plotly.offline.iplot(fig)
elif library =="matplotlib":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
# Figure Size
fig = plt.figure(figsize =(10, 7))
value = dataset[values_list_column]
target = dataset[target_column]
# Horizontal Bar Plot
plt.bar(value, target)
plt.title(main_title)
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
elif library == "seaborn":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
sns.barplot(x = values_list_column, y = target_column, data = dataset)
plt.title(main_title)
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
else:
print("incorrect library used")
#line graph
def line_graph(library, dataset_path, datasource_type, values_list_column, target_column, main_title, title_x_axis, title_y_axis):
"""
Parameters:
library : plotly/matplotlib/seaborn
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
values_list_column : list of independent columns
target_column : dependent column
main_title : Main title of graph
title_x_axis : X_axis title of graph
title_y_axis : Y_axis title of graph
"""
if library == "plotly":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
new_data = pd.pivot_table(dataset, values=values_list_column, columns= target_column)
traces = [go.Scatter(
x = new_data.columns,
y = new_data.loc[rowname],
mode = 'lines',
name = rowname
)for rowname in new_data.index]
layout = go.Layout(title = main_title)
fig = plotly.graph_objs.Figure(data = traces,layout = layout)
fig.update_layout(
xaxis_title=title_x_axis,
yaxis_title=title_y_axis)
plotly.offline.iplot(fig)
elif library == "matplotlib":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
plt.plot(dataset[values_list_column], dataset[target_column])
plt.title(main_title)
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
elif library == "seaborn":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
plt.figure(figsize=(15,15))
plt.title(main_title)
sns.lineplot(dataset[values_list_column],dataset[target_column])
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
else:
print("incorrect library used")
# scatter plot
def scatter_plot(library, dataset_path, datasource_type, values_list_column, target_column, main_title, title_x_axis, title_y_axis):
"""
Parameters:
library : plotly/matplotlib/seaborn
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
values_list_column : list of independent columns
target_column : dependent column
main_title : Main title of graph
title_x_axis : X_axis title of graph
title_y_axis : Y_axis title of graph
"""
if library == "plotly":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
new_data = pd.pivot_table(dataset, values=values_list_column, columns= target_column)
traces = [go.Scatter(
x = new_data.columns,
y = new_data.loc[rowname],
mode = 'markers',
name = rowname
)for rowname in new_data.index]
layout = go.Layout(title = main_title)
fig = plotly.graph_objs.Figure(data = traces,layout = layout)
fig.update_layout(
xaxis_title=title_x_axis,
yaxis_title=title_y_axis)
plotly.offline.iplot(fig)
elif library == "matplotlib":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
x = dataset[values_list_column]
y = dataset[target_column]
plt.scatter(x, y, cmap='viridis', alpha=0.3)
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
elif library == "seaborn":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
# Set the width and height of the figure
plt.figure(figsize=(14,6))
# Add title
plt.title(main_title)
sns.scatterplot(data=dataset, x=values_list_column, y=target_column)
# Add label for horizontal axis
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
else :
print("incorrect library used")
# Pie graph
def pie_graph(library, dataset_path, datasource_type, values_list_column, main_title):
"""
Parameters:
library : plotly/matplotlib/seaborn
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
values_list_column : list of independent columns
main_title : Main title of graph
"""
if library=="plotly":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
trace = plotly.graph_objs.Pie(values=dataset[values_list_column].value_counts().values.tolist(),
labels=dataset[values_list_column].value_counts().keys().tolist(),
hoverinfo="label+percent+name",
domain=dict(x=[0, .48]),
marker=dict(line=dict(width=2,
color="rgb(243,243,243)")
),
hole=.6
)
layout = plotly.graph_objs.Layout(dict(title=values_list_column,
plot_bgcolor="rgb(243,243,243)",
paper_bgcolor="rgb(243,243,243)",
annotations=[dict(text=main_title,
font=dict(size=13),
showarrow=False,
x=.15, y=.5),
]
)
)
data = [trace]
fig = plotly.graph_objs.Figure(data=data, layout=layout)
plotly.offline.iplot(fig)
elif library == "matplotlib":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
values = dataset[values_list_column].value_counts()
per=[]
for i in values:
perc = i/values.sum()
per.append(format(perc,'.2f'))
plt.figure(figsize=(10,6))
plt.title(main_title,fontsize=20)
plt.pie(per,autopct='%1.1f%%')
plt.legend(values.index,loc='best', fontsize=15)
elif library == "seaborn":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
pie, ax = plt.subplots(figsize=[10,6])
values = dataset[values_list_column].value_counts().values.tolist()
labels = dataset[values_list_column].value_counts().keys()
plt.pie(x=values, autopct="%.1f%%", labels=labels, pctdistance=0.5)
plt.title(main_title, fontsize=14);
else:
print("incorrect library used")
#Box Plot
def box_plot(library, dataset_path, datasource_type, values_list_column, target_column, main_title, title_x_axis, title_y_axis):
"""
Parameters:
library : plotly/matplotlib/seaborn
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
values_list_column : list of independent columns
target_column : dependent column
main_title : Main title of graph
title_x_axis : X_axis title of graph
title_y_axis : Y_axis title of graph
"""
if library == "plotly":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
fig = px.box(dataset, x=values_list_column, y=target_column)
fig.show()
elif library == "matplotlib":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
dataset.boxplot(by = values_list_column, column = target_column, grid = False)
plt.title(main_title)
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
elif library == "seaborn":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
plt.figure(figsize=(14,6))
# Add title
plt.title(main_title)
ax= sns.boxplot(x=values_list_column,y=target_column,data=dataset)
# Add label for horizontal axis
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
else:
print("incorrect library used")
#Violin Plot
def violin_plot(library, dataset_path, datasource_type, values_list_column, target_column, main_title, title_x_axis, title_y_axis):
"""
Parameters:
library : plotly/matplotlib/seaborn
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
values_list_column : list of independent columns
target_column : dependent column
main_title : Main title of graph
title_x_axis : X_axis title of graph
title_y_axis : Y_axis title of graph
"""
if library == "matplotlib":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
data_to_plot = [dataset[values_list_column], dataset[target_column]]
# Create a figure instance
fig = plt.figure()
# Create an axes instance
ax = fig.add_axes([0,0,1,1])
# Create the boxplot
bp = ax.violinplot(data_to_plot)
plt.title(main_title)
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
plt.show()
elif library == "seaborn":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
sns.violinplot(x=values_list_column, y=target_column, data=dataset, palette="Pastel1")
plt.title(main_title)
plt.xlabel(title_x_axis)
plt.ylabel(title_y_axis)
plt.show()
elif library == "plotly":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
fig1 = px.violin(dataset, y=values_list_column, x=target_column)
fig1.show()
else:
print("incorrect library used")
# Function for correlation matrix
def correlation_matrix(library, dataset_path, datasource_type, main_title, scale_title):
"""
Parameters:
library : plotly/matplotlib/seaborn
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
main_title : Main title of graph
scale_title: Title of the scale mention
"""
if library=="plotly":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
correlation = dataset.corr()
matrix_cols = correlation.columns.tolist()
corr_array = np.array(correlation)
trace = plotly.graph_objs.Heatmap(z=corr_array,
x=matrix_cols,
y=matrix_cols,
colorscale="Viridis",
colorbar=dict(title=scale_title,
titleside="right"
),
)
layout = plotly.graph_objs.Layout(dict(title=main_title,
autosize=False,
height=720,
width=800,
margin=dict(r=0, l=210,
t=25, b=210,
),
yaxis=dict(tickfont=dict(size=9)),
xaxis=dict(tickfont=dict(size=9))
)
)
data = [trace]
fig = plotly.graph_objs.Figure(data=data, layout=layout)
plotly.offline.iplot(fig)
elif library=="matplotlib":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
corr = dataset.corr()
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
plt.suptitle(main_title, fontsize=16)
plt.pcolor(corr, cmap='RdBu_r')
cb = plt.colorbar()
cb.set_label(scale_title, fontsize=14)
plt.show()
elif library=="seaborn":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
corr = dataset.corr()
plt.figure(figsize=(16, 6))
heatmap = sns.heatmap(corr, vmin=-1, vmax=1, annot=True)
heatmap.set_title(main_title, fontdict={'fontsize':12}, pad=12);
plt.show()
else:
print("incorrect library used")
#Distribution Plot
def dist_plot(library, dataset_path, datasource_type, values_list_column, main_title):
"""
Parameters:
library : plotly/matplotlib/seaborn
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
values_list_column : list of independent columns
main_title : Main title of graph
"""
if library == "seaborn":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
sns.set(rc={'figure.figsize':(11.7,8.27)})
sns.distplot(dataset[values_list_column], bins=30)
plt.title(main_title)
plt.show()
elif library == "matplotlib":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
dataset[values_list_column].hist(bins=15, figsize=(11,11))
plt.title(main_title)
plt.show()
elif library == "plotly":
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
hist_data = [dataset[values_list_column]]
group_labels = [main_title] # name of the dataset
fig = ff.create_distplot(hist_data, group_labels)
fig.show()
else:
print("incorrect library used")
#Single lib plots
def histogram(dataset_path, datasource_type):
"""
Parameters:
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
"""
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
dataset.hist(bins=15, figsize=(5,5))
plt.show()
#To Check the missingvalues using heatmap
def missingvalues(dataset_path, datasource_type):
"""
Parameters:
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
"""
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
heatmap = sns.heatmap(dataset.isnull(),cbar=False, yticklabels=False, cmap = 'viridis')
heatmap.set_title('Missing Values Check', fontdict={'fontsize':12}, pad=12);
plt.show()
# to check count
def count_plot(dataset_path, datasource_type, values_list_column, main_title):
"""
Parameters:
dataset_path : Path of dataset
datasource_type : csv/excel/dataframe
values_list_column : list of independent columns
main_title : Main title of graph
"""
if datasource_type == 'csv':
dataset = pd.read_csv(dataset_path)
elif datasource_type == 'excel':
dataset = pd.read_excel(dataset_path)
elif datasource_type == 'dataframe':
dataset = dataset_path
sns.set(rc={'figure.figsize':(12,9)})
sns.countplot(x = values_list_column, data = dataset, palette = "Set2")
plt.title(main_title)
# Show the plot
plt.show()
|
PypiClean
|
/tbtool1-0.1-py3-none-any.whl/aws_services/s3/s3_constants.py
|
class S3Checks:
S3_CHECK_AT_REST_ENCRYPTION = "S3 buckets MUST be configured for encryption at rest using a KMS key with default SSE-S3 encryption at a minimum"
S3_CHECK_IN_TRANSIT_ENCRYPTION = "S3 buckets MUST be configured for in-transit encryption by enabling Secure Transport"
S3_PUBLIC_ACL_ACCESS = "S3 bucket ACLs MUST NOT allow public write or full-control access"
S3_PUBLIC_POLICY_ACCESS = "S3 bucket policies MUST NOT allow public write or full-control access"
S3_VERSIONING = "S3 Bucket Version SHOULD be enabled"
S3_CHECK_TAGS = "MUST be tagged in accordance with tagging standards"
class S3MethodAssociations:
S3_CHECK_AT_REST_ENCRYPTION = "check_encryption_at_rest"
S3_CHECK_IN_TRANSIT_ENCRYPTION = "check_in_transit_encryption"
S3_PUBLIC_ACL_ACCESS = "check_s3_acl"
S3_PUBLIC_POLICY_ACCESS = "check_s3_policy"
S3_VERSIONING = "check_s3_versioning"
S3_CHECK_TAGS = "check_bucket_tags"
security_checks = {
"S3_CHECK_AT_REST_ENCRYPTION": {
"method_name": S3MethodAssociations.S3_CHECK_AT_REST_ENCRYPTION,
"check_description": S3Checks.S3_CHECK_AT_REST_ENCRYPTION
},
"S3_CHECK_IN_TRANSIT_ENCRYPTION": {
"method_name": S3MethodAssociations.S3_CHECK_IN_TRANSIT_ENCRYPTION,
"check_description": S3Checks.S3_CHECK_IN_TRANSIT_ENCRYPTION
},
"S3_PUBLIC_ACL_ACCESS": {
"method_name": S3MethodAssociations.S3_PUBLIC_ACL_ACCESS,
"check_description": S3Checks.S3_PUBLIC_ACL_ACCESS
},
"S3_PUBLIC_POLICY_ACCESS": {
"method_name": S3MethodAssociations.S3_PUBLIC_POLICY_ACCESS,
"check_description": S3Checks.S3_PUBLIC_POLICY_ACCESS
},
"S3_CHECK_TAGS": {
"method_name": S3MethodAssociations.S3_CHECK_TAGS,
"check_description": S3Checks.S3_CHECK_TAGS
}
}
best_practices_checks = {
"S3_VERSIONING": {
"method_name": S3MethodAssociations.S3_VERSIONING,
"check_description": S3Checks.S3_VERSIONING
}
}
check_dict = {
"SecurityCheck": security_checks,
"BestPractices": best_practices_checks,
"AccountSecurityCheck": {},
"AccountBestPractices": {}
}
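# A minimal sketch of how these mappings could be consumed by a checker;
# the `S3Checker` class and the bucket name below are hypothetical and not
# part of this module:
#
#     for check_name, details in check_dict["SecurityCheck"].items():
#         method = getattr(S3Checker(), details["method_name"])
#         print(details["check_description"], "->", method("example-bucket"))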
|
PypiClean
|
/World%20of%20Warships%20replays%20parser-3.3.3.tar.gz/World of Warships replays parser-3.3.3/replay_unpack/clients/wows/versions/0_11_6/battle_controller.py
|
import copy
import logging
import pickle
from replay_unpack.core import IBattleController
from replay_unpack.core.entity import Entity
from .constants import DamageStatsType, Category, TaskType, Status
try:
from .constants import DEATH_TYPES
except ImportError:
DEATH_TYPES = {}
from .players_info import PlayersInfo, PlayerType
class BattleController(IBattleController):
def __init__(self):
self._entities = {}
self._achievements = {}
self._ribbons = {}
self._players = PlayersInfo()
self._battle_result = None
self._damage_map = {}
self._shots_damage_map = {}
self._death_map = []
self._map = {}
self._player_id = None
self._arena_id = None
self._dead_planes = {}
Entity.subscribe_method_call('Avatar', 'onBattleEnd', self.onBattleEnd)
Entity.subscribe_method_call('Avatar', 'onArenaStateReceived', self.onArenaStateReceived)
Entity.subscribe_method_call('Avatar', 'onGameRoomStateChanged', self.onPlayerInfoUpdate)
Entity.subscribe_method_call('Avatar', 'receiveVehicleDeath', self.receiveVehicleDeath)
# Entity.subscribe_method_call('Vehicle', 'setConsumables', self.onSetConsumable)
Entity.subscribe_method_call('Avatar', 'onRibbon', self.onRibbon)
Entity.subscribe_method_call('Avatar', 'onAchievementEarned', self.onAchievementEarned)
Entity.subscribe_method_call('Avatar', 'receiveDamageStat', self.receiveDamageStat)
Entity.subscribe_method_call('Avatar', 'receive_planeDeath', self.receive_planeDeath)
Entity.subscribe_method_call('Avatar', 'onNewPlayerSpawnedInBattle', self.onNewPlayerSpawnedInBattle)
Entity.subscribe_method_call('Vehicle', 'receiveDamagesOnShip', self.g_receiveDamagesOnShip)
Entity.subscribe_nested_property_change('BattleLogic', 'state.controlPoints', self.controlPoints)
def controlPoints(self, entity, control_points):
# print(control_points)
pass
def onSetConsumable(self, vehicle, blob):
print(pickle.loads(blob))
@property
def entities(self):
return self._entities
@property
def battle_logic(self):
return next(e for e in self._entities.values() if e.get_name() == 'BattleLogic')
def create_entity(self, entity: Entity):
self._entities[entity.id] = entity
def destroy_entity(self, entity: Entity):
self._entities.pop(entity.id)
def on_player_enter_world(self, entity_id: int):
self._player_id = entity_id
def get_info(self):
# adding killed planes data
players = copy.deepcopy(self._players.get_info())
for player in players.values():
player['planesCount'] = self._dead_planes.get(
player.get('shipId', 0), 0)
return dict(
achievements=self._achievements,
ribbons=self._ribbons,
players=players,
battle_result=self._battle_result,
damage_map=self._damage_map,
shots_damage_map=self._shots_damage_map,
death_map=self._death_map,
death_info=self._getDeathsInfo(),
map=self._map,
player_id=self._player_id,
control_points=self._getCapturePointsInfo(),
tasks=list(self._getTasksInfo()),
skills=dict(),
arena_id=self._arena_id
)
def _getDeathsInfo(self):
deaths = {}
for killedVehicleId, fraggerVehicleId, typeDeath in self._death_map:
death_type = DEATH_TYPES.get(typeDeath)
if death_type is None:
logging.warning('Unknown death type %s', typeDeath)
continue
deaths[killedVehicleId] = {
'killer_id': fraggerVehicleId,
'icon': death_type['icon'],
'name': death_type['name'],
}
return deaths
def _getCapturePointsInfo(self):
return self.battle_logic.properties['client']['state'].get('controlPoints', [])
def _getTasksInfo(self):
tasks = self.battle_logic.properties['client']['state'].get('tasks', [])
for task in tasks:
yield {
"category": Category.names[task['category']],
"status": Status.names[task['status']],
"name": task['name'],
"type": TaskType.names[task['type']]
}
def onBattleEnd(self, avatar, teamId, state):
self._battle_result = dict(
winner_team_id=teamId,
victory_type=state
)
def onNewPlayerSpawnedInBattle(self, avatar, playersStates, botsStates, observersState):
self._players.create_or_update_players(
pickle.loads(playersStates, encoding='latin1'),
PlayerType.PLAYER
)
self._players.create_or_update_players(
pickle.loads(botsStates, encoding='latin1'),
PlayerType.BOT
)
self._players.create_or_update_players(
pickle.loads(observersState, encoding='latin1'),
PlayerType.OBSERVER
)
def onArenaStateReceived(self, avatar, arenaUniqueId, teamBuildTypeId, preBattlesInfo, playersStates, botsStates,
observersState, buildingsInfo):
self._arena_id = arenaUniqueId
self._players.create_or_update_players(
pickle.loads(playersStates, encoding='latin1'),
PlayerType.PLAYER
)
self._players.create_or_update_players(
pickle.loads(botsStates, encoding='latin1'),
PlayerType.BOT
)
self._players.create_or_update_players(
pickle.loads(observersState, encoding='latin1'),
PlayerType.OBSERVER
)
def onPlayerInfoUpdate(self, avatar, playersData, botsData, observersData):
self._players.create_or_update_players(
pickle.loads(playersData, encoding='latin1'),
PlayerType.PLAYER
)
self._players.create_or_update_players(
pickle.loads(botsData, encoding='latin1'),
PlayerType.BOT
)
self._players.create_or_update_players(
pickle.loads(observersData, encoding='latin1'),
PlayerType.OBSERVER
)
def receiveDamageStat(self, avatar, blob):
normalized = {}
for (type_, bool_), value in pickle.loads(blob).items():
# TODO: improve damage_map and list other damage types too
if bool_ != DamageStatsType.DAMAGE_STATS_ENEMY:
continue
normalized.setdefault(type_, {}).setdefault(bool_, 0)
normalized[type_][bool_] = value
self._damage_map.update(normalized)
def onRibbon(self, avatar, ribbon_id):
self._ribbons.setdefault(avatar.id, {}).setdefault(ribbon_id, 0)
self._ribbons[avatar.id][ribbon_id] += 1
def onAchievementEarned(self, avatar, avatar_id, achievement_id):
self._achievements.setdefault(avatar_id, {}).setdefault(achievement_id, 0)
self._achievements[avatar_id][achievement_id] += 1
def receiveVehicleDeath(self, avatar, killedVehicleId, fraggerVehicleId, typeDeath):
self._death_map.append((killedVehicleId, fraggerVehicleId, typeDeath))
def g_receiveDamagesOnShip(self, vehicle, damages):
for damage_info in damages:
self._shots_damage_map.setdefault(vehicle.id, {}).setdefault(damage_info['vehicleID'], 0)
self._shots_damage_map[vehicle.id][damage_info['vehicleID']] += damage_info['damage']
def receive_planeDeath(self, avatar, squadronID, planeIDs, reason, attackerId):
self._dead_planes.setdefault(attackerId, 0)
self._dead_planes[attackerId] += len(planeIDs)
@property
def map(self):
raise NotImplementedError()
@map.setter
def map(self, value):
# Strip the 'spaces/' prefix from the map name (lstrip('spaces/') would
# also strip matching leading characters of the name itself)
self._map = value[len('spaces/'):] if value.startswith('spaces/') else value
|
PypiClean
|
/golismero-2.0.3.tar.gz/golismero-2.0.3/thirdparty_libs/yaml/events.py
|
class Event(object):
def __init__(self, start_mark=None, end_mark=None):
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
if hasattr(self, key)]
arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
for key in attributes])
return '%s(%s)' % (self.__class__.__name__, arguments)
class NodeEvent(Event):
def __init__(self, anchor, start_mark=None, end_mark=None):
self.anchor = anchor
self.start_mark = start_mark
self.end_mark = end_mark
class CollectionStartEvent(NodeEvent):
def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
flow_style=None):
self.anchor = anchor
self.tag = tag
self.implicit = implicit
self.start_mark = start_mark
self.end_mark = end_mark
self.flow_style = flow_style
class CollectionEndEvent(Event):
pass
# Implementations.
class StreamStartEvent(Event):
def __init__(self, start_mark=None, end_mark=None, encoding=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.encoding = encoding
class StreamEndEvent(Event):
pass
class DocumentStartEvent(Event):
def __init__(self, start_mark=None, end_mark=None,
explicit=None, version=None, tags=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.explicit = explicit
self.version = version
self.tags = tags
class DocumentEndEvent(Event):
def __init__(self, start_mark=None, end_mark=None,
explicit=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.explicit = explicit
class AliasEvent(NodeEvent):
pass
class ScalarEvent(NodeEvent):
def __init__(self, anchor, tag, implicit, value,
start_mark=None, end_mark=None, style=None):
self.anchor = anchor
self.tag = tag
self.implicit = implicit
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
class SequenceStartEvent(CollectionStartEvent):
pass
class SequenceEndEvent(CollectionEndEvent):
pass
class MappingStartEvent(CollectionStartEvent):
pass
class MappingEndEvent(CollectionEndEvent):
pass
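# A minimal sketch of emitting a document from these event classes, assuming
# PyYAML's top-level `yaml.emit` helper is available alongside this module:
#
#     import yaml
#     print(yaml.emit([
#         StreamStartEvent(),
#         DocumentStartEvent(explicit=False),
#         ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='hello'),
#         DocumentEndEvent(explicit=False),
#         StreamEndEvent(),
#     ]))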
|
PypiClean
|
/suap-duf-0.3.16.tar.gz/suap-duf-0.3.16/docs/big_picture.rst
|
Big Picture
===========
This document explains all of the concepts used in Django URL Filter
in context hence providing a "big picture" of how it works.
Basics
------
In order to filter any data, this library breaks the process
into 3 phases:
1. Parse the URL querystring into :class:`.LookupConfig`
2. Loop through all the configs and generate :class:`.FilterSpec` when possible
3. Use the list of specs to actually filter data
And here is a bit more information about each phase.
Parsing
+++++++
Fundamentally a querystring is a collection of key-value pairs.
As such, this data is naturally flat and is usually represented
as a simple dictionary::
?foo=bar&happy=rainbows => {
'foo': 'bar',
'happy': 'rainbows',
}
.. note::
Technically, this is not 100% true since keys
can be repeated, which is why Django uses ``QueryDict``, but for
the purposes of this discussion, let's assume no duplicate keys
are present.
The filtering however is not flat. Each querystring key can be nested
when using nested :class:`.FilterSet` and in addition it can optionally
contain a lookup. For example::
?foo=bar
?foo__contains=bar
?foo__nested=bar
?foo__nested__contains=bar
?foo__nested__othernested=bar
?foo__nested__othernested__contains=bar
In order to accommodate the nested structure of querystring keys,
Django URL Filter parses all querystring key-value pairs into
nested dictionaries. For example::
?foo__nested__othernested=bar => {
'foo': {
'nested': {
'othernested': 'bar'
}
}
}
?foo__nested__othernested__contains=bar => {
'foo': {
'nested': {
'othernested': {
'contains': 'bar'
}
}
}
}
That is essentially what :class:`.LookupConfig` stores. Since these dictionaries
are flat (each dictionary has at most one key), it also provides some utility
properties for dealing with such data. You can refer to the
:class:`.LookupConfig` API documentation for more
information.
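The transformation from flat keys to nested dictionaries is mechanical.
A minimal sketch of the idea (not the actual implementation) is::
    def to_nested_dict(key, value):
        result = value
        for part in reversed(key.split('__')):
            result = {part: result}
        return result
    to_nested_dict('foo__nested__othernested', 'bar')
    # {'foo': {'nested': {'othernested': 'bar'}}}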
Filter Specification
++++++++++++++++++++
As mentioned in :doc:`README <index>`, Django URL Filter decouples parsing
of querystring and filtering. It achieves that by constructing filter
specifications which have all necessary information to filter data
without actually filtering data. That's what :class:`.FilterSpec` is.
It stores 3 required pieces of information on how to filter data
(a short sketch follows the list below):
* Which attribute to filter on. Since models can be related by attributes
of related models, this actually ends up being a list of attributes which
we call ``components``.
* Lookup to use to filter data. This specifies how the value should be
compared while doing filtering. Example is ``exact``, ``contains``.
By default only lookups from Django ORM are supported however custom
:class:`.CallableFilter` can be used to define custom lookups.
* If the filter is negated. For example, to filter when username is ``'foo'``
or filter when username is not ``'foo'``.
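For example, ``?user__email__contains=gmail`` could conceptually produce a
spec along these lines (the constructor arguments shown are illustrative
rather than the exact signature)::
    FilterSpec(components=['user', 'email'],
               lookup='contains',
               value='gmail',
               is_negated=False)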
Filtering
+++++++++
Since filtering is decoupled from the :class:`.FilterSet`, the actual
filtering is delegated to a specified filter backend. The backend is very simple.
It takes a list of filter specifications and the data to filter, and its
job is to filter that data as specified in the specifications.
.. note::
Currently we only support a handful of backends such as Django ORM,
SQLAlchemy and plain Python iterables filter backends
but you can imagine that any backend can be implemented.
Eventually filter backends can be added for more exotic sources
like Mongo, Redis, etc.
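Conceptually, a backend for plain Python iterables only needs to apply each
spec to every item. A rough sketch under that assumption (not the real
backend) looks like::
    def filter_iterable(items, specs):
        lookups = {
            'exact': lambda a, b: a == b,
            'contains': lambda a, b: b in a,
        }
        for spec in specs:
            def matches(item, spec=spec):
                value = item
                for component in spec.components:
                    value = getattr(value, component)
                result = lookups[spec.lookup](value, spec.value)
                return not result if spec.is_negated else result
            items = [item for item in items if matches(item)]
        return items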
Steps
-----
The above information hopefully puts things in perspective; here is a more
detailed step-by-step guide to what Django URL Filter does behind the scenes:
#. :class:`.FilterSet` is instantiated with querystring data as well as
queryset to filter.
#. :class:`.FilterSet` is asked to filter given data via
:meth:`filter <url_filter.filtersets.base.FilterSet.filter>` method
which kicks in all the steps below.
#. :class:`.FilterSet` finds all filters it is capable of filtering
via :meth:`get_filters <url_filter.filtersets.base.FilterSet.get_filters>`.
This is where custom filtersets can hook into to do custom stuff like
extracting filters from a Django model.
#. :class:`.FilterSet` binds all child filters to itself via
:meth:`bind <url_filter.filters.BaseFilter.bind>`.
This practically sets :attr:`parent <url_filter.filters.BaseFilter.parent>`
and :attr:`name <url_filter.filters.BaseFilter.name>` attributes.
#. Root :class:`.FilterSet` loops through all querystring pairs and generates
:class:`.LookupConfig` for all of them.
#. Root :class:`.FilterSet` loops through all generated configs and attempts to
find an appropriate filter to use to generate a spec for the given config.
The matching happens by the first key in the :class:`.LookupConfig` dict.
If that key is found in available filters, that filter is used and
otherwise that config is skipped. This is among the reasons why
:class:`.LookupConfig` is used since it allows this step to be very simple.
#. If an appropriate filter is found, the nested config is passed to the child
filter, which then goes through a very similar process as in the previous step
until it gets to a leaf filter.
#. Leaf :class:`.Filter` gets the config. It then checks if the config is still
nested. For example if the config is simply a value (e.g. ``'bar'``)
or is still a dictionary (e.g. ``{'contains': 'bar'}``).
If the config is just a value, it then uses a default lookup for that
filter as provided in ``default_lookup`` parameter when instantiating
:class:`.Filter`. If the config is a dictionary, it makes sure that it is a
valid lookup config (e.g. it's not ``{'rainbows': {'contains': 'bar'}}``
since it would not know what to do with ``rainbows``, which is not a
valid lookup value).
#. Now that :class:`.Filter` validated the lookup itself, it cleans the actual
filter value by using either ``form_field`` as passed as parameter
when instantiating :class:`.Filter` or by using lookup overwrite.
Overwrites are necessary for more exotic lookups like ``in`` or ``year``
since they need to validate data in a different way.
#. If the value is valid, then the leaf filter constructs a :class:`.FilterSpec`
since it has all the necessary information to do that - 1) all filter
component names from all ancestors (e.g. all attributes which
should be accessed on the queryset to get to the value to be filtered on);
2) the actual filter value and 3) if the filter is negated.
#. At this point, root :class:`.FilterSet` will get the :class:`.FilterSpec` as
bubbled up from the leaf filter. If any ``ValidationError`` exceptions
are raised, then depending on ``strict_mode``, it will either ignore
errors or will propagate them up to the caller of the filterset.
#. Once all specs are collected from all the querystring key-value-pairs,
root :class:`.FilterSet` instantiates a filter backend and passes it
all the specs.
#. Finally root :class:`.FilterSet` uses the filter backend to filter
given queryset and returns the results to the user.
Some important things to note:
* Root :class:`.FilterSet` does all the looping over querystring data and
generated configurations.
* Children filters of a root :class:`.FilterSet` are only responsible for
generating :class:`.FilterSpec` and in the process of validating the data.
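Putting it all together, a typical (simplified) use with a Django model could
look like the following, where ``UserFilterSet`` is illustrative and
``request`` is an incoming Django request::
    from django.contrib.auth.models import User
    from url_filter.filtersets import ModelFilterSet
    class UserFilterSet(ModelFilterSet):
        class Meta(object):
            model = User
    fs = UserFilterSet(data=request.GET, queryset=User.objects.all())
    filtered_users = fs.filter()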
|
PypiClean
|
/binhash-0.2.3.tar.gz/binhash-0.2.3/README.md
|
# Compression Techniques for sparse binary data
## In Development
## Prerequisites ##
* Python 2.7 or higher
* [NumPy](http://numpy.org)
* [scikit learn](https://scikit-learn.org/stable/)
* Standard libraries: pickle, random, re
## Usage
```
from BinHash import hasher
corpus = 'path_to_the_folder_containing_documents'
d = 10000
k = 500
myhasher = hasher(corpus, d, k)
sample_text = "this is a sample text"
sample_hash = myhasher.hash_text(sample_text)
```
## Citation
Please cite these papers in your publications if they help your research
```
@inproceedings{DBLP:conf/pakdd/PratapSK18,
author = {Rameshwar Pratap and
Ishan Sohony and
Raghav Kulkarni},
title = {Efficient Compression Technique for Sparse Sets},
booktitle = {Advances in Knowledge Discovery and Data Mining - 22nd Pacific-Asia
Conference, {PAKDD} 2018, Melbourne, VIC, Australia, June 3-6, 2018,
Proceedings, Part {III}},
pages = {164--176},
year = {2018},
crossref = {DBLP:conf/pakdd/2018-3},
url = {https://doi.org/10.1007/978-3-319-93040-4\_14},
doi = {10.1007/978-3-319-93040-4\_14},
timestamp = {Tue, 19 Jun 2018 09:13:55 +0200},
biburl = {https://dblp.org/rec/bib/conf/pakdd/PratapSK18},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{compression,
author = {Rameshwar Pratap and
Raghav Kulkarni and
Ishan Sohony},
title = {Efficient Dimensionality Reduction for Sparse Binary Data},
booktitle = {IEEE International Conference on BIG DATA, Accepted},
year = {2018}
}
```
|
PypiClean
|
/dafni_cli-0.0.1b3-py3-none-any.whl/dafni_cli/api/session.py
|
import json
import os
import time
from dataclasses import dataclass
from io import BufferedReader
from pathlib import Path
from typing import BinaryIO, Callable, Dict, List, Literal, Optional, Union
import click
import requests
from requests import HTTPError
from dafni_cli.api.exceptions import DAFNIError, EndpointNotFoundError, LoginError
from dafni_cli.consts import (
LOGIN_API_ENDPOINT,
LOGOUT_API_ENDPOINT,
MAX_SSL_ERROR_RETRY_ATTEMPTS,
REQUESTS_TIMEOUT,
SESSION_COOKIE,
SESSION_SAVE_FILE,
SSL_ERROR_RETRY_WAIT,
URLS_REQUIRING_COOKIE_AUTHENTICATION,
)
from dafni_cli.utils import dataclass_from_dict
@dataclass
class LoginResponse:
"""Dataclass for storing the response from logging in"""
access_token: Optional[str] = None
refresh_token: Optional[str] = None
def was_successful(self):
"""
Returns whether this login response represents a successful login
"""
return self.access_token is not None and self.refresh_token is not None
@dataclass
class SessionData:
"""Dataclass for storing information about a logged in session (This will
be stored for session persistence)"""
username: str
access_token: str
refresh_token: str
@staticmethod
def from_login_response(username: str, login_response: LoginResponse):
"""
Constructs a session data object and returns it
Args:
username (str): Username to identify the session with
login_response (LoginResponse): Structure containing the response
from logging in
"""
return SessionData(
username=username,
access_token=login_response.access_token,
refresh_token=login_response.refresh_token,
)
class DAFNISession:
"""Handles user login and authentication"""
# Session data
_session_data: SessionData
# Whether the session data was loaded/saved from/to a file
# (avoids creating/deleting the file if using as a library
# instead of through the CLI)
_use_session_data_file: bool = False
def __init__(self, session_data: Optional[SessionData] = None):
"""DAFNISession constructor
Args:
session_data (SessionData or None) - Session data built using
information obtained after login. When None will
attempt to load the last session from a file or
otherwise will request the user to login.
"""
if session_data is None:
self._use_session_data_file = True
self._obtain_session_data()
else:
self._session_data = session_data
@staticmethod
def _get_login_save_path():
"""Returns the filepath to save login responses to"""
return Path().home() / SESSION_SAVE_FILE
@staticmethod
def has_session_file():
"""Returns whether the session file exists and hence whether the user's
still logged in
"""
return DAFNISession._get_login_save_path().is_file()
@property
def username(self):
"""Username associated with the current session"""
return self._session_data.username
def _save_session_data(self):
"""Saves the SessionData instance to a storage file to persist the session"""
with open(DAFNISession._get_login_save_path(), "w", encoding="utf-8") as file:
file.write(json.dumps(self._session_data.__dict__))
def _assign_session_data(self, username: str, login_response: LoginResponse):
"""Assigns and if _use_session_data_file is True, saves session data
obtained from a successful login response"""
self._session_data = SessionData.from_login_response(username, login_response)
if self._use_session_data_file:
self._save_session_data()
def _load_session_data(self) -> bool:
"""Attempts to load a SessionData instance from the storage file
Returns:
bool: Whether the session data was loaded or not
"""
path = DAFNISession._get_login_save_path()
if not path.is_file():
return False
with open(path, "r", encoding="utf-8") as file:
dictionary = json.loads(file.read())
self._session_data = dataclass_from_dict(SessionData, dictionary)
return True
def _obtain_session_data(self):
"""Tries to load a previously stored LoginResponse, or obtains a new
one and saves it by asking the user to login if the storage file was
not found"""
# Attempt to get from a file first
if not self._load_session_data():
# Couldn't so request a login
self.attempt_login()
def _refresh_tokens(self):
"""Obtains a new access token and stores it
Will attempt to request one using the currently stored refresh token,
but in the case it has expired will ask the user to login again.
"""
# Request a new refresh token
response = requests.post(
LOGIN_API_ENDPOINT,
data={
"client_id": "dafni-main",
"grant_type": "refresh_token",
"refresh_token": self._session_data.refresh_token,
},
timeout=REQUESTS_TIMEOUT,
)
if response.status_code == 400 and response.json()["error"] == "invalid_grant":
# This means the refresh token has expired, so login again
self.attempt_login()
else:
response.raise_for_status()
login_response = dataclass_from_dict(LoginResponse, response.json())
if not login_response.was_successful():
raise LoginError("Unable to refresh login.")
self._session_data = SessionData.from_login_response(
self._session_data.username, login_response
)
if self._use_session_data_file:
self._save_session_data()
def logout(self):
"""Logs out of keycloak"""
response = requests.post(
LOGOUT_API_ENDPOINT,
headers={
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": f"Bearer {self._session_data.access_token}",
},
data={
"client_id": "dafni-main",
"refresh_token": self._session_data.refresh_token,
"scope": "openid",
},
timeout=REQUESTS_TIMEOUT,
)
response.raise_for_status()
if self._use_session_data_file:
self._get_login_save_path().unlink()
@staticmethod
def _login(username: str, password: str) -> LoginResponse:
"""Returns a LoginResponse having logged in with a username and
password
Returns:
LoginResponse - If 'was_successful' is false, it means the username
or password given was likely wrong
"""
response = requests.post(
LOGIN_API_ENDPOINT,
data={
"username": username,
"password": password,
"client_id": "dafni-main",
"grant_type": "password",
"scope": "openid",
},
timeout=REQUESTS_TIMEOUT,
)
# When status_code is 401 => The username or password is wrong and
# there has not been any other error
if response.status_code != 401:
response.raise_for_status()
login_response = dataclass_from_dict(LoginResponse, response.json())
return login_response
@staticmethod
def login(username: str, password: str):
"""Returns a DAFNISession object after logging in with a username and
password
Raises:
LoginError - If login fails and its likely down to something other
than a bad password
"""
login_response = DAFNISession._login(username, password)
if not login_response.was_successful():
raise LoginError(
"Failed to login. Please check your username and password and try again."
)
return DAFNISession(SessionData.from_login_response(username, login_response))
def _attempt_login_from_env(self) -> bool:
"""Attempts to login using environment variables (if found)
If environment variables are found but login fails, the program will exit.
Returns:
bool: True if a username and password were found in the
environment, False otherwise
"""
username = os.getenv("DAFNI_USERNAME")
password = os.getenv("DAFNI_PASSWORD")
if username is not None and password is not None:
# Attempt login
login_response = self._login(username, password)
if not login_response.was_successful():
click.echo(
"Failed to login from environment variables. Please check your username and password and try again."
)
raise SystemExit(1)
self._assign_session_data(username, login_response)
return True
return False
def _request_user_login(self):
"""Prompts the user for their username and password. If login is
successful, notifies the user that login has been completed and displays
their username
"""
# Continue requesting the username and password for as long as the
# login fails to recognise them
login_response = None
while login_response is None or not login_response.was_successful():
username = click.prompt("Username")
password = click.prompt("Password", hide_input=True)
login_response = self._login(username, password)
if not login_response.was_successful():
click.echo(
"Failed to login. Please check your username and password and try again."
)
self._assign_session_data(username, login_response)
click.echo(f"Logged in as {self.username}")
def attempt_login(self):
"""First attempts to find login credentials from environment variables
and if that fails prompts the user to enter a username and password
until login is successful"""
if not self._attempt_login_from_env():
self._request_user_login()
# Listed below this point are various methods for performing specific HTTP
# requests using the session data
def _authenticated_request(
self,
method: Literal["get", "post", "put", "patch", "delete"],
url: str,
headers: dict,
data: Union[dict, BinaryIO],
json,
allow_redirect: bool,
stream: Optional[bool] = None,
retry_callback: Optional[Callable] = None,
auth_recursion_level: int = 0,
ssl_recursion_level: int = 0,
) -> requests.Response:
"""Performs an authenticated request from the DAFNI API
Args:
url (str): The url endpoint that is being queried
headers (dict): Headers to include in the request (authorisation
will already be added)
data (dict or BinaryIO): Data to be included in the request
json: Any JSON serialisable object to include in the request
allow_redirect (bool): Flag to allow redirects during API call.
stream (Optional[bool]): Whether to stream the request
retry_callback (Optional[Callable]): Function called when the
request is retried e.g. after a token refresh
or if there is an SSLError. Particularly useful
for file uploads that may need to be reset.
auth_recursion_level (int): Number of times this method has
been recursively called due to an authentication
issue (Used to avoid infinite loops)
ssl_recursion_level (int): Number of times this method has
been recursively called due to an SSLError
(Used to avoid infinite loops)
Returns:
requests.Response: Response from the requests library
"""
# Should we retry the request for any reason
retry = False
try:
# Switch to cookie based authentication only for those that require it
if any(
url_requiring_cookie in url
for url_requiring_cookie in URLS_REQUIRING_COOKIE_AUTHENTICATION
):
response = requests.request(
method,
url=url,
headers=headers,
data=data,
json=json,
allow_redirects=allow_redirect,
stream=stream,
timeout=REQUESTS_TIMEOUT,
cookies={SESSION_COOKIE: self._session_data.access_token},
)
else:
response = requests.request(
method,
url=url,
headers={
"Authorization": f"Bearer {self._session_data.access_token}",
**headers,
},
data=data,
json=json,
allow_redirects=allow_redirect,
stream=stream,
timeout=REQUESTS_TIMEOUT,
)
# Check for any kind of authentication error, or an attempted redirect
# (this covers a case during file upload where a 302 is returned rather
# than an actual authentication error)
if response.status_code == 403 or (
response.status_code == 302 and not allow_redirect
):
# Try again, but only once
if auth_recursion_level > 1:
# Provide further details from the response (if there is
# anything) - one place this occurs is running out of
# temporary buckets during upload
message = response.content.decode()
raise RuntimeError(f"Could not authenticate request: {message}")
else:
self._refresh_tokens()
retry = True
auth_recursion_level += 1
except requests.exceptions.SSLError as err:
# Retry if below the maximum number of retries
if ssl_recursion_level >= MAX_SSL_ERROR_RETRY_ATTEMPTS:
raise RuntimeError(
f"Could not connect due to an SSLError after retrying {MAX_SSL_ERROR_RETRY_ATTEMPTS} times"
) from err
else:
# Workaround for https://github.com/dafnifacility/cli/issues/113
# Retry up to MAX_SSL_ERROR_RETRY_ATTEMPTS times, waiting for
# SSL_ERROR_RETRY_WAIT seconds between each attempt
retry = True
ssl_recursion_level += 1
time.sleep(SSL_ERROR_RETRY_WAIT)
if retry:
# It seems in the event we need to retry the request, requests
# still reads at least a small part of any file being uploaded -
# this for example can result in the validation of some metadata
# files to fail citing that they are missing all parameters when
# in fact they are defined. Resetting any file reader here
# solves the issue.
if isinstance(data, BufferedReader):
data.seek(0)
# When tqdm is also involved we cannot quite apply the same
# solution so allow a callback function that can be used to
# reset the original file and any progress bars
if retry_callback is not None:
retry_callback()
response = self._authenticated_request(
method,
url=url,
headers=headers,
data=data,
json=json,
stream=stream,
allow_redirect=allow_redirect,
retry_callback=retry_callback,
auth_recursion_level=auth_recursion_level,
ssl_recursion_level=ssl_recursion_level,
)
return response
def get_error_message(self, response: requests.Response) -> Optional[str]:
"""Attempts to find an error message from a failed request response
Args:
response (requests.Response): The failed request response
Returns:
Optional[str]: String representing an error message or None
if none was found
"""
# Try and get JSON data from the response
try:
error_message = None
decoded_response = response.json()
# Some requests have an error and error_message, in which case we
# want to override with the latter
if "error" in decoded_response:
error_message = f"Error: {decoded_response['error']}"
if "error_message" in decoded_response:
error_message = f"{error_message}, {decoded_response['error_message']}"
elif "errors" in decoded_response:
error_message = "The following errors were returned:"
for error in decoded_response["errors"]:
error_message += f"\nError: {error}"
return error_message
except requests.JSONDecodeError:
return None
def _check_response(
self,
url: str,
response: requests.Response,
error_message_func: Callable[[requests.Response], Optional[str]] = None,
):
"""Checks a requests response for any errors and raises them as
required
Args:
url (str): URL endpoint that was being queried
response (requests.Response): Response from requests
error_message_func (Optional[Callable[[requests.Response], Optional[str]]]):
Function called on a response after an error to
obtain an error message. If it returns None, a
HTTPError will be returned, otherwise it will be
a DAFNIError. By default this will be
get_error_message.
Raises:
EndpointNotFoundError: If the response returns a 404 status code
DAFNIError: If an error occurs with an error message from DAFNI
HTTPError: If any other error occurs without an error message from
DAFNI
"""
if error_message_func is None:
error_message_func = self.get_error_message
error_message = None
# Check for any error response
if not response.ok:
# Specialised error for when we get a 404 - helps to identify
# missing objects
if response.status_code == 404:
raise EndpointNotFoundError(f"Could not find {url}")
# Attempt to find an error message from the API itself
error_message = error_message_func(response)
# If there is an error from DAFNI raise a DAFNI exception as well
# with more details, otherwise leave as any errors are HTTPError's
try:
response.raise_for_status()
except HTTPError as err:
if error_message is None:
raise err
raise DAFNIError(error_message) from err
def get_request(
self,
url: str,
content_type: str = "application/json",
allow_redirect: bool = False,
stream: bool = False,
error_message_func: Optional[
Callable[[requests.Response], Optional[str]]
] = None,
retry_callback: Optional[Callable] = None,
) -> Union[Dict, List[Dict], requests.Response]:
"""Performs a GET request from the DAFNI API
Args:
url (str): The url endpoint that is being queried
content_type (str): Content type to put in request header
allow_redirect (bool): Flag to allow redirects during API call.
Defaults to False.
stream (bool): Whether to stream the request. In this case will
return the response object itself rather than the
json.
error_message_func (Optional[Callable[[requests.Response], Optional[str]]]):
Function called on a response after an error to
obtain an error message. If it returns None, a
HTTPError will be returned, otherwise it will be
a DAFNIError. By default this will be
get_error_message.
retry_callback (Optional[Callable]): Function called when the
request is retried e.g. after a token refresh
or if there is an SSLError. Particularly useful
for file uploads that may need to be reset.
Returns:
Dict: When 'stream' is False for endpoints returning one object
e.g. /models/<version_id>
List[Dict]: When 'stream' is False for endpoints returning multiple
objects e.g. /models/
requests.Response: When 'stream' is True - The whole response object
Raises:
EndpointNotFoundError: If the response returns a 404 status code
DAFNIError: If an error occurs with an error message from DAFNI
HTTPError: If any other error occurs without an error message from
DAFNI
"""
response = self._authenticated_request(
method="get",
url=url,
headers={"Content-Type": content_type},
data=None,
json=None,
allow_redirect=allow_redirect,
stream=stream,
retry_callback=retry_callback,
)
self._check_response(url, response, error_message_func=error_message_func)
if stream:
return response
return response.json()
def post_request(
self,
url: str,
content_type: str = "application/json",
data: Optional[Union[dict, BinaryIO]] = None,
json=None,
allow_redirect: bool = False,
error_message_func: Optional[
Callable[[requests.Response], Optional[str]]
] = None,
retry_callback: Optional[Callable] = None,
) -> Dict:
"""Performs a POST request to the DAFNI API
Args:
url (str): The url endpoint that is being queried
content_type (str): Content type to put in request header
data (dict or BinaryIO): Data to be included in the request
json: Any JSON serialisable object to include in the request
allow_redirect (bool): Flag to allow redirects during API call.
Defaults to False.
error_message_func (Optional[Callable[[requests.Response], Optional[str]]]):
Function called on a response after an error to
obtain an error message. If it returns None, a
HTTPError will be returned, otherwise it will be
a DAFNIError. By default this will be
get_error_message.
retry_callback (Optional[Callable]): Function called when the
request is retried e.g. after a token refresh
or if there is an SSLError. Particularly useful
for file uploads that may need to be reset.
Returns:
Dict: The decoded json response
Raises:
EndpointNotFoundError: If the response returns a 404 status code
DAFNIError: If an error occurs with an error message from DAFNI
HTTPError: If any other error occurs without an error message from
DAFNI
"""
response = self._authenticated_request(
method="post",
url=url,
headers={"Content-Type": content_type},
data=data,
json=json,
allow_redirect=allow_redirect,
retry_callback=retry_callback,
)
self._check_response(url, response, error_message_func=error_message_func)
return response.json()
def put_request(
self,
url: str,
content_type: str = "application/json",
data: Optional[Union[dict, BinaryIO]] = None,
json=None,
allow_redirect: bool = False,
error_message_func: Optional[
Callable[[requests.Response], Optional[str]]
] = None,
retry_callback: Optional[Callable] = None,
) -> requests.Response:
"""Performs a PUT request to the DAFNI API
Args:
url (str): The url endpoint that is being queried
content_type (str): Content type to put in request header
data (dict or BinaryIO): Data to be included in the request
json: Any JSON serialisable object to include in the request
allow_redirect (bool): Flag to allow redirects during API call.
Defaults to False.
error_message_func (Optional[Callable[[requests.Response], Optional[str]]]):
Function called on a response after an error to
obtain an error message. If it returns None, a
HTTPError will be returned, otherwise it will be
a DAFNIError. By default this will be
get_error_message.
retry_callback (Optional[Callable]): Function called when the
request is retried e.g. after a token refresh
or if there is an SSLError. Particularly useful
for file uploads that may need to be reset.
Returns:
requests.Response: The response object
Raises:
EndpointNotFoundError: If the response returns a 404 status code
DAFNIError: If an error occurs with an error message from DAFNI
HTTPError: If any other error occurs without an error message from
DAFNI
"""
response = self._authenticated_request(
method="put",
url=url,
headers={"Content-Type": content_type},
data=data,
json=json,
allow_redirect=allow_redirect,
retry_callback=retry_callback,
)
self._check_response(url, response, error_message_func=error_message_func)
return response
def patch_request(
self,
url: str,
content_type: str = "application/json",
data: Optional[Union[dict, BinaryIO]] = None,
json=None,
allow_redirect: bool = False,
error_message_func: Optional[
Callable[[requests.Response], Optional[str]]
] = None,
retry_callback: Optional[Callable] = None,
) -> Dict:
"""Performs a PATCH request to the DAFNI API
Args:
url (str): The url endpoint that is being queried
content_type (str): Content type to put in request header
data (dict or BinaryIO): Data to be included in the request
json: Any JSON serialisable object to include in the request
allow_redirect (bool): Flag to allow redirects during API call.
Defaults to False.
error_message_func (Optional[Callable[[requests.Response], Optional[str]]]):
Function called on a response after an error to
obtain an error message. If it returns None, a
HTTPError will be returned, otherwise it will be
a DAFNIError. By default this will be
get_error_message.
retry_callback (Optional[Callable]): Function called when the
request is retried e.g. after a token refresh
or if there is an SSLError. Particularly useful
for file uploads that may need to be reset.
Returns:
Dict: The decoded json response
Raises:
EndpointNotFoundError: If the response returns a 404 status code
DAFNIError: If an error occurs with an error message from DAFNI
HTTPError: If any other error occurs without an error message from
DAFNI
"""
response = self._authenticated_request(
method="patch",
url=url,
headers={"Content-Type": content_type},
data=data,
json=json,
allow_redirect=allow_redirect,
retry_callback=retry_callback,
)
self._check_response(url, response, error_message_func=error_message_func)
return response.json()
def delete_request(
self,
url: str,
allow_redirect: bool = False,
error_message_func: Optional[
Callable[[requests.Response], Optional[str]]
] = None,
retry_callback: Optional[Callable] = None,
) -> requests.Response:
"""Performs a DELETE request to the DAFNI API
Args:
url (str): The url endpoint that is being queried
allow_redirect (bool): Flag to allow redirects during API call.
Defaults to False.
error_message_func (Optional[Callable[[requests.Response], Optional[str]]]):
Function called on a response after an error to
obtain an error message. If it returns None, a
HTTPError will be returned, otherwise it will be
a DAFNIError. By default this will be
get_error_message.
retry_callback (Optional[Callable]): Function called when the
request is retried e.g. after a token refresh
or if there is an SSLError. Particularly useful
for file uploads that may need to be reset.
Returns:
requests.Response: The response object
Raises:
EndpointNotFoundError: If the response returns a 404 status code
DAFNIError: If an error occurs with an error message from DAFNI
HTTPError: If any other error occurs without an error message from
DAFNI
"""
response = self._authenticated_request(
method="delete",
url=url,
headers={},
data=None,
json=None,
allow_redirect=allow_redirect,
retry_callback=retry_callback,
)
self._check_response(url, response, error_message_func=error_message_func)
return response
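# A minimal usage sketch (the URL below is illustrative and not a real DAFNI
# endpoint; credentials may also be supplied through the DAFNI_USERNAME and
# DAFNI_PASSWORD environment variables checked in attempt_login):
#
#     session = DAFNISession()  # loads a saved session or prompts for login
#     data = session.get_request("https://example.dafni.endpoint/models/")
#     session.logout()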
|
PypiClean
|
/coloquinte-0.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/coloquinte.py
|
import gzip
import lzma
import math
import os
import sys
import coloquinte_pybind
from coloquinte_pybind import (
CellOrientation,
CellRowPolarity,
ColoquinteParameters,
LegalizationModel,
NetModel,
PlacementStep,
Rectangle,
Row,
)
def _open_file(name, write=False):
"""
Open the file with the appropriate decompression method. In read mode, search for compressed versions if the exact name does not exist.
"""
mode = "wt" if write else "rt"
if name.endswith(".gz"):
return gzip.open(name, mode=mode)
elif name.endswith(".xz") or name.endswith(".lzma"):
return lzma.open(name, mode=mode)
elif write:
return open(name, mode=mode)
elif os.path.exists(name):
return open(name, mode=mode)
elif os.path.exists(name + ".gz"):
return gzip.open(name + ".gz", mode=mode)
elif os.path.exists(name + ".xz"):
return lzma.open(name + ".xz", mode=mode)
elif os.path.exists(name + ".lzma"):
return lzma.open(name + ".lzma", mode=mode)
else:
raise RuntimeError(f"Could not find file {name}")
def _read_aux(filename):
if os.path.isdir(filename):
dir_list = os.listdir(filename)
all_files = [f for f in dir_list if f.endswith(".aux")]
default_name = os.path.basename(os.path.normpath(filename)) + ".aux"
if len(all_files) == 1:
filename = os.path.join(filename, all_files[0])
elif len(all_files) > 1 and default_name in all_files:
filename = os.path.join(filename, default_name)
else:
raise RuntimeError(
f"There should be one file ending with .aux, got {', '.join(all_files)}"
)
elif not os.path.exists(filename):
filename = filename + ".aux"
dirname = os.path.dirname(filename)
files = []
with open(filename) as f:
for line in f:
for name in line.split():
files.append(name)
node_files = [n for n in files if n.endswith(".nodes")]
net_files = [n for n in files if n.endswith(".nets")]
pl_files = [n for n in files if n.endswith(".pl")]
scl_files = [n for n in files if n.endswith(".scl")]
if len(node_files) != 1:
raise RuntimeError("There should be a .nodes file in .aux")
if len(net_files) != 1:
raise RuntimeError("There should be a .nets file in .aux")
if len(pl_files) != 1:
raise RuntimeError("There should be a .pl file in .aux")
if len(scl_files) != 1:
raise RuntimeError("There should be a .scl file in .aux")
return (
filename,
os.path.join(dirname, node_files[0]),
os.path.join(dirname, net_files[0]),
os.path.join(dirname, pl_files[0]),
os.path.join(dirname, scl_files[0]),
)
def _parse_num_line(line):
tokens = line.split(":")
if len(tokens) != 2:
raise RuntimeError(f"Couldn't interpret <{line}> as <Key : Value>")
return int(tokens[1].strip())
def _read_nodes(filename):
nb_nodes = None
nb_terminals = None
nodes = []
with _open_file(filename) as f:
first_line_found = False
for line in f:
line = line.strip()
if len(line) == 0:
continue
if line.startswith("#"):
continue
if line.startswith("UCLA") and not first_line_found:
first_line_found = True
continue
if line.startswith("NumNodes"):
assert nb_nodes is None
nb_nodes = _parse_num_line(line)
continue
if line.startswith("NumTerminals"):
assert nb_terminals is None
nb_terminals = _parse_num_line(line)
continue
vals = line.split()
fixed = False
obstruction = True
if "terminal" in vals[1:]:
fixed = True
name = vals[0]
if len(vals) >= 3:
width, height = vals[1:3]
width = int(width)
height = int(height)
else:
# Dummy placed cell
width = 0
height = 0
obstruction = False
nodes.append((name, width, height, fixed, obstruction))
if nb_nodes is not None:
assert len(nodes) == nb_nodes
if nb_terminals is not None:
assert len([n for n in nodes if n[3]]) == nb_terminals
names = [n[0] for n in nodes]
widths = [n[1] for n in nodes]
heights = [n[2] for n in nodes]
fixed = [n[3] for n in nodes]
obstruction = [n[4] for n in nodes]
return names, widths, heights, fixed, obstruction
def _read_nets(filename, cell_names, cell_widths, cell_heights, sort_entries=False):
name_dir = dict((name, i) for i, name in enumerate(cell_names))
nb_nets = None
nb_pins = None
net_degree = None
nets = []
with _open_file(filename) as f:
first_line_found = False
for line in f:
line = line.strip()
if len(line) == 0:
continue
if line.startswith("#"):
continue
if line.startswith("UCLA") and not first_line_found:
first_line_found = True
continue
if line.startswith("NumNets"):
assert nb_nets is None
nb_nets = _parse_num_line(line)
continue
if line.startswith("NumPins"):
assert nb_pins is None
nb_pins = _parse_num_line(line)
continue
line = line.replace(":", " ")
if line.startswith("NetDegree"):
vals = line.split()
assert 2 <= len(vals) <= 3
net_degree = int(vals[1])
if len(vals) == 3:
name = vals[2]
else:
name = f"n{len(nets)}"
nets.append((name, net_degree, []))
continue
vals = line.split()
assert len(vals) == 4 or len(vals) == 2
if len(vals) == 4:
cell, direction, x, y = vals
x = float(x)
y = float(y)
else:
cell, direction = vals
x = 0.0
y = 0.0
assert cell in name_dir
nets[-1][2].append((name_dir[cell], direction, x, y))
total_pins = 0
for name, net_degree, pins in nets:
if net_degree != len(pins):
raise RuntimeError(
f"Net degree for {name} is {len(pins)}; expected {net_degree}"
)
total_pins += net_degree
if nb_nets is not None:
assert len(nets) == nb_nets
if nb_pins is not None:
assert total_pins == nb_pins
if sort_entries:
# Sort so that same-size nets are contiguous
nets.sort(key=lambda net: len(net[-1]))
cell_x_offset = [0.5 * c for c in cell_widths]
cell_y_offset = [0.5 * c for c in cell_heights]
ret = []
for name, _, pins in nets:
cells = []
pin_x = []
pin_y = []
for cell, _, x, y in pins:
cells.append(cell)
pin_x.append(int(round(cell_x_offset[cell] + x)))
pin_y.append(int(round(cell_y_offset[cell] + y)))
ret.append((name, cells, pin_x, pin_y))
return ret
def _read_place(filename, cell_names):
name_dir = dict((name, i) for i, name in enumerate(cell_names))
cell_x = [0 for i in cell_names]
cell_y = [0 for i in cell_names]
cell_orient = [None for i in cell_names]
with _open_file(filename) as f:
first_line_found = False
for line in f:
line = line.strip()
if len(line) == 0:
continue
if line.startswith("#"):
continue
if line.startswith("UCLA") and not first_line_found:
first_line_found = True
continue
line = line.replace(":", " ")
vals = line.split()
assert len(vals) >= 3
cell, x, y, orient = vals[:4]
assert cell in name_dir
cell_ind = name_dir[cell]
cell_x[cell_ind] = int(x)
cell_y[cell_ind] = int(y)
if orient in CellOrientation.__members__:
cell_orient[cell_ind] = CellOrientation.__members__[orient]
else:
raise RuntimeError(f"Unknown orientation encountered {orient}")
return cell_x, cell_y, cell_orient
def _read_rows(filename):
nb_rows = None
rows = []
with _open_file(filename) as f:
lines = [l.strip() for l in f]
for line in lines:
if line.startswith("NumRows"):
assert nb_rows is None
nb_rows = _parse_num_line(line)
row_descs = []
in_row = False
for line in lines:
if line.startswith("CoreRow"):
row_descs.append([])
in_row = True
elif line.startswith("End"):
in_row = False
elif in_row:
row_descs[-1].extend(line.replace(":", " ").split())
for desc in row_descs:
min_x = None
min_y = None
width = None
height = None
site_width = 1
orient = CellOrientation.N
for i in range(1, len(desc)):
if desc[i - 1].lower() == "coordinate":
min_y = int(desc[i])
if desc[i - 1].lower() == "subroworigin":
min_x = int(desc[i])
if desc[i - 1].lower() == "numsites":
width = int(desc[i])
if desc[i - 1].lower() == "height":
height = int(desc[i])
if desc[i - 1].lower() == "sitewidth":
site_width = int(desc[i])
if desc[i - 1].lower() == "siteorient":
if desc[i] in CellOrientation.__members__:
orient = CellOrientation.__members__[desc[i]]
width *= site_width
assert min_x is not None
assert min_y is not None
assert width is not None
assert height is not None
r = Rectangle(min_x, min_x + width, min_y, min_y + height)
rows.append(Row(r, orient))
return rows
def _str2bool(v):
"""
Parse boolean arguments with argparse, from
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
import argparse
raise argparse.ArgumentTypeError('Boolean value expected.')
class Circuit(coloquinte_pybind.Circuit):
def __init__(self, nb_cells):
super(Circuit, self).__init__(nb_cells)
self._filename = None
self._cell_name = None
self._net_name = None
@staticmethod
def read_ispd(filename, ignore_obstructions=False):
"""
Read an ISPD benchmark from its .aux file
"""
(
aux_filename,
node_filename,
net_filename,
pl_filename,
scl_filename,
) = _read_aux(filename)
cell_names, cell_widths, cell_heights, cell_fixed, cell_obstruction = _read_nodes(
node_filename
)
nets = _read_nets(net_filename, cell_names, cell_widths, cell_heights)
cell_x, cell_y, cell_orient = _read_place(pl_filename, cell_names)
rows = _read_rows(scl_filename)
ret = Circuit(len(cell_names))
ret._filename = os.path.splitext(aux_filename)[0]
# Setup cell properties
ret._cell_name = cell_names
ret.cell_width = cell_widths
ret.cell_height = cell_heights
ret.cell_is_fixed = cell_fixed
ret.cell_is_obstruction = cell_obstruction
if ignore_obstructions:
# All fixed cells marked as not obstructions
ret.cell_is_obstruction = [not f for f in cell_fixed]
# Setup nets and pins
ret._net_name = []
for name, cells, pins_x, pins_y in nets:
ret._net_name.append(name)
ret.add_net(cells, pins_x, pins_y)
# Setup initial cell placement
ret.cell_x = cell_x
ret.cell_y = cell_y
ret.cell_orientation = cell_orient
# Setup rows
ret.rows = rows
# Allow macros to have any orientation
row_height = ret.row_height
polarities = ret.cell_row_polarity
for i in range(ret.nb_cells):
if cell_heights[i] > 4 * row_height:
polarities[i] = CellRowPolarity.ANY
elif cell_heights[i] % row_height != 0:
polarities[i] = CellRowPolarity.NW
elif cell_heights[i] % row_height == 0:
polarities[i] = CellRowPolarity.SAME
ret.cell_row_polarity = polarities
ret.check()
return ret
def place_global(self, params, callback=None):
"""
Run the global placement
"""
if not isinstance(params, ColoquinteParameters):
if not isinstance(params, int):
raise TypeError("Argument should be an integer effort")
params = ColoquinteParameters(params)
super().place_global(params, callback)
def legalize(self, params, callback=None):
"""
Run the legalization
"""
if not isinstance(params, ColoquinteParameters):
if not isinstance(params, int):
raise TypeError("Argument should be an integer effort")
params = ColoquinteParameters(params)
super().legalize(params, callback)
def place_detailed(self, params, callback=None):
"""
Run the detailed placement
"""
if not isinstance(params, ColoquinteParameters):
if not isinstance(params, int):
raise TypeError("Argument should be an integer effort")
params = ColoquinteParameters(params)
super().place_detailed(params, callback)
def load_placement(self, filename):
if filename is None:
return
cell_x, cell_y, cell_orient = _read_place(filename, self._cell_name)
self.cell_x = cell_x
self.cell_y = cell_y
self.cell_orientation = cell_orient
def write_placement(self, filename):
"""
Write the placement result in ISPD file format
"""
if filename is None:
if self._filename is None:
raise RuntimeError("No filename to export placement to")
filename = os.path.splitext(self._filename)[0] + ".sol.pl"
with _open_file(filename, True) as f:
print("UCLA pl 1.0", file=f)
print("# Created by Coloquinte", file=f)
print("# https://github.com/Coloquinte/PlaceRoute", file=f)
print("", file=f)
cell_x = self.cell_x
cell_y = self.cell_y
cell_orientation = self.cell_orientation
cell_is_fixed = self.cell_is_fixed
for i in range(self.nb_cells):
name = self._cell_name[i]
x = cell_x[i]
y = cell_y[i]
orient = cell_orientation[i].name
if cell_is_fixed[i]:
orient += " /FIXED"
print(f"{name}\t{x}\t{y}\t: {orient}", file=f)
def write_image(self, filename, macros_only=False, image_width=2048):
img, scale_factor = self._make_image(image_width)
self._draw_rows(img, scale_factor)
self._draw_cells(img, True, scale_factor)
if not macros_only:
self._draw_cells(img, False, scale_factor)
self._save_image(img, filename)
def write_displacement(self, filename, pl1, pl2, image_width=2048):
img, scale_factor = self._make_image(image_width)
self._draw_rows(img, scale_factor)
self._draw_cells(img, True, scale_factor)
self._draw_displacement(img, pl1, pl2, scale_factor)
self._save_image(img, filename)
def _save_image(self, img, filename):
img.save(filename, lossless=True)
def _draw_area(self):
placement = self.cell_placement
rows = self.rows
min_x_c = min(pl.min_x for pl in placement)
min_y_c = min(pl.min_y for pl in placement)
max_x_c = max(pl.max_x for pl in placement)
max_y_c = max(pl.max_y for pl in placement)
min_x_r = min(r.min_x for r in rows)
min_y_r = min(r.min_y for r in rows)
max_x_r = max(r.max_x for r in rows)
max_y_r = max(r.max_y for r in rows)
return (min(min_x_c, min_x_r), min(min_y_c, min_y_r), max(max_x_c, max_x_r), max(max_y_c, max_y_r))
def _scale_factor(self, image_width):
min_x, min_y, max_x, max_y = self._draw_area()
max_allowed_ratio = 8
fact = max(1.0, (max_x - min_x) / image_width,
(max_y - min_y) / (max_allowed_ratio * image_width))
return fact, math.ceil((max_y - min_y) / fact)
def _make_image(self, image_width):
from PIL import Image
scale_factor, image_height = self._scale_factor(image_width)
img = Image.new("RGB", (image_width, image_height), "lightgray")
return img, scale_factor
def _draw_rows(self, img, scale_factor):
from PIL import ImageDraw
min_x, min_y, max_x, max_y = self._draw_area()
draw = ImageDraw.Draw(img)
rows = self.rows
for r in rows:
xmn = round((r.min_x - min_x) / scale_factor)
xmx = round((r.max_x - min_x) / scale_factor)
ymn = round((r.min_y - min_y) / scale_factor)
ymx = round((r.max_y - min_y) / scale_factor)
rect = [(xmn, ymn), (xmx, ymx)]
draw.rectangle(rect, fill="white")
def _draw_cells(self, img, macros, scale_factor):
from PIL import ImageDraw
min_x, min_y, max_x, max_y = self._draw_area()
draw = ImageDraw.Draw(img)
placement = self.cell_placement
fixed = self.cell_is_fixed
row_height = self.row_height
for i, pl in enumerate(placement):
if fixed[i] != macros:
continue
xmn = round((pl.min_x - min_x) / scale_factor)
xmx = round((pl.max_x - min_x) / scale_factor)
ymn = round((pl.min_y - min_y) / scale_factor)
ymx = round((pl.max_y - min_y) / scale_factor)
rect = [(xmn, ymn), (xmx, ymx)]
if xmn == xmx or ymn == ymx:
continue
if fixed[i]:
# Fixed cells and macros
draw.rectangle(rect, fill="gray", outline="black", width=1)
elif pl.height > 4 * row_height:
# Movable macros
draw.rectangle(rect, fill="aqua",
outline="mediumblue", width=1)
else:
# Nice standard cells (or close enough)
draw.rectangle(rect, fill="blue",
outline="mediumblue", width=1)
return img
def _draw_displacement(self, img, pl1, pl2, scale_factor):
from PIL import ImageDraw
min_x, min_y, max_x, max_y = self._draw_area()
draw = ImageDraw.Draw(img)
fixed = self.cell_is_fixed
assert len(pl1) == len(pl2)
for i in range(self.nb_cells):
if not fixed[i]:
x1 = (pl1[i].min_x + pl1[i].max_x) / 2
x2 = (pl2[i].min_x + pl2[i].max_x) / 2
y1 = (pl1[i].min_y + pl1[i].max_y) / 2
y2 = (pl2[i].min_y + pl2[i].max_y) / 2
x1 = round((x1 - min_x) / scale_factor)
x2 = round((x2 - min_x) / scale_factor)
y1 = round((y1 - min_y) / scale_factor)
y2 = round((y2 - min_y) / scale_factor)
draw.line([(x1, y1), (x2, y2)], fill="red", width=1)
draw.arc([x1 - 1, y1 - 1, x1 + 1, y1 + 1],
0, 360, fill="black")
def _add_arguments(parser, obj, prefix):
import argparse
for name in obj.__dir__():
if name.startswith("_"):
continue
if name in ["check", "seed"]:
continue
child = getattr(obj, name)
arg_type = type(child)
if arg_type in (int, float):
parser.add_argument(
"--" + ".".join(prefix + [name, ]),
type=arg_type,
metavar=name.upper(),
help=argparse.SUPPRESS
)
elif arg_type is bool:
parser.add_argument(
"--" + ".".join(prefix + [name, ]),
type=_str2bool,
metavar=name.upper(),
help=argparse.SUPPRESS
)
elif '__members__' in child.__dir__():
parser.add_argument(
"--" + ".".join(prefix + [name, ]),
choices=list(arg_type.__members__.keys()),
metavar=name.upper(),
help=argparse.SUPPRESS
)
else:
_add_arguments(parser, child, prefix + [name,])
def _parse_arguments(args, obj, prefix):
for name in obj.__dir__():
if name.startswith("_"):
continue
if name in ["check", "seed"]:
continue
argname = ".".join(prefix + [name, ])
child = getattr(obj, name)
arg_type = type(child)
if arg_type in (int, float, bool) or "__members__" in child.__dir__():
val = getattr(args, argname)
if val is not None:
new_val = val
old_val = getattr(obj, name)
if arg_type not in (int, float, bool):
val = arg_type.__members__[val]
old_val = old_val.name
print(
f"Overloading {argname} placement parameter "
f"({old_val} -> {new_val})"
)
setattr(obj, name, val)
else:
_parse_arguments(args, child, prefix + [name, ])
def _show_params(obj, tabs):
for name in obj.__dir__():
if name.startswith("_"):
continue
if name in ["check", "seed"]:
continue
child = getattr(obj, name)
arg_type = type(child)
if arg_type in (int, float, bool) or "__members__" in child.__dir__():
if arg_type not in (int, float, bool):
child = child.name
p = "\t" * tabs
print(f"{p}{name}: {child}")
for name in obj.__dir__():
if name.startswith("_"):
continue
if name in ["check", "seed"]:
continue
child = getattr(obj, name)
arg_type = type(child)
if arg_type in (int, float, bool) or "__members__" in child.__dir__():
continue
p = "\t" * tabs
print(f"{p}{name}: ")
_show_params(child, tabs + 1)
class OptimizationCallback:
def __init__(self, circuit, prefix, image_width, extension):
self.circuit = circuit
self.step = 1
self.prefix = prefix
self.image_width = image_width
self.extension = extension
self.save_view = True
self.save_displacement = True
self.prev_placement = None
self.history = []
def __call__(self, step_name):
if self.save_view:
filename = f"{self.prefix}_{self.step:04d}_{step_name.name.lower()}.{self.extension}"
self.circuit.write_image(filename, image_width=self.image_width)
self.save_graph()
if self.save_displacement:
if self.prev_placement is not None:
filename = f"{self.prefix}_{self.step:04d}_{step_name.name.lower()}_disp.{self.extension}"
self.circuit.write_displacement(
filename, self.prev_placement, self.circuit.cell_placement, image_width=self.image_width)
self.prev_placement = self.circuit.cell_placement
self.history.append((self.step, step_name, self.circuit.hpwl()))
self.step += 1
def save_graph(self):
import matplotlib.pyplot as plt
filename = f"{self.prefix}_WL.{self.extension}"
plt.title("Wirelength over time")
plt.xlabel("Step")
plt.ylabel("Wirelength")
for step_name in (
PlacementStep.LowerBound,
PlacementStep.UpperBound,
PlacementStep.Detailed,
):
steps = []
vals = []
for step, name, val in self.history:
if name == step_name:
steps.append(step)
vals.append(val)
if len(vals) > 0:
plt.plot(steps, vals, label=step_name.name)
plt.legend(loc="upper left")
plt.savefig(filename)
plt.clf()
def main():
"""
Run the whole placement algorithm from the command line
"""
import argparse
parser = argparse.ArgumentParser(
description="Place a benchmark circuit from the command line",
usage="coloquinte [-h] [--effort EFFORT] [--seed SEED] [--load-solution FILE] [--save-solution FILE] instance",
)
parser.add_argument("instance", help="Benchmark instance", nargs="?")
parser.add_argument("--effort", help="Placement effort",
type=int, default=3)
parser.add_argument("--seed", help="Random seed", type=int, default=-1)
parser.add_argument(
"--load-solution", help="Load initial placement", metavar="FILE"
)
parser.add_argument("--save-solution",
help="Save final placement", metavar="FILE")
parser.add_argument(
"--show-parameters",
help="Show available tuning parameters and their current value",
action="store_true",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--report-only", help="Load the circuit and return", action="store_true")
group.add_argument(
"--no-global", help="Skip global placement", action="store_true")
group.add_argument(
"--only-global",
help="Run only global placement (no legalization)",
action="store_true",
)
group.add_argument(
"--no-detailed", help="Skip detailed placement", action="store_true"
)
parser.add_argument(
"--ignore-macros",
help="Ignore fixed placement obstructions",
action="store_true",
dest="ignore_obstructions",
)
# Prefix to save images
parser.add_argument("--save-images", help=argparse.SUPPRESS, type=str)
# Save intermediate placement images
parser.add_argument(
"--save-all-images", help=argparse.SUPPRESS, action="store_true"
)
parser.add_argument(
"--save-displacement", help=argparse.SUPPRESS, action="store_true"
)
parser.add_argument(
"--save-graph", help=argparse.SUPPRESS, action="store_true")
# Width of the saved images
parser.add_argument(
"--image-width", help=argparse.SUPPRESS, type=int, default=1080)
# File extension for the saved images
parser.add_argument(
"--image-extension", help=argparse.SUPPRESS, type=str, default="webp"
)
tuning_options = parser.add_argument_group("tuning options")
_add_arguments(tuning_options, ColoquinteParameters(), [])
args = parser.parse_args()
print(f"Placement effort {args.effort}, seed {args.seed}")
params = ColoquinteParameters(args.effort, args.seed)
_parse_arguments(args, params, [])
params.check()
if args.show_parameters:
print("Parameters can be set using command line options. For example, --detailed.nb_passes 2")
print("Current parameter values:")
_show_params(params, 1)
return
if args.instance is None:
parser.print_help()
return
circuit = Circuit.read_ispd(args.instance, args.ignore_obstructions)
print(circuit.report())
if args.ignore_obstructions:
print("Ignoring macros for standard cell placement")
if args.load_solution is not None:
print(f"Loading initial solution from {args.load_solution}")
circuit.load_placement(args.load_solution)
sys.stdout.flush()
callback = None
if args.save_images is not None:
circuit.write_image(
f"{args.save_images}_macros.{args.image_extension}", True, args.image_width
)
if args.load_solution is not None:
circuit.write_image(
f"{args.save_images}_initial.{args.image_extension}", False, args.image_width
)
if args.save_all_images or args.save_graph:
callback = OptimizationCallback(
circuit, args.save_images, args.image_width, args.image_extension
)
callback.save_view = args.save_all_images
callback.save_displacement = args.save_displacement
if args.load_solution is not None:
callback.prev_placement = circuit.cell_placement
if args.report_only:
return
if args.no_global:
print("Global placement skipped at user's request")
else:
circuit.place_global(params, callback)
sys.stdout.flush()
if args.only_global:
print("Legalization and detailed placement skipped at user's request")
elif args.no_detailed:
circuit.legalize(params, callback)
print("Detailed placement skipped at user's request")
else:
circuit.place_detailed(params, callback)
circuit.write_placement(args.save_solution)
if args.save_images is not None:
circuit.write_image(
f"{args.save_images}_placed.{args.image_extension}", False, args.image_width
)
if callback is not None:
callback.save_graph()
__all__ = [
"Circuit",
"GlobalPlacerParameters",
"DetailedPlacerParameters",
"Rectangle",
"LegalizationModel",
"NetModel",
"CellOrientation",
"main",
]
if __name__ == "__main__":
main()
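# Usage sketch (added for illustration, not part of the original module): the same
# placement flow that main() assembles from command-line arguments can be driven
# directly from Python. The benchmark path below is hypothetical.
#
#   circuit = Circuit.read_ispd("benchmarks/adaptec1", False)
#   params = ColoquinteParameters(3, -1)  # effort 3, default random seed
#   circuit.place_global(params, None)
#   circuit.place_detailed(params, None)
#   circuit.write_image("adaptec1_placed.webp", image_width=2048)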
|
PypiClean
|
/azure-mgmt-servicenetworking-1.0.0b1.zip/azure-mgmt-servicenetworking-1.0.0b1/azure/mgmt/servicenetworking/aio/operations/_operations.py
|
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.servicenetworking.aio.ServiceNetworkingMgmtClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
"""List the operations for the provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicenetworking.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-10-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.ServiceNetworking/operations"}
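# Usage sketch (illustration only, not part of the generated client): the credential
# and subscription id below are placeholders, and the client constructor signature is
# an assumption based on the docstring above.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.servicenetworking.aio import ServiceNetworkingMgmtClient
#
#   async def print_operations():
#       async with ServiceNetworkingMgmtClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           async for op in client.operations.list():
#               print(op.name)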
|
PypiClean
|
/zpyshell-0.1.2.0.zip/zpyshell-0.1.2.0/Zpy/languages/python/python_completer.py
|
from prompt_toolkit.completion import Completion
class PythonCompleter():
def __init__(self):
self.last_command = {
"command": "",
"command_arguments" : ""
}
self.completions = []
# List of completions
# Taken from `xonsh` and `pygments` modules
completion_1 = ('__import__', 'import', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'bytes',
'chr', 'classmethod', 'cmp', 'compile', 'complex', 'delattr', 'dict',
'dir', 'divmod', 'enumerate', 'eval', 'filter', 'float', 'format',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'list',
'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct',
'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed',
'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str',
'sum', 'super', 'tuple', 'type', 'vars', 'zip')
completion_2 = ('__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
'unichr', 'unicode', 'vars', 'xrange', 'zip')
completion_3 = ('assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
'print', 'raise', 'return', 'try', 'while', 'yield',
'yield from', 'as', 'with', 'from')
completion_4 = ('and', 'else', 'for', 'if', 'in', 'is', 'lambda', 'not', 'or',
'+', '-', '/', '//', '%', '**', '|', '&', '~', '^', '>>', '<<', '<',
'<=', '>', '>=', '==', '!=', ',', '?', '??')
completion_5 = ('as', 'assert', 'break', 'class', 'continue', 'def', 'del',
'elif', 'except', 'finally:', 'from', 'global', 'import',
'nonlocal', 'pass', 'raise', 'return', 'try:', 'while', 'with',
'yield ', '-', '/', '//', '%', '**', '|', '&', '~', '^', '>>', '<<',
'<', '<=', '->', '=', '+=', '-=', '*=', '/=', '%=', '**=',
'>>=', '<<=', '&=', '^=', '|=', '//=', ';', ':', '..')
self.command_list = set(completion_1) | set(completion_2) | set(completion_3) | set(completion_4) | set(completion_5)
def get_python_completion(self, line):
"""
Get completion for python
:param line: line for completions
:return: list of completions or empty list
"""
return list(filter(lambda x : x.startswith(line), self.command_list))
def complete(self, line):
"""
:param line: Complete line
:return: generator of completion
>>> completer = PythonCompleter()
>>> "with" in [i.text for i in list(completer.complete('with'))]
True
>>> "import" in [i.text for i in list(completer.complete('import'))]
True
>>> "somecommm" in [i.text for i in list(completer.complete('import'))]
False
>>> [i.text for i in list(completer.complete('for im'))]
['import']
"""
if len(line) > 0 and line[-1] == " ":
# End of command, do not complete
return
commands = line.strip().split(' ')
if len(commands) == 1:
# Command without arguments
command = commands[0]
# Check whether this command was already used in a previous search (cache lookup)
if not line.startswith(self.last_command['command']) or len(self.last_command['command']) == 0:
self.last_command = {
"command": command,
"command_arguments": ""
}
self.completions = self.get_python_completion(line)
for completion in filter(lambda x: x.startswith(line), self.completions):
yield Completion(completion, start_position=-len(line))
else:
# Check for arguments
arguments = commands[1:]
arguments_joined = " ".join(arguments)
if not arguments_joined.startswith(self.last_command["command_arguments"]) or len(
self.last_command['command_arguments']) == 0:
self.last_command["command_arguments"] = arguments_joined
# Recurse to complete the last argument
completions = self.complete(arguments[-1])
for completion in completions:
yield Completion(completion.text, start_position=-len(arguments[-1]))
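# Usage sketch (illustration only): the completer yields prompt_toolkit Completion
# objects and can be queried directly, as the doctests above show.
def _example_completions(prefix="imp"):
    completer = PythonCompleter()
    # e.g. ['import', ...] for the prefix "imp"
    return [c.text for c in completer.complete(prefix)]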
|
PypiClean
|
/tensorflow_datasets-4.9.2-py3-none-any.whl/tensorflow_datasets/datasets/squad/squad_dataset_builder.py
|
from __future__ import annotations
import json
import os
from etils import epath
import numpy as np
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.question_answering import qa_utils
_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
_HOMEPAGE_URL = "https://rajpurkar.github.io/SQuAD-explorer/"
def _v2_features():
return tfds.features.FeaturesDict({
"id": np.str_,
"title": tfds.features.Text(),
"context": tfds.features.Text(),
"plausible_answers": tfds.features.Sequence({
"text": tfds.features.Text(),
"answer_start": np.int32,
}),
"question": tfds.features.Text(),
"is_impossible": np.bool_,
"answers": tfds.features.Sequence({
"text": tfds.features.Text(),
"answer_start": np.int32,
}),
})
def _generate_v2_examples(filepath):
"""Returns v2 examples."""
with epath.Path(filepath).open() as f:
squad = json.load(f)
for article in squad["data"]:
title = article.get("title", "")
for paragraph in article["paragraphs"]:
context = paragraph["context"]
for qa in paragraph["qas"]:
id_ = qa["id"]
# Not all examples have plausible answers
if "plausible_answers" not in qa:
qa["plausible_answers"] = []
question = qa["question"]
is_impossible = qa["is_impossible"]
plausible_answer_starts = [
plausible_answer["answer_start"]
for plausible_answer in qa["plausible_answers"]
]
plausible_answers = [
plausible_answer["text"]
for plausible_answer in qa["plausible_answers"]
]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"] for answer in qa["answers"]]
yield id_, {
"title": title,
"context": context,
"question": question,
"id": id_,
"plausible_answers": {
"answer_start": plausible_answer_starts,
"text": plausible_answers,
},
"answers": {
"answer_start": answer_starts,
"text": answers,
},
"is_impossible": is_impossible,
}
class SquadConfig(tfds.core.BuilderConfig):
"""BuilderConfig for SQUAD."""
def __init__(self, *, train_file, dev_file, **kwargs):
super(SquadConfig, self).__init__(**kwargs)
self.train_file = train_file
self.dev_file = dev_file
class Builder(tfds.core.GeneratorBasedBuilder):
"""SQUAD: The Stanford Question Answering Dataset."""
BUILDER_CONFIGS = [
SquadConfig(
name="v1.1",
description="Version 1.1.0 of SQUAD",
train_file="train-v1.1.json",
dev_file="dev-v1.1.json",
),
SquadConfig(
name="v2.0",
description="Version 2.0.0 of SQUAD",
train_file="train-v2.0.json",
dev_file="dev-v2.0.json",
),
]
VERSION = tfds.core.Version("3.0.0")
RELEASE_NOTES = {
"3.0.0": (
"Fixes issue with small number of examples (19) where answer spans "
"are misaligned due to context white-space removal."
),
}
def _info(self):
if self.builder_config.name == "v1.1":
features_dict = qa_utils.squadlike_features()
elif self.builder_config.name == "v2.0":
features_dict = _v2_features()
else:
raise AssertionError("Dataset version should be either 1.1 or 2.0")
return self.dataset_info_from_configs(
features=features_dict,
# No default supervised_keys (as we have to pass both question
# and context as input).
supervised_keys=None,
homepage=_HOMEPAGE_URL,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": os.path.join(_URL, self.builder_config.train_file),
"dev": os.path.join(_URL, self.builder_config.dev_file),
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={"filepath": downloaded_files["train"]},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={"filepath": downloaded_files["dev"]},
),
]
def _generate_examples(self, filepath):
if self.builder_config.name == "v1.1":
return qa_utils.generate_squadlike_examples(filepath)
return _generate_v2_examples(filepath)
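# Usage sketch (illustration only): once the builder is registered, the dataset is
# loaded through the standard TFDS entry point; the config names match
# BUILDER_CONFIGS above.
#
#   import tensorflow_datasets as tfds
#   ds = tfds.load("squad/v1.1", split="train")
#   for example in ds.take(1):
#       print(example["question"])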
|
PypiClean
|
/vaine-widget-0.1.0a0.tar.gz/vaine-widget-0.1.0a0/vaine/util.py
|
# Copyright (c) 2020, Pacific Northwest National Laboratories
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from itertools import product, combinations, permutations
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.spatial.distance import squareform
from scipy.stats import pearsonr
import networkx as nx
def grid_from_product(rows, cols, s=4, ar=1, row_format=None, col_format=None, **kwargs):
n_rows = len(rows)
n_cols = len(cols)
fd = {
'fontweight': 'bold'
}
plt.figure(figsize=(ar*s*n_cols, s*n_rows))
for d, (r, c) in enumerate(product(rows, cols)):
ax = plt.subplot(n_rows, n_cols, d + 1, **kwargs)
i = d//n_cols
j = d%n_cols
if i == 0:
plt.title(
c if col_format is None else col_format(c),
fontdict=fd
)
if j == 0:
plt.ylabel(
r if row_format is None else row_format(r),
fontdict=fd
)
yield r, c, ax
def sig(p, bins=np.array([.001, .01, .05])):
return ''.join(['*']*(p <= bins).sum())
def reorder(data, absolute=False, return_corr=False, approx=False, threshold=0, split=True):
if data.shape[1] > 6:
approx = True
modified_corr = corr = pd.DataFrame(
squareform([
pearsonr(data[r], data[c])[0]
for r, c in combinations(data, 2)
]),
index=list(data),
columns=list(data)
).fillna(0)
if absolute:
modified_corr = modified_corr.abs()
modified_corr = modified_corr*(modified_corr >= threshold)
if approx:
G = nx.from_pandas_adjacency(modified_corr)
data = data[nx.spectral_ordering(G)]
else:
values = modified_corr.values
split = int(split == True)
def objective(ii):
jj = np.roll(ii, 1)
return values[ii[split:], jj[split:]].sum()
best = max(
map(np.array, permutations(range(len(values)))),
key=objective
)
data = data[data.columns[best]]
if return_corr:
order = list(data)
return data, corr.loc[order, order]
return data
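# Usage sketch (illustration only): reorder() permutes the columns of a DataFrame so
# that strongly correlated columns end up next to each other; with return_corr=True
# it also returns the correlation matrix in the same column order.
def _example_reorder(n_rows=100, seed=0):
    rng = np.random.default_rng(seed)
    df = pd.DataFrame(rng.normal(size=(n_rows, 4)), columns=list("abcd"))
    ordered, corr = reorder(df, absolute=True, return_corr=True)
    return list(ordered), corr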
|
PypiClean
|
/Eskapade-1.0.0-py3-none-any.whl/eskapade/analysis/links/hist_filler.py
|
import histogrammar as hg
import numpy as np
from eskapade.analysis import histogram_filling as hf
from eskapade.analysis.histogram_filling import HistogramFillerBase
class HistogrammarFiller(HistogramFillerBase):
"""Fill histogrammar sparse-bin histograms.
Algorithm to fill histogrammar style sparse-bin and category histograms.
It is possible to do after-filling cleaning of these histograms by rejecting certain
keys or removing inconsistent data types. Timestamp columns are
converted to nanoseconds before the binning is applied. Final histograms
are stored in the datastore.
Example is available in: tutorials/esk303_hgr_filler_plotter.py
"""
def __init__(self, **kwargs):
"""Initialize link instance.
Store and do basic check on the attributes of link HistogrammarFiller.
:param str name: name of link
:param str read_key: key of input data to read from data store
:param str store_key: key of output data to store histograms in data store
:param list columns: columns to pick up from input data (default is all columns)
:param dict bin_specs: dictionaries used for rebinning numeric or timestamp columns
Example bin_specs dictionary is:
>>> bin_specs = {'x': {'bin_width': 1, 'bin_offset': 0},
'y': {'bin_edges': [0, 2, 3, 4, 5, 7, 8]}}
:param dict var_dtype: dict of datatypes of the columns to study from dataframe
If not provided, try to determine datatypes directly from the dataframe.
:param dict quantity: dictionary of lambda functions describing how to parse certain columns
Example quantity dictionary is:
>>> quantity = {'y': lambda x: x}
:param bool store_at_finalize: Store histograms in datastore at finalize(), not at
execute(). Useful when looping over datasets. Default is False.
:param dict drop_keys: dictionary used for dropping specific keys from bins dictionaries of histograms
Example drop_keys dictionary is:
>>> drop_keys = {'x': [1, 4, 8, 19],
'y': ['apple', 'pear', 'tomato'],
'x:y': [(1, 'apple'), (19, 'tomato')]}
"""
# initialize Link, pass name from kwargs
if 'name' not in kwargs:
kwargs['name'] = 'HistogrammarFiller'
HistogramFillerBase.__init__(self, **kwargs)
# process and register all relevant kwargs. kwargs are added as attributes of the link.
# second arg is default value for an attribute. key is popped from kwargs.
self._process_kwargs(kwargs,
quantity={})
def fill_histogram(self, idf, columns):
"""Fill input histogram with column(s) of input dataframe.
:param idf: input data frame used for filling histogram
:param list columns: histogram column(s)
"""
name = ':'.join(columns)
if name not in self._hists:
# create an (empty) histogram of right type
self._hists[name] = self.construct_empty_hist(columns)
hist = self._hists[name]
# do the actual filling
clm = columns[0] if len(columns) == 1 else columns
hist.fill.numpy(idf[clm])
# remove specific keys from histogram before merging, if so requested
hist.bins = self.drop_requested_keys(name, hist.bins)
self._hists[name] = hist
def construct_empty_hist(self, columns):
"""Create an (empty) histogram of right type.
Create a multi-dim histogram by iterating through the columns in
reverse order and passing a single-dim hist as input to the next
column.
:param list columns: histogram columns
:returns: created histogram
:rtype: histogrammar.Count
"""
hist = hg.Count()
# create a multi-dim histogram by iterating through the columns in reverse order
# and passing a single-dim hist as input to the next column
revcols = list(reversed(columns))
for idx, col in enumerate(revcols):
# histogram type depends on the data type
dt = np.dtype(self.var_dtype[col])
# processing function, e.g. only accept booleans during filling
f = self.quantity.get(col, hf.QUANTITY[dt.type])
if len(columns) == 1:
# df[col] is a pd.series
quant = lambda x, fnc=f: fnc(x) # noqa
else:
# df[columns] is a pd.Dataframe
# fix column to col
quant = lambda x, fnc=f, clm=col: fnc(x[clm]) # noqa
is_number = isinstance(dt.type(), np.number)
is_timestamp = isinstance(dt.type(), np.datetime64)
if is_number or is_timestamp:
# numbers and timestamps are put in a sparse binned histogram
bs = self.bin_specs.get(col, self._unit_bin_specs if is_number else self._unit_timestamp_specs)
hist = hg.SparselyBin(binWidth=bs['bin_width'], origin=bs['bin_offset'], quantity=quant, value=hist)
else:
# strings and booleans are treated as categories
hist = hg.Categorize(quantity=quant, value=hist)
# decorators; adding them here doesn't seem to work!
#hist.n_dim = get_n_dim(hist)
#selected_cols = revcols[:idx+1]
#dta = [self.var_dtype[col] for col in reversed(selected_cols)]
#hist.datatype = dta[0] if hist.n_dim==1 else dta
# FIXME stick data types and number of dimension to histogram
dta = [self.var_dtype[col] for col in columns]
hist.datatype = dta[0] if len(columns) == 1 else dta
hist.n_dim = len(columns)
return hist
def process_and_store(self):
"""Process and store histogrammar objects."""
# fix histogrammar contentType bug for n-dim histograms
# convert boolean keys to string
for name, hist in self._hists.items():
hgr_fix_contentType(hist)
hgr_convert_bool_to_str(hist)
hist.n_bins = get_n_bins(hist)
# put hists in datastore as normal
HistogramFillerBase.process_and_store(self)
def hgr_fix_contentType(hist):
"""Fix missing contentType attribute of histogrammar histogram.
Patch up missing contentType where needed; needed for toJson() call
:param hist: input histogrammar histogram
"""
# nothing left to fix?
if isinstance(hist, hg.Count):
return
# patch up missing contentType where needed; needed for toJson() call
if hist is not None:
if not hasattr(hist, 'contentType'):
hist.contentType = 'Count'
# 1. loop through bins
if hasattr(hist, 'bins'):
for h in hist.bins.values():
hgr_fix_contentType(h)
# 2. loop through values
elif hasattr(hist, 'values'):
for h in hist.values:
hgr_fix_contentType(h)
# 3. process attributes if present
if hasattr(hist, 'value'):
hgr_fix_contentType(hist.value)
if hasattr(hist, 'underflow'):
hgr_fix_contentType(hist.underflow)
if hasattr(hist, 'overflow'):
hgr_fix_contentType(hist.overflow)
if hasattr(hist, 'nanflow'):
hgr_fix_contentType(hist.nanflow)
def hgr_convert_bool_to_str(hist):
"""Convert boolean keys to string.
Convert boolean keys to string; needed for toJson() call
:param hist: input histogrammar histogram
"""
# nothing left to fix?
if isinstance(hist, hg.Count):
return
# 1. loop through bins
if hasattr(hist, 'bins'):
kys = list(hist.bins.keys())
for k in kys:
if isinstance(k, (bool, np.bool_)):
hist.bins[str(k)] = hist.bins.pop(k)
for h in hist.bins.values():
hgr_convert_bool_to_str(h)
# 2. loop through values
elif hasattr(hist, 'values'):
for h in hist.values:
hgr_convert_bool_to_str(h)
# 3. process attributes if present
if hasattr(hist, 'value'):
hgr_convert_bool_to_str(hist.value)
if hasattr(hist, 'underflow'):
hgr_convert_bool_to_str(hist.underflow)
if hasattr(hist, 'overflow'):
hgr_convert_bool_to_str(hist.overflow)
if hasattr(hist, 'nanflow'):
hgr_convert_bool_to_str(hist.nanflow)
def get_n_dim(cls):
"""Histogram dimension
:returns: dimension of the histogram
:rtype: int
"""
if isinstance(cls, hg.Count):
return 0
# histogram may have a subhistogram. Extract it and recurse
if hasattr(cls, 'values'):
hist = cls.values[0] if cls.values else hg.Count()
elif hasattr(cls, 'bins'):
hist = list(cls.bins.values())[0] if cls.bins else hg.Count()
else:
hist = hg.Count()
return 1 + get_n_dim(hist)
def get_n_bins(cls):
"""Get number of bins."""
if hasattr(cls, 'num'):
return cls.num
elif hasattr(cls, 'size'):
return cls.size
else:
raise RuntimeError('Cannot retrieve number of bins from hgr hist.')
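# Usage sketch (illustration only, mirroring the keyword arguments documented in
# HistogrammarFiller.__init__): the read_key, store_key, columns and bin_specs values
# below are hypothetical and depend on what is present in the datastore.
#
#   hf_link = HistogrammarFiller(name='hgr_filler',
#                                read_key='input_df',
#                                store_key='hists',
#                                columns=['x', 'y', 'x:y'],
#                                bin_specs={'x': {'bin_width': 1, 'bin_offset': 0}})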
|
PypiClean
|
/sonatype_nexus_sdk-0.1.3-py3-none-any.whl/nexus_sdk/paths/v1_repositories_r_hosted_repository_name/put.py
|
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
from dataclasses import dataclass
from datetime import date, datetime # noqa: F401
import frozendict # noqa: F401
import typing_extensions # noqa: F401
import urllib3
from urllib3._collections import HTTPHeaderDict
from nexus_sdk import schemas # noqa: F401
from nexus_sdk import api_client, exceptions
from nexus_sdk.model.r_hosted_repository_api_request import RHostedRepositoryApiRequest
from . import path
# Path params
RepositoryNameSchema = schemas.StrSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'repositoryName': typing.Union[RepositoryNameSchema, str, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_repository_name = api_client.PathParameter(
name="repositoryName",
style=api_client.ParameterStyle.SIMPLE,
schema=RepositoryNameSchema,
required=True,
)
# body param
SchemaForRequestBodyApplicationJson = RHostedRepositoryApiRequest
request_body_body = api_client.RequestBody(
content={
'application/json': api_client.MediaType(
schema=SchemaForRequestBodyApplicationJson),
},
)
@dataclass
class ApiResponseFor204(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
]
headers: schemas.Unset = schemas.unset
_response_for_204 = api_client.OpenApiResponse(
response_cls=ApiResponseFor204,
)
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
]
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
)
@dataclass
class ApiResponseFor403(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
]
headers: schemas.Unset = schemas.unset
_response_for_403 = api_client.OpenApiResponse(
response_cls=ApiResponseFor403,
)
_status_code_to_response = {
'204': _response_for_204,
'401': _response_for_401,
'403': _response_for_403,
}
class BaseApi(api_client.Api):
@typing.overload
def _update_repository30_oapg(
self,
content_type: typing_extensions.Literal["application/json"] = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor204,
]: ...
@typing.overload
def _update_repository30_oapg(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor204,
]: ...
@typing.overload
def _update_repository30_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _update_repository30_oapg(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor204,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _update_repository30_oapg(
self,
content_type: str = 'application/json',
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Update R hosted repository
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_repository_name,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
_fields = None
_body = None
if body is not schemas.unset:
serialized_data = request_body_body.serialize(body, content_type)
_headers.add('Content-Type', content_type)
if 'fields' in serialized_data:
_fields = serialized_data['fields']
elif 'body' in serialized_data:
_body = serialized_data['body']
response = self.api_client.call_api(
resource_path=used_path,
method='put'.upper(),
headers=_headers,
fields=_fields,
body=_body,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(
status=response.status,
reason=response.reason,
api_response=api_response
)
return api_response
class UpdateRepository30(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def update_repository30(
self,
content_type: typing_extensions.Literal["application/json"] = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor204,
]: ...
@typing.overload
def update_repository30(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor204,
]: ...
@typing.overload
def update_repository30(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def update_repository30(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor204,
api_client.ApiResponseWithoutDeserialization,
]: ...
def update_repository30(
self,
content_type: str = 'application/json',
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._update_repository30_oapg(
body=body,
path_params=path_params,
content_type=content_type,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForput(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def put(
self,
content_type: typing_extensions.Literal["application/json"] = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor204,
]: ...
@typing.overload
def put(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor204,
]: ...
@typing.overload
def put(
self,
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def put(
self,
content_type: str = ...,
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor204,
api_client.ApiResponseWithoutDeserialization,
]: ...
def put(
self,
content_type: str = 'application/json',
body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset,
path_params: RequestPathParams = frozendict.frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._update_repository30_oapg(
body=body,
path_params=path_params,
content_type=content_type,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
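# Usage sketch (illustration only): the repository name below is hypothetical, the
# request body fields are omitted, and the client construction is an assumption based
# on the generated api_client module rather than documented usage.
#
#   api = ApiForput(api_client.ApiClient(configuration))  # configuration is a placeholder
#   api.put(
#       path_params={'repositoryName': 'r-hosted'},
#       body=RHostedRepositoryApiRequest(...),  # fields omitted for brevity
#   )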
|
PypiClean
|