/v3/model/v3_data_volume.py
import pprint
import re
import six
class V3DataVolume:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'cluster_id': 'str',
'cluster_type': 'str',
'extend_param': 'dict(str, object)',
'hwpassthrough': 'bool',
'size': 'int',
'volumetype': 'str',
'metadata': 'DataVolumeMetadata'
}
attribute_map = {
'cluster_id': 'cluster_id',
'cluster_type': 'cluster_type',
'extend_param': 'extendParam',
'hwpassthrough': 'hw:passthrough',
'size': 'size',
'volumetype': 'volumetype',
'metadata': 'metadata'
}
def __init__(self, cluster_id=None, cluster_type=None, extend_param=None, hwpassthrough=None, size=None, volumetype=None, metadata=None):
"""V3DataVolume - a model defined in huaweicloud sdk"""
self._cluster_id = None
self._cluster_type = None
self._extend_param = None
self._hwpassthrough = None
self._size = None
self._volumetype = None
self._metadata = None
self.discriminator = None
if cluster_id is not None:
self.cluster_id = cluster_id
if cluster_type is not None:
self.cluster_type = cluster_type
if extend_param is not None:
self.extend_param = extend_param
if hwpassthrough is not None:
self.hwpassthrough = hwpassthrough
self.size = size
self.volumetype = volumetype
if metadata is not None:
self.metadata = metadata
@property
def cluster_id(self):
"""Gets the cluster_id of this V3DataVolume.
ID of the storage pool backing the cloud server's system disk. Used only for Dedicated Cloud clusters; it is the storage pool ID of Dedicated Distributed Storage (DSS), i.e. dssPoolID. To obtain it, see the ID field in "Table 3 Response parameters" of "Querying Details About a Dedicated Distributed Storage Pool".
:return: The cluster_id of this V3DataVolume.
:rtype: str
"""
return self._cluster_id
@cluster_id.setter
def cluster_id(self, cluster_id):
"""Sets the cluster_id of this V3DataVolume.
ID of the storage pool backing the cloud server's system disk. Used only for Dedicated Cloud clusters; it is the storage pool ID of Dedicated Distributed Storage (DSS), i.e. dssPoolID. To obtain it, see the ID field in "Table 3 Response parameters" of "Querying Details About a Dedicated Distributed Storage Pool".
:param cluster_id: The cluster_id of this V3DataVolume.
:type: str
"""
self._cluster_id = cluster_id
@property
def cluster_type(self):
"""Gets the cluster_type of this V3DataVolume.
Disk storage type of the cloud server's system disk. Used only for Dedicated Cloud clusters; the value is fixed to dss.
:return: The cluster_type of this V3DataVolume.
:rtype: str
"""
return self._cluster_type
@cluster_type.setter
def cluster_type(self, cluster_type):
"""Sets the cluster_type of this V3DataVolume.
Disk storage type of the cloud server's system disk. Used only for Dedicated Cloud clusters; the value is fixed to dss.
:param cluster_type: The cluster_type of this V3DataVolume.
:type: str
"""
self._cluster_type = cluster_type
@property
def extend_param(self):
"""Gets the extend_param of this V3DataVolume.
Disk extension parameters. For values, see the description of the "extendparam" parameter in [Creating an ECS](https://support.huaweicloud.com/api-ecs/zh-cn_topic_0020212668.html).
:return: The extend_param of this V3DataVolume.
:rtype: dict(str, object)
"""
return self._extend_param
@extend_param.setter
def extend_param(self, extend_param):
"""Sets the extend_param of this V3DataVolume.
Disk extension parameters. For values, see the description of the "extendparam" parameter in [Creating an ECS](https://support.huaweicloud.com/api-ecs/zh-cn_topic_0020212668.html).
:param extend_param: The extend_param of this V3DataVolume.
:type: dict(str, object)
"""
self._extend_param = extend_param
@property
def hwpassthrough(self):
"""Gets the hwpassthrough of this V3DataVolume.
- Pay attention to this parameter when creating a VM with an SDI flavor: a value of true means the created volume is of SCSI type. - When the node pool type is ElasticBMS, this parameter must be set to true.
:return: The hwpassthrough of this V3DataVolume.
:rtype: bool
"""
return self._hwpassthrough
@hwpassthrough.setter
def hwpassthrough(self, hwpassthrough):
"""Sets the hwpassthrough of this V3DataVolume.
- Pay attention to this parameter when creating a VM with an SDI flavor: a value of true means the created volume is of SCSI type. - When the node pool type is ElasticBMS, this parameter must be set to true.
:param hwpassthrough: The hwpassthrough of this V3DataVolume.
:type: bool
"""
self._hwpassthrough = hwpassthrough
@property
def size(self):
"""Gets the size of this V3DataVolume.
Disk size in GB. - System disk: 40 to 1024. - Data disk: 100 to 32768.
:return: The size of this V3DataVolume.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this V3DataVolume.
Disk size in GB. - System disk: 40 to 1024. - Data disk: 100 to 32768.
:param size: The size of this V3DataVolume.
:type: int
"""
self._size = size
@property
def volumetype(self):
"""Gets the volumetype of this V3DataVolume.
Disk type. For values, see the "root_volume field description" in Creating an ECS. - SATA: common I/O, a disk type backed by SATA storage. - SAS: high I/O, a disk type backed by SAS storage. - SSD: ultra-high I/O, a disk type backed by SSD storage.
:return: The volumetype of this V3DataVolume.
:rtype: str
"""
return self._volumetype
@volumetype.setter
def volumetype(self, volumetype):
"""Sets the volumetype of this V3DataVolume.
Disk type. For values, see the "root_volume field description" in Creating an ECS. - SATA: common I/O, a disk type backed by SATA storage. - SAS: high I/O, a disk type backed by SAS storage. - SSD: ultra-high I/O, a disk type backed by SSD storage.
:param volumetype: The volumetype of this V3DataVolume.
:type: str
"""
self._volumetype = volumetype
@property
def metadata(self):
"""Gets the metadata of this V3DataVolume.
:return: The metadata of this V3DataVolume.
:rtype: DataVolumeMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V3DataVolume.
:param metadata: The metadata of this V3DataVolume.
:type: DataVolumeMetadata
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V3DataVolume):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
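# --- Usage sketch (illustrative; not part of the generated SDK file) ---
# A minimal example of constructing a data volume and serializing it. The
# concrete values (100 GB, volume type "SAS") are assumptions for illustration.
if __name__ == "__main__":
    volume = V3DataVolume(size=100, volumetype="SAS")
    print(volume.to_str())  # pretty-printed dict of the model's attributes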
/matrix-angular-sdk-0.6.8.tar.gz/matrix-angular-sdk-0.6.8/README.rst
Matrix Angular SDK
==================
.. DANGER::
**matrix-angular-sdk is not currently being maintained or developed by the core
team, and whilst stable it has some serious performance issues; Angular makes it
a bit too easy to shoot yourself in the foot and doesn't help you escape when
you do so. All of our current focus is going into the
https://github.com/matrix-org/matrix-js-sdk, https://github.com/matrix-org/matrix-react-sdk
and https://github.com/matrix-org/matrix-react-skin stack instead - please use
those rather than this if you want support from the core team. Thanks!**
.. image:: http://matrix.org/jenkins/buildStatus/icon?job=SynapseWebClient
:target: http://matrix.org/jenkins/job/SynapseWebClient/
This project provides AngularJS services for implementing the `Client-Server API`_
on Matrix_: an open standard for interoperable Instant Messaging and VoIP. It
comes shipped with Synapse_: a homeserver reference implementation.
This project also provides a complete, stand-alone client which can communicate
with Matrix home servers using a web browser.
The Synapse_ homeserver ships the latest stable version of this library. If you
wish it to serve up a development copy instead, then you must configure this
checkout to be picked up by synapse::
$ python setup.py develop --user
Running
=======
To run the stand-alone client, the ``syweb/webclient`` folder must be hosted.
This can most easily be achieved by::
cd syweb/webclient
python -m SimpleHTTPServer
(``SimpleHTTPServer`` is the Python 2 module; on Python 3, run ``python3 -m http.server`` instead.)
Navigate to ``http://localhost:8000`` to see the client.
Bugs / Feature Requests
=======================
Think you've found a bug? Want a new feature on the client? Please open an issue
on JIRA:
- Create an account and login to https://matrix.org/jira
- Navigate to the ``SYWEB`` project.
- Click **Create Issue** - Please be as descriptive as possible, with reproduction
  steps where possible.
All issues in JIRA are **public**.
Contributing
============
Want to fix a bug or add a new feature? Check JIRA first to see if someone else is
handling this issue. If no one is actively working on the issue, then please fork
the ``develop`` branch when writing your fix, and open a pull request when you're
ready. Do not base your pull requests off ``master``.
Configuration
=============
The web client can be configured by adding a ``config.js`` file in the
``syweb/webclient`` directory. This includes configuration for setting up ReCaptcha.
An example file can be found at ``syweb/webclient/config.sample.js``.
Structure
=========
The ``app`` directory contains the SDK, which is split up into subfolders depending
on the logical scope of the code. The ``components`` directory contains reusable
components which are used in many places. More specific directories such as ``home``
and ``settings`` contain code specific to that part of the app: e.g. the home screen
and settings page respectively.
The `Client-Server API`_ is encapsulated as an AngularJS service called ``matrixService``.
There are also complementary services such as ``eventStreamService`` which handle more
complex non-HTTP client logic.
Services can be used independently provided their dependencies are satisfied.
* ``matrixService`` is provided at the lowest level, as it just wraps the raw HTTP calls.
* ``modelService`` allows models of matrix objects to be accessed, such as ``User``,
``Room``, ``RoomState`` and ``RoomMember``, and provides convenience functions to perform
HTTP calls on these objects (e.g. ``Room.leave``).
* ``eventHandlerService`` interprets raw Matrix events and determines what needs to be
stored with the ``modelService``.
* ``eventStreamService`` controls long-polling behaviour on the ``/events`` HTTP call.
* ``typingService`` controls the submission of typing events into a room.
* ``presenceService`` controls the submission of presence events.
Alternatively, you can use different controllers and html templates and leave the services
to work together as is.
Tests
=====
Tests are contained in the `test directory`_. They require
Karma (running PhantomJS) and Jasmine 2.x+ in order to run. Assuming you have the
required karma plugins, you can run the tests by running ``karma start`` in the
test directory.
Attributions
============
File icons are taken from http://medialoot.com/item/free-flat-filetype-icons/ and
distributed under the terms of the Paid License (invoice #7355)
Keyboard and GIF icon from icons8: http://icons8.com/
.. _Synapse: https://github.com/matrix-org/synapse/
.. _Matrix: http://www.matrix.org
.. _Client-Server API: http://matrix.org/docs/api/client-server/
.. _test directory: syweb/webclient/test
/fake_bge_module_0.2.5-20200804-py3-none-any.whl/bge/texture.py
import sys
import typing
import bpy.context
class DeckLink:
''' Certain DeckLink devices can be used to play back video: the host sends video frames regularly for immediate or scheduled playback. The video feed is output on HDMI or SDI interfaces. This class supports the immediate playback mode: it has a source attribute that is assigned one of the source objects in the bge.texture module. Refreshing the DeckLink object causes the image source to be computed and sent to the DeckLink device for immediate transmission on the output interfaces. Keying is supported: it allows compositing the frame with an input video feed that transits through the DeckLink card.

:arg cardIdx: Number of the card to be used for output (0=first card). Note that DeckLink devices are usually half duplex: they can be used either for capture or for playback, but not both at the same time.
:type cardIdx: int
:arg format: String representing the display mode of the output feed.
:type format: str

The default value of the format argument is reserved for auto detection, but that is currently not supported (it will generate a runtime error), so the video format must be specified explicitly. If keying is the goal (see the keying attributes), the format must match the input video feed exactly; otherwise it can be any format supported by the device (a runtime error is raised if not). The format of the string is <displayMode>[/3D]. Refer to VideoDeckLink for the list of acceptable <displayMode> values. The optional /3D suffix is used to create a stereo 3D feed; in that case the 'right' attribute must also be set to specify the image source for the right eye.

Note: The pixel format is not specified here because it is always BGRA. The alpha channel is used in keying to mix the source with the input video feed; otherwise it is not used. If a conversion is needed to match the native video format, it is done inside the DeckLink driver or device.
'''
source: typing.Union['VideoFFmpeg', 'ImageMirror', 'ImageViewport',
'VideoDeckLink', 'ImageBuff', 'ImageMix',
'ImageRender', 'ImageFFmpeg'] = None
''' This attribute must be set to one of the image sources. If the image size does not fit exactly the frame size, the extend attribute determines what to do. For best performance, the source image should match exactly the size of the output frame. A further optimization is achieved if the image source object is ImageViewport or ImageRender set for whole viewport, flip disabled and no filter: the GL frame buffer is copied directly to the image buffer and directly from there to the DeckLink card (hence no buffer to buffer copy inside VideoTexture).
:type: typing.Union['VideoFFmpeg', 'ImageMirror', 'ImageViewport', 'VideoDeckLink', 'ImageBuff', 'ImageMix', 'ImageRender', 'ImageFFmpeg']
'''
right: typing.Union['VideoFFmpeg', 'ImageMirror', 'ImageViewport',
'VideoDeckLink', 'ImageBuff', 'ImageMix',
'ImageRender', 'ImageFFmpeg'] = None
''' If the video format is stereo 3D, this attribute should be set to an image source object that will produce the right eye images. If the goal is to render the BGE scene in 3D, it can be achieved with 2 cameras, one for each eye, used by 2 ImageRender with an offscreen render buffer that is just the size of the video frame.
:type: typing.Union['VideoFFmpeg', 'ImageMirror', 'ImageViewport', 'VideoDeckLink', 'ImageBuff', 'ImageMix', 'ImageRender', 'ImageFFmpeg']
'''
keying: bool = None
''' Specify if keying is enabled. False (default): the output frame is sent unmodified on the output interface (in that case no input video is required). True: the output frame is mixed with the input video, using the alpha channel to blend the two images and the combination is sent on the output interface.
:type: bool
'''
level: int = None
''' If keying is enabled, sets the keying level from 0 to 255. This value is a global alpha value that multiplies the alpha channel of the image source. Use 255 (the default) to keep the alpha channel unmodified, 0 to make the output frame totally transparent.
:type: int
'''
extend: bool = None
''' Determines how the image source should be mapped if the size does not fit the video frame size. * False (the default): map the image pixel by pixel. If the image size is smaller than the frame size, extra space around the image is filled with 0-alpha black. If it is larger, the image is cropped to fit the frame size. * True: the image is scaled by the nearest neighbor algorithm to fit the frame size. The scaling is fast but poor quality. For best results, always adjust the image source to match the size of the output video.
:type: bool
'''
def close(self):
''' Close the DeckLink device and release all resources. After calling this method, the object cannot be reactivated, it must be destroyed and a new DeckLink object created from fresh to restart the output.
'''
pass
def refresh(self, refresh_source: bool, ts: float):
''' This method must be called frequently to update the output frame in the DeckLink device.
:param refresh_source: True if the source object's image buffer should be invalidated after being used to compute the output frame. This triggers recomputation of the source image on the next refresh, which is normally the desired effect. False if the image source buffer should stay valid and be reused on the next refresh. Note that the DeckLink device stores the output frame and replays it until a new frame is sent from the host. Thus it is not necessary to refresh the DeckLink object if it is known that the image source has not changed.
:type refresh_source: bool
:param ts: The timestamp value passed to the image source object to compute the image. If unspecified, the BGE clock is used.
:type ts: float
'''
pass
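# --- Usage sketch (assumption: running inside the Blender Game Engine with a
# DeckLink output card installed; the display mode 'HD1080p30' is an assumed
# example and must match a mode supported by the card) ---
def _example_decklink_output():
    from bge import texture, logic  # available only inside the BGE
    decklink = texture.DeckLink(cardIdx=0, format='HD1080p30')
    decklink.source = texture.ImageViewport()  # push the current viewport
    # Call once per game frame, e.g. from an Always sensor:
    decklink.refresh(True, logic.getClockTime())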
class FilterBGR24:
''' Source filter BGR24.
'''
pass
class FilterBlueScreen:
''' Filter for Blue Screen. The RGB channels of the color are left unchanged, while the output alpha is obtained as follows: - if the squared Euclidean distance between the RGB color and the filter's reference color is smaller than the filter's lower limit, the output alpha is set to 0; - if that square is bigger than the filter's upper limit, the output alpha is set to 255; - otherwise the output alpha is linearly interpolated between 0 and 255 in the interval of the limits.
'''
color: list = None
''' Reference color. :default: (0, 0, 255)
:type: list
'''
limits: list = None
''' Reference color limits. :default: (64, 64)
:type: list
'''
previous = None
''' Previous pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
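# --- Usage sketch (assumption: inside the BGE; the video path is an assumed
# example). Keys a green background out of a video source: alpha goes to 0
# near the reference color and to 255 beyond the upper limit, as described above.
def _example_blue_screen():
    from bge import texture
    video = texture.VideoFFmpeg('//greenscreen.avi')
    keyer = texture.FilterBlueScreen()
    keyer.color = [0, 255, 0]   # key on green instead of the default blue
    keyer.limits = [64, 128]    # squared-distance thresholds for alpha 0..255
    video.filter = keyer        # applied before the image reaches the GPU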
class FilterColor:
''' Filter for color calculations. The output color is obtained by multiplying the reduced 4x4 matrix with the input color and adding the remaining column to the result.
'''
matrix: list = None
''' Matrix [4][5] for color calculation. :default: ((256, 0, 0, 0, 0), (0, 256, 0, 0, 0), (0, 0, 256, 0, 0), (0, 0, 0, 256, 0))
:type: list
'''
previous = None
''' Previous pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
class FilterGray:
''' Filter for grayscale effect. Proportions of R, G and B contributions in the output grayscale are 28:151:77.
'''
previous = None
''' Previous pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
class FilterLevel:
''' Filter for level calculations. Each output color component is obtained as follows: - if it is smaller than its corresponding min value, it is set to 0; - if it is bigger than its corresponding max value, it is set to 255; - otherwise it is linearly interpolated between 0 and 255 in the (min, max) interval.
'''
levels: list = None
''' Levels matrix [4] (min, max). :default: ((0, 255), (0, 255), (0, 255), (0, 255))
:type: list
'''
previous = None
''' Previous pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
class FilterNormal:
''' Normal map filter.
'''
colorIdx: int = None
''' Index of color used to calculate normal (0 - red, 1 - green, 2 - blue, 3 - alpha). :default: 0
:type: int
'''
depth: float = None
''' Depth of relief. :default: 4.0
:type: float
'''
previous = None
''' Previous pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
class FilterRGB24:
''' Returns a new input filter object to be used with an ImageBuff object when the image passed to the :meth: ImageBuff.load function has the 3-byte pixel format RGB.
'''
pass
class FilterRGBA32:
''' Source filter RGBA32.
'''
pass
class ImageBuff:
''' Image from application memory, for computer-generated images or drawing applications. :arg width: Width of the image. :type width: int :arg height: Height of the image. :type height: int :arg color: Value to initialize the RGB channels with. The initialized buffer will have all pixels set to (color, color, color, 255). (optional) :type color: int in [0, 255] :arg scale: Image uses scaling. (optional) :type scale: bool
'''
filter = None
''' Pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
flip: bool = None
''' Flip image vertically.
:type: bool
'''
image = None
''' Image data. (readonly)'''
scale: bool = None
''' Fast scale of image (nearest neighbour).
:type: bool
'''
size: tuple = None
''' Image size. (readonly)
:type: tuple
'''
valid: bool = None
''' Tells if an image is available. (readonly)
:type: bool
'''
def load(self, imageBuffer: typing.Union[bytes, 'bpy.context.object'],
width: int, height: int):
''' Load image from buffer.
:param imageBuffer: Buffer to load the image from.
:type imageBuffer: typing.Union[bytes, 'bpy.context.object']
:param width: Width of the image to load.
:type width: int
:param height: Height of the image to load.
:type height: int
'''
pass
def plot(self,
imageBuffer: typing.
Union[bytes, 'bpy.context.object', 'ImageBuff'],
width: int,
height: int,
positionX: int,
positionY: int,
mode: int = 'IMB_BLEND_COPY'):
''' Update image buffer.
:param imageBuffer: Buffer to load the new data from.
:type imageBuffer: typing.Union[bytes, 'bpy.context.object', 'ImageBuff']
:param width: Width of the data to load.
:type width: int
:param height: Height of the data to load.
:type height: int
:param positionX: Left boundary of the region to be drawn on.
:type positionX: int
:param positionY: Upper boundary of the region to be drawn on.
:type positionY: int
:param mode: Drawing mode, see Image Blending Modes _.
:type mode: int
'''
pass
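# --- Usage sketch (assumption: inside the BGE). Builds a solid red 4x4 RGBA
# buffer and loads it; setting FilterRGBA32 first is an assumption here, to
# tell load() the buffer holds 4 bytes per pixel rather than 3-byte RGB.
def _example_image_buff():
    from bge import texture
    width, height = 4, 4
    pixels = bytes([255, 0, 0, 255] * (width * height))  # opaque red
    img = texture.ImageBuff(width, height)
    img.filter = texture.FilterRGBA32()
    img.load(pixels, width, height)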
class ImageFFmpeg:
''' FFmpeg image source, used for image files and web-based images. :arg file: Path to the image to load. :type file: str
'''
status: int = None
''' Image status. (readonly) :value: see FFmpeg Video and Image Status _.
:type: int
'''
valid: bool = None
''' Tells if an image is available. (readonly)
:type: bool
'''
image = None
''' Image data. (readonly)'''
size: tuple = None
''' Image size. (readonly)
:type: tuple
'''
scale: bool = None
''' Fast scale of image (nearest neighbour).
:type: bool
'''
flip: bool = None
''' Flip image vertically.
:type: bool
'''
filter = None
''' Pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
def refresh(self, buffer=None, format: str = "RGBA") -> int:
''' Refresh image, get its status and optionally copy the frame to an external buffer.
:param buffer: An optional object that implements the buffer protocol. If specified, the image is copied to the buffer, which must be big enough or an exception is thrown.
:type buffer:
:param format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
:type format: str
:rtype: int
:return: see FFmpeg Video and Image Status _.
'''
pass
def reload(self, newname: str = None):
''' Reload image, i.e. reopen it.
:param newname: Path to a new image. (optional)
:type newname: str
'''
pass
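# --- Usage sketch (assumption: inside the BGE; the image path is an assumed
# example). Loads a still image and checks its status before using it.
def _example_image_ffmpeg():
    from bge import texture
    img = texture.ImageFFmpeg('//textures/picture.png')
    img.refresh()         # decode the file and update the status
    if img.valid:
        return img.image  # raw image data, ready for a Texture source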
class ImageMirror:
''' Image source from mirror. :arg scene: Scene in which the image has to be taken. :type scene: ~bge.types.KX_Scene :arg observer: Reference object for the mirror (the object from which the mirror has to be looked at, for example a camera). :type observer: ~bge.types.KX_GameObject :arg mirror: Object holding the mirror. :type mirror: ~bge.types.KX_GameObject :arg material: ID of the mirror's material to be used for mirroring. (optional) :type material: int :arg width: Off-screen render buffer width (optional). :type width: integer :arg height: Off-screen render buffer height (optional). :type height: integer :arg samples: Off-screen render buffer samples (optional). :type samples: integer :arg hdr: Off-screen image format (optional). :type hdr: One of :ref: these constants<render-hdr>
'''
alpha: bool = None
''' Use alpha in texture.
:type: bool
'''
horizon: typing.List[float] = None
''' Horizon color.
:type: typing.List[float]
'''
zenith: typing.List[float] = None
''' Zenith color.
:type: typing.List[float]
'''
background: typing.List[float] = None
''' Deprecated, use :py:meth: bge.texture.ImageMirror.horizon or :py:meth: bge.texture.ImageMirror.zenith instead.
:type: typing.List[float]
'''
updateShadow: bool = None
''' Choose to force a shadow buffer update if there is a gap between the rendered image and its shadows.
:type: bool
'''
colorBindCode = None
''' Off-screen color texture bind code.'''
capsize: list = None
''' Size of render area.
:type: list
'''
clip: float = None
''' Clipping distance.
:type: float
'''
filter = None
''' Pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
flip: bool = None
''' Flip image vertically.
:type: bool
'''
image = None
''' Image data. (readonly)'''
scale: bool = None
''' Fast scale of image (nearest neighbour).
:type: bool
'''
size: tuple = None
''' Image size (readonly).
:type: tuple
'''
valid: bool = None
''' Tells if an image is available. (readonly)
:type: bool
'''
whole: bool = None
''' Use whole viewport to render.
:type: bool
'''
def refresh(self, buffer=None, format: str = "RGBA"):
''' Refresh image - render and copy the image to an external buffer (optional) then invalidate its current content.
:param buffer: An optional object that implements the buffer protocol. If specified, the image is rendered and copied to the buffer, which must be big enough or an exception is thrown.
:type buffer:
:param format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
:type format: str
'''
pass
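# --- Usage sketch (assumption: inside the BGE; scene, observer and mirror
# objects come from the running game). Renders the scene as reflected by
# 'mirror_obj', for use as a Texture source refreshed every frame.
def _example_mirror(scene, observer, mirror_obj, material):
    from bge import texture
    mirror = texture.ImageMirror(scene, observer, mirror_obj, material)
    mirror.clip = 100.0  # skip geometry farther than 100 units from the mirror
    return mirror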
class ImageMix:
''' Image mixer used to mix multiple image sources together.
'''
filter = None
''' Pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
flip: bool = None
''' Flip image vertically.
:type: bool
'''
image = None
''' Image data. (readonly)'''
scale: bool = None
''' Fast scale of image (nearest neighbour).
:type: bool
'''
size: tuple = None
''' Image size. (readonly)
:type: tuple
'''
valid: bool = None
''' Tells if an image is available. (readonly)
:type: bool
'''
def getSource(
self, id: str
) -> typing.Union['VideoFFmpeg', 'ImageMirror', 'ImageViewport',
'ImageBuff', 'ImageMix', 'ImageRender', 'ImageFFmpeg']:
''' Get image source.
:param id: Identifier of the source to get.
:type id: str
:rtype: typing.Union['VideoFFmpeg', 'ImageMirror', 'ImageViewport', 'ImageBuff', 'ImageMix', 'ImageRender', 'ImageFFmpeg']
:return: Image source.
'''
pass
def getWeight(self, id: str) -> int:
''' Get image source weight.
:param id: Identifier of the source.
:type id: str
:rtype: int
:return: Weight of the source.
'''
pass
def refresh(self, buffer=None, format: str = "RGBA"):
''' Refresh image - calculate and copy the image to an external buffer (optional) then invalidate its current content.
:param buffer: An optional object that implements the buffer protocol. If specified, the image is calculated and copied to the buffer, which must be big enough or an exception is thrown.
:type buffer:
:param format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
:type format: str
'''
pass
def setSource(self, id: str, image):
''' Set image source - all sources must have the same size.
:param id: Identifier of the source to set.
:type id: str
:param image:
:type image:
'''
pass
def setWeight(self, id: str, weight: int):
''' Set image source weight - the sum of the weights should be 256 to get full color intensity in the output.
:param id: Identifier of the source.
:type id: str
:param weight: Weight of the source.
:type weight: int
'''
pass
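# --- Usage sketch (assumption: inside the BGE; image paths are assumed
# examples). Cross-fades two images; weights summing to 256 keep full intensity.
def _example_image_mix():
    from bge import texture
    mix = texture.ImageMix()
    mix.setSource('a', texture.ImageFFmpeg('//frame_a.png'))
    mix.setSource('b', texture.ImageFFmpeg('//frame_b.png'))
    mix.setWeight('a', 192)  # 75% of the output
    mix.setWeight('b', 64)   # 25% of the output
    return mix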
class ImageRender:
''' Image source from a render of a non-active camera. The render is done on a custom framebuffer object if fbo is specified, otherwise on the default framebuffer. :arg scene: Scene in which the image has to be taken. :type scene: ~bge.types.KX_Scene :arg camera: Camera from which the image has to be taken. :type camera: ~bge.types.KX_Camera :arg width: Off-screen render buffer width (optional). :type width: integer :arg height: Off-screen render buffer height (optional). :type height: integer :arg samples: Off-screen render buffer samples (optional). :type samples: integer :arg hdr: Off-screen image format (optional). :type hdr: One of :ref: these constants<render-hdr>
'''
alpha: bool = None
''' Use alpha in texture.
:type: bool
'''
horizon: typing.List[float] = None
''' Horizon color.
:type: typing.List[float]
'''
zenith: typing.List[float] = None
''' Zenith color.
:type: typing.List[float]
'''
background: typing.List[float] = None
''' Background color. Deprecated, use :py:meth: bge.texture.ImageRender.horizon or :py:meth: bge.texture.ImageRender.zenith instead.
:type: typing.List[float]
'''
updateShadow: bool = None
''' Choose to force a shadow buffer update if there is a gap between the rendered image and its shadows.
:type: bool
'''
colorBindCode = None
''' Off-screen color texture bind code.'''
capsize: list = None
''' Size of render area.
:type: list
'''
filter = None
''' Pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
flip: bool = None
''' Flip image vertically.
:type: bool
'''
image = None
''' Image data. (readonly)'''
scale: bool = None
''' Fast scale of image (nearest neighbour).
:type: bool
'''
size: tuple = None
''' Image size. (readonly)
:type: tuple
'''
valid: bool = None
''' Tells if an image is available. (readonly)
:type: bool
'''
whole: bool = None
''' Use whole viewport to render.
:type: bool
'''
depth: bool = None
''' Use depth component of render as array of float - not suitable for texture source, should only be used with bge.texture.imageToArray(mode='F').
:type: bool
'''
zbuff: bool = None
''' Use depth component of render as grayscale color - suitable for texture source.
:type: bool
'''
def render(self) -> bool:
''' Render the scene but do not extract the pixels yet. The function returns as soon as the render commands have been sent to the GPU. The render will proceed asynchronously in the GPU while the host can perform other tasks. To complete the render, you can either call :func: refresh directly or refresh the texture of which this object is the source. This method is useful for implementing asynchronous render for optimal performance: call render() on frame n and refresh() on frame n+1 to give the GPU as much time as possible to render the frame while the game engine performs other tasks.
:rtype: bool
:return: True if the render was initiated, False if the render cannot be performed (e.g. the camera is active)
'''
pass
def refresh(self, buffer=None, format: str = "RGBA") -> bool:
''' Refresh video - render and optionally copy the image to an external buffer then invalidate its current content. The render may have been started earlier with the :func: render method, in which case this function simply waits for the render operations to complete. When called without argument, the pixels are not extracted but the render is guaranteed to be completed when the function returns. This only makes sense with offscreen render on texture target (see :func: ~bge.render.offScreenCreate ).
:param buffer: An object that implements the buffer protocol. If specified, the image is copied to the buffer, which must be big enough or an exception is thrown. The transfer to the buffer is optimal if no processing of the image is needed. This is the case if flip=False, alpha=True, scale=False, whole=True, depth=False, zbuff=False and no filter is set.
:type buffer:
:param format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
:type format: str
:rtype: bool
:return: True if the render is complete, False if the render cannot be performed (e.g. the camera is active)
'''
pass
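# --- Usage sketch (assumption: inside the BGE; 'tex' is a Texture whose source
# is an ImageRender). Implements the asynchronous pattern from render()'s
# docstring: start the render on one frame, finish it on the next.
def _example_async_render_step(tex, state):
    if state.get('pending'):
        tex.refresh(True)  # frame n+1: wait for the GPU and upload the texture
        state['pending'] = False
    else:
        state['pending'] = tex.source.render()  # frame n: queue GPU commands only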
class ImageViewport:
''' Image source from the viewport rendered by the active camera. To render from a non-active camera, see ImageRender .
'''
alpha: bool = None
''' Use alpha in texture.
:type: bool
'''
capsize: list = None
''' Size of viewport area being captured.
:type: list
'''
filter = None
''' Pixel filter. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
flip: bool = None
''' Flip image vertically.
:type: bool
'''
image = None
''' Image data. (readonly)'''
position: list = None
''' Upper left corner of the captured area.
:type: list
'''
scale: bool = None
''' Fast scale of image (nearest neighbour).
:type: bool
'''
size: tuple = None
''' Image size. (readonly)
:type: tuple
'''
valid: bool = None
''' Tells if an image is available. (readonly)
:type: bool
'''
whole: bool = None
''' Use whole viewport to capture.
:type: bool
'''
depth: bool = None
''' Use depth component of viewport as array of float - not suitable for texture source, should only be used with bge.texture.imageToArray(mode='F') .
:type: bool
'''
zbuff: bool = None
''' Use depth component of viewport as grayscale color - suitable for texture source.
:type: bool
'''
def refresh(self, buffer=None, format: str = "RGBA"):
''' Refresh video - copy the viewport to an external buffer (optional) then invalidate its current content.
:param buffer: An optional object that implements the buffer protocol. If specified, the image is copied to the buffer, which must be big enough or an exception is thrown. The transfer to the buffer is optimal if no processing of the image is needed. This is the case if flip=False, alpha=True, scale=False, whole=True, depth=False, zbuff=False and no filter is set.
:type buffer:
:param format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
:type format: str
'''
pass
class Texture:
''' Class that creates the Texture object that loads the dynamic texture on the GPU. :arg gameObj: Game object on which to create a video texture. :type gameObj: ~bge.types.KX_GameObject :arg materialID: Material ID; 0 (the first material) by default. (optional) :type materialID: int :arg textureID: Texture index in case of a multi-texture channel; 0 = first channel by default. In case of a UV texture, this parameter should always be 0. (optional) :type textureID: int :arg textureObj: Reference to another Texture object with a shared bindId, whose texture the user might want to reuse. If this argument is used, you should not create any source on this texture and there is no need to refresh it either: the other Texture object will provide the texture for both materials/textures. (optional) :type textureObj: Texture
'''
bindId: int = None
''' OpenGL Bind Name. (readonly)
:type: int
'''
mipmap: bool = None
''' Mipmap texture.
:type: bool
'''
source = None
''' Source of texture. * VideoFFmpeg * VideoDeckLink * ImageFFmpeg * ImageBuff * ImageMirror * ImageMix * ImageRender * ImageViewport'''
def close(self):
''' Close dynamic texture and restore original.
'''
pass
def refresh(self, refresh_source: bool, timestamp: float = -1.0):
''' Refresh texture from source.
:param refresh_source: Whether to also refresh the image source of the texture.
:type refresh_source: bool
:param timestamp: timestamp (in seconds from the start of the movie) of the frame to be loaded; this can be used for video-sound synchronization by passing :attr: ~bge.types.KX_SoundActuator.time to it. (optional)
:type timestamp: float
'''
pass
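# --- Usage sketch (assumption: inside the BGE; 'IMplaceholder.png' and the
# video path are assumed example names). The canonical dynamic-texture pattern:
# find the material using the placeholder image, wrap it in a Texture, attach a
# video source, then refresh every frame.
def _example_video_texture(game_object):
    from bge import texture, logic
    mat_id = texture.materialID(game_object, 'IMplaceholder.png')
    logic.video = texture.Texture(game_object, mat_id)  # keep a reference alive
    logic.video.source = texture.VideoFFmpeg('//video.avi')
    logic.video.source.play()

def _example_video_texture_update():
    from bge import logic
    if hasattr(logic, 'video'):
        logic.video.refresh(True)  # upload the next decoded frame to the GPU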
class VideoDeckLink:
''' Image source from an external video stream captured with a DeckLink video card from Black Magic Design. Before this source can be used, a DeckLink hardware device must be installed (it can be a PCIe card or a USB device) and the 'Desktop Video' software package (version 10.4 or above) must be installed on the host as described in the DeckLink documentation. If in addition you have a recent nVidia Quadro card, you can benefit from the 'GPUDirect' technology to push the captured video frame very efficiently to the GPU. For this you need to install the 'DeckLink SDK' version 10.4 or above and copy the 'dvp.dll' runtime library to Blender's installation directory or to any other place where Blender can load a DLL from.

:arg format: String describing the video format to be captured.
:type format: str
:arg capture: Card number from which the input video must be captured.
:type capture: int

The format argument must be written as <displayMode>/<pixelFormat>[/3D][:<cacheSize>], where <displayMode> describes the frame size and rate and <pixelFormat> the encoding of the pixels. The optional /3D suffix is to be used if the video stream is stereo with a left and right eye feed. The optional :<cacheSize> suffix determines the number of video frames kept in cache, 8 by default. Some DeckLink cards won't work below a certain cache size; the default value of 8 should be sufficient for all cards. You may try to reduce the cache size to reduce the memory footprint: for example the 4K Extreme is known to work with only 3 frames, the Extreme 2 needs 4 frames and the Intensity Shuttle needs 6 frames. Reducing the cache size may be useful when DeckLink is used in conjunction with GPUDirect: all frames must be locked in memory in that case, which puts a lot of pressure on memory. If you reduce the cache size too much, you'll get no error but no video feed either.

The valid <displayMode> values are copied from the BMDDisplayMode enum in the DeckLink API without the 'bmdMode' prefix. In case a mode that is not in this list is added in a later version of the SDK, it is also possible to specify the 4-letter internal code for that mode; you will find the internal code in the DeckLinkAPIModes.h file that is part of the SDK. Here for reference is the full list of supported display modes with their equivalent internal codes:

Internal codes:
- NTSC 'ntsc'
- NTSC2398 'nt23'
- PAL 'pal '
- NTSCp 'ntsp'
- PALp 'palp'

HD 1080 modes:
- HD1080p2398 '23ps'
- HD1080p24 '24ps'
- HD1080p25 'Hp25'
- HD1080p2997 'Hp29'
- HD1080p30 'Hp30'
- HD1080i50 'Hi50'
- HD1080i5994 'Hi59'
- HD1080i6000 'Hi60'
- HD1080p50 'Hp50'
- HD1080p5994 'Hp59'
- HD1080p6000 'Hp60'

HD 720 modes:
- HD720p50 'hp50'
- HD720p5994 'hp59'
- HD720p60 'hp60'

2k modes:
- 2k2398 '2k23'
- 2k24 '2k24'
- 2k25 '2k25'

4k modes:
- 4K2160p2398 '4k23'
- 4K2160p24 '4k24'
- 4K2160p25 '4k25'
- 4K2160p2997 '4k29'
- 4K2160p30 '4k30'
- 4K2160p50 '4k50'
- 4K2160p5994 '4k59'
- 4K2160p60 '4k60'

Most of the names are self explanatory. If necessary, refer to the DeckLink API documentation for more information. Similarly, <pixelFormat> is copied from the BMDPixelFormat enum. Here for reference is the full list of supported pixel formats with their equivalent internal codes:

Pixel formats:
- 8BitYUV '2vuy'
- 10BitYUV 'v210'
- 8BitARGB (no equivalent code)
- 8BitBGRA 'BGRA'
- 10BitRGB 'r210'
- 12BitRGB 'R12B'
- 12BitRGBLE 'R12L'
- 10BitRGBXLE 'R10l'
- 10BitRGBX 'R10b'

Refer to the DeckLink SDK documentation for a full description of these pixel formats. It is important to understand them because the decoding of the pixels is NOT done in VideoTexture for performance reasons; instead a specific shader must be used to decode the pixels in the GPU. Only the '8BitARGB', '8BitBGRA' and '10BitRGBXLE' pixel formats are mapped directly to OpenGL RGB float textures. The '8BitYUV' and '10BitYUV' pixel formats are mapped to OpenGL RGB float textures but require a shader to decode. The other pixel formats are sent as a GL_RED_INTEGER texture (i.e. a texture with only the red channel coded as an unsigned 32 bit integer) and are not recommended for use.

Example: HD1080p24/10BitYUV/3D:4 is equivalent to 24ps/v210/3D:4 and represents a full HD stereo feed at 24 frames per second with a cache size of 4 frames.

Although video format auto detection is possible with certain DeckLink devices, the corresponding API is NOT implemented in the BGE. Therefore it is important to specify a format string that matches the video feed exactly. If the format is wrong, no frame will be captured. It should be noted that the pixel format you need to specify is not necessarily the actual format in the video feed: for example, the 4K Extreme card delivers 8-bit RGB pixels in the '10BitRGBXLE' format. Use the 'Media Express' application included in 'Desktop Video' to discover which pixel format works for a particular video stream.
'''
status: int = None
''' Status of the capture: 1=ready to use, 2=capturing, 3=stopped
:type: int
'''
framerate: float = None
''' Capture frame rate as computed from the video format.
:type: float
'''
valid: bool = None
''' Tells if the image attribute can be used to retrieve the image. Always False in this implementation (the image is not available at python level)
:type: bool
'''
image = None
''' The image data. Always None in this implementation.'''
size: int = None
''' The size of the frame in pixel. Stereo frames have double the height of the video frame, i.e. 3D is delivered to the GPU as a single image in top-bottom order, left eye on top.
:type: int
'''
scale: bool = None
''' Not used in this object.
:type: bool
'''
flip: bool = None
''' Not used in this object.
:type: bool
'''
filter = None
''' Not used in this object.'''
def play(self) -> bool:
''' Kick-off the capture after creation of the object.
:rtype: bool
:return: True if the capture could be started, False otherwise.
'''
pass
def pause(self) -> bool:
''' Temporarily stops the capture. Use play() to restart it.
:rtype: bool
:return: True if the capture could be paused, False otherwise.
'''
pass
def stop(self) -> bool:
''' Stops the capture.
:rtype: bool
:return: True if the capture could be stopped, False otherwise.
'''
pass
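# --- Usage sketch (assumption: inside the BGE with a DeckLink capture card;
# the format string is an assumed example and must match the incoming feed
# exactly, as explained above). Captures a 1080p30 8-bit YUV stream from card 0.
def _example_decklink_capture():
    from bge import texture
    video = texture.VideoDeckLink('HD1080p30/8BitYUV', 0)
    video.play()  # kick off the capture; use as a Texture source afterwards
    return video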
class VideoFFmpeg:
''' FFmpeg video source, used for video files, video captures, or video streams. :arg file: Path to the video to load; if capture >= 0 on Windows, this parameter will not be used. :type file: str :arg capture: Capture device number; if >= 0, the corresponding webcam will be used. (optional) :type capture: int :arg rate: Capture rate. (optional, used only if capture >= 0) :type rate: float :arg width: Capture width. (optional, used only if capture >= 0) :type width: int :arg height: Capture height. (optional, used only if capture >= 0) :type height: int
'''
status: int = None
''' Video status. (readonly) :value: see FFmpeg Video and Image Status _.
:type: int
'''
range: list = None
''' The start and stop time of the video playback, expressed in seconds from the beginning. By default the entire video.
:type: list
'''
repeat: int = None
''' Number of times to replay the video, -1 for infinite repeat.
:type: int
'''
framerate: float = None
''' Relative frame rate, <1.0 for slow, >1.0 for fast.
:type: float
'''
valid: bool = None
''' Tells if an image is available. (readonly)
:type: bool
'''
image = None
''' Image data. (readonly)'''
size: tuple = None
''' Image size. (readonly)
:type: tuple
'''
scale: bool = None
''' Set to True to activate the fast nearest neighbor scaling algorithm. Texture width and height must be a power of 2. If the video picture size is not a power of 2, rescaling is required. By default bge.texture uses the precise but slow gluScaleImage() function. It is best to rescale the video offline so that no scaling is necessary at runtime!
:type: bool
'''
flip: bool = None
''' If True the image will be flipped vertically. FFmpeg always delivers the image upside down, so this attribute is set to True by default.
:type: bool
'''
filter = None
''' An additional filter that is applied on the video before sending it to the GPU. * FilterBGR24 * FilterBlueScreen * FilterColor * FilterGray * FilterLevel * FilterNormal * FilterRGB24 * FilterRGBA32'''
preseek: int = None
''' Number of frames of preseek.
:type: int
'''
deinterlace: bool = None
''' Deinterlace image.
:type: bool
'''
def play(self) -> bool:
''' Play (restart) video.
:rtype: bool
:return: Whether the video was ready or stopped.
'''
pass
def pause(self) -> bool:
''' Pause video.
:rtype: bool
:return: Whether the video was playing.
'''
pass
def stop(self) -> bool:
''' Stop video (play will replay it from start).
:rtype: bool
:return: Whether the video was playing.
'''
pass
def refresh(self,
buffer=None,
format: str = "RGBA",
timestamp: float = -1.0) -> int:
''' Refresh video - get its status and optionally copy the frame to an external buffer.
:param buffer: An optional object that implements the buffer protocol. If specified, the image is copied to the buffer, which must be big enough or an exception is thrown.
:type buffer:
:param format: An optional image format specifier for the image that will be copied to the buffer. Only valid values are "RGBA" or "BGRA"
:type format: str
:param timestamp: An optional timestamp (in seconds from the start of the movie) of the frame to be copied to the buffer.
:type timestamp: float
:rtype: int
:return: see FFmpeg Video and Image Status _.
'''
pass
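# --- Usage sketch (assumption: inside the BGE). Opens the first webcam rather
# than a file: a capture index >= 0 selects a camera, and the rate/width/height
# values here are assumed examples used as capture hints.
def _example_webcam():
    from bge import texture
    cam = texture.VideoFFmpeg('', 0, 30.0, 640, 480)
    cam.play()
    return cam  # use as a Texture source and refresh it every frame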
def getLastError() -> str:
''' Last error that occurred in a bge.texture function.
:return: The description of the last error occurred in a bge.texture function.
'''
pass
def imageToArray(image, mode: str):
''' Returns a ~bgl.Buffer corresponding to the current image stored in a texture source object.
:param mode: Optional argument representing the pixel format. - You can use the characters R, G, B for the 3 color channels, A for the alpha channel, 0 to force a fixed 0 color channel and 1 to force a fixed 255 color channel. Examples: - "BGR" will return 3 bytes per pixel with the Blue, Green and Red channels in that order. - "RGB1" will return 4 bytes per pixel with the Red, Green, Blue channels in that order and the alpha channel forced to 255. - A special mode "F" allows returning the image as an array of floats. This mode should only be used to retrieve the depth buffer of ImageViewport and ImageRender objects. The default mode is "RGBA".
:type mode: str
:return: An object representing the image as one dimensional array of bytes of size (pixel_size*width*height), line by line starting from the bottom of the image. The pixel size and format is determined by the mode parameter. For mode 'F', the array is a one dimensional array of float of size (width*height).
'''
pass
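# --- Usage sketch (assumption: inside the BGE; 'source' is any valid image
# source object). Extracts the current image as raw bytes, dropping alpha.
def _example_image_to_array(source):
    from bge import texture
    arr = texture.imageToArray(source, 'RGB')  # 3 bytes/pixel, bottom line first
    width, height = source.size
    return arr, width, height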
def materialID(object, name: str) -> int:
''' Returns a numeric value that can be used in Texture to create a dynamic texture. The value corresponds to an internal material number that uses the texture identified by name. name is a string representing a texture name with the IM prefix if you want to identify the texture directly; this works for basic tex face and for material, provided the material has a texture channel using that particular texture in first position of the texture stack. name can also have the MA prefix if you want to identify the texture by material; in that case the material must have a texture channel in first position. If the object has no material that matches name, a runtime error is raised; use try/except to catch the exception. Ex: bge.texture.materialID(obj, 'IMvideo.png')
:param object: The game object that uses the texture you want to make dynamic.
:param name: Name of the texture/material you want to make dynamic.
:type name: str
:return: The internal material number.
'''
pass
def setLogFile(filename: str) -> int:
''' Sets the name of a text file in which runtime error messages will be written, in addition to the printing of the messages on the Python console. Only the runtime errors specific to the VideoTexture module are written to that file; ordinary runtime errors are not written.
:param filename: Name of the error log file.
:type filename: str
:return: -1 if the parameter name is invalid (not of type string), else 0.
'''
pass
IMB_BLEND_ADD = None
IMB_BLEND_ADD_ALPHA = None
IMB_BLEND_COLOR = None
IMB_BLEND_COLORBURN = None
IMB_BLEND_COLORDODGE = None
IMB_BLEND_COPY = None
IMB_BLEND_COPY_ALPHA = None
IMB_BLEND_COPY_RGB = None
IMB_BLEND_DARKEN = None
IMB_BLEND_DIFFERENCE = None
IMB_BLEND_ERASE_ALPHA = None
IMB_BLEND_EXCLUSION = None
IMB_BLEND_HARDLIGHT = None
IMB_BLEND_HUE = None
IMB_BLEND_LIGHTEN = None
IMB_BLEND_LINEARBURN = None
IMB_BLEND_LINEARLIGHT = None
IMB_BLEND_LUMINOSITY = None
IMB_BLEND_MIX = None
IMB_BLEND_MUL = None
IMB_BLEND_OVERLAY = None
IMB_BLEND_PINLIGHT = None
IMB_BLEND_SATURATION = None
IMB_BLEND_SCREEN = None
IMB_BLEND_SOFTLIGHT = None
IMB_BLEND_SUB = None
IMB_BLEND_VIVIDLIGHT = None
SOURCE_EMPTY = None
SOURCE_ERROR = None
SOURCE_PLAYING = None
SOURCE_READY = None
SOURCE_STOPPED = None
/code-RTC-1.0.2.tar.gz/code-RTC-1.0.2/code_rtc/static/lib/monaco-editor/esm/vs/editor/standalone/browser/inspectTokens/inspectTokens.js
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
return function (target, key) { decorator(target, key, paramIndex); }
};
import './inspectTokens.css';
import { Color } from '../../../../base/common/color.js';
import { Disposable } from '../../../../base/common/lifecycle.js';
import { escape } from '../../../../base/common/strings.js';
import { EditorAction, registerEditorAction, registerEditorContribution } from '../../../browser/editorExtensions.js';
import { TokenMetadata, TokenizationRegistry } from '../../../common/modes.js';
import { NULL_STATE, nullTokenize, nullTokenize2 } from '../../../common/modes/nullMode.js';
import { IModeService } from '../../../common/services/modeService.js';
import { IStandaloneThemeService } from '../../common/standaloneThemeService.js';
import { editorHoverBackground, editorHoverBorder, editorHoverForeground } from '../../../../platform/theme/common/colorRegistry.js';
import { HIGH_CONTRAST, registerThemingParticipant } from '../../../../platform/theme/common/themeService.js';
import { InspectTokensNLS } from '../../../common/standaloneStrings.js';
var InspectTokensController = /** @class */ (function (_super) {
__extends(InspectTokensController, _super);
function InspectTokensController(editor, standaloneColorService, modeService) {
var _this = _super.call(this) || this;
_this._editor = editor;
_this._modeService = modeService;
_this._widget = null;
_this._register(_this._editor.onDidChangeModel(function (e) { return _this.stop(); }));
_this._register(_this._editor.onDidChangeModelLanguage(function (e) { return _this.stop(); }));
_this._register(TokenizationRegistry.onDidChange(function (e) { return _this.stop(); }));
return _this;
}
InspectTokensController.get = function (editor) {
return editor.getContribution(InspectTokensController.ID);
};
InspectTokensController.prototype.dispose = function () {
this.stop();
_super.prototype.dispose.call(this);
};
InspectTokensController.prototype.launch = function () {
if (this._widget) {
return;
}
if (!this._editor.hasModel()) {
return;
}
this._widget = new InspectTokensWidget(this._editor, this._modeService);
};
InspectTokensController.prototype.stop = function () {
if (this._widget) {
this._widget.dispose();
this._widget = null;
}
};
InspectTokensController.ID = 'editor.contrib.inspectTokens';
InspectTokensController = __decorate([
__param(1, IStandaloneThemeService),
__param(2, IModeService)
], InspectTokensController);
return InspectTokensController;
}(Disposable));
var InspectTokens = /** @class */ (function (_super) {
__extends(InspectTokens, _super);
function InspectTokens() {
return _super.call(this, {
id: 'editor.action.inspectTokens',
label: InspectTokensNLS.inspectTokensAction,
alias: 'Developer: Inspect Tokens',
precondition: undefined
}) || this;
}
InspectTokens.prototype.run = function (accessor, editor) {
var controller = InspectTokensController.get(editor);
if (controller) {
controller.launch();
}
};
return InspectTokens;
}(EditorAction));
function renderTokenText(tokenText) {
var result = '';
for (var charIndex = 0, len = tokenText.length; charIndex < len; charIndex++) {
var charCode = tokenText.charCodeAt(charIndex);
switch (charCode) {
case 9 /* Tab */:
result += '→';
break;
case 32 /* Space */:
result += '·';
break;
case 60 /* LessThan */:
    result += '&lt;';
    break;
case 62 /* GreaterThan */:
    result += '&gt;';
    break;
case 38 /* Ampersand */:
    result += '&amp;';
    break;
default:
result += String.fromCharCode(charCode);
}
}
return result;
}
function getSafeTokenizationSupport(languageIdentifier) {
var tokenizationSupport = TokenizationRegistry.get(languageIdentifier.language);
if (tokenizationSupport) {
return tokenizationSupport;
}
return {
getInitialState: function () { return NULL_STATE; },
tokenize: function (line, state, deltaOffset) { return nullTokenize(languageIdentifier.language, line, state, deltaOffset); },
tokenize2: function (line, state, deltaOffset) { return nullTokenize2(languageIdentifier.id, line, state, deltaOffset); }
};
}
var InspectTokensWidget = /** @class */ (function (_super) {
__extends(InspectTokensWidget, _super);
function InspectTokensWidget(editor, modeService) {
var _this = _super.call(this) || this;
// Editor.IContentWidget.allowEditorOverflow
_this.allowEditorOverflow = true;
_this._editor = editor;
_this._modeService = modeService;
_this._model = _this._editor.getModel();
_this._domNode = document.createElement('div');
_this._domNode.className = 'tokens-inspect-widget';
_this._tokenizationSupport = getSafeTokenizationSupport(_this._model.getLanguageIdentifier());
_this._compute(_this._editor.getPosition());
_this._register(_this._editor.onDidChangeCursorPosition(function (e) { return _this._compute(_this._editor.getPosition()); }));
_this._editor.addContentWidget(_this);
return _this;
}
InspectTokensWidget.prototype.dispose = function () {
this._editor.removeContentWidget(this);
_super.prototype.dispose.call(this);
};
InspectTokensWidget.prototype.getId = function () {
return InspectTokensWidget._ID;
};
InspectTokensWidget.prototype._compute = function (position) {
var data = this._getTokensAtLine(position.lineNumber);
var token1Index = 0;
for (var i = data.tokens1.length - 1; i >= 0; i--) {
var t = data.tokens1[i];
if (position.column - 1 >= t.offset) {
token1Index = i;
break;
}
}
var token2Index = 0;
for (var i = (data.tokens2.length >>> 1); i >= 0; i--) {
if (position.column - 1 >= data.tokens2[(i << 1)]) {
token2Index = i;
break;
}
}
var result = '';
var lineContent = this._model.getLineContent(position.lineNumber);
var tokenText = '';
if (token1Index < data.tokens1.length) {
var tokenStartIndex = data.tokens1[token1Index].offset;
var tokenEndIndex = token1Index + 1 < data.tokens1.length ? data.tokens1[token1Index + 1].offset : lineContent.length;
tokenText = lineContent.substring(tokenStartIndex, tokenEndIndex);
}
result += "<h2 class=\"tm-token\">" + renderTokenText(tokenText) + "<span class=\"tm-token-length\">(" + tokenText.length + " " + (tokenText.length === 1 ? 'char' : 'chars') + ")</span></h2>";
result += "<hr class=\"tokens-inspect-separator\" style=\"clear:both\"/>";
var metadata = this._decodeMetadata(data.tokens2[(token2Index << 1) + 1]);
result += "<table class=\"tm-metadata-table\"><tbody>";
result += "<tr><td class=\"tm-metadata-key\">language</td><td class=\"tm-metadata-value\">" + escape(metadata.languageIdentifier.language) + "</td>";
result += "<tr><td class=\"tm-metadata-key\">token type</td><td class=\"tm-metadata-value\">" + this._tokenTypeToString(metadata.tokenType) + "</td>";
result += "<tr><td class=\"tm-metadata-key\">font style</td><td class=\"tm-metadata-value\">" + this._fontStyleToString(metadata.fontStyle) + "</td>";
result += "<tr><td class=\"tm-metadata-key\">foreground</td><td class=\"tm-metadata-value\">" + Color.Format.CSS.formatHex(metadata.foreground) + "</td>";
result += "<tr><td class=\"tm-metadata-key\">background</td><td class=\"tm-metadata-value\">" + Color.Format.CSS.formatHex(metadata.background) + "</td>";
result += "</tbody></table>";
result += "<hr class=\"tokens-inspect-separator\"/>";
if (token1Index < data.tokens1.length) {
result += "<span class=\"tm-token-type\">" + escape(data.tokens1[token1Index].type) + "</span>";
}
this._domNode.innerHTML = result;
this._editor.layoutContentWidget(this);
};
InspectTokensWidget.prototype._decodeMetadata = function (metadata) {
var colorMap = TokenizationRegistry.getColorMap();
var languageId = TokenMetadata.getLanguageId(metadata);
var tokenType = TokenMetadata.getTokenType(metadata);
var fontStyle = TokenMetadata.getFontStyle(metadata);
var foreground = TokenMetadata.getForeground(metadata);
var background = TokenMetadata.getBackground(metadata);
return {
languageIdentifier: this._modeService.getLanguageIdentifier(languageId),
tokenType: tokenType,
fontStyle: fontStyle,
foreground: colorMap[foreground],
background: colorMap[background]
};
};
InspectTokensWidget.prototype._tokenTypeToString = function (tokenType) {
switch (tokenType) {
case 0 /* Other */: return 'Other';
case 1 /* Comment */: return 'Comment';
case 2 /* String */: return 'String';
case 4 /* RegEx */: return 'RegEx';
}
return '??';
};
InspectTokensWidget.prototype._fontStyleToString = function (fontStyle) {
var r = '';
if (fontStyle & 1 /* Italic */) {
r += 'italic ';
}
if (fontStyle & 2 /* Bold */) {
r += 'bold ';
}
if (fontStyle & 4 /* Underline */) {
r += 'underline ';
}
if (r.length === 0) {
r = '---';
}
return r;
};
InspectTokensWidget.prototype._getTokensAtLine = function (lineNumber) {
var stateBeforeLine = this._getStateBeforeLine(lineNumber);
var tokenizationResult1 = this._tokenizationSupport.tokenize(this._model.getLineContent(lineNumber), stateBeforeLine, 0);
var tokenizationResult2 = this._tokenizationSupport.tokenize2(this._model.getLineContent(lineNumber), stateBeforeLine, 0);
return {
startState: stateBeforeLine,
tokens1: tokenizationResult1.tokens,
tokens2: tokenizationResult2.tokens,
endState: tokenizationResult1.endState
};
};
InspectTokensWidget.prototype._getStateBeforeLine = function (lineNumber) {
var state = this._tokenizationSupport.getInitialState();
for (var i = 1; i < lineNumber; i++) {
var tokenizationResult = this._tokenizationSupport.tokenize(this._model.getLineContent(i), state, 0);
state = tokenizationResult.endState;
}
return state;
};
InspectTokensWidget.prototype.getDomNode = function () {
return this._domNode;
};
InspectTokensWidget.prototype.getPosition = function () {
return {
position: this._editor.getPosition(),
preference: [2 /* BELOW */, 1 /* ABOVE */]
};
};
InspectTokensWidget._ID = 'editor.contrib.inspectTokensWidget';
return InspectTokensWidget;
}(Disposable));
registerEditorContribution(InspectTokensController.ID, InspectTokensController);
registerEditorAction(InspectTokens);
registerThemingParticipant(function (theme, collector) {
var border = theme.getColor(editorHoverBorder);
if (border) {
var borderWidth = theme.type === HIGH_CONTRAST ? 2 : 1;
collector.addRule(".monaco-editor .tokens-inspect-widget { border: " + borderWidth + "px solid " + border + "; }");
collector.addRule(".monaco-editor .tokens-inspect-widget .tokens-inspect-separator { background-color: " + border + "; }");
}
var background = theme.getColor(editorHoverBackground);
if (background) {
collector.addRule(".monaco-editor .tokens-inspect-widget { background-color: " + background + "; }");
}
var foreground = theme.getColor(editorHoverForeground);
if (foreground) {
collector.addRule(".monaco-editor .tokens-inspect-widget { color: " + foreground + "; }");
}
});
|
PypiClean
|
/skytime-0.16.1-py3-none-any.whl/sktime/annotation/ggs.py
|
import logging
import math
from typing import Dict, List, Tuple
import numpy as np
import numpy.typing as npt
from attrs import asdict, define, field
from sklearn.utils.validation import check_random_state
from sktime.base import BaseEstimator
logger = logging.getLogger(__name__)
@define
class GGS:
"""
Greedy Gaussian Segmentation.
    The method approximates solutions for the problem of breaking a
    multivariate time series into segments, where the data in each segment
    could be modeled as independent samples from a multivariate Gaussian
    distribution. It uses a dynamic programming search algorithm with
    a heuristic that finds an approximate solution in time linear in the
    data length and always makes the locally optimal choice.
    Greedy Gaussian Segmentation (GGS) fits a segmented Gaussian model (SGM)
    to the data by computing an approximate solution to the combinatorial
    problem of finding the covariance-regularized maximum
    log-likelihood for a fixed number of change points and a regularization
    strength. It follows an iterative procedure
    where a new breakpoint is added and then all breakpoints are adjusted to
    (approximately) maximize the objective. It is similar to the top-down
    search used in other change point detection problems.
Parameters
----------
k_max: int, default=10
Maximum number of change points to find. The number of segments is thus k+1.
    lamb: float, default=1.0
Regularization parameter lambda (>= 0), which controls the amount of
(inverse) covariance regularization, see Eq (1) in [1]_. Regularization
is introduced to reduce issues for high-dimensional problems. Setting
``lamb`` to zero will ignore regularization, whereas large values of
lambda will favour simpler models.
max_shuffles: int, default=250
        Maximum number of shuffle passes performed when adjusting change
        point locations.
verbose: bool, default=False
If ``True`` verbose output is enabled.
random_state: int or np.random.RandomState, default=None
Either random seed or an instance of ``np.random.RandomState``
Attributes
----------
change_points_: array_like, default=[]
Locations of change points as integer indexes. By convention change points
include the identity segmentation, i.e. first and last index + 1 values.
_intermediate_change_points: List[List[int]], default=[]
Intermediate values of change points for each value of k = 1...k_max
_intermediate_ll: List[float], default=[]
Intermediate values for log-likelihood for each value of k = 1...k_max
Notes
-----
Based on the work from [1]_.
- source code adapted based on: https://github.com/cvxgrp/GGS
- paper available at: https://stanford.edu/~boyd/papers/pdf/ggs.pdf
References
----------
.. [1] Hallac, D., Nystrup, P. & Boyd, S.,
"Greedy Gaussian segmentation of multivariate time series.",
Adv Data Anal Classif 13, 727–751 (2019).
https://doi.org/10.1007/s11634-018-0335-0
"""
k_max: int = 10
lamb: float = 1.0
max_shuffles: int = 250
verbose: bool = False
random_state: int = None
change_points_: npt.ArrayLike = field(init=False, default=[])
_intermediate_change_points: List[List[int]] = field(init=False, default=[])
_intermediate_ll: List[float] = field(init=False, default=[])
def initialize_intermediates(self) -> None:
"""Initialize the state fo the estimator."""
self._intermediate_change_points = []
self._intermediate_ll = []
def log_likelihood(self, data: npt.ArrayLike) -> float:
"""
Compute the GGS log-likelihood of the segmented Gaussian model.
Parameters
----------
data: array_like
2D `array_like` representing time series with sequence index along
the first dimension and value series as columns.
Returns
-------
log_likelihood
"""
nrows, ncols = data.shape
cov = np.cov(data.T, bias=True)
(_, logdet) = np.linalg.slogdet(
cov + float(self.lamb) * np.identity(ncols) / nrows
)
return nrows * logdet - float(self.lamb) * np.trace(
np.linalg.inv(cov + float(self.lamb) * np.identity(ncols) / nrows)
)
def cumulative_log_likelihood(
self, data: npt.ArrayLike, change_points: List[int]
) -> float:
"""
Calculate cumulative GGS log-likelihood for all segments.
        Parameters
        ----------
data: array_like
2D `array_like` representing time series with sequence index along
the first dimension and value series as columns.
change_points: list of ints
Locations of change points as integer indexes. By convention change points
include the identity segmentation, i.e. first and last index + 1 values.
Returns
-------
log_likelihood: cumulative log likelihood
"""
log_likelihood = 0
for start, stop in zip(change_points[:-1], change_points[1:]):
segment = data[start:stop, :]
log_likelihood -= self.log_likelihood(segment)
return log_likelihood
def add_new_change_point(self, data: npt.ArrayLike) -> Tuple[int, float]:
"""
Add change point.
        This method finds a new change point that splits the segment and
        optimizes the objective function. See section 3.1 on the split
        subroutine in [1]_.
Parameters
----------
data: array_like
2D `array_like` representing time series with sequence index along
the first dimension and value series as columns.
Returns
-------
index: change point index
gll: gained log likelihood
"""
# Initialize parameters
m, n = data.shape
orig_mean = np.mean(data, axis=0)
orig_cov = np.cov(data.T, bias=True)
orig_ll = self.log_likelihood(data)
total_sum = m * (orig_cov + np.outer(orig_mean, orig_mean))
mu_left = data[0, :] / n
mu_right = (m * orig_mean - data[0, :]) / (m - 1)
runSum = np.outer(data[0, :], data[0, :])
# Loop through all samples
# find point where breaking the segment would have the largest LL increase
min_ll = orig_ll
new_index = 0
for i in range(2, m - 1):
# Update parameters
runSum = runSum + np.outer(data[i - 1, :], data[i - 1, :])
mu_left = ((i - 1) * mu_left + data[i - 1, :]) / (i)
mu_right = ((m - i + 1) * mu_right - data[i - 1, :]) / (m - i)
sigLeft = runSum / (i) - np.outer(mu_left, mu_left)
sigRight = (total_sum - runSum) / (m - i) - np.outer(mu_right, mu_right)
# Compute Cholesky, LogDet, and Trace
Lleft = np.linalg.cholesky(sigLeft + float(self.lamb) * np.identity(n) / i)
Lright = np.linalg.cholesky(
sigRight + float(self.lamb) * np.identity(n) / (m - i)
)
ll_left = 2 * sum(map(math.log, np.diag(Lleft)))
ll_right = 2 * sum(map(math.log, np.diag(Lright)))
(trace_left, trace_right) = (0, 0)
if self.lamb > 0:
trace_left = math.pow(np.linalg.norm(np.linalg.inv(Lleft)), 2)
trace_right = math.pow(np.linalg.norm(np.linalg.inv(Lright)), 2)
LL = (
i * ll_left
- float(self.lamb) * trace_left
+ (m - i) * ll_right
- float(self.lamb) * trace_right
)
# Keep track of the best point so far
if LL < min_ll:
min_ll = LL
new_index = i
# Return break, increase in LL
return new_index, min_ll - orig_ll
def adjust_change_points(
self, data: npt.ArrayLike, change_points: List[int], new_index: List[int]
) -> List[int]:
"""
Adjust change points.
This method adjusts the positions of all change points until the
result is 1-OPT, i.e., no change of any one breakpoint improves
the objective.
Parameters
----------
data: array_like
2D `array_like` representing time series with sequence index along
the first dimension and value series as columns.
change_points: list of ints
Locations of change points as integer indexes. By convention change points
include the identity segmentation, i.e. first and last index + 1 values.
new_index: list of ints
New change points
Returns
-------
change_points: list of ints
Locations of change points as integer indexes. By convention change points
include the identity segmentation, i.e. first and last index + 1 values.
"""
rng = check_random_state(self.random_state)
bp = change_points[:]
# Just one breakpoint, no need to adjust anything
if len(bp) == 3:
return bp
# Keep track of what change_points have changed,
# so that we don't have to adjust ones which we know are constant
last_pass = {}
this_pass = {b: 0 for b in bp}
for i in new_index:
this_pass[i] = 1
for _ in range(self.max_shuffles):
last_pass = dict(this_pass)
this_pass = {b: 0 for b in bp}
switch_any = False
ordering = list(range(1, len(bp) - 1))
rng.shuffle(ordering)
for i in ordering:
# Check if we need to adjust it
if (
last_pass[bp[i - 1]] == 1
or last_pass[bp[i + 1]] == 1
or this_pass[bp[i - 1]] == 1
or this_pass[bp[i + 1]] == 1
):
tempData = data[bp[i - 1] : bp[i + 1], :]
ind, val = self.add_new_change_point(tempData)
if bp[i] != ind + bp[i - 1] and val != 0:
last_pass[ind + bp[i - 1]] = last_pass[bp[i]]
del last_pass[bp[i]]
del this_pass[bp[i]]
this_pass[ind + bp[i - 1]] = 1
if self.verbose:
logger.info(
f"Moving {bp[i]} to {ind + bp[i - 1]}"
f"length = {tempData.shape[0]}, {ind}"
)
bp[i] = ind + bp[i - 1]
switch_any = True
if not switch_any:
return bp
return bp
def identity_segmentation(self, data: npt.ArrayLike) -> List[int]:
"""Initialize change points."""
return [0, data.shape[0] + 1]
def find_change_points(self, data: npt.ArrayLike) -> List[int]:
"""
Search iteratively for up to ``k_max`` change points.
Parameters
----------
data: array_like
2D `array_like` representing time series with sequence index along
the first dimension and value series as columns.
Returns
-------
The K change points, along with all intermediate change points (for k < K)
and their corresponding covariance-regularized maximum likelihoods.
"""
change_points = self.identity_segmentation(data)
self._intermediate_change_points = [change_points[:]]
self._intermediate_ll = [self.cumulative_log_likelihood(data, change_points)]
# Start GGS Algorithm
for _ in range(self.k_max):
new_index = -1
new_value = +1
# For each segment, find change point and increase in LL
for start, stop in zip(change_points[:-1], change_points[1:]):
segment = data[start:stop, :]
ind, val = self.add_new_change_point(segment)
if val < new_value:
new_index = ind + start
new_value = val
# Check if our algorithm is finished
if new_value == 0:
logger.info("Adding change points!")
return change_points
# Add new change point
change_points.append(new_index)
change_points.sort()
if self.verbose:
logger.info(f"Change point occurs at: {new_index}, LL: {new_value}")
# Adjust current locations of the change points
change_points = self.adjust_change_points(data, change_points, [new_index])[
:
]
# Calculate likelihood
ll = self.cumulative_log_likelihood(data, change_points)
self._intermediate_change_points.append(change_points[:])
self._intermediate_ll.append(ll)
return change_points
class GreedyGaussianSegmentation(BaseEstimator):
"""Greedy Gaussian Segmentation Estimator.
    The method approximates solutions for the problem of breaking a
    multivariate time series into segments, where the data in each segment
    could be modeled as independent samples from a multivariate Gaussian
    distribution. It uses a dynamic programming search algorithm with
    a heuristic that finds an approximate solution in time linear in the
    data length and always makes the locally optimal choice.
    Greedy Gaussian Segmentation (GGS) fits a segmented Gaussian model (SGM)
    to the data by computing an approximate solution to the combinatorial
    problem of finding the covariance-regularized maximum
    log-likelihood for a fixed number of change points and a regularization
    strength. It follows an iterative procedure
    where a new breakpoint is added and then all breakpoints are adjusted to
    (approximately) maximize the objective. It is similar to the top-down
    search used in other change point detection problems.
Parameters
----------
k_max: int, default=10
Maximum number of change points to find. The number of segments is thus k+1.
    lamb: float, default=1.0
Regularization parameter lambda (>= 0), which controls the amount of
(inverse) covariance regularization, see Eq (1) in [1]_. Regularization
is introduced to reduce issues for high-dimensional problems. Setting
``lamb`` to zero will ignore regularization, whereas large values of
lambda will favour simpler models.
max_shuffles: int, default=250
        Maximum number of shuffle passes performed when adjusting change
        point locations.
verbose: bool, default=False
If ``True`` verbose output is enabled.
random_state: int or np.random.RandomState, default=None
Either random seed or an instance of ``np.random.RandomState``
Attributes
----------
change_points_: array_like, default=[]
Locations of change points as integer indexes. By convention change points
include the identity segmentation, i.e. first and last index + 1 values.
_intermediate_change_points: List[List[int]], default=[]
Intermediate values of change points for each value of k = 1...k_max
_intermediate_ll: List[float], default=[]
Intermediate values for log-likelihood for each value of k = 1...k_max
Notes
-----
Based on the work from [1]_.
- source code adapted based on: https://github.com/cvxgrp/GGS
- paper available at: https://stanford.edu/~boyd/papers/pdf/ggs.pdf
References
----------
.. [1] Hallac, D., Nystrup, P. & Boyd, S.,
"Greedy Gaussian segmentation of multivariate time series.",
Adv Data Anal Classif 13, 727–751 (2019).
https://doi.org/10.1007/s11634-018-0335-0
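    Examples
    --------
    A minimal usage sketch (data and parameter values are illustrative):
    >>> import numpy as np
    >>> from sktime.annotation.ggs import GreedyGaussianSegmentation
    >>> X = np.vstack([np.random.normal(0, 1, (50, 3)),
    ...                np.random.normal(5, 1, (50, 3))])
    >>> ggs = GreedyGaussianSegmentation(k_max=3, random_state=0)
    >>> labels = ggs.fit_predict(X)  # one integer segment label per row of X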
"""
def __init__(
self,
k_max: int = 10,
lamb: float = 1.0,
max_shuffles: int = 250,
verbose: bool = False,
random_state: int = None,
):
        # this is ugly and necessary only because of the dumb `test_constructor`
self.k_max = k_max
self.lamb = lamb
self.max_shuffles = max_shuffles
self.verbose = verbose
self.random_state = random_state
self._adaptee_class = GGS
self._adaptee = self._adaptee_class(
k_max=k_max,
lamb=lamb,
max_shuffles=max_shuffles,
verbose=verbose,
random_state=random_state,
)
def fit(self, X: npt.ArrayLike, y: npt.ArrayLike = None):
"""Fit method for compatibility with sklearn-type estimator interface.
It sets the internal state of the estimator and returns the initialized
instance.
Parameters
----------
X: array_like
2D `array_like` representing time series with sequence index along
the first dimension and value series as columns.
y: array_like
Placeholder for compatibility with sklearn-api, not used, default=None.
"""
self._adaptee.initialize_intermediates()
return self
def predict(self, X: npt.ArrayLike, y: npt.ArrayLike = None) -> npt.ArrayLike:
"""Perform segmentation.
Parameters
----------
X: array_like
2D `array_like` representing time series with sequence index along
the first dimension and value series as columns.
y: array_like
Placeholder for compatibility with sklearn-api, not used, default=None.
Returns
-------
y_pred : array_like
1D array with predicted segmentation of the same size as the first
dimension of X. The numerical values represent distinct segments
labels for each of the data points.
"""
self.change_points_ = self._adaptee.find_change_points(X)
labels = np.zeros(X.shape[0], dtype=np.int32)
for i, (start, stop) in enumerate(
zip(self.change_points_[:-1], self.change_points_[1:])
):
labels[start:stop] = i
return labels
def fit_predict(self, X: npt.ArrayLike, y: npt.ArrayLike = None) -> npt.ArrayLike:
"""Perform segmentation.
Parameters
----------
X: array_like
2D `array_like` representing time series with sequence index along
the first dimension and value series as columns.
y: array_like
Placeholder for compatibility with sklearn-api, not used, default=None.
Returns
-------
y_pred : array_like
1D array with predicted segmentation of the same size as the first
dimension of X. The numerical values represent distinct segments
labels for each of the data points.
"""
return self.fit(X, y).predict(X, y)
def get_params(self, deep: bool = True) -> Dict:
"""Return initialization parameters.
Parameters
----------
deep: bool
Dummy argument for compatibility with sklearn-api, not used.
Returns
-------
params: dict
Dictionary with the estimator's initialization parameters, with
keys being argument names and values being argument values.
"""
return asdict(self._adaptee, filter=lambda attr, value: attr.init is True)
def set_params(self, **parameters):
"""Set the parameters of this object.
Parameters
----------
parameters : dict
            Initialization parameters for the estimator.
Returns
-------
self : reference to self (after parameters have been set)
"""
for key, value in parameters.items():
setattr(self._adaptee, key, value)
return self
def __repr__(self) -> str:
"""Return a string representation of the estimator."""
return self._adaptee.__repr__()
|
PypiClean
|
/upt-pypi-0.6.tar.gz/upt-pypi-0.6/upt_pypi/upt_pypi.py
|
import pkg_resources
import re
import tempfile
from urllib import request
import pkginfo
import requests
import upt
from .licenses import guess_licenses
class PyPIPackage(upt.Package):
pass
class PyPIFrontend(upt.Frontend):
name = 'pypi'
@staticmethod
def get_archive_info(release, kind):
for elt in release:
if elt['packagetype'] == kind:
digests = elt.get('digests', {})
return (elt['url'], elt.get('size', 0),
digests.get('md5'), digests.get('sha256'))
raise ValueError(f'No archive of type "{kind}" could be found')
def get_sdist_archive_url(self, release):
url, _, _, _ = self.get_archive_info(release, 'sdist')
return url
def get_wheel_url(self, release):
url, _, _, _ = self.get_archive_info(release, 'bdist_wheel')
return url
@staticmethod
def _string_req_to_upt_pkg_req(string_req):
r = pkg_resources.Requirement.parse(string_req)
name = r.project_name
specifier = ','.join(op+version for (op, version) in r.specs)
return upt.PackageRequirement(name, specifier)
@classmethod
def _string_req_list_to_upt_pkg_req_list(cls, string_req_list):
return [cls._string_req_to_upt_pkg_req(s) for s in string_req_list]
def parse_requires_dist(self, requires_dist):
run_reqs = []
test_reqs = []
for req in requires_dist:
try:
req_name, extra = req.split(';')
extra = extra.strip()
except ValueError: # No "extras".
req_name = req
extra = None
pkg = self._string_req_to_upt_pkg_req(req_name)
if extra is not None:
# We only care about extras if they are likely to define the
# test requirements.
# TODO: care about optional runtime requirements when upt
# provides support for them.
# TODO: handle cases where 'extra' matches a requirement on the
# Python version.
m = re.match("extra == '(.*)'", extra)
if m:
extra_name = m.group(1)
if extra_name in ('test', 'tests', 'testing'):
test_reqs.append(pkg)
else:
run_reqs.append(pkg)
return run_reqs, test_reqs
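    # Illustrative input/output (requirement strings are hypothetical):
    #   parse_requires_dist(["requests (>=2.0)", "pytest ; extra == 'test'"])
    #   -> ([upt.PackageRequirement('requests', '>=2.0')],
    #       [upt.PackageRequirement('pytest', '')])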
def compute_requirements_from_wheel(self, wheel_url):
reqs = {}
with tempfile.NamedTemporaryFile(suffix=".whl") as wheelfile:
request.urlretrieve(wheel_url, wheelfile.name)
wheel = pkginfo.Wheel(wheelfile.name)
run_reqs, test_reqs = self.parse_requires_dist(wheel.requires_dist)
if run_reqs:
reqs['run'] = run_reqs
if test_reqs:
reqs['test'] = test_reqs
return reqs
def compute_requirements_from_pypi_json(self, json):
reqs = {}
requires_dist = json.get('info', {}).get('requires_dist', [])
run_reqs, test_reqs = self.parse_requires_dist(requires_dist or [])
if run_reqs:
reqs['run'] = run_reqs
if test_reqs:
reqs['test'] = test_reqs
return reqs
def compute_requirements(self):
"""Computes the requirements using various methods.
        Try to compute the runtime requirements of the package by:
- looking at the requires_dist field of the JSON document we are
parsing.
- looking at the contents of the wheel. It is the most reliable
method, but it is a bit slower since we need to download the wheel.
The relevant data is not always available in the JSON returned by PyPI.
For instance, alembic 1.7.1 depends on a few packages, but none of them
        are shown on https://pypi.org/pypi/alembic/1.7.1/json: the
.info.requires_dist field is null. The dependencies are available in
the wheel, though.
"""
reqs = self.compute_requirements_from_pypi_json(self.json)
if reqs:
return reqs
else:
# If we did not get any requirements from the JSON we parsed, this
# could mean two different things:
# 1) The package has no dependencies at all (unlikely)
# 2) Data was missing from the JSON we parsed (this does happen for
# some releases, for unknown reasons)
# Since this is suspicious, we try to get the requirements from the
# wheel (if there is one) instead.
try:
version = self.json['info']['version']
wheel_url = self.get_wheel_url(self.json['releases'][version])
return self.compute_requirements_from_wheel(wheel_url)
except ValueError: # No wheel for this package
return {}
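    # Illustrative mapping for the JSON path (requirement data hypothetical):
    #   {"info": {"requires_dist": ["requests (>=2.0)"]}}
    #   -> {"run": [upt.PackageRequirement("requests", ">=2.0")]}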
def get_archives(self, release):
url, size, md5, sha256 = self.get_archive_info(release, 'sdist')
archive = upt.Archive(url, size=size, md5=md5, sha256=sha256)
return [archive]
@staticmethod
def get_name(json):
"""Return the name of the package.
We cannot just rely on the name submitted by the user, since they may
use the wrong capitalization.
"""
return json['info']['name']
def parse(self, pkg_name, version=None):
if version is not None:
url = f'https://pypi.org/pypi/{pkg_name}/{version}/json'
r = requests.get(url)
if not r.ok:
raise upt.InvalidPackageVersionError(self.name, pkg_name,
version)
else:
url = f'https://pypi.org/pypi/{pkg_name}/json'
r = requests.get(url)
if not r.ok:
raise upt.InvalidPackageNameError(self.name, pkg_name)
self.json = r.json()
version = self.json['info']['version']
requirements = self.compute_requirements()
try:
self.archives = self.get_archives(self.json['releases'][version])
sdist_url = self.archives[0].url
except ValueError:
self.archives = []
sdist_url = ''
d = {
'homepage': self.json['info']['home_page'],
'summary': self.json['info']['summary'],
'description': self.json['info']['description'],
'requirements': requirements,
'archives': self.archives,
'licenses': guess_licenses(self.json, sdist_url),
}
return PyPIPackage(self.get_name(self.json), version, **d)
|
PypiClean
|
/tess-locator-0.5.0.tar.gz/tess-locator-0.5.0/README.rst
|
tess-locator
============
**Where is my favorite star or galaxy in NASA's TESS Full Frame Image data set?**
|pypi| |pytest| |black| |flake8| |mypy|
.. |pypi| image:: https://img.shields.io/pypi/v/tess-locator
:target: https://pypi.python.org/pypi/tess-locator
.. |pytest| image:: https://github.com/SSDataLab/tess-locator/workflows/pytest/badge.svg
.. |black| image:: https://github.com/SSDataLab/tess-locator/workflows/black/badge.svg
.. |flake8| image:: https://github.com/SSDataLab/tess-locator/workflows/flake8/badge.svg
.. |mypy| image:: https://github.com/SSDataLab/tess-locator/workflows/mypy/badge.svg
`tess-locator` is a user-friendly package which combines the
`tess-point <https://github.com/christopherburke/tess-point>`_
and `tess-cloud <https://github.com/SSDataLab/tess-cloud>`_ packages
to enable the positions of astronomical objects in the TESS data set
to be queried in a fast and friendly way.
Installation
------------
.. code-block:: bash
python -m pip install tess-locator
Example use
-----------
Converting celestial coordinates to TESS pixel coordinates:
.. code-block:: python
>>> from tess_locator import locate
>>> locate("Alpha Cen")
List of 3 coordinates
↳[TessCoord(sector=11, camera=2, ccd=2, column=1699.1, row=1860.3, time=None)
TessCoord(sector=12, camera=2, ccd=1, column=359.9, row=1838.7, time=None)
TessCoord(sector=38, camera=2, ccd=2, column=941.1, row=1953.7, time=None)]
Obtaining pixel coordinates for a specific time:
.. code-block:: python
>>> locate("Alpha Cen", time="2019-04-28")
List of 1 coordinates
↳[TessCoord(sector=11, camera=2, ccd=2, column=1699.1, row=1860.3, time=2019-04-28 00:00:00)]
Obtaining pixel coordinates for a specific celestial coordinate:
.. code-block:: python
>>> from astropy.coordinates import SkyCoord
>>> crd = SkyCoord(ra=60, dec=70, unit='deg')
>>> locate(crd)
List of 1 coordinates
↳[TessCoord(sector=19, camera=2, ccd=2, column=355.3, row=1045.9, time=None)]
You can access the properties of `TessCoord` objects using standard list and attribute syntax:
.. code-block:: python
>>> crdlist = locate("Alpha Cen")
>>> crdlist[0].sector, crdlist[0].camera, crdlist[0].ccd, crdlist[0].column, crdlist[0].row
(11, 2, 2, 1699.0540739785683, 1860.2510951146114)
When you have obtained a `TessCoord` object, you can use it to obtain a list of the TESS Full Frame Images (FFIs) which covered the position:
.. code-block:: python
>>> crdlist[0].list_images()
List of 1248 images
↳[TessImage("tess2019113062933-s0011-2-2-0143-s_ffic.fits")
TessImage("tess2019113065933-s0011-2-2-0143-s_ffic.fits")
TessImage("tess2019113072933-s0011-2-2-0143-s_ffic.fits")
TessImage("tess2019113075933-s0011-2-2-0143-s_ffic.fits")
...
TessImage("tess2019140065932-s0011-2-2-0143-s_ffic.fits")
TessImage("tess2019140072932-s0011-2-2-0143-s_ffic.fits")
TessImage("tess2019140075932-s0011-2-2-0143-s_ffic.fits")
TessImage("tess2019140082932-s0011-2-2-0143-s_ffic.fits")]
Documentation
-------------
Please visit the `tutorial <https://github.com/SSDataLab/tess-locator/blob/master/docs/tutorial.ipynb>`_.
Similar packages
----------------
* `tess-point <https://github.com/christopherburke/tess-point>`_ is the package being called behind the scenes. Compared to `tess-point`, we add a user-friendly API and the ability to specify the time, which is important for moving objects.
* `astroquery.mast <https://astroquery.readthedocs.io/en/latest/mast/mast.html>`_ includes the excellent ``TesscutClass.get_sectors()`` method which queries a web API. This package provides an offline version of that service, and adds the ability to query by time.
* `tess-waldo <https://github.com/SimonJMurphy/tess-waldo>`_ lets you visualize how a target moves over the detector across sectors. It queries the ``TessCut`` service to obtain this information. This package adds the ability to create such plots offline.
|
PypiClean
|
/gphotos-sync-3.1.3.tar.gz/gphotos-sync-3.1.3/src/gphotos_sync/DatabaseMedia.py
|
from datetime import datetime
from pathlib import Path
from typing import Optional, TypeVar
from gphotos_sync import Utils
from gphotos_sync.BaseMedia import BaseMedia
from gphotos_sync.Checks import get_check
# this allows self reference to this class in its factory methods
D = TypeVar("D", bound="DatabaseMedia")
# noinspection PyUnresolvedReferences
# pylint: disable=no-member
class DatabaseMedia(BaseMedia):
"""A Class for reading and writing BaseMedia objects to and from
database tables
The standard BaseMedia attributes are represented here. This dumb class
    is used for representing any BaseMedia-derived class that has been read out
of the Database.
Attributes:
_id: remote identifier from Google Photos
_url: the 'product URL' which takes you to the Web view for this file
_relative_folder: root relative path to file
_filename: local filename
_orig_name: as above minus any duplicate number suffix
_duplicate_number: which instance if > 1 file has same orig_name
_size: files size on disk
_mime_type: string representation of file type
_date: modification date
_create_date: creation date
_description:
_downloaded: true if previously downloaded to disk
"""
def __init__(
self,
_id: str = "",
_uid: str = "",
_url: str = "",
_relative_folder: Path = Path(),
_filename: str = "",
_orig_name: str = "",
_duplicate_number: int = 0,
_size: int = 0,
_mime_type: str = "",
_description: str = "",
_date: datetime = Utils.MINIMUM_DATE,
_create_date: datetime = Utils.MINIMUM_DATE,
_downloaded: bool = False,
_location: str = "",
):
super(DatabaseMedia, self).__init__()
self._id = _id
self._uid = _uid
self._url = _url
self._relative_folder = _relative_folder
self._filename = _filename
self._orig_name = _orig_name
self._duplicate_number = _duplicate_number
self._size = _size
self._mime_type = _mime_type
self._description = _description
self._date = _date
self._create_date = _create_date
self._downloaded = _downloaded
self._location = _location
# this is used to replace meta data that has been extracted from the
# file system and overrides that provided by Google API
# noinspection PyAttributeOutsideInit
def update_extra_meta(self, uid, create_date, size):
self._uid = uid
self._create_date = create_date
self._size = size
@property
def location(self) -> Optional[str]:
"""
image GPS information
"""
return self._location
# ----- BaseMedia base class override Properties below -----
@property
def size(self) -> int:
return self._size
@property
def mime_type(self) -> Optional[str]:
return self._mime_type
@property
def id(self) -> str:
return self._id
@property
def uid(self) -> str:
return self._uid
@property
def description(self) -> str:
"""
The description of the file
"""
return get_check().valid_file_name(self._description)
@property
def orig_name(self) -> str:
"""
Original filename before duplicate name handling
"""
return get_check().valid_file_name(self._orig_name)
@property
def filename(self) -> str:
"""
filename including a suffix to make it unique if duplicates exist
"""
return get_check().valid_file_name(self._filename)
@property
def create_date(self) -> datetime:
"""
Creation date
"""
return self._create_date
@property
def modify_date(self) -> datetime:
"""
Modify Date
"""
return self._date
@property
def url(self) -> str:
"""
Remote url to retrieve this file from the server
"""
return self._url
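# Illustrative construction (all field values are hypothetical):
#   media = DatabaseMedia(_id="abc123", _filename="IMG_0001.JPG",
#                         _relative_folder=Path("2020/05"), _size=2048)
#   media.filename  # -> "IMG_0001.JPG", validated via get_check().valid_file_name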
|
PypiClean
|
/benchling_api_client-2.0.207-py3-none-any.whl/benchling_api_client/v2/beta/models/dna_sequence_bulk_upsert_request.py
|
from typing import Any, cast, Dict, List, Optional, Type, TypeVar, Union
import attr
from ..extensions import NotPresentError
from ..models.archive_record_set import ArchiveRecordSet
from ..models.custom_fields import CustomFields
from ..models.dna_annotation import DnaAnnotation
from ..models.dna_sequence_part import DnaSequencePart
from ..models.fields_with_resolution import FieldsWithResolution
from ..models.primer import Primer
from ..models.translation import Translation
from ..types import UNSET, Unset
T = TypeVar("T", bound="DnaSequenceBulkUpsertRequest")
@attr.s(auto_attribs=True, repr=False)
class DnaSequenceBulkUpsertRequest:
""" """
_name: str
_schema_id: str
_registry_id: str
_aliases: Union[Unset, List[str]] = UNSET
_annotations: Union[Unset, List[DnaAnnotation]] = UNSET
_author_ids: Union[Unset, List[str]] = UNSET
_bases: Union[Unset, str] = UNSET
_custom_fields: Union[Unset, CustomFields] = UNSET
_fields: Union[Unset, FieldsWithResolution] = UNSET
_folder_id: Union[Unset, str] = UNSET
_is_circular: Union[Unset, bool] = UNSET
_parts: Union[Unset, List[DnaSequencePart]] = UNSET
_primers: Union[Unset, List[Primer]] = UNSET
_translations: Union[Unset, List[Translation]] = UNSET
_entity_registry_id: Union[Unset, str] = UNSET
_archive_record: Union[Unset, ArchiveRecordSet] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def __repr__(self):
fields = []
fields.append("name={}".format(repr(self._name)))
fields.append("schema_id={}".format(repr(self._schema_id)))
fields.append("registry_id={}".format(repr(self._registry_id)))
fields.append("aliases={}".format(repr(self._aliases)))
fields.append("annotations={}".format(repr(self._annotations)))
fields.append("author_ids={}".format(repr(self._author_ids)))
fields.append("bases={}".format(repr(self._bases)))
fields.append("custom_fields={}".format(repr(self._custom_fields)))
fields.append("fields={}".format(repr(self._fields)))
fields.append("folder_id={}".format(repr(self._folder_id)))
fields.append("is_circular={}".format(repr(self._is_circular)))
fields.append("parts={}".format(repr(self._parts)))
fields.append("primers={}".format(repr(self._primers)))
fields.append("translations={}".format(repr(self._translations)))
fields.append("entity_registry_id={}".format(repr(self._entity_registry_id)))
fields.append("archive_record={}".format(repr(self._archive_record)))
fields.append("additional_properties={}".format(repr(self.additional_properties)))
return "DnaSequenceBulkUpsertRequest({})".format(", ".join(fields))
def to_dict(self) -> Dict[str, Any]:
name = self._name
schema_id = self._schema_id
registry_id = self._registry_id
aliases: Union[Unset, List[Any]] = UNSET
if not isinstance(self._aliases, Unset):
aliases = self._aliases
annotations: Union[Unset, List[Any]] = UNSET
if not isinstance(self._annotations, Unset):
annotations = []
for annotations_item_data in self._annotations:
annotations_item = annotations_item_data.to_dict()
annotations.append(annotations_item)
author_ids: Union[Unset, List[Any]] = UNSET
if not isinstance(self._author_ids, Unset):
author_ids = self._author_ids
bases = self._bases
custom_fields: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self._custom_fields, Unset):
custom_fields = self._custom_fields.to_dict()
fields: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self._fields, Unset):
fields = self._fields.to_dict()
folder_id = self._folder_id
is_circular = self._is_circular
parts: Union[Unset, List[Any]] = UNSET
if not isinstance(self._parts, Unset):
parts = []
for parts_item_data in self._parts:
parts_item = parts_item_data.to_dict()
parts.append(parts_item)
primers: Union[Unset, List[Any]] = UNSET
if not isinstance(self._primers, Unset):
primers = []
for primers_item_data in self._primers:
primers_item = primers_item_data.to_dict()
primers.append(primers_item)
translations: Union[Unset, List[Any]] = UNSET
if not isinstance(self._translations, Unset):
translations = []
for translations_item_data in self._translations:
translations_item = translations_item_data.to_dict()
translations.append(translations_item)
entity_registry_id = self._entity_registry_id
archive_record: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self._archive_record, Unset):
archive_record = self._archive_record.to_dict()
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
# Allow the model to serialize even if it was created outside of the constructor, circumventing validation
if name is not UNSET:
field_dict["name"] = name
if schema_id is not UNSET:
field_dict["schemaId"] = schema_id
if registry_id is not UNSET:
field_dict["registryId"] = registry_id
if aliases is not UNSET:
field_dict["aliases"] = aliases
if annotations is not UNSET:
field_dict["annotations"] = annotations
if author_ids is not UNSET:
field_dict["authorIds"] = author_ids
if bases is not UNSET:
field_dict["bases"] = bases
if custom_fields is not UNSET:
field_dict["customFields"] = custom_fields
if fields is not UNSET:
field_dict["fields"] = fields
if folder_id is not UNSET:
field_dict["folderId"] = folder_id
if is_circular is not UNSET:
field_dict["isCircular"] = is_circular
if parts is not UNSET:
field_dict["parts"] = parts
if primers is not UNSET:
field_dict["primers"] = primers
if translations is not UNSET:
field_dict["translations"] = translations
if entity_registry_id is not UNSET:
field_dict["entityRegistryId"] = entity_registry_id
if archive_record is not UNSET:
field_dict["archiveRecord"] = archive_record
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any], strict: bool = False) -> T:
d = src_dict.copy()
def get_name() -> str:
name = d.pop("name")
return name
try:
name = get_name()
except KeyError:
if strict:
raise
name = cast(str, UNSET)
def get_schema_id() -> str:
schema_id = d.pop("schemaId")
return schema_id
try:
schema_id = get_schema_id()
except KeyError:
if strict:
raise
schema_id = cast(str, UNSET)
def get_registry_id() -> str:
registry_id = d.pop("registryId")
return registry_id
try:
registry_id = get_registry_id()
except KeyError:
if strict:
raise
registry_id = cast(str, UNSET)
def get_aliases() -> Union[Unset, List[str]]:
aliases = cast(List[str], d.pop("aliases"))
return aliases
try:
aliases = get_aliases()
except KeyError:
if strict:
raise
aliases = cast(Union[Unset, List[str]], UNSET)
def get_annotations() -> Union[Unset, List[DnaAnnotation]]:
annotations = []
_annotations = d.pop("annotations")
for annotations_item_data in _annotations or []:
annotations_item = DnaAnnotation.from_dict(annotations_item_data, strict=False)
annotations.append(annotations_item)
return annotations
try:
annotations = get_annotations()
except KeyError:
if strict:
raise
annotations = cast(Union[Unset, List[DnaAnnotation]], UNSET)
def get_author_ids() -> Union[Unset, List[str]]:
author_ids = cast(List[str], d.pop("authorIds"))
return author_ids
try:
author_ids = get_author_ids()
except KeyError:
if strict:
raise
author_ids = cast(Union[Unset, List[str]], UNSET)
def get_bases() -> Union[Unset, str]:
bases = d.pop("bases")
return bases
try:
bases = get_bases()
except KeyError:
if strict:
raise
bases = cast(Union[Unset, str], UNSET)
def get_custom_fields() -> Union[Unset, CustomFields]:
            custom_fields: Union[Unset, CustomFields] = UNSET
_custom_fields = d.pop("customFields")
if not isinstance(_custom_fields, Unset):
custom_fields = CustomFields.from_dict(_custom_fields)
return custom_fields
try:
custom_fields = get_custom_fields()
except KeyError:
if strict:
raise
custom_fields = cast(Union[Unset, CustomFields], UNSET)
def get_fields() -> Union[Unset, FieldsWithResolution]:
            fields: Union[Unset, FieldsWithResolution] = UNSET
_fields = d.pop("fields")
if not isinstance(_fields, Unset):
fields = FieldsWithResolution.from_dict(_fields)
return fields
try:
fields = get_fields()
except KeyError:
if strict:
raise
fields = cast(Union[Unset, FieldsWithResolution], UNSET)
def get_folder_id() -> Union[Unset, str]:
folder_id = d.pop("folderId")
return folder_id
try:
folder_id = get_folder_id()
except KeyError:
if strict:
raise
folder_id = cast(Union[Unset, str], UNSET)
def get_is_circular() -> Union[Unset, bool]:
is_circular = d.pop("isCircular")
return is_circular
try:
is_circular = get_is_circular()
except KeyError:
if strict:
raise
is_circular = cast(Union[Unset, bool], UNSET)
def get_parts() -> Union[Unset, List[DnaSequencePart]]:
parts = []
_parts = d.pop("parts")
for parts_item_data in _parts or []:
parts_item = DnaSequencePart.from_dict(parts_item_data, strict=False)
parts.append(parts_item)
return parts
try:
parts = get_parts()
except KeyError:
if strict:
raise
parts = cast(Union[Unset, List[DnaSequencePart]], UNSET)
def get_primers() -> Union[Unset, List[Primer]]:
primers = []
_primers = d.pop("primers")
for primers_item_data in _primers or []:
primers_item = Primer.from_dict(primers_item_data, strict=False)
primers.append(primers_item)
return primers
try:
primers = get_primers()
except KeyError:
if strict:
raise
primers = cast(Union[Unset, List[Primer]], UNSET)
def get_translations() -> Union[Unset, List[Translation]]:
translations = []
_translations = d.pop("translations")
for translations_item_data in _translations or []:
translations_item = Translation.from_dict(translations_item_data, strict=False)
translations.append(translations_item)
return translations
try:
translations = get_translations()
except KeyError:
if strict:
raise
translations = cast(Union[Unset, List[Translation]], UNSET)
def get_entity_registry_id() -> Union[Unset, str]:
entity_registry_id = d.pop("entityRegistryId")
return entity_registry_id
try:
entity_registry_id = get_entity_registry_id()
except KeyError:
if strict:
raise
entity_registry_id = cast(Union[Unset, str], UNSET)
def get_archive_record() -> Union[Unset, ArchiveRecordSet]:
            archive_record: Union[Unset, ArchiveRecordSet] = UNSET
_archive_record = d.pop("archiveRecord")
if not isinstance(_archive_record, Unset):
archive_record = ArchiveRecordSet.from_dict(_archive_record)
return archive_record
try:
archive_record = get_archive_record()
except KeyError:
if strict:
raise
archive_record = cast(Union[Unset, ArchiveRecordSet], UNSET)
dna_sequence_bulk_upsert_request = cls(
name=name,
schema_id=schema_id,
registry_id=registry_id,
aliases=aliases,
annotations=annotations,
author_ids=author_ids,
bases=bases,
custom_fields=custom_fields,
fields=fields,
folder_id=folder_id,
is_circular=is_circular,
parts=parts,
primers=primers,
translations=translations,
entity_registry_id=entity_registry_id,
archive_record=archive_record,
)
dna_sequence_bulk_upsert_request.additional_properties = d
return dna_sequence_bulk_upsert_request
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
def get(self, key, default=None) -> Optional[Any]:
return self.additional_properties.get(key, default)
@property
def name(self) -> str:
if isinstance(self._name, Unset):
raise NotPresentError(self, "name")
return self._name
@name.setter
def name(self, value: str) -> None:
self._name = value
@property
def schema_id(self) -> str:
if isinstance(self._schema_id, Unset):
raise NotPresentError(self, "schema_id")
return self._schema_id
@schema_id.setter
def schema_id(self, value: str) -> None:
self._schema_id = value
@property
def registry_id(self) -> str:
if isinstance(self._registry_id, Unset):
raise NotPresentError(self, "registry_id")
return self._registry_id
@registry_id.setter
def registry_id(self, value: str) -> None:
self._registry_id = value
@property
def aliases(self) -> List[str]:
""" Aliases to add to the DNA sequence """
if isinstance(self._aliases, Unset):
raise NotPresentError(self, "aliases")
return self._aliases
@aliases.setter
def aliases(self, value: List[str]) -> None:
self._aliases = value
@aliases.deleter
def aliases(self) -> None:
self._aliases = UNSET
@property
def annotations(self) -> List[DnaAnnotation]:
"""Annotations to create on the DNA sequence."""
if isinstance(self._annotations, Unset):
raise NotPresentError(self, "annotations")
return self._annotations
@annotations.setter
def annotations(self, value: List[DnaAnnotation]) -> None:
self._annotations = value
@annotations.deleter
def annotations(self) -> None:
self._annotations = UNSET
@property
def author_ids(self) -> List[str]:
""" IDs of users to set as the DNA sequence's authors. """
if isinstance(self._author_ids, Unset):
raise NotPresentError(self, "author_ids")
return self._author_ids
@author_ids.setter
def author_ids(self, value: List[str]) -> None:
self._author_ids = value
@author_ids.deleter
def author_ids(self) -> None:
self._author_ids = UNSET
@property
def bases(self) -> str:
"""Base pairs for the DNA sequence."""
if isinstance(self._bases, Unset):
raise NotPresentError(self, "bases")
return self._bases
@bases.setter
def bases(self, value: str) -> None:
self._bases = value
@bases.deleter
def bases(self) -> None:
self._bases = UNSET
@property
def custom_fields(self) -> CustomFields:
if isinstance(self._custom_fields, Unset):
raise NotPresentError(self, "custom_fields")
return self._custom_fields
@custom_fields.setter
def custom_fields(self, value: CustomFields) -> None:
self._custom_fields = value
@custom_fields.deleter
def custom_fields(self) -> None:
self._custom_fields = UNSET
@property
def fields(self) -> FieldsWithResolution:
if isinstance(self._fields, Unset):
raise NotPresentError(self, "fields")
return self._fields
@fields.setter
def fields(self, value: FieldsWithResolution) -> None:
self._fields = value
@fields.deleter
def fields(self) -> None:
self._fields = UNSET
@property
def folder_id(self) -> str:
"""ID of the folder containing the DNA sequence."""
if isinstance(self._folder_id, Unset):
raise NotPresentError(self, "folder_id")
return self._folder_id
@folder_id.setter
def folder_id(self, value: str) -> None:
self._folder_id = value
@folder_id.deleter
def folder_id(self) -> None:
self._folder_id = UNSET
@property
def is_circular(self) -> bool:
"""Whether the DNA sequence is circular or linear."""
if isinstance(self._is_circular, Unset):
raise NotPresentError(self, "is_circular")
return self._is_circular
@is_circular.setter
def is_circular(self, value: bool) -> None:
self._is_circular = value
@is_circular.deleter
def is_circular(self) -> None:
self._is_circular = UNSET
@property
def parts(self) -> List[DnaSequencePart]:
if isinstance(self._parts, Unset):
raise NotPresentError(self, "parts")
return self._parts
@parts.setter
def parts(self, value: List[DnaSequencePart]) -> None:
self._parts = value
@parts.deleter
def parts(self) -> None:
self._parts = UNSET
@property
def primers(self) -> List[Primer]:
if isinstance(self._primers, Unset):
raise NotPresentError(self, "primers")
return self._primers
@primers.setter
def primers(self, value: List[Primer]) -> None:
self._primers = value
@primers.deleter
def primers(self) -> None:
self._primers = UNSET
@property
def translations(self) -> List[Translation]:
"""Translations to create on the DNA sequence. Translations are specified by either a combination of 'start' and 'end' fields, or a list of regions. Both cannot be provided."""
if isinstance(self._translations, Unset):
raise NotPresentError(self, "translations")
return self._translations
@translations.setter
def translations(self, value: List[Translation]) -> None:
self._translations = value
@translations.deleter
def translations(self) -> None:
self._translations = UNSET
@property
def entity_registry_id(self) -> str:
""" Registry ID of the entity in Benchling. """
if isinstance(self._entity_registry_id, Unset):
raise NotPresentError(self, "entity_registry_id")
return self._entity_registry_id
@entity_registry_id.setter
def entity_registry_id(self, value: str) -> None:
self._entity_registry_id = value
@entity_registry_id.deleter
def entity_registry_id(self) -> None:
self._entity_registry_id = UNSET
@property
def archive_record(self) -> ArchiveRecordSet:
""" Currently, we only support setting a null value for archiveRecord, which unarchives the item """
if isinstance(self._archive_record, Unset):
raise NotPresentError(self, "archive_record")
return self._archive_record
@archive_record.setter
def archive_record(self, value: ArchiveRecordSet) -> None:
self._archive_record = value
@archive_record.deleter
def archive_record(self) -> None:
self._archive_record = UNSET
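# Illustrative round trip (identifiers are hypothetical):
#   req = DnaSequenceBulkUpsertRequest(
#       name="My Sequence", schema_id="ts_abc123", registry_id="src_xyz789"
#   )
#   payload = req.to_dict()  # serializes to camelCase keys: schemaId, registryId
#   DnaSequenceBulkUpsertRequest.from_dict(payload)  # reconstructs the model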
|
PypiClean
|
/django-shpaml-template-loader-0.2.3.tar.gz/django-shpaml-template-loader-0.2.3/shpaml_loader/shpaml.py
|
import re
__version__ = '1.00b'
def convert_text(in_body):
'''
You can call convert_text directly to convert shpaml markup
to HTML markup.
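    Illustrative example (two-space indentation in the input):
        convert_text("ul\\n  li | one")
        # -> "<ul>\\n  <li>one</li>\\n</ul>\\n"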
'''
return convert_shpaml_tree(in_body)
PASS_SYNTAX = 'PASS'
FLUSH_LEFT_SYNTAX = '|| '
FLUSH_LEFT_EMPTY_LINE = '||'
TAG_WHITESPACE_ATTRS = re.compile(r'(\S+)([ \t]*?)(.*)')
TAG_AND_REST = re.compile(r'((?:[^ \t\.#]|\.\.)+)(.*)')
CLASS_OR_ID = re.compile(r'([.#])((?:[^ \t\.#]|\.\.)+)')
COMMENT_SYNTAX = re.compile(r'^::comment$')
VERBATIM_SYNTAX = re.compile('(.+) VERBATIM$')
DJANGO_TAG_SYNTAX = re.compile(r'^%(.+)')
DIV_SHORTCUT = re.compile(r'^(?:#|(?:\.(?!\.)))')
quotedText = r"""(?:(?:'(?:\\'|[^'])*')|(?:"(?:\\"|[^"])*"))"""
AUTO_QUOTE = re.compile("""([ \t]+[^ \t=]+=)(""" + quotedText + """|[^ \t]+)""")
def AUTO_QUOTE_ATTRIBUTES(attrs):
def _sub(m):
attr = m.group(2)
if attr[0] in "\"'":
return m.group(1) + attr
return m.group(1) + '"' + attr + '"'
    return re.sub(AUTO_QUOTE, _sub, attrs)
def syntax(regex):
def wrap(f):
f.regex = re.compile(regex)
return f
return wrap
@syntax('([ \t]*)(.*)')
def INDENT(m):
prefix, line = m.groups()
line = line.rstrip()
if line == '':
prefix = ''
return prefix, line
@syntax(r'^([<{\(\)]\S.*)')
def RAW_HTML(m):
return m.group(1).rstrip()
@syntax('= ?(.*)')
def DJANGO_VAR(m):
return "{{ %s }}" % m.group(1).rstrip()
@syntax('%(.*)')
def DJANGO_TAG(m):
return "{%% %s %%}" % m.group(1).rstrip()
@syntax(r'^\| (.*)')
def TEXT(m):
return m.group(1).rstrip()
@syntax('(.*?) > (.*)')
def OUTER_CLOSING_TAG(m):
tag, text = m.groups()
text = convert_line(text)
return enclose_tag(tag, text)
@syntax(r'(.*?) \|= (.*)')
def DJANGO_VAR_ENCLOSING_TAG(m):
tag, text = m.groups()
text = "{{ %s }}" % text.strip()
return enclose_tag(tag, text)
@syntax(r'%(.*) \| (.*)')
def TEXT_ENCLOSING_DJANGO_TAG(m):
tag, text = m.groups()
return enclose_django_tag(tag, text)
@syntax(r'%(.*) \|\|')
def EMPTY_DJANGO_TAG(m):
tag = m.groups()[0]
return enclose_django_tag(tag, "")
@syntax(r'(.*?) \|\|')
def EMPTY_TAG(m):
tag = m.groups()[0]
return enclose_tag(tag, "")
@syntax(r'(.*?) \|% (.*)')
def DJANGO_TAG_ENCLOSING_TAG(m):
tag, text = m.groups()
text = "{%% %s %%}" % text.strip()
return enclose_tag(tag, text)
@syntax(r'(.*?) \| (.*)')
def TEXT_ENCLOSING_TAG(m):
tag, text = m.groups()
return enclose_tag(tag, text)
@syntax('> (.*)')
def SELF_CLOSING_TAG(m):
tag = m.group(1).strip()
return '<%s>' % apply_jquery(tag)[0]
@syntax('(.*)')
def RAW_TEXT(m):
return m.group(1).rstrip()
LINE_METHODS = [
RAW_HTML,
DJANGO_VAR,
EMPTY_DJANGO_TAG,
TEXT_ENCLOSING_DJANGO_TAG,
DJANGO_TAG,
TEXT,
OUTER_CLOSING_TAG,
DJANGO_TAG_ENCLOSING_TAG,
DJANGO_VAR_ENCLOSING_TAG,
EMPTY_TAG,
TEXT_ENCLOSING_TAG,
SELF_CLOSING_TAG,
RAW_TEXT,
]
def convert_shpaml_tree(in_body):
"""Returns HTML as a basestring.
Parameters
----------
in_body : basestring
SHPAML source code.
Implementation Notes
--------------------
This is just a wrapper around the indent function, which requires
a bunch of other arguments that specify various characteristics
about the language we are trying to parse. This function just
passes in values that are specific to SHPAML.
"""
return indent(in_body,
branch_method=html_block_tag,
leaf_method=convert_line,
pass_syntax=PASS_SYNTAX,
flush_left_syntax=FLUSH_LEFT_SYNTAX,
flush_left_empty_line=FLUSH_LEFT_EMPTY_LINE,
indentation_method=find_indentation)
def html_block_tag(output, block, recurse):
append = output.append
prefix, tag = block[0]
if RAW_HTML.regex.match(tag):
append(prefix + tag)
recurse(block[1:])
elif COMMENT_SYNTAX.match(tag):
pass
elif VERBATIM_SYNTAX.match(tag):
m = VERBATIM_SYNTAX.match(tag)
tag = m.group(1).rstrip()
start_tag, end_tag = apply_jquery_sugar(tag)
append(prefix + start_tag)
stream(append, block[1:])
append(prefix + end_tag)
elif DJANGO_TAG_SYNTAX.match(tag):
m = DJANGO_TAG_SYNTAX.match(tag)
tag = m.group(1).rstrip()
start_tag, end_tag = apply_django_sugar(tag)
append(prefix + start_tag)
recurse(block[1:])
append(prefix + end_tag)
else:
start_tag, end_tag = apply_jquery_sugar(tag)
append(prefix + start_tag)
recurse(block[1:])
append(prefix + end_tag)
def stream(append, prefix_lines):
for prefix, line in prefix_lines:
if line == '':
append('')
else:
append(prefix + line)
def convert_line(line):
prefix, line = find_indentation(line.strip())
for method in LINE_METHODS:
m = method.regex.match(line)
if m:
return prefix + method(m)
def apply_django_sugar(tag):
start_tag = '{%% %s %%}' % tag
end_tag = '{%% end%s %%}' % tag.split(" ")[0]
return (start_tag, end_tag)
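# Illustrative: apply_django_sugar('for item in items') returns
# ('{% for item in items %}', '{% endfor %}').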
def apply_jquery_sugar(markup):
if DIV_SHORTCUT.match(markup):
markup = 'div' + markup
start_tag, tag = apply_jquery(markup)
return ('<%s>' % start_tag, '</%s>' % tag)
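# Illustrative: apply_jquery_sugar('.note#intro') expands the div shortcut and
# returns ('<div class="note" id="intro">', '</div>').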
def apply_jquery(markup):
tag, whitespace, attrs = TAG_WHITESPACE_ATTRS.match(markup).groups()
tag, rest = tag_and_rest(tag)
ids, classes = ids_and_classes(rest)
attrs = AUTO_QUOTE_ATTRIBUTES(attrs)
if classes:
attrs += ' class="%s"' % classes
if ids:
attrs += ' id="%s"' % ids
start_tag = tag + whitespace + attrs
return start_tag, tag
def ids_and_classes(rest):
if not rest: return '', ''
ids = []
    classes = []
def _match(m):
if m.group(1) == '#':
ids.append(m.group(2))
else:
classes.append(m.group(2))
CLASS_OR_ID.sub(_match, rest)
return jfixdots(ids), jfixdots(classes)
def jfixdots(a): return fixdots(' '.join(a))
def fixdots(s): return s.replace('..', '.')
def tag_and_rest(tag):
m = TAG_AND_REST.match(tag)
if m:
return fixdots(m.group(1)), m.group(2)
else:
return fixdots(tag), None
def enclose_tag(tag, text):
start_tag, end_tag = apply_jquery_sugar(tag)
return start_tag + text + end_tag
def enclose_django_tag(tag, text):
start_tag, end_tag = apply_django_sugar(tag)
return start_tag + text + end_tag
def find_indentation(line):
"""Returns a pair of basestrings.
The first consists of leading spaces and tabs in line. The second
is the remainder of the line with any trailing space stripped off.
Parameters
----------
line : basestring
"""
return INDENT(INDENT.regex.match(line))
############ Generic indentation stuff follows
def get_indented_block(prefix_lines):
"""Returns an integer.
The return value is the number of lines that belong to block begun
on the first line.
Parameters
----------
prefix_lines : list of basestring pairs
Each pair corresponds to a line of SHPAML source code. The
first element of each pair is indentation. The second is the
remaining part of the line, except for trailing newline.
"""
prefix, line = prefix_lines[0]
len_prefix = len(prefix)
    # Find the first nonempty line with len(new_prefix) <= len_prefix
i = 1
while i < len(prefix_lines):
new_prefix, line = prefix_lines[i]
if line and len(new_prefix) <= len_prefix:
break
i += 1
# Rewind to exclude empty lines
while i-1 > 0 and prefix_lines[i-1][1] == '':
i -= 1
return i
def indent(text,
branch_method,
leaf_method,
pass_syntax,
flush_left_syntax,
flush_left_empty_line,
indentation_method,
get_block = get_indented_block,
):
"""Returns HTML as a basestring.
Parameters
----------
text : basestring
Source code, typically SHPAML, but could be a different (but
related) language. The remaining parameters specify details
about the language used in the source code. To parse SHPAML,
pass the same values as convert_shpaml_tree.
branch_method : function
convert_shpaml_tree passes html_block_tag here.
leaf_method : function
convert_shpaml_tree passes convert_line here.
pass_syntax : basestring
convert_shpaml_tree passes PASS_SYNTAX here.
flush_left_syntax : basestring
convert_shpaml_tree passes FLUSH_LEFT_SYNTAX here.
flush_left_empty_line : basestring
convert_shpaml_tree passes FLUSH_LEFT_EMPTY_LINE here.
indentation_method : function
convert_shpaml_tree passes INDENT here.
get_block : function
Defaults to get_indented_block.
"""
text = text.rstrip()
lines = text.split('\n')
output = []
indent_lines(
lines,
output,
branch_method,
leaf_method,
pass_syntax,
flush_left_syntax,
flush_left_empty_line,
indentation_method,
        get_block = get_block,
)
return '\n'.join(output) + '\n'
def indent_lines(lines,
output,
branch_method,
leaf_method,
pass_syntax,
flush_left_syntax,
flush_left_empty_line,
indentation_method,
get_block,
):
"""Returns None.
The way this function produces output is by adding strings to the
list that's passed in as the second parameter.
Parameters
----------
lines : list of basestring's
Each string is a line of a SHPAML source code
(trailing newlines not included).
output : empty list
Explained earlier...
The remaining parameters are exactly the same as in the indent
function:
* branch_method
* leaf_method
* pass_syntax
* flush_left_syntax
* flush_left_empty_line
* indentation_method
* get_block
"""
append = output.append
def recurse(prefix_lines):
while prefix_lines:
prefix, line = prefix_lines[0]
if line == '':
prefix_lines.pop(0)
append('')
continue
block_size = get_block(prefix_lines)
if block_size == 1:
prefix_lines.pop(0)
if line == pass_syntax:
pass
elif line.startswith(flush_left_syntax):
append(line[len(flush_left_syntax):])
elif line.startswith(flush_left_empty_line):
append('')
else:
append(prefix + leaf_method(line))
else:
block = prefix_lines[:block_size]
prefix_lines = prefix_lines[block_size:]
branch_method(output, block, recurse)
return
prefix_lines = list(map(indentation_method, lines))
recurse(prefix_lines)
if __name__ == "__main__":
# if file name is given convert file, else convert stdin
import sys
if len(sys.argv) == 2:
shpaml_text = open(sys.argv[1]).read()
else:
shpaml_text = sys.stdin.read()
sys.stdout.write(convert_text(shpaml_text))
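# Illustrative sketch (not part of the original file): minimal programmatic
# use of convert_text(), mirroring the __main__ block above. The SHPAML input
# is a hypothetical example; the exact output formatting follows the rules
# defined earlier in this module.
def _demo_convert_text():
    source = 'ul\n  li\n    Hello\n'
    return convert_text(source)  # the rendered <ul><li>...</li></ul> HTML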
|
PypiClean
|
/django-jsgettext-0.5.tar.gz/django-jsgettext-0.5/djsgettext/views.py
|
from django.views.decorators.cache import cache_page
from django.views.generic import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.utils.text import javascript_quote
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.utils import importlib
from django.conf import settings
import gettext as gettext_module
import os
template_head = """(function() {
var catalog = {};
"""
template_body = """
var gettext = function(msgid) {
var value = catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
    var ngettext = function(singular, plural, count) {
        var value = catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[pluralidx(count)];
}
};
var pgettext = function(context, msgid) {
var value = gettext(context + '\\x04' + msgid);
if (value.indexOf('\\x04') != -1) {
value = msgid;
}
return value;
};
var npgettext = function(context, singular, plural, count) {
var value = ngettext(context + '\\x04' + singular, context + '\\x04' + plural, count);
if (value.indexOf('\\x04') != -1) {
value = ngettext(singular, plural, count);
}
return value;
};
var interpolate = function(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
"""
template_footer = """
this.gettext = gettext;
this.ngettext = ngettext;
this.pgettext = pgettext;
this.npgettext = npgettext;
this.interpolate = interpolate;
this.pluralidx = pluralidx;
}).call(this);
"""
plural_idx_template = """
var pluralidx = function(n) {
var v=%s;
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
"""
plural_simple_template = """
var pluralidx = function(count) { return (count == 1) ? 0 : 1; };
"""
I18N_VIEW_CACHE_TIMEOUT = getattr(settings, 'I18N_VIEW_CACHE_TIMEOUT', 20)
class I18n(View):
domains = ['djsgettext', 'djangojs']
packages = []
#@method_decorator(cache_page(I18N_VIEW_CACHE_TIMEOUT))
def dispatch(self, *args, **kwargs):
return super(I18n, self).dispatch(*args, **kwargs)
def get_paths(self, packages):
paths = []
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(p.__file__), 'locale')
paths.append(path)
paths.extend(list(reversed(settings.LOCALE_PATHS)))
return paths
def get_catalog(self, paths):
default_locale = to_locale(settings.LANGUAGE_CODE)
locale = to_locale(get_language())
en_selected = locale.startswith('en')
en_catalog_missing = True
t = {}
for domain in self.domains:
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
except IOError:
continue
else:
if en_selected:
en_catalog_missing = False
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
if locale != default_locale:
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t.update(locale_t)
return t
def make_js_catalog(self, t):
items, pitems = [], []
pdict = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
items.append(" catalog['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(v)))
elif isinstance(k, tuple):
if k[0] not in pdict:
pdict[k[0]] = k[1]
else:
pdict[k[0]] = max(k[1], pdict[k[0]])
items.append(" catalog['%s'][%d] = '%s';\n" % (javascript_quote(k[0]), k[1], javascript_quote(v)))
else:
raise TypeError(k)
items.sort()
for k, v in pdict.items():
pitems.append(" catalog['%s'] = [%s];\n" % (javascript_quote(k), ','.join(["''"]*(v+1))))
return "".join(items), "".join(pitems)
def get(self, request):
packages = self.packages
if not packages:
packages = ['django.conf']
paths = self.get_paths(packages)
t = self.get_catalog(paths)
# Plural methods discovery
plural = None
plural_template = plural_simple_template
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':',1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=',1)[1]
plural_template = plural_idx_template % (plural)
catalog, maincatalog = self.make_js_catalog(t)
src = [template_head, maincatalog, catalog,
template_body, plural_template, template_footer]
data = "".join(src)
return HttpResponse(data, content_type="text/javascript")
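# Illustrative sketch (not part of the original file): how get() above reduces
# a gettext Plural-Forms header to the JS expression interpolated into
# plural_idx_template. The sample header is a typical catalog value.
def _demo_extract_plural_expression():
    header = 'Plural-Forms: nplurals=2; plural=(n != 1);'
    plural = header.split(':', 1)[1].strip()
    # Keep only the 'plural=...' clause and take everything after the '='.
    expr = [el.strip() for el in plural.split(';')
            if el.strip().startswith('plural=')][0].split('=', 1)[1]
    return expr  # '(n != 1)', substituted for %s in plural_idx_template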
|
PypiClean
|
/pyjx-html5-0.1.0.tar.gz/pyjx-html5-0.1.0/pyjswidgets/pyjamas/DOM.pywebkitdfb.py
|
def getAbsoluteLeft(elem):
# Unattached elements and elements (or their ancestors) with style
# 'display: none' have no offsetLeft.
if (elem.offsetLeft is None) :
return 0
left = 0
curr = elem.parentNode
if (curr) :
# This intentionally excludes body which has a None offsetParent.
while (curr.offsetParent) :
left -= curr.scrollLeft
curr = curr.parentNode
while (elem) :
left += elem.offsetLeft
# Safari bug: a top-level absolutely positioned element includes the
# body's offset position already.
parent = elem.offsetParent
if (parent and (parent.tagName == 'BODY') and
(getStyleAttribute(elem, 'position') == 'absolute')) :
break
elem = parent
return left
def getAbsoluteTop(elem):
# Unattached elements and elements (or their ancestors) with style
# 'display: none' have no offsetTop.
if (elem.offsetTop is None) :
return 0
top = 0
curr = elem.parentNode
if (curr) :
# This intentionally excludes body which has a None offsetParent.
while (curr.offsetParent) :
top -= curr.scrollTop
curr = curr.parentNode
while (elem) :
top += elem.offsetTop
# Safari bug: a top-level absolutely positioned element includes the
# body's offset position already.
parent = elem.offsetParent
if (parent and (parent.tagName == 'BODY') and
(getStyleAttribute(elem, 'position') == 'absolute')) :
break
elem = parent
return top
def buttonClick(element):
evt = doc().createEvent('MouseEvents')
mf = get_main_frame()
target = mf.EventTarget(element)
target.setptr(element.getptr()) # dreadful hack that actually works. wow.
evt.initMouseEvent("click", True, True, wnd(), 1, 0, 0, 0, 0, False,
False, False, False, 0, target)
element.dispatchEvent(evt)
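# Illustrative sketch (not part of the original file): the arithmetic behind
# the two-phase walk in getAbsoluteLeft/getAbsoluteTop above, shown with
# hypothetical numbers instead of live DOM nodes. Ancestor scroll offsets are
# subtracted first, then offsetLeft is accumulated up the offsetParent chain.
def _demo_offset_accumulation():
    ancestor_scroll_lefts = [5, 0]   # hypothetical scrollLeft of ancestors
    offset_lefts = [40, 10]          # hypothetical offsetLeft up the chain
    return sum(offset_lefts) - sum(ancestor_scroll_lefts)  # 45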
|
PypiClean
|
/django_extjs4-1.2.0-extjs4.2.1.tar.gz/django_extjs4-1.2.0-extjs4.2.1/extjs4/static/extjs4/packages/ext-locale/build/ext-locale-et-dev.js
|
/**
 * Estonian Translations
 * By Rene Saarsoo (2012-05-28)
 */
Ext.onReady(function() {
if (Ext.Date) {
Ext.Date.monthNames = ["Jaanuar", "Veebruar", "Märts", "Aprill", "Mai", "Juuni", "Juuli", "August", "September", "Oktoober", "November", "Detsember"];
// Month names aren't shortened to strictly three letters
var shortMonthNames = ["Jaan", "Veeb", "Märts", "Apr", "Mai", "Juuni", "Juuli", "Aug", "Sept", "Okt", "Nov", "Dets"];
Ext.Date.getShortMonthName = function(month) {
return shortMonthNames[month];
};
Ext.Date.monthNumbers = {
Jan: 0,
Feb: 1,
Mar: 2,
Apr: 3,
May: 4,
Jun: 5,
Jul: 6,
Aug: 7,
Sep: 8,
Oct: 9,
Nov: 10,
Dec: 11
};
Ext.Date.getMonthNumber = function(name) {
return Ext.Date.monthNumbers[name.substring(0, 1).toUpperCase() + name.substring(1, 3).toLowerCase()];
};
Ext.Date.dayNames = ["Pühapäev", "Esmaspäev", "Teisipäev", "Kolmapäev", "Neljapäev", "Reede", "Laupäev"];
// Weekday names are abbreviated to single letter
Ext.Date.getShortDayName = function(day) {
return Ext.Date.dayNames[day].substring(0, 1);
};
}
if (Ext.util && Ext.util.Format) {
Ext.apply(Ext.util.Format, {
thousandSeparator: ' ',
decimalSeparator: ',',
currencySign: '\u20ac', // Euro
dateFormat: 'd.m.Y'
});
}
});
Ext.define("Ext.locale.et.view.View", {
override: "Ext.view.View",
emptyText: ""
});
Ext.define("Ext.locale.et.grid.plugin.DragDrop", {
override: "Ext.grid.plugin.DragDrop",
dragText: "{0} valitud rida"
});
// changing the msg text below will affect the LoadMask
Ext.define("Ext.locale.et.view.AbstractView", {
override: "Ext.view.AbstractView",
loadingText: "Laen..."
});
Ext.define("Ext.locale.et.picker.Date", {
override: "Ext.picker.Date",
todayText: "Täna",
minText: "See kuupäev on enne määratud vanimat kuupäeva",
maxText: "See kuupäev on pärast määratud hiliseimat kuupäeva",
disabledDaysText: "",
disabledDatesText: "",
nextText: 'Järgmine kuu (Ctrl+Paremale)',
prevText: 'Eelmine kuu (Ctrl+Vasakule)',
monthYearText: 'Vali kuu (Ctrl+Üles/Alla aastate muutmiseks)',
todayTip: "{0} (Tühik)",
format: "d.m.Y",
startDay: 1
});
Ext.define("Ext.locale.et.picker.Month", {
override: "Ext.picker.Month",
okText: " OK ",
cancelText: "Katkesta"
});
Ext.define("Ext.locale.et.toolbar.Paging", {
override: "Ext.PagingToolbar",
beforePageText: "Lehekülg",
afterPageText: "{0}-st",
firstText: "Esimene lk",
prevText: "Eelmine lk",
nextText: "Järgmine lk",
lastText: "Viimane lk",
refreshText: "Värskenda",
displayMsg: "Näitan {0} - {1} {2}-st",
emptyMsg: 'Puuduvad andmed mida näidata'
});
Ext.define("Ext.locale.et.form.Basic", {
override: "Ext.form.Basic",
waitTitle: "Palun oota..."
});
Ext.define("Ext.locale.et.form.field.Base", {
override: "Ext.form.field.Base",
invalidText: "Välja sisu ei vasta nõuetele"
});
Ext.define("Ext.locale.et.form.field.Text", {
override: "Ext.form.field.Text",
minLengthText: "Selle välja minimaalne pikkus on {0}",
maxLengthText: "Selle välja maksimaalne pikkus on {0}",
blankText: "Selle välja täitmine on nõutud",
regexText: "",
emptyText: null
});
Ext.define("Ext.locale.et.form.field.Number", {
override: "Ext.form.field.Number",
minText: "Selle välja vähim väärtus võib olla {0}",
maxText: "Selle välja suurim väärtus võib olla {0}",
nanText: "{0} pole korrektne number"
});
Ext.define("Ext.locale.et.form.field.Date", {
override: "Ext.form.field.Date",
disabledDaysText: "Võimetustatud",
disabledDatesText: "Võimetustatud",
minText: "Kuupäev peab olema alates kuupäevast: {0}",
maxText: "Kuupäev peab olema kuni kuupäevani: {0}",
invalidText: "{0} ei ole sobiv kuupäev - õige formaat on: {1}",
format: "d.m.Y"
});
Ext.define("Ext.locale.et.form.field.ComboBox", {
override: "Ext.form.field.ComboBox",
valueNotFoundText: undefined
}, function() {
Ext.apply(Ext.form.field.ComboBox.prototype.defaultListConfig, {
loadingText: "Laen..."
});
});
Ext.define("Ext.locale.et.form.field.VTypes", {
override: "Ext.form.field.VTypes",
emailText: 'Selle välja sisuks peab olema e-posti aadress kujul "[email protected]"',
urlText: 'Selle välja sisuks peab olema veebiaadress kujul "http:/'+'/www.domeen.com"',
alphaText: 'See väli võib sisaldada vaid tähemärke ja alakriipsu',
alphanumText: 'See väli võib sisaldada vaid tähemärke, numbreid ja alakriipsu'
});
Ext.define("Ext.locale.et.form.field.HtmlEditor", {
override: "Ext.form.field.HtmlEditor",
createLinkText: 'Palun sisestage selle lingi internetiaadress:'
}, function() {
Ext.apply(Ext.form.field.HtmlEditor.prototype, {
buttonTips: {
bold: {
title: 'Rasvane kiri (Ctrl+B)',
text: 'Muuda valitud tekst rasvaseks.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
italic: {
title: 'Kursiiv (Ctrl+I)',
text: 'Pane valitud tekst kaldkirja.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
underline: {
title: 'Allakriipsutus (Ctrl+U)',
text: 'Jooni valitud tekst alla.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
increasefontsize: {
title: 'Suurenda',
text: 'Suurenda teksti suurust.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
decreasefontsize: {
title: 'Vähenda',
text: 'Vähenda teksti suurust.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
backcolor: {
title: 'Tausta värv',
text: 'Muuda valitud teksti taustavärvi.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
forecolor: {
title: 'Teksti värv',
text: 'Muuda valitud teksti värvi.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
justifyleft: {
title: 'Vasakule',
text: 'Joonda tekst vasakule.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
justifycenter: {
title: 'Keskele',
text: 'Joonda tekst keskele.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
justifyright: {
title: 'Paremale',
text: 'Joonda tekst paremale.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
insertunorderedlist: {
title: 'Loetelu',
text: 'Alusta loetelu.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
insertorderedlist: {
title: 'Numereeritud list',
text: 'Alusta numereeritud nimekirja.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
createlink: {
title: 'Link',
text: 'Muuda tekst lingiks.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
sourceedit: {
title: 'Lähtekoodi muutmine',
text: 'Lülitu lähtekoodi muutmise režiimi.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
}
}
});
});
Ext.define("Ext.locale.et.grid.header.Container", {
override: "Ext.grid.header.Container",
sortAscText: "Järjesta kasvavalt",
sortDescText: "Järjesta kahanevalt",
columnsText: "Tulbad"
});
Ext.define("Ext.locale.et.grid.feature.Grouping", {
override: "Ext.grid.feature.Grouping",
emptyGroupText: '(Tühi)',
groupByText: 'Grupeeri selle välja järgi',
showGroupsText: 'Näita gruppides'
});
Ext.define("Ext.locale.et.grid.property.HeaderContainer", {
override: "Ext.grid.property.HeaderContainer",
nameText: "Nimi",
valueText: "Väärtus",
dateFormat: "d.m.Y"
});
Ext.define("Ext.locale.et.grid.column.Date", {
override: "Ext.grid.column.Date",
format: 'd.m.Y'
});
Ext.define("Ext.locale.et.form.field.Time", {
override: "Ext.form.field.Time",
minText: "Kellaaeg peab olema alates {0}",
maxText: "Kellaaeg peab olema kuni {0}",
invalidText: "{0} ei ole sobiv kellaaeg",
format: "H:i"
});
Ext.define("Ext.locale.et.form.CheckboxGroup", {
override: "Ext.form.CheckboxGroup",
blankText: "Vähemalt üks väli selles grupis peab olema valitud"
});
Ext.define("Ext.locale.et.form.RadioGroup", {
override: "Ext.form.RadioGroup",
blankText: "Vähemalt üks väli selles grupis peab olema valitud"
});
Ext.define("Ext.locale.et.window.MessageBox", {
override: "Ext.window.MessageBox",
buttonText: {
ok: "OK",
cancel: "Katkesta",
yes: "Jah",
no: "Ei"
}
});
// This is needed until we can refactor all of the locales into individual files
Ext.define("Ext.locale.et.Component", {
override: "Ext.Component"
});
|
PypiClean
|
/azure-cli-2.51.0.tar.gz/azure-cli-2.51.0/azure/cli/command_modules/cognitiveservices/_help.py
|
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['cognitiveservices'] = """
type: group
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
"""
helps['cognitiveservices account'] = """
type: group
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
"""
helps['cognitiveservices account create'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
parameters:
- name: --kind
populator-commands:
- az cognitiveservices account list-kinds
- name: --sku --sku-name
populator-commands:
- az cognitiveservices account list-skus
examples:
- name: Create an S0 face API Cognitive Services account in West Europe without confirmation required.
text: az cognitiveservices account create -n myresource -g myResourceGroup --kind Face --sku S0 -l WestEurope --yes
- name: Manage Azure Cognitive Services accounts. (autogenerated)
text: az cognitiveservices account create --kind Face --location WestEurope --name myresource --resource-group myResourceGroup --sku S0 --subscription MySubscription --yes
crafted: true
- name: Create a Text Analytics Cognitive Services account in West Europe without confirmation required and use customer owned storage.
text: |
az cognitiveservices account create -n myresource -g myResourceGroup --assign-identity --kind TextAnalytics --sku S -l WestEurope --yes
--storage '[
{
"resourceId": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myStorageAccount"
}
]'
  - name: Create a Text Analytics Cognitive Services account in West Europe without confirmation required and use customer-managed encryption.
text: |
az cognitiveservices account create -n myresource -g myResourceGroup --assign-identity --kind TextAnalytics --sku S -l WestEurope --yes
--encryption '{
"keySource": "Microsoft.KeyVault",
"keyVaultProperties": {
"keyName": "KeyName",
"keyVersion": "secretVersion",
"keyVaultUri": "https://issue23056kv.vault.azure.net/"
}
}'
"""
helps['cognitiveservices account delete'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Delete account.
text: az cognitiveservices account delete --name myresource-luis -g cognitive-services-resource-group
"""
helps['cognitiveservices account keys'] = """
type: group
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
"""
helps['cognitiveservices account keys list'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Get current resource keys.
text: az cognitiveservices account keys list --name myresource -g cognitive-services-resource-group
- name: Manage Azure Cognitive Services accounts. (autogenerated)
text: az cognitiveservices account keys list --name myresource --resource-group cognitive-services-resource-group --subscription MySubscription
crafted: true
"""
helps['cognitiveservices account keys regenerate'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Get new keys for resource.
text: az cognitiveservices account keys regenerate --name myresource -g cognitive-services-resource-group --key-name key1
"""
helps['cognitiveservices account list'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: List all the Cognitive Services accounts in a resource group.
text: az cognitiveservices account list -g MyResourceGroup
"""
helps['cognitiveservices account list-skus'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
parameters:
- name: --name -n
long-summary: |
--kind and --location will be ignored when --name is specified.
        --resource-group is required when --name is specified.
- name: --resource-group -g
long-summary: |
        --resource-group is used when --name is specified. In other cases it will be ignored.
- name: --kind
populator-commands:
- az cognitiveservices account list-kinds
examples:
- name: Show SKUs.
text: az cognitiveservices account list-skus --kind Face --location westus
"""
helps['cognitiveservices account list-models'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: List models available for a Cognitive Services account.
text: az cognitiveservices account list-models -n myresource -g cognitive-services-resource-group
"""
helps['cognitiveservices account network-rule'] = """
type: group
short-summary: Manage network rules.
"""
helps['cognitiveservices account network-rule add'] = """
type: command
short-summary: Add a network rule.
long-summary: >
Rules can be created for an IPv4 address, address range (CIDR format), or a virtual network subnet.
examples:
- name: Create a rule to allow a specific address-range.
text: az cognitiveservices account network-rule add -g myRg --name MyAccount --ip-address 23.45.1.0/24
- name: Create a rule to allow access for a subnet.
text: az cognitiveservices account network-rule add -g myRg --name MyAccount --vnet myvnet --subnet mysubnet
"""
helps['cognitiveservices account network-rule list'] = """
type: command
short-summary: List network rules.
examples:
- name: List network rules.
text: az cognitiveservices account network-rule list --name MyAccount --resource-group MyResourceGroup
crafted: true
"""
helps['cognitiveservices account network-rule remove'] = """
type: command
short-summary: Remove a network rule.
examples:
- name: Remove a network rule.
text: az cognitiveservices account network-rule remove --name MyAccount --resource-group MyResourceGroup --subnet mysubnet
crafted: true
- name: Remove a network rule.
text: az cognitiveservices account network-rule remove --name MyAccount --ip-address 23.45.1.0/24 --resource-group MyResourceGroup
crafted: true
"""
helps['cognitiveservices account show'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show account information.
text: az cognitiveservices account show --name myresource --resource-group cognitive-services-resource-group
"""
helps['cognitiveservices account update'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
parameters:
- name: --sku --sku-name
populator-commands:
- az cognitiveservices account list-skus
examples:
- name: Update sku and tags.
text: az cognitiveservices account update --name myresource -g cognitive-services-resource-group --sku S0 --tags external-app=chatbot-HR azure-web-app-bot=HR-external azure-app-service=HR-external-app-service
"""
helps['cognitiveservices list'] = """
type: command
short-summary: Manage Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: List all the Cognitive Services accounts in a resource group.
text: az cognitiveservices list -g MyResourceGroup
"""
helps['cognitiveservices account identity'] = """
type: group
short-summary: Manage identity of Cognitive Services accounts.
"""
helps['cognitiveservices account identity assign'] = """
type: command
short-summary: Assign an identity of a Cognitive Services account.
long-summary: Assign an identity to a Cognitive Services account. A system-assigned identity will be generated and assigned to the account.
examples:
- name: Assign an identity of Cognitive Services accounts.
text: az cognitiveservices account identity assign --name myresource --resource-group cognitive-services-resource-group
"""
helps['cognitiveservices account identity show'] = """
type: command
short-summary: Show the identity of a Cognitive Services account.
long-summary: Show the identity object of a Cognitive Services account; an empty object might be returned if the account has no assigned identity.
examples:
- name: Show the identity of Cognitive Services accounts.
text: az cognitiveservices account identity show --name myresource --resource-group cognitive-services-resource-group
"""
helps['cognitiveservices account identity remove'] = """
type: command
short-summary: Remove the identity from a Cognitive Services account.
long-summary: Remove the identity (if exists) from a Cognitive Services account.
examples:
- name: Remove the identity from a Cognitive Services account.
text: az cognitiveservices account identity remove --name myresource --resource-group cognitive-services-resource-group
"""
helps['cognitiveservices account list-deleted'] = """
type: command
short-summary: List soft-deleted Azure Cognitive Services accounts.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
  - name: List all the soft-deleted Cognitive Services accounts in a subscription.
text: az cognitiveservices account list-deleted
"""
helps['cognitiveservices account show-deleted'] = """
type: command
short-summary: Show a soft-deleted Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show a soft-deleted Azure Cognitive Services account.
text: az cognitiveservices account show-deleted --location eastus --resource-group cognitive-services-resource-group --name cognitive-services-account-name
"""
helps['cognitiveservices account recover'] = """
type: command
short-summary: Recover a soft-deleted Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Recover a soft-deleted Azure Cognitive Services account.
text: az cognitiveservices account recover --location eastus --resource-group cognitive-services-resource-group --name cognitive-services-account-name
"""
helps['cognitiveservices account purge'] = """
type: command
short-summary: Purge a soft-deleted Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Purge a soft-deleted Azure Cognitive Services account.
text: az cognitiveservices account purge --location eastus --resource-group cognitive-services-resource-group --name cognitive-services-account-name
"""
helps['cognitiveservices account deployment'] = """
type: group
short-summary: Manage deployments for Azure Cognitive Services accounts.
"""
helps['cognitiveservices account deployment create'] = """
type: command
short-summary: Create a deployment for Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Create a deployment for Azure Cognitive Services account.
text: az cognitiveservices account deployment create -g yuanyang-test-sdk -n yytest-oai --deployment-name dpy --model-name ada --model-version "1" --model-format OpenAI --sku-capacity 1 --sku-name "Standard"
"""
helps['cognitiveservices account deployment delete'] = """
type: command
short-summary: Delete a deployment from Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Delete a deployment from Azure Cognitive Services account.
text: az cognitiveservices account deployment delete -g yuanyang-test-sdk -n yytest-oai --deployment-name dpy
"""
helps['cognitiveservices account deployment show'] = """
type: command
short-summary: Show a deployment for Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show a deployment for Azure Cognitive Services account.
text: az cognitiveservices account deployment show -g yuanyang-test-sdk -n yytest-oai --deployment-name dpy
"""
helps['cognitiveservices account deployment list'] = """
type: command
short-summary: Show all deployments for Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show all deployments for Azure Cognitive Services account.
text: az cognitiveservices account deployment list -g yuanyang-test-sdk -n yytest-oai
"""
helps['cognitiveservices commitment-tier'] = """
type: group
short-summary: Manage commitment tiers for Azure Cognitive Services.
"""
helps['cognitiveservices commitment-tier list'] = """
type: command
short-summary: Show all commitment tiers for Azure Cognitive Services.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show all commitment tiers for Azure Cognitive Services.
text: az cognitiveservices commitment-tier list -l centraluseuap
"""
helps['cognitiveservices account commitment-plan'] = """
type: group
short-summary: Manage commitment plans for Azure Cognitive Services accounts.
"""
helps['cognitiveservices account commitment-plan create'] = """
type: command
short-summary: Create a commitment plan for Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Create a commitment plan for Azure Cognitive Services account.
text: az cognitiveservices account commitment-plan create -g yuanyang-test-sdk -n yytest-ta --commitment-plan-name "plan" --hosting-model "Web" --plan-type "TA" --auto-renew false --current-tier "T1" --next-tier "T2"
"""
helps['cognitiveservices account commitment-plan delete'] = """
type: command
short-summary: Delete a commitment plan from Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Delete a commitment plan from Azure Cognitive Services account.
text: az cognitiveservices account commitment-plan delete -g yuanyang-test-sdk -n yytest-ta --commitment-plan-name "plan"
"""
helps['cognitiveservices account commitment-plan show'] = """
type: command
short-summary: Show a commitment plan from Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show a commitment plan from Azure Cognitive Services account.
text: az cognitiveservices account commitment-plan show -g yuanyang-test-sdk -n yytest-ta --commitment-plan-name "plan"
"""
helps['cognitiveservices account commitment-plan list'] = """
type: command
short-summary: Show all commitment plans from Azure Cognitive Services account.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show all commitment plans from Azure Cognitive Services account.
text: az cognitiveservices account commitment-plan list -g yuanyang-test-sdk -n yytest-ta
"""
helps['cognitiveservices model'] = """
type: group
short-summary: Manage model for Azure Cognitive Services.
"""
helps['cognitiveservices model list'] = """
type: command
short-summary: Show all models for Azure Cognitive Services.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show all models for Azure Cognitive Services.
text: az cognitiveservices model list -l centraluseuap
"""
helps['cognitiveservices usage'] = """
type: group
short-summary: Manage usage for Azure Cognitive Services.
"""
helps['cognitiveservices usage list'] = """
type: command
short-summary: Show all usages for Azure Cognitive Services.
long-summary: This article lists the Azure CLI commands for Azure Cognitive Services account and subscription management only. Refer to the documentation at https://docs.microsoft.com/azure/cognitive-services/ for individual services to learn how to use the APIs and supported SDKs.
examples:
- name: Show all usages for Azure Cognitive Services.
text: az cognitiveservices usage list -l centraluseuap
"""
|
PypiClean
|
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/chris/chris《搭讪达人》:解惑篇09废物测试(下).md
|
# chris《搭讪达人》: Q&A Episode 09, Shit Tests (Part 3)

Welcome to the third episode on shit tests. Why spend three whole episodes on them? Because shit tests are one of the most important parts of cold approach: how you handle them can directly decide whether the approach ends well or badly.

Long-time students also know that I like to dig into the underlying logic of a thing and explain it so you can actually understand it. How you read a shit test changes with your level of awareness: what it means to you depends on how deep your understanding goes.

The earliest, traditional view was that when a woman shit-tests a man she is literally testing whether he is a loser. But once you understand the red pill, you see that a shit test is really the woman asserting her position as the sexual selector; it is her way of judging and screening men.

So understand this: you have to step out of that frame first. In our other products, say the advanced course on becoming a "prize" alpha man, the first thing to think about is not how to become the woman's prize or how to become alpha. The first thing to think about is how to keep women from filing you away as a beta. If you cannot escape the frame in which the woman has already pegged you as a beta, everything that comes afterward is useless. That is why so many people say the techniques never work for them: they cannot even get the basics right. In this approach course we solve those basic problems for you. All right, enough talk; let's watch the clip together.

[Clip] "Hey, are you married?" "What?" "Are you married or not?" "Why do you ask?" "I saw you and you seemed quite mature, so I came over to say hi." "How old are you?" "Hold on, hold on, I came over to approach you; that is my goal." "You're filming some street show, aren't you?" "Why would you think I'm filming a show?" So the opening is already a string of shit tests, starting with "are you filming a show?" (this woman clearly has some experience), followed by "a young guy like you must have lots of young girls who like him."

Many people hear that last line and think: isn't she complimenting me? Nice! Do you really believe it is a compliment? Think one step further. When a woman says something, do not just read the words; read the emotion she says them with. Spoken with one emotion, "a young guy like you must have lots of girls who like you" is genuine praise; spoken with another, the same sentence carries a completely different message. With the emotion this woman used, the real message is: go approach someone else, nobody here is going to like you, stop bothering me. Read the subtext and you see the line is itself a shit test, a defensive move. Remember the second type of female defense we covered: the value defense.

Let's keep watching. "What year were you born?" "'97." "Wow. How old do you think I am? You probably have no chance." "If I have no chance, fine, we can still get to know each other, can't we? I'm a '95er." She asks him the way to a cosmetics shop. "Is this your first time in Chengdu?" "So age is no problem at all for you?" And the classic shit test rolls in again, the same one we saw last episode; let's replay it.

She says, in effect, "I'm older than you; I'm not interested in you," and here the man makes the fatal mistake: he qualifies himself, explains, tries to prove himself, saying "age shouldn't be a problem." As I explained last episode, when a woman says "we're not a match" or "you're not my type," what she is really saying is "you're not good enough for me," and answering "age isn't a problem" amounts to pleading "no, I really am good enough for you." Then the woman says, "Big sisters just like mature men." Again, a woman's shit tests all rest on her establishing herself as the sexual selector: "I am the one doing the choosing; I am screening you; we're not a match."

Wang Yiqiang keeps pressing: "How old are you? How old are you? Why? How old are you?" He cannot let the topic go and keeps drilling into it. Trying to laugh it off here also lands badly; dodging the topic this way (note: dodging the topic, not breaking the test) is still a form of self-qualification. The woman says their ages don't match, and he just keeps asking "you're older than me? how old? how old?", circling the same question over and over.

So let's lay out the thinking again. There are three ways to respond to a shit test: answer briefly, ignore it, or amplify it and throw it back. Answering briefly means something like "fine, sure," then moving on to something else. Ignoring means not engaging with the topic at all and going straight to another thread. Throwing it back means exaggerating her meaning, or exaggerating your own reaction.

How do you throw it back here? Very simple. She keeps saying she is much older than you, so you say: "You keep telling me how much older you are; should I just call you mom?" When she says "big sisters just like mature men," you say: "So you're telling me you're looking for a daddy type?" Toss it back and watch how she responds. One more key point: when you throw it back, your non-verbal delivery matters. Look at her and hold eye contact while you deliver the line. That is when your alpha traits can surface quickly through the way you break the test.

"Before you go buy your cosmetics, shouldn't we get to know each other first? There's a café right over there." "You're not a coffee-shop shill, are you?" "What? Good grief, a coffee shill?" Same situation again: Wang Yiqiang proposes an instant date, coffee with the girl, and she answers, "You're not one of those coffee scammers, are you?" Same line of thinking: read the meaning behind the words and the emotion behind them. Whether a woman calls you a coffee shill, a bar shill, or a "Tony" hairdresser, it all means the same thing: she has filed you as a low-value man, the kind who is trying to extract value from her. "You're a coffee shill; you want to take me for coffee so you can squeeze something out of me." That is the frame. "A coffee shill? First time I've ever heard of a coffee shill. Bar shills, sure, but coffee shills?" "You're a teacher? No wonder you look so proper."

Notice that when the shit test is not broken, women respond in very similar ways: she will not give you her attention, because she does not believe you have value, and she stays focused on her own business. Last episode's girl was waiting for someone; this episode's girl is looking for directions. She may still chat with you, call it social courtesy, but once she has decided you are low value, her attention stays on her own errand: waiting for her friend, finding her way, buying her cosmetics. Some women, of course, will simply not engage at all; why would she let a low-value man keep bothering her?

"Weren't you just coming from over there? Then cross over and turn." "Can we go out together sometime? Let's set a date." "Some other day." "Some other day, then." "Some other day" is a pure brush-off; this girl is never coming out with him. And you have to understand why she fobs you off: simply because she sees no value in you. With the woman convinced he has no value, Wang Yiqiang never flipped that frame, so even if he pushes for the date and gets the number, she will not come out. That is exactly why approaches go nowhere, and why we spent three episodes on this: most men cannot break through this situation, and if you can, your approaches and the results you get from them improve qualitatively. "Some other day." "Some other day, we'll see." "Add my WeChat; we can go out some other day." You have seen that when Coach Ke approaches and seeds an invite, girls mostly answer "sure!" and mean it; when a girl answers "we'll see," she is basically never coming out, and the WeChat you added is a dead number.

"How old are you, then?" "Are you calling me a kid? I really don't like hearing that. I was born in '95; am I a kid?" He keeps protesting: "Who are you calling little? I'm a '95er; am I small?" And then, "That really upsets me." This is, once more, self-qualification, proving yourself. The woman shit-tests him as low value, and the man starts proving himself. It is exactly like a man who, told "you're too short," answers: "I'm 170, is that short? I could practically dunk. Me, short?" The logic of the response is identical: self-qualification, which only underlines that you are a beta. It is as if the shell landed square on you and wounded you; that is why you react this way, flustered and defensive.

At the end of the clip Wang Yiqiang shows her the way, walks her toward the cosmetics shop, and the girl leaves. So shit tests really are critically important. Let's close with a summary.

First, you have to be able to recognize a shit test. Some people cannot: they hear the woman's line and think "she just means I'm not her type." No. It is because you have not let her see your value; if you have value, women will respond to it. The line means she currently reads you as low value. How do you recognize one? Very simple. First, look at the line itself: if she is needling you or fobbing you off, you can almost always treat it as a shit test, because the premise behind it is that she has pegged you as low value, and that is why she responds to you this way. Second, judge the emotion. Sometimes a woman appears to compliment you, but the "compliment" is a deflection or a brush-off, like the girl earlier: "a young guy like you must have lots of girls who like you, don't come bothering me." That, too, is a shit test. Watch the woman's emotion.

Then the three responses: one, answer briefly and move on; two, ignore it outright; three, amplify it and throw it back at her. The first two are easy and require no thinking: you neutralize the remark so it cannot hurt you, but your value still has not been displayed, so you must keep creating chances in the conversation for it to show. And in street approach your time is short: the faster she sees your value, the faster she is attracted and the more attention she gives you. That is why the third response is the best, and also the hardest, because it takes training; you cannot produce that kind of instant, adaptive comeback on the spot without drilling it. The third response raises your value immediately. The first two merely keep your value from dropping, and you have to raise it gradually through the rest of the interaction, whereas batting the shit test back flips the frame: you take the frame from her and hold it, and she is the one left puzzling and thinking. OK.

One last reminder, and it is important: when you go out approaching, get real-world reps and drill this.

See you next episode.
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/groups/item/team/members/add/add_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import add_post_request_body, add_response
from ......models.o_data_errors import o_data_error
class AddRequestBuilder():
"""
Provides operations to call the add method.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new AddRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/groups/{group%2Did}/team/members/add"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
async def post(self,body: Optional[add_post_request_body.AddPostRequestBody] = None, request_configuration: Optional[AddRequestBuilderPostRequestConfiguration] = None) -> Optional[add_response.AddResponse]:
"""
Invoke action add
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[add_response.AddResponse]
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.to_post_request_information(
body, request_configuration
)
from ......models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from . import add_response
return await self.request_adapter.send_async(request_info, add_response.AddResponse, error_mapping)
def to_post_request_information(self,body: Optional[add_post_request_body.AddPostRequestBody] = None, request_configuration: Optional[AddRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Invoke action add
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
@dataclass
class AddRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
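# Illustrative usage sketch (not part of the generated file). It assumes an
# initialized RequestAdapter; populating the request body is elided because
# the exact payload shape belongs to the generated models, not this builder.
async def _example_add_member(request_adapter: RequestAdapter, group_id: str) -> None:
    from . import add_post_request_body
    body = add_post_request_body.AddPostRequestBody()
    # ... populate `body` with the conversation members to add ...
    builder = AddRequestBuilder(request_adapter, {"group%2Did": group_id})
    await builder.post(body)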
|
PypiClean
|
/realms-wiki-0.9.3.tar.gz/realms-wiki-0.9.3/realms/static/vendor/ace-builds/src-noconflict/mode-html.js
|
ace.define("ace/mode/doc_comment_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var DocCommentHighlightRules = function() {
this.$rules = {
"start" : [ {
token : "comment.doc.tag",
regex : "@[\\w\\d_]+" // TODO: fix email addresses
},
DocCommentHighlightRules.getTagRule(),
{
defaultToken : "comment.doc",
caseInsensitive: true
}]
};
};
oop.inherits(DocCommentHighlightRules, TextHighlightRules);
DocCommentHighlightRules.getTagRule = function(start) {
return {
token : "comment.doc.tag.storage.type",
regex : "\\b(?:TODO|FIXME|XXX|HACK)\\b"
};
}
DocCommentHighlightRules.getStartRule = function(start) {
return {
token : "comment.doc", // doc comment
regex : "\\/\\*(?=\\*)",
next : start
};
};
DocCommentHighlightRules.getEndRule = function (start) {
return {
token : "comment.doc", // closing comment
regex : "\\*\\/",
next : start
};
};
exports.DocCommentHighlightRules = DocCommentHighlightRules;
});
ace.define("ace/mode/javascript_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/doc_comment_highlight_rules","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var DocCommentHighlightRules = require("./doc_comment_highlight_rules").DocCommentHighlightRules;
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var identifierRe = "[a-zA-Z\\$_\u00a1-\uffff][a-zA-Z\\d\\$_\u00a1-\uffff]*";
var JavaScriptHighlightRules = function(options) {
var keywordMapper = this.createKeywordMapper({
"variable.language":
"Array|Boolean|Date|Function|Iterator|Number|Object|RegExp|String|Proxy|" + // Constructors
"Namespace|QName|XML|XMLList|" + // E4X
"ArrayBuffer|Float32Array|Float64Array|Int16Array|Int32Array|Int8Array|" +
"Uint16Array|Uint32Array|Uint8Array|Uint8ClampedArray|" +
"Error|EvalError|InternalError|RangeError|ReferenceError|StopIteration|" + // Errors
"SyntaxError|TypeError|URIError|" +
"decodeURI|decodeURIComponent|encodeURI|encodeURIComponent|eval|isFinite|" + // Non-constructor functions
"isNaN|parseFloat|parseInt|" +
"JSON|Math|" + // Other
"this|arguments|prototype|window|document" , // Pseudo
"keyword":
"const|yield|import|get|set|async|await|" +
"break|case|catch|continue|default|delete|do|else|finally|for|function|" +
"if|in|instanceof|new|return|switch|throw|try|typeof|let|var|while|with|debugger|" +
"__parent__|__count__|escape|unescape|with|__proto__|" +
"class|enum|extends|super|export|implements|private|public|interface|package|protected|static",
"storage.type":
"const|let|var|function",
"constant.language":
"null|Infinity|NaN|undefined",
"support.function":
"alert",
"constant.language.boolean": "true|false"
}, "identifier");
var kwBeforeRe = "case|do|else|finally|in|instanceof|return|throw|try|typeof|yield|void";
var escapedRe = "\\\\(?:x[0-9a-fA-F]{2}|" + // hex
"u[0-9a-fA-F]{4}|" + // unicode
"u{[0-9a-fA-F]{1,6}}|" + // es6 unicode
"[0-2][0-7]{0,2}|" + // oct
"3[0-7][0-7]?|" + // oct
"[4-7][0-7]?|" + //oct
".)";
this.$rules = {
"no_regex" : [
DocCommentHighlightRules.getStartRule("doc-start"),
comments("no_regex"),
{
token : "string",
regex : "'(?=.)",
next : "qstring"
}, {
token : "string",
regex : '"(?=.)',
next : "qqstring"
}, {
token : "constant.numeric", // hex
regex : /0(?:[xX][0-9a-fA-F]+|[bB][01]+)\b/
}, {
token : "constant.numeric", // float
regex : /[+-]?\d[\d_]*(?:(?:\.\d*)?(?:[eE][+-]?\d+)?)?\b/
}, {
token : [
"storage.type", "punctuation.operator", "support.function",
"punctuation.operator", "entity.name.function", "text","keyword.operator"
],
regex : "(" + identifierRe + ")(\\.)(prototype)(\\.)(" + identifierRe +")(\\s*)(=)",
next: "function_arguments"
}, {
token : [
"storage.type", "punctuation.operator", "entity.name.function", "text",
"keyword.operator", "text", "storage.type", "text", "paren.lparen"
],
regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"entity.name.function", "text", "keyword.operator", "text", "storage.type",
"text", "paren.lparen"
],
regex : "(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"storage.type", "punctuation.operator", "entity.name.function", "text",
"keyword.operator", "text",
"storage.type", "text", "entity.name.function", "text", "paren.lparen"
],
regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s+)(\\w+)(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"storage.type", "text", "entity.name.function", "text", "paren.lparen"
],
regex : "(function)(\\s+)(" + identifierRe + ")(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"entity.name.function", "text", "punctuation.operator",
"text", "storage.type", "text", "paren.lparen"
],
regex : "(" + identifierRe + ")(\\s*)(:)(\\s*)(function)(\\s*)(\\()",
next: "function_arguments"
}, {
token : [
"text", "text", "storage.type", "text", "paren.lparen"
],
regex : "(:)(\\s*)(function)(\\s*)(\\()",
next: "function_arguments"
}, {
token : "keyword",
regex : "(?:" + kwBeforeRe + ")\\b",
next : "start"
}, {
token : ["support.constant"],
regex : /that\b/
}, {
token : ["storage.type", "punctuation.operator", "support.function.firebug"],
regex : /(console)(\.)(warn|info|log|error|time|trace|timeEnd|assert)\b/
}, {
token : keywordMapper,
regex : identifierRe
}, {
token : "punctuation.operator",
regex : /[.](?![.])/,
next : "property"
}, {
token : "keyword.operator",
regex : /--|\+\+|\.{3}|===|==|=|!=|!==|<+=?|>+=?|!|&&|\|\||\?:|[!$%&*+\-~\/^]=?/,
next : "start"
}, {
token : "punctuation.operator",
regex : /[?:,;.]/,
next : "start"
}, {
token : "paren.lparen",
regex : /[\[({]/,
next : "start"
}, {
token : "paren.rparen",
regex : /[\])}]/
}, {
token: "comment",
regex: /^#!.*$/
}
],
property: [{
token : "text",
regex : "\\s+"
}, {
token : [
"storage.type", "punctuation.operator", "entity.name.function", "text",
"keyword.operator", "text",
"storage.type", "text", "entity.name.function", "text", "paren.lparen"
],
regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(?:(\\s+)(\\w+))?(\\s*)(\\()",
next: "function_arguments"
}, {
token : "punctuation.operator",
regex : /[.](?![.])/
}, {
token : "support.function",
regex : /(s(?:h(?:ift|ow(?:Mod(?:elessDialog|alDialog)|Help))|croll(?:X|By(?:Pages|Lines)?|Y|To)?|t(?:op|rike)|i(?:n|zeToContent|debar|gnText)|ort|u(?:p|b(?:str(?:ing)?)?)|pli(?:ce|t)|e(?:nd|t(?:Re(?:sizable|questHeader)|M(?:i(?:nutes|lliseconds)|onth)|Seconds|Ho(?:tKeys|urs)|Year|Cursor|Time(?:out)?|Interval|ZOptions|Date|UTC(?:M(?:i(?:nutes|lliseconds)|onth)|Seconds|Hours|Date|FullYear)|FullYear|Active)|arch)|qrt|lice|avePreferences|mall)|h(?:ome|andleEvent)|navigate|c(?:har(?:CodeAt|At)|o(?:s|n(?:cat|textual|firm)|mpile)|eil|lear(?:Timeout|Interval)?|a(?:ptureEvents|ll)|reate(?:StyleSheet|Popup|EventObject))|t(?:o(?:GMTString|S(?:tring|ource)|U(?:TCString|pperCase)|Lo(?:caleString|werCase))|est|a(?:n|int(?:Enabled)?))|i(?:s(?:NaN|Finite)|ndexOf|talics)|d(?:isableExternalCapture|ump|etachEvent)|u(?:n(?:shift|taint|escape|watch)|pdateCommands)|j(?:oin|avaEnabled)|p(?:o(?:p|w)|ush|lugins.refresh|a(?:ddings|rse(?:Int|Float)?)|r(?:int|ompt|eference))|e(?:scape|nableExternalCapture|val|lementFromPoint|x(?:p|ec(?:Script|Command)?))|valueOf|UTC|queryCommand(?:State|Indeterm|Enabled|Value)|f(?:i(?:nd|le(?:ModifiedDate|Size|CreatedDate|UpdatedDate)|xed)|o(?:nt(?:size|color)|rward)|loor|romCharCode)|watch|l(?:ink|o(?:ad|g)|astIndexOf)|a(?:sin|nchor|cos|t(?:tachEvent|ob|an(?:2)?)|pply|lert|b(?:s|ort))|r(?:ou(?:nd|teEvents)|e(?:size(?:By|To)|calc|turnValue|place|verse|l(?:oad|ease(?:Capture|Events)))|andom)|g(?:o|et(?:ResponseHeader|M(?:i(?:nutes|lliseconds)|onth)|Se(?:conds|lection)|Hours|Year|Time(?:zoneOffset)?|Da(?:y|te)|UTC(?:M(?:i(?:nutes|lliseconds)|onth)|Seconds|Hours|Da(?:y|te)|FullYear)|FullYear|A(?:ttention|llResponseHeaders)))|m(?:in|ove(?:B(?:y|elow)|To(?:Absolute)?|Above)|ergeAttributes|a(?:tch|rgins|x))|b(?:toa|ig|o(?:ld|rderWidths)|link|ack))\b(?=\()/
}, {
token : "support.function.dom",
regex : /(s(?:ub(?:stringData|mit)|plitText|e(?:t(?:NamedItem|Attribute(?:Node)?)|lect))|has(?:ChildNodes|Feature)|namedItem|c(?:l(?:ick|o(?:se|neNode))|reate(?:C(?:omment|DATASection|aption)|T(?:Head|extNode|Foot)|DocumentFragment|ProcessingInstruction|E(?:ntityReference|lement)|Attribute))|tabIndex|i(?:nsert(?:Row|Before|Cell|Data)|tem)|open|delete(?:Row|C(?:ell|aption)|T(?:Head|Foot)|Data)|focus|write(?:ln)?|a(?:dd|ppend(?:Child|Data))|re(?:set|place(?:Child|Data)|move(?:NamedItem|Child|Attribute(?:Node)?)?)|get(?:NamedItem|Element(?:sBy(?:Name|TagName|ClassName)|ById)|Attribute(?:Node)?)|blur)\b(?=\()/
}, {
token : "support.constant",
regex : /(s(?:ystemLanguage|cr(?:ipts|ollbars|een(?:X|Y|Top|Left))|t(?:yle(?:Sheets)?|atus(?:Text|bar)?)|ibling(?:Below|Above)|ource|uffixes|e(?:curity(?:Policy)?|l(?:ection|f)))|h(?:istory|ost(?:name)?|as(?:h|Focus))|y|X(?:MLDocument|SLDocument)|n(?:ext|ame(?:space(?:s|URI)|Prop))|M(?:IN_VALUE|AX_VALUE)|c(?:haracterSet|o(?:n(?:structor|trollers)|okieEnabled|lorDepth|mp(?:onents|lete))|urrent|puClass|l(?:i(?:p(?:boardData)?|entInformation)|osed|asses)|alle(?:e|r)|rypto)|t(?:o(?:olbar|p)|ext(?:Transform|Indent|Decoration|Align)|ags)|SQRT(?:1_2|2)|i(?:n(?:ner(?:Height|Width)|put)|ds|gnoreCase)|zIndex|o(?:scpu|n(?:readystatechange|Line)|uter(?:Height|Width)|p(?:sProfile|ener)|ffscreenBuffering)|NEGATIVE_INFINITY|d(?:i(?:splay|alog(?:Height|Top|Width|Left|Arguments)|rectories)|e(?:scription|fault(?:Status|Ch(?:ecked|arset)|View)))|u(?:ser(?:Profile|Language|Agent)|n(?:iqueID|defined)|pdateInterval)|_content|p(?:ixelDepth|ort|ersonalbar|kcs11|l(?:ugins|atform)|a(?:thname|dding(?:Right|Bottom|Top|Left)|rent(?:Window|Layer)?|ge(?:X(?:Offset)?|Y(?:Offset)?))|r(?:o(?:to(?:col|type)|duct(?:Sub)?|mpter)|e(?:vious|fix)))|e(?:n(?:coding|abledPlugin)|x(?:ternal|pando)|mbeds)|v(?:isibility|endor(?:Sub)?|Linkcolor)|URLUnencoded|P(?:I|OSITIVE_INFINITY)|f(?:ilename|o(?:nt(?:Size|Family|Weight)|rmName)|rame(?:s|Element)|gColor)|E|whiteSpace|l(?:i(?:stStyleType|n(?:eHeight|kColor))|o(?:ca(?:tion(?:bar)?|lName)|wsrc)|e(?:ngth|ft(?:Context)?)|a(?:st(?:M(?:odified|atch)|Index|Paren)|yer(?:s|X)|nguage))|a(?:pp(?:MinorVersion|Name|Co(?:deName|re)|Version)|vail(?:Height|Top|Width|Left)|ll|r(?:ity|guments)|Linkcolor|bove)|r(?:ight(?:Context)?|e(?:sponse(?:XML|Text)|adyState))|global|x|m(?:imeTypes|ultiline|enubar|argin(?:Right|Bottom|Top|Left))|L(?:N(?:10|2)|OG(?:10E|2E))|b(?:o(?:ttom|rder(?:Width|RightWidth|BottomWidth|Style|Color|TopWidth|LeftWidth))|ufferDepth|elow|ackground(?:Color|Image)))\b/
}, {
token : "identifier",
regex : identifierRe
}, {
regex: "",
token: "empty",
next: "no_regex"
}
],
"start": [
DocCommentHighlightRules.getStartRule("doc-start"),
comments("start"),
{
token: "string.regexp",
regex: "\\/",
next: "regex"
}, {
token : "text",
regex : "\\s+|^$",
next : "start"
}, {
token: "empty",
regex: "",
next: "no_regex"
}
],
"regex": [
{
token: "regexp.keyword.operator",
regex: "\\\\(?:u[\\da-fA-F]{4}|x[\\da-fA-F]{2}|.)"
}, {
token: "string.regexp",
regex: "/[sxngimy]*",
next: "no_regex"
}, {
token : "invalid",
regex: /\{\d+\b,?\d*\}[+*]|[+*$^?][+*]|[$^][?]|\?{3,}/
}, {
token : "constant.language.escape",
regex: /\(\?[:=!]|\)|\{\d+\b,?\d*\}|[+*]\?|[()$^+*?.]/
}, {
token : "constant.language.delimiter",
regex: /\|/
}, {
token: "constant.language.escape",
regex: /\[\^?/,
next: "regex_character_class"
}, {
token: "empty",
regex: "$",
next: "no_regex"
}, {
defaultToken: "string.regexp"
}
],
"regex_character_class": [
{
token: "regexp.charclass.keyword.operator",
regex: "\\\\(?:u[\\da-fA-F]{4}|x[\\da-fA-F]{2}|.)"
}, {
token: "constant.language.escape",
regex: "]",
next: "regex"
}, {
token: "constant.language.escape",
regex: "-"
}, {
token: "empty",
regex: "$",
next: "no_regex"
}, {
defaultToken: "string.regexp.characterclass"
}
],
"function_arguments": [
{
token: "variable.parameter",
regex: identifierRe
}, {
token: "punctuation.operator",
regex: "[, ]+"
}, {
token: "punctuation.operator",
regex: "$"
}, {
token: "empty",
regex: "",
next: "no_regex"
}
],
"qqstring" : [
{
token : "constant.language.escape",
regex : escapedRe
}, {
token : "string",
regex : "\\\\$",
next : "qqstring"
}, {
token : "string",
regex : '"|$',
next : "no_regex"
}, {
defaultToken: "string"
}
],
"qstring" : [
{
token : "constant.language.escape",
regex : escapedRe
}, {
token : "string",
regex : "\\\\$",
next : "qstring"
}, {
token : "string",
regex : "'|$",
next : "no_regex"
}, {
defaultToken: "string"
}
]
};
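// Unless options.noES6 is set, extend the rules with ES6 template literals:
// the "{"/"}" rule below tracks quasi nesting on the tokenizer stack, and the
// backtick rule highlights `...` strings with ${...} interpolation.
// JSX tag support is added as well unless options.jsx is false.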
if (!options || !options.noES6) {
this.$rules.no_regex.unshift({
regex: "[{}]", onMatch: function(val, state, stack) {
this.next = val == "{" ? this.nextState : "";
if (val == "{" && stack.length) {
stack.unshift("start", state);
}
else if (val == "}" && stack.length) {
stack.shift();
this.next = stack.shift();
if (this.next.indexOf("string") != -1 || this.next.indexOf("jsx") != -1)
return "paren.quasi.end";
}
return val == "{" ? "paren.lparen" : "paren.rparen";
},
nextState: "start"
}, {
token : "string.quasi.start",
regex : /`/,
push : [{
token : "constant.language.escape",
regex : escapedRe
}, {
token : "paren.quasi.start",
regex : /\${/,
push : "start"
}, {
token : "string.quasi.end",
regex : /`/,
next : "pop"
}, {
defaultToken: "string.quasi"
}]
});
if (!options || options.jsx != false)
JSX.call(this);
}
this.embedRules(DocCommentHighlightRules, "doc-",
[ DocCommentHighlightRules.getEndRule("no_regex") ]);
this.normalizeRules();
};
oop.inherits(JavaScriptHighlightRules, TextHighlightRules);
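// JSX support: jsxTag tokenizes <Tag ...> and </Tag ...>, balancing the
// nesting depth on the tokenizer stack, while {...} inside JSX is pushed
// back into the "start" state and highlighted as JavaScript.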
function JSX() {
var tagRegex = identifierRe.replace("\\d", "\\d\\-");
var jsxTag = {
onMatch : function(val, state, stack) {
var offset = val.charAt(1) == "/" ? 2 : 1;
if (offset == 1) {
if (state != this.nextState)
stack.unshift(this.next, this.nextState, 0);
else
stack.unshift(this.next);
stack[2]++;
} else if (offset == 2) {
if (state == this.nextState) {
stack[1]--;
if (!stack[1] || stack[1] < 0) {
stack.shift();
stack.shift();
}
}
}
return [{
type: "meta.tag.punctuation." + (offset == 1 ? "" : "end-") + "tag-open.xml",
value: val.slice(0, offset)
}, {
type: "meta.tag.tag-name.xml",
value: val.substr(offset)
}];
},
regex : "</?" + tagRegex,
next: "jsxAttributes",
nextState: "jsx"
};
this.$rules.start.unshift(jsxTag);
var jsxJsRule = {
regex: "{",
token: "paren.quasi.start",
push: "start"
};
this.$rules.jsx = [
jsxJsRule,
jsxTag,
{include : "reference"},
{defaultToken: "string"}
];
this.$rules.jsxAttributes = [{
token : "meta.tag.punctuation.tag-close.xml",
regex : "/?>",
onMatch : function(value, currentState, stack) {
if (currentState == stack[0])
stack.shift();
if (value.length == 2) {
if (stack[0] == this.nextState)
stack[1]--;
if (!stack[1] || stack[1] < 0) {
stack.splice(0, 2);
}
}
this.next = stack[0] || "start";
return [{type: this.token, value: value}];
},
nextState: "jsx"
},
jsxJsRule,
comments("jsxAttributes"),
{
token : "entity.other.attribute-name.xml",
regex : tagRegex
}, {
token : "keyword.operator.attribute-equals.xml",
regex : "="
}, {
token : "text.tag-whitespace.xml",
regex : "\\s+"
}, {
token : "string.attribute-value.xml",
regex : "'",
stateName : "jsx_attr_q",
push : [
{token : "string.attribute-value.xml", regex: "'", next: "pop"},
{include : "reference"},
{defaultToken : "string.attribute-value.xml"}
]
}, {
token : "string.attribute-value.xml",
regex : '"',
stateName : "jsx_attr_qq",
push : [
{token : "string.attribute-value.xml", regex: '"', next: "pop"},
{include : "reference"},
{defaultToken : "string.attribute-value.xml"}
]
},
jsxTag
];
this.$rules.reference = [{
token : "constant.language.escape.reference.xml",
regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)"
}];
}
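// Shared comment rules: block comments (/* ... */) and line comments (//),
// with doc tags highlighted inside both; each returns to the given state
// (or pops) when the comment ends.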
function comments(next) {
return [
{
token : "comment", // multi line comment
regex : /\/\*/,
next: [
DocCommentHighlightRules.getTagRule(),
{token : "comment", regex : "\\*\\/", next : next || "pop"},
{defaultToken : "comment", caseInsensitive: true}
]
}, {
token : "comment",
regex : "\\/\\/",
next: [
DocCommentHighlightRules.getTagRule(),
{token : "comment", regex : "$|^", next : next || "pop"},
{defaultToken : "comment", caseInsensitive: true}
]
}
];
}
exports.JavaScriptHighlightRules = JavaScriptHighlightRules;
});
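// Outdent helper: when a closing "}" is typed on an otherwise blank line,
// re-indent that line to match the indentation of the row that holds the
// matching opening brace.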
ace.define("ace/mode/matching_brace_outdent",["require","exports","module","ace/range"], function(require, exports, module) {
"use strict";
var Range = require("../range").Range;
var MatchingBraceOutdent = function() {};
(function() {
this.checkOutdent = function(line, input) {
if (! /^\s+$/.test(line))
return false;
return /^\s*\}/.test(input);
};
this.autoOutdent = function(doc, row) {
var line = doc.getLine(row);
var match = line.match(/^(\s*\})/);
if (!match) return 0;
var column = match[1].length;
var openBracePos = doc.findMatchingBracket({row: row, column: column});
if (!openBracePos || openBracePos.row == row) return 0;
var indent = this.$getIndent(doc.getLine(openBracePos.row));
doc.replace(new Range(row, 0, row, column-1), indent);
};
this.$getIndent = function(line) {
return line.match(/^\s*/)[0];
};
}).call(MatchingBraceOutdent.prototype);
exports.MatchingBraceOutdent = MatchingBraceOutdent;
});
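// C-style folding: folds brace/bracket blocks and block comments, and
// recognizes "#region"/"#endregion" style comment markers as fold regions.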
ace.define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Range = require("../../range").Range;
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(commentRegex) {
if (commentRegex) {
this.foldingStartMarker = new RegExp(
this.foldingStartMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.start)
);
this.foldingStopMarker = new RegExp(
this.foldingStopMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.end)
);
}
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.foldingStartMarker = /(\{|\[)[^\}\]]*$|^\s*(\/\*)/;
this.foldingStopMarker = /^[^\[\{]*(\}|\])|^[\s\*]*(\*\/)/;
this.singleLineBlockCommentRe= /^\s*(\/\*).*\*\/\s*$/;
this.tripleStarBlockCommentRe = /^\s*(\/\*\*\*).*\*\/\s*$/;
this.startRegionRe = /^\s*(\/\*|\/\/)#?region\b/;
this._getFoldWidgetBase = this.getFoldWidget;
this.getFoldWidget = function(session, foldStyle, row) {
var line = session.getLine(row);
if (this.singleLineBlockCommentRe.test(line)) {
if (!this.startRegionRe.test(line) && !this.tripleStarBlockCommentRe.test(line))
return "";
}
var fw = this._getFoldWidgetBase(session, foldStyle, row);
if (!fw && this.startRegionRe.test(line))
return "start"; // lineCommentRegionStart
return fw;
};
this.getFoldWidgetRange = function(session, foldStyle, row, forceMultiline) {
var line = session.getLine(row);
if (this.startRegionRe.test(line))
return this.getCommentRegionBlock(session, line, row);
var match = line.match(this.foldingStartMarker);
if (match) {
var i = match.index;
if (match[1])
return this.openingBracketBlock(session, match[1], row, i);
var range = session.getCommentFoldRange(row, i + match[0].length, 1);
if (range && !range.isMultiLine()) {
if (forceMultiline) {
range = this.getSectionRange(session, row);
} else if (foldStyle != "all")
range = null;
}
return range;
}
if (foldStyle === "markbegin")
return;
var match = line.match(this.foldingStopMarker);
if (match) {
var i = match.index + match[0].length;
if (match[1])
return this.closingBracketBlock(session, match[1], row, i);
return session.getCommentFoldRange(row, i, -1);
}
};
this.getSectionRange = function(session, row) {
var line = session.getLine(row);
var startIndent = line.search(/\S/);
var startRow = row;
var startColumn = line.length;
row = row + 1;
var endRow = row;
var maxRow = session.getLength();
while (++row < maxRow) {
line = session.getLine(row);
var indent = line.search(/\S/);
if (indent === -1)
continue;
if (startIndent > indent)
break;
var subRange = this.getFoldWidgetRange(session, "all", row);
if (subRange) {
if (subRange.start.row <= startRow) {
break;
} else if (subRange.isMultiLine()) {
row = subRange.end.row;
} else if (startIndent == indent) {
break;
}
}
endRow = row;
}
return new Range(startRow, startColumn, endRow, session.getLine(endRow).length);
};
this.getCommentRegionBlock = function(session, line, row) {
var startColumn = line.search(/\s*$/);
var maxRow = session.getLength();
var startRow = row;
var re = /^\s*(?:\/\*|\/\/|--)#?(end)?region\b/;
var depth = 1;
while (++row < maxRow) {
line = session.getLine(row);
var m = re.exec(line);
if (!m) continue;
if (m[1]) depth--;
else depth++;
if (!depth) break;
}
var endRow = row;
if (endRow > startRow) {
return new Range(startRow, startColumn, endRow, line.length);
}
};
}).call(FoldMode.prototype);
});
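// The JavaScript mode: wires together the highlight rules, brace outdent,
// C-style behaviours and folding, computes auto-indent for blocks and doc
// comments, and attaches a background worker that reports lint annotations.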
ace.define("ace/mode/javascript",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/javascript_highlight_rules","ace/mode/matching_brace_outdent","ace/range","ace/worker/worker_client","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var JavaScriptHighlightRules = require("./javascript_highlight_rules").JavaScriptHighlightRules;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var Range = require("../range").Range;
var WorkerClient = require("../worker/worker_client").WorkerClient;
var CstyleBehaviour = require("./behaviour/cstyle").CstyleBehaviour;
var CStyleFoldMode = require("./folding/cstyle").FoldMode;
var Mode = function() {
this.HighlightRules = JavaScriptHighlightRules;
this.$outdent = new MatchingBraceOutdent();
this.$behaviour = new CstyleBehaviour();
this.foldingRules = new CStyleFoldMode();
};
oop.inherits(Mode, TextMode);
(function() {
this.lineCommentStart = "//";
this.blockComment = {start: "/*", end: "*/"};
this.getNextLineIndent = function(state, line, tab) {
var indent = this.$getIndent(line);
var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
var tokens = tokenizedLine.tokens;
var endState = tokenizedLine.state;
if (tokens.length && tokens[tokens.length-1].type == "comment") {
return indent;
}
if (state == "start" || state == "no_regex") {
var match = line.match(/^.*(?:\bcase\b.*:|[\{\(\[])\s*$/);
if (match) {
indent += tab;
}
} else if (state == "doc-start") {
if (endState == "start" || endState == "no_regex") {
return "";
}
var match = line.match(/^\s*(\/?)\*/);
if (match) {
if (match[1]) {
indent += " ";
}
indent += "* ";
}
}
return indent;
};
this.checkOutdent = function(state, line, input) {
return this.$outdent.checkOutdent(line, input);
};
this.autoOutdent = function(state, doc, row) {
this.$outdent.autoOutdent(doc, row);
};
this.createWorker = function(session) {
var worker = new WorkerClient(["ace"], "ace/mode/javascript_worker", "JavaScriptWorker");
worker.attachToDocument(session.getDocument());
worker.on("annotate", function(results) {
session.setAnnotations(results.data);
});
worker.on("terminate", function() {
session.clearAnnotations();
});
return worker;
};
this.$id = "ace/mode/javascript";
}).call(Mode.prototype);
exports.Mode = Mode;
});
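// CSS highlight rules: keyword tables for properties, functions, constants,
// colors and fonts, with states for comments, @-rules ("media") and rulesets.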
ace.define("ace/mode/css_highlight_rules",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var lang = require("../lib/lang");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var supportType = exports.supportType = "align-content|align-items|align-self|all|animation|animation-delay|animation-direction|animation-duration|animation-fill-mode|animation-iteration-count|animation-name|animation-play-state|animation-timing-function|backface-visibility|background|background-attachment|background-blend-mode|background-clip|background-color|background-image|background-origin|background-position|background-repeat|background-size|border|border-bottom|border-bottom-color|border-bottom-left-radius|border-bottom-right-radius|border-bottom-style|border-bottom-width|border-collapse|border-color|border-image|border-image-outset|border-image-repeat|border-image-slice|border-image-source|border-image-width|border-left|border-left-color|border-left-style|border-left-width|border-radius|border-right|border-right-color|border-right-style|border-right-width|border-spacing|border-style|border-top|border-top-color|border-top-left-radius|border-top-right-radius|border-top-style|border-top-width|border-width|bottom|box-shadow|box-sizing|caption-side|clear|clip|color|column-count|column-fill|column-gap|column-rule|column-rule-color|column-rule-style|column-rule-width|column-span|column-width|columns|content|counter-increment|counter-reset|cursor|direction|display|empty-cells|filter|flex|flex-basis|flex-direction|flex-flow|flex-grow|flex-shrink|flex-wrap|float|font|font-family|font-size|font-size-adjust|font-stretch|font-style|font-variant|font-weight|hanging-punctuation|height|justify-content|left|letter-spacing|line-height|list-style|list-style-image|list-style-position|list-style-type|margin|margin-bottom|margin-left|margin-right|margin-top|max-height|max-width|min-height|min-width|nav-down|nav-index|nav-left|nav-right|nav-up|opacity|order|outline|outline-color|outline-offset|outline-style|outline-width|overflow|overflow-x|overflow-y|padding|padding-bottom|padding-left|padding-right|padding-top|page-break-after|page-break-before|page-break-inside|perspective|perspective-origin|position|quotes|resize|right|tab-size|table-layout|text-align|text-align-last|text-decoration|text-decoration-color|text-decoration-line|text-decoration-style|text-indent|text-justify|text-overflow|text-shadow|text-transform|top|transform|transform-origin|transform-style|transition|transition-delay|transition-duration|transition-property|transition-timing-function|unicode-bidi|vertical-align|visibility|white-space|width|word-break|word-spacing|word-wrap|z-index";
var supportFunction = exports.supportFunction = "rgb|rgba|url|attr|counter|counters";
var supportConstant = exports.supportConstant = "absolute|after-edge|after|all-scroll|all|alphabetic|always|antialiased|armenian|auto|avoid-column|avoid-page|avoid|balance|baseline|before-edge|before|below|bidi-override|block-line-height|block|bold|bolder|border-box|both|bottom|box|break-all|break-word|capitalize|caps-height|caption|center|central|char|circle|cjk-ideographic|clone|close-quote|col-resize|collapse|column|consider-shifts|contain|content-box|cover|crosshair|cubic-bezier|dashed|decimal-leading-zero|decimal|default|disabled|disc|disregard-shifts|distribute-all-lines|distribute-letter|distribute-space|distribute|dotted|double|e-resize|ease-in|ease-in-out|ease-out|ease|ellipsis|end|exclude-ruby|fill|fixed|georgian|glyphs|grid-height|groove|hand|hanging|hebrew|help|hidden|hiragana-iroha|hiragana|horizontal|icon|ideograph-alpha|ideograph-numeric|ideograph-parenthesis|ideograph-space|ideographic|inactive|include-ruby|inherit|initial|inline-block|inline-box|inline-line-height|inline-table|inline|inset|inside|inter-ideograph|inter-word|invert|italic|justify|katakana-iroha|katakana|keep-all|last|left|lighter|line-edge|line-through|line|linear|list-item|local|loose|lower-alpha|lower-greek|lower-latin|lower-roman|lowercase|lr-tb|ltr|mathematical|max-height|max-size|medium|menu|message-box|middle|move|n-resize|ne-resize|newspaper|no-change|no-close-quote|no-drop|no-open-quote|no-repeat|none|normal|not-allowed|nowrap|nw-resize|oblique|open-quote|outset|outside|overline|padding-box|page|pointer|pre-line|pre-wrap|pre|preserve-3d|progress|relative|repeat-x|repeat-y|repeat|replaced|reset-size|ridge|right|round|row-resize|rtl|s-resize|scroll|se-resize|separate|slice|small-caps|small-caption|solid|space|square|start|static|status-bar|step-end|step-start|steps|stretch|strict|sub|super|sw-resize|table-caption|table-cell|table-column-group|table-column|table-footer-group|table-header-group|table-row-group|table-row|table|tb-rl|text-after-edge|text-before-edge|text-bottom|text-size|text-top|text|thick|thin|transparent|underline|upper-alpha|upper-latin|upper-roman|uppercase|use-script|vertical-ideographic|vertical-text|visible|w-resize|wait|whitespace|z-index|zero";
var supportConstantColor = exports.supportConstantColor = "aqua|black|blue|fuchsia|gray|green|lime|maroon|navy|olive|orange|purple|red|silver|teal|white|yellow";
var supportConstantFonts = exports.supportConstantFonts = "arial|century|comic|courier|cursive|fantasy|garamond|georgia|helvetica|impact|lucida|symbol|system|tahoma|times|trebuchet|utopia|verdana|webdings|sans-serif|serif|monospace";
var numRe = exports.numRe = "\\-?(?:(?:[0-9]+)|(?:[0-9]*\\.[0-9]+))";
var pseudoElements = exports.pseudoElements = "(\\:+)\\b(after|before|first-letter|first-line|moz-selection|selection)\\b";
var pseudoClasses = exports.pseudoClasses = "(:)\\b(active|checked|disabled|empty|enabled|first-child|first-of-type|focus|hover|indeterminate|invalid|last-child|last-of-type|link|not|nth-child|nth-last-child|nth-last-of-type|nth-of-type|only-child|only-of-type|required|root|target|valid|visited)\\b";
var CssHighlightRules = function() {
var keywordMapper = this.createKeywordMapper({
"support.function": supportFunction,
"support.constant": supportConstant,
"support.type": supportType,
"support.constant.color": supportConstantColor,
"support.constant.fonts": supportConstantFonts
}, "text", true);
this.$rules = {
"start" : [{
token : "comment", // multi line comment
regex : "\\/\\*",
push : "comment"
}, {
token: "paren.lparen",
regex: "\\{",
push: "ruleset"
}, {
token: "string",
regex: "@.*?{",
push: "media"
}, {
token: "keyword",
regex: "#[a-z0-9-_]+"
}, {
token: "variable",
regex: "\\.[a-z0-9-_]+"
}, {
token: "string",
regex: ":[a-z0-9-_]+"
}, {
token: "constant",
regex: "[a-z0-9-_]+"
}, {
caseInsensitive: true
}],
"media" : [{
token : "comment", // multi line comment
regex : "\\/\\*",
push : "comment"
}, {
token: "paren.lparen",
regex: "\\{",
push: "ruleset"
}, {
token: "string",
regex: "\\}",
next: "pop"
}, {
token: "keyword",
regex: "#[a-z0-9-_]+"
}, {
token: "variable",
regex: "\\.[a-z0-9-_]+"
}, {
token: "string",
regex: ":[a-z0-9-_]+"
}, {
token: "constant",
regex: "[a-z0-9-_]+"
}, {
caseInsensitive: true
}],
"comment" : [{
token : "comment",
regex : "\\*\\/",
next : "pop"
}, {
defaultToken : "comment"
}],
"ruleset" : [
{
token : "paren.rparen",
regex : "\\}",
next: "pop"
}, {
token : "comment", // multi line comment
regex : "\\/\\*",
push : "comment"
}, {
token : "string", // single line
regex : '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]'
}, {
token : "string", // single line
regex : "['](?:(?:\\\\.)|(?:[^'\\\\]))*?[']"
}, {
token : ["constant.numeric", "keyword"],
regex : "(" + numRe + ")(ch|cm|deg|em|ex|fr|gd|grad|Hz|in|kHz|mm|ms|pc|pt|px|rad|rem|s|turn|vh|vm|vw|%)"
}, {
token : "constant.numeric",
regex : numRe
}, {
token : "constant.numeric", // hex6 color
regex : "#[a-f0-9]{6}"
}, {
token : "constant.numeric", // hex3 color
regex : "#[a-f0-9]{3}"
}, {
token : ["punctuation", "entity.other.attribute-name.pseudo-element.css"],
regex : pseudoElements
}, {
token : ["punctuation", "entity.other.attribute-name.pseudo-class.css"],
regex : pseudoClasses
}, {
token : ["support.function", "string", "support.function"],
regex : "(url\\()(.*)(\\))"
}, {
token : keywordMapper,
regex : "\\-?[a-zA-Z_][a-zA-Z0-9_\\-]*"
}, {
caseInsensitive: true
}]
};
this.normalizeRules();
};
oop.inherits(CssHighlightRules, TextHighlightRules);
exports.CssHighlightRules = CssHighlightRules;
});
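// CSS completions: a static property -> value map, extended at runtime with
// whatever additional properties the browser exposes on an element's style
// object (camelCase names are converted back to dashed form).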
ace.define("ace/mode/css_completions",["require","exports","module"], function(require, exports, module) {
"use strict";
var propertyMap = {
"background": {"#$0": 1},
"background-color": {"#$0": 1, "transparent": 1},
"background-image": {"url('/$0')": 1},
"background-repeat": {"repeat": 1, "repeat-x": 1, "repeat-y": 1, "no-repeat": 1, "inherit": 1},
"background-position": {"bottom":2, "center":2, "left":2, "right":2, "top":2, "inherit":2},
"background-attachment": {"scroll": 1, "fixed": 1},
"background-size": {"cover": 1, "contain": 1},
"background-clip": {"border-box": 1, "padding-box": 1, "content-box": 1},
"background-origin": {"border-box": 1, "padding-box": 1, "content-box": 1},
"border": {"solid $0": 1, "dashed $0": 1, "dotted $0": 1, "#$0": 1},
"border-color": {"#$0": 1},
"border-style": {"solid":2, "dashed":2, "dotted":2, "double":2, "groove":2, "hidden":2, "inherit":2, "inset":2, "none":2, "outset":2, "ridge":2},
"border-collapse": {"collapse": 1, "separate": 1},
"bottom": {"px": 1, "em": 1, "%": 1},
"clear": {"left": 1, "right": 1, "both": 1, "none": 1},
"color": {"#$0": 1, "rgb(#$00,0,0)": 1},
"cursor": {"default": 1, "pointer": 1, "move": 1, "text": 1, "wait": 1, "help": 1, "progress": 1, "n-resize": 1, "ne-resize": 1, "e-resize": 1, "se-resize": 1, "s-resize": 1, "sw-resize": 1, "w-resize": 1, "nw-resize": 1},
"display": {"none": 1, "block": 1, "inline": 1, "inline-block": 1, "table-cell": 1},
"empty-cells": {"show": 1, "hide": 1},
"float": {"left": 1, "right": 1, "none": 1},
"font-family": {"Arial":2,"Comic Sans MS":2,"Consolas":2,"Courier New":2,"Courier":2,"Georgia":2,"Monospace":2,"Sans-Serif":2, "Segoe UI":2,"Tahoma":2,"Times New Roman":2,"Trebuchet MS":2,"Verdana": 1},
"font-size": {"px": 1, "em": 1, "%": 1},
"font-weight": {"bold": 1, "normal": 1},
"font-style": {"italic": 1, "normal": 1},
"font-variant": {"normal": 1, "small-caps": 1},
"height": {"px": 1, "em": 1, "%": 1},
"left": {"px": 1, "em": 1, "%": 1},
"letter-spacing": {"normal": 1},
"line-height": {"normal": 1},
"list-style-type": {"none": 1, "disc": 1, "circle": 1, "square": 1, "decimal": 1, "decimal-leading-zero": 1, "lower-roman": 1, "upper-roman": 1, "lower-greek": 1, "lower-latin": 1, "upper-latin": 1, "georgian": 1, "lower-alpha": 1, "upper-alpha": 1},
"margin": {"px": 1, "em": 1, "%": 1},
"margin-right": {"px": 1, "em": 1, "%": 1},
"margin-left": {"px": 1, "em": 1, "%": 1},
"margin-top": {"px": 1, "em": 1, "%": 1},
"margin-bottom": {"px": 1, "em": 1, "%": 1},
"max-height": {"px": 1, "em": 1, "%": 1},
"max-width": {"px": 1, "em": 1, "%": 1},
"min-height": {"px": 1, "em": 1, "%": 1},
"min-width": {"px": 1, "em": 1, "%": 1},
"overflow": {"hidden": 1, "visible": 1, "auto": 1, "scroll": 1},
"overflow-x": {"hidden": 1, "visible": 1, "auto": 1, "scroll": 1},
"overflow-y": {"hidden": 1, "visible": 1, "auto": 1, "scroll": 1},
"padding": {"px": 1, "em": 1, "%": 1},
"padding-top": {"px": 1, "em": 1, "%": 1},
"padding-right": {"px": 1, "em": 1, "%": 1},
"padding-bottom": {"px": 1, "em": 1, "%": 1},
"padding-left": {"px": 1, "em": 1, "%": 1},
"page-break-after": {"auto": 1, "always": 1, "avoid": 1, "left": 1, "right": 1},
"page-break-before": {"auto": 1, "always": 1, "avoid": 1, "left": 1, "right": 1},
"position": {"absolute": 1, "relative": 1, "fixed": 1, "static": 1},
"right": {"px": 1, "em": 1, "%": 1},
"table-layout": {"fixed": 1, "auto": 1},
"text-decoration": {"none": 1, "underline": 1, "line-through": 1, "blink": 1},
"text-align": {"left": 1, "right": 1, "center": 1, "justify": 1},
"text-transform": {"capitalize": 1, "uppercase": 1, "lowercase": 1, "none": 1},
"top": {"px": 1, "em": 1, "%": 1},
"vertical-align": {"top": 1, "bottom": 1},
"visibility": {"hidden": 1, "visible": 1},
"white-space": {"nowrap": 1, "normal": 1, "pre": 1, "pre-line": 1, "pre-wrap": 1},
"width": {"px": 1, "em": 1, "%": 1},
"word-spacing": {"normal": 1},
"filter": {"alpha(opacity=$0100)": 1},
"text-shadow": {"$02px 2px 2px #777": 1},
"text-overflow": {"ellipsis-word": 1, "clip": 1, "ellipsis": 1},
"-moz-border-radius": 1,
"-moz-border-radius-topright": 1,
"-moz-border-radius-bottomright": 1,
"-moz-border-radius-topleft": 1,
"-moz-border-radius-bottomleft": 1,
"-webkit-border-radius": 1,
"-webkit-border-top-right-radius": 1,
"-webkit-border-top-left-radius": 1,
"-webkit-border-bottom-right-radius": 1,
"-webkit-border-bottom-left-radius": 1,
"-moz-box-shadow": 1,
"-webkit-box-shadow": 1,
"transform": {"rotate($00deg)": 1, "skew($00deg)": 1},
"-moz-transform": {"rotate($00deg)": 1, "skew($00deg)": 1},
"-webkit-transform": {"rotate($00deg)": 1, "skew($00deg)": 1 }
};
var CssCompletions = function() {
};
(function() {
this.completionsDefined = false;
this.defineCompletions = function() {
if (typeof document !== "undefined") {
var style = document.createElement('c').style;
for (var i in style) {
if (typeof style[i] !== 'string')
continue;
var name = i.replace(/[A-Z]/g, function(x) {
return '-' + x.toLowerCase();
});
if (!propertyMap.hasOwnProperty(name))
propertyMap[name] = 1;
}
}
this.completionsDefined = true;
};
this.getCompletions = function(state, session, pos, prefix) {
if (!this.completionsDefined) {
this.defineCompletions();
}
var token = session.getTokenAt(pos.row, pos.column);
if (!token)
return [];
if (state==='ruleset'){
var line = session.getLine(pos.row).substr(0, pos.column);
if (/:[^;]+$/.test(line)) {
return this.getPropertyValueCompletions(state, session, pos, prefix);
} else {
return this.getPropertyCompletions(state, session, pos, prefix);
}
}
return [];
};
this.getPropertyCompletions = function(state, session, pos, prefix) {
var properties = Object.keys(propertyMap);
return properties.map(function(property){
return {
caption: property,
snippet: property + ': $0',
meta: "property",
score: Number.MAX_VALUE
};
});
};
this.getPropertyValueCompletions = function(state, session, pos, prefix) {
var line = session.getLine(pos.row).substr(0, pos.column);
var property = (/([\w\-]+):[^:]*$/.exec(line) || {})[1];
if (!property)
return [];
var values = [];
if (property in propertyMap && typeof propertyMap[property] === "object") {
values = Object.keys(propertyMap[property]);
}
return values.map(function(value){
return {
caption: value,
snippet: value,
meta: "property value",
score: Number.MAX_VALUE
};
});
};
}).call(CssCompletions.prototype);
exports.CssCompletions = CssCompletions;
});
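// CSS-specific behaviours on top of CstyleBehaviour: typing ':' after a
// property name inserts ':;' with the cursor between them (or steps over an
// existing ':'), deleting that ':' removes the paired ';', and typing ';'
// in front of an existing ';' just moves past it.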
ace.define("ace/mode/behaviour/css",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/mode/behaviour/cstyle","ace/token_iterator"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Behaviour = require("../behaviour").Behaviour;
var CstyleBehaviour = require("./cstyle").CstyleBehaviour;
var TokenIterator = require("../../token_iterator").TokenIterator;
var CssBehaviour = function () {
this.inherit(CstyleBehaviour);
this.add("colon", "insertion", function (state, action, editor, session, text) {
if (text === ':') {
var cursor = editor.getCursorPosition();
var iterator = new TokenIterator(session, cursor.row, cursor.column);
var token = iterator.getCurrentToken();
if (token && token.value.match(/\s+/)) {
token = iterator.stepBackward();
}
if (token && token.type === 'support.type') {
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar === ':') {
return {
text: '',
selection: [1, 1]
};
}
if (!line.substring(cursor.column).match(/^\s*;/)) {
return {
text: ':;',
selection: [1, 1]
};
}
}
}
});
this.add("colon", "deletion", function (state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && selected === ':') {
var cursor = editor.getCursorPosition();
var iterator = new TokenIterator(session, cursor.row, cursor.column);
var token = iterator.getCurrentToken();
if (token && token.value.match(/\s+/)) {
token = iterator.stepBackward();
}
if (token && token.type === 'support.type') {
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.end.column, range.end.column + 1);
if (rightChar === ';') {
range.end.column ++;
return range;
}
}
}
});
this.add("semicolon", "insertion", function (state, action, editor, session, text) {
if (text === ';') {
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
if (rightChar === ';') {
return {
text: '',
selection: [1, 1]
};
}
}
});
};
oop.inherits(CssBehaviour, CstyleBehaviour);
exports.CssBehaviour = CssBehaviour;
});
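// The CSS mode: highlight rules, brace outdent, the behaviours and
// completions defined above, C-style folding, and a worker that supplies
// annotations for the document.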
ace.define("ace/mode/css",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/css_highlight_rules","ace/mode/matching_brace_outdent","ace/worker/worker_client","ace/mode/css_completions","ace/mode/behaviour/css","ace/mode/folding/cstyle"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var CssHighlightRules = require("./css_highlight_rules").CssHighlightRules;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var WorkerClient = require("../worker/worker_client").WorkerClient;
var CssCompletions = require("./css_completions").CssCompletions;
var CssBehaviour = require("./behaviour/css").CssBehaviour;
var CStyleFoldMode = require("./folding/cstyle").FoldMode;
var Mode = function() {
this.HighlightRules = CssHighlightRules;
this.$outdent = new MatchingBraceOutdent();
this.$behaviour = new CssBehaviour();
this.$completer = new CssCompletions();
this.foldingRules = new CStyleFoldMode();
};
oop.inherits(Mode, TextMode);
(function() {
this.foldingRules = "cStyle";
this.blockComment = {start: "/*", end: "*/"};
this.getNextLineIndent = function(state, line, tab) {
var indent = this.$getIndent(line);
var tokens = this.getTokenizer().getLineTokens(line, state).tokens;
if (tokens.length && tokens[tokens.length-1].type == "comment") {
return indent;
}
var match = line.match(/^.*\{\s*$/);
if (match) {
indent += tab;
}
return indent;
};
this.checkOutdent = function(state, line, input) {
return this.$outdent.checkOutdent(line, input);
};
this.autoOutdent = function(state, doc, row) {
this.$outdent.autoOutdent(doc, row);
};
this.getCompletions = function(state, session, pos, prefix) {
return this.$completer.getCompletions(state, session, pos, prefix);
};
this.createWorker = function(session) {
var worker = new WorkerClient(["ace"], "ace/mode/css_worker", "Worker");
worker.attachToDocument(session.getDocument());
worker.on("annotate", function(e) {
session.setAnnotations(e.data);
});
worker.on("terminate", function() {
session.clearAnnotations();
});
return worker;
};
this.$id = "ace/mode/css";
}).call(Mode.prototype);
exports.Mode = Mode;
});
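// XML highlight rules: states for CDATA sections, the <?xml ?> declaration,
// processing instructions, comments, DOCTYPE (including an internal subset),
// tags, attributes and entity references.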
ace.define("ace/mode/xml_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var XmlHighlightRules = function(normalize) {
var tagRegex = "[_:a-zA-Z\xc0-\uffff][-_:.a-zA-Z0-9\xc0-\uffff]*";
this.$rules = {
start : [
{token : "string.cdata.xml", regex : "<\\!\\[CDATA\\[", next : "cdata"},
{
token : ["punctuation.xml-decl.xml", "keyword.xml-decl.xml"],
regex : "(<\\?)(xml)(?=[\\s])", next : "xml_decl", caseInsensitive: true
},
{
token : ["punctuation.instruction.xml", "keyword.instruction.xml"],
regex : "(<\\?)(" + tagRegex + ")", next : "processing_instruction"
},
{token : "comment.xml", regex : "<\\!--", next : "comment"},
{
token : ["xml-pe.doctype.xml", "xml-pe.doctype.xml"],
regex : "(<\\!)(DOCTYPE)(?=[\\s])", next : "doctype", caseInsensitive: true
},
{include : "tag"},
{token : "text.end-tag-open.xml", regex: "</"},
{token : "text.tag-open.xml", regex: "<"},
{include : "reference"},
{defaultToken : "text.xml"}
],
xml_decl : [{
token : "entity.other.attribute-name.decl-attribute-name.xml",
regex : "(?:" + tagRegex + ":)?" + tagRegex
}, {
token : "keyword.operator.decl-attribute-equals.xml",
regex : "="
}, {
include: "whitespace"
}, {
include: "string"
}, {
token : "punctuation.xml-decl.xml",
regex : "\\?>",
next : "start"
}],
processing_instruction : [
{token : "punctuation.instruction.xml", regex : "\\?>", next : "start"},
{defaultToken : "instruction.xml"}
],
doctype : [
{include : "whitespace"},
{include : "string"},
{token : "xml-pe.doctype.xml", regex : ">", next : "start"},
{token : "xml-pe.xml", regex : "[-_a-zA-Z0-9:]+"},
{token : "punctuation.int-subset", regex : "\\[", push : "int_subset"}
],
int_subset : [{
token : "text.xml",
regex : "\\s+"
}, {
token: "punctuation.int-subset.xml",
regex: "]",
next: "pop"
}, {
token : ["punctuation.markup-decl.xml", "keyword.markup-decl.xml"],
regex : "(<\\!)(" + tagRegex + ")",
push : [{
token : "text",
regex : "\\s+"
},
{
token : "punctuation.markup-decl.xml",
regex : ">",
next : "pop"
},
{include : "string"}]
}],
cdata : [
{token : "string.cdata.xml", regex : "\\]\\]>", next : "start"},
{token : "text.xml", regex : "\\s+"},
{token : "text.xml", regex : "(?:[^\\]]|\\](?!\\]>))+"}
],
comment : [
{token : "comment.xml", regex : "-->", next : "start"},
{defaultToken : "comment.xml"}
],
reference : [{
token : "constant.language.escape.reference.xml",
regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)"
}],
attr_reference : [{
token : "constant.language.escape.reference.attribute-value.xml",
regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)"
}],
tag : [{
token : ["meta.tag.punctuation.tag-open.xml", "meta.tag.punctuation.end-tag-open.xml", "meta.tag.tag-name.xml"],
regex : "(?:(<)|(</))((?:" + tagRegex + ":)?" + tagRegex + ")",
next: [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : "start"}
]
}],
tag_whitespace : [
{token : "text.tag-whitespace.xml", regex : "\\s+"}
],
whitespace : [
{token : "text.whitespace.xml", regex : "\\s+"}
],
string: [{
token : "string.xml",
regex : "'",
push : [
{token : "string.xml", regex: "'", next: "pop"},
{defaultToken : "string.xml"}
]
}, {
token : "string.xml",
regex : '"',
push : [
{token : "string.xml", regex: '"', next: "pop"},
{defaultToken : "string.xml"}
]
}],
attributes: [{
token : "entity.other.attribute-name.xml",
regex : "(?:" + tagRegex + ":)?" + tagRegex
}, {
token : "keyword.operator.attribute-equals.xml",
regex : "="
}, {
include: "tag_whitespace"
}, {
include: "attribute_value"
}],
attribute_value: [{
token : "string.attribute-value.xml",
regex : "'",
push : [
{token : "string.attribute-value.xml", regex: "'", next: "pop"},
{include : "attr_reference"},
{defaultToken : "string.attribute-value.xml"}
]
}, {
token : "string.attribute-value.xml",
regex : '"',
push : [
{token : "string.attribute-value.xml", regex: '"', next: "pop"},
{include : "attr_reference"},
{defaultToken : "string.attribute-value.xml"}
]
}]
};
if (this.constructor === XmlHighlightRules)
this.normalizeRules();
};
(function() {
this.embedTagRules = function(HighlightRules, prefix, tag){
this.$rules.tag.unshift({
token : ["meta.tag.punctuation.tag-open.xml", "meta.tag." + tag + ".tag-name.xml"],
regex : "(<)(" + tag + "(?=\\s|>|$))",
next: [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : prefix + "start"}
]
});
this.$rules[tag + "-end"] = [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next: "start",
onMatch : function(value, currentState, stack) {
stack.splice(0);
return this.token;
}}
];
this.embedRules(HighlightRules, prefix, [{
token: ["meta.tag.punctuation.end-tag-open.xml", "meta.tag." + tag + ".tag-name.xml"],
regex : "(</)(" + tag + "(?=\\s|>|$))",
next: tag + "-end"
}, {
token: "string.cdata.xml",
regex : "<\\!\\[CDATA\\["
}, {
token: "string.cdata.xml",
regex : "\\]\\]>"
}]);
};
}).call(TextHighlightRules.prototype);
oop.inherits(XmlHighlightRules, TextHighlightRules);
exports.XmlHighlightRules = XmlHighlightRules;
});
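// HTML highlight rules: extend the XML rules with unquoted attribute values,
// a tag-name -> group map used for theming, and embedded CSS and JavaScript
// highlighting inside <style> and <script> tags.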
ace.define("ace/mode/html_highlight_rules",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/css_highlight_rules","ace/mode/javascript_highlight_rules","ace/mode/xml_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var lang = require("../lib/lang");
var CssHighlightRules = require("./css_highlight_rules").CssHighlightRules;
var JavaScriptHighlightRules = require("./javascript_highlight_rules").JavaScriptHighlightRules;
var XmlHighlightRules = require("./xml_highlight_rules").XmlHighlightRules;
var tagMap = lang.createMap({
a : 'anchor',
button : 'form',
form : 'form',
img : 'image',
input : 'form',
label : 'form',
option : 'form',
script : 'script',
select : 'form',
textarea : 'form',
style : 'style',
table : 'table',
tbody : 'table',
td : 'table',
tfoot : 'table',
th : 'table',
tr : 'table'
});
var HtmlHighlightRules = function() {
XmlHighlightRules.call(this);
this.addRules({
attributes: [{
include : "tag_whitespace"
}, {
token : "entity.other.attribute-name.xml",
regex : "[-_a-zA-Z0-9:.]+"
}, {
token : "keyword.operator.attribute-equals.xml",
regex : "=",
push : [{
include: "tag_whitespace"
}, {
token : "string.unquoted.attribute-value.html",
regex : "[^<>='\"`\\s]+",
next : "pop"
}, {
token : "empty",
regex : "",
next : "pop"
}]
}, {
include : "attribute_value"
}],
tag: [{
token : function(start, tag) {
var group = tagMap[tag];
return ["meta.tag.punctuation." + (start == "<" ? "" : "end-") + "tag-open.xml",
"meta.tag" + (group ? "." + group : "") + ".tag-name.xml"];
},
regex : "(</?)([-_a-zA-Z0-9:.]+)",
next: "tag_stuff"
}],
tag_stuff: [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : "start"}
]
});
this.embedTagRules(CssHighlightRules, "css-", "style");
this.embedTagRules(new JavaScriptHighlightRules({jsx: false}).getRules(), "js-", "script");
if (this.constructor === HtmlHighlightRules)
this.normalizeRules();
};
oop.inherits(HtmlHighlightRules, XmlHighlightRules);
exports.HtmlHighlightRules = HtmlHighlightRules;
});
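// XML/HTML editing behaviours: pair quotes inside tags, auto-close an
// element when '>' is typed (skipping void elements), and indent the cursor
// between an open/close tag pair on Enter.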
ace.define("ace/mode/behaviour/xml",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator","ace/lib/lang"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Behaviour = require("../behaviour").Behaviour;
var TokenIterator = require("../../token_iterator").TokenIterator;
var lang = require("../../lib/lang");
function is(token, type) {
return token.type.lastIndexOf(type + ".xml") > -1;
}
var XmlBehaviour = function () {
this.add("string_dquotes", "insertion", function (state, action, editor, session, text) {
if (text == '"' || text == "'") {
var quote = text;
var selected = session.doc.getTextRange(editor.getSelectionRange());
if (selected !== "" && selected !== "'" && selected != '"' && editor.getWrapBehavioursEnabled()) {
return {
text: quote + selected + quote,
selection: false
};
}
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
var iterator = new TokenIterator(session, cursor.row, cursor.column);
var token = iterator.getCurrentToken();
if (rightChar == quote && (is(token, "attribute-value") || is(token, "string"))) {
return {
text: "",
selection: [1, 1]
};
}
if (!token)
token = iterator.stepBackward();
if (!token)
return;
while (is(token, "tag-whitespace") || is(token, "whitespace")) {
token = iterator.stepBackward();
}
var rightSpace = !rightChar || rightChar.match(/\s/);
if (is(token, "attribute-equals") && (rightSpace || rightChar == '>') || (is(token, "decl-attribute-equals") && (rightSpace || rightChar == '?'))) {
return {
text: quote + quote,
selection: [1, 1]
};
}
}
});
this.add("string_dquotes", "deletion", function(state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && (selected == '"' || selected == "'")) {
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
if (rightChar == selected) {
range.end.column++;
return range;
}
}
});
this.add("autoclosing", "insertion", function (state, action, editor, session, text) {
if (text == '>') {
var position = editor.getSelectionRange().start;
var iterator = new TokenIterator(session, position.row, position.column);
var token = iterator.getCurrentToken() || iterator.stepBackward();
if (!token || !(is(token, "tag-name") || is(token, "tag-whitespace") || is(token, "attribute-name") || is(token, "attribute-equals") || is(token, "attribute-value")))
return;
if (is(token, "reference.attribute-value"))
return;
if (is(token, "attribute-value")) {
var firstChar = token.value.charAt(0);
if (firstChar == '"' || firstChar == "'") {
var lastChar = token.value.charAt(token.value.length - 1);
var tokenEnd = iterator.getCurrentTokenColumn() + token.value.length;
if (tokenEnd > position.column || tokenEnd == position.column && firstChar != lastChar)
return;
}
}
while (!is(token, "tag-name")) {
token = iterator.stepBackward();
if (token.value == "<") {
token = iterator.stepForward();
break;
}
}
var tokenRow = iterator.getCurrentTokenRow();
var tokenColumn = iterator.getCurrentTokenColumn();
if (is(iterator.stepBackward(), "end-tag-open"))
return;
var element = token.value;
if (tokenRow == position.row)
element = element.substring(0, position.column - tokenColumn);
if (this.voidElements.hasOwnProperty(element.toLowerCase()))
return;
return {
text: ">" + "</" + element + ">",
selection: [1, 1]
};
}
});
this.add("autoindent", "insertion", function (state, action, editor, session, text) {
if (text == "\n") {
var cursor = editor.getCursorPosition();
var line = session.getLine(cursor.row);
var iterator = new TokenIterator(session, cursor.row, cursor.column);
var token = iterator.getCurrentToken();
if (token && token.type.indexOf("tag-close") !== -1) {
if (token.value == "/>")
return;
while (token && token.type.indexOf("tag-name") === -1) {
token = iterator.stepBackward();
}
if (!token) {
return;
}
var tag = token.value;
var row = iterator.getCurrentTokenRow();
token = iterator.stepBackward();
if (!token || token.type.indexOf("end-tag") !== -1) {
return;
}
if (this.voidElements && !this.voidElements[tag]) {
var nextToken = session.getTokenAt(cursor.row, cursor.column+1);
var line = session.getLine(row);
var nextIndent = this.$getIndent(line);
var indent = nextIndent + session.getTabString();
if (nextToken && nextToken.value === "</") {
return {
text: "\n" + indent + "\n" + nextIndent,
selection: [1, indent.length, 1, indent.length]
};
} else {
return {
text: "\n" + indent
};
}
}
}
}
});
};
oop.inherits(XmlBehaviour, Behaviour);
exports.XmlBehaviour = XmlBehaviour;
});
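// Mixed folding: pick a sub-mode's fold rules by matching the current
// tokenizer state against a prefix (e.g. "js-", "css-"), falling back to
// the default mode's rules.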
ace.define("ace/mode/folding/mixed",["require","exports","module","ace/lib/oop","ace/mode/folding/fold_mode"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(defaultMode, subModes) {
this.defaultMode = defaultMode;
this.subModes = subModes;
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.$getMode = function(state) {
if (typeof state != "string")
state = state[0];
for (var key in this.subModes) {
if (state.indexOf(key) === 0)
return this.subModes[key];
}
return null;
};
this.$tryMode = function(state, session, foldStyle, row) {
var mode = this.$getMode(state);
return (mode ? mode.getFoldWidget(session, foldStyle, row) : "");
};
this.getFoldWidget = function(session, foldStyle, row) {
return (
this.$tryMode(session.getState(row-1), session, foldStyle, row) ||
this.$tryMode(session.getState(row), session, foldStyle, row) ||
this.defaultMode.getFoldWidget(session, foldStyle, row)
);
};
this.getFoldWidgetRange = function(session, foldStyle, row) {
var mode = this.$getMode(session.getState(row-1));
if (!mode || !mode.getFoldWidget(session, foldStyle, row))
mode = this.$getMode(session.getState(row));
if (!mode || !mode.getFoldWidget(session, foldStyle, row))
mode = this.defaultMode;
return mode.getFoldWidgetRange(session, foldStyle, row);
};
}).call(FoldMode.prototype);
});
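// XML folding: walk tag tokens forward or backward with a TokenIterator,
// keeping a stack of open tags, and fold from an opening tag to its
// matching end tag (tolerating optional end tags).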
ace.define("ace/mode/folding/xml",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/range","ace/mode/folding/fold_mode","ace/token_iterator"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var lang = require("../../lib/lang");
var Range = require("../../range").Range;
var BaseFoldMode = require("./fold_mode").FoldMode;
var TokenIterator = require("../../token_iterator").TokenIterator;
var FoldMode = exports.FoldMode = function(voidElements, optionalEndTags) {
BaseFoldMode.call(this);
this.voidElements = voidElements || {};
this.optionalEndTags = oop.mixin({}, this.voidElements);
if (optionalEndTags)
oop.mixin(this.optionalEndTags, optionalEndTags);
};
oop.inherits(FoldMode, BaseFoldMode);
var Tag = function() {
this.tagName = "";
this.closing = false;
this.selfClosing = false;
this.start = {row: 0, column: 0};
this.end = {row: 0, column: 0};
};
function is(token, type) {
return token.type.lastIndexOf(type + ".xml") > -1;
}
(function() {
this.getFoldWidget = function(session, foldStyle, row) {
var tag = this._getFirstTagInLine(session, row);
if (!tag)
return "";
if (tag.closing || (!tag.tagName && tag.selfClosing))
return foldStyle == "markbeginend" ? "end" : "";
if (!tag.tagName || tag.selfClosing || this.voidElements.hasOwnProperty(tag.tagName.toLowerCase()))
return "";
if (this._findEndTagInLine(session, row, tag.tagName, tag.end.column))
return "";
return "start";
};
this._getFirstTagInLine = function(session, row) {
var tokens = session.getTokens(row);
var tag = new Tag();
for (var i = 0; i < tokens.length; i++) {
var token = tokens[i];
if (is(token, "tag-open")) {
tag.end.column = tag.start.column + token.value.length;
tag.closing = is(token, "end-tag-open");
token = tokens[++i];
if (!token)
return null;
tag.tagName = token.value;
tag.end.column += token.value.length;
for (i++; i < tokens.length; i++) {
token = tokens[i];
tag.end.column += token.value.length;
if (is(token, "tag-close")) {
tag.selfClosing = token.value == '/>';
break;
}
}
return tag;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == '/>';
return tag;
}
tag.start.column += token.value.length;
}
return null;
};
this._findEndTagInLine = function(session, row, tagName, startColumn) {
var tokens = session.getTokens(row);
var column = 0;
for (var i = 0; i < tokens.length; i++) {
var token = tokens[i];
column += token.value.length;
if (column < startColumn)
continue;
if (is(token, "end-tag-open")) {
token = tokens[i + 1];
if (token && token.value == tagName)
return true;
}
}
return false;
};
this._readTagForward = function(iterator) {
var token = iterator.getCurrentToken();
if (!token)
return null;
var tag = new Tag();
do {
if (is(token, "tag-open")) {
tag.closing = is(token, "end-tag-open");
tag.start.row = iterator.getCurrentTokenRow();
tag.start.column = iterator.getCurrentTokenColumn();
} else if (is(token, "tag-name")) {
tag.tagName = token.value;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == "/>";
tag.end.row = iterator.getCurrentTokenRow();
tag.end.column = iterator.getCurrentTokenColumn() + token.value.length;
iterator.stepForward();
return tag;
}
} while(token = iterator.stepForward());
return null;
};
this._readTagBackward = function(iterator) {
var token = iterator.getCurrentToken();
if (!token)
return null;
var tag = new Tag();
do {
if (is(token, "tag-open")) {
tag.closing = is(token, "end-tag-open");
tag.start.row = iterator.getCurrentTokenRow();
tag.start.column = iterator.getCurrentTokenColumn();
iterator.stepBackward();
return tag;
} else if (is(token, "tag-name")) {
tag.tagName = token.value;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == "/>";
tag.end.row = iterator.getCurrentTokenRow();
tag.end.column = iterator.getCurrentTokenColumn() + token.value.length;
}
} while(token = iterator.stepBackward());
return null;
};
this._pop = function(stack, tag) {
while (stack.length) {
var top = stack[stack.length-1];
if (!tag || top.tagName == tag.tagName) {
return stack.pop();
}
else if (this.optionalEndTags.hasOwnProperty(top.tagName)) {
stack.pop();
continue;
} else {
return null;
}
}
};
this.getFoldWidgetRange = function(session, foldStyle, row) {
var firstTag = this._getFirstTagInLine(session, row);
if (!firstTag)
return null;
var isBackward = firstTag.closing || firstTag.selfClosing;
var stack = [];
var tag;
if (!isBackward) {
var iterator = new TokenIterator(session, row, firstTag.start.column);
var start = {
row: row,
column: firstTag.start.column + firstTag.tagName.length + 2
};
if (firstTag.start.row == firstTag.end.row)
start.column = firstTag.end.column;
while (tag = this._readTagForward(iterator)) {
if (tag.selfClosing) {
if (!stack.length) {
tag.start.column += tag.tagName.length + 2;
tag.end.column -= 2;
return Range.fromPoints(tag.start, tag.end);
} else
continue;
}
if (tag.closing) {
this._pop(stack, tag);
if (stack.length == 0)
return Range.fromPoints(start, tag.start);
}
else {
stack.push(tag);
}
}
}
else {
var iterator = new TokenIterator(session, row, firstTag.end.column);
var end = {
row: row,
column: firstTag.start.column
};
while (tag = this._readTagBackward(iterator)) {
if (tag.selfClosing) {
if (!stack.length) {
tag.start.column += tag.tagName.length + 2;
tag.end.column -= 2;
return Range.fromPoints(tag.start, tag.end);
} else
continue;
}
if (!tag.closing) {
this._pop(stack, tag);
if (stack.length == 0) {
tag.start.column += tag.tagName.length + 2;
if (tag.start.row == tag.end.row && tag.start.column < tag.end.column)
tag.start.column = tag.end.column;
return Range.fromPoints(tag.start, end);
}
}
else {
stack.push(tag);
}
}
}
};
}).call(FoldMode.prototype);
});
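// HTML folding: XML tag folding by default, C-style folding inside the
// embedded "js-" and "css-" states.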
ace.define("ace/mode/folding/html",["require","exports","module","ace/lib/oop","ace/mode/folding/mixed","ace/mode/folding/xml","ace/mode/folding/cstyle"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var MixedFoldMode = require("./mixed").FoldMode;
var XmlFoldMode = require("./xml").FoldMode;
var CStyleFoldMode = require("./cstyle").FoldMode;
var FoldMode = exports.FoldMode = function(voidElements, optionalTags) {
MixedFoldMode.call(this, new XmlFoldMode(voidElements, optionalTags), {
"js-": new CStyleFoldMode(),
"css-": new CStyleFoldMode()
});
};
oop.inherits(FoldMode, MixedFoldMode);
});
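// HTML completions: tag names, global and event attributes, per-tag
// attribute/value tables, and HTML entity completion after '&'.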
ace.define("ace/mode/html_completions",["require","exports","module","ace/token_iterator"], function(require, exports, module) {
"use strict";
var TokenIterator = require("../token_iterator").TokenIterator;
var commonAttributes = [
"accesskey",
"class",
"contenteditable",
"contextmenu",
"dir",
"draggable",
"dropzone",
"hidden",
"id",
"inert",
"itemid",
"itemprop",
"itemref",
"itemscope",
"itemtype",
"lang",
"spellcheck",
"style",
"tabindex",
"title",
"translate"
];
var eventAttributes = [
"onabort",
"onblur",
"oncancel",
"oncanplay",
"oncanplaythrough",
"onchange",
"onclick",
"onclose",
"oncontextmenu",
"oncuechange",
"ondblclick",
"ondrag",
"ondragend",
"ondragenter",
"ondragleave",
"ondragover",
"ondragstart",
"ondrop",
"ondurationchange",
"onemptied",
"onended",
"onerror",
"onfocus",
"oninput",
"oninvalid",
"onkeydown",
"onkeypress",
"onkeyup",
"onload",
"onloadeddata",
"onloadedmetadata",
"onloadstart",
"onmousedown",
"onmousemove",
"onmouseout",
"onmouseover",
"onmouseup",
"onmousewheel",
"onpause",
"onplay",
"onplaying",
"onprogress",
"onratechange",
"onreset",
"onscroll",
"onseeked",
"onseeking",
"onselect",
"onshow",
"onstalled",
"onsubmit",
"onsuspend",
"ontimeupdate",
"onvolumechange",
"onwaiting"
];
var globalAttributes = commonAttributes.concat(eventAttributes);
var attributeMap = {
"html": {"manifest": 1},
"head": {},
"title": {},
"base": {"href": 1, "target": 1},
"link": {"href": 1, "hreflang": 1, "rel": {"stylesheet": 1, "icon": 1}, "media": {"all": 1, "screen": 1, "print": 1}, "type": {"text/css": 1, "image/png": 1, "image/jpeg": 1, "image/gif": 1}, "sizes": 1},
"meta": {"http-equiv": {"content-type": 1}, "name": {"description": 1, "keywords": 1}, "content": {"text/html; charset=UTF-8": 1}, "charset": 1},
"style": {"type": 1, "media": {"all": 1, "screen": 1, "print": 1}, "scoped": 1},
"script": {"charset": 1, "type": {"text/javascript": 1}, "src": 1, "defer": 1, "async": 1},
"noscript": {"href": 1},
"body": {"onafterprint": 1, "onbeforeprint": 1, "onbeforeunload": 1, "onhashchange": 1, "onmessage": 1, "onoffline": 1, "onpopstate": 1, "onredo": 1, "onresize": 1, "onstorage": 1, "onundo": 1, "onunload": 1},
"section": {},
"nav": {},
"article": {"pubdate": 1},
"aside": {},
"h1": {},
"h2": {},
"h3": {},
"h4": {},
"h5": {},
"h6": {},
"header": {},
"footer": {},
"address": {},
"main": {},
"p": {},
"hr": {},
"pre": {},
"blockquote": {"cite": 1},
"ol": {"start": 1, "reversed": 1},
"ul": {},
"li": {"value": 1},
"dl": {},
"dt": {},
"dd": {},
"figure": {},
"figcaption": {},
"div": {},
"a": {"href": 1, "target": {"_blank": 1, "top": 1}, "ping": 1, "rel": {"nofollow": 1, "alternate": 1, "author": 1, "bookmark": 1, "help": 1, "license": 1, "next": 1, "noreferrer": 1, "prefetch": 1, "prev": 1, "search": 1, "tag": 1}, "media": 1, "hreflang": 1, "type": 1},
"em": {},
"strong": {},
"small": {},
"s": {},
"cite": {},
"q": {"cite": 1},
"dfn": {},
"abbr": {},
"data": {},
"time": {"datetime": 1},
"code": {},
"var": {},
"samp": {},
"kbd": {},
"sub": {},
"sup": {},
"i": {},
"b": {},
"u": {},
"mark": {},
"ruby": {},
"rt": {},
"rp": {},
"bdi": {},
"bdo": {},
"span": {},
"br": {},
"wbr": {},
"ins": {"cite": 1, "datetime": 1},
"del": {"cite": 1, "datetime": 1},
"img": {"alt": 1, "src": 1, "height": 1, "width": 1, "usemap": 1, "ismap": 1},
"iframe": {"name": 1, "src": 1, "height": 1, "width": 1, "sandbox": {"allow-same-origin": 1, "allow-top-navigation": 1, "allow-forms": 1, "allow-scripts": 1}, "seamless": {"seamless": 1}},
"embed": {"src": 1, "height": 1, "width": 1, "type": 1},
"object": {"param": 1, "data": 1, "type": 1, "height" : 1, "width": 1, "usemap": 1, "name": 1, "form": 1, "classid": 1},
"param": {"name": 1, "value": 1},
"video": {"src": 1, "autobuffer": 1, "autoplay": {"autoplay": 1}, "loop": {"loop": 1}, "controls": {"controls": 1}, "width": 1, "height": 1, "poster": 1, "muted": {"muted": 1}, "preload": {"auto": 1, "metadata": 1, "none": 1}},
"audio": {"src": 1, "autobuffer": 1, "autoplay": {"autoplay": 1}, "loop": {"loop": 1}, "controls": {"controls": 1}, "muted": {"muted": 1}, "preload": {"auto": 1, "metadata": 1, "none": 1 }},
"source": {"src": 1, "type": 1, "media": 1},
"track": {"kind": 1, "src": 1, "srclang": 1, "label": 1, "default": 1},
"canvas": {"width": 1, "height": 1},
"map": {"name": 1},
"area": {"shape": 1, "coords": 1, "href": 1, "hreflang": 1, "alt": 1, "target": 1, "media": 1, "rel": 1, "ping": 1, "type": 1},
"svg": {},
"math": {},
"table": {"summary": 1},
"caption": {},
"colgroup": {"span": 1},
"col": {"span": 1},
"tbody": {},
"thead": {},
"tfoot": {},
"tr": {},
"td": {"headers": 1, "rowspan": 1, "colspan": 1},
"th": {"headers": 1, "rowspan": 1, "colspan": 1, "scope": 1},
"form": {"accept-charset": 1, "action": 1, "autocomplete": 1, "enctype": {"multipart/form-data": 1, "application/x-www-form-urlencoded": 1}, "method": {"get": 1, "post": 1}, "name": 1, "novalidate": 1, "target": {"_blank": 1, "top": 1}},
"fieldset": {"disabled": 1, "form": 1, "name": 1},
"legend": {},
"label": {"form": 1, "for": 1},
"input": {
"type": {"text": 1, "password": 1, "hidden": 1, "checkbox": 1, "submit": 1, "radio": 1, "file": 1, "button": 1, "reset": 1, "image": 31, "color": 1, "date": 1, "datetime": 1, "datetime-local": 1, "email": 1, "month": 1, "number": 1, "range": 1, "search": 1, "tel": 1, "time": 1, "url": 1, "week": 1},
"accept": 1, "alt": 1, "autocomplete": {"on": 1, "off": 1}, "autofocus": {"autofocus": 1}, "checked": {"checked": 1}, "disabled": {"disabled": 1}, "form": 1, "formaction": 1, "formenctype": {"application/x-www-form-urlencoded": 1, "multipart/form-data": 1, "text/plain": 1}, "formmethod": {"get": 1, "post": 1}, "formnovalidate": {"formnovalidate": 1}, "formtarget": {"_blank": 1, "_self": 1, "_parent": 1, "_top": 1}, "height": 1, "list": 1, "max": 1, "maxlength": 1, "min": 1, "multiple": {"multiple": 1}, "name": 1, "pattern": 1, "placeholder": 1, "readonly": {"readonly": 1}, "required": {"required": 1}, "size": 1, "src": 1, "step": 1, "width": 1, "files": 1, "value": 1},
"button": {"autofocus": 1, "disabled": {"disabled": 1}, "form": 1, "formaction": 1, "formenctype": 1, "formmethod": 1, "formnovalidate": 1, "formtarget": 1, "name": 1, "value": 1, "type": {"button": 1, "submit": 1}},
"select": {"autofocus": 1, "disabled": 1, "form": 1, "multiple": {"multiple": 1}, "name": 1, "size": 1, "readonly":{"readonly": 1}},
"datalist": {},
"optgroup": {"disabled": 1, "label": 1},
"option": {"disabled": 1, "selected": 1, "label": 1, "value": 1},
"textarea": {"autofocus": {"autofocus": 1}, "disabled": {"disabled": 1}, "form": 1, "maxlength": 1, "name": 1, "placeholder": 1, "readonly": {"readonly": 1}, "required": {"required": 1}, "rows": 1, "cols": 1, "wrap": {"on": 1, "off": 1, "hard": 1, "soft": 1}},
"keygen": {"autofocus": 1, "challenge": {"challenge": 1}, "disabled": {"disabled": 1}, "form": 1, "keytype": {"rsa": 1, "dsa": 1, "ec": 1}, "name": 1},
"output": {"for": 1, "form": 1, "name": 1},
"progress": {"value": 1, "max": 1},
"meter": {"value": 1, "min": 1, "max": 1, "low": 1, "high": 1, "optimum": 1},
"details": {"open": 1},
"summary": {},
"command": {"type": 1, "label": 1, "icon": 1, "disabled": 1, "checked": 1, "radiogroup": 1, "command": 1},
"menu": {"type": 1, "label": 1},
"dialog": {"open": 1}
};
var elements = Object.keys(attributeMap);
function is(token, type) {
return token.type.lastIndexOf(type + ".xml") > -1;
}
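// Walk backward from the cursor position until the enclosing tag-name token is found.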
function findTagName(session, pos) {
var iterator = new TokenIterator(session, pos.row, pos.column);
var token = iterator.getCurrentToken();
while (token && !is(token, "tag-name")){
token = iterator.stepBackward();
}
if (token)
return token.value;
}
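// Same backward walk, but stop at the nearest attribute-name token.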
function findAttributeName(session, pos) {
var iterator = new TokenIterator(session, pos.row, pos.column);
var token = iterator.getCurrentToken();
while (token && !is(token, "attribute-name")){
token = iterator.stepBackward();
}
if (token)
return token.value;
}
var HtmlCompletions = function() {
};
(function() {
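// Dispatch on the token under the cursor: tag names, attribute names, attribute values, or HTML entities after "&".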
this.getCompletions = function(state, session, pos, prefix) {
var token = session.getTokenAt(pos.row, pos.column);
if (!token)
return [];
if (is(token, "tag-name") || is(token, "tag-open") || is(token, "end-tag-open"))
return this.getTagCompletions(state, session, pos, prefix);
if (is(token, "tag-whitespace") || is(token, "attribute-name"))
return this.getAttributeCompletions(state, session, pos, prefix);
if (is(token, "attribute-value"))
return this.getAttributeValueCompletions(state, session, pos, prefix);
var line = session.getLine(pos.row).substr(0, pos.column);
    if (/&[a-z]*$/i.test(line))
return this.getHTMLEntityCompletions(state, session, pos, prefix);
return [];
};
this.getTagCompletions = function(state, session, pos, prefix) {
return elements.map(function(element){
return {
value: element,
meta: "tag",
score: Number.MAX_VALUE
};
});
};
this.getAttributeCompletions = function(state, session, pos, prefix) {
var tagName = findTagName(session, pos);
if (!tagName)
return [];
var attributes = globalAttributes;
if (tagName in attributeMap) {
attributes = attributes.concat(Object.keys(attributeMap[tagName]));
}
return attributes.map(function(attribute){
return {
caption: attribute,
snippet: attribute + '="$0"',
meta: "attribute",
score: Number.MAX_VALUE
};
});
};
this.getAttributeValueCompletions = function(state, session, pos, prefix) {
var tagName = findTagName(session, pos);
var attributeName = findAttributeName(session, pos);
if (!tagName)
return [];
var values = [];
if (tagName in attributeMap && attributeName in attributeMap[tagName] && typeof attributeMap[tagName][attributeName] === "object") {
values = Object.keys(attributeMap[tagName][attributeName]);
}
return values.map(function(value){
return {
caption: value,
snippet: value,
meta: "attribute value",
score: Number.MAX_VALUE
};
});
};
this.getHTMLEntityCompletions = function(state, session, pos, prefix) {
var values = ['Aacute;', 'aacute;', 'Acirc;', 'acirc;', 'acute;', 'AElig;', 'aelig;', 'Agrave;', 'agrave;', 'alefsym;', 'Alpha;', 'alpha;', 'amp;', 'and;', 'ang;', 'Aring;', 'aring;', 'asymp;', 'Atilde;', 'atilde;', 'Auml;', 'auml;', 'bdquo;', 'Beta;', 'beta;', 'brvbar;', 'bull;', 'cap;', 'Ccedil;', 'ccedil;', 'cedil;', 'cent;', 'Chi;', 'chi;', 'circ;', 'clubs;', 'cong;', 'copy;', 'crarr;', 'cup;', 'curren;', 'Dagger;', 'dagger;', 'dArr;', 'darr;', 'deg;', 'Delta;', 'delta;', 'diams;', 'divide;', 'Eacute;', 'eacute;', 'Ecirc;', 'ecirc;', 'Egrave;', 'egrave;', 'empty;', 'emsp;', 'ensp;', 'Epsilon;', 'epsilon;', 'equiv;', 'Eta;', 'eta;', 'ETH;', 'eth;', 'Euml;', 'euml;', 'euro;', 'exist;', 'fnof;', 'forall;', 'frac12;', 'frac14;', 'frac34;', 'frasl;', 'Gamma;', 'gamma;', 'ge;', 'gt;', 'hArr;', 'harr;', 'hearts;', 'hellip;', 'Iacute;', 'iacute;', 'Icirc;', 'icirc;', 'iexcl;', 'Igrave;', 'igrave;', 'image;', 'infin;', 'int;', 'Iota;', 'iota;', 'iquest;', 'isin;', 'Iuml;', 'iuml;', 'Kappa;', 'kappa;', 'Lambda;', 'lambda;', 'lang;', 'laquo;', 'lArr;', 'larr;', 'lceil;', 'ldquo;', 'le;', 'lfloor;', 'lowast;', 'loz;', 'lrm;', 'lsaquo;', 'lsquo;', 'lt;', 'macr;', 'mdash;', 'micro;', 'middot;', 'minus;', 'Mu;', 'mu;', 'nabla;', 'nbsp;', 'ndash;', 'ne;', 'ni;', 'not;', 'notin;', 'nsub;', 'Ntilde;', 'ntilde;', 'Nu;', 'nu;', 'Oacute;', 'oacute;', 'Ocirc;', 'ocirc;', 'OElig;', 'oelig;', 'Ograve;', 'ograve;', 'oline;', 'Omega;', 'omega;', 'Omicron;', 'omicron;', 'oplus;', 'or;', 'ordf;', 'ordm;', 'Oslash;', 'oslash;', 'Otilde;', 'otilde;', 'otimes;', 'Ouml;', 'ouml;', 'para;', 'part;', 'permil;', 'perp;', 'Phi;', 'phi;', 'Pi;', 'pi;', 'piv;', 'plusmn;', 'pound;', 'Prime;', 'prime;', 'prod;', 'prop;', 'Psi;', 'psi;', 'quot;', 'radic;', 'rang;', 'raquo;', 'rArr;', 'rarr;', 'rceil;', 'rdquo;', 'real;', 'reg;', 'rfloor;', 'Rho;', 'rho;', 'rlm;', 'rsaquo;', 'rsquo;', 'sbquo;', 'Scaron;', 'scaron;', 'sdot;', 'sect;', 'shy;', 'Sigma;', 'sigma;', 'sigmaf;', 'sim;', 'spades;', 'sub;', 'sube;', 'sum;', 'sup;', 'sup1;', 'sup2;', 'sup3;', 'supe;', 'szlig;', 'Tau;', 'tau;', 'there4;', 'Theta;', 'theta;', 'thetasym;', 'thinsp;', 'THORN;', 'thorn;', 'tilde;', 'times;', 'trade;', 'Uacute;', 'uacute;', 'uArr;', 'uarr;', 'Ucirc;', 'ucirc;', 'Ugrave;', 'ugrave;', 'uml;', 'upsih;', 'Upsilon;', 'upsilon;', 'Uuml;', 'uuml;', 'weierp;', 'Xi;', 'xi;', 'Yacute;', 'yacute;', 'yen;', 'Yuml;', 'yuml;', 'Zeta;', 'zeta;', 'zwj;', 'zwnj;'];
return values.map(function(value){
return {
caption: value,
snippet: value,
meta: "html entity",
score: Number.MAX_VALUE
};
});
};
}).call(HtmlCompletions.prototype);
exports.HtmlCompletions = HtmlCompletions;
});
ace.define("ace/mode/html",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/text","ace/mode/javascript","ace/mode/css","ace/mode/html_highlight_rules","ace/mode/behaviour/xml","ace/mode/folding/html","ace/mode/html_completions","ace/worker/worker_client"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var lang = require("../lib/lang");
var TextMode = require("./text").Mode;
var JavaScriptMode = require("./javascript").Mode;
var CssMode = require("./css").Mode;
var HtmlHighlightRules = require("./html_highlight_rules").HtmlHighlightRules;
var XmlBehaviour = require("./behaviour/xml").XmlBehaviour;
var HtmlFoldMode = require("./folding/html").FoldMode;
var HtmlCompletions = require("./html_completions").HtmlCompletions;
var WorkerClient = require("../worker/worker_client").WorkerClient;
var voidElements = ["area", "base", "br", "col", "embed", "hr", "img", "input", "keygen", "link", "meta", "menuitem", "param", "source", "track", "wbr"];
var optionalEndTags = ["li", "dt", "dd", "p", "rt", "rp", "optgroup", "option", "colgroup", "td", "th"];
var Mode = function(options) {
this.fragmentContext = options && options.fragmentContext;
this.HighlightRules = HtmlHighlightRules;
this.$behaviour = new XmlBehaviour();
this.$completer = new HtmlCompletions();
this.createModeDelegates({
"js-": JavaScriptMode,
"css-": CssMode
});
this.foldingRules = new HtmlFoldMode(this.voidElements, lang.arrayToMap(optionalEndTags));
};
oop.inherits(Mode, TextMode);
(function() {
this.blockComment = {start: "<!--", end: "-->"};
this.voidElements = lang.arrayToMap(voidElements);
this.getNextLineIndent = function(state, line, tab) {
return this.$getIndent(line);
};
this.checkOutdent = function(state, line, input) {
return false;
};
this.getCompletions = function(state, session, pos, prefix) {
return this.$completer.getCompletions(state, session, pos, prefix);
};
this.createWorker = function(session) {
if (this.constructor != Mode)
return;
var worker = new WorkerClient(["ace"], "ace/mode/html_worker", "Worker");
worker.attachToDocument(session.getDocument());
if (this.fragmentContext)
worker.call("setOptions", [{context: this.fragmentContext}]);
worker.on("error", function(e) {
session.setAnnotations(e.data);
});
worker.on("terminate", function() {
session.clearAnnotations();
});
return worker;
};
this.$id = "ace/mode/html";
}).call(Mode.prototype);
exports.Mode = Mode;
});
|
PypiClean
|
/pm2mp-0.0.98-py3-none-any.whl/pm2mp-0.0.98.data/data/node_modules/autocomplete.js
|
'use strict';
const Select = require('./select');
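// Returns a styler: wraps the first case-insensitive occurrence of `input` within a string in the given color function.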
const highlight = (input, color) => {
let val = input.toLowerCase();
return str => {
let s = str.toLowerCase();
let i = s.indexOf(val);
let colored = color(str.slice(i, i + val.length));
return i >= 0 ? str.slice(0, i) + colored + str.slice(i + val.length) : str;
};
};
class AutoComplete extends Select {
constructor(options) {
super(options);
this.cursorShow();
}
moveCursor(n) {
this.state.cursor += n;
}
dispatch(ch) {
return this.append(ch);
}
space(ch) {
return this.options.multiple ? super.space(ch) : this.append(ch);
}
append(ch) {
let { cursor, input } = this.state;
this.input = input.slice(0, cursor) + ch + input.slice(cursor);
this.moveCursor(1);
return this.complete();
}
delete() {
let { cursor, input } = this.state;
if (!input) return this.alert();
this.input = input.slice(0, cursor - 1) + input.slice(cursor);
this.moveCursor(-1);
return this.complete();
}
deleteForward() {
let { cursor, input } = this.state;
if (input[cursor] === void 0) return this.alert();
this.input = `${input}`.slice(0, cursor) + `${input}`.slice(cursor + 1);
return this.complete();
}
number(ch) {
return this.append(ch);
}
async complete() {
this.completing = true;
this.choices = await this.suggest(this.input, this.state._choices);
this.state.limit = void 0; // allow getter/setter to reset limit
this.index = Math.min(Math.max(this.visible.length - 1, 0), this.index);
await this.render();
this.completing = false;
}
suggest(input = this.input, choices = this.state._choices) {
if (typeof this.options.suggest === 'function') {
return this.options.suggest.call(this, input, choices);
}
let str = input.toLowerCase();
return choices.filter(ch => ch.message.toLowerCase().includes(str));
}
pointer() {
return '';
}
format() {
if (!this.focused) return this.input;
if (this.options.multiple && this.state.submitted) {
return this.selected.map(ch => this.styles.primary(ch.message)).join(', ');
}
if (this.state.submitted) {
let value = this.value = this.input = this.focused.value;
return this.styles.primary(value);
}
return this.input;
}
async render() {
if (this.state.status !== 'pending') return super.render();
let style = this.options.highlight
? this.options.highlight.bind(this)
: this.styles.placeholder;
let color = highlight(this.input, style);
let choices = this.choices;
this.choices = choices.map(ch => ({ ...ch, message: color(ch.message) }));
await super.render();
this.choices = choices;
}
submit() {
if (this.options.multiple) {
this.value = this.selected.map(ch => ch.name);
}
return super.submit();
}
}
module.exports = AutoComplete;
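// Usage sketch (assumes an enquirer-style prompt API; the name and choices below are illustrative):
//   const prompt = new AutoComplete({
//     name: 'flavor',
//     message: 'Pick a flavor',
//     choices: ['almond', 'apple', 'banana']
//   });
//   prompt.run().then(answer => console.log(answer)).catch(console.error);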
|
PypiClean
|
/eeg-to-fmri-0.0.9.tar.gz/eeg-to-fmri-0.0.9/src/eeg_to_fmri/regularizers/path_sgd.py
|
import tensorflow as tf
import gc
OPTIMIZER=tf.keras.optimizers.Adam
def optimizer(name, input_shape, model, lr):
if(name=="PathAdam"):
return PathOptimizer(input_shape, model, lr)
elif(name=="Adam"):
return tf.keras.optimizers.Adam(lr)
else:
raise NotImplementedError
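# Usage sketch (assumes a built tf.keras model; shapes and learning rate are illustrative):
#   model = tf.keras.Sequential([tf.keras.layers.Dense(2), tf.keras.layers.Dense(2)])
#   model.build((10, 1))
#   opt = optimizer("PathAdam", (10, 1), model, 1e-3)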
class PathOptimizer(OPTIMIZER):
"""
This class implements the tensorflow optimizer proposed in https://arxiv.org/abs/1506.02617
Example:
>>> import tensorflow as tf
>>>
>>> model=tf.keras.Sequential([tf.keras.layers.Dense(2), tf.keras.layers.Dense(2)])
>>> input_shape=(10,1)
>>> x = tf.keras.initializers.GlorotUniform()(input_shape)
>>> model.build(input_shape)
>>>
>>> #assert computations of gradients
>>> with tf.GradientTape() as tape:
>>> tape.watch(model.trainable_variables)
>>> y = model(x)
>>> gradients=tape.gradient(y,model.trainable_variables)
>>>
>>> #clone model and assign its l1 weights
>>> path_model=tf.keras.models.clone_model(model)
>>> for param in range(len(model.trainable_variables)):
>>> path_model.trainable_variables[param].assign(tf.abs(model.trainable_variables[param]))
>>>
>>> #compute scale
>>> with tf.GradientTape() as tape:
>>> tape.watch(path_model.trainable_variables)
>>> y = tf.reduce_sum(path_model(tf.ones(input_shape)))
>>> path_norm=tape.gradient(y, path_model.trainable_variables)
>>>
>>> #compute ratio
>>> sgd_norm=0.
>>> pathsgd_norm=0.
>>> model_params = model.trainable_variables
>>> path_params = path_model.trainable_variables
>>> for param in range(len(model_params)):
>>> sgd_norm += tf.norm(gradients[param], ord=1)
>>> pathsgd_norm += tf.norm(gradients[param]/path_norm[param], ord=1)
>>> ratio = ( sgd_norm / pathsgd_norm ) ** 1
>>>
>>> print("Gradients before:", gradients)
>>> #gradient update
>>> for param in range(len(model_params)):
>>> gradients[param]=(gradients[param]/path_norm[param])*ratio
>>>
>>> print("Gradients before:", gradients)
"""
def __init__(self, input_shape, model, lr, name="PathOptimizer", p=2, **kwargs):
self.model=model
self.path_norm=None
self.ratio=None
self.input_shape=input_shape
self.p=p
super(PathOptimizer, self).__init__(lr, name=name, **kwargs)
def apply_gradients(self, grads_and_vars, name=None, **kwargs,):
"""
Example:
>>> import tensorflow as tf
>>> from path_sgd import PathOptimizer
>>>
>>> model=tf.keras.Sequential([tf.keras.layers.Dense(2), tf.keras.layers.Dense(2)])
>>> input_shape=(10,1)
>>> x = tf.keras.initializers.GlorotUniform()(input_shape)
>>> model.build(input_shape)
>>>
>>> with tf.GradientTape() as tape:
>>> tape.watch(model.trainable_variables)
>>> y = model(x)
>>>
>>> gradients=tape.gradient(y,model.trainable_variables)
>>> optimizer=PathOptimizer(input_shape, model, 0.01)
>>> optimizer.apply_gradients(zip(gradients, model.trainable_variables))
"""
self.n_params=len(self.model.trainable_variables)
self.compute_path_norm()
unpacked_gradients=list(zip(*grads_and_vars))
gradients = list(unpacked_gradients[0])
variables = list(unpacked_gradients[1])
if(self.ratio is None):
#compute ratio
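# the ratio rescales the path-normalized gradients so the overall p-norm of the update matches that of the raw gradients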
sgd_norm=0.
pathsgd_norm=0.
for param in range(self.n_params):
sgd_norm += tf.norm(gradients[param], ord=self.p)
pathsgd_norm += tf.norm(gradients[param]/self.path_norm[param], ord=self.p)
self.ratio = ( sgd_norm / pathsgd_norm ) ** (1/self.p)
for param in range(self.n_params):
gradients[param]=(gradients[param]/self.path_norm[param])*self.ratio
gc.collect()
return super().apply_gradients(zip(gradients, variables), name=name)
def compute_path_norm(self,):
#clone model and assign its l1 weights
path_model=type(self.model).from_config(self.model.get_config())
input_shape_tensor=None
#build input
if(type(self.input_shape) is list):
input_shape_tensor=[tf.ones(input_shape) for input_shape in self.input_shape]
path_model.build([input_shape for input_shape in self.input_shape])
else:
input_shape_tensor=tf.ones(self.input_shape)
path_model.build(self.input_shape[1:])
for param in range(len(self.model.variables)):
if(self.p==1):
path_model.variables[param].assign((self.model.variables[param]**2)**0.5)
else:
path_model.variables[param].assign(self.model.variables[param]**self.p)
path_model.training=False
#compute scale
with tf.GradientTape() as tape:
tape.watch(path_model.trainable_variables)
y=path_model(input_shape_tensor)
if(type(y) is list):
y=tf.reduce_sum([tf.reduce_sum(y_i) for y_i in y])
else:
y=tf.reduce_sum(y)
self.path_norm=tape.gradient(y, path_model.trainable_variables)
del path_model
|
PypiClean
|
/plato-draw-1.12.0.tar.gz/plato-draw-1.12.0/doc/source/troubleshooting.rst
|
=======================
Troubleshooting and FAQ
=======================
.. note::
Depending on which backends you want to use, there may be
additional steps required for installation; consult the advice
`here
<https://bitbucket.org/snippets/glotzer/nMg8Gr/plato-dependency-installation-tips>`_.
Jupyter Notebook Issues
=======================
**When starting a jupyter notebook, I get a "Permission denied" error for a linking operation.**
This may be related to jupyter upgrades. Manually remove the symlink
and the notebook should be able to proceed once more.
**When running in a jupyter notebook, nothing is displayed.**
The solution to this problem depends on more details.
- *The canvas is displayed entirely black with "Uncaught TypeError: Cannot read property 'handle' of undefined" (or similar language)*: After the `canvas.show()` command in the cell, add a line `import time;time.sleep(.1)`. You may need to increase the argument of `time.sleep()`. This is due to a race condition in vispy; a sketch of the fix follows this list.
- *I get an error 404 in the browser console for vispy.min.js* - Make sure that jupyter, ipywidgets, and all of the jupyter components are up to date (and have compatible versions, see https://bitbucket.org/snippets/glotzer/nMg8Gr/plato-dependency-installation-tips).
- *I get an error 404 in the browser console for webgl-backend.js* - Try removing your jupyter notebook cache (~/.jupyter and ~/Library/Jupyter on OSX) and restarting jupyter.
- Make sure the `jupyter` executable you are using is in the same virtualenv or conda environment as plato and its dependencies
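For the race-condition case above, a minimal sketch of the workaround (`canvas` being whatever vispy canvas object the notebook created)::
canvas.show()
import time; time.sleep(.1)  # increase the delay if the canvas stays black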
**Things aren't displayed and I get a message "zmq message arrived on closed channel" in the console.**
Try running your jupyter notebook command with an increased data rate
limit::
jupyter notebook --NotebookApp.iopub_data_rate_limit=1000000000
|
PypiClean
|
/theogravity_pulumi-fusionauth-3.0.5.tar.gz/theogravity_pulumi-fusionauth-3.0.5/theogravity_pulumi-fusionauth/fusion_auth_idp_open_id_connect.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FusionAuthIdpOpenIdConnectArgs', 'FusionAuthIdpOpenIdConnect']
@pulumi.input_type
class FusionAuthIdpOpenIdConnectArgs:
def __init__(__self__, *,
button_text: pulumi.Input[str],
oauth2_client_id: pulumi.Input[str],
application_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]] = None,
button_image_url: Optional[pulumi.Input[str]] = None,
debug: Optional[pulumi.Input[bool]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_id: Optional[pulumi.Input[str]] = None,
lambda_reconcile_id: Optional[pulumi.Input[str]] = None,
linking_strategy: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
oauth2_authorization_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_client_authentication_method: Optional[pulumi.Input[str]] = None,
oauth2_client_secret: Optional[pulumi.Input[str]] = None,
oauth2_email_claim: Optional[pulumi.Input[str]] = None,
oauth2_issuer: Optional[pulumi.Input[str]] = None,
oauth2_scope: Optional[pulumi.Input[str]] = None,
oauth2_token_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_unique_id_claim: Optional[pulumi.Input[str]] = None,
oauth2_user_info_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_username_claim: Optional[pulumi.Input[str]] = None,
post_request: Optional[pulumi.Input[bool]] = None,
tenant_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]] = None):
"""
The set of arguments for constructing a FusionAuthIdpOpenIdConnect resource.
:param pulumi.Input[str] button_text: The top-level button text to use on the FusionAuth login page for this Identity Provider.
:param pulumi.Input[str] oauth2_client_id: The top-level client id for your Application.
:param pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]] application_configurations: The configuration for each Application that the identity provider is enabled for.
:param pulumi.Input[str] button_image_url: The top-level button image (URL) to use on the FusionAuth login page for this Identity Provider.
:param pulumi.Input[bool] debug: Determines if debug is enabled for this provider. When enabled, each time this provider is invoked to reconcile a login an Event Log will be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domains: This is an optional list of domains that this OpenID Connect provider should be used for. This converts the FusionAuth login form to a domain-based login form. This type of form first asks the user for their email. FusionAuth then uses their email to determine if an OpenID Connect identity provider should be used. If an OpenID Connect provider should be used, the browser is redirected to the authorization endpoint of that identity provider. Otherwise, the password field is revealed on the form so that the user can log in using FusionAuth.
:param pulumi.Input[bool] enabled: Determines if this provider is enabled. If it is false then it will be disabled globally.
:param pulumi.Input[str] idp_id: The ID to use for the new identity provider. If not specified a secure random UUID will be generated.
:param pulumi.Input[str] lambda_reconcile_id: The unique Id of the lambda to be used during the user reconcile process to map custom claims from the external identity provider to the FusionAuth user.
:param pulumi.Input[str] linking_strategy: The linking strategy to use when creating the link between the {idp_display_name} Identity Provider and the user.
:param pulumi.Input[str] name: The name of this OpenID Connect identity provider. This is only used for display purposes.
:param pulumi.Input[str] oauth2_authorization_endpoint: The top-level authorization endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the authorization endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_client_authentication_method: The client authentication method to use with the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_client_secret: The top-level client secret to use with the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_email_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the email address.
:param pulumi.Input[str] oauth2_issuer: The top-level issuer URI for the OpenID Connect identity provider. If this is provided, the authorization endpoint, token endpoint and userinfo endpoint will all be resolved using the issuer URI plus /.well-known/openid-configuration.
:param pulumi.Input[str] oauth2_scope: The top-level scope that you are requesting from the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_token_endpoint: The top-level token endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the token endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_unique_id_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the user Id.
:param pulumi.Input[str] oauth2_user_info_endpoint: The top-level userinfo endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the userinfo endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_username_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the username.
:param pulumi.Input[bool] post_request: Set this value equal to true if you wish to use POST bindings with this OpenID Connect identity provider. The default value of false means that a redirect binding which uses a GET request will be used.
:param pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]] tenant_configurations: The configuration for each Tenant that limits the number of links a user may have for a particular identity provider.
"""
pulumi.set(__self__, "button_text", button_text)
pulumi.set(__self__, "oauth2_client_id", oauth2_client_id)
if application_configurations is not None:
pulumi.set(__self__, "application_configurations", application_configurations)
if button_image_url is not None:
pulumi.set(__self__, "button_image_url", button_image_url)
if debug is not None:
pulumi.set(__self__, "debug", debug)
if domains is not None:
pulumi.set(__self__, "domains", domains)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if idp_id is not None:
pulumi.set(__self__, "idp_id", idp_id)
if lambda_reconcile_id is not None:
pulumi.set(__self__, "lambda_reconcile_id", lambda_reconcile_id)
if linking_strategy is not None:
pulumi.set(__self__, "linking_strategy", linking_strategy)
if name is not None:
pulumi.set(__self__, "name", name)
if oauth2_authorization_endpoint is not None:
pulumi.set(__self__, "oauth2_authorization_endpoint", oauth2_authorization_endpoint)
if oauth2_client_authentication_method is not None:
pulumi.set(__self__, "oauth2_client_authentication_method", oauth2_client_authentication_method)
if oauth2_client_secret is not None:
pulumi.set(__self__, "oauth2_client_secret", oauth2_client_secret)
if oauth2_email_claim is not None:
pulumi.set(__self__, "oauth2_email_claim", oauth2_email_claim)
if oauth2_issuer is not None:
pulumi.set(__self__, "oauth2_issuer", oauth2_issuer)
if oauth2_scope is not None:
pulumi.set(__self__, "oauth2_scope", oauth2_scope)
if oauth2_token_endpoint is not None:
pulumi.set(__self__, "oauth2_token_endpoint", oauth2_token_endpoint)
if oauth2_unique_id_claim is not None:
pulumi.set(__self__, "oauth2_unique_id_claim", oauth2_unique_id_claim)
if oauth2_user_info_endpoint is not None:
pulumi.set(__self__, "oauth2_user_info_endpoint", oauth2_user_info_endpoint)
if oauth2_username_claim is not None:
pulumi.set(__self__, "oauth2_username_claim", oauth2_username_claim)
if post_request is not None:
pulumi.set(__self__, "post_request", post_request)
if tenant_configurations is not None:
pulumi.set(__self__, "tenant_configurations", tenant_configurations)
@property
@pulumi.getter(name="buttonText")
def button_text(self) -> pulumi.Input[str]:
"""
The top-level button text to use on the FusionAuth login page for this Identity Provider.
"""
return pulumi.get(self, "button_text")
@button_text.setter
def button_text(self, value: pulumi.Input[str]):
pulumi.set(self, "button_text", value)
@property
@pulumi.getter(name="oauth2ClientId")
def oauth2_client_id(self) -> pulumi.Input[str]:
"""
The top-level client id for your Application.
"""
return pulumi.get(self, "oauth2_client_id")
@oauth2_client_id.setter
def oauth2_client_id(self, value: pulumi.Input[str]):
pulumi.set(self, "oauth2_client_id", value)
@property
@pulumi.getter(name="applicationConfigurations")
def application_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]]:
"""
The configuration for each Application that the identity provider is enabled for.
"""
return pulumi.get(self, "application_configurations")
@application_configurations.setter
def application_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]]):
pulumi.set(self, "application_configurations", value)
@property
@pulumi.getter(name="buttonImageUrl")
def button_image_url(self) -> Optional[pulumi.Input[str]]:
"""
The top-level button image (URL) to use on the FusionAuth login page for this Identity Provider.
"""
return pulumi.get(self, "button_image_url")
@button_image_url.setter
def button_image_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "button_image_url", value)
@property
@pulumi.getter
def debug(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if debug is enabled for this provider. When enabled, each time this provider is invoked to reconcile a login an Event Log will be created.
"""
return pulumi.get(self, "debug")
@debug.setter
def debug(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "debug", value)
@property
@pulumi.getter
def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
This is an optional list of domains that this OpenID Connect provider should be used for. This converts the FusionAuth login form to a domain-based login form. This type of form first asks the user for their email. FusionAuth then uses their email to determine if an OpenID Connect identity provider should be used. If an OpenID Connect provider should be used, the browser is redirected to the authorization endpoint of that identity provider. Otherwise, the password field is revealed on the form so that the user can log in using FusionAuth.
"""
return pulumi.get(self, "domains")
@domains.setter
def domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "domains", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if this provider is enabled. If it is false then it will be disabled globally.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="idpId")
def idp_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID to use for the new identity provider. If not specified a secure random UUID will be generated.
"""
return pulumi.get(self, "idp_id")
@idp_id.setter
def idp_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "idp_id", value)
@property
@pulumi.getter(name="lambdaReconcileId")
def lambda_reconcile_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique Id of the lambda to be used during the user reconcile process to map custom claims from the external identity provider to the FusionAuth user.
"""
return pulumi.get(self, "lambda_reconcile_id")
@lambda_reconcile_id.setter
def lambda_reconcile_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lambda_reconcile_id", value)
@property
@pulumi.getter(name="linkingStrategy")
def linking_strategy(self) -> Optional[pulumi.Input[str]]:
"""
The linking strategy to use when creating the link between the {idp_display_name} Identity Provider and the user.
"""
return pulumi.get(self, "linking_strategy")
@linking_strategy.setter
def linking_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linking_strategy", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this OpenID Connect identity provider. This is only used for display purposes.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="oauth2AuthorizationEndpoint")
def oauth2_authorization_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The top-level authorization endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the authorization endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_authorization_endpoint")
@oauth2_authorization_endpoint.setter
def oauth2_authorization_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_authorization_endpoint", value)
@property
@pulumi.getter(name="oauth2ClientAuthenticationMethod")
def oauth2_client_authentication_method(self) -> Optional[pulumi.Input[str]]:
"""
The client authentication method to use with the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_client_authentication_method")
@oauth2_client_authentication_method.setter
def oauth2_client_authentication_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_client_authentication_method", value)
@property
@pulumi.getter(name="oauth2ClientSecret")
def oauth2_client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The top-level client secret to use with the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_client_secret")
@oauth2_client_secret.setter
def oauth2_client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_client_secret", value)
@property
@pulumi.getter(name="oauth2EmailClaim")
def oauth2_email_claim(self) -> Optional[pulumi.Input[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the email address.
"""
return pulumi.get(self, "oauth2_email_claim")
@oauth2_email_claim.setter
def oauth2_email_claim(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_email_claim", value)
@property
@pulumi.getter(name="oauth2Issuer")
def oauth2_issuer(self) -> Optional[pulumi.Input[str]]:
"""
The top-level issuer URI for the OpenID Connect identity provider. If this is provided, the authorization endpoint, token endpoint and userinfo endpoint will all be resolved using the issuer URI plus /.well-known/openid-configuration.
"""
return pulumi.get(self, "oauth2_issuer")
@oauth2_issuer.setter
def oauth2_issuer(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_issuer", value)
@property
@pulumi.getter(name="oauth2Scope")
def oauth2_scope(self) -> Optional[pulumi.Input[str]]:
"""
The top-level scope that you are requesting from the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_scope")
@oauth2_scope.setter
def oauth2_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_scope", value)
@property
@pulumi.getter(name="oauth2TokenEndpoint")
def oauth2_token_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The top-level token endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the token endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_token_endpoint")
@oauth2_token_endpoint.setter
def oauth2_token_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_token_endpoint", value)
@property
@pulumi.getter(name="oauth2UniqueIdClaim")
def oauth2_unique_id_claim(self) -> Optional[pulumi.Input[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the user Id.
"""
return pulumi.get(self, "oauth2_unique_id_claim")
@oauth2_unique_id_claim.setter
def oauth2_unique_id_claim(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_unique_id_claim", value)
@property
@pulumi.getter(name="oauth2UserInfoEndpoint")
def oauth2_user_info_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The top-level userinfo endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the userinfo endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_user_info_endpoint")
@oauth2_user_info_endpoint.setter
def oauth2_user_info_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_user_info_endpoint", value)
@property
@pulumi.getter(name="oauth2UsernameClaim")
def oauth2_username_claim(self) -> Optional[pulumi.Input[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the username.
"""
return pulumi.get(self, "oauth2_username_claim")
@oauth2_username_claim.setter
def oauth2_username_claim(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_username_claim", value)
@property
@pulumi.getter(name="postRequest")
def post_request(self) -> Optional[pulumi.Input[bool]]:
"""
Set this value equal to true if you wish to use POST bindings with this OpenID Connect identity provider. The default value of false means that a redirect binding which uses a GET request will be used.
"""
return pulumi.get(self, "post_request")
@post_request.setter
def post_request(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "post_request", value)
@property
@pulumi.getter(name="tenantConfigurations")
def tenant_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]]:
"""
The configuration for each Tenant that limits the number of links a user may have for a particular identity provider.
"""
return pulumi.get(self, "tenant_configurations")
@tenant_configurations.setter
def tenant_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]]):
pulumi.set(self, "tenant_configurations", value)
@pulumi.input_type
class _FusionAuthIdpOpenIdConnectState:
def __init__(__self__, *,
application_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]] = None,
button_image_url: Optional[pulumi.Input[str]] = None,
button_text: Optional[pulumi.Input[str]] = None,
debug: Optional[pulumi.Input[bool]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_id: Optional[pulumi.Input[str]] = None,
lambda_reconcile_id: Optional[pulumi.Input[str]] = None,
linking_strategy: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
oauth2_authorization_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_client_authentication_method: Optional[pulumi.Input[str]] = None,
oauth2_client_id: Optional[pulumi.Input[str]] = None,
oauth2_client_secret: Optional[pulumi.Input[str]] = None,
oauth2_email_claim: Optional[pulumi.Input[str]] = None,
oauth2_issuer: Optional[pulumi.Input[str]] = None,
oauth2_scope: Optional[pulumi.Input[str]] = None,
oauth2_token_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_unique_id_claim: Optional[pulumi.Input[str]] = None,
oauth2_user_info_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_username_claim: Optional[pulumi.Input[str]] = None,
post_request: Optional[pulumi.Input[bool]] = None,
tenant_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]] = None):
"""
Input properties used for looking up and filtering FusionAuthIdpOpenIdConnect resources.
:param pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]] application_configurations: The configuration for each Application that the identity provider is enabled for.
:param pulumi.Input[str] button_image_url: The top-level button image (URL) to use on the FusionAuth login page for this Identity Provider.
:param pulumi.Input[str] button_text: The top-level button text to use on the FusionAuth login page for this Identity Provider.
:param pulumi.Input[bool] debug: Determines if debug is enabled for this provider. When enabled, each time this provider is invoked to reconcile a login an Event Log will be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domains: This is an optional list of domains that this OpenID Connect provider should be used for. This converts the FusionAuth login form to a domain-based login form. This type of form first asks the user for their email. FusionAuth then uses their email to determine if an OpenID Connect identity provider should be used. If an OpenID Connect provider should be used, the browser is redirected to the authorization endpoint of that identity provider. Otherwise, the password field is revealed on the form so that the user can log in using FusionAuth.
:param pulumi.Input[bool] enabled: Determines if this provider is enabled. If it is false then it will be disabled globally.
:param pulumi.Input[str] idp_id: The ID to use for the new identity provider. If not specified a secure random UUID will be generated.
:param pulumi.Input[str] lambda_reconcile_id: The unique Id of the lambda to be used during the user reconcile process to map custom claims from the external identity provider to the FusionAuth user.
:param pulumi.Input[str] linking_strategy: The linking strategy to use when creating the link between the {idp_display_name} Identity Provider and the user.
:param pulumi.Input[str] name: The name of this OpenID Connect identity provider. This is only used for display purposes.
:param pulumi.Input[str] oauth2_authorization_endpoint: The top-level authorization endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the authorization endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_client_authentication_method: The client authentication method to use with the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_client_id: The top-level client id for your Application.
:param pulumi.Input[str] oauth2_client_secret: The top-level client secret to use with the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_email_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the email address.
:param pulumi.Input[str] oauth2_issuer: The top-level issuer URI for the OpenID Connect identity provider. If this is provided, the authorization endpoint, token endpoint and userinfo endpoint will all be resolved using the issuer URI plus /.well-known/openid-configuration.
:param pulumi.Input[str] oauth2_scope: The top-level scope that you are requesting from the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_token_endpoint: The top-level token endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the token endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_unique_id_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the user Id.
:param pulumi.Input[str] oauth2_user_info_endpoint: The top-level userinfo endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the userinfo endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_username_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the username.
:param pulumi.Input[bool] post_request: Set this value equal to true if you wish to use POST bindings with this OpenID Connect identity provider. The default value of false means that a redirect binding which uses a GET request will be used.
:param pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]] tenant_configurations: The configuration for each Tenant that limits the number of links a user may have for a particular identity provider.
"""
if application_configurations is not None:
pulumi.set(__self__, "application_configurations", application_configurations)
if button_image_url is not None:
pulumi.set(__self__, "button_image_url", button_image_url)
if button_text is not None:
pulumi.set(__self__, "button_text", button_text)
if debug is not None:
pulumi.set(__self__, "debug", debug)
if domains is not None:
pulumi.set(__self__, "domains", domains)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if idp_id is not None:
pulumi.set(__self__, "idp_id", idp_id)
if lambda_reconcile_id is not None:
pulumi.set(__self__, "lambda_reconcile_id", lambda_reconcile_id)
if linking_strategy is not None:
pulumi.set(__self__, "linking_strategy", linking_strategy)
if name is not None:
pulumi.set(__self__, "name", name)
if oauth2_authorization_endpoint is not None:
pulumi.set(__self__, "oauth2_authorization_endpoint", oauth2_authorization_endpoint)
if oauth2_client_authentication_method is not None:
pulumi.set(__self__, "oauth2_client_authentication_method", oauth2_client_authentication_method)
if oauth2_client_id is not None:
pulumi.set(__self__, "oauth2_client_id", oauth2_client_id)
if oauth2_client_secret is not None:
pulumi.set(__self__, "oauth2_client_secret", oauth2_client_secret)
if oauth2_email_claim is not None:
pulumi.set(__self__, "oauth2_email_claim", oauth2_email_claim)
if oauth2_issuer is not None:
pulumi.set(__self__, "oauth2_issuer", oauth2_issuer)
if oauth2_scope is not None:
pulumi.set(__self__, "oauth2_scope", oauth2_scope)
if oauth2_token_endpoint is not None:
pulumi.set(__self__, "oauth2_token_endpoint", oauth2_token_endpoint)
if oauth2_unique_id_claim is not None:
pulumi.set(__self__, "oauth2_unique_id_claim", oauth2_unique_id_claim)
if oauth2_user_info_endpoint is not None:
pulumi.set(__self__, "oauth2_user_info_endpoint", oauth2_user_info_endpoint)
if oauth2_username_claim is not None:
pulumi.set(__self__, "oauth2_username_claim", oauth2_username_claim)
if post_request is not None:
pulumi.set(__self__, "post_request", post_request)
if tenant_configurations is not None:
pulumi.set(__self__, "tenant_configurations", tenant_configurations)
@property
@pulumi.getter(name="applicationConfigurations")
def application_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]]:
"""
The configuration for each Application that the identity provider is enabled for.
"""
return pulumi.get(self, "application_configurations")
@application_configurations.setter
def application_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]]):
pulumi.set(self, "application_configurations", value)
@property
@pulumi.getter(name="buttonImageUrl")
def button_image_url(self) -> Optional[pulumi.Input[str]]:
"""
The top-level button image (URL) to use on the FusionAuth login page for this Identity Provider.
"""
return pulumi.get(self, "button_image_url")
@button_image_url.setter
def button_image_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "button_image_url", value)
@property
@pulumi.getter(name="buttonText")
def button_text(self) -> Optional[pulumi.Input[str]]:
"""
The top-level button text to use on the FusionAuth login page for this Identity Provider.
"""
return pulumi.get(self, "button_text")
@button_text.setter
def button_text(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "button_text", value)
@property
@pulumi.getter
def debug(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if debug is enabled for this provider. When enabled, each time this provider is invoked to reconcile a login an Event Log will be created.
"""
return pulumi.get(self, "debug")
@debug.setter
def debug(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "debug", value)
@property
@pulumi.getter
def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
This is an optional list of domains that this OpenID Connect provider should be used for. This converts the FusionAuth login form to a domain-based login form. This type of form first asks the user for their email. FusionAuth then uses their email to determine if an OpenID Connect identity provider should be used. If an OpenID Connect provider should be used, the browser is redirected to the authorization endpoint of that identity provider. Otherwise, the password field is revealed on the form so that the user can log in using FusionAuth.
"""
return pulumi.get(self, "domains")
@domains.setter
def domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "domains", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if this provider is enabled. If it is false then it will be disabled globally.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="idpId")
def idp_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID to use for the new identity provider. If not specified a secure random UUID will be generated.
"""
return pulumi.get(self, "idp_id")
@idp_id.setter
def idp_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "idp_id", value)
@property
@pulumi.getter(name="lambdaReconcileId")
def lambda_reconcile_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique Id of the lambda to be used during the user reconcile process to map custom claims from the external identity provider to the FusionAuth user.
"""
return pulumi.get(self, "lambda_reconcile_id")
@lambda_reconcile_id.setter
def lambda_reconcile_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lambda_reconcile_id", value)
@property
@pulumi.getter(name="linkingStrategy")
def linking_strategy(self) -> Optional[pulumi.Input[str]]:
"""
The linking strategy to use when creating the link between the {idp_display_name} Identity Provider and the user.
"""
return pulumi.get(self, "linking_strategy")
@linking_strategy.setter
def linking_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "linking_strategy", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this OpenID Connect identity provider. This is only used for display purposes.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="oauth2AuthorizationEndpoint")
def oauth2_authorization_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The top-level authorization endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the authorization endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_authorization_endpoint")
@oauth2_authorization_endpoint.setter
def oauth2_authorization_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_authorization_endpoint", value)
@property
@pulumi.getter(name="oauth2ClientAuthenticationMethod")
def oauth2_client_authentication_method(self) -> Optional[pulumi.Input[str]]:
"""
The client authentication method to use with the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_client_authentication_method")
@oauth2_client_authentication_method.setter
def oauth2_client_authentication_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_client_authentication_method", value)
@property
@pulumi.getter(name="oauth2ClientId")
def oauth2_client_id(self) -> Optional[pulumi.Input[str]]:
"""
The top-level client id for your Application.
"""
return pulumi.get(self, "oauth2_client_id")
@oauth2_client_id.setter
def oauth2_client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_client_id", value)
@property
@pulumi.getter(name="oauth2ClientSecret")
def oauth2_client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The top-level client secret to use with the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_client_secret")
@oauth2_client_secret.setter
def oauth2_client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_client_secret", value)
@property
@pulumi.getter(name="oauth2EmailClaim")
def oauth2_email_claim(self) -> Optional[pulumi.Input[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the email address.
"""
return pulumi.get(self, "oauth2_email_claim")
@oauth2_email_claim.setter
def oauth2_email_claim(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_email_claim", value)
@property
@pulumi.getter(name="oauth2Issuer")
def oauth2_issuer(self) -> Optional[pulumi.Input[str]]:
"""
The top-level issuer URI for the OpenID Connect identity provider. If this is provided, the authorization endpoint, token endpoint and userinfo endpoint will all be resolved using the issuer URI plus /.well-known/openid-configuration.
"""
return pulumi.get(self, "oauth2_issuer")
@oauth2_issuer.setter
def oauth2_issuer(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_issuer", value)
@property
@pulumi.getter(name="oauth2Scope")
def oauth2_scope(self) -> Optional[pulumi.Input[str]]:
"""
The top-level scope that you are requesting from the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_scope")
@oauth2_scope.setter
def oauth2_scope(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_scope", value)
@property
@pulumi.getter(name="oauth2TokenEndpoint")
def oauth2_token_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The top-level token endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the token endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_token_endpoint")
@oauth2_token_endpoint.setter
def oauth2_token_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_token_endpoint", value)
@property
@pulumi.getter(name="oauth2UniqueIdClaim")
def oauth2_unique_id_claim(self) -> Optional[pulumi.Input[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the user Id.
"""
return pulumi.get(self, "oauth2_unique_id_claim")
@oauth2_unique_id_claim.setter
def oauth2_unique_id_claim(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_unique_id_claim", value)
@property
@pulumi.getter(name="oauth2UserInfoEndpoint")
def oauth2_user_info_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The top-level userinfo endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the userinfo endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_user_info_endpoint")
@oauth2_user_info_endpoint.setter
def oauth2_user_info_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_user_info_endpoint", value)
@property
@pulumi.getter(name="oauth2UsernameClaim")
def oauth2_username_claim(self) -> Optional[pulumi.Input[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the username.
"""
return pulumi.get(self, "oauth2_username_claim")
@oauth2_username_claim.setter
def oauth2_username_claim(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth2_username_claim", value)
@property
@pulumi.getter(name="postRequest")
def post_request(self) -> Optional[pulumi.Input[bool]]:
"""
Set this value equal to true if you wish to use POST bindings with this OpenID Connect identity provider. The default value of false means that a redirect binding which uses a GET request will be used.
"""
return pulumi.get(self, "post_request")
@post_request.setter
def post_request(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "post_request", value)
@property
@pulumi.getter(name="tenantConfigurations")
def tenant_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]]:
"""
The configuration for each Tenant that limits the number of links a user may have for a particular identity provider.
"""
return pulumi.get(self, "tenant_configurations")
@tenant_configurations.setter
def tenant_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]]):
pulumi.set(self, "tenant_configurations", value)
class FusionAuthIdpOpenIdConnect(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]]] = None,
button_image_url: Optional[pulumi.Input[str]] = None,
button_text: Optional[pulumi.Input[str]] = None,
debug: Optional[pulumi.Input[bool]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_id: Optional[pulumi.Input[str]] = None,
lambda_reconcile_id: Optional[pulumi.Input[str]] = None,
linking_strategy: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
oauth2_authorization_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_client_authentication_method: Optional[pulumi.Input[str]] = None,
oauth2_client_id: Optional[pulumi.Input[str]] = None,
oauth2_client_secret: Optional[pulumi.Input[str]] = None,
oauth2_email_claim: Optional[pulumi.Input[str]] = None,
oauth2_issuer: Optional[pulumi.Input[str]] = None,
oauth2_scope: Optional[pulumi.Input[str]] = None,
oauth2_token_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_unique_id_claim: Optional[pulumi.Input[str]] = None,
oauth2_user_info_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_username_claim: Optional[pulumi.Input[str]] = None,
post_request: Optional[pulumi.Input[bool]] = None,
tenant_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]]] = None,
__props__=None):
"""
        ## OpenID Connect Identity Provider Resource
OpenID Connect identity providers connect to external OpenID Connect login systems. This type of login will optionally provide a Login with … button on FusionAuth’s login page. This button is customizable by using different properties of the identity provider.
Optionally, this identity provider can define one or more domains it is associated with. This is useful for allowing employees to log in with their corporate credentials. As long as the company has an identity solution that provides OpenID Connect, you can leverage this feature. This is referred to as a Domain Based Identity Provider. If you enable domains for an identity provider, the Login with … button will not be displayed. Instead, only the email form field will be displayed initially on the FusionAuth login page. Once the user types in their email address, FusionAuth will determine if the user is logging in locally or if they should be redirected to this identity provider. This is determined by extracting the domain from their email address and comparing it to the domains associated with the identity provider.
        FusionAuth will also leverage the /userinfo API that is part of the OpenID Connect specification. The email address returned from the Userinfo response will be used to create or look up the existing user. Additional claims from the Userinfo response can be used to reconcile the User in FusionAuth by using an OpenID Connect Reconcile Lambda. Unless you assign a reconcile lambda to this provider, only the email address will be used from the available claims returned by the OpenID Connect identity provider.
If the external OpenID Connect identity provider returns a refresh token, it will be stored in the UserRegistration object inside the tokens Map. This Map stores the tokens from the various identity providers so that you can use them in your application to call their APIs.
[OpenID Connect Identity Providers API](https://fusionauth.io/docs/v1/tech/apis/identity-providers/openid-connect)
## Example Usage
```python
import pulumi
        import theogravity_pulumi_fusionauth as fusionauth
open_id = fusionauth.FusionAuthIdpOpenIdConnect("openID",
application_configurations=[fusionauth.FusionAuthIdpOpenIdConnectApplicationConfigurationArgs(
application_id=fusionauth_application["myapp"]["id"],
create_registration=True,
enabled=True,
)],
oauth2_authorization_endpoint="https://acme.com/oauth2/authorization",
oauth2_client_id="191c23dc-b772-4558-bd21-dc1cbf74ae21",
oauth2_client_secret="SUsnoP0pWUYfXvWbSe5pvj8Di5nAxOvO",
oauth2_client_authentication_method="client_secret_basic",
oauth2_scope="openid offline_access",
oauth2_token_endpoint="https://acme.com/oauth2/token",
oauth2_user_info_endpoint="https://acme.com/oauth2/userinfo",
button_text="Login with OpenID Connect",
debug=False,
enabled=True,
tenant_configurations=[fusionauth.FusionAuthIdpOpenIdConnectTenantConfigurationArgs(
tenant_id=fusionauth_tenant["example"]["id"],
limit_user_link_count_enabled=False,
limit_user_link_count_maximum_links=42,
)])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]] application_configurations: The configuration for each Application that the identity provider is enabled for.
:param pulumi.Input[str] button_image_url: The top-level button image (URL) to use on the FusionAuth login page for this Identity Provider.
:param pulumi.Input[str] button_text: The top-level button text to use on the FusionAuth login page for this Identity Provider.
:param pulumi.Input[bool] debug: Determines if debug is enabled for this provider. When enabled, each time this provider is invoked to reconcile a login an Event Log will be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domains: This is an optional list of domains that this OpenID Connect provider should be used for. This converts the FusionAuth login form to a domain-based login form. This type of form first asks the user for their email. FusionAuth then uses their email to determine if an OpenID Connect identity provider should be used. If an OpenID Connect provider should be used, the browser is redirected to the authorization endpoint of that identity provider. Otherwise, the password field is revealed on the form so that the user can login using FusionAuth.
:param pulumi.Input[bool] enabled: Determines if this provider is enabled. If it is false then it will be disabled globally.
:param pulumi.Input[str] idp_id: The ID to use for the new identity provider. If not specified a secure random UUID will be generated.
        :param pulumi.Input[str] lambda_reconcile_id: The unique Id of the lambda used during the user reconcile process to map custom claims from the external identity provider to the FusionAuth user.
        :param pulumi.Input[str] linking_strategy: The linking strategy to use when creating the link between the OpenID Connect Identity Provider and the user.
:param pulumi.Input[str] name: The name of this OpenID Connect identity provider. This is only used for display purposes.
:param pulumi.Input[str] oauth2_authorization_endpoint: The top-level authorization endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the authorization endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_client_authentication_method: The client authentication method to use with the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_client_id: The top-level client id for your Application.
:param pulumi.Input[str] oauth2_client_secret: The top-level client secret to use with the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_email_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the email address.
:param pulumi.Input[str] oauth2_issuer: The top-level issuer URI for the OpenID Connect identity provider. If this is provided, the authorization endpoint, token endpoint and userinfo endpoint will all be resolved using the issuer URI plus /.well-known/openid-configuration.
:param pulumi.Input[str] oauth2_scope: The top-level scope that you are requesting from the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_token_endpoint: The top-level token endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the token endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_unique_id_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the user Id.
:param pulumi.Input[str] oauth2_user_info_endpoint: The top-level userinfo endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the userinfo endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_username_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the username.
:param pulumi.Input[bool] post_request: Set this value equal to true if you wish to use POST bindings with this OpenID Connect identity provider. The default value of false means that a redirect binding which uses a GET request will be used.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]] tenant_configurations: The configuration for each Tenant that limits the number of links a user may have for a particular identity provider.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FusionAuthIdpOpenIdConnectArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        ## OpenID Connect Identity Provider Resource
OpenID Connect identity providers connect to external OpenID Connect login systems. This type of login will optionally provide a Login with … button on FusionAuth’s login page. This button is customizable by using different properties of the identity provider.
Optionally, this identity provider can define one or more domains it is associated with. This is useful for allowing employees to log in with their corporate credentials. As long as the company has an identity solution that provides OpenID Connect, you can leverage this feature. This is referred to as a Domain Based Identity Provider. If you enable domains for an identity provider, the Login with … button will not be displayed. Instead, only the email form field will be displayed initially on the FusionAuth login page. Once the user types in their email address, FusionAuth will determine if the user is logging in locally or if they should be redirected to this identity provider. This is determined by extracting the domain from their email address and comparing it to the domains associated with the identity provider.
        FusionAuth will also leverage the /userinfo API that is part of the OpenID Connect specification. The email address returned from the Userinfo response will be used to create or look up the existing user. Additional claims from the Userinfo response can be used to reconcile the User in FusionAuth by using an OpenID Connect Reconcile Lambda. Unless you assign a reconcile lambda to this provider, only the email address will be used from the available claims returned by the OpenID Connect identity provider.
If the external OpenID Connect identity provider returns a refresh token, it will be stored in the UserRegistration object inside the tokens Map. This Map stores the tokens from the various identity providers so that you can use them in your application to call their APIs.
[OpenID Connect Identity Providers API](https://fusionauth.io/docs/v1/tech/apis/identity-providers/openid-connect)
## Example Usage
```python
import pulumi
        import theogravity_pulumi_fusionauth as fusionauth
open_id = fusionauth.FusionAuthIdpOpenIdConnect("openID",
application_configurations=[fusionauth.FusionAuthIdpOpenIdConnectApplicationConfigurationArgs(
application_id=fusionauth_application["myapp"]["id"],
create_registration=True,
enabled=True,
)],
oauth2_authorization_endpoint="https://acme.com/oauth2/authorization",
oauth2_client_id="191c23dc-b772-4558-bd21-dc1cbf74ae21",
oauth2_client_secret="SUsnoP0pWUYfXvWbSe5pvj8Di5nAxOvO",
oauth2_client_authentication_method="client_secret_basic",
oauth2_scope="openid offline_access",
oauth2_token_endpoint="https://acme.com/oauth2/token",
oauth2_user_info_endpoint="https://acme.com/oauth2/userinfo",
button_text="Login with OpenID Connect",
debug=False,
enabled=True,
tenant_configurations=[fusionauth.FusionAuthIdpOpenIdConnectTenantConfigurationArgs(
tenant_id=fusionauth_tenant["example"]["id"],
limit_user_link_count_enabled=False,
limit_user_link_count_maximum_links=42,
)])
```
:param str resource_name: The name of the resource.
:param FusionAuthIdpOpenIdConnectArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FusionAuthIdpOpenIdConnectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]]] = None,
button_image_url: Optional[pulumi.Input[str]] = None,
button_text: Optional[pulumi.Input[str]] = None,
debug: Optional[pulumi.Input[bool]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_id: Optional[pulumi.Input[str]] = None,
lambda_reconcile_id: Optional[pulumi.Input[str]] = None,
linking_strategy: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
oauth2_authorization_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_client_authentication_method: Optional[pulumi.Input[str]] = None,
oauth2_client_id: Optional[pulumi.Input[str]] = None,
oauth2_client_secret: Optional[pulumi.Input[str]] = None,
oauth2_email_claim: Optional[pulumi.Input[str]] = None,
oauth2_issuer: Optional[pulumi.Input[str]] = None,
oauth2_scope: Optional[pulumi.Input[str]] = None,
oauth2_token_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_unique_id_claim: Optional[pulumi.Input[str]] = None,
oauth2_user_info_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_username_claim: Optional[pulumi.Input[str]] = None,
post_request: Optional[pulumi.Input[bool]] = None,
tenant_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FusionAuthIdpOpenIdConnectArgs.__new__(FusionAuthIdpOpenIdConnectArgs)
__props__.__dict__["application_configurations"] = application_configurations
__props__.__dict__["button_image_url"] = button_image_url
if button_text is None and not opts.urn:
raise TypeError("Missing required property 'button_text'")
__props__.__dict__["button_text"] = button_text
__props__.__dict__["debug"] = debug
__props__.__dict__["domains"] = domains
__props__.__dict__["enabled"] = enabled
__props__.__dict__["idp_id"] = idp_id
__props__.__dict__["lambda_reconcile_id"] = lambda_reconcile_id
__props__.__dict__["linking_strategy"] = linking_strategy
__props__.__dict__["name"] = name
__props__.__dict__["oauth2_authorization_endpoint"] = oauth2_authorization_endpoint
__props__.__dict__["oauth2_client_authentication_method"] = oauth2_client_authentication_method
if oauth2_client_id is None and not opts.urn:
raise TypeError("Missing required property 'oauth2_client_id'")
__props__.__dict__["oauth2_client_id"] = oauth2_client_id
__props__.__dict__["oauth2_client_secret"] = None if oauth2_client_secret is None else pulumi.Output.secret(oauth2_client_secret)
__props__.__dict__["oauth2_email_claim"] = oauth2_email_claim
__props__.__dict__["oauth2_issuer"] = oauth2_issuer
__props__.__dict__["oauth2_scope"] = oauth2_scope
__props__.__dict__["oauth2_token_endpoint"] = oauth2_token_endpoint
__props__.__dict__["oauth2_unique_id_claim"] = oauth2_unique_id_claim
__props__.__dict__["oauth2_user_info_endpoint"] = oauth2_user_info_endpoint
__props__.__dict__["oauth2_username_claim"] = oauth2_username_claim
__props__.__dict__["post_request"] = post_request
__props__.__dict__["tenant_configurations"] = tenant_configurations
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["oauth2ClientSecret"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(FusionAuthIdpOpenIdConnect, __self__).__init__(
'fusionauth:index/fusionAuthIdpOpenIdConnect:FusionAuthIdpOpenIdConnect',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
application_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]]] = None,
button_image_url: Optional[pulumi.Input[str]] = None,
button_text: Optional[pulumi.Input[str]] = None,
debug: Optional[pulumi.Input[bool]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
idp_id: Optional[pulumi.Input[str]] = None,
lambda_reconcile_id: Optional[pulumi.Input[str]] = None,
linking_strategy: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
oauth2_authorization_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_client_authentication_method: Optional[pulumi.Input[str]] = None,
oauth2_client_id: Optional[pulumi.Input[str]] = None,
oauth2_client_secret: Optional[pulumi.Input[str]] = None,
oauth2_email_claim: Optional[pulumi.Input[str]] = None,
oauth2_issuer: Optional[pulumi.Input[str]] = None,
oauth2_scope: Optional[pulumi.Input[str]] = None,
oauth2_token_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_unique_id_claim: Optional[pulumi.Input[str]] = None,
oauth2_user_info_endpoint: Optional[pulumi.Input[str]] = None,
oauth2_username_claim: Optional[pulumi.Input[str]] = None,
post_request: Optional[pulumi.Input[bool]] = None,
tenant_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]]] = None) -> 'FusionAuthIdpOpenIdConnect':
"""
Get an existing FusionAuthIdpOpenIdConnect resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
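        For example, an existing provider can be brought under management like
        this (a minimal sketch; the resource name and Id below are
        placeholders):
        ```python
        import pulumi
        import theogravity_pulumi_fusionauth as fusionauth
        existing = fusionauth.FusionAuthIdpOpenIdConnect.get("openID",
            id="191c23dc-b772-4558-bd21-dc1cbf74ae21")
        ```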
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectApplicationConfigurationArgs']]]] application_configurations: The configuration for each Application that the identity provider is enabled for.
:param pulumi.Input[str] button_image_url: The top-level button image (URL) to use on the FusionAuth login page for this Identity Provider.
:param pulumi.Input[str] button_text: The top-level button text to use on the FusionAuth login page for this Identity Provider.
:param pulumi.Input[bool] debug: Determines if debug is enabled for this provider. When enabled, each time this provider is invoked to reconcile a login an Event Log will be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domains: This is an optional list of domains that this OpenID Connect provider should be used for. This converts the FusionAuth login form to a domain-based login form. This type of form first asks the user for their email. FusionAuth then uses their email to determine if an OpenID Connect identity provider should be used. If an OpenID Connect provider should be used, the browser is redirected to the authorization endpoint of that identity provider. Otherwise, the password field is revealed on the form so that the user can login using FusionAuth.
:param pulumi.Input[bool] enabled: Determines if this provider is enabled. If it is false then it will be disabled globally.
:param pulumi.Input[str] idp_id: The ID to use for the new identity provider. If not specified a secure random UUID will be generated.
        :param pulumi.Input[str] lambda_reconcile_id: The unique Id of the lambda used during the user reconcile process to map custom claims from the external identity provider to the FusionAuth user.
        :param pulumi.Input[str] linking_strategy: The linking strategy to use when creating the link between the OpenID Connect Identity Provider and the user.
:param pulumi.Input[str] name: The name of this OpenID Connect identity provider. This is only used for display purposes.
:param pulumi.Input[str] oauth2_authorization_endpoint: The top-level authorization endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the authorization endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_client_authentication_method: The client authentication method to use with the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_client_id: The top-level client id for your Application.
:param pulumi.Input[str] oauth2_client_secret: The top-level client secret to use with the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_email_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the email address.
:param pulumi.Input[str] oauth2_issuer: The top-level issuer URI for the OpenID Connect identity provider. If this is provided, the authorization endpoint, token endpoint and userinfo endpoint will all be resolved using the issuer URI plus /.well-known/openid-configuration.
:param pulumi.Input[str] oauth2_scope: The top-level scope that you are requesting from the OpenID Connect identity provider.
:param pulumi.Input[str] oauth2_token_endpoint: The top-level token endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the token endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_unique_id_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the user Id.
:param pulumi.Input[str] oauth2_user_info_endpoint: The top-level userinfo endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the userinfo endpoint. If you provide an issuer then this field will be ignored.
:param pulumi.Input[str] oauth2_username_claim: An optional configuration to modify the expected name of the claim returned by the IdP that contains the username.
:param pulumi.Input[bool] post_request: Set this value equal to true if you wish to use POST bindings with this OpenID Connect identity provider. The default value of false means that a redirect binding which uses a GET request will be used.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FusionAuthIdpOpenIdConnectTenantConfigurationArgs']]]] tenant_configurations: The configuration for each Tenant that limits the number of links a user may have for a particular identity provider.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FusionAuthIdpOpenIdConnectState.__new__(_FusionAuthIdpOpenIdConnectState)
__props__.__dict__["application_configurations"] = application_configurations
__props__.__dict__["button_image_url"] = button_image_url
__props__.__dict__["button_text"] = button_text
__props__.__dict__["debug"] = debug
__props__.__dict__["domains"] = domains
__props__.__dict__["enabled"] = enabled
__props__.__dict__["idp_id"] = idp_id
__props__.__dict__["lambda_reconcile_id"] = lambda_reconcile_id
__props__.__dict__["linking_strategy"] = linking_strategy
__props__.__dict__["name"] = name
__props__.__dict__["oauth2_authorization_endpoint"] = oauth2_authorization_endpoint
__props__.__dict__["oauth2_client_authentication_method"] = oauth2_client_authentication_method
__props__.__dict__["oauth2_client_id"] = oauth2_client_id
__props__.__dict__["oauth2_client_secret"] = oauth2_client_secret
__props__.__dict__["oauth2_email_claim"] = oauth2_email_claim
__props__.__dict__["oauth2_issuer"] = oauth2_issuer
__props__.__dict__["oauth2_scope"] = oauth2_scope
__props__.__dict__["oauth2_token_endpoint"] = oauth2_token_endpoint
__props__.__dict__["oauth2_unique_id_claim"] = oauth2_unique_id_claim
__props__.__dict__["oauth2_user_info_endpoint"] = oauth2_user_info_endpoint
__props__.__dict__["oauth2_username_claim"] = oauth2_username_claim
__props__.__dict__["post_request"] = post_request
__props__.__dict__["tenant_configurations"] = tenant_configurations
return FusionAuthIdpOpenIdConnect(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applicationConfigurations")
def application_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.FusionAuthIdpOpenIdConnectApplicationConfiguration']]]:
"""
The configuration for each Application that the identity provider is enabled for.
"""
return pulumi.get(self, "application_configurations")
@property
@pulumi.getter(name="buttonImageUrl")
def button_image_url(self) -> pulumi.Output[Optional[str]]:
"""
The top-level button image (URL) to use on the FusionAuth login page for this Identity Provider.
"""
return pulumi.get(self, "button_image_url")
@property
@pulumi.getter(name="buttonText")
def button_text(self) -> pulumi.Output[str]:
"""
The top-level button text to use on the FusionAuth login page for this Identity Provider.
"""
return pulumi.get(self, "button_text")
@property
@pulumi.getter
def debug(self) -> pulumi.Output[Optional[bool]]:
"""
Determines if debug is enabled for this provider. When enabled, each time this provider is invoked to reconcile a login an Event Log will be created.
"""
return pulumi.get(self, "debug")
@property
@pulumi.getter
def domains(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
This is an optional list of domains that this OpenID Connect provider should be used for. This converts the FusionAuth login form to a domain-based login form. This type of form first asks the user for their email. FusionAuth then uses their email to determine if an OpenID Connect identity provider should be used. If an OpenID Connect provider should be used, the browser is redirected to the authorization endpoint of that identity provider. Otherwise, the password field is revealed on the form so that the user can login using FusionAuth.
"""
return pulumi.get(self, "domains")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Determines if this provider is enabled. If it is false then it will be disabled globally.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="idpId")
def idp_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID to use for the new identity provider. If not specified a secure random UUID will be generated.
"""
return pulumi.get(self, "idp_id")
@property
@pulumi.getter(name="lambdaReconcileId")
def lambda_reconcile_id(self) -> pulumi.Output[Optional[str]]:
"""
        The unique Id of the lambda used during the user reconcile process to map custom claims from the external identity provider to the FusionAuth user.
"""
return pulumi.get(self, "lambda_reconcile_id")
@property
@pulumi.getter(name="linkingStrategy")
def linking_strategy(self) -> pulumi.Output[str]:
"""
        The linking strategy to use when creating the link between the OpenID Connect Identity Provider and the user.
"""
return pulumi.get(self, "linking_strategy")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of this OpenID Connect identity provider. This is only used for display purposes.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="oauth2AuthorizationEndpoint")
def oauth2_authorization_endpoint(self) -> pulumi.Output[Optional[str]]:
"""
The top-level authorization endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the authorization endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_authorization_endpoint")
@property
@pulumi.getter(name="oauth2ClientAuthenticationMethod")
def oauth2_client_authentication_method(self) -> pulumi.Output[Optional[str]]:
"""
The client authentication method to use with the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_client_authentication_method")
@property
@pulumi.getter(name="oauth2ClientId")
def oauth2_client_id(self) -> pulumi.Output[str]:
"""
The top-level client id for your Application.
"""
return pulumi.get(self, "oauth2_client_id")
@property
@pulumi.getter(name="oauth2ClientSecret")
def oauth2_client_secret(self) -> pulumi.Output[Optional[str]]:
"""
The top-level client secret to use with the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_client_secret")
@property
@pulumi.getter(name="oauth2EmailClaim")
def oauth2_email_claim(self) -> pulumi.Output[Optional[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the email address.
"""
return pulumi.get(self, "oauth2_email_claim")
@property
@pulumi.getter(name="oauth2Issuer")
def oauth2_issuer(self) -> pulumi.Output[Optional[str]]:
"""
The top-level issuer URI for the OpenID Connect identity provider. If this is provided, the authorization endpoint, token endpoint and userinfo endpoint will all be resolved using the issuer URI plus /.well-known/openid-configuration.
"""
return pulumi.get(self, "oauth2_issuer")
@property
@pulumi.getter(name="oauth2Scope")
def oauth2_scope(self) -> pulumi.Output[Optional[str]]:
"""
The top-level scope that you are requesting from the OpenID Connect identity provider.
"""
return pulumi.get(self, "oauth2_scope")
@property
@pulumi.getter(name="oauth2TokenEndpoint")
def oauth2_token_endpoint(self) -> pulumi.Output[Optional[str]]:
"""
The top-level token endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the token endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_token_endpoint")
@property
@pulumi.getter(name="oauth2UniqueIdClaim")
def oauth2_unique_id_claim(self) -> pulumi.Output[Optional[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the user Id.
"""
return pulumi.get(self, "oauth2_unique_id_claim")
@property
@pulumi.getter(name="oauth2UserInfoEndpoint")
def oauth2_user_info_endpoint(self) -> pulumi.Output[Optional[str]]:
"""
The top-level userinfo endpoint for the OpenID Connect identity provider. You can leave this blank if you provide the issuer field, which will be used to make a request to the OpenID Connect .well-known endpoint in order to dynamically resolve the userinfo endpoint. If you provide an issuer then this field will be ignored.
"""
return pulumi.get(self, "oauth2_user_info_endpoint")
@property
@pulumi.getter(name="oauth2UsernameClaim")
def oauth2_username_claim(self) -> pulumi.Output[Optional[str]]:
"""
An optional configuration to modify the expected name of the claim returned by the IdP that contains the username.
"""
return pulumi.get(self, "oauth2_username_claim")
@property
@pulumi.getter(name="postRequest")
def post_request(self) -> pulumi.Output[Optional[bool]]:
"""
Set this value equal to true if you wish to use POST bindings with this OpenID Connect identity provider. The default value of false means that a redirect binding which uses a GET request will be used.
"""
return pulumi.get(self, "post_request")
@property
@pulumi.getter(name="tenantConfigurations")
def tenant_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.FusionAuthIdpOpenIdConnectTenantConfiguration']]]:
"""
The configuration for each Tenant that limits the number of links a user may have for a particular identity provider.
"""
return pulumi.get(self, "tenant_configurations")
/ship_messenger_server-0.8.7.tar.gz/ship_messenger_server-0.8.7/packets/server/add_user.py
from PyQt5.QtWidgets import QDialog, QPushButton, QLineEdit, QApplication, \
QLabel, QMessageBox
from PyQt5.QtCore import Qt
import hashlib
import binascii
class RegisterUser(QDialog):
""" Класс диалог регистрации пользователя на сервере. """
def __init__(self, database, server):
super().__init__()
self.database = database
self.server = server
        self.setWindowTitle('Registration')
self.setFixedSize(175, 183)
self.setModal(True)
self.setAttribute(Qt.WA_DeleteOnClose)
        self.label_username = QLabel('Enter a username:', self)
self.label_username.move(10, 10)
self.label_username.setFixedSize(150, 15)
self.client_name = QLineEdit(self)
self.client_name.setFixedSize(154, 20)
self.client_name.move(10, 30)
        self.label_passwd = QLabel('Enter a password:', self)
self.label_passwd.move(10, 55)
self.label_passwd.setFixedSize(150, 15)
self.client_passwd = QLineEdit(self)
self.client_passwd.setFixedSize(154, 20)
self.client_passwd.move(10, 75)
self.client_passwd.setEchoMode(QLineEdit.Password)
        self.label_conf = QLabel('Confirm the password:', self)
self.label_conf.move(10, 100)
self.label_conf.setFixedSize(150, 15)
self.client_conf = QLineEdit(self)
self.client_conf.setFixedSize(154, 20)
self.client_conf.move(10, 120)
self.client_conf.setEchoMode(QLineEdit.Password)
        self.btn_ok = QPushButton('Save', self)
self.btn_ok.move(10, 150)
self.btn_ok.clicked.connect(self.save_data)
        self.btn_cancel = QPushButton('Exit', self)
self.btn_cancel.move(90, 150)
self.btn_cancel.clicked.connect(self.close_windows)
self.messages = QMessageBox()
self.show()
def close_windows(self):
""" Закрытие окна """
super().close()
def save_data(self):
"""
Метод проверки правильности ввода и сохранения в базу нового
пользователя.
"""
if not self.client_name.text():
self.messages.critical(
                self, 'Error', 'No username specified.')
return
elif self.client_passwd.text() != self.client_conf.text():
self.messages.critical(
                self, 'Error', 'The entered passwords do not match.')
return
elif self.database.check_user(self.client_name.text()):
self.messages.critical(
                self, 'Error', 'This user already exists.')
return
else:
            # Generate the password hash; the lowercase login is used as
            # the salt.
passwd_bytes = self.client_passwd.text().encode('utf-8')
salt = self.client_name.text().lower().encode('utf-8')
passwd_hash = hashlib.pbkdf2_hmac(
'sha512', passwd_bytes, salt, 10000)
self.database.add_user(
self.client_name.text(),
binascii.hexlify(passwd_hash))
self.messages.information(
                self, 'Success', 'The user has been registered successfully.')
            # Tell all connected clients that they need to refresh their user lists
self.server.service_update_lists()
self.close()
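# A minimal verification sketch (not part of the original module) showing how
# a hash produced by save_data() above could be checked at login time. The
# function name and the `stored_hash_hex` argument are hypothetical.
def verify_password(login, password, stored_hash_hex):
    """Return True if `password` matches the stored PBKDF2 hash."""
    passwd_bytes = password.encode('utf-8')
    salt = login.lower().encode('utf-8')
    passwd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt, 10000)
    return binascii.hexlify(passwd_hash) == stored_hash_hex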
if __name__ == '__main__':
app = QApplication([])
app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
    dial = RegisterUser(None, None)
app.exec_()
/yt-dlp-cp-2.9.9.tar.gz/yt-dlp-cp-2.9.9/yt_dlp/extractor/netverse.py
import itertools
from .common import InfoExtractor, SearchInfoExtractor
from .dailymotion import DailymotionIE
from ..utils import smuggle_url, traverse_obj
class NetverseBaseIE(InfoExtractor):
_ENDPOINTS = {
'watch': 'watchvideo',
'video': 'watchvideo',
'webseries': 'webseries',
'season': 'webseason_videos',
}
def _call_api(self, slug, endpoint, query={}, season_id='', display_id=None):
return self._download_json(
f'https://api.netverse.id/medias/api/v2/{self._ENDPOINTS[endpoint]}/{slug}/{season_id}',
display_id or slug, query=query)
    def _get_comments(self, video_id):
        # Comments are paginated; keep requesting pages until the last
        # page reported by the API has been fetched.
        last_page_number = None
        for i in itertools.count(1):
comment_data = self._download_json(
f'https://api.netverse.id/mediadetails/api/v3/videos/comments/{video_id}',
video_id, data=b'', fatal=False, query={'page': i},
note=f'Downloading JSON comment metadata page {i}') or {}
yield from traverse_obj(comment_data, ('response', 'comments', 'data', ..., {
'id': '_id',
'text': 'comment',
'author_id': 'customer_id',
'author': ('customer', 'name'),
'author_thumbnail': ('customer', 'profile_picture'),
}))
if not last_page_number:
last_page_number = traverse_obj(comment_data, ('response', 'comments', 'last_page'))
if i >= (last_page_number or 0):
break
class NetverseIE(NetverseBaseIE):
_VALID_URL = r'https?://(?:\w+\.)?netverse\.id/(?P<type>watch|video)/(?P<display_id>[^/?#&]+)'
_TESTS = [{
# Watch video
'url': 'https://www.netverse.id/watch/waktu-indonesia-bercanda-edisi-spesial-lebaran-2016',
'info_dict': {
'id': 'k4yhqUwINAGtmHx3NkL',
'title': 'Waktu Indonesia Bercanda - Edisi Spesial Lebaran 2016',
'ext': 'mp4',
'season': 'Season 2016',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
'episode_number': 22,
'episode': 'Episode 22',
'uploader_id': 'x2ir3vq',
'age_limit': 0,
'tags': [],
'view_count': int,
'display_id': 'waktu-indonesia-bercanda-edisi-spesial-lebaran-2016',
'duration': 2990,
'upload_date': '20210722',
'timestamp': 1626919804,
'like_count': int,
'uploader': 'Net Prime',
}
}, {
# series
'url': 'https://www.netverse.id/watch/jadoo-seorang-model',
'info_dict': {
'id': 'x88izwc',
'title': 'Jadoo Seorang Model',
'ext': 'mp4',
'season': 'Season 2',
'description': 'md5:8a74f70812cca267e19ee0635f0af835',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
'episode_number': 2,
'episode': 'Episode 2',
'view_count': int,
'like_count': int,
'display_id': 'jadoo-seorang-model',
'uploader_id': 'x2ir3vq',
'duration': 635,
'timestamp': 1646372927,
'tags': ['PG069497-hellojadooseason2eps2'],
'upload_date': '20220304',
'uploader': 'Net Prime',
'age_limit': 0,
},
        'skip': 'This video is geo-blocked in some countries'
}, {
# non www host
'url': 'https://netverse.id/watch/tetangga-baru',
'info_dict': {
'id': 'k4CNGz7V0HJ7vfwZbXy',
'ext': 'mp4',
'title': 'Tetangga Baru',
'season': 'Season 1',
'description': 'md5:23fcf70e97d461d3029d25d59b2ccfb9',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
'episode_number': 1,
'episode': 'Episode 1',
'timestamp': 1624538169,
'view_count': int,
'upload_date': '20210624',
'age_limit': 0,
'uploader_id': 'x2ir3vq',
'like_count': int,
'uploader': 'Net Prime',
'tags': ['PG008534', 'tetangga', 'Baru'],
'display_id': 'tetangga-baru',
'duration': 1406,
},
}, {
# /video url
'url': 'https://www.netverse.id/video/pg067482-hellojadoo-season1',
'title': 'Namaku Choi Jadoo',
'info_dict': {
'id': 'x887jzz',
'ext': 'mp4',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
'season': 'Season 1',
'episode_number': 1,
'description': 'md5:d4f627b3e7a3f9acdc55f6cdd5ea41d5',
'title': 'Namaku Choi Jadoo',
'episode': 'Episode 1',
'age_limit': 0,
'like_count': int,
'view_count': int,
'tags': ['PG067482', 'PG067482-HelloJadoo-season1'],
'duration': 780,
'display_id': 'pg067482-hellojadoo-season1',
'uploader_id': 'x2ir3vq',
'uploader': 'Net Prime',
'timestamp': 1645764984,
'upload_date': '20220225',
},
        'skip': 'This video is geo-blocked in some countries'
}, {
# video with comments
'url': 'https://netverse.id/video/episode-1-season-2016-ok-food',
'info_dict': {
'id': 'k6hetBPiQMljSxxvAy7',
'ext': 'mp4',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
'display_id': 'episode-1-season-2016-ok-food',
'like_count': int,
'description': '',
'duration': 1471,
'age_limit': 0,
'timestamp': 1642405848,
'episode_number': 1,
'season': 'Season 2016',
'uploader_id': 'x2ir3vq',
'title': 'Episode 1 - Season 2016 - Ok Food',
'upload_date': '20220117',
'tags': [],
'view_count': int,
'episode': 'Episode 1',
'uploader': 'Net Prime',
'comment_count': int,
},
        'params': {
'getcomments': True
}
}, {
# video with multiple page comment
'url': 'https://netverse.id/video/match-island-eps-1-fix',
'info_dict': {
'id': 'x8aznjc',
'ext': 'mp4',
'like_count': int,
'tags': ['Match-Island', 'Pd00111'],
'display_id': 'match-island-eps-1-fix',
'view_count': int,
'episode': 'Episode 1',
'uploader': 'Net Prime',
'duration': 4070,
'timestamp': 1653068165,
'description': 'md5:e9cf3b480ad18e9c33b999e3494f223f',
'age_limit': 0,
'title': 'Welcome To Match Island',
'upload_date': '20220520',
'episode_number': 1,
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
'uploader_id': 'x2ir3vq',
'season': 'Season 1',
'comment_count': int,
},
        'params': {
'getcomments': True
}
}]
def _real_extract(self, url):
display_id, sites_type = self._match_valid_url(url).group('display_id', 'type')
program_json = self._call_api(display_id, sites_type)
videos = program_json['response']['videos']
return {
'_type': 'url_transparent',
'ie_key': DailymotionIE.ie_key(),
'url': smuggle_url(videos['dailymotion_url'], {'query': {'embedder': 'https://www.netverse.id'}}),
'display_id': display_id,
'title': videos.get('title'),
'season': videos.get('season_name'),
'thumbnail': traverse_obj(videos, ('program_detail', 'thumbnail_image')),
'description': traverse_obj(videos, ('program_detail', 'description')),
'episode_number': videos.get('episode_order'),
'__post_extractor': self.extract_comments(display_id),
}
class NetversePlaylistIE(NetverseBaseIE):
_VALID_URL = r'https?://(?:\w+\.)?netverse\.id/(?P<type>webseries)/(?P<display_id>[^/?#&]+)'
_TESTS = [{
# multiple season
'url': 'https://netverse.id/webseries/tetangga-masa-gitu',
'info_dict': {
'id': 'tetangga-masa-gitu',
'title': 'Tetangga Masa Gitu',
},
'playlist_count': 519,
}, {
# single season
'url': 'https://netverse.id/webseries/kelas-internasional',
'info_dict': {
'id': 'kelas-internasional',
'title': 'Kelas Internasional',
},
'playlist_count': 203,
}]
def parse_playlist(self, json_data, playlist_id):
slug_sample = traverse_obj(json_data, ('related', 'data', ..., 'slug'))[0]
for season in traverse_obj(json_data, ('seasons', ..., 'id')):
playlist_json = self._call_api(
slug_sample, 'season', display_id=playlist_id, season_id=season)
for current_page in range(playlist_json['response']['season_list']['last_page']):
playlist_json = self._call_api(slug_sample, 'season', query={'page': current_page + 1},
season_id=season, display_id=playlist_id)
for slug in traverse_obj(playlist_json, ('response', ..., 'data', ..., 'slug')):
yield self.url_result(f'https://www.netverse.id/video/{slug}', NetverseIE)
def _real_extract(self, url):
playlist_id, sites_type = self._match_valid_url(url).group('display_id', 'type')
playlist_data = self._call_api(playlist_id, sites_type)
return self.playlist_result(
self.parse_playlist(playlist_data['response'], playlist_id),
traverse_obj(playlist_data, ('response', 'webseries_info', 'slug')),
traverse_obj(playlist_data, ('response', 'webseries_info', 'title')))
class NetverseSearchIE(SearchInfoExtractor):
_SEARCH_KEY = 'netsearch'
_TESTS = [{
'url': 'netsearch10:tetangga',
'info_dict': {
'id': 'tetangga',
'title': 'tetangga',
},
'playlist_count': 10,
}]
def _search_results(self, query):
last_page = None
for i in itertools.count(1):
search_data = self._download_json(
'https://api.netverse.id/search/elastic/search', query,
query={'q': query, 'page': i}, note=f'Downloading page {i}')
videos = traverse_obj(search_data, ('response', 'data', ...))
for video in videos:
yield self.url_result(f'https://netverse.id/video/{video["slug"]}', NetverseIE)
last_page = last_page or traverse_obj(search_data, ('response', 'lastpage'))
if not videos or i >= (last_page or 0):
break
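# Illustrative note (not part of the original extractor): given one raw
# comment entry from the API such as
#   {'_id': '1', 'comment': 'nice', 'customer_id': '42',
#    'customer': {'name': 'A', 'profile_picture': 'p.jpg'}},
# the dict mapping passed to traverse_obj() in NetverseBaseIE._get_comments()
# reshapes it into
#   {'id': '1', 'text': 'nice', 'author_id': '42', 'author': 'A',
#    'author_thumbnail': 'p.jpg'}.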
/bpy_cuda-2.82-cp37-cp37m-win_amd64.whl/bpy_cuda-2.82.data/scripts/2.82/scripts/addons/system_blend_info.py
bl_info = {
"name": "Scene Information",
"author": "uselessdreamer",
"version": (0, 3, 1),
"blender": (2, 80, 0),
"location": "Properties > Scene > Blend Info Panel",
"description": "Show information about the .blend",
"warning": "",
"wiki_url": "https://docs.blender.org/manual/en/dev/addons/"
"system/blend_info.html",
"category": "System",
}
import bpy
def quantity_string(quantity, text_single, text_plural, text_none=None):
    sep = " "
    if not text_none:
        text_none = text_plural
    if quantity < 0:
        return None
    if quantity == 0:
        string = str(quantity) + sep + text_none
    elif quantity == 1:
        string = str(quantity) + sep + text_single
    else:
        string = str(quantity) + sep + text_plural
    return string
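# Illustrative expected outputs (not part of the original add-on):
#   quantity_string(0, "Mesh", "Meshes")  -> "0 Meshes"
#   quantity_string(1, "Mesh", "Meshes")  -> "1 Mesh"
#   quantity_string(5, "Mesh", "Meshes")  -> "5 Meshes"
#   quantity_string(-1, "Mesh", "Meshes") -> None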
class OBJECT_PT_blendinfo(bpy.types.Panel):
bl_label = "Blend Info"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "scene"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
ob_cols = []
db_cols = []
objects = bpy.data.objects
layout = self.layout
# OBJECTS
l_row = layout.row()
num = len(bpy.data.objects)
l_row.label(text=quantity_string(num, "Object", "Objects")
+ " in the scene:",
icon='OBJECT_DATA')
l_row = layout.row()
ob_cols.append(l_row.column())
ob_cols.append(l_row.column())
row = ob_cols[0].row()
meshes = [o for o in objects.values() if o.type == 'MESH']
num = len(meshes)
row.label(text=quantity_string(num, "Mesh", "Meshes"),
icon='MESH_DATA')
row = ob_cols[1].row()
curves = [o for o in objects.values() if o.type == 'CURVE']
num = len(curves)
row.label(text=quantity_string(num, "Curve", "Curves"),
icon='CURVE_DATA')
row = ob_cols[0].row()
cameras = [o for o in objects.values() if o.type == 'CAMERA']
num = len(cameras)
row.label(text=quantity_string(num, "Camera", "Cameras"),
icon='CAMERA_DATA')
row = ob_cols[1].row()
lamps = [o for o in objects.values() if o.type == 'LIGHT']
num = len(lamps)
row.label(text=quantity_string(num, "Lamp", "Lamps"),
icon='LIGHT_DATA')
row = ob_cols[0].row()
armatures = [o for o in objects.values() if o.type == 'ARMATURE']
num = len(armatures)
row.label(text=quantity_string(num, "Armature", "Armatures"),
icon='ARMATURE_DATA')
row = ob_cols[1].row()
lattices = [o for o in objects.values() if o.type == 'LATTICE']
num = len(lattices)
row.label(text=quantity_string(num, "Lattice", "Lattices"),
icon='LATTICE_DATA')
row = ob_cols[0].row()
empties = [o for o in objects.values() if o.type == 'EMPTY']
num = len(empties)
row.label(text=quantity_string(num, "Empty", "Empties"),
icon='EMPTY_DATA')
        row = ob_cols[1].row()
        speakers = [o for o in objects.values() if o.type == 'SPEAKER']
        num = len(speakers)
row.label(text=quantity_string(num, "Speaker", "Speakers"),
icon='OUTLINER_OB_SPEAKER')
layout.separator()
# DATABLOCKS
        l_row = layout.row()
        l_row.label(text="Datablocks in the scene:")
l_row = layout.row()
db_cols.append(l_row.column())
db_cols.append(l_row.column())
row = db_cols[0].row()
num = len(bpy.data.meshes)
row.label(text=quantity_string(num, "Mesh", "Meshes"),
icon='MESH_DATA')
row = db_cols[1].row()
num = len(bpy.data.curves)
row.label(text=quantity_string(num, "Curve", "Curves"),
icon='CURVE_DATA')
row = db_cols[0].row()
num = len(bpy.data.cameras)
row.label(text=quantity_string(num, "Camera", "Cameras"),
icon='CAMERA_DATA')
row = db_cols[1].row()
num = len(bpy.data.lights)
row.label(text=quantity_string(num, "Lamp", "Lamps"),
icon='LIGHT_DATA')
row = db_cols[0].row()
num = len(bpy.data.armatures)
row.label(text=quantity_string(num, "Armature", "Armatures"),
icon='ARMATURE_DATA')
row = db_cols[1].row()
num = len(bpy.data.lattices)
row.label(text=quantity_string(num, "Lattice", "Lattices"),
icon='LATTICE_DATA')
row = db_cols[0].row()
num = len(bpy.data.materials)
row.label(text=quantity_string(num, "Material", "Materials"),
icon='MATERIAL_DATA')
row = db_cols[1].row()
num = len(bpy.data.worlds)
row.label(text=quantity_string(num, "World", "Worlds"),
icon='WORLD_DATA')
row = db_cols[0].row()
num = len(bpy.data.textures)
row.label(text=quantity_string(num, "Texture", "Textures"),
icon='TEXTURE_DATA')
row = db_cols[1].row()
num = len(bpy.data.images)
row.label(text=quantity_string(num, "Image", "Images"),
icon='IMAGE_DATA')
row = db_cols[0].row()
num = len(bpy.data.texts)
row.label(text=quantity_string(num, "Text", "Texts"),
icon='TEXT')
# Register
classes = [
OBJECT_PT_blendinfo
]
def register():
from bpy.utils import register_class
for cls in classes:
register_class(cls)
def unregister():
from bpy.utils import unregister_class
for cls in reversed(classes):
unregister_class(cls)
if __name__ == "__main__":
register()
/ecreall_daceui-1.0.4.tar.gz/ecreall_daceui-1.0.4/daceui/static/flot/jquery.colorhelpers.js
(function($) {
$.color = {};
// construct color object with some convenient chainable helpers
$.color.make = function (r, g, b, a) {
var o = {};
o.r = r || 0;
o.g = g || 0;
o.b = b || 0;
o.a = a != null ? a : 1;
o.add = function (c, d) {
for (var i = 0; i < c.length; ++i)
o[c.charAt(i)] += d;
return o.normalize();
};
o.scale = function (c, f) {
for (var i = 0; i < c.length; ++i)
o[c.charAt(i)] *= f;
return o.normalize();
};
o.toString = function () {
if (o.a >= 1.0) {
return "rgb("+[o.r, o.g, o.b].join(",")+")";
} else {
return "rgba("+[o.r, o.g, o.b, o.a].join(",")+")";
}
};
o.normalize = function () {
function clamp(min, value, max) {
return value < min ? min: (value > max ? max: value);
}
o.r = clamp(0, parseInt(o.r), 255);
o.g = clamp(0, parseInt(o.g), 255);
o.b = clamp(0, parseInt(o.b), 255);
o.a = clamp(0, o.a, 1);
return o;
};
o.clone = function () {
            return $.color.make(o.r, o.g, o.b, o.a);
};
return o.normalize();
}
// extract CSS color property from element, going up in the DOM
// if it's "transparent"
$.color.extract = function (elem, css) {
var c;
do {
c = elem.css(css).toLowerCase();
// keep going until we find an element that has color, or
// we hit the body or root (have no parent)
if (c != '' && c != 'transparent')
break;
elem = elem.parent();
} while (elem.length && !$.nodeName(elem.get(0), "body"));
// catch Safari's way of signalling transparent
if (c == "rgba(0, 0, 0, 0)")
c = "transparent";
return $.color.parse(c);
}
// parse CSS color string (like "rgb(10, 32, 43)" or "#fff"),
// returns color object, if parsing failed, you get black (0, 0,
// 0) out
$.color.parse = function (str) {
var res, m = $.color.make;
// Look for rgb(num,num,num)
if (res = /rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(str))
return m(parseInt(res[1], 10), parseInt(res[2], 10), parseInt(res[3], 10));
// Look for rgba(num,num,num,num)
if (res = /rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))
return m(parseInt(res[1], 10), parseInt(res[2], 10), parseInt(res[3], 10), parseFloat(res[4]));
// Look for rgb(num%,num%,num%)
if (res = /rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(str))
return m(parseFloat(res[1])*2.55, parseFloat(res[2])*2.55, parseFloat(res[3])*2.55);
// Look for rgba(num%,num%,num%,num)
if (res = /rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))
return m(parseFloat(res[1])*2.55, parseFloat(res[2])*2.55, parseFloat(res[3])*2.55, parseFloat(res[4]));
// Look for #a0b1c2
if (res = /#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(str))
return m(parseInt(res[1], 16), parseInt(res[2], 16), parseInt(res[3], 16));
// Look for #fff
if (res = /#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(str))
return m(parseInt(res[1]+res[1], 16), parseInt(res[2]+res[2], 16), parseInt(res[3]+res[3], 16));
// Otherwise, we're most likely dealing with a named color
var name = $.trim(str).toLowerCase();
if (name == "transparent")
return m(255, 255, 255, 0);
else {
// default to black
res = lookupColors[name] || [0, 0, 0];
return m(res[0], res[1], res[2]);
}
}
var lookupColors = {
aqua:[0,255,255],
azure:[240,255,255],
beige:[245,245,220],
black:[0,0,0],
blue:[0,0,255],
brown:[165,42,42],
cyan:[0,255,255],
darkblue:[0,0,139],
darkcyan:[0,139,139],
darkgrey:[169,169,169],
darkgreen:[0,100,0],
darkkhaki:[189,183,107],
darkmagenta:[139,0,139],
darkolivegreen:[85,107,47],
darkorange:[255,140,0],
darkorchid:[153,50,204],
darkred:[139,0,0],
darksalmon:[233,150,122],
darkviolet:[148,0,211],
fuchsia:[255,0,255],
gold:[255,215,0],
green:[0,128,0],
indigo:[75,0,130],
khaki:[240,230,140],
lightblue:[173,216,230],
lightcyan:[224,255,255],
lightgreen:[144,238,144],
lightgrey:[211,211,211],
lightpink:[255,182,193],
lightyellow:[255,255,224],
lime:[0,255,0],
magenta:[255,0,255],
maroon:[128,0,0],
navy:[0,0,128],
olive:[128,128,0],
orange:[255,165,0],
pink:[255,192,203],
purple:[128,0,128],
violet:[128,0,128],
red:[255,0,0],
silver:[192,192,192],
white:[255,255,255],
yellow:[255,255,0]
};
})(jQuery);
/DeepGProp-1.0.8-py3-none-any.whl/dgp/ga_optimizer/toolbox.py
import random
import time
from os import environ
from typing import Callable, Tuple, Union
import numpy as np
from deap import base, creator, tools
from sklearn.metrics import accuracy_score, fbeta_score
from dgp.dgp_logger import DGPLOGGER
from dgp.ga_optimizer.types import Layer, MLPIndividual
from dgp.utils import Proben1Partition, Proben1Split
environ["KERAS_BACKEND"] = "theano"
# pylint: disable=C0411,C0413
from keras.layers import Dense # noqa: E402 # isort:skip
# pylint: disable=C0411,C0413
from keras.losses import ( # noqa: E402 # isort:skip
BinaryCrossentropy,
CategoricalCrossentropy,
)
# pylint: disable=C0411,C0413
from keras.models import Sequential # noqa: E402 # isort:skip
# pylint: disable=C0411,C0413
from keras.optimizers import SGD # noqa: E402 # isort:skip
def individual_initializer(
individual_class: Callable,
nin_nout: Tuple[int, int],
neuron_layer_ranges: Tuple[Tuple[int, int], Tuple[int, int]],
constant_hidden_layers: int,
):
"""Initialize an individual with uniform.
:param individual_class: individual class.
:param model_input: number of neurons in the input layer.
:param max_neurons: top limit for the random neurons generator.
:param max_layers: top limit for the random layers generator.
:param model_output: number of classes to predict.
layer configuration and weights..
"""
hidden_layers = np.random.randint(
neuron_layer_ranges[0][0],
neuron_layer_ranges[0][1] + 1,
random.randint(*neuron_layer_ranges[1]),
).tolist()
return individual_class(
nin_nout[0], hidden_layers, constant_hidden_layers, nin_nout[1]
)
def individual_evaluator(
individual: MLPIndividual, trn: Proben1Split, tst: Proben1Split, **kwargs
):
"""Evaluate an individual.
:param individual: current individual to evaluate.
:param trn: training data and labels.
:param tst: validation data and labels.
:param multi_class: ``True`` if the dataset is for multiclass
classification.
:returns: the fitness values.
"""
multi_class = kwargs.get("multi_class", False)
start_time = time.perf_counter()
units_size_list = [
layer.config["units"] for layer in individual.layers[:-1]
]
DGPLOGGER.debug(
f" Evaluating individual with neuron number: {units_size_list}"
)
# Create the model with the individual configuration
model = Sequential()
for layer_index, layer in enumerate(individual.layers):
model.add(Dense.from_config(layer.config))
model.layers[layer_index].set_weights([layer.weights, layer.bias])
model.compile(
optimizer=SGD(learning_rate=0.01),
loss=CategoricalCrossentropy()
if multi_class
else BinaryCrossentropy(),
)
model.fit(trn.X, trn.y_cat, epochs=100, batch_size=16, verbose=0)
# Predict the scores
predicted_y = model.predict_classes(tst.X)
f2_score = fbeta_score(
tst.y,
predicted_y,
beta=2,
average="micro" if multi_class else "binary",
)
error_perc = (
1.0 - accuracy_score(tst.y, predicted_y, normalize=True)
) * 100
neuron_layer_score = sum(units_size_list) * len(units_size_list)
DGPLOGGER.debug(
f" error%={error_perc:.2f}\n"
f" neuron/layer-score={neuron_layer_score:.2f}\n"
f" f2-score={f2_score:.5f}\n"
f" evaluation time={time.perf_counter() - start_time: .2f} sec"
)
return (error_perc, neuron_layer_score, f2_score)
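# A toy recomputation of the three fitness components returned above. The
# labels and layer sizes are made up; only the metric calls mirror the
# evaluator.
def _sketch_fitness_components():
    y_true = np.array([0, 1, 1, 0, 1])
    y_pred = np.array([0, 1, 0, 0, 1])
    f2_score = fbeta_score(y_true, y_pred, beta=2, average="binary")
    error_perc = (1.0 - accuracy_score(y_true, y_pred)) * 100
    units_size_list = [4, 3]  # hypothetical hidden layer sizes
    neuron_layer_score = sum(units_size_list) * len(units_size_list)
    return error_perc, neuron_layer_score, f2_score  # (20.0, 14, ~0.714)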
def crossover_operator(ind1: MLPIndividual, ind2: MLPIndividual):
"""Apply crossover betweent two individuals.
This method will swap neurons with two random points from a random layer.
The neurons associated bias and weights are swapped.
:param ind1: the first individual.
:param ind2: the second individual.
:returns: a tuple with the cross points and the crossed layer.
"""
# Choose randomly the layer index to swap. If the hidden layers of either
# individual are constant, swap neurons from the output layer instead.
layer_index = (
len(ind1) - 1
if ind1.constant_hidden_layers or ind2.constant_hidden_layers
else random.randint(0, len(ind1) - 1)
)
cx_pts = random.sample(range(len(ind1.layers[layer_index].bias)), 2)
(
ind1.layers[layer_index].weights[:, cx_pts[0] : cx_pts[1]],
ind2.layers[layer_index].weights[:, cx_pts[0] : cx_pts[1]],
) = (
ind2.layers[layer_index].weights[:, cx_pts[0] : cx_pts[1]].copy(),
ind1.layers[layer_index].weights[:, cx_pts[0] : cx_pts[1]].copy(),
)
(
ind1.layers[layer_index].bias[cx_pts[0] : cx_pts[1]],
ind2.layers[layer_index].bias[cx_pts[0] : cx_pts[1]],
) = (
ind2.layers[layer_index].bias[cx_pts[0] : cx_pts[1]].copy(),
ind1.layers[layer_index].bias[cx_pts[0] : cx_pts[1]].copy(),
)
return cx_pts, layer_index
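# The slice swap above, isolated on two toy weight matrices. The cut points
# are sorted here so the swapped slice is never empty; the shapes are
# arbitrary demo values.
def _sketch_column_swap():
    w1 = np.zeros((3, 4))
    w2 = np.ones((3, 4))
    cx_pts = sorted(random.sample(range(4), 2))
    w1[:, cx_pts[0] : cx_pts[1]], w2[:, cx_pts[0] : cx_pts[1]] = (
        w2[:, cx_pts[0] : cx_pts[1]].copy(),
        w1[:, cx_pts[0] : cx_pts[1]].copy(),
    )
    return w1, w2  # w1 now holds ones inside the swapped column range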
def layer_mutator(individual: MLPIndividual) -> int:
"""Add/remove one layer to the model.
Compute whether to append a new hidden layer or pop the last one.
:param individual: individual to mutate.
:return: wether the layer was added or removed.
"""
# Choose randomly to add or delete a layer. Only delete when the model has
# three or more layers (the output layer is included in the count), so at
# least one hidden layer always remains.
choice = 1 if len(individual) <= 2 else random.choice((-1, 1))
difference = 0
if choice > 0:
# Choose a random number of neurons
new_layer_output_neurons = random.randint(2, 5)
# Obtain current last hidden layer neuron number
previous_layer_output = individual.layers[-2].config["units"]
# Insert a new hidden layer into the individual
individual.append_hidden(
Layer.uniform(
name=f"Hidden{len(individual)}",
input_neurons=previous_layer_output,
output_neurons=new_layer_output_neurons,
)
)
# Obtain the differences between the new layer neurons and the output
# layer input neurons and apply necessary changes to this last one
output_layer_input_neurons = individual.layers[-1].weights.shape[0]
difference = new_layer_output_neurons - output_layer_input_neurons
# Add input neuron entries
if difference > 0:
next_layer_neurons = len(individual.layers[-1].bias)
individual.layers[-1].weights = np.append(
individual.layers[-1].weights,
np.random.uniform(-1.0, 1.0, (difference, next_layer_neurons)),
axis=0,
)
# Remove input neuron entries
elif difference < 0:
individual.layers[-1].weights = np.delete(
individual.layers[-1].weights,
slice(
output_layer_input_neurons + difference,
output_layer_input_neurons,
),
axis=0,
)
else:
# Obtain the predecessor output units and delete the chosen layer
removed_predecessor_units = individual.layers[-3].config["units"]
del individual.layers[-2]
# Calculate the difference between the predecessor layer and the output
# layer
output_layer_input_len = individual.layers[-1].weights.shape[0]
difference = removed_predecessor_units - output_layer_input_len
# Append the necessary input neuron entries
if difference > 0:
next_layer_neurons = len(individual.layers[-1].bias)
individual.layers[-1].weights = np.append(
individual.layers[-1].weights,
np.random.uniform(-0.5, 0.5, (difference, next_layer_neurons)),
axis=0,
)
# Remove the leftovers
elif difference < 0:
individual.layers[-1].weights = np.delete(
individual.layers[-1].weights,
slice(
output_layer_input_len + difference, output_layer_input_len
),
axis=0,
)
# Update output layer input neurons
individual.layers[-1].config["batch_input_shape"][1] += difference
return choice
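# The output layer bookkeeping above reduces to growing or shrinking the
# first axis of its weight matrix. A standalone sketch with toy shapes:
def _sketch_output_layer_resize():
    out_weights = np.random.uniform(-0.5, 0.5, (3, 2))  # 3 inputs, 2 outputs
    # Grow: a hypothetical new predecessor layer has 5 outputs, so add rows.
    difference = 5 - out_weights.shape[0]
    out_weights = np.append(
        out_weights, np.random.uniform(-0.5, 0.5, (difference, 2)), axis=0
    )
    # Shrink back: drop the trailing rows again.
    out_weights = np.delete(out_weights, slice(3, 5), axis=0)
    return out_weights.shape  # (3, 2)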
def neuron_mutator(individual: MLPIndividual) -> int:
"""Add/remove one neuron from a random hidden layer.
For a random layer append a new neuron or pop the last one.
:param individual: individual to mutate.
:returns: whether the neuron was added or removed.
"""
# We want to ignore the output layer so it only adds/pops from a hidden layer
layer_index = random.randint(0, len(individual) - 2)
# Choose randomly to add or delete a neuron. If the layer has two or fewer
# neurons, just add a new one.
choice = (
1
if len(individual.layers[layer_index].bias) <= 2
else random.choice((-1, 1))
)
if choice > 0:
# Get previous layer neurons as a reference for creating a new neuron
# for this layer
previous_layer_neurons = individual.layers[layer_index].weights.shape[
0
]
# Append a new neuron to the weights and bias of the chosen layer
individual.layers[layer_index].weights = np.append(
individual.layers[layer_index].weights,
np.random.uniform(-0.5, 0.5, (previous_layer_neurons, 1)),
axis=1,
)
individual.layers[layer_index].bias = np.append(
individual.layers[layer_index].bias,
[random.uniform(-0.5, 0.5)],
axis=0,
)
# Append a new input entry for the chosen layer in the following layer
next_layer_neurons = len(individual.layers[layer_index + 1].bias)
individual.layers[layer_index + 1].weights = np.append(
individual.layers[layer_index + 1].weights,
np.random.uniform(-0.5, 0.5, (1, next_layer_neurons)),
axis=0,
)
else:
# Remove last neuron weights and bias from the chosen layer
individual.layers[layer_index].weights = np.delete(
individual.layers[layer_index].weights, -1, axis=1
)
individual.layers[layer_index].bias = np.delete(
individual.layers[layer_index].bias, -1, axis=0
)
# Remove the input neuron from the next layer
individual.layers[layer_index + 1].weights = np.delete(
individual.layers[layer_index + 1].weights, -1, axis=0
)
# Update the units in the chosen and next layer config
individual.layers[layer_index].config["units"] += choice
individual.layers[layer_index + 1].config["batch_input_shape"][1] += choice
return choice
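# Adding a neuron always touches two matrices: a new output column in the
# mutated layer and a new input row in the next one. A toy sketch:
def _sketch_neuron_growth():
    w_k = np.zeros((3, 4))  # layer k: 3 inputs, 4 neurons
    w_next = np.zeros((4, 2))  # layer k+1: 4 inputs, 2 neurons
    w_k = np.append(w_k, np.random.uniform(-0.5, 0.5, (3, 1)), axis=1)
    w_next = np.append(w_next, np.random.uniform(-0.5, 0.5, (1, 2)), axis=0)
    return w_k.shape, w_next.shape  # (3, 5) and (5, 2)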
def weights_mutator(
individual: MLPIndividual, attribute: str, gen_prob: float
) -> int:
"""Mutate some individual weights genes.
For each layer weights or bias, obtain a random :class:`np.ndarray`(with
values in the range ``[0.0 and 1.0]``) with the same shape as the selected
attribute and mutate the genes that satisfy the ``gen_prob`` probability
with a value in the range ``[-0.5, 0.5]``
:param individual: individual to mutate.
:param attribute: attribute to mutate. Must be either ``weights`` or
``bias``.
:param gen_prob: probability of a gen to mutate.
:returns: number of genes mutated.
"""
mutated_genes = 0
layer_list = (
[individual.layers[-1]]
if individual.constant_hidden_layers
else individual.layers
)
for layer in layer_list:
weights = getattr(layer, attribute)
weights_shape = weights.shape
mask = np.random.rand(*weights_shape) < gen_prob
mutated_genes += np.count_nonzero(mask)
mutations = np.random.uniform(-0.5, 0.5, weights_shape)
mutations[~mask] = 0
weights += mutations
return mutated_genes
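# The masked in-place update is the core of the operator; on its own, with
# toy shapes and the same constants, it looks like this:
def _sketch_masked_mutation(gen_prob=0.5):
    weights = np.zeros((2, 3))
    mask = np.random.rand(*weights.shape) < gen_prob
    mutations = np.random.uniform(-0.5, 0.5, weights.shape)
    mutations[~mask] = 0  # untouched genes receive a zero delta
    weights += mutations
    return np.count_nonzero(mask)  # number of genes mutated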
# pylint: disable=no-member
def configure_toolbox(
dataset: Proben1Partition, **config: Union[float, bool, tuple]
):
r"""Register all neccesary objects and functions.
:param dataset: data to work with.
:param \**config: diversed configuration parameters.
:Keyword Arguments:
- *neurons_range* -- max and min values given for the neurons random
generator for each layer.
- *layers_range* -- max and min values given for the layers random
generator.
- *mut_bias_prob* -- probability to mutate the individual bias
genes.
- *mut_weights_prob* -- probability to mutate the individual weights
genes.
- *const_hidden_layers* -- ``True`` if no crossover or mutation can
be applied to the hidden layers.
:returns: the toolbox with the registered functions.
"""
# --------------------------------
# Individual registration
# --------------------------------
DGPLOGGER.debug("-- Register necessary functions and elements")
DGPLOGGER.debug("Register the fitness measure...")
creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -0.01, 0.5))
DGPLOGGER.debug("Register the individual...")
creator.create("Individual", MLPIndividual, fitness=creator.FitnessMulti)
toolbox = base.Toolbox()
DGPLOGGER.debug("Register the individual initializer...")
toolbox.register(
"individual",
individual_initializer,
creator.Individual,
(dataset.nin, dataset.nout),
(config["neurons_range"], config["layers_range"]),
config["const_hidden_layers"],
)
# define the population to be a list of individuals
DGPLOGGER.debug("Register the population initializer...")
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# --------------------------------
# Operator registration
# --------------------------------
DGPLOGGER.debug("Register the evaluator function...")
toolbox.register(
"evaluate",
individual_evaluator,
trn=dataset.trn,
tst=dataset.val,
multi_class=dataset.nout > 2,
)
DGPLOGGER.debug("Register the crossover operator...")
toolbox.register("crossover", crossover_operator)
DGPLOGGER.debug("Register the bias mutate operator...")
toolbox.register(
"mutate_bias",
weights_mutator,
attribute="bias",
gen_prob=config["mut_bias_prob"],
)
DGPLOGGER.debug("Register the weights mutate operator...")
toolbox.register(
"mutate_weights",
weights_mutator,
attribute="weights",
gen_prob=config["mut_weights_prob"],
)
DGPLOGGER.debug("register the neuron mutator operator")
toolbox.register("mutate_neuron", neuron_mutator)
DGPLOGGER.debug("register the layer mutator operator")
toolbox.register("mutate_layer", layer_mutator)
return toolbox
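# A minimal usage sketch of the resulting toolbox, assuming a loaded
# Proben1Partition named ``dataset``; every configuration value below is
# illustrative.
def _sketch_toolbox_usage(dataset):
    toolbox = configure_toolbox(
        dataset,
        neurons_range=(2, 5),
        layers_range=(1, 3),
        mut_bias_prob=0.2,
        mut_weights_prob=0.2,
        const_hidden_layers=False,
    )
    population = toolbox.population(n=10)
    for individual in population:
        individual.fitness.values = toolbox.evaluate(individual)
    # Pick the individual with the lowest error percentage component.
    return min(population, key=lambda ind: ind.fitness.values[0])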
|
PypiClean
|
/tencentcloud-python-sdk-3.0.960.tar.gz/tencentcloud-python-sdk-3.0.960/tencentcloud/faceid/v20180301/models.py
|
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class BankCard2EVerificationRequest(AbstractModel):
"""BankCard2EVerification请求参数结构体
"""
def __init__(self):
r"""
:param _Name: 姓名
:type Name: str
:param _BankCard: 银行卡
:type BankCard: str
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、银行卡号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._Name = None
self._BankCard = None
self._Encryption = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def BankCard(self):
return self._BankCard
@BankCard.setter
def BankCard(self, BankCard):
self._BankCard = BankCard
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._Name = params.get("Name")
self._BankCard = params.get("BankCard")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class BankCard2EVerificationResponse(AbstractModel):
"""BankCard2EVerification返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码
计费结果码:
'0': '认证通过'
'-1': '认证未通过'
'-4': '持卡人信息有误'
'-5': '未开通无卡支付'
'-6': '此卡被没收'
'-7': '无效卡号'
'-8': '此卡无对应发卡行'
'-9': '该卡未初始化或睡眠卡'
'-10': '作弊卡、吞卡'
'-11': '此卡已挂失'
'-12': '该卡已过期'
'-13': '受限制的卡'
'-14': '密码错误次数超限'
'-15': '发卡行不支持此交易'
不计费结果码:
'-2': '姓名校验不通过'
'-3': '银行卡号码有误'
'-16': '验证中心服务繁忙'
'-17': '验证次数超限,请次日重试'
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
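# All models in this module follow the same pattern: private attributes,
# property accessors, and a _deserialize that copies matching keys from a
# response dict. A sketch of that round trip with a made-up payload:
def _sketch_response_deserialize():
    resp = BankCard2EVerificationResponse()
    resp._deserialize({
        "Result": "0",
        "Description": "placeholder description",
        "RequestId": "hypothetical-request-id",
    })
    return resp.Result, resp.Description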
class BankCard4EVerificationRequest(AbstractModel):
"""BankCard4EVerification请求参数结构体
"""
def __init__(self):
r"""
:param _Name: 姓名
:type Name: str
:param _BankCard: 银行卡
:type BankCard: str
:param _Phone: 手机号码
:type Phone: str
:param _IdCard: 开户证件号,与CertType参数的证件类型一致,如:身份证,则传入身份证号。
:type IdCard: str
:param _CertType: 证件类型,请确认该证件为开户时使用的证件类型,未用于开户的证件信息不支持验证。
目前默认为0:身份证,其他证件类型暂不支持。
:type CertType: int
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号、手机号、银行卡号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._Name = None
self._BankCard = None
self._Phone = None
self._IdCard = None
self._CertType = None
self._Encryption = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def BankCard(self):
return self._BankCard
@BankCard.setter
def BankCard(self, BankCard):
self._BankCard = BankCard
@property
def Phone(self):
return self._Phone
@Phone.setter
def Phone(self, Phone):
self._Phone = Phone
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def CertType(self):
return self._CertType
@CertType.setter
def CertType(self, CertType):
self._CertType = CertType
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._Name = params.get("Name")
self._BankCard = params.get("BankCard")
self._Phone = params.get("Phone")
self._IdCard = params.get("IdCard")
self._CertType = params.get("CertType")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class BankCard4EVerificationResponse(AbstractModel):
"""BankCard4EVerification返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码
收费结果码:
'0': '认证通过'
'-1': '认证未通过'
'-6': '持卡人信息有误'
'-7': '未开通无卡支付'
'-8': '此卡被没收'
'-9': '无效卡号'
'-10': '此卡无对应发卡行'
'-11': '该卡未初始化或睡眠卡'
'-12': '作弊卡、吞卡'
'-13': '此卡已挂失'
'-14': '该卡已过期'
'-15': '受限制的卡'
'-16': '密码错误次数超限'
'-17': '发卡行不支持此交易'
不收费结果码:
'-2': '姓名校验不通过'
'-3': '身份证号码有误'
'-4': '银行卡号码有误'
'-5': '手机号码不合法'
'-18': '验证中心服务繁忙'
'-19': '验证次数超限,请次日重试'
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
class BankCardVerificationRequest(AbstractModel):
"""BankCardVerification请求参数结构体
"""
def __init__(self):
r"""
:param _IdCard: 开户证件号,与CertType参数的证件类型一致,如:身份证,则传入身份证号。
:type IdCard: str
:param _Name: 姓名
:type Name: str
:param _BankCard: 银行卡
:type BankCard: str
:param _CertType: 证件类型,请确认该证件为开户时使用的证件类型,未用于开户的证件信息不支持验证。
目前默认:0 身份证,其他证件类型暂不支持。
:type CertType: int
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号、银行卡号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._IdCard = None
self._Name = None
self._BankCard = None
self._CertType = None
self._Encryption = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def BankCard(self):
return self._BankCard
@BankCard.setter
def BankCard(self, BankCard):
self._BankCard = BankCard
@property
def CertType(self):
return self._CertType
@CertType.setter
def CertType(self, CertType):
self._CertType = CertType
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._BankCard = params.get("BankCard")
self._CertType = params.get("CertType")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class BankCardVerificationResponse(AbstractModel):
"""BankCardVerification返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码
收费结果码:
'0': '认证通过'
'-1': '认证未通过'
'-5': '持卡人信息有误'
'-6': '未开通无卡支付'
'-7': '此卡被没收'
'-8': '无效卡号'
'-9': '此卡无对应发卡行'
'-10': '该卡未初始化或睡眠卡'
'-11': '作弊卡、吞卡'
'-12': '此卡已挂失'
'-13': '该卡已过期'
'-14': '受限制的卡'
'-15': '密码错误次数超限'
'-16': '发卡行不支持此交易'
不收费结果码:
'-2': '姓名校验不通过'
'-3': '身份证号码有误'
'-4': '银行卡号码有误'
'-17': '验证中心服务繁忙'
'-18': '验证次数超限,请次日重试'
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
class ChargeDetail(AbstractModel):
"""计费详情
"""
def __init__(self):
r"""
:param _ReqTime: 一比一时间时间戳,13位。
:type ReqTime: str
:param _Seq: 一比一请求的唯一标记。
:type Seq: str
:param _Idcard: 一比一时使用的、脱敏后的身份证号。
:type Idcard: str
:param _Name: 一比一时使用的、脱敏后的姓名。
:type Name: str
:param _Sim: 一比一的相似度。0-100,保留2位小数。
:type Sim: str
:param _IsNeedCharge: 本次详情是否收费。
:type IsNeedCharge: bool
:param _ChargeType: 收费类型,比对、核身、混合部署。
:type ChargeType: str
:param _ErrorCode: 本次活体一比一最终结果。
:type ErrorCode: str
:param _ErrorMessage: 本次活体一比一最终结果描述。
:type ErrorMessage: str
"""
self._ReqTime = None
self._Seq = None
self._Idcard = None
self._Name = None
self._Sim = None
self._IsNeedCharge = None
self._ChargeType = None
self._ErrorCode = None
self._ErrorMessage = None
@property
def ReqTime(self):
return self._ReqTime
@ReqTime.setter
def ReqTime(self, ReqTime):
self._ReqTime = ReqTime
@property
def Seq(self):
return self._Seq
@Seq.setter
def Seq(self, Seq):
self._Seq = Seq
@property
def Idcard(self):
return self._Idcard
@Idcard.setter
def Idcard(self, Idcard):
self._Idcard = Idcard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Sim(self):
return self._Sim
@Sim.setter
def Sim(self, Sim):
self._Sim = Sim
@property
def IsNeedCharge(self):
return self._IsNeedCharge
@IsNeedCharge.setter
def IsNeedCharge(self, IsNeedCharge):
self._IsNeedCharge = IsNeedCharge
@property
def ChargeType(self):
return self._ChargeType
@ChargeType.setter
def ChargeType(self, ChargeType):
self._ChargeType = ChargeType
@property
def ErrorCode(self):
return self._ErrorCode
@ErrorCode.setter
def ErrorCode(self, ErrorCode):
self._ErrorCode = ErrorCode
@property
def ErrorMessage(self):
return self._ErrorMessage
@ErrorMessage.setter
def ErrorMessage(self, ErrorMessage):
self._ErrorMessage = ErrorMessage
def _deserialize(self, params):
self._ReqTime = params.get("ReqTime")
self._Seq = params.get("Seq")
self._Idcard = params.get("Idcard")
self._Name = params.get("Name")
self._Sim = params.get("Sim")
self._IsNeedCharge = params.get("IsNeedCharge")
self._ChargeType = params.get("ChargeType")
self._ErrorCode = params.get("ErrorCode")
self._ErrorMessage = params.get("ErrorMessage")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class CheckBankCardInformationRequest(AbstractModel):
"""CheckBankCardInformation请求参数结构体
"""
def __init__(self):
r"""
:param _BankCard: 银行卡号。
:type BankCard: str
:param _Encryption: 敏感数据加密信息。对传入信息(银行卡号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._BankCard = None
self._Encryption = None
@property
def BankCard(self):
return self._BankCard
@BankCard.setter
def BankCard(self, BankCard):
self._BankCard = BankCard
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._BankCard = params.get("BankCard")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class CheckBankCardInformationResponse(AbstractModel):
"""CheckBankCardInformation返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码,收费情况如下。
收费结果码:
0: 查询成功
-1: 未查到信息
不收费结果码:
-2:验证中心服务繁忙
-3:银行卡不存在
:type Result: str
:param _Description: 业务结果描述
:type Description: str
:param _AccountBank: 开户行
:type AccountBank: str
:param _AccountType: 卡性质:1. 借记卡;2. 贷记卡;3. 预付费卡;4. 准贷记卡
:type AccountType: int
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._AccountBank = None
self._AccountType = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def AccountBank(self):
return self._AccountBank
@AccountBank.setter
def AccountBank(self, AccountBank):
self._AccountBank = AccountBank
@property
def AccountType(self):
return self._AccountType
@AccountType.setter
def AccountType(self, AccountType):
self._AccountType = AccountType
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._AccountBank = params.get("AccountBank")
self._AccountType = params.get("AccountType")
self._RequestId = params.get("RequestId")
class CheckEidTokenStatusRequest(AbstractModel):
"""CheckEidTokenStatus请求参数结构体
"""
def __init__(self):
r"""
:param _EidToken: E证通流程的唯一标识,调用GetEidToken接口时生成。
:type EidToken: str
"""
self._EidToken = None
@property
def EidToken(self):
return self._EidToken
@EidToken.setter
def EidToken(self, EidToken):
self._EidToken = EidToken
def _deserialize(self, params):
self._EidToken = params.get("EidToken")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class CheckEidTokenStatusResponse(AbstractModel):
"""CheckEidTokenStatus返回参数结构体
"""
def __init__(self):
r"""
:param _Status: 枚举:
init:token未验证
doing: 验证中
finished: 验证完成
timeout: token已超时
:type Status: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Status = None
self._RequestId = None
@property
def Status(self):
return self._Status
@Status.setter
def Status(self, Status):
self._Status = Status
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Status = params.get("Status")
self._RequestId = params.get("RequestId")
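# The token status is a small enum, so callers usually poll until it leaves
# the transient states. A sketch; the client object, polling cadence and
# retry budget are all assumptions, not part of this module.
def _sketch_poll_eid_token(client, eid_token, interval=2.0, max_tries=30):
    import time
    for _ in range(max_tries):
        req = CheckEidTokenStatusRequest()
        req.EidToken = eid_token
        status = client.CheckEidTokenStatus(req).Status
        if status in ("finished", "timeout"):
            return status
        time.sleep(interval)  # still "init" or "doing"
    return "timeout"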
class CheckIdCardInformationRequest(AbstractModel):
"""CheckIdCardInformation请求参数结构体
"""
def __init__(self):
r"""
:param _ImageBase64: 身份证人像面的 Base64 值
支持的图片格式:PNG、JPG、JPEG,暂不支持 GIF 格式。
支持的图片大小:所下载图片经Base64编码后不超过 7M。
请使用标准的Base64编码方式(带=补位),编码规范参考RFC4648。
ImageBase64、ImageUrl二者必须提供其中之一。若都提供了,则按照ImageUrl>ImageBase64的优先级使用参数。
:type ImageBase64: str
:param _ImageUrl: 身份证人像面的 Url 地址
支持的图片格式:PNG、JPG、JPEG,暂不支持 GIF 格式。
支持的图片大小:所下载图片经 Base64 编码后不超过 3M。图片下载时间不超过 3 秒。
图片存储于腾讯云的 Url 可保障更高的下载速度和稳定性,建议图片存储于腾讯云。
非腾讯云存储的 Url 速度和稳定性可能受一定影响。
:type ImageUrl: str
:param _Config: 以下可选字段均为bool 类型,默认false:
CopyWarn,复印件告警
BorderCheckWarn,边框和框内遮挡告警
ReshootWarn,翻拍告警
DetectPsWarn,PS检测告警(疑似存在PS痕迹)
TempIdWarn,临时身份证告警
Quality,图片质量告警(评价图片模糊程度)
SDK 设置方式参考:
Config = Json.stringify({"CopyWarn":true,"ReshootWarn":true})
API 3.0 Explorer 设置方式参考:
Config = {"CopyWarn":true,"ReshootWarn":true}
:type Config: str
:param _IsEncrypt: 是否需要对返回中的敏感信息进行加密。默认false。
其中敏感信息包括:Response.IdNum、Response.Name
:type IsEncrypt: bool
"""
self._ImageBase64 = None
self._ImageUrl = None
self._Config = None
self._IsEncrypt = None
@property
def ImageBase64(self):
return self._ImageBase64
@ImageBase64.setter
def ImageBase64(self, ImageBase64):
self._ImageBase64 = ImageBase64
@property
def ImageUrl(self):
return self._ImageUrl
@ImageUrl.setter
def ImageUrl(self, ImageUrl):
self._ImageUrl = ImageUrl
@property
def Config(self):
return self._Config
@Config.setter
def Config(self, Config):
self._Config = Config
@property
def IsEncrypt(self):
return self._IsEncrypt
@IsEncrypt.setter
def IsEncrypt(self, IsEncrypt):
self._IsEncrypt = IsEncrypt
def _deserialize(self, params):
self._ImageBase64 = params.get("ImageBase64")
self._ImageUrl = params.get("ImageUrl")
self._Config = params.get("Config")
self._IsEncrypt = params.get("IsEncrypt")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class CheckIdCardInformationResponse(AbstractModel):
"""CheckIdCardInformation返回参数结构体
"""
def __init__(self):
r"""
:param _Sim: 相似度,取值范围 [0.00, 100.00]。推荐相似度大于等于70时可判断为同一人,可根据具体场景自行调整阈值(阈值70的误通过率为千分之一,阈值80的误通过率是万分之一)
:type Sim: float
:param _Result: 业务错误码,成功情况返回Success, 错误情况请参考下方错误码 列表中FailedOperation部分
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _Name: 姓名
:type Name: str
:param _Sex: 性别
:type Sex: str
:param _Nation: 民族
:type Nation: str
:param _Birth: 出生日期
:type Birth: str
:param _Address: 地址
:type Address: str
:param _IdNum: 身份证号
:type IdNum: str
:param _Portrait: 身份证头像照片的base64编码,如果抠图失败会拿整张身份证做比对并返回空。
:type Portrait: str
:param _Warnings: 告警信息,当在Config中配置了告警信息会停止人像比对,Result返回错误(FailedOperation.OcrWarningOccurred)并有此告警信息,Code 告警码列表和释义:
-9101 身份证边框不完整告警,
-9102 身份证复印件告警,
-9103 身份证翻拍告警,
-9105 身份证框内遮挡告警,
-9104 临时身份证告警,
-9106 身份证 PS 告警(疑似存在PS痕迹)。
-8001 图片模糊告警
多个会 | 隔开如 "-9101|-9106|-9104"
:type Warnings: str
:param _Quality: 图片质量分数,当请求Config中配置图片模糊告警该参数才有意义,取值范围(0~100),目前默认阈值是50分,低于50分会触发模糊告警。
:type Quality: float
:param _Encryption: 敏感数据加密信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Sim = None
self._Result = None
self._Description = None
self._Name = None
self._Sex = None
self._Nation = None
self._Birth = None
self._Address = None
self._IdNum = None
self._Portrait = None
self._Warnings = None
self._Quality = None
self._Encryption = None
self._RequestId = None
@property
def Sim(self):
return self._Sim
@Sim.setter
def Sim(self, Sim):
self._Sim = Sim
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Sex(self):
return self._Sex
@Sex.setter
def Sex(self, Sex):
self._Sex = Sex
@property
def Nation(self):
return self._Nation
@Nation.setter
def Nation(self, Nation):
self._Nation = Nation
@property
def Birth(self):
return self._Birth
@Birth.setter
def Birth(self, Birth):
self._Birth = Birth
@property
def Address(self):
return self._Address
@Address.setter
def Address(self, Address):
self._Address = Address
@property
def IdNum(self):
return self._IdNum
@IdNum.setter
def IdNum(self, IdNum):
self._IdNum = IdNum
@property
def Portrait(self):
return self._Portrait
@Portrait.setter
def Portrait(self, Portrait):
self._Portrait = Portrait
@property
def Warnings(self):
return self._Warnings
@Warnings.setter
def Warnings(self, Warnings):
self._Warnings = Warnings
@property
def Quality(self):
return self._Quality
@Quality.setter
def Quality(self, Quality):
self._Quality = Quality
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Sim = params.get("Sim")
self._Result = params.get("Result")
self._Description = params.get("Description")
self._Name = params.get("Name")
self._Sex = params.get("Sex")
self._Nation = params.get("Nation")
self._Birth = params.get("Birth")
self._Address = params.get("Address")
self._IdNum = params.get("IdNum")
self._Portrait = params.get("Portrait")
self._Warnings = params.get("Warnings")
self._Quality = params.get("Quality")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
self._RequestId = params.get("RequestId")
class CheckIdNameDateRequest(AbstractModel):
"""CheckIdNameDate请求参数结构体
"""
def __init__(self):
r"""
:param _Name: 姓名
:type Name: str
:param _IdCard: 身份证号
:type IdCard: str
:param _ValidityBegin: 身份证有效期开始时间,格式:YYYYMMDD。如:20210701
:type ValidityBegin: str
:param _ValidityEnd: 身份证有效期到期时间,格式:YYYYMMDD,长期用“00000000”代替;如:20210701
:type ValidityEnd: str
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._Name = None
self._IdCard = None
self._ValidityBegin = None
self._ValidityEnd = None
self._Encryption = None
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def ValidityBegin(self):
return self._ValidityBegin
@ValidityBegin.setter
def ValidityBegin(self, ValidityBegin):
self._ValidityBegin = ValidityBegin
@property
def ValidityEnd(self):
return self._ValidityEnd
@ValidityEnd.setter
def ValidityEnd(self, ValidityEnd):
self._ValidityEnd = ValidityEnd
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._Name = params.get("Name")
self._IdCard = params.get("IdCard")
self._ValidityBegin = params.get("ValidityBegin")
self._ValidityEnd = params.get("ValidityEnd")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class CheckIdNameDateResponse(AbstractModel):
"""CheckIdNameDate返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码,收费情况如下。
收费结果码:
0: 一致
-1: 不一致
不收费结果码:
-2: 非法身份证号(长度、校验位等不正确)
-3: 非法姓名(长度、格式等不正确)
-4: 非法有效期(长度、格式等不正确)
-5: 身份信息无效
-6: 证件库服务异常
-7: 证件库中无此身份证记录
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
class CheckPhoneAndNameRequest(AbstractModel):
"""CheckPhoneAndName请求参数结构体
"""
def __init__(self):
r"""
:param _Mobile: ⼿机号
:type Mobile: str
:param _Name: 姓名
:type Name: str
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、手机号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._Mobile = None
self._Name = None
self._Encryption = None
@property
def Mobile(self):
return self._Mobile
@Mobile.setter
def Mobile(self, Mobile):
self._Mobile = Mobile
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._Mobile = params.get("Mobile")
self._Name = params.get("Name")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class CheckPhoneAndNameResponse(AbstractModel):
"""CheckPhoneAndName返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码,收费情况如下。
收费结果码:
0: 验证结果一致
1: 验证结果不一致
不收费结果码:
-1:查无记录
-2:引擎未知错误
-3:引擎服务异常
:type Result: str
:param _Description: 业务结果描述
:type Description: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
class DetectAuthRequest(AbstractModel):
"""DetectAuth请求参数结构体
"""
def __init__(self):
r"""
:param _RuleId: 用于细分客户使用场景,申请开通服务后,可以在腾讯云慧眼人脸核身控制台(https://console.cloud.tencent.com/faceid) 自助接入里面创建,审核通过后即可调用。如有疑问,请添加[腾讯云人脸核身小助手](https://cloud.tencent.com/document/product/1007/56130)进行咨询。
:type RuleId: str
:param _TerminalType: 本接口不需要传递此参数。
:type TerminalType: str
:param _IdCard: 身份标识(未使用OCR服务时,必须传入)。
规则:a-zA-Z0-9组合。最长长度32位。
:type IdCard: str
:param _Name: 姓名。(未使用OCR服务时,必须传入)最长长度32位。中文请使用UTF-8编码。
:type Name: str
:param _RedirectUrl: 认证结束后重定向的回调链接地址。最长长度1024位。
:type RedirectUrl: str
:param _Extra: 透传字段,在获取验证结果时返回。
:type Extra: str
:param _ImageBase64: 用于人脸比对的照片,图片的Base64值;
Base64编码后的图片数据大小不超过3M,仅支持jpg、png格式。请使用标准的Base64编码方式(带=补位),编码规范参考RFC4648。
:type ImageBase64: str
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
:param _IntentionVerifyText: 意愿核身(朗读模式)使用的文案,若未使用意愿核身(朗读模式),则该字段无需传入。默认为空,最长可接受120的字符串长度。
:type IntentionVerifyText: str
:param _IntentionQuestions: 意愿核身(问答模式)使用的文案,包括:系统语音播报的文本、需要核验的标准文本。当前仅支持一个播报文本+回答文本。
:type IntentionQuestions: list of IntentionQuestion
:param _Config: RuleId相关配置
:type Config: :class:`tencentcloud.faceid.v20180301.models.RuleIdConfig`
"""
self._RuleId = None
self._TerminalType = None
self._IdCard = None
self._Name = None
self._RedirectUrl = None
self._Extra = None
self._ImageBase64 = None
self._Encryption = None
self._IntentionVerifyText = None
self._IntentionQuestions = None
self._Config = None
@property
def RuleId(self):
return self._RuleId
@RuleId.setter
def RuleId(self, RuleId):
self._RuleId = RuleId
@property
def TerminalType(self):
return self._TerminalType
@TerminalType.setter
def TerminalType(self, TerminalType):
self._TerminalType = TerminalType
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def RedirectUrl(self):
return self._RedirectUrl
@RedirectUrl.setter
def RedirectUrl(self, RedirectUrl):
self._RedirectUrl = RedirectUrl
@property
def Extra(self):
return self._Extra
@Extra.setter
def Extra(self, Extra):
self._Extra = Extra
@property
def ImageBase64(self):
return self._ImageBase64
@ImageBase64.setter
def ImageBase64(self, ImageBase64):
self._ImageBase64 = ImageBase64
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
@property
def IntentionVerifyText(self):
return self._IntentionVerifyText
@IntentionVerifyText.setter
def IntentionVerifyText(self, IntentionVerifyText):
self._IntentionVerifyText = IntentionVerifyText
@property
def IntentionQuestions(self):
return self._IntentionQuestions
@IntentionQuestions.setter
def IntentionQuestions(self, IntentionQuestions):
self._IntentionQuestions = IntentionQuestions
@property
def Config(self):
return self._Config
@Config.setter
def Config(self, Config):
self._Config = Config
def _deserialize(self, params):
self._RuleId = params.get("RuleId")
self._TerminalType = params.get("TerminalType")
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._RedirectUrl = params.get("RedirectUrl")
self._Extra = params.get("Extra")
self._ImageBase64 = params.get("ImageBase64")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
self._IntentionVerifyText = params.get("IntentionVerifyText")
if params.get("IntentionQuestions") is not None:
self._IntentionQuestions = []
for item in params.get("IntentionQuestions"):
obj = IntentionQuestion()
obj._deserialize(item)
self._IntentionQuestions.append(obj)
if params.get("Config") is not None:
self._Config = RuleIdConfig()
self._Config._deserialize(params.get("Config"))
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DetectAuthResponse(AbstractModel):
"""DetectAuth返回参数结构体
"""
def __init__(self):
r"""
:param _Url: 用于发起核身流程的URL,仅微信H5场景使用。
:type Url: str
:param _BizToken: 一次核身流程的标识,有效时间为7,200秒;
完成核身后,可用该标识获取验证结果信息。
:type BizToken: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Url = None
self._BizToken = None
self._RequestId = None
@property
def Url(self):
return self._Url
@Url.setter
def Url(self, Url):
self._Url = Url
@property
def BizToken(self):
return self._BizToken
@BizToken.setter
def BizToken(self, BizToken):
self._BizToken = BizToken
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Url = params.get("Url")
self._BizToken = params.get("BizToken")
self._RequestId = params.get("RequestId")
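# A hedged sketch of how DetectAuth is typically invoked through the SDK
# client; the credential values, region and RuleId are placeholders, and the
# client import path follows the usual tencentcloud SDK layout.
def _sketch_detect_auth():
    from tencentcloud.common import credential
    from tencentcloud.faceid.v20180301 import faceid_client
    cred = credential.Credential("SECRET_ID", "SECRET_KEY")  # placeholders
    client = faceid_client.FaceidClient(cred, "ap-guangzhou")
    req = DetectAuthRequest()
    req.RuleId = "your-rule-id"  # created in the FaceID console
    resp = client.DetectAuth(req)
    return resp.BizToken  # keep this to fetch the result later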
class DetectDetail(AbstractModel):
"""活体一比一详情
"""
def __init__(self):
r"""
:param _ReqTime: 请求时间戳。
注意:此字段可能返回 null,表示取不到有效值。
:type ReqTime: str
:param _Seq: 本次活体一比一请求的唯一标记。
注意:此字段可能返回 null,表示取不到有效值。
:type Seq: str
:param _Idcard: 参与本次活体一比一的身份证号。
注意:此字段可能返回 null,表示取不到有效值。
:type Idcard: str
:param _Name: 参与本次活体一比一的姓名。
注意:此字段可能返回 null,表示取不到有效值。
:type Name: str
:param _Sim: 本次活体一比一的相似度。
注意:此字段可能返回 null,表示取不到有效值。
:type Sim: str
:param _IsNeedCharge: 本次活体一比一是否收费
注意:此字段可能返回 null,表示取不到有效值。
:type IsNeedCharge: bool
:param _Errcode: 本次活体一比一最终结果。0为成功
注意:此字段可能返回 null,表示取不到有效值。
:type Errcode: int
:param _Errmsg: 本次活体一比一最终结果描述。(仅描述用,文案更新时不会通知。)
注意:此字段可能返回 null,表示取不到有效值。
:type Errmsg: str
:param _Livestatus: 本次活体结果。0为成功
注意:此字段可能返回 null,表示取不到有效值。
:type Livestatus: int
:param _Livemsg: 本次活体结果描述。(仅描述用,文案更新时不会通知。)
注意:此字段可能返回 null,表示取不到有效值。
:type Livemsg: str
:param _Comparestatus: 本次一比一结果。0为成功
注意:此字段可能返回 null,表示取不到有效值。
:type Comparestatus: int
:param _Comparemsg: 本次一比一结果描述。(仅描述用,文案更新时不会通知。)
注意:此字段可能返回 null,表示取不到有效值。
:type Comparemsg: str
:param _CompareLibType: 比对库源类型。包括:
公安商业库;
业务方自有库(用户上传照片、客户的混合库、混合部署库);
二次验证库;
人工审核库;
注意:此字段可能返回 null,表示取不到有效值。
:type CompareLibType: str
:param _LivenessMode: 枚举活体检测类型:
0:未知
1:数字活体
2:动作活体
3:静默活体
4:一闪活体(动作+光线)
注意:此字段可能返回 null,表示取不到有效值。
:type LivenessMode: int
"""
self._ReqTime = None
self._Seq = None
self._Idcard = None
self._Name = None
self._Sim = None
self._IsNeedCharge = None
self._Errcode = None
self._Errmsg = None
self._Livestatus = None
self._Livemsg = None
self._Comparestatus = None
self._Comparemsg = None
self._CompareLibType = None
self._LivenessMode = None
@property
def ReqTime(self):
return self._ReqTime
@ReqTime.setter
def ReqTime(self, ReqTime):
self._ReqTime = ReqTime
@property
def Seq(self):
return self._Seq
@Seq.setter
def Seq(self, Seq):
self._Seq = Seq
@property
def Idcard(self):
return self._Idcard
@Idcard.setter
def Idcard(self, Idcard):
self._Idcard = Idcard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Sim(self):
return self._Sim
@Sim.setter
def Sim(self, Sim):
self._Sim = Sim
@property
def IsNeedCharge(self):
return self._IsNeedCharge
@IsNeedCharge.setter
def IsNeedCharge(self, IsNeedCharge):
self._IsNeedCharge = IsNeedCharge
@property
def Errcode(self):
return self._Errcode
@Errcode.setter
def Errcode(self, Errcode):
self._Errcode = Errcode
@property
def Errmsg(self):
return self._Errmsg
@Errmsg.setter
def Errmsg(self, Errmsg):
self._Errmsg = Errmsg
@property
def Livestatus(self):
return self._Livestatus
@Livestatus.setter
def Livestatus(self, Livestatus):
self._Livestatus = Livestatus
@property
def Livemsg(self):
return self._Livemsg
@Livemsg.setter
def Livemsg(self, Livemsg):
self._Livemsg = Livemsg
@property
def Comparestatus(self):
return self._Comparestatus
@Comparestatus.setter
def Comparestatus(self, Comparestatus):
self._Comparestatus = Comparestatus
@property
def Comparemsg(self):
return self._Comparemsg
@Comparemsg.setter
def Comparemsg(self, Comparemsg):
self._Comparemsg = Comparemsg
@property
def CompareLibType(self):
return self._CompareLibType
@CompareLibType.setter
def CompareLibType(self, CompareLibType):
self._CompareLibType = CompareLibType
@property
def LivenessMode(self):
return self._LivenessMode
@LivenessMode.setter
def LivenessMode(self, LivenessMode):
self._LivenessMode = LivenessMode
def _deserialize(self, params):
self._ReqTime = params.get("ReqTime")
self._Seq = params.get("Seq")
self._Idcard = params.get("Idcard")
self._Name = params.get("Name")
self._Sim = params.get("Sim")
self._IsNeedCharge = params.get("IsNeedCharge")
self._Errcode = params.get("Errcode")
self._Errmsg = params.get("Errmsg")
self._Livestatus = params.get("Livestatus")
self._Livemsg = params.get("Livemsg")
self._Comparestatus = params.get("Comparestatus")
self._Comparemsg = params.get("Comparemsg")
self._CompareLibType = params.get("CompareLibType")
self._LivenessMode = params.get("LivenessMode")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DetectInfoBestFrame(AbstractModel):
"""核身最佳帧信息
"""
def __init__(self):
r"""
:param _BestFrame: 活体比对最佳帧Base64编码。
注意:此字段可能返回 null,表示取不到有效值。
:type BestFrame: str
:param _BestFrames: 自截帧Base64编码数组。
注意:此字段可能返回 null,表示取不到有效值。
:type BestFrames: list of str
"""
self._BestFrame = None
self._BestFrames = None
@property
def BestFrame(self):
return self._BestFrame
@BestFrame.setter
def BestFrame(self, BestFrame):
self._BestFrame = BestFrame
@property
def BestFrames(self):
return self._BestFrames
@BestFrames.setter
def BestFrames(self, BestFrames):
self._BestFrames = BestFrames
def _deserialize(self, params):
self._BestFrame = params.get("BestFrame")
self._BestFrames = params.get("BestFrames")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DetectInfoIdCardData(AbstractModel):
"""核身身份证图片信息
"""
def __init__(self):
r"""
:param _OcrFront: OCR正面照片的base64编码。
注意:此字段可能返回 null,表示取不到有效值。
:type OcrFront: str
:param _OcrBack: OCR反面照片的base64编码
注意:此字段可能返回 null,表示取不到有效值。
:type OcrBack: str
:param _ProcessedFrontImage: 旋转裁边后的正面照片base64编码。
注意:此字段可能返回 null,表示取不到有效值。
:type ProcessedFrontImage: str
:param _ProcessedBackImage: 旋转裁边后的背面照片base64编码。
注意:此字段可能返回 null,表示取不到有效值。
:type ProcessedBackImage: str
:param _Avatar: 身份证正面人像图base64编码。
注意:此字段可能返回 null,表示取不到有效值。
:type Avatar: str
:param _WarnInfos: 身份证人像面告警码,开启身份证告警功能后才会返回,返回数组中可能出现的告警码如下:
-9100 身份证有效日期不合法告警,
-9101 身份证边框不完整告警,
-9102 身份证复印件告警,
-9103 身份证翻拍告警,
-9105 身份证框内遮挡告警,
-9104 临时身份证告警,
-9106 身份证 PS 告警(疑似存在PS痕迹),
-9107 身份证反光告警。
注意:此字段可能返回 null,表示取不到有效值。
:type WarnInfos: list of int
:param _BackWarnInfos: 身份证国徽面告警码,开启身份证告警功能后才会返回,返回数组中可能出现的告警码如下:
-9100 身份证有效日期不合法告警,
-9101 身份证边框不完整告警,
-9102 身份证复印件告警,
-9103 身份证翻拍告警,
-9105 身份证框内遮挡告警,
-9104 临时身份证告警,
-9106 身份证 PS 告警(疑似存在PS痕迹),
-9107 身份证反光告警。
注意:此字段可能返回 null,表示取不到有效值。
:type BackWarnInfos: list of int
"""
self._OcrFront = None
self._OcrBack = None
self._ProcessedFrontImage = None
self._ProcessedBackImage = None
self._Avatar = None
self._WarnInfos = None
self._BackWarnInfos = None
@property
def OcrFront(self):
return self._OcrFront
@OcrFront.setter
def OcrFront(self, OcrFront):
self._OcrFront = OcrFront
@property
def OcrBack(self):
return self._OcrBack
@OcrBack.setter
def OcrBack(self, OcrBack):
self._OcrBack = OcrBack
@property
def ProcessedFrontImage(self):
return self._ProcessedFrontImage
@ProcessedFrontImage.setter
def ProcessedFrontImage(self, ProcessedFrontImage):
self._ProcessedFrontImage = ProcessedFrontImage
@property
def ProcessedBackImage(self):
return self._ProcessedBackImage
@ProcessedBackImage.setter
def ProcessedBackImage(self, ProcessedBackImage):
self._ProcessedBackImage = ProcessedBackImage
@property
def Avatar(self):
return self._Avatar
@Avatar.setter
def Avatar(self, Avatar):
self._Avatar = Avatar
@property
def WarnInfos(self):
return self._WarnInfos
@WarnInfos.setter
def WarnInfos(self, WarnInfos):
self._WarnInfos = WarnInfos
@property
def BackWarnInfos(self):
return self._BackWarnInfos
@BackWarnInfos.setter
def BackWarnInfos(self, BackWarnInfos):
self._BackWarnInfos = BackWarnInfos
def _deserialize(self, params):
self._OcrFront = params.get("OcrFront")
self._OcrBack = params.get("OcrBack")
self._ProcessedFrontImage = params.get("ProcessedFrontImage")
self._ProcessedBackImage = params.get("ProcessedBackImage")
self._Avatar = params.get("Avatar")
self._WarnInfos = params.get("WarnInfos")
self._BackWarnInfos = params.get("BackWarnInfos")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DetectInfoText(AbstractModel):
"""核身文本信息
"""
def __init__(self):
r"""
:param _ErrCode: 本次流程最终验证结果。0为成功
注意:此字段可能返回 null,表示取不到有效值。
:type ErrCode: int
:param _ErrMsg: 本次流程最终验证结果描述。(仅描述用,文案更新时不会通知。)
注意:此字段可能返回 null,表示取不到有效值。
:type ErrMsg: str
:param _IdCard: 本次验证使用的身份证号。
注意:此字段可能返回 null,表示取不到有效值。
:type IdCard: str
:param _UseIDType: 用户认证时使用的证件号码类型:
0:二代身份证的证件号码
1:港澳台居住证的证件号码
2:其他(核验使用的证件号码非合法身份号码)
注意:此字段可能返回 null,表示取不到有效值。
:type UseIDType: int
:param _Name: 本次验证使用的姓名。
注意:此字段可能返回 null,表示取不到有效值。
:type Name: str
:param _OcrNation: 身份校验环节识别结果:民族。
注意:此字段可能返回 null,表示取不到有效值。
:type OcrNation: str
:param _OcrAddress: 身份校验环节识别结果:家庭住址。
注意:此字段可能返回 null,表示取不到有效值。
:type OcrAddress: str
:param _OcrBirth: 身份校验环节识别结果:生日。格式为:YYYY/M/D
注意:此字段可能返回 null,表示取不到有效值。
:type OcrBirth: str
:param _OcrAuthority: 身份校验环节识别结果:签发机关。
注意:此字段可能返回 null,表示取不到有效值。
:type OcrAuthority: str
:param _OcrValidDate: 身份校验环节识别结果:有效日期。格式为:YYYY.MM.DD-YYYY.MM.DD
注意:此字段可能返回 null,表示取不到有效值。
:type OcrValidDate: str
:param _OcrName: 身份校验环节识别结果:姓名。
注意:此字段可能返回 null,表示取不到有效值。
:type OcrName: str
:param _OcrIdCard: 身份校验环节识别结果:身份证号。
注意:此字段可能返回 null,表示取不到有效值。
:type OcrIdCard: str
:param _OcrGender: 身份校验环节识别结果:性别。
注意:此字段可能返回 null,表示取不到有效值。
:type OcrGender: str
:param _IdInfoFrom: 身份校验环节采用的信息上传方式。
取值有"NFC"、"OCR"、"手动输入"、"其他"
注意:此字段可能返回 null,表示取不到有效值。
:type IdInfoFrom: str
:param _LiveStatus: 本次流程最终活体结果。0为成功
注意:此字段可能返回 null,表示取不到有效值。
:type LiveStatus: int
:param _LiveMsg: 本次流程最终活体结果描述。(仅描述用,文案更新时不会通知。)
注意:此字段可能返回 null,表示取不到有效值。
:type LiveMsg: str
:param _Comparestatus: 本次流程最终一比一结果。0为成功
注意:此字段可能返回 null,表示取不到有效值。
:type Comparestatus: int
:param _Comparemsg: 本次流程最终一比一结果描述。(仅描述用,文案更新时不会通知。)
注意:此字段可能返回 null,表示取不到有效值。
:type Comparemsg: str
:param _Sim: 本次流程活体一比一的分数,取值范围 [0.00, 100.00]。相似度大于等于70时才判断为同一人,也可根据具体场景自行调整阈值(阈值70的误通过率为千分之一,阈值80的误通过率是万分之一)
注意:此字段可能返回 null,表示取不到有效值。
:type Sim: str
:param _Location: 地理位置经纬度。
注意:此字段可能返回 null,表示取不到有效值。
:type Location: str
:param _Extra: Auth接口带入额外信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Extra: str
:param _LivenessDetail: 本次流程进行的活体一比一流水。
注意:此字段可能返回 null,表示取不到有效值。
:type LivenessDetail: list of DetectDetail
:param _Mobile: 手机号码。
注意:此字段可能返回 null,表示取不到有效值。
:type Mobile: str
:param _CompareLibType: 本次流程最终比对库源类型。包括:
权威库;
业务方自有库(用户上传照片、客户的混合库、混合部署库);
二次验证库;
人工审核库;
注意:此字段可能返回 null,表示取不到有效值。
:type CompareLibType: str
:param _LivenessMode: 本次流程最终活体类型。包括:
0:未知
1:数字活体
2:动作活体
3:静默活体
4:一闪活体(动作+光线)
注意:此字段可能返回 null,表示取不到有效值。
:type LivenessMode: int
:param _NFCRequestIds: nfc重复计费requestId列表
注意:此字段可能返回 null,表示取不到有效值。
:type NFCRequestIds: list of str
:param _NFCBillingCounts: nfc重复计费计数
注意:此字段可能返回 null,表示取不到有效值。
:type NFCBillingCounts: int
:param _PassNo: 港澳台居住证通行证号码
注意:此字段可能返回 null,表示取不到有效值。
:type PassNo: str
:param _VisaNum: 港澳台居住证签发次数
注意:此字段可能返回 null,表示取不到有效值。
:type VisaNum: str
"""
self._ErrCode = None
self._ErrMsg = None
self._IdCard = None
self._UseIDType = None
self._Name = None
self._OcrNation = None
self._OcrAddress = None
self._OcrBirth = None
self._OcrAuthority = None
self._OcrValidDate = None
self._OcrName = None
self._OcrIdCard = None
self._OcrGender = None
self._IdInfoFrom = None
self._LiveStatus = None
self._LiveMsg = None
self._Comparestatus = None
self._Comparemsg = None
self._Sim = None
self._Location = None
self._Extra = None
self._LivenessDetail = None
self._Mobile = None
self._CompareLibType = None
self._LivenessMode = None
self._NFCRequestIds = None
self._NFCBillingCounts = None
self._PassNo = None
self._VisaNum = None
@property
def ErrCode(self):
return self._ErrCode
@ErrCode.setter
def ErrCode(self, ErrCode):
self._ErrCode = ErrCode
@property
def ErrMsg(self):
return self._ErrMsg
@ErrMsg.setter
def ErrMsg(self, ErrMsg):
self._ErrMsg = ErrMsg
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def UseIDType(self):
return self._UseIDType
@UseIDType.setter
def UseIDType(self, UseIDType):
self._UseIDType = UseIDType
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def OcrNation(self):
return self._OcrNation
@OcrNation.setter
def OcrNation(self, OcrNation):
self._OcrNation = OcrNation
@property
def OcrAddress(self):
return self._OcrAddress
@OcrAddress.setter
def OcrAddress(self, OcrAddress):
self._OcrAddress = OcrAddress
@property
def OcrBirth(self):
return self._OcrBirth
@OcrBirth.setter
def OcrBirth(self, OcrBirth):
self._OcrBirth = OcrBirth
@property
def OcrAuthority(self):
return self._OcrAuthority
@OcrAuthority.setter
def OcrAuthority(self, OcrAuthority):
self._OcrAuthority = OcrAuthority
@property
def OcrValidDate(self):
return self._OcrValidDate
@OcrValidDate.setter
def OcrValidDate(self, OcrValidDate):
self._OcrValidDate = OcrValidDate
@property
def OcrName(self):
return self._OcrName
@OcrName.setter
def OcrName(self, OcrName):
self._OcrName = OcrName
@property
def OcrIdCard(self):
return self._OcrIdCard
@OcrIdCard.setter
def OcrIdCard(self, OcrIdCard):
self._OcrIdCard = OcrIdCard
@property
def OcrGender(self):
return self._OcrGender
@OcrGender.setter
def OcrGender(self, OcrGender):
self._OcrGender = OcrGender
@property
def IdInfoFrom(self):
return self._IdInfoFrom
@IdInfoFrom.setter
def IdInfoFrom(self, IdInfoFrom):
self._IdInfoFrom = IdInfoFrom
@property
def LiveStatus(self):
return self._LiveStatus
@LiveStatus.setter
def LiveStatus(self, LiveStatus):
self._LiveStatus = LiveStatus
@property
def LiveMsg(self):
return self._LiveMsg
@LiveMsg.setter
def LiveMsg(self, LiveMsg):
self._LiveMsg = LiveMsg
@property
def Comparestatus(self):
return self._Comparestatus
@Comparestatus.setter
def Comparestatus(self, Comparestatus):
self._Comparestatus = Comparestatus
@property
def Comparemsg(self):
return self._Comparemsg
@Comparemsg.setter
def Comparemsg(self, Comparemsg):
self._Comparemsg = Comparemsg
@property
def Sim(self):
return self._Sim
@Sim.setter
def Sim(self, Sim):
self._Sim = Sim
@property
def Location(self):
return self._Location
@Location.setter
def Location(self, Location):
self._Location = Location
@property
def Extra(self):
return self._Extra
@Extra.setter
def Extra(self, Extra):
self._Extra = Extra
@property
def LivenessDetail(self):
return self._LivenessDetail
@LivenessDetail.setter
def LivenessDetail(self, LivenessDetail):
self._LivenessDetail = LivenessDetail
@property
def Mobile(self):
return self._Mobile
@Mobile.setter
def Mobile(self, Mobile):
self._Mobile = Mobile
@property
def CompareLibType(self):
return self._CompareLibType
@CompareLibType.setter
def CompareLibType(self, CompareLibType):
self._CompareLibType = CompareLibType
@property
def LivenessMode(self):
return self._LivenessMode
@LivenessMode.setter
def LivenessMode(self, LivenessMode):
self._LivenessMode = LivenessMode
@property
def NFCRequestIds(self):
return self._NFCRequestIds
@NFCRequestIds.setter
def NFCRequestIds(self, NFCRequestIds):
self._NFCRequestIds = NFCRequestIds
@property
def NFCBillingCounts(self):
return self._NFCBillingCounts
@NFCBillingCounts.setter
def NFCBillingCounts(self, NFCBillingCounts):
self._NFCBillingCounts = NFCBillingCounts
@property
def PassNo(self):
return self._PassNo
@PassNo.setter
def PassNo(self, PassNo):
self._PassNo = PassNo
@property
def VisaNum(self):
return self._VisaNum
@VisaNum.setter
def VisaNum(self, VisaNum):
self._VisaNum = VisaNum
def _deserialize(self, params):
self._ErrCode = params.get("ErrCode")
self._ErrMsg = params.get("ErrMsg")
self._IdCard = params.get("IdCard")
self._UseIDType = params.get("UseIDType")
self._Name = params.get("Name")
self._OcrNation = params.get("OcrNation")
self._OcrAddress = params.get("OcrAddress")
self._OcrBirth = params.get("OcrBirth")
self._OcrAuthority = params.get("OcrAuthority")
self._OcrValidDate = params.get("OcrValidDate")
self._OcrName = params.get("OcrName")
self._OcrIdCard = params.get("OcrIdCard")
self._OcrGender = params.get("OcrGender")
self._IdInfoFrom = params.get("IdInfoFrom")
self._LiveStatus = params.get("LiveStatus")
self._LiveMsg = params.get("LiveMsg")
self._Comparestatus = params.get("Comparestatus")
self._Comparemsg = params.get("Comparemsg")
self._Sim = params.get("Sim")
self._Location = params.get("Location")
self._Extra = params.get("Extra")
if params.get("LivenessDetail") is not None:
self._LivenessDetail = []
for item in params.get("LivenessDetail"):
obj = DetectDetail()
obj._deserialize(item)
self._LivenessDetail.append(obj)
self._Mobile = params.get("Mobile")
self._CompareLibType = params.get("CompareLibType")
self._LivenessMode = params.get("LivenessMode")
self._NFCRequestIds = params.get("NFCRequestIds")
self._NFCBillingCounts = params.get("NFCBillingCounts")
self._PassNo = params.get("PassNo")
self._VisaNum = params.get("VisaNum")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class DetectInfoVideoData(AbstractModel):
"""核身视频信息
"""
def __init__(self):
r"""
:param _LivenessVideo: 活体视频的base64编码
注意:此字段可能返回 null,表示取不到有效值。
:type LivenessVideo: str
"""
self._LivenessVideo = None
@property
def LivenessVideo(self):
return self._LivenessVideo
@LivenessVideo.setter
def LivenessVideo(self, LivenessVideo):
self._LivenessVideo = LivenessVideo
def _deserialize(self, params):
self._LivenessVideo = params.get("LivenessVideo")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
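# Hedged usage sketch: persist the Base64 liveness video carried by a
# DetectInfoVideoData instance. The function name and output path are
# hypothetical examples, not part of this SDK.
def _example_save_liveness_video(video_data, path="liveness.mp4"):
    import base64
    if video_data.LivenessVideo:
        with open(path, "wb") as f:
            f.write(base64.b64decode(video_data.LivenessVideo))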
class EidInfo(AbstractModel):
"""Eid出参,包括商户方用户的标识和加密的用户姓名身份证信息。
"""
def __init__(self):
r"""
:param _EidCode: 商户方 appeIDcode 的数字证书
:type EidCode: str
:param _EidSign: Eid中心针对商户方EidCode的电子签名
:type EidSign: str
:param _DesKey: 商户方公钥加密的会话密钥的base64字符串,[指引详见](https://cloud.tencent.com/document/product/1007/63370)
:type DesKey: str
:param _UserInfo: 会话密钥sm2加密后的base64字符串,[指引详见](https://cloud.tencent.com/document/product/1007/63370)
:type UserInfo: str
"""
self._EidCode = None
self._EidSign = None
self._DesKey = None
self._UserInfo = None
@property
def EidCode(self):
return self._EidCode
@EidCode.setter
def EidCode(self, EidCode):
self._EidCode = EidCode
@property
def EidSign(self):
return self._EidSign
@EidSign.setter
def EidSign(self, EidSign):
self._EidSign = EidSign
@property
def DesKey(self):
return self._DesKey
@DesKey.setter
def DesKey(self, DesKey):
self._DesKey = DesKey
@property
def UserInfo(self):
return self._UserInfo
@UserInfo.setter
def UserInfo(self, UserInfo):
self._UserInfo = UserInfo
def _deserialize(self, params):
self._EidCode = params.get("EidCode")
self._EidSign = params.get("EidSign")
self._DesKey = params.get("DesKey")
self._UserInfo = params.get("UserInfo")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class EncryptedPhoneVerificationRequest(AbstractModel):
"""EncryptedPhoneVerification请求参数结构体
"""
def __init__(self):
r"""
:param _IdCard: 身份证号,加密方式以EncryptionMode为准
:type IdCard: str
:param _Name: 姓名,加密方式以EncryptionMode为准
:type Name: str
:param _Phone: 手机号,加密方式以EncryptionMode为准
:type Phone: str
:param _EncryptionMode: 敏感信息的加密方式,目前支持明文、MD5和SHA256加密传输,参数取值:
0:明文,不加密
1: 使用MD5加密
2: 使用SHA256
:type EncryptionMode: str
"""
self._IdCard = None
self._Name = None
self._Phone = None
self._EncryptionMode = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Phone(self):
return self._Phone
@Phone.setter
def Phone(self, Phone):
self._Phone = Phone
@property
def EncryptionMode(self):
return self._EncryptionMode
@EncryptionMode.setter
def EncryptionMode(self, EncryptionMode):
self._EncryptionMode = EncryptionMode
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._Phone = params.get("Phone")
self._EncryptionMode = params.get("EncryptionMode")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
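# Hedged usage sketch: build an EncryptedPhoneVerificationRequest with
# SHA256 transmission (EncryptionMode "2" per the docstring above). The
# sample identity values are hypothetical placeholders.
def _example_encrypted_phone_verification_request():
    import hashlib

    def sha256_hex(text):
        return hashlib.sha256(text.encode("utf-8")).hexdigest()

    req = EncryptedPhoneVerificationRequest()
    req.EncryptionMode = "2"                       # SHA256, per the docstring
    req.IdCard = sha256_hex("110101190001010000")  # placeholder ID number
    req.Name = sha256_hex("张三")                   # placeholder name
    req.Phone = sha256_hex("13800000000")          # placeholder phone number
    return req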
class EncryptedPhoneVerificationResponse(AbstractModel):
"""EncryptedPhoneVerification返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码:
【收费结果码】
0: 三要素信息一致
-4: 三要素信息不一致
【不收费结果码】
-7: 身份证号码有误
-8: 参数错误
-9: 没有记录
-11: 验证中心服务繁忙
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _ISP: 运营商名称。
取值范围为["移动","联通","电信",""]
:type ISP: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._ISP = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def ISP(self):
return self._ISP
@ISP.setter
def ISP(self, ISP):
self._ISP = ISP
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._ISP = params.get("ISP")
self._RequestId = params.get("RequestId")
class Encryption(AbstractModel):
"""敏感数据加密
"""
def __init__(self):
r"""
:param _EncryptList: 在使用加密服务时,填入要被加密的字段。本接口中可填入加密后的一个或多个字段
注意:此字段可能返回 null,表示取不到有效值。
:type EncryptList: list of str
:param _CiphertextBlob: 有加密需求的用户,接入传入kms的CiphertextBlob,关于数据加密可查阅<a href="https://cloud.tencent.com/document/product/1007/47180">数据加密</a> 文档。
注意:此字段可能返回 null,表示取不到有效值。
:type CiphertextBlob: str
:param _Iv: 有加密需求的用户,传入CBC加密的初始向量(客户自定义字符串,长度16字符)。
注意:此字段可能返回 null,表示取不到有效值。
:type Iv: str
:param _Algorithm: 加密使用的算法(支持'AES-256-CBC'、'SM4-GCM'),不传默认为'AES-256-CBC'
注意:此字段可能返回 null,表示取不到有效值。
:type Algorithm: str
:param _TagList: SM4-GCM算法生成的消息摘要(校验消息完整性时使用)
注意:此字段可能返回 null,表示取不到有效值。
:type TagList: list of str
"""
self._EncryptList = None
self._CiphertextBlob = None
self._Iv = None
self._Algorithm = None
self._TagList = None
@property
def EncryptList(self):
return self._EncryptList
@EncryptList.setter
def EncryptList(self, EncryptList):
self._EncryptList = EncryptList
@property
def CiphertextBlob(self):
return self._CiphertextBlob
@CiphertextBlob.setter
def CiphertextBlob(self, CiphertextBlob):
self._CiphertextBlob = CiphertextBlob
@property
def Iv(self):
return self._Iv
@Iv.setter
def Iv(self, Iv):
self._Iv = Iv
@property
def Algorithm(self):
return self._Algorithm
@Algorithm.setter
def Algorithm(self, Algorithm):
self._Algorithm = Algorithm
@property
def TagList(self):
return self._TagList
@TagList.setter
def TagList(self, TagList):
self._TagList = TagList
def _deserialize(self, params):
self._EncryptList = params.get("EncryptList")
self._CiphertextBlob = params.get("CiphertextBlob")
self._Iv = params.get("Iv")
self._Algorithm = params.get("Algorithm")
self._TagList = params.get("TagList")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
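# Hedged sketch: fill an Encryption object for client-side AES-256-CBC as
# described in the docstring above. Assumptions not confirmed by this file:
# the 32-byte session key must be wrapped by KMS Encrypt into
# CiphertextBlob, and the ciphertext replaces the plaintext field value in
# the request. Requires the third-party `cryptography` package.
def _example_build_encryption(plain_id_card):
    import os
    import base64
    from cryptography.hazmat.primitives import padding
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

    key = os.urandom(32)      # session key; wrap via KMS Encrypt -> CiphertextBlob
    iv = os.urandom(8).hex()  # 16-character customer-defined IV string
    padder = padding.PKCS7(128).padder()
    padded = padder.update(plain_id_card.encode("utf-8")) + padder.finalize()
    encryptor = Cipher(algorithms.AES(key), modes.CBC(iv.encode("utf-8"))).encryptor()
    ciphertext = base64.b64encode(encryptor.update(padded) + encryptor.finalize()).decode()

    enc = Encryption()
    enc.Algorithm = "AES-256-CBC"
    enc.Iv = iv
    enc.EncryptList = ["IdCard"]
    enc.CiphertextBlob = "<KMS-wrapped session key>"  # placeholder: KMS Encrypt output for `key`
    return enc, ciphertext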
class GetActionSequenceRequest(AbstractModel):
"""GetActionSequence请求参数结构体
"""
def __init__(self):
r"""
:param _ActionType: 默认不需要使用
:type ActionType: str
"""
self._ActionType = None
@property
def ActionType(self):
return self._ActionType
@ActionType.setter
def ActionType(self, ActionType):
self._ActionType = ActionType
def _deserialize(self, params):
self._ActionType = params.get("ActionType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetActionSequenceResponse(AbstractModel):
"""GetActionSequence返回参数结构体
"""
def __init__(self):
r"""
:param _ActionSequence: 动作顺序(2,1 or 1,2) 。1代表张嘴,2代表闭眼。
:type ActionSequence: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._ActionSequence = None
self._RequestId = None
@property
def ActionSequence(self):
return self._ActionSequence
@ActionSequence.setter
def ActionSequence(self, ActionSequence):
self._ActionSequence = ActionSequence
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._ActionSequence = params.get("ActionSequence")
self._RequestId = params.get("RequestId")
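# Hedged usage sketch: turn the ActionSequence string from a
# GetActionSequenceResponse into user-facing prompts, using the mapping in
# the docstring above (1 = open mouth, 2 = close eyes).
def _example_parse_action_sequence(resp):
    actions = {"1": "open mouth", "2": "close eyes"}
    return [actions[code] for code in resp.ActionSequence.split(",")]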
class GetDetectInfoEnhancedRequest(AbstractModel):
"""GetDetectInfoEnhanced请求参数结构体
"""
def __init__(self):
r"""
:param _BizToken: 人脸核身流程的标识,调用DetectAuth接口时生成。
:type BizToken: str
:param _RuleId: 用于细分客户使用场景,由腾讯侧在线下对接时分配。
:type RuleId: str
:param _InfoType: 指定拉取的结果信息,取值(0:全部;1:文本类;2:身份证信息;3:视频最佳截图信息)。
如 13表示拉取文本类、视频最佳截图信息。
默认值:0
:type InfoType: str
:param _BestFramesCount: 从活体视频中截取一定张数的最佳帧(仅部分服务支持,若需使用请与慧眼小助手沟通)。默认为0,最大为10,超出10的最多只给10张。(InfoType需要包含3)
:type BestFramesCount: int
:param _IsCutIdCardImage: 是否对身份证照片进行裁边。默认为false。(InfoType需要包含2)
:type IsCutIdCardImage: bool
:param _IsNeedIdCardAvatar: 是否需要从身份证中抠出头像。默认为false。(InfoType需要包含2)
:type IsNeedIdCardAvatar: bool
:param _IsEncrypt: 已弃用。
:type IsEncrypt: bool
:param _Encryption: 是否需要对返回中的敏感信息进行加密。仅指定加密算法Algorithm即可,其余字段传入默认值。其中敏感信息包括:Response.Text.IdCard、Response.Text.Name、Response.Text.OcrIdCard、Response.Text.OcrName
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._BizToken = None
self._RuleId = None
self._InfoType = None
self._BestFramesCount = None
self._IsCutIdCardImage = None
self._IsNeedIdCardAvatar = None
self._IsEncrypt = None
self._Encryption = None
@property
def BizToken(self):
return self._BizToken
@BizToken.setter
def BizToken(self, BizToken):
self._BizToken = BizToken
@property
def RuleId(self):
return self._RuleId
@RuleId.setter
def RuleId(self, RuleId):
self._RuleId = RuleId
@property
def InfoType(self):
return self._InfoType
@InfoType.setter
def InfoType(self, InfoType):
self._InfoType = InfoType
@property
def BestFramesCount(self):
return self._BestFramesCount
@BestFramesCount.setter
def BestFramesCount(self, BestFramesCount):
self._BestFramesCount = BestFramesCount
@property
def IsCutIdCardImage(self):
return self._IsCutIdCardImage
@IsCutIdCardImage.setter
def IsCutIdCardImage(self, IsCutIdCardImage):
self._IsCutIdCardImage = IsCutIdCardImage
@property
def IsNeedIdCardAvatar(self):
return self._IsNeedIdCardAvatar
@IsNeedIdCardAvatar.setter
def IsNeedIdCardAvatar(self, IsNeedIdCardAvatar):
self._IsNeedIdCardAvatar = IsNeedIdCardAvatar
@property
def IsEncrypt(self):
return self._IsEncrypt
@IsEncrypt.setter
def IsEncrypt(self, IsEncrypt):
self._IsEncrypt = IsEncrypt
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._BizToken = params.get("BizToken")
self._RuleId = params.get("RuleId")
self._InfoType = params.get("InfoType")
self._BestFramesCount = params.get("BestFramesCount")
self._IsCutIdCardImage = params.get("IsCutIdCardImage")
self._IsNeedIdCardAvatar = params.get("IsNeedIdCardAvatar")
self._IsEncrypt = params.get("IsEncrypt")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
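# Hedged usage sketch: request text plus best-frame results (InfoType "13"
# per the docstring above) for a finished flow. `biz_token` and `rule_id`
# are hypothetical values produced earlier by DetectAuth and console setup.
def _example_detect_info_enhanced_request(biz_token, rule_id):
    req = GetDetectInfoEnhancedRequest()
    req.BizToken = biz_token
    req.RuleId = rule_id
    req.InfoType = "13"      # text + best-frame information
    req.BestFramesCount = 3  # requires InfoType to include 3
    return req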
class GetDetectInfoEnhancedResponse(AbstractModel):
"""GetDetectInfoEnhanced返回参数结构体
"""
def __init__(self):
r"""
:param _Text: 文本类信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Text: :class:`tencentcloud.faceid.v20180301.models.DetectInfoText`
:param _IdCardData: 身份证照片信息。
注意:此字段可能返回 null,表示取不到有效值。
:type IdCardData: :class:`tencentcloud.faceid.v20180301.models.DetectInfoIdCardData`
:param _BestFrame: 最佳帧信息。
注意:此字段可能返回 null,表示取不到有效值。
:type BestFrame: :class:`tencentcloud.faceid.v20180301.models.DetectInfoBestFrame`
:param _VideoData: 视频信息。
注意:此字段可能返回 null,表示取不到有效值。
:type VideoData: :class:`tencentcloud.faceid.v20180301.models.DetectInfoVideoData`
:param _Encryption: 敏感数据加密信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
:param _IntentionVerifyData: 意愿核身朗读模式结果信息。若未使用意愿核身功能,该字段返回值可以不处理。
注意:此字段可能返回 null,表示取不到有效值。
:type IntentionVerifyData: :class:`tencentcloud.faceid.v20180301.models.IntentionVerifyData`
:param _IntentionQuestionResult: 意愿核身问答模式结果。若未使用该意愿核身功能,该字段返回值可以不处理。
注意:此字段可能返回 null,表示取不到有效值。
:type IntentionQuestionResult: :class:`tencentcloud.faceid.v20180301.models.IntentionQuestionResult`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Text = None
self._IdCardData = None
self._BestFrame = None
self._VideoData = None
self._Encryption = None
self._IntentionVerifyData = None
self._IntentionQuestionResult = None
self._RequestId = None
@property
def Text(self):
return self._Text
@Text.setter
def Text(self, Text):
self._Text = Text
@property
def IdCardData(self):
return self._IdCardData
@IdCardData.setter
def IdCardData(self, IdCardData):
self._IdCardData = IdCardData
@property
def BestFrame(self):
return self._BestFrame
@BestFrame.setter
def BestFrame(self, BestFrame):
self._BestFrame = BestFrame
@property
def VideoData(self):
return self._VideoData
@VideoData.setter
def VideoData(self, VideoData):
self._VideoData = VideoData
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
@property
def IntentionVerifyData(self):
return self._IntentionVerifyData
@IntentionVerifyData.setter
def IntentionVerifyData(self, IntentionVerifyData):
self._IntentionVerifyData = IntentionVerifyData
@property
def IntentionQuestionResult(self):
return self._IntentionQuestionResult
@IntentionQuestionResult.setter
def IntentionQuestionResult(self, IntentionQuestionResult):
self._IntentionQuestionResult = IntentionQuestionResult
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Text") is not None:
self._Text = DetectInfoText()
self._Text._deserialize(params.get("Text"))
if params.get("IdCardData") is not None:
self._IdCardData = DetectInfoIdCardData()
self._IdCardData._deserialize(params.get("IdCardData"))
if params.get("BestFrame") is not None:
self._BestFrame = DetectInfoBestFrame()
self._BestFrame._deserialize(params.get("BestFrame"))
if params.get("VideoData") is not None:
self._VideoData = DetectInfoVideoData()
self._VideoData._deserialize(params.get("VideoData"))
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
if params.get("IntentionVerifyData") is not None:
self._IntentionVerifyData = IntentionVerifyData()
self._IntentionVerifyData._deserialize(params.get("IntentionVerifyData"))
if params.get("IntentionQuestionResult") is not None:
self._IntentionQuestionResult = IntentionQuestionResult()
self._IntentionQuestionResult._deserialize(params.get("IntentionQuestionResult"))
self._RequestId = params.get("RequestId")
class GetDetectInfoRequest(AbstractModel):
"""GetDetectInfo请求参数结构体
"""
def __init__(self):
r"""
:param _BizToken: 人脸核身流程的标识,调用DetectAuth接口时生成。
:type BizToken: str
:param _RuleId: 用于细分客户使用场景,申请开通服务后,可以在腾讯云慧眼人脸核身控制台(https://console.cloud.tencent.com/faceid) 自助接入里面创建,审核通过后即可调用。如有疑问,请加慧眼小助手微信(faceid001)进行咨询。
:type RuleId: str
:param _InfoType: 指定拉取的结果信息,取值(0:全部;1:文本类;2:身份证正反面;3:视频最佳截图照片;4:视频)。
如 134表示拉取文本类、视频最佳截图照片、视频。
默认值:0
:type InfoType: str
"""
self._BizToken = None
self._RuleId = None
self._InfoType = None
@property
def BizToken(self):
return self._BizToken
@BizToken.setter
def BizToken(self, BizToken):
self._BizToken = BizToken
@property
def RuleId(self):
return self._RuleId
@RuleId.setter
def RuleId(self, RuleId):
self._RuleId = RuleId
@property
def InfoType(self):
return self._InfoType
@InfoType.setter
def InfoType(self, InfoType):
self._InfoType = InfoType
def _deserialize(self, params):
self._BizToken = params.get("BizToken")
self._RuleId = params.get("RuleId")
self._InfoType = params.get("InfoType")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetDetectInfoResponse(AbstractModel):
"""GetDetectInfo返回参数结构体
"""
def __init__(self):
r"""
:param _DetectInfo: JSON字符串。
{
// 文本类信息
"Text": {
"ErrCode": null, // 本次核身最终结果。0为成功
"ErrMsg": null, // 本次核身最终结果信息描述。
"IdCard": "", // 本次核身最终获得的身份证号。
"Name": "", // 本次核身最终获得的姓名。
"OcrNation": null, // ocr阶段获取的民族
"OcrAddress": null, // ocr阶段获取的地址
"OcrBirth": null, // ocr阶段获取的出生信息
"OcrAuthority": null, // ocr阶段获取的证件签发机关
"OcrValidDate": null, // ocr阶段获取的证件有效期
"OcrName": null, // ocr阶段获取的姓名
"OcrIdCard": null, // ocr阶段获取的身份证号
"OcrGender": null, // ocr阶段获取的性别
"LiveStatus": null, // 活体检测阶段的错误码。0为成功
"LiveMsg": null, // 活体检测阶段的错误信息
"Comparestatus": null,// 一比一阶段的错误码。0为成功
"Comparemsg": null, // 一比一阶段的错误信息
"Sim": null, // 比对相似度
"Location": null, // 地理位置信息
"Extra": "", // DetectAuth结果传进来的Extra信息
"Detail": { // 活体一比一信息详情
"LivenessData": [
{
ErrCode: null, // 活体比对验证错误码
ErrMsg: null, // 活体比对验证错误描述
ReqTime: null, // 活体验证时间戳
IdCard: null, // 验证身份证号
Name: null // 验证姓名
}
]
}
},
// 身份证正反面照片Base64
"IdCardData": {
"OcrFront": null,
"OcrBack": null
},
// 视频最佳帧截图Base64
"BestFrame": {
"BestFrame": null
},
// 活体视频Base64
"VideoData": {
"LivenessVideo": null
}
}
:type DetectInfo: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._DetectInfo = None
self._RequestId = None
@property
def DetectInfo(self):
return self._DetectInfo
@DetectInfo.setter
def DetectInfo(self, DetectInfo):
self._DetectInfo = DetectInfo
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._DetectInfo = params.get("DetectInfo")
self._RequestId = params.get("RequestId")
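# Hedged usage sketch: parse the DetectInfo JSON string returned by
# GetDetectInfo (structure documented in the docstring above) and pull out
# the final verification result.
def _example_read_detect_info(resp):
    import json
    info = json.loads(resp.DetectInfo)
    text = info.get("Text") or {}
    return text.get("ErrCode"), text.get("ErrMsg")  # 0 means success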
class GetEidResultRequest(AbstractModel):
"""GetEidResult请求参数结构体
"""
def __init__(self):
r"""
:param _EidToken: E证通流程的唯一标识,调用GetEidToken接口时生成。
:type EidToken: str
:param _InfoType: 指定拉取的结果信息,取值(0:全部;1:文本类;2:身份证信息;3:最佳截图信息;5:意愿核身朗读模式相关结果;6:意愿核身问答模式相关结果)。
如 13表示拉取文本类、最佳截图信息。
默认值:0
:type InfoType: str
:param _BestFramesCount: 从活体视频中截取一定张数的最佳帧。默认为0,最大为3,超出3的最多只给3张。(InfoType需要包含3)
:type BestFramesCount: int
"""
self._EidToken = None
self._InfoType = None
self._BestFramesCount = None
@property
def EidToken(self):
return self._EidToken
@EidToken.setter
def EidToken(self, EidToken):
self._EidToken = EidToken
@property
def InfoType(self):
return self._InfoType
@InfoType.setter
def InfoType(self, InfoType):
self._InfoType = InfoType
@property
def BestFramesCount(self):
return self._BestFramesCount
@BestFramesCount.setter
def BestFramesCount(self, BestFramesCount):
self._BestFramesCount = BestFramesCount
def _deserialize(self, params):
self._EidToken = params.get("EidToken")
self._InfoType = params.get("InfoType")
self._BestFramesCount = params.get("BestFramesCount")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetEidResultResponse(AbstractModel):
"""GetEidResult返回参数结构体
"""
def __init__(self):
r"""
:param _Text: 文本类信息。(基于对敏感信息的保护,验证使用的姓名和身份证号统一通过加密后从Eidinfo参数中返回,如需获取请在控制台申请返回身份信息,详见[E证通获取实名信息指引](https://cloud.tencent.com/document/product/1007/63370))
注意:此字段可能返回 null,表示取不到有效值。
:type Text: :class:`tencentcloud.faceid.v20180301.models.DetectInfoText`
:param _IdCardData: 身份证照片信息。
注意:此字段可能返回 null,表示取不到有效值。
:type IdCardData: :class:`tencentcloud.faceid.v20180301.models.DetectInfoIdCardData`
:param _BestFrame: 最佳帧信息。
注意:此字段可能返回 null,表示取不到有效值。
:type BestFrame: :class:`tencentcloud.faceid.v20180301.models.DetectInfoBestFrame`
:param _EidInfo: Eid信息。(包括商户下用户唯一标识以及加密后的姓名、身份证号信息。解密方式详见[E证通获取实名信息指引](https://cloud.tencent.com/document/product/1007/63370))
注意:此字段可能返回 null,表示取不到有效值。
:type EidInfo: :class:`tencentcloud.faceid.v20180301.models.EidInfo`
:param _IntentionVerifyData: 意愿核身朗读模式相关信息。若未使用意愿核身朗读功能,该字段返回值可以不处理。
注意:此字段可能返回 null,表示取不到有效值。
:type IntentionVerifyData: :class:`tencentcloud.faceid.v20180301.models.IntentionVerifyData`
:param _IntentionQuestionResult: 意愿核身问答模式相关信息。若未使用意愿核身问答模式功能,该字段返回值可以不处理。
注意:此字段可能返回 null,表示取不到有效值。
:type IntentionQuestionResult: :class:`tencentcloud.faceid.v20180301.models.IntentionQuestionResult`
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Text = None
self._IdCardData = None
self._BestFrame = None
self._EidInfo = None
self._IntentionVerifyData = None
self._IntentionQuestionResult = None
self._RequestId = None
@property
def Text(self):
return self._Text
@Text.setter
def Text(self, Text):
self._Text = Text
@property
def IdCardData(self):
return self._IdCardData
@IdCardData.setter
def IdCardData(self, IdCardData):
self._IdCardData = IdCardData
@property
def BestFrame(self):
return self._BestFrame
@BestFrame.setter
def BestFrame(self, BestFrame):
self._BestFrame = BestFrame
@property
def EidInfo(self):
return self._EidInfo
@EidInfo.setter
def EidInfo(self, EidInfo):
self._EidInfo = EidInfo
@property
def IntentionVerifyData(self):
return self._IntentionVerifyData
@IntentionVerifyData.setter
def IntentionVerifyData(self, IntentionVerifyData):
self._IntentionVerifyData = IntentionVerifyData
@property
def IntentionQuestionResult(self):
return self._IntentionQuestionResult
@IntentionQuestionResult.setter
def IntentionQuestionResult(self, IntentionQuestionResult):
self._IntentionQuestionResult = IntentionQuestionResult
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
if params.get("Text") is not None:
self._Text = DetectInfoText()
self._Text._deserialize(params.get("Text"))
if params.get("IdCardData") is not None:
self._IdCardData = DetectInfoIdCardData()
self._IdCardData._deserialize(params.get("IdCardData"))
if params.get("BestFrame") is not None:
self._BestFrame = DetectInfoBestFrame()
self._BestFrame._deserialize(params.get("BestFrame"))
if params.get("EidInfo") is not None:
self._EidInfo = EidInfo()
self._EidInfo._deserialize(params.get("EidInfo"))
if params.get("IntentionVerifyData") is not None:
self._IntentionVerifyData = IntentionVerifyData()
self._IntentionVerifyData._deserialize(params.get("IntentionVerifyData"))
if params.get("IntentionQuestionResult") is not None:
self._IntentionQuestionResult = IntentionQuestionResult()
self._IntentionQuestionResult._deserialize(params.get("IntentionQuestionResult"))
self._RequestId = params.get("RequestId")
class GetEidTokenConfig(AbstractModel):
"""获取token时的配置
"""
def __init__(self):
r"""
:param _InputType: 姓名身份证输入方式。
1:传身份证正反面OCR
2:传身份证正面OCR
3:用户手动输入
4:客户后台传入
默认1
注:使用OCR时仅支持用户修改结果中的姓名
:type InputType: str
:param _UseIntentionVerify: 是否使用意愿核身,默认不使用。注意:如开启使用,则计费标签按【意愿核身】计费标签计价;如不开启,则计费标签按【E证通】计费标签计价,价格详见:[价格说明](https://cloud.tencent.com/document/product/1007/56804)。
:type UseIntentionVerify: bool
:param _IntentionMode: 意愿核身模式。枚举值:1( 朗读模式),2(问答模式) 。默认值1
:type IntentionMode: str
:param _IntentionVerifyText: 意愿核身朗读模式使用的文案,若未使用意愿核身朗读功能,该字段无需传入。默认为空,最长可接受120的字符串长度。
:type IntentionVerifyText: str
:param _IntentionQuestions: 意愿核身问答模式的配置列表。当前仅支持一个问答。
:type IntentionQuestions: list of IntentionQuestion
:param _IntentionRecognition: 意愿核身过程中识别用户的回答意图,开启后除了IntentionQuestions的Answers列表中的标准回答会通过,近似意图的回答也会通过,默认不开启。
:type IntentionRecognition: bool
:param _IsSupportHMTResidentPermitOCR: 是否支持港澳台居住证识别
:type IsSupportHMTResidentPermitOCR: bool
"""
self._InputType = None
self._UseIntentionVerify = None
self._IntentionMode = None
self._IntentionVerifyText = None
self._IntentionQuestions = None
self._IntentionRecognition = None
self._IsSupportHMTResidentPermitOCR = None
@property
def InputType(self):
return self._InputType
@InputType.setter
def InputType(self, InputType):
self._InputType = InputType
@property
def UseIntentionVerify(self):
return self._UseIntentionVerify
@UseIntentionVerify.setter
def UseIntentionVerify(self, UseIntentionVerify):
self._UseIntentionVerify = UseIntentionVerify
@property
def IntentionMode(self):
return self._IntentionMode
@IntentionMode.setter
def IntentionMode(self, IntentionMode):
self._IntentionMode = IntentionMode
@property
def IntentionVerifyText(self):
return self._IntentionVerifyText
@IntentionVerifyText.setter
def IntentionVerifyText(self, IntentionVerifyText):
self._IntentionVerifyText = IntentionVerifyText
@property
def IntentionQuestions(self):
return self._IntentionQuestions
@IntentionQuestions.setter
def IntentionQuestions(self, IntentionQuestions):
self._IntentionQuestions = IntentionQuestions
@property
def IntentionRecognition(self):
return self._IntentionRecognition
@IntentionRecognition.setter
def IntentionRecognition(self, IntentionRecognition):
self._IntentionRecognition = IntentionRecognition
@property
def IsSupportHMTResidentPermitOCR(self):
return self._IsSupportHMTResidentPermitOCR
@IsSupportHMTResidentPermitOCR.setter
def IsSupportHMTResidentPermitOCR(self, IsSupportHMTResidentPermitOCR):
self._IsSupportHMTResidentPermitOCR = IsSupportHMTResidentPermitOCR
def _deserialize(self, params):
self._InputType = params.get("InputType")
self._UseIntentionVerify = params.get("UseIntentionVerify")
self._IntentionMode = params.get("IntentionMode")
self._IntentionVerifyText = params.get("IntentionVerifyText")
if params.get("IntentionQuestions") is not None:
self._IntentionQuestions = []
for item in params.get("IntentionQuestions"):
obj = IntentionQuestion()
obj._deserialize(item)
self._IntentionQuestions.append(obj)
self._IntentionRecognition = params.get("IntentionRecognition")
self._IsSupportHMTResidentPermitOCR = params.get("IsSupportHMTResidentPermitOCR")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
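# Hedged usage sketch: a GetEidTokenConfig for Q&A-mode intention
# verification with a single question, within the documented limits (one
# question; answers of at most 10 characters). The question text is a
# hypothetical example meaning "Do you agree to proceed with this
# transaction?".
def _example_eid_token_config():
    question = IntentionQuestion()
    question.Question = "您是否同意办理本笔业务?"
    question.Answers = ["同意"]

    config = GetEidTokenConfig()
    config.InputType = "4"            # name and ID number supplied by the backend
    config.UseIntentionVerify = True  # billed under the intention verification tag
    config.IntentionMode = "2"        # Q&A mode
    config.IntentionQuestions = [question]
    return config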
class GetEidTokenRequest(AbstractModel):
"""GetEidToken请求参数结构体
"""
def __init__(self):
r"""
:param _MerchantId: EID商户id,字段长度最长50位。
:type MerchantId: str
:param _IdCard: 身份标识(未使用OCR服务时,必须传入)。
规则:a-zA-Z0-9组合。最长长度32位。
:type IdCard: str
:param _Name: 姓名。(未使用OCR服务时,必须传入)最长长度32位。中文请使用UTF-8编码。
:type Name: str
:param _Extra: 透传字段,在获取验证结果时返回。最长长度1024位。
:type Extra: str
:param _Config: 小程序模式配置,包括如何传入姓名身份证的配置,以及是否使用意愿核身。
:type Config: :class:`tencentcloud.faceid.v20180301.models.GetEidTokenConfig`
:param _RedirectUrl: 最长长度1024位。用户从Url中进入核身认证结束后重定向的回调链接地址。EidToken会在该链接的query参数中。
:type RedirectUrl: str
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._MerchantId = None
self._IdCard = None
self._Name = None
self._Extra = None
self._Config = None
self._RedirectUrl = None
self._Encryption = None
@property
def MerchantId(self):
return self._MerchantId
@MerchantId.setter
def MerchantId(self, MerchantId):
self._MerchantId = MerchantId
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Extra(self):
return self._Extra
@Extra.setter
def Extra(self, Extra):
self._Extra = Extra
@property
def Config(self):
return self._Config
@Config.setter
def Config(self, Config):
self._Config = Config
@property
def RedirectUrl(self):
return self._RedirectUrl
@RedirectUrl.setter
def RedirectUrl(self, RedirectUrl):
self._RedirectUrl = RedirectUrl
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._MerchantId = params.get("MerchantId")
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._Extra = params.get("Extra")
if params.get("Config") is not None:
self._Config = GetEidTokenConfig()
self._Config._deserialize(params.get("Config"))
self._RedirectUrl = params.get("RedirectUrl")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetEidTokenResponse(AbstractModel):
"""GetEidToken返回参数结构体
"""
def __init__(self):
r"""
:param _EidToken: 一次核身流程的标识,有效时间为600秒;
完成核身后,可用该标识获取验证结果信息。
:type EidToken: str
:param _Url: 发起核身流程的URL,用于H5场景核身。
:type Url: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._EidToken = None
self._Url = None
self._RequestId = None
@property
def EidToken(self):
return self._EidToken
@EidToken.setter
def EidToken(self, EidToken):
self._EidToken = EidToken
@property
def Url(self):
return self._Url
@Url.setter
def Url(self, Url):
self._Url = Url
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._EidToken = params.get("EidToken")
self._Url = params.get("Url")
self._RequestId = params.get("RequestId")
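# Hedged sketch of the two-step E-certificate flow: obtain a token, let the
# user complete verification via the returned Url, then fetch results with
# the same token. Assumption (not confirmed by this file): `client` is a
# configured FaceidClient from this SDK's faceid_client module, which
# exposes one method per API action (GetEidToken, GetEidResult) following
# the SDK's naming convention.
def _example_eid_flow(client, merchant_id, config):
    token_req = GetEidTokenRequest()
    token_req.MerchantId = merchant_id
    token_req.Config = config
    token_resp = client.GetEidToken(token_req)

    # ... the user completes verification via token_resp.Url within 600 seconds ...

    result_req = GetEidResultRequest()
    result_req.EidToken = token_resp.EidToken
    result_req.InfoType = "0"  # fetch everything
    return client.GetEidResult(result_req)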
class GetFaceIdResultRequest(AbstractModel):
"""GetFaceIdResult请求参数结构体
"""
def __init__(self):
r"""
:param _FaceIdToken: SDK人脸核身流程的标识,调用GetFaceIdToken接口时生成。
:type FaceIdToken: str
:param _IsNeedVideo: 是否需要拉取视频,默认false不需要
:type IsNeedVideo: bool
:param _IsNeedBestFrame: 是否需要拉取截帧,默认false不需要
:type IsNeedBestFrame: bool
"""
self._FaceIdToken = None
self._IsNeedVideo = None
self._IsNeedBestFrame = None
@property
def FaceIdToken(self):
return self._FaceIdToken
@FaceIdToken.setter
def FaceIdToken(self, FaceIdToken):
self._FaceIdToken = FaceIdToken
@property
def IsNeedVideo(self):
return self._IsNeedVideo
@IsNeedVideo.setter
def IsNeedVideo(self, IsNeedVideo):
self._IsNeedVideo = IsNeedVideo
@property
def IsNeedBestFrame(self):
return self._IsNeedBestFrame
@IsNeedBestFrame.setter
def IsNeedBestFrame(self, IsNeedBestFrame):
self._IsNeedBestFrame = IsNeedBestFrame
def _deserialize(self, params):
self._FaceIdToken = params.get("FaceIdToken")
self._IsNeedVideo = params.get("IsNeedVideo")
self._IsNeedBestFrame = params.get("IsNeedBestFrame")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetFaceIdResultResponse(AbstractModel):
"""GetFaceIdResult返回参数结构体
"""
def __init__(self):
r"""
:param _IdCard: 身份证
:type IdCard: str
:param _Name: 姓名
:type Name: str
:param _Result: 业务核验结果,参考https://cloud.tencent.com/document/product/1007/47912
:type Result: str
:param _Description: 业务核验描述
:type Description: str
:param _Similarity: 相似度,0-100,数值越大相似度越高
:type Similarity: float
:param _VideoBase64: 用户核验的视频base64,如果选择了使用cos,返回完整cos地址如https://bucket.cos.ap-guangzhou.myqcloud.com/objectKey
注意:此字段可能返回 null,表示取不到有效值。
:type VideoBase64: str
:param _BestFrameBase64: 用户核验视频的截帧base64,如果选择了使用cos,返回完整cos地址如https://bucket.cos.ap-guangzhou.myqcloud.com/objectKey
注意:此字段可能返回 null,表示取不到有效值。
:type BestFrameBase64: str
:param _Extra: 获取token时透传的信息
注意:此字段可能返回 null,表示取不到有效值。
:type Extra: str
:param _DeviceInfoTag: 设备风险标签,仅错误码返回1007(设备疑似被劫持)时返回风险标签。标签说明:
202、5001:设备疑似被Root
203、5004:设备疑似被注入
205:设备疑似被Hook
206:设备疑似虚拟运行环境
5007、1005:设备疑似摄像头被劫持
8000:设备疑似存在异常篡改行为
注意:此字段可能返回 null,表示取不到有效值。
:type DeviceInfoTag: str
:param _RiskInfoTag: 行为风险标签,仅错误码返回1007(设备疑似被劫持)时返回风险标签。标签说明:
02:攻击风险
注意:此字段可能返回 null,表示取不到有效值。
:type RiskInfoTag: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._IdCard = None
self._Name = None
self._Result = None
self._Description = None
self._Similarity = None
self._VideoBase64 = None
self._BestFrameBase64 = None
self._Extra = None
self._DeviceInfoTag = None
self._RiskInfoTag = None
self._RequestId = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def Similarity(self):
return self._Similarity
@Similarity.setter
def Similarity(self, Similarity):
self._Similarity = Similarity
@property
def VideoBase64(self):
return self._VideoBase64
@VideoBase64.setter
def VideoBase64(self, VideoBase64):
self._VideoBase64 = VideoBase64
@property
def BestFrameBase64(self):
return self._BestFrameBase64
@BestFrameBase64.setter
def BestFrameBase64(self, BestFrameBase64):
self._BestFrameBase64 = BestFrameBase64
@property
def Extra(self):
return self._Extra
@Extra.setter
def Extra(self, Extra):
self._Extra = Extra
@property
def DeviceInfoTag(self):
return self._DeviceInfoTag
@DeviceInfoTag.setter
def DeviceInfoTag(self, DeviceInfoTag):
self._DeviceInfoTag = DeviceInfoTag
@property
def RiskInfoTag(self):
return self._RiskInfoTag
@RiskInfoTag.setter
def RiskInfoTag(self, RiskInfoTag):
self._RiskInfoTag = RiskInfoTag
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._Result = params.get("Result")
self._Description = params.get("Description")
self._Similarity = params.get("Similarity")
self._VideoBase64 = params.get("VideoBase64")
self._BestFrameBase64 = params.get("BestFrameBase64")
self._Extra = params.get("Extra")
self._DeviceInfoTag = params.get("DeviceInfoTag")
self._RiskInfoTag = params.get("RiskInfoTag")
self._RequestId = params.get("RequestId")
class GetFaceIdTokenRequest(AbstractModel):
"""GetFaceIdToken请求参数结构体
"""
def __init__(self):
r"""
:param _CompareLib: 本地上传照片(LOCAL)、商业库(BUSINESS)
:type CompareLib: str
:param _IdCard: CompareLib为商业库时必传。
:type IdCard: str
:param _Name: CompareLib为商业库时必传。
:type Name: str
:param _ImageBase64: CompareLib为上传照片比对时必传,Base64后图片最大8MB。
请使用标准的Base64编码方式(带=补位),编码规范参考RFC4648。
:type ImageBase64: str
:param _Meta: SDK中生成的Meta字符串
:type Meta: str
:param _Extra: 透传参数 1000长度字符串
:type Extra: str
:param _UseCos: 默认为false,设置该参数为true后,核身过程中的视频图片将会存储在人脸核身控制台授权cos的bucket中,拉取结果时会返回对应资源完整cos地址。开通地址见https://console.cloud.tencent.com/faceid/cos
【注意】选择该参数为true后将不返回base64数据,请根据接入情况谨慎修改。
:type UseCos: bool
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._CompareLib = None
self._IdCard = None
self._Name = None
self._ImageBase64 = None
self._Meta = None
self._Extra = None
self._UseCos = None
self._Encryption = None
@property
def CompareLib(self):
return self._CompareLib
@CompareLib.setter
def CompareLib(self, CompareLib):
self._CompareLib = CompareLib
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def ImageBase64(self):
return self._ImageBase64
@ImageBase64.setter
def ImageBase64(self, ImageBase64):
self._ImageBase64 = ImageBase64
@property
def Meta(self):
return self._Meta
@Meta.setter
def Meta(self, Meta):
self._Meta = Meta
@property
def Extra(self):
return self._Extra
@Extra.setter
def Extra(self, Extra):
self._Extra = Extra
@property
def UseCos(self):
return self._UseCos
@UseCos.setter
def UseCos(self, UseCos):
self._UseCos = UseCos
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._CompareLib = params.get("CompareLib")
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._ImageBase64 = params.get("ImageBase64")
self._Meta = params.get("Meta")
self._Extra = params.get("Extra")
self._UseCos = params.get("UseCos")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetFaceIdTokenResponse(AbstractModel):
"""GetFaceIdToken返回参数结构体
"""
def __init__(self):
r"""
:param _FaceIdToken: 有效期 10分钟。只能完成1次核身。
:type FaceIdToken: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._FaceIdToken = None
self._RequestId = None
@property
def FaceIdToken(self):
return self._FaceIdToken
@FaceIdToken.setter
def FaceIdToken(self, FaceIdToken):
self._FaceIdToken = FaceIdToken
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._FaceIdToken = params.get("FaceIdToken")
self._RequestId = params.get("RequestId")
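# Hedged sketch of the SDK verification flow: exchange an uploaded photo
# for a FaceIdToken, hand the token to the mobile SDK, then fetch the
# result. Assumption (not confirmed by this file): `client` is a configured
# FaceidClient exposing GetFaceIdToken/GetFaceIdResult action methods, per
# this SDK's convention.
def _example_faceid_flow(client, image_base64):
    token_req = GetFaceIdTokenRequest()
    token_req.CompareLib = "LOCAL"
    token_req.ImageBase64 = image_base64
    token = client.GetFaceIdToken(token_req).FaceIdToken  # valid 10 minutes, single use

    # ... pass `token` to the mobile SDK and wait for the user to finish ...

    result_req = GetFaceIdResultRequest()
    result_req.FaceIdToken = token
    result_req.IsNeedBestFrame = True
    return client.GetFaceIdResult(result_req)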
class GetLiveCodeRequest(AbstractModel):
"""GetLiveCode请求参数结构体
"""
class GetLiveCodeResponse(AbstractModel):
"""GetLiveCode返回参数结构体
"""
def __init__(self):
r"""
:param _LiveCode: 数字验证码,如:1234
:type LiveCode: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._LiveCode = None
self._RequestId = None
@property
def LiveCode(self):
return self._LiveCode
@LiveCode.setter
def LiveCode(self, LiveCode):
self._LiveCode = LiveCode
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._LiveCode = params.get("LiveCode")
self._RequestId = params.get("RequestId")
class GetWeChatBillDetailsRequest(AbstractModel):
"""GetWeChatBillDetails请求参数结构体
"""
def __init__(self):
r"""
:param _Date: 拉取的日期(YYYY-MM-DD)。最大可追溯到365天前。当天6点后才能拉取前一天的数据。
:type Date: str
:param _Cursor: 游标。用于分页,取第一页时传0,取后续页面时,传入本接口响应中返回的NextCursor字段的值。
:type Cursor: int
:param _RuleId: 需要拉取账单详情业务对应的RuleId。不传会返回所有RuleId数据。默认为空字符串。
:type RuleId: str
"""
self._Date = None
self._Cursor = None
self._RuleId = None
@property
def Date(self):
return self._Date
@Date.setter
def Date(self, Date):
self._Date = Date
@property
def Cursor(self):
return self._Cursor
@Cursor.setter
def Cursor(self, Cursor):
self._Cursor = Cursor
@property
def RuleId(self):
return self._RuleId
@RuleId.setter
def RuleId(self, RuleId):
self._RuleId = RuleId
def _deserialize(self, params):
self._Date = params.get("Date")
self._Cursor = params.get("Cursor")
self._RuleId = params.get("RuleId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class GetWeChatBillDetailsResponse(AbstractModel):
"""GetWeChatBillDetails返回参数结构体
"""
def __init__(self):
r"""
:param _HasNextPage: 是否还有下一页。该字段为true时,需要将NextCursor的值作为入参Cursor继续调用本接口。
:type HasNextPage: bool
:param _NextCursor: 下一页的游标。用于分页。
:type NextCursor: int
:param _WeChatBillDetails: 数据
:type WeChatBillDetails: list of WeChatBillDetail
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._HasNextPage = None
self._NextCursor = None
self._WeChatBillDetails = None
self._RequestId = None
@property
def HasNextPage(self):
return self._HasNextPage
@HasNextPage.setter
def HasNextPage(self, HasNextPage):
self._HasNextPage = HasNextPage
@property
def NextCursor(self):
return self._NextCursor
@NextCursor.setter
def NextCursor(self, NextCursor):
self._NextCursor = NextCursor
@property
def WeChatBillDetails(self):
return self._WeChatBillDetails
@WeChatBillDetails.setter
def WeChatBillDetails(self, WeChatBillDetails):
self._WeChatBillDetails = WeChatBillDetails
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._HasNextPage = params.get("HasNextPage")
self._NextCursor = params.get("NextCursor")
if params.get("WeChatBillDetails") is not None:
self._WeChatBillDetails = []
for item in params.get("WeChatBillDetails"):
obj = WeChatBillDetail()
obj._deserialize(item)
self._WeChatBillDetails.append(obj)
self._RequestId = params.get("RequestId")
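# Hedged usage sketch: page through one day's bill details using the
# documented Cursor/NextCursor/HasNextPage contract. Assumption (not
# confirmed by this file): `client` is a configured FaceidClient exposing a
# GetWeChatBillDetails action method, per this SDK's convention.
def _example_fetch_all_bill_details(client, date, rule_id=""):
    details, cursor = [], 0
    while True:
        req = GetWeChatBillDetailsRequest()
        req.Date = date
        req.Cursor = cursor
        req.RuleId = rule_id
        resp = client.GetWeChatBillDetails(req)
        details.extend(resp.WeChatBillDetails or [])
        if not resp.HasNextPage:
            return details
        cursor = resp.NextCursor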
class IdCardOCRVerificationRequest(AbstractModel):
"""IdCardOCRVerification请求参数结构体
"""
def __init__(self):
r"""
:param _IdCard: 身份证号
姓名和身份证号、ImageBase64、ImageUrl三者必须提供其中之一。若都提供了,则按照姓名和身份证号>ImageBase64>ImageUrl的优先级使用参数。
:type IdCard: str
:param _Name: 姓名
:type Name: str
:param _ImageBase64: 身份证人像面的 Base64 值
支持的图片格式:PNG、JPG、JPEG,暂不支持 GIF 格式。
支持的图片大小:所下载图片经Base64编码后不超过 3M。请使用标准的Base64编码方式(带=补位),编码规范参考RFC4648。
:type ImageBase64: str
:param _ImageUrl: 身份证人像面的 Url 地址
支持的图片格式:PNG、JPG、JPEG,暂不支持 GIF 格式。
支持的图片大小:所下载图片经 Base64 编码后不超过 3M。图片下载时间不超过 3 秒。
图片存储于腾讯云的 Url 可保障更高的下载速度和稳定性,建议图片存储于腾讯云。
非腾讯云存储的 Url 速度和稳定性可能受一定影响。
:type ImageUrl: str
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._IdCard = None
self._Name = None
self._ImageBase64 = None
self._ImageUrl = None
self._Encryption = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def ImageBase64(self):
return self._ImageBase64
@ImageBase64.setter
def ImageBase64(self, ImageBase64):
self._ImageBase64 = ImageBase64
@property
def ImageUrl(self):
return self._ImageUrl
@ImageUrl.setter
def ImageUrl(self, ImageUrl):
self._ImageUrl = ImageUrl
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._ImageBase64 = params.get("ImageBase64")
self._ImageUrl = params.get("ImageUrl")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class IdCardOCRVerificationResponse(AbstractModel):
"""IdCardOCRVerification返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码,收费情况如下。
收费结果码:
0: 姓名和身份证号一致
-1: 姓名和身份证号不一致
不收费结果码:
-2: 非法身份证号(长度、校验位等不正确)
-3: 非法姓名(长度、格式等不正确)
-4: 证件库服务异常
-5: 证件库中无此身份证记录
-6: 权威比对系统升级中,请稍后再试
-7: 认证次数超过当日限制
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _Name: 用于验证的姓名
:type Name: str
:param _IdCard: 用于验证的身份证号
:type IdCard: str
:param _Sex: OCR得到的性别
注意:此字段可能返回 null,表示取不到有效值。
:type Sex: str
:param _Nation: OCR得到的民族
注意:此字段可能返回 null,表示取不到有效值。
:type Nation: str
:param _Birth: OCR得到的生日
注意:此字段可能返回 null,表示取不到有效值。
:type Birth: str
:param _Address: OCR得到的地址
注意:此字段可能返回 null,表示取不到有效值。
:type Address: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._Name = None
self._IdCard = None
self._Sex = None
self._Nation = None
self._Birth = None
self._Address = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Sex(self):
return self._Sex
@Sex.setter
def Sex(self, Sex):
self._Sex = Sex
@property
def Nation(self):
return self._Nation
@Nation.setter
def Nation(self, Nation):
self._Nation = Nation
@property
def Birth(self):
return self._Birth
@Birth.setter
def Birth(self, Birth):
self._Birth = Birth
@property
def Address(self):
return self._Address
@Address.setter
def Address(self, Address):
self._Address = Address
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._Name = params.get("Name")
self._IdCard = params.get("IdCard")
self._Sex = params.get("Sex")
self._Nation = params.get("Nation")
self._Birth = params.get("Birth")
self._Address = params.get("Address")
self._RequestId = params.get("RequestId")
class IdCardVerificationRequest(AbstractModel):
"""IdCardVerification请求参数结构体
"""
def __init__(self):
r"""
:param _IdCard: 身份证号
:type IdCard: str
:param _Name: 姓名
:type Name: str
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._IdCard = None
self._Name = None
self._Encryption = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class IdCardVerificationResponse(AbstractModel):
"""IdCardVerification返回参数结构体
"""
def __init__(self):
r"""
:param _Result: 认证结果码,收费情况如下。
收费结果码:
0: 姓名和身份证号一致
-1: 姓名和身份证号不一致
不收费结果码:
-2: 非法身份证号(长度、校验位等不正确)
-3: 非法姓名(长度、格式等不正确)
-4: 证件库服务异常
-5: 证件库中无此身份证记录
-6: 权威比对系统升级中,请稍后再试
-7: 认证次数超过当日限制
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Result = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
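# Hedged usage sketch: interpret an IdCardVerificationResponse using the
# result codes documented above (0 and -1 are billable; -2 and below are
# not).
def _example_interpret_id_card_verification(resp):
    code = int(resp.Result)
    matched = code == 0
    billable = code in (0, -1)
    return matched, billable, resp.Description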
class ImageRecognitionRequest(AbstractModel):
"""ImageRecognition请求参数结构体
"""
def __init__(self):
r"""
:param _IdCard: 身份证号
:type IdCard: str
:param _Name: 姓名。中文请使用UTF-8编码。
:type Name: str
:param _ImageBase64: 用于人脸比对的照片,图片的Base64值;
Base64编码后的图片数据大小不超过3M,仅支持jpg、png格式。
请使用标准的Base64编码方式(带=补位),编码规范参考RFC4648。
:type ImageBase64: str
:param _Optional: 本接口不需要传递此参数。
:type Optional: str
:param _Encryption: 敏感数据加密信息。对传入信息(姓名、身份证号)有加密需求的用户可使用此参数,详情请点击左侧链接。
:type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._IdCard = None
self._Name = None
self._ImageBase64 = None
self._Optional = None
self._Encryption = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def ImageBase64(self):
return self._ImageBase64
@ImageBase64.setter
def ImageBase64(self, ImageBase64):
self._ImageBase64 = ImageBase64
@property
def Optional(self):
return self._Optional
@Optional.setter
def Optional(self, Optional):
self._Optional = Optional
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._ImageBase64 = params.get("ImageBase64")
self._Optional = params.get("Optional")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class ImageRecognitionResponse(AbstractModel):
"""ImageRecognition返回参数结构体
"""
def __init__(self):
r"""
:param _Sim: 相似度,取值范围 [0.00, 100.00]。推荐相似度大于等于70时可判断为同一人,可根据具体场景自行调整阈值(阈值70的误通过率为千分之一,阈值80的误通过率是万分之一)
:type Sim: float
:param _Result: 业务错误码,成功情况返回Success, 错误情况请参考下方错误码 列表中FailedOperation部分
:type Result: str
:param _Description: 业务结果描述。
:type Description: str
:param _RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self._Sim = None
self._Result = None
self._Description = None
self._RequestId = None
@property
def Sim(self):
return self._Sim
@Sim.setter
def Sim(self, Sim):
self._Sim = Sim
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Sim = params.get("Sim")
self._Result = params.get("Result")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
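# Hedged usage sketch: apply the similarity guidance from the Sim docstring
# above (>= 70 recommended for a same-person judgment; raise toward 80 for
# a roughly 1-in-10,000 false acceptance rate).
def _example_is_same_person(resp, threshold=70.0):
    return resp.Result == "Success" and resp.Sim >= threshold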
class IntentionQuestion(AbstractModel):
"""意愿核身过程中播报的问题文本、用户回答的标准文本。
"""
def __init__(self):
r"""
:param _Question: 系统播报的问题文本,问题最大长度为150个字符。
:type Question: str
:param _Answers: 用户答案的标准文本列表,用于识别用户回答的语音与标准文本是否一致。列表长度最大为50,单个答案长度限制10个字符。
:type Answers: list of str
"""
self._Question = None
self._Answers = None
@property
def Question(self):
return self._Question
@Question.setter
def Question(self, Question):
self._Question = Question
@property
def Answers(self):
return self._Answers
@Answers.setter
def Answers(self, Answers):
self._Answers = Answers
def _deserialize(self, params):
self._Question = params.get("Question")
self._Answers = params.get("Answers")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class IntentionQuestionResult(AbstractModel):
"""意愿核身问答模式结果
"""
def __init__(self):
r"""
:param _FinalResultDetailCode: 意愿核身错误码:
0: "成功"
-1: "参数错误"
-2: "系统异常"
-101: "请保持人脸在框内"
-102: "检测到多张人脸"
-103: "人脸检测失败"
-104: "人脸检测不完整"
-105: "请勿遮挡眼睛"
-106: "请勿遮挡嘴巴"
-107: "请勿遮挡鼻子"
-201: "人脸比对相似度低"
-202: "人脸比对失败"
-301: "意愿核验不通过"
-800: "前端不兼容错误"
-801: "用户未授权摄像头和麦克风权限"
-802: "获取视频流失败"
-803: "用户主动关闭链接/异常断开链接"
-998: "系统数据异常"
-999: "系统未知错误,请联系人工核实"
注意:此字段可能返回 null,表示取不到有效值。
:type FinalResultDetailCode: int
:param _FinalResultMessage: 意愿核身错误信息
注意:此字段可能返回 null,表示取不到有效值。
:type FinalResultMessage: str
:param _Video: 视频base64(其中包含全程问题和回答音频,mp4格式)
注意:此字段可能返回 null,表示取不到有效值。
:type Video: str
:param _ScreenShot: 屏幕截图base64列表
注意:此字段可能返回 null,表示取不到有效值。
:type ScreenShot: list of str
:param _ResultCode: 和答案匹配结果列表
0:成功,-1:不匹配
注意:此字段可能返回 null,表示取不到有效值。
:type ResultCode: list of str
:param _AsrResult: 回答问题语音识别结果列表
注意:此字段可能返回 null,表示取不到有效值。
:type AsrResult: list of str
:param _Audios: 答案录音音频
注意:此字段可能返回 null,表示取不到有效值。
:type Audios: list of str
:param _FinalResultCode: 意愿核身最终结果:
0:认证通过,-1:认证未通过,-2:浏览器内核不兼容,无法进行意愿校验。建议使用“FinalResultDetailCode”参数获取详细的错误码信息。
注意:此字段可能返回 null,表示取不到有效值。
:type FinalResultCode: str
"""
self._FinalResultDetailCode = None
self._FinalResultMessage = None
self._Video = None
self._ScreenShot = None
self._ResultCode = None
self._AsrResult = None
self._Audios = None
self._FinalResultCode = None
@property
def FinalResultDetailCode(self):
return self._FinalResultDetailCode
@FinalResultDetailCode.setter
def FinalResultDetailCode(self, FinalResultDetailCode):
self._FinalResultDetailCode = FinalResultDetailCode
@property
def FinalResultMessage(self):
return self._FinalResultMessage
@FinalResultMessage.setter
def FinalResultMessage(self, FinalResultMessage):
self._FinalResultMessage = FinalResultMessage
@property
def Video(self):
return self._Video
@Video.setter
def Video(self, Video):
self._Video = Video
@property
def ScreenShot(self):
return self._ScreenShot
@ScreenShot.setter
def ScreenShot(self, ScreenShot):
self._ScreenShot = ScreenShot
@property
def ResultCode(self):
return self._ResultCode
@ResultCode.setter
def ResultCode(self, ResultCode):
self._ResultCode = ResultCode
@property
def AsrResult(self):
return self._AsrResult
@AsrResult.setter
def AsrResult(self, AsrResult):
self._AsrResult = AsrResult
@property
def Audios(self):
return self._Audios
@Audios.setter
def Audios(self, Audios):
self._Audios = Audios
@property
def FinalResultCode(self):
return self._FinalResultCode
@FinalResultCode.setter
def FinalResultCode(self, FinalResultCode):
self._FinalResultCode = FinalResultCode
def _deserialize(self, params):
self._FinalResultDetailCode = params.get("FinalResultDetailCode")
self._FinalResultMessage = params.get("FinalResultMessage")
self._Video = params.get("Video")
self._ScreenShot = params.get("ScreenShot")
self._ResultCode = params.get("ResultCode")
self._AsrResult = params.get("AsrResult")
self._Audios = params.get("Audios")
self._FinalResultCode = params.get("FinalResultCode")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are ignored." % ",".join(member_set))
class IntentionVerifyData(AbstractModel):
"""意愿核身相关结果
"""
def __init__(self):
r"""
:param _IntentionVerifyVideo: 意愿确认环节中录制的视频(base64)。若不存在则为空字符串。
注意:此字段可能返回 null,表示取不到有效值。
:type IntentionVerifyVideo: str
:param _AsrResult: 意愿确认环节中用户语音转文字的识别结果。若不存在则为空字符串。
注意:此字段可能返回 null,表示取不到有效值。
:type AsrResult: str
:param _ErrorCode: 意愿确认环节的结果码。当该结果码为0时,语音朗读的视频与语音识别结果才会返回。
注意:此字段可能返回 null,表示取不到有效值。
:type ErrorCode: int
:param _ErrorMessage: 意愿确认环节的结果信息。
注意:此字段可能返回 null,表示取不到有效值。
:type ErrorMessage: str
:param _IntentionVerifyBestFrame: 意愿确认环节中录制视频的最佳帧(base64)。若不存在则为空字符串。
注意:此字段可能返回 null,表示取不到有效值。
:type IntentionVerifyBestFrame: str
:param _AsrResultSimilarity: 本次流程用户语音与传入文本比对的相似度分值,取值范围 [0.00, 100.00]。只有配置了相似度阈值后才进行语音校验并返回相似度分值。
注意:此字段可能返回 null,表示取不到有效值。
:type AsrResultSimilarity: str
"""
self._IntentionVerifyVideo = None
self._AsrResult = None
self._ErrorCode = None
self._ErrorMessage = None
self._IntentionVerifyBestFrame = None
self._AsrResultSimilarity = None
@property
def IntentionVerifyVideo(self):
return self._IntentionVerifyVideo
@IntentionVerifyVideo.setter
def IntentionVerifyVideo(self, IntentionVerifyVideo):
self._IntentionVerifyVideo = IntentionVerifyVideo
@property
def AsrResult(self):
return self._AsrResult
@AsrResult.setter
def AsrResult(self, AsrResult):
self._AsrResult = AsrResult
@property
def ErrorCode(self):
return self._ErrorCode
@ErrorCode.setter
def ErrorCode(self, ErrorCode):
self._ErrorCode = ErrorCode
@property
def ErrorMessage(self):
return self._ErrorMessage
@ErrorMessage.setter
def ErrorMessage(self, ErrorMessage):
self._ErrorMessage = ErrorMessage
@property
def IntentionVerifyBestFrame(self):
return self._IntentionVerifyBestFrame
@IntentionVerifyBestFrame.setter
def IntentionVerifyBestFrame(self, IntentionVerifyBestFrame):
self._IntentionVerifyBestFrame = IntentionVerifyBestFrame
@property
def AsrResultSimilarity(self):
return self._AsrResultSimilarity
@AsrResultSimilarity.setter
def AsrResultSimilarity(self, AsrResultSimilarity):
self._AsrResultSimilarity = AsrResultSimilarity
def _deserialize(self, params):
self._IntentionVerifyVideo = params.get("IntentionVerifyVideo")
self._AsrResult = params.get("AsrResult")
self._ErrorCode = params.get("ErrorCode")
self._ErrorMessage = params.get("ErrorMessage")
self._IntentionVerifyBestFrame = params.get("IntentionVerifyBestFrame")
self._AsrResultSimilarity = params.get("AsrResultSimilarity")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class LivenessCompareRequest(AbstractModel):
    """LivenessCompare request structure
    """
    def __init__(self):
        r"""
        :param _LivenessType: Liveness detection type; valid values: LIP/ACTION/SILENT.
        LIP is digit mode, ACTION is action mode, SILENT is silent mode; pass in exactly one of the three.
        :type LivenessType: str
        :param _ImageBase64: Base64 value of the photo used for face comparison;
        the Base64-encoded image data must not exceed 3 MB; only jpg and png formats are supported.
        Use standard Base64 encoding (with = padding), as specified in RFC 4648.
        Either ImageUrl or ImageBase64 must be provided; if both are provided, only ImageBase64 is used.
        :type ImageBase64: str
        :param _ImageUrl: URL of the photo used for face comparison; the downloaded image, after Base64 encoding, must not exceed 3 MB; only jpg and png formats are supported.
        Either ImageUrl or ImageBase64 must be provided; if both are provided, only ImageBase64 is used.
        URLs hosted on Tencent Cloud offer higher download speed and stability, so storing the image on Tencent Cloud is recommended. The speed and stability of URLs hosted elsewhere may be affected.
        :type ImageUrl: str
        :param _ValidateData: Digit mode: pass the digit verification code, obtained in advance from the <a href="https://cloud.tencent.com/document/product/1007/31821">digit verification code API</a>;
        action mode: pass the action sequence, obtained in advance from the <a href="https://cloud.tencent.com/document/product/1007/31822">action sequence API</a>;
        silent mode: leave empty.
        :type ValidateData: str
        :param _Optional: Extra configuration, passed as a JSON string:
        {
        "BestFrameNum": 2  // return multiple best screenshots, value range 2-10
        }
        :type Optional: str
        :param _VideoBase64: Base64 value of the video used for liveness detection;
        the Base64-encoded size must not exceed 8 MB; mp4, avi and flv formats are supported.
        Use standard Base64 encoding (with = padding), as specified in RFC 4648.
        Either VideoUrl or VideoBase64 must be provided; if both are provided, only VideoBase64 is used.
        :type VideoBase64: str
        :param _VideoUrl: URL of the video used for liveness detection. The downloaded video, after Base64 encoding, must not exceed 8 MB and must download within 4 s; mp4, avi and flv formats are supported.
        Either VideoUrl or VideoBase64 must be provided; if both are provided, only VideoBase64 is used.
        URLs hosted on Tencent Cloud offer higher download speed and stability, so storing the video on Tencent Cloud is recommended. The speed and stability of URLs hosted elsewhere may be affected.
        :type VideoUrl: str
"""
self._LivenessType = None
self._ImageBase64 = None
self._ImageUrl = None
self._ValidateData = None
self._Optional = None
self._VideoBase64 = None
self._VideoUrl = None
@property
def LivenessType(self):
return self._LivenessType
@LivenessType.setter
def LivenessType(self, LivenessType):
self._LivenessType = LivenessType
@property
def ImageBase64(self):
return self._ImageBase64
@ImageBase64.setter
def ImageBase64(self, ImageBase64):
self._ImageBase64 = ImageBase64
@property
def ImageUrl(self):
return self._ImageUrl
@ImageUrl.setter
def ImageUrl(self, ImageUrl):
self._ImageUrl = ImageUrl
@property
def ValidateData(self):
return self._ValidateData
@ValidateData.setter
def ValidateData(self, ValidateData):
self._ValidateData = ValidateData
@property
def Optional(self):
return self._Optional
@Optional.setter
def Optional(self, Optional):
self._Optional = Optional
@property
def VideoBase64(self):
return self._VideoBase64
@VideoBase64.setter
def VideoBase64(self, VideoBase64):
self._VideoBase64 = VideoBase64
@property
def VideoUrl(self):
return self._VideoUrl
@VideoUrl.setter
def VideoUrl(self, VideoUrl):
self._VideoUrl = VideoUrl
def _deserialize(self, params):
self._LivenessType = params.get("LivenessType")
self._ImageBase64 = params.get("ImageBase64")
self._ImageUrl = params.get("ImageUrl")
self._ValidateData = params.get("ValidateData")
self._Optional = params.get("Optional")
self._VideoBase64 = params.get("VideoBase64")
self._VideoUrl = params.get("VideoUrl")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
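# Illustrative usage sketch (not part of the generated SDK): how a
# LivenessCompareRequest might be built and sent, assuming the standard
# tencentcloud client layout. Credentials, region, and payloads below are
# placeholders.
#
#     from tencentcloud.common import credential
#     from tencentcloud.faceid.v20180301 import faceid_client, models
#
#     cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")
#     client = faceid_client.FaceidClient(cred, "ap-guangzhou")
#     req = models.LivenessCompareRequest()
#     req.LivenessType = "SILENT"          # one of LIP / ACTION / SILENT
#     req.ImageBase64 = "<base64 image>"   # or set req.ImageUrl instead
#     req.VideoBase64 = "<base64 video>"   # or set req.VideoUrl instead
#     resp = client.LivenessCompare(req)
#     print(resp.Sim, resp.Result, resp.Description)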
class LivenessCompareResponse(AbstractModel):
    """LivenessCompare response structure
    """
    def __init__(self):
        r"""
        :param _BestFrameBase64: Best screenshot of the video after verification passes, as a Base64-encoded jpg.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BestFrameBase64: str
        :param _Sim: Similarity, in the range [0.00, 100.00]. A similarity of 70 or above is recommended for judging two faces to be the same person; the threshold can be adjusted per scenario (the false acceptance rate is roughly 1 in 1,000 at threshold 70 and 1 in 10,000 at threshold 80).
        :type Sim: float
        :param _Result: Business error code; Success on success. For failures, see the FailedOperation section in the error code list below.
        :type Result: str
        :param _Description: Business result description.
        :type Description: str
        :param _BestFrameList: List of best screenshots; returned only when multiple best screenshots are configured.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BestFrameList: list of str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._BestFrameBase64 = None
self._Sim = None
self._Result = None
self._Description = None
self._BestFrameList = None
self._RequestId = None
@property
def BestFrameBase64(self):
return self._BestFrameBase64
@BestFrameBase64.setter
def BestFrameBase64(self, BestFrameBase64):
self._BestFrameBase64 = BestFrameBase64
@property
def Sim(self):
return self._Sim
@Sim.setter
def Sim(self, Sim):
self._Sim = Sim
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def BestFrameList(self):
return self._BestFrameList
@BestFrameList.setter
def BestFrameList(self, BestFrameList):
self._BestFrameList = BestFrameList
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._BestFrameBase64 = params.get("BestFrameBase64")
self._Sim = params.get("Sim")
self._Result = params.get("Result")
self._Description = params.get("Description")
self._BestFrameList = params.get("BestFrameList")
self._RequestId = params.get("RequestId")
class LivenessRecognitionRequest(AbstractModel):
    """LivenessRecognition request structure
    """
    def __init__(self):
        r"""
        :param _IdCard: ID card number
        :type IdCard: str
        :param _Name: Name. Use UTF-8 encoding for Chinese characters.
        :type Name: str
        :param _LivenessType: Liveness detection type; valid values: LIP/ACTION/SILENT.
        LIP is digit mode, ACTION is action mode, SILENT is silent mode; pass in exactly one of the three.
        :type LivenessType: str
        :param _VideoBase64: Base64 value of the video used for liveness detection;
        the Base64-encoded size must not exceed 8 MB; mp4, avi and flv formats are supported.
        :type VideoBase64: str
        :param _VideoUrl: URL of the video used for liveness detection. The downloaded video, after Base64 encoding, must not exceed 8 MB and must download within 4 s; mp4, avi and flv formats are supported.
        Either VideoUrl or VideoBase64 must be provided; if both are provided, only VideoBase64 is used.
        URLs hosted on Tencent Cloud offer higher download speed and stability, so storing the video on Tencent Cloud is recommended. The speed and stability of URLs hosted elsewhere may be affected.
        :type VideoUrl: str
        :param _ValidateData: Digit mode: pass the digit verification code, obtained in advance from the <a href="https://cloud.tencent.com/document/product/1007/31821">digit verification code API</a>;
        action mode: pass the action sequence, obtained in advance from the <a href="https://cloud.tencent.com/document/product/1007/31822">action sequence API</a>;
        silent mode: leave empty.
        :type ValidateData: str
        :param _Optional: Extra configuration, passed as a JSON string:
        {
        "BestFrameNum": 2  // return multiple best screenshots, value range 2-10
        }
        :type Optional: str
        :param _Encryption: Encryption information for sensitive data. Users who need to encrypt the input (name, ID card number) can use this parameter; see the link on the left for details.
        :type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._IdCard = None
self._Name = None
self._LivenessType = None
self._VideoBase64 = None
self._VideoUrl = None
self._ValidateData = None
self._Optional = None
self._Encryption = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def LivenessType(self):
return self._LivenessType
@LivenessType.setter
def LivenessType(self, LivenessType):
self._LivenessType = LivenessType
@property
def VideoBase64(self):
return self._VideoBase64
@VideoBase64.setter
def VideoBase64(self, VideoBase64):
self._VideoBase64 = VideoBase64
@property
def VideoUrl(self):
return self._VideoUrl
@VideoUrl.setter
def VideoUrl(self, VideoUrl):
self._VideoUrl = VideoUrl
@property
def ValidateData(self):
return self._ValidateData
@ValidateData.setter
def ValidateData(self, ValidateData):
self._ValidateData = ValidateData
@property
def Optional(self):
return self._Optional
@Optional.setter
def Optional(self, Optional):
self._Optional = Optional
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._LivenessType = params.get("LivenessType")
self._VideoBase64 = params.get("VideoBase64")
self._VideoUrl = params.get("VideoUrl")
self._ValidateData = params.get("ValidateData")
self._Optional = params.get("Optional")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class LivenessRecognitionResponse(AbstractModel):
    """LivenessRecognition response structure
    """
    def __init__(self):
        r"""
        :param _BestFrameBase64: Best screenshot of the video after verification passes, as a Base64-encoded jpg.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BestFrameBase64: str
        :param _Sim: Similarity, in the range [0.00, 100.00]. A similarity of 70 or above is recommended for judging two faces to be the same person; the threshold can be adjusted per scenario (the false acceptance rate is roughly 1 in 1,000 at threshold 70 and 1 in 10,000 at threshold 80).
        :type Sim: float
        :param _Result: Business error code; Success on success. For failures, see the FailedOperation section in the error code list below.
        :type Result: str
        :param _Description: Business result description.
        :type Description: str
        :param _BestFrameList: List of best screenshots; returned only when multiple best screenshots are configured.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BestFrameList: list of str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._BestFrameBase64 = None
self._Sim = None
self._Result = None
self._Description = None
self._BestFrameList = None
self._RequestId = None
@property
def BestFrameBase64(self):
return self._BestFrameBase64
@BestFrameBase64.setter
def BestFrameBase64(self, BestFrameBase64):
self._BestFrameBase64 = BestFrameBase64
@property
def Sim(self):
return self._Sim
@Sim.setter
def Sim(self, Sim):
self._Sim = Sim
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def BestFrameList(self):
return self._BestFrameList
@BestFrameList.setter
def BestFrameList(self, BestFrameList):
self._BestFrameList = BestFrameList
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._BestFrameBase64 = params.get("BestFrameBase64")
self._Sim = params.get("Sim")
self._Result = params.get("Result")
self._Description = params.get("Description")
self._BestFrameList = params.get("BestFrameList")
self._RequestId = params.get("RequestId")
class LivenessRequest(AbstractModel):
    """Liveness request structure
    """
    def __init__(self):
        r"""
        :param _VideoBase64: Base64 value of the video used for liveness detection;
        the Base64-encoded size must not exceed 8 MB; mp4, avi and flv formats are supported.
        :type VideoBase64: str
        :param _LivenessType: Liveness detection type; valid values: LIP/ACTION/SILENT.
        LIP is digit mode, ACTION is action mode, SILENT is silent mode; pass in exactly one of the three.
        :type LivenessType: str
        :param _ValidateData: Digit mode: pass the digit verification code (e.g. 1234), obtained in advance from the digit verification code API;
        action mode: pass the action sequence (e.g. 2,1 or 1,2), obtained in advance from the action sequence API;
        silent mode: this parameter is not required.
        :type ValidateData: str
        :param _Optional: Extra configuration, passed as a JSON string:
        {
        "BestFrameNum": 2  // return multiple best screenshots, value range 1-10
        }
        :type Optional: str
"""
self._VideoBase64 = None
self._LivenessType = None
self._ValidateData = None
self._Optional = None
@property
def VideoBase64(self):
return self._VideoBase64
@VideoBase64.setter
def VideoBase64(self, VideoBase64):
self._VideoBase64 = VideoBase64
@property
def LivenessType(self):
return self._LivenessType
@LivenessType.setter
def LivenessType(self, LivenessType):
self._LivenessType = LivenessType
@property
def ValidateData(self):
return self._ValidateData
@ValidateData.setter
def ValidateData(self, ValidateData):
self._ValidateData = ValidateData
@property
def Optional(self):
return self._Optional
@Optional.setter
def Optional(self, Optional):
self._Optional = Optional
def _deserialize(self, params):
self._VideoBase64 = params.get("VideoBase64")
self._LivenessType = params.get("LivenessType")
self._ValidateData = params.get("ValidateData")
self._Optional = params.get("Optional")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class LivenessResponse(AbstractModel):
    """Liveness response structure
    """
    def __init__(self):
        r"""
        :param _BestFrameBase64: Best screenshot of the video after verification passes, as a Base64-encoded jpg.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BestFrameBase64: str
        :param _Result: Business error code; Success on success. For failures, see the FailedOperation section in the error code list below.
        :type Result: str
        :param _Description: Business result description.
        :type Description: str
        :param _BestFrameList: List of best screenshots; effective only when multiple best screenshots are configured.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BestFrameList: list of str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._BestFrameBase64 = None
self._Result = None
self._Description = None
self._BestFrameList = None
self._RequestId = None
@property
def BestFrameBase64(self):
return self._BestFrameBase64
@BestFrameBase64.setter
def BestFrameBase64(self, BestFrameBase64):
self._BestFrameBase64 = BestFrameBase64
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def BestFrameList(self):
return self._BestFrameList
@BestFrameList.setter
def BestFrameList(self, BestFrameList):
self._BestFrameList = BestFrameList
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._BestFrameBase64 = params.get("BestFrameBase64")
self._Result = params.get("Result")
self._Description = params.get("Description")
self._BestFrameList = params.get("BestFrameList")
self._RequestId = params.get("RequestId")
class MinorsVerificationRequest(AbstractModel):
    """MinorsVerification request structure
    """
    def __init__(self):
        r"""
        :param _Type: Type of input used for the check.
        0: check with a mobile number;
        1: check with a name and ID card number.
        :type Type: str
        :param _Mobile: Mobile number, 11 digits.
        Special note:
        mobile-number checking is limited to the data covered by the Tencent Health Guardian trusted model and is unrelated to whether the number itself is real-name registered with the carrier; numbers outside this range return "mobile number not real-name verified". Combining the mobile number with name and ID card number input is recommended.
        :type Mobile: str
        :param _IdCard: ID card number.
        :type IdCard: str
        :param _Name: Name.
        :type Name: str
        :param _Encryption: Encryption information for sensitive data. Users who need to encrypt the input (name, ID card number, mobile number) can use this parameter; see the link on the left for details.
        :type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._Type = None
self._Mobile = None
self._IdCard = None
self._Name = None
self._Encryption = None
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def Mobile(self):
return self._Mobile
@Mobile.setter
def Mobile(self, Mobile):
self._Mobile = Mobile
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._Type = params.get("Type")
self._Mobile = params.get("Mobile")
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MinorsVerificationResponse(AbstractModel):
    """MinorsVerification response structure
    """
    def __init__(self):
        r"""
        :param _Result: Result code; billing is as follows.
        Billed result codes:
        0: adult
        -1: minor
        -3: name and ID card number do not match
        Unbilled result codes:
        -2: no information found for the mobile number
        -4: invalid ID card number (incorrect length, checksum, etc.)
        -5: invalid name (incorrect length, format, etc.)
        -6: authoritative data source error
        -7: no identity information found
        -8: authoritative data source being upgraded; try again later
        :type Result: str
        :param _Description: Business result description.
        :type Description: str
        :param _AgeRange: Age range, in the format [a,b):
        [0,8) means under 8 years old, 8 excluded;
        [8,16) means 8 to 16 years old, 16 excluded;
        [16,18) means 16 to 18 years old, 18 excluded;
        [18,+) means over 18 years old.
        :type AgeRange: str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._Result = None
self._Description = None
self._AgeRange = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def AgeRange(self):
return self._AgeRange
@AgeRange.setter
def AgeRange(self, AgeRange):
self._AgeRange = AgeRange
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._AgeRange = params.get("AgeRange")
self._RequestId = params.get("RequestId")
class MobileNetworkTimeVerificationRequest(AbstractModel):
    """MobileNetworkTimeVerification request structure
    """
    def __init__(self):
        r"""
        :param _Mobile: Mobile number
        :type Mobile: str
        :param _Encryption: Encryption information for sensitive data. Users who need to encrypt the input (mobile number) can use this parameter; see the link on the left for details.
        :type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._Mobile = None
self._Encryption = None
@property
def Mobile(self):
return self._Mobile
@Mobile.setter
def Mobile(self, Mobile):
self._Mobile = Mobile
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._Mobile = params.get("Mobile")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MobileNetworkTimeVerificationResponse(AbstractModel):
    """MobileNetworkTimeVerification response structure
    """
    def __init__(self):
        r"""
        :param _Result: Verification result code; billing is as follows.
        Billed result codes:
        0: success
        -2: mobile number does not exist
        -3: mobile number exists, but its time in service cannot be queried
        Unbilled result codes:
        -1: malformed mobile number
        -4: verification center busy
        :type Result: str
        :param _Description: Business result description.
        :type Description: str
        :param _Range: Time-in-service range,
        in the format (a,b], meaning the number has been in service for more than a and at most b months. A b of + means there is no upper bound.
        :type Range: str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._Result = None
self._Description = None
self._Range = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def Range(self):
return self._Range
@Range.setter
def Range(self, Range):
self._Range = Range
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._Range = params.get("Range")
self._RequestId = params.get("RequestId")
class MobileStatusRequest(AbstractModel):
    """MobileStatus request structure
    """
    def __init__(self):
        r"""
        :param _Mobile: Mobile number
        :type Mobile: str
        :param _Encryption: Encryption information for sensitive data. Users who need to encrypt the input (mobile number) can use this parameter; see the link on the left for details.
        :type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._Mobile = None
self._Encryption = None
@property
def Mobile(self):
return self._Mobile
@Mobile.setter
def Mobile(self, Mobile):
self._Mobile = Mobile
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._Mobile = params.get("Mobile")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MobileStatusResponse(AbstractModel):
    """MobileStatus response structure
    """
    def __init__(self):
        r"""
        :param _Result: Verification result code; billing is as follows.
        Billed result codes:
        0: success
        Unbilled result codes:
        -1: no result found
        -2: malformed mobile number
        -3: verification center busy
        :type Result: str
        :param _Description: Business result description.
        :type Description: str
        :param _StatusCode: Status code:
        0: normal
        1: suspended
        2: cancelled
        3: unassigned
        4: off network
        99: unknown status
        :type StatusCode: int
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._Result = None
self._Description = None
self._StatusCode = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def StatusCode(self):
return self._StatusCode
@StatusCode.setter
def StatusCode(self, StatusCode):
self._StatusCode = StatusCode
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._StatusCode = params.get("StatusCode")
self._RequestId = params.get("RequestId")
class ParseNfcDataRequest(AbstractModel):
    """ParseNfcData request structure
    """
    def __init__(self):
        r"""
        :param _ReqId: Returned by the front-end SDK
        :type ReqId: str
"""
self._ReqId = None
@property
def ReqId(self):
return self._ReqId
@ReqId.setter
def ReqId(self, ReqId):
self._ReqId = ReqId
def _deserialize(self, params):
self._ReqId = params.get("ReqId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ParseNfcDataResponse(AbstractModel):
    """ParseNfcData response structure
    """
    def __init__(self):
        r"""
        :param _ResultCode: 0 if the first query succeeded, -1 if the query failed.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type ResultCode: str
        :param _IdNum: ID card number
        Note: this field may return null, indicating that no valid value could be obtained.
        :type IdNum: str
        :param _Name: Name
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Name: str
        :param _Picture: Photo
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Picture: str
        :param _BirthDate: Date of birth
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BirthDate: str
        :param _BeginTime: Validity period start date
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BeginTime: str
        :param _EndTime: Validity period end date
        Note: this field may return null, indicating that no valid value could be obtained.
        :type EndTime: str
        :param _Address: Address
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Address: str
        :param _Nation: Ethnicity
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Nation: str
        :param _Sex: Gender
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Sex: str
        :param _IdType: Document type code: ID card 01, Chinese passport 03, military officer ID 04, armed police ID 05, Hong Kong/Macao exit-entry permit 06, Taiwan compatriot permit 07, foreign passport 08, soldier ID 09, temporary ID card 10, household register 11, police officer ID 12, foreigner's permanent residence card 13, residence permit for Hong Kong/Macao/Taiwan residents 14, home-return permit 15, mainland resident travel permit to Taiwan 16, other documents 99
        Note: this field may return null, indicating that no valid value could be obtained.
        :type IdType: str
        :param _EnName: English name
        Note: this field may return null, indicating that no valid value could be obtained.
        :type EnName: str
        :param _SigningOrganization: Issuing authority
        Note: this field may return null, indicating that no valid value could be obtained.
        :type SigningOrganization: str
        :param _OtherIdNum: Pass number of the residence permit for Hong Kong/Macao/Taiwan residents
        Note: this field may return null, indicating that no valid value could be obtained.
        :type OtherIdNum: str
        :param _Nationality: Nationality on the travel document
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Nationality: str
        :param _PersonalNumber: Positions 29-42 of the second line of the travel document's machine-readable zone
        Note: this field may return null, indicating that no valid value could be obtained.
        :type PersonalNumber: str
        :param _CheckMRTD: Verification results for travel documents, as JSON:
        {"result_issuer": "issuer certificate validity check", "result_paper": "document security object validity check", "result_data": "data tamper-proofing check", "result_chip": "chip anti-cloning check"}
        0: passed, 1: failed, 2: not verified, 3: partially passed. The document is genuine when all four results are 0.
        Note: this field may return null, indicating that no valid value could be obtained.
        :type CheckMRTD: str
        :param _ImageA: Composite image of the portrait side of the ID card
        Note: this field may return null, indicating that no valid value could be obtained.
        :type ImageA: str
        :param _ImageB: Composite image of the national emblem side of the ID card
        Note: this field may return null, indicating that no valid value could be obtained.
        :type ImageB: str
        :param _ResultDescription: Description of the result code
        Note: this field may return null, indicating that no valid value could be obtained.
        :type ResultDescription: str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._ResultCode = None
self._IdNum = None
self._Name = None
self._Picture = None
self._BirthDate = None
self._BeginTime = None
self._EndTime = None
self._Address = None
self._Nation = None
self._Sex = None
self._IdType = None
self._EnName = None
self._SigningOrganization = None
self._OtherIdNum = None
self._Nationality = None
self._PersonalNumber = None
self._CheckMRTD = None
self._ImageA = None
self._ImageB = None
self._ResultDescription = None
self._RequestId = None
@property
def ResultCode(self):
return self._ResultCode
@ResultCode.setter
def ResultCode(self, ResultCode):
self._ResultCode = ResultCode
@property
def IdNum(self):
return self._IdNum
@IdNum.setter
def IdNum(self, IdNum):
self._IdNum = IdNum
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Picture(self):
return self._Picture
@Picture.setter
def Picture(self, Picture):
self._Picture = Picture
@property
def BirthDate(self):
return self._BirthDate
@BirthDate.setter
def BirthDate(self, BirthDate):
self._BirthDate = BirthDate
@property
def BeginTime(self):
return self._BeginTime
@BeginTime.setter
def BeginTime(self, BeginTime):
self._BeginTime = BeginTime
@property
def EndTime(self):
return self._EndTime
@EndTime.setter
def EndTime(self, EndTime):
self._EndTime = EndTime
@property
def Address(self):
return self._Address
@Address.setter
def Address(self, Address):
self._Address = Address
@property
def Nation(self):
return self._Nation
@Nation.setter
def Nation(self, Nation):
self._Nation = Nation
@property
def Sex(self):
return self._Sex
@Sex.setter
def Sex(self, Sex):
self._Sex = Sex
@property
def IdType(self):
return self._IdType
@IdType.setter
def IdType(self, IdType):
self._IdType = IdType
@property
def EnName(self):
return self._EnName
@EnName.setter
def EnName(self, EnName):
self._EnName = EnName
@property
def SigningOrganization(self):
return self._SigningOrganization
@SigningOrganization.setter
def SigningOrganization(self, SigningOrganization):
self._SigningOrganization = SigningOrganization
@property
def OtherIdNum(self):
return self._OtherIdNum
@OtherIdNum.setter
def OtherIdNum(self, OtherIdNum):
self._OtherIdNum = OtherIdNum
@property
def Nationality(self):
return self._Nationality
@Nationality.setter
def Nationality(self, Nationality):
self._Nationality = Nationality
@property
def PersonalNumber(self):
return self._PersonalNumber
@PersonalNumber.setter
def PersonalNumber(self, PersonalNumber):
self._PersonalNumber = PersonalNumber
@property
def CheckMRTD(self):
return self._CheckMRTD
@CheckMRTD.setter
def CheckMRTD(self, CheckMRTD):
self._CheckMRTD = CheckMRTD
@property
def ImageA(self):
return self._ImageA
@ImageA.setter
def ImageA(self, ImageA):
self._ImageA = ImageA
@property
def ImageB(self):
return self._ImageB
@ImageB.setter
def ImageB(self, ImageB):
self._ImageB = ImageB
@property
def ResultDescription(self):
return self._ResultDescription
@ResultDescription.setter
def ResultDescription(self, ResultDescription):
self._ResultDescription = ResultDescription
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._ResultCode = params.get("ResultCode")
self._IdNum = params.get("IdNum")
self._Name = params.get("Name")
self._Picture = params.get("Picture")
self._BirthDate = params.get("BirthDate")
self._BeginTime = params.get("BeginTime")
self._EndTime = params.get("EndTime")
self._Address = params.get("Address")
self._Nation = params.get("Nation")
self._Sex = params.get("Sex")
self._IdType = params.get("IdType")
self._EnName = params.get("EnName")
self._SigningOrganization = params.get("SigningOrganization")
self._OtherIdNum = params.get("OtherIdNum")
self._Nationality = params.get("Nationality")
self._PersonalNumber = params.get("PersonalNumber")
self._CheckMRTD = params.get("CheckMRTD")
self._ImageA = params.get("ImageA")
self._ImageB = params.get("ImageB")
self._ResultDescription = params.get("ResultDescription")
self._RequestId = params.get("RequestId")
class PhoneVerificationCMCCRequest(AbstractModel):
    """PhoneVerificationCMCC request structure
    """
    def __init__(self):
        r"""
        :param _IdCard: ID card number
        :type IdCard: str
        :param _Name: Name
        :type Name: str
        :param _Phone: Mobile number
        :type Phone: str
        :param _Encryption: Encryption information for sensitive data. Users who need to encrypt the input (name, ID card number, mobile number) can use this parameter; see the link on the left for details.
        :type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._IdCard = None
self._Name = None
self._Phone = None
self._Encryption = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Phone(self):
return self._Phone
@Phone.setter
def Phone(self, Phone):
self._Phone = Phone
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._Phone = params.get("Phone")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PhoneVerificationCMCCResponse(AbstractModel):
    """PhoneVerificationCMCC response structure
    """
    def __init__(self):
        r"""
        :param _Result: Verification result code; billing is as follows.
        Billed result codes:
        0: verification passed
        -4: information mismatch (the mobile number is real-name registered, but the name and ID card number do not match the registration)
        Unbilled result codes:
        -6: invalid mobile number
        -7: invalid ID card number
        -8: name check failed
        -9: no record
        -10: verification failed
        -11: verification center busy
        :type Result: str
        :param _Isp: Carrier name.
        Valid values: ["移动" (China Mobile), "联通" (China Unicom), "电信" (China Telecom), ""]
        :type Isp: str
        :param _Description: Business result description.
        :type Description: str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._Result = None
self._Isp = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Isp(self):
return self._Isp
@Isp.setter
def Isp(self, Isp):
self._Isp = Isp
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Isp = params.get("Isp")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
class PhoneVerificationCTCCRequest(AbstractModel):
    """PhoneVerificationCTCC request structure
    """
    def __init__(self):
        r"""
        :param _IdCard: ID card number
        :type IdCard: str
        :param _Name: Name
        :type Name: str
        :param _Phone: Mobile number
        :type Phone: str
        :param _Encryption: Encryption information for sensitive data. Users who need to encrypt the input (name, ID card number, mobile number) can use this parameter; see the link on the left for details.
        :type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._IdCard = None
self._Name = None
self._Phone = None
self._Encryption = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Phone(self):
return self._Phone
@Phone.setter
def Phone(self, Phone):
self._Phone = Phone
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._Phone = params.get("Phone")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PhoneVerificationCTCCResponse(AbstractModel):
    """PhoneVerificationCTCC response structure
    """
    def __init__(self):
        r"""
        :param _Result: Verification result code; billing is as follows.
        Billed result codes:
        0: verification passed
        -4: information mismatch (the mobile number is real-name registered, but the name and ID card number do not match the registration)
        Unbilled result codes:
        -6: invalid mobile number
        -7: invalid ID card number
        -8: name check failed
        -9: no record
        -10: verification failed
        -11: verification center busy
        :type Result: str
        :param _Isp: Carrier name.
        Valid values: ["移动" (China Mobile), "联通" (China Unicom), "电信" (China Telecom), ""]
        :type Isp: str
        :param _Description: Business result description.
        :type Description: str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._Result = None
self._Isp = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Isp(self):
return self._Isp
@Isp.setter
def Isp(self, Isp):
self._Isp = Isp
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Isp = params.get("Isp")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
class PhoneVerificationCUCCRequest(AbstractModel):
    """PhoneVerificationCUCC request structure
    """
    def __init__(self):
        r"""
        :param _IdCard: ID card number
        :type IdCard: str
        :param _Name: Name
        :type Name: str
        :param _Phone: Mobile number
        :type Phone: str
        :param _Encryption: Encryption information for sensitive data. Users who need to encrypt the input (name, ID card number, mobile number) can use this parameter; see the link on the left for details.
        :type Encryption: :class:`tencentcloud.faceid.v20180301.models.Encryption`
"""
self._IdCard = None
self._Name = None
self._Phone = None
self._Encryption = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Phone(self):
return self._Phone
@Phone.setter
def Phone(self, Phone):
self._Phone = Phone
@property
def Encryption(self):
return self._Encryption
@Encryption.setter
def Encryption(self, Encryption):
self._Encryption = Encryption
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._Phone = params.get("Phone")
if params.get("Encryption") is not None:
self._Encryption = Encryption()
self._Encryption._deserialize(params.get("Encryption"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PhoneVerificationCUCCResponse(AbstractModel):
    """PhoneVerificationCUCC response structure
    """
    def __init__(self):
        r"""
        :param _Result: Verification result code; billing is as follows.
        Billed result codes:
        0: verification passed
        -4: information mismatch (the mobile number is real-name registered, but the name and ID card number do not match the registration)
        Unbilled result codes:
        -6: invalid mobile number
        -7: invalid ID card number
        -8: name check failed
        -9: no record
        -10: verification failed
        -11: verification center busy
        :type Result: str
        :param _Isp: Carrier name.
        Valid values: ["移动" (China Mobile), "联通" (China Unicom), "电信" (China Telecom), ""]
        :type Isp: str
        :param _Description: Business result description.
        :type Description: str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._Result = None
self._Isp = None
self._Description = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Isp(self):
return self._Isp
@Isp.setter
def Isp(self, Isp):
self._Isp = Isp
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Isp = params.get("Isp")
self._Description = params.get("Description")
self._RequestId = params.get("RequestId")
class PhoneVerificationRequest(AbstractModel):
    """PhoneVerification request structure
    """
    def __init__(self):
        r"""
        :param _IdCard: ID card number
        :type IdCard: str
        :param _Name: Name
        :type Name: str
        :param _Phone: Mobile number
        :type Phone: str
        :param _CiphertextBlob: For users who need encryption: the KMS CiphertextBlob. See the <a href="https://cloud.tencent.com/document/product/1007/47180">data encryption</a> document for details.
        :type CiphertextBlob: str
        :param _EncryptList: When using the encryption service, the fields to be encrypted. For this API, one or more of the encrypted IdCard, Name and Phone values.
        :type EncryptList: list of str
        :param _Iv: For users who need encryption: the initialization vector for CBC encryption.
        :type Iv: str
"""
self._IdCard = None
self._Name = None
self._Phone = None
self._CiphertextBlob = None
self._EncryptList = None
self._Iv = None
@property
def IdCard(self):
return self._IdCard
@IdCard.setter
def IdCard(self, IdCard):
self._IdCard = IdCard
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Phone(self):
return self._Phone
@Phone.setter
def Phone(self, Phone):
self._Phone = Phone
@property
def CiphertextBlob(self):
return self._CiphertextBlob
@CiphertextBlob.setter
def CiphertextBlob(self, CiphertextBlob):
self._CiphertextBlob = CiphertextBlob
@property
def EncryptList(self):
return self._EncryptList
@EncryptList.setter
def EncryptList(self, EncryptList):
self._EncryptList = EncryptList
@property
def Iv(self):
return self._Iv
@Iv.setter
def Iv(self, Iv):
self._Iv = Iv
def _deserialize(self, params):
self._IdCard = params.get("IdCard")
self._Name = params.get("Name")
self._Phone = params.get("Phone")
self._CiphertextBlob = params.get("CiphertextBlob")
self._EncryptList = params.get("EncryptList")
self._Iv = params.get("Iv")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class PhoneVerificationResponse(AbstractModel):
    """PhoneVerification response structure
    """
    def __init__(self):
        r"""
        :param _Result: Verification result code:
        Billed result codes
        0: the three input items match
        -4: the three input items do not match
        Unbilled result codes
        -6: invalid mobile number
        -7: invalid ID card number
        -8: name check failed
        -9: no record
        -11: verification center busy
        :type Result: str
        :param _Description: Business result description.
        :type Description: str
        :param _Isp: Carrier name.
        Valid values: ["", "移动" (China Mobile), "电信" (China Telecom), "联通" (China Unicom)]
        :type Isp: str
        :param _RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self._Result = None
self._Description = None
self._Isp = None
self._RequestId = None
@property
def Result(self):
return self._Result
@Result.setter
def Result(self, Result):
self._Result = Result
@property
def Description(self):
return self._Description
@Description.setter
def Description(self, Description):
self._Description = Description
@property
def Isp(self):
return self._Isp
@Isp.setter
def Isp(self, Isp):
self._Isp = Isp
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._Result = params.get("Result")
self._Description = params.get("Description")
self._Isp = params.get("Isp")
self._RequestId = params.get("RequestId")
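# Illustrative usage sketch (not part of the generated SDK): a plain-text
# three-factor check with PhoneVerification, assuming the standard tencentcloud
# client layout; "client" is a FaceidClient as in the earlier sketch, and the
# field values are placeholders. For encrypted input, CiphertextBlob,
# EncryptList and Iv would be set instead of the plain fields.
#
#     req = models.PhoneVerificationRequest()
#     req.IdCard = "<id card number>"
#     req.Name = "<name>"
#     req.Phone = "<mobile number>"
#     resp = client.PhoneVerification(req)
#     print(resp.Result, resp.Isp, resp.Description)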
class RuleIdConfig(AbstractModel):
    """RuleId-related configuration
    """
    def __init__(self):
        r"""
        :param _IntentionRecognition: Recognize the intent of the user's answers during intention verification. When enabled, answers whose intent is close to the standard answers in the Answers list of IntentionQuestions pass as well, not only the standard answers themselves. Disabled by default.
        :type IntentionRecognition: bool
"""
self._IntentionRecognition = None
@property
def IntentionRecognition(self):
return self._IntentionRecognition
@IntentionRecognition.setter
def IntentionRecognition(self, IntentionRecognition):
self._IntentionRecognition = IntentionRecognition
def _deserialize(self, params):
self._IntentionRecognition = params.get("IntentionRecognition")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class WeChatBillDetail(AbstractModel):
    """Billing details
    """
    def __init__(self):
        r"""
        :param _BizToken: Token
        :type BizToken: str
        :param _ChargeCount: Number of charges for this token
        :type ChargeCount: int
        :param _ChargeDetails: Charge details for this token
        :type ChargeDetails: list of ChargeDetail
        :param _RuleId: Business RuleId
        :type RuleId: str
"""
self._BizToken = None
self._ChargeCount = None
self._ChargeDetails = None
self._RuleId = None
@property
def BizToken(self):
return self._BizToken
@BizToken.setter
def BizToken(self, BizToken):
self._BizToken = BizToken
@property
def ChargeCount(self):
return self._ChargeCount
@ChargeCount.setter
def ChargeCount(self, ChargeCount):
self._ChargeCount = ChargeCount
@property
def ChargeDetails(self):
return self._ChargeDetails
@ChargeDetails.setter
def ChargeDetails(self, ChargeDetails):
self._ChargeDetails = ChargeDetails
@property
def RuleId(self):
return self._RuleId
@RuleId.setter
def RuleId(self, RuleId):
self._RuleId = RuleId
def _deserialize(self, params):
self._BizToken = params.get("BizToken")
self._ChargeCount = params.get("ChargeCount")
if params.get("ChargeDetails") is not None:
self._ChargeDetails = []
for item in params.get("ChargeDetails"):
obj = ChargeDetail()
obj._deserialize(item)
self._ChargeDetails.append(obj)
self._RuleId = params.get("RuleId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
/zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/uniswap/liquitidy_math.py
# -*- coding: utf-8 -*-
from decimal import Decimal

"""
!!! IMPORTANT
This module is developed and enhanced from the active-strategy-framework of GammaStrategies.
Source code: https://github.com/GammaStrategies/active-strategy-framework/blob/main/UNI_v3_funcs.py
Original author information:
=============================================
Created on Mon Jun 14 18:53:09 2021
@author: JNP
"""
'''liquiditymath'''
'''Python library to emulate the calculations done in LiquidityMath.sol of the Uniswap V3 periphery contracts'''
# sqrtP: format X96 = int(1.0001**(tick/2)*(2**96))
# liquidity: int
# sqrtA = price for lower tick
# sqrtB = price for upper tick
'''get_amounts function'''
# Use the 'get_amounts' function to calculate amounts as a function of liquidity and price range
def get_amount0(sqrtA: int, sqrtB: int, liquidity: int, decimals: int) -> Decimal:
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
amount0 = (Decimal(liquidity * 2 ** 96 * (sqrtB - sqrtA)) / sqrtB / sqrtA) / 10 ** decimals
return amount0
def get_amount1(sqrtA: int, sqrtB: int, liquidity: int, decimals: int) -> Decimal:
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
amount1 = Decimal(liquidity * (sqrtB - sqrtA)) / 2 ** 96 / 10 ** decimals
return amount1
def get_sqrt(tick: int):
return Decimal(1.0001 ** (tick / 2) * (2 ** 96))
def get_amounts(sqrt_price_x96: int, tickA: int, tickB: int, liquidity: int, decimal0: int, decimal1: int) -> \
(Decimal, Decimal):
sqrt = sqrt_price_x96
sqrtA = get_sqrt_ratio_at_tick(tickA)
sqrtB = get_sqrt_ratio_at_tick(tickB)
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
if sqrt <= sqrtA:
amount0 = get_amount0(sqrtA, sqrtB, liquidity, decimal0)
return amount0, Decimal(0)
elif sqrtB > sqrt > sqrtA:
amount0 = get_amount0(sqrt, sqrtB, liquidity, decimal0)
amount1 = get_amount1(sqrtA, sqrt, liquidity, decimal1)
return amount0, amount1
else:
amount1 = get_amount1(sqrtA, sqrtB, liquidity, decimal1)
return Decimal(0), amount1
'''get token amounts relation'''
# Use this formula to calculate the amount of t0 based on the amount of t1 (needed before calculating liquidity)
# relation = t1/t0
def amounts_relation(tick: int, tickA: int, tickB: int, decimals0: int, decimals1: int) -> Decimal:
    sqrt = (1.0001 ** tick / 10 ** (decimals1 - decimals0)) ** (1 / 2)
    sqrtA = (1.0001 ** tickA / 10 ** (decimals1 - decimals0)) ** (1 / 2)
    sqrtB = (1.0001 ** tickB / 10 ** (decimals1 - decimals0)) ** (1 / 2)
    if sqrt == sqrtA or sqrt == sqrtB:
        # At a range boundary the formula degenerates (zero numerator or zero denominator),
        # so return 0 early; in the original code this guard assignment was overwritten by mistake.
        return Decimal(0)
    return Decimal((sqrt - sqrtA) / ((1 / sqrt) - (1 / sqrtB)))
'''get_liquidity function'''
def mul_div(a: int, b: int, denominator: int) -> int:
    """
    This function is very long in the Solidity contract, but only because integer
    width in Solidity is limited; Python has unbounded integers, so a single
    expression suffices. Ensure all parameters are ints!
    """
return a * b // denominator
def get_liquidity_for_amount0(sqrtA: int, sqrtB: int, amount: int) -> int:
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
intermediate = mul_div(sqrtA, sqrtB, 2 ** 96)
return mul_div(amount, intermediate, sqrtB - sqrtA)
def get_liquidity_for_amount1(sqrtA: int, sqrtB: int, amount: int) -> int:
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
return mul_div(amount, 2 ** 96, sqrtB - sqrtA)
def to_wei(amount, decimals) -> int:
return int(amount * 10 ** decimals)
def get_liquidity(sqrt_price_x96: int, tickA: int, tickB: int,
amount0: Decimal, amount1: Decimal,
decimal0: int, decimal1: int) -> int:
sqrt = sqrt_price_x96
sqrtA = get_sqrt_ratio_at_tick(tickA)
sqrtB = get_sqrt_ratio_at_tick(tickB)
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
amount0wei: int = to_wei(amount0, decimal0)
amount1wei: int = to_wei(amount1, decimal1)
if sqrt <= sqrtA:
liquidity0 = get_liquidity_for_amount0(sqrtA, sqrtB, amount0wei)
return liquidity0
elif sqrtB > sqrt > sqrtA:
liquidity0 = get_liquidity_for_amount0(sqrt, sqrtB, amount0wei)
liquidity1 = get_liquidity_for_amount1(sqrtA, sqrt, amount1wei)
liquidity = liquidity0 if liquidity0 < liquidity1 else liquidity1
return liquidity
else:
liquidity1 = get_liquidity_for_amount1(sqrtA, sqrtB, amount1wei)
return liquidity1
def get_sqrt_ratio_at_tick(tick: int) -> int:
tick = int(tick)
abs_tick = tick if tick >= 0 else -tick
assert abs_tick <= 887272
    # These magic numbers are, respectively, 1/sqrt(1.0001)^1, 1/sqrt(1.0001)^2, 1/sqrt(1.0001)^4, ... in fixed-point form
ratio: int = 0xfffcb933bd6fad37aa2d162d1a594001 if abs_tick & 0x1 != 0 else 0x100000000000000000000000000000000
if abs_tick & 0x2 != 0: ratio = (ratio * 0xfff97272373d413259a46990580e213a) >> 128
if abs_tick & 0x4 != 0: ratio = (ratio * 0xfff2e50f5f656932ef12357cf3c7fdcc) >> 128
if abs_tick & 0x8 != 0: ratio = (ratio * 0xffe5caca7e10e4e61c3624eaa0941cd0) >> 128
if abs_tick & 0x10 != 0: ratio = (ratio * 0xffcb9843d60f6159c9db58835c926644) >> 128
if abs_tick & 0x20 != 0: ratio = (ratio * 0xff973b41fa98c081472e6896dfb254c0) >> 128
if abs_tick & 0x40 != 0: ratio = (ratio * 0xff2ea16466c96a3843ec78b326b52861) >> 128
if abs_tick & 0x80 != 0: ratio = (ratio * 0xfe5dee046a99a2a811c461f1969c3053) >> 128
if abs_tick & 0x100 != 0: ratio = (ratio * 0xfcbe86c7900a88aedcffc83b479aa3a4) >> 128
if abs_tick & 0x200 != 0: ratio = (ratio * 0xf987a7253ac413176f2b074cf7815e54) >> 128
if abs_tick & 0x400 != 0: ratio = (ratio * 0xf3392b0822b70005940c7a398e4b70f3) >> 128
if abs_tick & 0x800 != 0: ratio = (ratio * 0xe7159475a2c29b7443b29c7fa6e889d9) >> 128
if abs_tick & 0x1000 != 0: ratio = (ratio * 0xd097f3bdfd2022b8845ad8f792aa5825) >> 128
if abs_tick & 0x2000 != 0: ratio = (ratio * 0xa9f746462d870fdf8a65dc1f90e061e5) >> 128
if abs_tick & 0x4000 != 0: ratio = (ratio * 0x70d869a156d2a1b890bb3df62baf32f7) >> 128
if abs_tick & 0x8000 != 0: ratio = (ratio * 0x31be135f97d08fd981231505542fcfa6) >> 128
if abs_tick & 0x10000 != 0: ratio = (ratio * 0x9aa508b5b7a84e1c677de54f3e99bc9) >> 128
if abs_tick & 0x20000 != 0: ratio = (ratio * 0x5d6af8dedb81196699c329225ee604) >> 128
if abs_tick & 0x40000 != 0: ratio = (ratio * 0x2216e584f5fa1ea926041bedfe98) >> 128
if abs_tick & 0x80000 != 0: ratio = (ratio * 0x48a170391f7dc42444e8fa2) >> 128
if tick > 0:
# type(uint256).max
ratio = int(115792089237316195423570985008687907853269984665640564039457584007913129639935 // ratio)
# this divides by 1<<32 rounding up to go from a Q128.128 to a Q128.96.
# we then downcast because we know the result always fits within 160 bits due to our tick input constraint
# we round up in the division so getTickAtSqrtRatio of the output price is always consistent
sqrt_price_x96 = (ratio >> 32) + (0 if ratio % (1 << 32) == 0 else 1)
return sqrt_price_x96
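# Minimal usage sketch (illustrative, not part of the original module): derive
# sqrt prices from ticks, then round-trip token amounts through get_liquidity
# and get_amounts. The tick range and decimals below are arbitrary examples.
if __name__ == "__main__":
    lower_tick, upper_tick = -600, 600      # example price range around tick 0
    sqrt_price = get_sqrt_ratio_at_tick(0)  # price at tick 0 is exactly 2 ** 96
    liquidity = get_liquidity(sqrt_price, lower_tick, upper_tick,
                              Decimal(1), Decimal(1), 18, 18)
    amount0, amount1 = get_amounts(sqrt_price, lower_tick, upper_tick,
                                   liquidity, 18, 18)
    print(liquidity, amount0, amount1)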
/classla-2.1.tar.gz/classla-2.1/README.md
# A [CLASSLA](http://www.clarin.si/info/k-centre/) Fork of [Stanza](https://github.com/stanfordnlp/stanza) for Processing Slovenian, Croatian, Serbian, Macedonian and Bulgarian
## Description
This pipeline allows for processing of standard Slovenian, Croatian, Serbian and Bulgarian on the levels of
- tokenization and sentence splitting
- part-of-speech tagging
- lemmatization
- dependency parsing
- named entity recognition
It also allows for (alpha) processing of standard Macedonian on the levels of
- tokenization and sentence splitting
- part-of-speech tagging
- lemmatization
Finally, it allows for processing of non-standard (Internet) Slovenian, Croatian and Serbian on the same levels as standard language (all models are tailored to non-standard language except for dependency parsing where the standard module is used).
## Differences to Stanza
The differences of this pipeline to the original Stanza pipeline are the following:
- usage of language-specific rule-based tokenizers and sentence splitters, [obeliks](https://pypi.org/project/obeliks/) for standard Slovenian and [reldi-tokeniser](https://pypi.org/project/reldi-tokeniser/) for the remaining varieties and languages (Stanza uses inferior machine-learning-based tokenization and sentence splitting trained on UD data)
- default pre-tagging and pre-lemmatization on the level of tokenizers for the following phenomena: punctuation, symbol, e-mail, URL, mention, hashtag, emoticon, emoji (usage documented [here](https://github.com/clarinsi/classla/blob/master/README.superuser.md#usage-of-tagging-control-via-the-tokenizer))
- optional control of the tagger for Slovenian via an inflectional lexicon on the levels of XPOS, UPOS, FEATS (usage documented [here](https://github.com/clarinsi/classla/blob/master/README.superuser.md#usage-of-inflectional-lexicon))
- closed class handling depending on the usage of the options described in the last two bullets, as documented [here](https://github.com/clarinsi/classla/blob/master/README.closed_classes.md)
- usage of external inflectional lexicons for lookup lemmatization, seq2seq being used very infrequently on OOVs only (Stanza uses only UD training data for lookup lemmatization)
- morphosyntactic tagging models based on larger quantities of training data than is available in UD (training data that are morphosyntactically tagged, but not UD-parsed)
- lemmatization models based on larger quantities of training data than is available in UD (training data that are lemmatized, but not UD-parsed)
- optional JOS-project-based parsing of Slovenian (usage documented [here](https://github.com/clarinsi/classla/blob/master/README.superuser.md#jos-dependency-parsing-system))
- named entity recognition models for all languages except Macedonian (Stanza does not cover named entity recognition for any of the languages supported by classla)
- Macedonian models (Macedonian is not available in UD yet)
- non-standard models for Croatian, Slovenian, Serbian (there is no UD data for these varieties)
The above modifications led to some important improvements in the tool's performance in comparison to the original Stanza. For standard Slovenian, for example, running the full classla pipeline increases sentence segmentation F1 to 99.52 (94.29% error reduction), lemmatization to 99.17 (68.8% error reduction), XPOS tagging to 97.38 (46.75% error reduction), UPOS tagging to 98.69 (23.4% error reduction), and LAS to 92.05 (23.56% error reduction). See the official [Stanza performance](https://stanfordnlp.github.io/stanza/performance.html) (evaluated on different data splits) for comparison.
## Installation
### pip
We recommend that you install CLASSLA via pip, the Python package manager. To install, run:
```bash
pip install classla
```
This will also resolve all dependencies.
__NOTE TO EXISTING USERS__: Once you install this classla version, you will HAVE TO re-download the models; previously downloaded models will no longer be used. We suggest you delete the old models. Their default location is `~/classla_resources`.
## Running CLASSLA
### Getting started
To run the CLASSLA pipeline for the first time on processing standard Slovenian, follow these steps:
```
>>> import classla
>>> classla.download('sl') # download standard models for Slovenian, use hr for Croatian, sr for Serbian, bg for Bulgarian, mk for Macedonian
>>> nlp = classla.Pipeline('sl') # initialize the default Slovenian pipeline, use hr for Croatian, sr for Serbian, bg for Bulgarian, mk for Macedonian
>>> doc = nlp("France Prešeren je rojen v Vrbi.") # run the pipeline
>>> print(doc.to_conll()) # print the output in CoNLL-U format
# newpar id = 1
# sent_id = 1.1
# text = France Prešeren je rojen v Vrbi.
1 France France PROPN Npmsn Case=Nom|Gender=Masc|Number=Sing 4 nsubj _ NER=B-PER
2 Prešeren Prešeren PROPN Npmsn Case=Nom|Gender=Masc|Number=Sing 1 flat:name _ NER=I-PER
3 je biti AUX Va-r3s-n Mood=Ind|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin 4 cop _ NER=O
4 rojen rojen ADJ Appmsnn Case=Nom|Definite=Ind|Degree=Pos|Gender=Masc|Number=Sing|VerbForm=Part 0 root _ NER=O
5 v v ADP Sl Case=Loc 6 case _ NER=O
6 Vrbi Vrba PROPN Npfsl Case=Loc|Gender=Fem|Number=Sing 4 obl _ NER=B-LOC|SpaceAfter=No
7 . . PUNCT Z _ 4 punct _ NER=O
```
You can find examples of standard language processing for [Croatian](#example-of-standard-croatian), [Serbian](#example-of-standard-serbian), [Macedonian](#example-of-standard-macedonian) and [Bulgarian](#example-of-standard-bulgarian) at the end of this document.
### Processing non-standard language
Processing non-standard Slovenian differs to the above standard example just by an additional argument ```type="nonstandard"```:
```
>>> import classla
>>> classla.download('sl', type='nonstandard') # download non-standard models for Slovenian, use hr for Croatian and sr for Serbian
>>> nlp = classla.Pipeline('sl', type='nonstandard') # initialize the default non-standard Slovenian pipeline, use hr for Croatian and sr for Serbian
>>> doc = nlp("kva smo mi zurali zadnje leto v zagrebu...") # run the pipeline
>>> print(doc.to_conll()) # print the output in CoNLL-U format
# newpar id = 1
# sent_id = 1.1
# text = kva smo mi zurali zadnje leto v zagrebu...
1 kva kaj PRON Pq-nsa Case=Acc|Gender=Neut|Number=Sing|PronType=Int 4 obj _ NER=O
2 smo biti AUX Va-r1p-n Mood=Ind|Number=Plur|Person=1|Polarity=Pos|Tense=Pres|VerbForm=Fin 4 aux _ NER=O
3 mi jaz PRON Pp1mpn Case=Nom|Gender=Masc|Number=Plur|Person=1|PronType=Prs 4 nsubj _ NER=O
4 zurali zurati VERB Vmpp-pm Aspect=Imp|Gender=Masc|Number=Plur|VerbForm=Part 0 root _ NER=O
5 zadnje zadnji ADJ Agpnsa Case=Acc|Degree=Pos|Gender=Neut|Number=Sing 6 amod _ NER=O
6 leto leto NOUN Ncnsa Case=Acc|Gender=Neut|Number=Sing 4 obl _ NER=O
7 v v ADP Sl Case=Loc 8 case _ NER=O
8 zagrebu Zagreb PROPN Npmsl Case=Loc|Gender=Masc|Number=Sing 4 obl _ NER=B-LOC|SpaceAfter=No
9 ... ... PUNCT Z _ 4 punct _ NER=O
```
You can find examples of non-standard language processing for [Croatian](#example-of-non-standard-croatian) and [Serbian](#example-of-non-standard-serbian) at the end of this document.
For additional usage examples you can also consult the ```pipeline_demo.py``` file.
### Processing online texts
A special web processing mode for processing texts obtained from the internet can be activated with the ```type="web"``` argument:
```
>>> import classla
>>> classla.download('sl', type='web') # download web models for Slovenian, use hr for Croatian and sr for Serbian
>>> nlp = classla.Pipeline('sl', type='web') # initialize the default Slovenian web pipeline, use hr for Croatian and sr for Serbian
>>> doc = nlp("Kdor hoce prenesti preko racunalnika http://t.co/LwWyzs0cA0") # run the pipeline
>>> print(doc.to_conll()) # print the output in CoNLL-U format
# newpar id = 1
# sent_id = 1.1
# text = Kdor hoce prenesti preko racunalnika http://t.co/LwWyzs0cA0
1 Kdor kdor PRON Pr-msn Case=Nom|Gender=Masc|Number=Sing|PronType=Rel 2 nsubj _ NER=O
2 hoce hoteti VERB Vmpr3s-n Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin 0 root _ NER=O
3 prenesti prenesti VERB Vmen Aspect=Perf|VerbForm=Inf 2 xcomp _ NER=O
4 preko preko ADP Sg Case=Gen 5 case _ NER=O
5 racunalnika računalnik NOUN Ncmsg Case=Gen|Gender=Masc|Number=Sing 3 obl _ NER=O
6 http://t.co/LwWyzs0cA0 http://t.co/LwWyzs0cA0 SYM Xw _ 5 nmod _ NER=O
```
## Processors
The CLASSLA pipeline is built from multiple units, called processors. By default, CLASSLA runs the ```tokenize```, ```ner```, ```pos```, ```lemma``` and ```depparse``` processors.
You can specify which processors CLASSLA should run via the ```processors``` attribute, as in the following example, which performs tokenization, named entity recognition, part-of-speech tagging and lemmatization.
```python
>>> nlp = classla.Pipeline('sl', processors='tokenize,ner,pos,lemma')
```
Another common option is to perform tokenization, part-of-speech tagging, lemmatization and dependency parsing.
```python
>>> nlp = classla.Pipeline('sl', processors='tokenize,pos,lemma,depparse')
```
### Tokenization and sentence splitting
The tokenization and sentence splitting processor ```tokenize``` is the first processor and is required for any further processing.
If your text is already tokenized, separate the tokens with spaces and pass the attribute ```tokenize_pretokenized=True```.
By default, CLASSLA uses the rule-based tokenizer [obeliks](https://github.com/clarinsi/obeliks) for the standard Slovenian pipeline. In all other cases it uses [reldi-tokeniser](https://github.com/clarinsi/reldi-tokeniser).
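For example, a minimal sketch of pretokenized processing (assuming each input line is treated as one sentence with space-separated tokens):
```python
>>> import classla
>>> nlp = classla.Pipeline('sl', tokenize_pretokenized=True)
>>> doc = nlp("France Prešeren je rojen v Vrbi .")  # tokens separated by spaces, including the final period
```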
<!--Most important attributes:
```
tokenize_pretokenized - [boolean] ignores tokenizer
```-->
### Part-of-speech tagging
The POS tagging processor ```pos``` generates output containing morphosyntactic descriptions following the [MULTEXT-East standard](http://nl.ijs.si/ME/V6/msd/html/msd.lang-specific.html), as well as universal part-of-speech tags and universal features following the [Universal Dependencies standard](https://universaldependencies.org). It requires the ```tokenize``` processor.
<!--Most important attributes:
```
pos_model_path - [str] alternative path to model file
pos_pretrain_path - [str] alternative path to pretrain file
```-->
### Lemmatization
The lemmatization processor ```lemma``` produces a lemma (base form) for each token in the input. It requires both the ```tokenize``` and ```pos``` processors.
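For instance, a minimal sketch of reading the lemmas off a processed document (assuming the document object exposes ```sentences``` and ```words``` attributes):
```python
>>> nlp = classla.Pipeline('sl', processors='tokenize,pos,lemma')
>>> doc = nlp("France Prešeren je rojen v Vrbi.")
>>> print([word.lemma for word in doc.sentences[0].words])
```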
### Dependency parsing
The dependency parsing processor ```depparse``` performs syntactic dependency parsing of sentences following the [Universal Dependencies formalism](https://universaldependencies.org/introduction.html#:~:text=Universal%20Dependencies%20(UD)%20is%20a,from%20a%20language%20typology%20perspective.). It requires the ```tokenize``` and ```pos``` processors.
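Dependency triples can also be read directly off the CoNLL-U output shown earlier, since HEAD and DEPREL are the seventh and eighth columns of the format. A minimal sketch:
```python
>>> nlp = classla.Pipeline('sl', processors='tokenize,pos,lemma,depparse')
>>> doc = nlp("France Prešeren je rojen v Vrbi.")
>>> for line in doc.to_conll().splitlines():
...     if line and not line.startswith('#'):
...         cols = line.split('\t')
...         print(cols[1], cols[7], cols[6])  # token, dependency relation, head index
```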
### Named entity recognition
The named entity recognition processor ```ner``` identifies named entities in text following the [IOB2](https://en.wikipedia.org/wiki/Inside–outside–beginning_(tagging)) format. It requires only the ```tokenize``` processor.
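The entity labels appear in the MISC column of the CoNLL-U output (e.g. ```NER=B-PER```), so a minimal sketch for collecting the non-```O``` labels is:
```python
>>> nlp = classla.Pipeline('sl', processors='tokenize,ner')
>>> doc = nlp("France Prešeren je rojen v Vrbi.")
>>> for line in doc.to_conll().splitlines():
...     if line and not line.startswith('#'):
...         cols = line.split('\t')
...         ner = [v.split('=', 1)[1] for v in cols[9].split('|') if v.startswith('NER=')]
...         if ner and ner[0] != 'O':
...             print(cols[1], ner[0])
```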
## Citing
If you use this tool, please cite the following paper:
```
@inproceedings{ljubesic-dobrovoljc-2019-neural,
title = "What does Neural Bring? Analysing Improvements in Morphosyntactic Annotation and Lemmatisation of {S}lovenian, {C}roatian and {S}erbian",
author = "Ljube{\v{s}}i{\'c}, Nikola and
Dobrovoljc, Kaja",
booktitle = "Proceedings of the 7th Workshop on Balto-Slavic Natural Language Processing",
month = aug,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W19-3704",
doi = "10.18653/v1/W19-3704",
pages = "29--34"
}
```
## Croatian examples
### Example of standard Croatian
```
>>> import classla
>>> nlp = classla.Pipeline('hr') # run classla.download('hr') beforehand if necessary
>>> doc = nlp("Ante Starčević rođen je u Velikom Žitniku.")
>>> print(doc.to_conll())
# newpar id = 1
# sent_id = 1.1
# text = Ante Starčević rođen je u Velikom Žitniku.
1 Ante Ante PROPN Npmsn Case=Nom|Gender=Masc|Number=Sing 3 nsubj _ NER=B-PER
2 Starčević Starčević PROPN Npmsn Case=Nom|Gender=Masc|Number=Sing 1 flat _ NER=I-PER
3 rođen roditi ADJ Appmsnn Case=Nom|Definite=Ind|Degree=Pos|Gender=Masc|Number=Sing|VerbForm=Part|Voice=Pass 0 root _ NER=O
4 je biti AUX Var3s Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 aux _ NER=O
5 u u ADP Sl Case=Loc 7 case _ NER=O
6 Velikom velik ADJ Agpmsly Case=Loc|Definite=Def|Degree=Pos|Gender=Masc|Number=Sing 7 amod _ NER=B-LOC
7 Žitniku Žitnik PROPN Npmsl Case=Loc|Gender=Masc|Number=Sing 3 obl _ NER=I-LOC|SpaceAfter=No
8 . . PUNCT Z _ 3 punct _ NER=O
```
### Example of non-standard Croatian
```
>>> import classla
>>> nlp = classla.Pipeline('hr', type='nonstandard') # run classla.download('hr', type='nonstandard') beforehand if necessary
>>> doc = nlp("kaj sam ja tulumaril jucer u ljubljani...")
>>> print(doc.to_conll())
# newpar id = 1
# sent_id = 1.1
# text = kaj sam ja tulumaril jucer u ljubljani...
1 kaj što PRON Pq3n-a Case=Acc|Gender=Neut|PronType=Int,Rel 4 obj _ NER=O
2 sam biti AUX Var1s Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin 4 aux _ NER=O
3 ja ja PRON Pp1-sn Case=Nom|Number=Sing|Person=1|PronType=Prs 4 nsubj _ NER=O
4 tulumaril tulumariti VERB Vmp-sm Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part|Voice=Act 0 root _ NER=O
5 jucer jučer ADV Rgp Degree=Pos 4 advmod _ NER=O
6 u u ADP Sl Case=Loc 7 case _ NER=O
7 ljubljani Ljubljana PROPN Npfsl Case=Loc|Gender=Fem|Number=Sing 4 obl _ NER=B-LOC|SpaceAfter=No
8 ... ... PUNCT Z _ 4 punct _ NER=O
```
## Serbian examples
### Example of standard Serbian
```
>>> import classla
>>> nlp = classla.Pipeline('sr') # run classla.download('sr') beforehand if necessary
>>> doc = nlp("Slobodan Jovanović rođen je u Novom Sadu.")
>>> print(doc.to_conll())
# newpar id = 1
# sent_id = 1.1
# text = Slobodan Jovanović rođen je u Novom Sadu.
1 Slobodan Slobodan PROPN Npmsn Case=Nom|Gender=Masc|Number=Sing 3 nsubj _ NER=B-PER
2 Jovanović Jovanović PROPN Npmsn Case=Nom|Gender=Masc|Number=Sing 1 flat _ NER=I-PER
3 rođen roditi ADJ Appmsnn Case=Nom|Definite=Ind|Degree=Pos|Gender=Masc|Number=Sing|VerbForm=Part|Voice=Pass 0 root _ NER=O
4 je biti AUX Var3s Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 3 aux _ NER=O
5 u u ADP Sl Case=Loc 7 case _ NER=O
6 Novom nov ADJ Agpmsly Case=Loc|Definite=Def|Degree=Pos|Gender=Masc|Number=Sing 7 amod _ NER=B-LOC
7 Sadu Sad PROPN Npmsl Case=Loc|Gender=Masc|Number=Sing 3 obl _ NER=I-LOC|SpaceAfter=No
8 . . PUNCT Z _ 3 punct _ NER=O
```
### Example of non-standard Serbian
```
>>> import classla
>>> nlp = classla.Pipeline('sr', type='nonstandard') # run classla.download('sr', type='nonstandard') beforehand if necessary
>>> doc = nlp("ne mogu da verujem kakvo je zezanje bilo prosle godine u zagrebu...")
>>> print(doc.to_conll())
# newpar id = 1
# sent_id = 1.1
# text = ne mogu da verujem kakvo je zezanje bilo prosle godine u zagrebu...
1 ne ne PART Qz Polarity=Neg 2 advmod _ NER=O
2 mogu moći VERB Vmr1s Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin 0 root _ NER=O
3 da da SCONJ Cs _ 4 mark _ NER=O
4 verujem verovati VERB Vmr1s Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin 2 xcomp _ NER=O
5 kakvo kakav DET Pi-nsn Case=Nom|Gender=Neut|Number=Sing|PronType=Int,Rel 4 ccomp _ NER=O
6 je biti AUX Var3s Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin 5 aux _ NER=O
7 zezanje zezanje NOUN Ncnsn Case=Nom|Gender=Neut|Number=Sing 8 nsubj _ NER=O
8 bilo biti AUX Vap-sn Gender=Neut|Number=Sing|Tense=Past|VerbForm=Part|Voice=Act 5 cop _ NER=O
9 prosle prošli ADJ Agpfsgy Case=Gen|Definite=Def|Degree=Pos|Gender=Fem|Number=Sing 10 amod _ NER=O
10 godine godina NOUN Ncfsg Case=Gen|Gender=Fem|Number=Sing 8 obl _ NER=O
11 u u ADP Sl Case=Loc 12 case _ NER=O
12 zagrebu Zagreb PROPN Npmsl Case=Loc|Gender=Masc|Number=Sing 8 obl _ NER=B-LOC|SpaceAfter=No
13 ... ... PUNCT Z _ 2 punct _ NER=O
```
## Bulgarian examples
### Example of standard Bulgarian
```
>>> import classla
>>> nlp = classla.Pipeline('bg') # run classla.download('bg') beforehand if necessary
>>> doc = nlp("Алеко Константинов е роден в Свищов.")
>>> print(doc.to_conll())
# newpar id = 1
# sent_id = 1.1
# text = Алеко Константинов е роден в Свищов.
1 Алеко алеко PROPN Npmsi Definite=Ind|Gender=Masc|Number=Sing 4 nsubj:pass _ NER=B-PER
2 Константинов константинов PROPN Hmsi Definite=Ind|Gender=Masc|Number=Sing 1 flat _ NER=I-PER
3 е съм AUX Vxitf-r3s Aspect=Imp|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin|Voice=Act 4 aux:pass _ NER=O
4 роден родя-(се) VERB Vpptcv--smi Aspect=Perf|Definite=Ind|Gender=Masc|Number=Sing|VerbForm=Part|Voice=Pass 0 root _ NER=O
5 в в ADP R _ 6 case _ NER=O
6 Свищов свищов PROPN Npmsi Definite=Ind|Gender=Masc|Number=Sing 4 iobj _ NER=B-LOC|SpaceAfter=No
7 . . PUNCT punct _ 4 punct _ NER=O
```
## Macedonian examples
### Example of standard Macedonian
```
>>> import classla
>>> nlp = classla.Pipeline('mk') # run classla.download('mk') beforehand if necessary
>>> doc = nlp('Крсте Петков Мисирков е роден во Постол.')
>>> print(doc.to_conll())
# newpar id = 1
# sent_id = 1.1
# text = Крсте Петков Мисирков е роден во Постол.
1 Крсте Крсте PROPN Npmsnn Case=Nom|Definite=Ind|Gender=Masc|Number=Sing _ _ _ _
2 Петков Петков PROPN Npmsnn Case=Nom|Definite=Ind|Gender=Masc|Number=Sing _ _ _ _
3 Мисирков Мисирков PROPN Npmsnn Case=Nom|Definite=Ind|Gender=Masc|Number=Sing _ _ _ _
4 е сум AUX Vapip3s-n Aspect=Prog|Mood=Ind|Number=Sing|Person=3|Polarity=Pos|Tense=Pres _ _ _ _
5 роден роден ADJ Ap-ms-n Definite=Ind|Gender=Masc|Number=Sing|VerbForm=Part _ _ _ _
6 во во ADP Sps AdpType=Prep _ _ _ _
7 Постол Постол PROPN Npmsnn Case=Nom|Definite=Ind|Gender=Masc|Number=Sing _ _ _ SpaceAfter=No
8 . . PUNCT Z _ _ _ _ _
```
## Training instructions
[Training instructions](https://github.com/clarinsi/classla-stanfordnlp/blob/master/README.train.md)
## Superuser instructions
[Superuser instructions](https://github.com/clarinsi/classla-stanfordnlp/blob/master/README.superuser.md)
/baiduads-sdk-auto-snapshot-2022.2.1.5.tar.gz/baiduads-sdk-auto-snapshot-2022.2.1.5/baiduads/rtafeed/model/update_rta_setting_request_wrapper.py
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.common.model.api_request_header import ApiRequestHeader
from baiduads.rtafeed.model.rta_setting_type import RtaSettingType
globals()['ApiRequestHeader'] = ApiRequestHeader
globals()['RtaSettingType'] = RtaSettingType
class UpdateRtaSettingRequestWrapper(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'header': (ApiRequestHeader,), # noqa: E501
'body': (RtaSettingType,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'header': 'header', # noqa: E501
'body': 'body', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""UpdateRtaSettingRequestWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiRequestHeader): [optional] # noqa: E501
body (RtaSettingType): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""UpdateRtaSettingRequestWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiRequestHeader): [optional] # noqa: E501
body (RtaSettingType): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
/alipay-sdk-python-pycryptodome-3.3.202.tar.gz/alipay-sdk-python-pycryptodome-3.3.202/alipay/aop/api/domain/SceneExtParam.py
import json
from alipay.aop.api.constant.ParamConstants import *
class SceneExtParam(object):
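"""Scene extension parameters: application reason, contract number,
installment figures (discount, first/last/monthly payment amounts,
interest rate) and a free-form remark."""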
def __init__(self):
self._apply_reason = None
self._contract_no = None
self._discountamt = None
self._firstpayamt = None
self._interestrate = None
self._lastpayamt = None
self._monthpayamt = None
self._remark = None
@property
def apply_reason(self):
return self._apply_reason
@apply_reason.setter
def apply_reason(self, value):
self._apply_reason = value
@property
def contract_no(self):
return self._contract_no
@contract_no.setter
def contract_no(self, value):
self._contract_no = value
@property
def discountamt(self):
return self._discountamt
@discountamt.setter
def discountamt(self, value):
self._discountamt = value
@property
def firstpayamt(self):
return self._firstpayamt
@firstpayamt.setter
def firstpayamt(self, value):
self._firstpayamt = value
@property
def interestrate(self):
return self._interestrate
@interestrate.setter
def interestrate(self, value):
self._interestrate = value
@property
def lastpayamt(self):
return self._lastpayamt
@lastpayamt.setter
def lastpayamt(self, value):
self._lastpayamt = value
@property
def monthpayamt(self):
return self._monthpayamt
@monthpayamt.setter
def monthpayamt(self, value):
self._monthpayamt = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
def to_alipay_dict(self):
params = dict()
if self.apply_reason:
if hasattr(self.apply_reason, 'to_alipay_dict'):
params['apply_reason'] = self.apply_reason.to_alipay_dict()
else:
params['apply_reason'] = self.apply_reason
if self.contract_no:
if hasattr(self.contract_no, 'to_alipay_dict'):
params['contract_no'] = self.contract_no.to_alipay_dict()
else:
params['contract_no'] = self.contract_no
if self.discountamt:
if hasattr(self.discountamt, 'to_alipay_dict'):
params['discountamt'] = self.discountamt.to_alipay_dict()
else:
params['discountamt'] = self.discountamt
if self.firstpayamt:
if hasattr(self.firstpayamt, 'to_alipay_dict'):
params['firstpayamt'] = self.firstpayamt.to_alipay_dict()
else:
params['firstpayamt'] = self.firstpayamt
if self.interestrate:
if hasattr(self.interestrate, 'to_alipay_dict'):
params['interestrate'] = self.interestrate.to_alipay_dict()
else:
params['interestrate'] = self.interestrate
if self.lastpayamt:
if hasattr(self.lastpayamt, 'to_alipay_dict'):
params['lastpayamt'] = self.lastpayamt.to_alipay_dict()
else:
params['lastpayamt'] = self.lastpayamt
if self.monthpayamt:
if hasattr(self.monthpayamt, 'to_alipay_dict'):
params['monthpayamt'] = self.monthpayamt.to_alipay_dict()
else:
params['monthpayamt'] = self.monthpayamt
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SceneExtParam()
if 'apply_reason' in d:
o.apply_reason = d['apply_reason']
if 'contract_no' in d:
o.contract_no = d['contract_no']
if 'discountamt' in d:
o.discountamt = d['discountamt']
if 'firstpayamt' in d:
o.firstpayamt = d['firstpayamt']
if 'interestrate' in d:
o.interestrate = d['interestrate']
if 'lastpayamt' in d:
o.lastpayamt = d['lastpayamt']
if 'monthpayamt' in d:
o.monthpayamt = d['monthpayamt']
if 'remark' in d:
o.remark = d['remark']
return o
/nni_upload_test-0.7.1904290925-py3-none-win_amd64.whl/nni_upload_test-0.7.1904290925.data/data/nni/node_modules/node-forge/lib/task.js
var forge = require('./forge');
require('./debug');
require('./log');
require('./util');
// logging category
var cat = 'forge.task';
// verbose level
// 0: off, 1: a little, 2: a whole lot
// Verbose debug logging is surrounded by a level check to avoid the
// performance cost of even calling the logging code, regardless of whether
// the message is actually logged. For performance reasons this should not be
// set to 2 for production use.
// ex: if(sVL >= 2) forge.log.verbose(....)
var sVL = 0;
// track tasks for debugging
var sTasks = {};
var sNextTaskId = 0;
// debug access
forge.debug.set(cat, 'tasks', sTasks);
// a map of task type to task queue
var sTaskQueues = {};
// debug access
forge.debug.set(cat, 'queues', sTaskQueues);
// name for unnamed tasks
var sNoTaskName = '?';
// maximum number of doNext() recursions before a context swap occurs
// FIXME: might need to tweak this based on the browser
var sMaxRecursions = 30;
// time slice for doing tasks before a context swap occurs
// FIXME: might need to tweak this based on the browser
var sTimeSlice = 20;
/**
* Task states.
*
* READY: ready to start processing
* RUNNING: task or a subtask is running
* BLOCKED: task is waiting to acquire N permits to continue
* SLEEPING: task is sleeping for a period of time
* DONE: task is done
* ERROR: task has an error
*/
var READY = 'ready';
var RUNNING = 'running';
var BLOCKED = 'blocked';
var SLEEPING = 'sleeping';
var DONE = 'done';
var ERROR = 'error';
/**
* Task actions. Used to control state transitions.
*
* STOP: stop processing
* START: start processing tasks
* BLOCK: block task from continuing until 1 or more permits are released
* UNBLOCK: release one or more permits
* SLEEP: sleep for a period of time
* WAKEUP: wakeup early from SLEEPING state
* CANCEL: cancel further tasks
* FAIL: a failure occurred
*/
var STOP = 'stop';
var START = 'start';
var BLOCK = 'block';
var UNBLOCK = 'unblock';
var SLEEP = 'sleep';
var WAKEUP = 'wakeup';
var CANCEL = 'cancel';
var FAIL = 'fail';
/**
* State transition table.
*
* nextState = sStateTable[currentState][action]
*/
var sStateTable = {};
sStateTable[READY] = {};
sStateTable[READY][STOP] = READY;
sStateTable[READY][START] = RUNNING;
sStateTable[READY][CANCEL] = DONE;
sStateTable[READY][FAIL] = ERROR;
sStateTable[RUNNING] = {};
sStateTable[RUNNING][STOP] = READY;
sStateTable[RUNNING][START] = RUNNING;
sStateTable[RUNNING][BLOCK] = BLOCKED;
sStateTable[RUNNING][UNBLOCK] = RUNNING;
sStateTable[RUNNING][SLEEP] = SLEEPING;
sStateTable[RUNNING][WAKEUP] = RUNNING;
sStateTable[RUNNING][CANCEL] = DONE;
sStateTable[RUNNING][FAIL] = ERROR;
sStateTable[BLOCKED] = {};
sStateTable[BLOCKED][STOP] = BLOCKED;
sStateTable[BLOCKED][START] = BLOCKED;
sStateTable[BLOCKED][BLOCK] = BLOCKED;
sStateTable[BLOCKED][UNBLOCK] = BLOCKED;
sStateTable[BLOCKED][SLEEP] = BLOCKED;
sStateTable[BLOCKED][WAKEUP] = BLOCKED;
sStateTable[BLOCKED][CANCEL] = DONE;
sStateTable[BLOCKED][FAIL] = ERROR;
sStateTable[SLEEPING] = {};
sStateTable[SLEEPING][STOP] = SLEEPING;
sStateTable[SLEEPING][START] = SLEEPING;
sStateTable[SLEEPING][BLOCK] = SLEEPING;
sStateTable[SLEEPING][UNBLOCK] = SLEEPING;
sStateTable[SLEEPING][SLEEP] = SLEEPING;
sStateTable[SLEEPING][WAKEUP] = SLEEPING;
sStateTable[SLEEPING][CANCEL] = DONE;
sStateTable[SLEEPING][FAIL] = ERROR;
sStateTable[DONE] = {};
sStateTable[DONE][STOP] = DONE;
sStateTable[DONE][START] = DONE;
sStateTable[DONE][BLOCK] = DONE;
sStateTable[DONE][UNBLOCK] = DONE;
sStateTable[DONE][SLEEP] = DONE;
sStateTable[DONE][WAKEUP] = DONE;
sStateTable[DONE][CANCEL] = DONE;
sStateTable[DONE][FAIL] = ERROR;
sStateTable[ERROR] = {};
sStateTable[ERROR][STOP] = ERROR;
sStateTable[ERROR][START] = ERROR;
sStateTable[ERROR][BLOCK] = ERROR;
sStateTable[ERROR][UNBLOCK] = ERROR;
sStateTable[ERROR][SLEEP] = ERROR;
sStateTable[ERROR][WAKEUP] = ERROR;
sStateTable[ERROR][CANCEL] = ERROR;
sStateTable[ERROR][FAIL] = ERROR;
/**
* Creates a new task.
*
* @param options options for this task
* run: the run function for the task (required)
* name: a human-readable name for the task (optional)
* parent: parent of this task (optional)
*
* @return the empty task.
*/
var Task = function(options) {
// task id
this.id = -1;
// task name
this.name = options.name || sNoTaskName;
// task has no parent
this.parent = options.parent || null;
// save run function
this.run = options.run;
// create a queue of subtasks to run
this.subtasks = [];
// error flag
this.error = false;
// state of the task
this.state = READY;
// number of times the task has been blocked (also the number
// of permits needed to be released to continue running)
this.blocks = 0;
// timeout id when sleeping
this.timeoutId = null;
// no swap time yet
this.swapTime = null;
// no user data
this.userData = null;
// initialize task
// FIXME: deal with overflow
this.id = sNextTaskId++;
sTasks[this.id] = this;
if(sVL >= 1) {
forge.log.verbose(cat, '[%s][%s] init', this.id, this.name, this);
}
};
/**
* Logs debug information on this task and the system state.
*/
Task.prototype.debug = function(msg) {
msg = msg || '';
forge.log.debug(cat, msg,
'[%s][%s] task:', this.id, this.name, this,
'subtasks:', this.subtasks.length,
'queue:', sTaskQueues);
};
/**
* Adds a subtask to run after task.doNext() or task.fail() is called.
*
* @param name human readable name for this task (optional).
* @param subrun a function to run that takes the current task as
* its first parameter.
*
* @return the current task (useful for chaining next() calls).
*/
Task.prototype.next = function(name, subrun) {
// juggle parameters if it looks like no name is given
if(typeof(name) === 'function') {
subrun = name;
// inherit parent's name
name = this.name;
}
// create subtask, set parent to this task, propagate callbacks
var subtask = new Task({
run: subrun,
name: name,
parent: this
});
// start subtasks running
subtask.state = RUNNING;
subtask.type = this.type;
subtask.successCallback = this.successCallback || null;
subtask.failureCallback = this.failureCallback || null;
// queue a new subtask
this.subtasks.push(subtask);
return this;
};
/**
* Adds subtasks to run in parallel after task.doNext() or task.fail()
* is called.
*
* @param name human readable name for this task (optional).
* @param subrun functions to run that take the current task as
* their first parameter.
*
* @return the current task (useful for chaining next() calls).
*/
Task.prototype.parallel = function(name, subrun) {
// juggle parameters if it looks like no name is given
if(forge.util.isArray(name)) {
subrun = name;
// inherit parent's name
name = this.name;
}
// Wrap parallel tasks in a regular task so they are started at the
// proper time.
return this.next(name, function(task) {
// block waiting for subtasks
var ptask = task;
ptask.block(subrun.length);
// we pass the iterator from the loop below as a parameter
// to a function because it is otherwise included in the
// closure and changes as the loop changes -- causing i
// to always be set to its highest value
var startParallelTask = function(pname, pi) {
forge.task.start({
type: pname,
run: function(task) {
subrun[pi](task);
},
success: function(task) {
ptask.unblock();
},
failure: function(task) {
ptask.unblock();
}
});
};
for(var i = 0; i < subrun.length; i++) {
// Type must be unique so task starts in parallel:
// name + private string + task id + sub-task index
// start tasks in parallel and unblock when they finish
var pname = name + '__parallel-' + task.id + '-' + i;
var pi = i;
startParallelTask(pname, pi);
}
});
};
/**
* Stops a running task.
*/
Task.prototype.stop = function() {
this.state = sStateTable[this.state][STOP];
};
/**
* Starts running a task.
*/
Task.prototype.start = function() {
this.error = false;
this.state = sStateTable[this.state][START];
// try to restart
if(this.state === RUNNING) {
this.start = new Date();
this.run(this);
runNext(this, 0);
}
};
/**
* Blocks a task until it one or more permits have been released. The
* task will not resume until the requested number of permits have
* been released with call(s) to unblock().
*
* @param n number of permits to wait for (default: 1).
*/
Task.prototype.block = function(n) {
n = typeof(n) === 'undefined' ? 1 : n;
this.blocks += n;
if(this.blocks > 0) {
this.state = sStateTable[this.state][BLOCK];
}
};
/**
* Releases a permit to unblock a task. If a task was blocked by
* requesting N permits via block(), then it will only continue
* running once enough permits have been released via unblock() calls.
*
* If multiple processes need to synchronize with a single task then
* use a condition variable (see forge.task.createCondition). It is
* an error to unblock a task more times than it has been blocked.
*
* @param n number of permits to release (default: 1).
*
* @return the current block count (task is unblocked when count is 0)
*/
Task.prototype.unblock = function(n) {
n = typeof(n) === 'undefined' ? 1 : n;
this.blocks -= n;
if(this.blocks === 0 && this.state !== DONE) {
this.state = RUNNING;
runNext(this, 0);
}
return this.blocks;
};
/**
* Sleep for a period of time before resuming tasks.
*
* @param n number of milliseconds to sleep (default: 0).
*/
Task.prototype.sleep = function(n) {
n = typeof(n) === 'undefined' ? 0 : n;
this.state = sStateTable[this.state][SLEEP];
var self = this;
this.timeoutId = setTimeout(function() {
self.timeoutId = null;
self.state = RUNNING;
runNext(self, 0);
}, n);
};
/**
* Waits on a condition variable until notified. The next task will
* not be scheduled until notification. A condition variable can be
* created with forge.task.createCondition().
*
* Once cond.notify() is called, the task will continue.
*
* @param cond the condition variable to wait on.
*/
Task.prototype.wait = function(cond) {
cond.wait(this);
};
/**
* If sleeping, wakeup and continue running tasks.
*/
Task.prototype.wakeup = function() {
if(this.state === SLEEPING) {
clearTimeout(this.timeoutId);
this.timeoutId = null;
this.state = RUNNING;
runNext(this, 0);
}
};
/**
* Cancel all remaining subtasks of this task.
*/
Task.prototype.cancel = function() {
this.state = sStateTable[this.state][CANCEL];
// remove permits needed
this.permitsNeeded = 0;
// cancel timeouts
if(this.timeoutId !== null) {
clearTimeout(this.timeoutId);
this.timeoutId = null;
}
// remove subtasks
this.subtasks = [];
};
/**
* Finishes this task with failure and sets error flag. The entire
* task will be aborted unless the next task that should execute
* is passed as a parameter. This allows levels of subtasks to be
* skipped. For instance, to abort only this tasks's subtasks, then
* call fail(task.parent). To abort this task's subtasks and its
* parent's subtasks, call fail(task.parent.parent). To abort
* all tasks and simply call the task callback, call fail() or
* fail(null).
*
* The task callback (success or failure) will always, eventually, be
* called.
*
* @param next the task to continue at, or null to abort entirely.
*/
Task.prototype.fail = function(next) {
// set error flag
this.error = true;
// finish task
finish(this, true);
if(next) {
// propagate task info
next.error = this.error;
next.swapTime = this.swapTime;
next.userData = this.userData;
// do next task as specified
runNext(next, 0);
} else {
if(this.parent !== null) {
// finish root task (ensures it is removed from task queue)
var parent = this.parent;
while(parent.parent !== null) {
// propagate task info
parent.error = this.error;
parent.swapTime = this.swapTime;
parent.userData = this.userData;
parent = parent.parent;
}
finish(parent, true);
}
// call failure callback if one exists
if(this.failureCallback) {
this.failureCallback(this);
}
}
};
/**
* Asynchronously start a task.
*
* @param task the task to start.
*/
var start = function(task) {
task.error = false;
task.state = sStateTable[task.state][START];
setTimeout(function() {
if(task.state === RUNNING) {
task.swapTime = +new Date();
task.run(task);
runNext(task, 0);
}
}, 0);
};
/**
* Run the next subtask or finish this task.
*
* @param task the task to process.
* @param recurse the recursion count.
*/
var runNext = function(task, recurse) {
// get time since last context swap (ms), if enough time has passed set
// swap to true to indicate that doNext was performed asynchronously
// also, if recurse is too high do asynchronously
var swap =
(recurse > sMaxRecursions) ||
(+new Date() - task.swapTime) > sTimeSlice;
var doNext = function(recurse) {
recurse++;
if(task.state === RUNNING) {
if(swap) {
// update swap time
task.swapTime = +new Date();
}
if(task.subtasks.length > 0) {
// run next subtask
var subtask = task.subtasks.shift();
subtask.error = task.error;
subtask.swapTime = task.swapTime;
subtask.userData = task.userData;
subtask.run(subtask);
if(!subtask.error) {
runNext(subtask, recurse);
}
} else {
finish(task);
if(!task.error) {
// chain back up and run parent
if(task.parent !== null) {
// propagate task info
task.parent.error = task.error;
task.parent.swapTime = task.swapTime;
task.parent.userData = task.userData;
// no subtasks left, call run next subtask on parent
runNext(task.parent, recurse);
}
}
}
}
};
if(swap) {
// we're swapping, so run asynchronously
setTimeout(doNext, 0);
} else {
// not swapping, so run synchronously
doNext(recurse);
}
};
/**
* Finishes a task and looks for the next task in the queue to start.
*
* @param task the task to finish.
* @param suppressCallbacks true to suppress callbacks.
*/
var finish = function(task, suppressCallbacks) {
// subtask is now done
task.state = DONE;
delete sTasks[task.id];
if(sVL >= 1) {
forge.log.verbose(cat, '[%s][%s] finish',
task.id, task.name, task);
}
// only do queue processing for root tasks
if(task.parent === null) {
// report error if queue is missing
if(!(task.type in sTaskQueues)) {
forge.log.error(cat,
'[%s][%s] task queue missing [%s]',
task.id, task.name, task.type);
} else if(sTaskQueues[task.type].length === 0) {
// report error if queue is empty
forge.log.error(cat,
'[%s][%s] task queue empty [%s]',
task.id, task.name, task.type);
} else if(sTaskQueues[task.type][0] !== task) {
// report error if this task isn't the first in the queue
forge.log.error(cat,
'[%s][%s] task not first in queue [%s]',
task.id, task.name, task.type);
} else {
// remove ourselves from the queue
sTaskQueues[task.type].shift();
// clean up queue if it is empty
if(sTaskQueues[task.type].length === 0) {
if(sVL >= 1) {
forge.log.verbose(cat, '[%s][%s] delete queue [%s]',
task.id, task.name, task.type);
}
/* Note: Only a task can delete a queue of its own type. This
is used as a way to synchronize tasks. If a queue for a certain
task type exists, then a task of that type is running.
*/
delete sTaskQueues[task.type];
} else {
// dequeue the next task and start it
if(sVL >= 1) {
forge.log.verbose(cat,
'[%s][%s] queue start next [%s] remain:%s',
task.id, task.name, task.type,
sTaskQueues[task.type].length);
}
sTaskQueues[task.type][0].start();
}
}
if(!suppressCallbacks) {
// call final callback if one exists
if(task.error && task.failureCallback) {
task.failureCallback(task);
} else if(!task.error && task.successCallback) {
task.successCallback(task);
}
}
}
};
/* Tasks API */
module.exports = forge.task = forge.task || {};
/**
* Starts a new task that will run the passed function asynchronously.
*
* In order to finish the task, either task.doNext() or task.fail()
* *must* be called.
*
* The task must have a type (a string identifier) that can be used to
* synchronize it with other tasks of the same type. That type can also
* be used to cancel tasks that haven't started yet.
*
* To start a task, the following object must be provided as a parameter
* (each function takes a task object as its first parameter):
*
* {
* type: the type of task.
* run: the function to run to execute the task.
* success: a callback to call when the task succeeds (optional).
* failure: a callback to call when the task fails (optional).
* }
*
* @param options the object as described above.
*/
forge.task.start = function(options) {
// create a new task
var task = new Task({
run: options.run,
name: options.name || sNoTaskName
});
task.type = options.type;
task.successCallback = options.success || null;
task.failureCallback = options.failure || null;
// append the task onto the appropriate queue
if(!(task.type in sTaskQueues)) {
if(sVL >= 1) {
forge.log.verbose(cat, '[%s][%s] create queue [%s]',
task.id, task.name, task.type);
}
// create the queue with the new task
sTaskQueues[task.type] = [task];
start(task);
} else {
// push the task onto the queue, it will be run after a task
// with the same type completes
sTaskQueues[options.type].push(task);
}
};
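// Example usage (a sketch):
//   forge.task.start({
//     type: 'example',
//     run: function(task) {
//       task.next('step2', function(task) { /* runs after the first step */ });
//     },
//     success: function(task) { /* all subtasks finished */ },
//     failure: function(task) { /* a subtask called task.fail() */ }
//   });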
/**
* Cancels all tasks of the given type that haven't started yet.
*
* @param type the type of task to cancel.
*/
forge.task.cancel = function(type) {
// find the task queue
if(type in sTaskQueues) {
// empty all but the current task from the queue
sTaskQueues[type] = [sTaskQueues[type][0]];
}
};
/**
* Creates a condition variable to synchronize tasks. To make a task wait
* on the condition variable, call task.wait(condition). To notify all
* tasks that are waiting, call condition.notify().
*
* @return the condition variable.
*/
forge.task.createCondition = function() {
var cond = {
// all tasks that are blocked
tasks: {}
};
/**
* Causes the given task to block until notify is called. If the task
* is already waiting on this condition then this is a no-op.
*
* @param task the task to cause to wait.
*/
cond.wait = function(task) {
// only block once
if(!(task.id in cond.tasks)) {
task.block();
cond.tasks[task.id] = task;
}
};
/**
* Notifies all waiting tasks to wake up.
*/
cond.notify = function() {
// since unblock() will run the next task from here, make sure to
// clear the condition's blocked task list before unblocking
var tmp = cond.tasks;
cond.tasks = {};
for(var id in tmp) {
tmp[id].unblock();
}
};
return cond;
};
/trans_video_data-0.13.tar.gz/trans_video_data-0.13/db-controllers/db_crud_pb2_grpc.py
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import db_crud_pb2 as db__crud__pb2
class DataServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AddData = channel.unary_unary(
'/DataService/AddData',
request_serializer=db__crud__pb2.AddParam.SerializeToString,
response_deserializer=db__crud__pb2.AddRes.FromString,
)
self.ModData = channel.unary_unary(
'/DataService/ModData',
request_serializer=db__crud__pb2.ModParam.SerializeToString,
response_deserializer=db__crud__pb2.ModRes.FromString,
)
self.DelData = channel.unary_unary(
'/DataService/DelData',
request_serializer=db__crud__pb2.DelParam.SerializeToString,
response_deserializer=db__crud__pb2.DelRes.FromString,
)
self.SelData = channel.unary_unary(
'/DataService/SelData',
request_serializer=db__crud__pb2.SelParam.SerializeToString,
response_deserializer=db__crud__pb2.SelRes.FromString,
)
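# Example client usage (a sketch; assumes a DataService server listening on
# localhost:50051):
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = DataServiceStub(channel)
#   response = stub.SelData(db__crud__pb2.SelParam())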
class DataServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def AddData(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModData(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DelData(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SelData(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DataServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'AddData': grpc.unary_unary_rpc_method_handler(
servicer.AddData,
request_deserializer=db__crud__pb2.AddParam.FromString,
response_serializer=db__crud__pb2.AddRes.SerializeToString,
),
'ModData': grpc.unary_unary_rpc_method_handler(
servicer.ModData,
request_deserializer=db__crud__pb2.ModParam.FromString,
response_serializer=db__crud__pb2.ModRes.SerializeToString,
),
'DelData': grpc.unary_unary_rpc_method_handler(
servicer.DelData,
request_deserializer=db__crud__pb2.DelParam.FromString,
response_serializer=db__crud__pb2.DelRes.SerializeToString,
),
'SelData': grpc.unary_unary_rpc_method_handler(
servicer.SelData,
request_deserializer=db__crud__pb2.SelParam.FromString,
response_serializer=db__crud__pb2.SelRes.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'DataService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class DataService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def AddData(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DataService/AddData',
db__crud__pb2.AddParam.SerializeToString,
db__crud__pb2.AddRes.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ModData(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DataService/ModData',
db__crud__pb2.ModParam.SerializeToString,
db__crud__pb2.ModRes.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DelData(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DataService/DelData',
db__crud__pb2.DelParam.SerializeToString,
db__crud__pb2.DelRes.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SelData(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/DataService/SelData',
db__crud__pb2.SelParam.SerializeToString,
db__crud__pb2.SelRes.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
/GRR-M2Crypto-0.22.6.tar.gz/GRR-M2Crypto-0.22.6/M2Crypto/EC.py
import util, BIO, m2
class ECError(Exception): pass
m2.ec_init(ECError)
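# Typical usage (a sketch; gen_params() and the curve NIDs are defined below):
#   key = gen_params(NID_X9_62_prime256v1)
#   key.gen_key()
#   r, s = key.sign_dsa(digest)
#   assert key.verify_dsa(digest, r, s)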
# Curve identifier constants
NID_secp112r1 = m2.NID_secp112r1
NID_secp112r2 = m2.NID_secp112r2
NID_secp128r1 = m2.NID_secp128r1
NID_secp128r2 = m2.NID_secp128r2
NID_secp160k1 = m2.NID_secp160k1
NID_secp160r1 = m2.NID_secp160r1
NID_secp160r2 = m2.NID_secp160r2
NID_secp192k1 = m2.NID_secp192k1
NID_secp224k1 = m2.NID_secp224k1
NID_secp224r1 = m2.NID_secp224r1
NID_secp256k1 = m2.NID_secp256k1
NID_secp384r1 = m2.NID_secp384r1
NID_secp521r1 = m2.NID_secp521r1
NID_sect113r1 = m2.NID_sect113r1
NID_sect113r2 = m2.NID_sect113r2
NID_sect131r1 = m2.NID_sect131r1
NID_sect131r2 = m2.NID_sect131r2
NID_sect163k1 = m2.NID_sect163k1
NID_sect163r1 = m2.NID_sect163r1
NID_sect163r2 = m2.NID_sect163r2
NID_sect193r1 = m2.NID_sect193r1
NID_sect193r2 = m2.NID_sect193r2
NID_sect233k1 = m2.NID_sect233k1 # default for secg.org TLS test server
NID_sect233r1 = m2.NID_sect233r1
NID_sect239k1 = m2.NID_sect239k1
NID_sect283k1 = m2.NID_sect283k1
NID_sect283r1 = m2.NID_sect283r1
NID_sect409k1 = m2.NID_sect409k1
NID_sect409r1 = m2.NID_sect409r1
NID_sect571k1 = m2.NID_sect571k1
NID_sect571r1 = m2.NID_sect571r1
NID_X9_62_prime192v1 = m2.NID_X9_62_prime192v1
NID_X9_62_prime192v2 = m2.NID_X9_62_prime192v2
NID_X9_62_prime192v3 = m2.NID_X9_62_prime192v3
NID_X9_62_prime239v1 = m2.NID_X9_62_prime239v1
NID_X9_62_prime239v2 = m2.NID_X9_62_prime239v2
NID_X9_62_prime239v3 = m2.NID_X9_62_prime239v3
NID_X9_62_prime256v1 = m2.NID_X9_62_prime256v1
NID_X9_62_c2pnb163v1 = m2.NID_X9_62_c2pnb163v1
NID_X9_62_c2pnb163v2 = m2.NID_X9_62_c2pnb163v2
NID_X9_62_c2pnb163v3 = m2.NID_X9_62_c2pnb163v3
NID_X9_62_c2pnb176v1 = m2.NID_X9_62_c2pnb176v1
NID_X9_62_c2tnb191v1 = m2.NID_X9_62_c2tnb191v1
NID_X9_62_c2tnb191v2 = m2.NID_X9_62_c2tnb191v2
NID_X9_62_c2tnb191v3 = m2.NID_X9_62_c2tnb191v3
NID_X9_62_c2pnb208w1 = m2.NID_X9_62_c2pnb208w1
NID_X9_62_c2tnb239v1 = m2.NID_X9_62_c2tnb239v1
NID_X9_62_c2tnb239v2 = m2.NID_X9_62_c2tnb239v2
NID_X9_62_c2tnb239v3 = m2.NID_X9_62_c2tnb239v3
NID_X9_62_c2pnb272w1 = m2.NID_X9_62_c2pnb272w1
NID_X9_62_c2pnb304w1 = m2.NID_X9_62_c2pnb304w1
NID_X9_62_c2tnb359v1 = m2.NID_X9_62_c2tnb359v1
NID_X9_62_c2pnb368w1 = m2.NID_X9_62_c2pnb368w1
NID_X9_62_c2tnb431r1 = m2.NID_X9_62_c2tnb431r1
NID_wap_wsg_idm_ecid_wtls1 = m2.NID_wap_wsg_idm_ecid_wtls1
NID_wap_wsg_idm_ecid_wtls3 = m2.NID_wap_wsg_idm_ecid_wtls3
NID_wap_wsg_idm_ecid_wtls4 = m2.NID_wap_wsg_idm_ecid_wtls4
NID_wap_wsg_idm_ecid_wtls5 = m2.NID_wap_wsg_idm_ecid_wtls5
NID_wap_wsg_idm_ecid_wtls6 = m2.NID_wap_wsg_idm_ecid_wtls6
NID_wap_wsg_idm_ecid_wtls7 = m2.NID_wap_wsg_idm_ecid_wtls7
NID_wap_wsg_idm_ecid_wtls8 = m2.NID_wap_wsg_idm_ecid_wtls8
NID_wap_wsg_idm_ecid_wtls9 = m2.NID_wap_wsg_idm_ecid_wtls9
NID_wap_wsg_idm_ecid_wtls10 = m2.NID_wap_wsg_idm_ecid_wtls10
NID_wap_wsg_idm_ecid_wtls11 = m2.NID_wap_wsg_idm_ecid_wtls11
NID_wap_wsg_idm_ecid_wtls12 = m2.NID_wap_wsg_idm_ecid_wtls12
# The following two curves, according to OpenSSL, have a
# "Questionable extension field!" and are not supported by
# the OpenSSL inverse function. ECError: no inverse.
# As such they cannot be used for signing. They might,
# however, be usable for encryption but that has not
# been tested. Until their usefulness can be established,
# they are not supported at this time.
# NID_ipsec3 = m2.NID_ipsec3
# NID_ipsec4 = m2.NID_ipsec4
class EC:
"""
Object interface to an EC key pair.
"""
m2_ec_key_free = m2.ec_key_free
def __init__(self, ec, _pyfree=0):
assert m2.ec_key_type_check(ec), "'ec' type error"
self.ec = ec
self._pyfree = _pyfree
def __del__(self):
if getattr(self, '_pyfree', 0):
self.m2_ec_key_free(self.ec)
def __len__(self):
assert m2.ec_key_type_check(self.ec), "'ec' type error"
return m2.ec_key_keylen(self.ec)
def gen_key(self):
"""
Generates the key pair from its parameters. Use::
keypair = EC.gen_params(curve)
keypair.gen_key()
to create an EC key pair.
"""
assert m2.ec_key_type_check(self.ec), "'ec' type error"
m2.ec_key_gen_key(self.ec)
def pub(self):
# Don't let python free
return EC_pub(self.ec, 0)
def sign_dsa(self, digest):
"""
Sign the given digest using ECDSA. Returns a tuple (r,s), the two
ECDSA signature parameters.
"""
assert self._check_key_type(), "'ec' type error"
return m2.ecdsa_sign(self.ec, digest)
def verify_dsa(self, digest, r, s):
"""
Verify the given digest using ECDSA. r and s are the ECDSA
signature parameters.
"""
assert self._check_key_type(), "'ec' type error"
return m2.ecdsa_verify(self.ec, digest, r, s)
def sign_dsa_asn1(self, digest):
assert self._check_key_type(), "'ec' type error"
return m2.ecdsa_sign_asn1(self.ec, digest)
def verify_dsa_asn1(self, digest, blob):
assert self._check_key_type(), "'ec' type error"
return m2.ecdsa_verify_asn1(self.ec, digest, blob)
def compute_dh_key(self,pub_key):
"""
Compute the ECDH shared key of this key pair and the given public
key object. They must both use the same curve. Returns the
shared key in binary as a buffer object. No Key Derivation Function is
applied.
"""
assert self.check_key(), 'key is not initialised'
return m2.ecdh_compute_key(self.ec, pub_key.ec)
def save_key_bio(self, bio, cipher='aes_128_cbc', callback=util.passphrase_callback):
"""
Save the key pair to an M2Crypto.BIO.BIO object in PEM format.
@type bio: M2Crypto.BIO.BIO
@param bio: M2Crypto.BIO.BIO object to save key to.
@type cipher: string
@param cipher: Symmetric cipher to protect the key. The default
cipher is 'aes_128_cbc'. If cipher is None, then the key is saved
in the clear.
@type callback: Python callable
@param callback: A Python callable object that is invoked
to acquire a passphrase with which to protect the key.
The default is util.passphrase_callback.
"""
if cipher is None:
return m2.ec_key_write_bio_no_cipher(self.ec, bio._ptr(), callback)
else:
ciph = getattr(m2, cipher, None)
if ciph is None:
raise ValueError('no such cipher %s' % cipher)
return m2.ec_key_write_bio(self.ec, bio._ptr(), ciph(), callback)
def save_key(self, file, cipher='aes_128_cbc', callback=util.passphrase_callback):
"""
Save the key pair to a file in PEM format.
@type file: string
@param file: Name of file to save key to.
@type cipher: string
@param cipher: Symmetric cipher to protect the key. The default
cipher is 'aes_128_cbc'. If cipher is None, then the key is saved
in the clear.
@type callback: Python callable
@param callback: A Python callable object that is invoked
to acquire a passphrase with which to protect the key.
The default is util.passphrase_callback.
"""
bio = BIO.openfile(file, 'wb')
return self.save_key_bio(bio, cipher, callback)
def save_pub_key_bio(self, bio):
"""
Save the public key to an M2Crypto.BIO.BIO object in PEM format.
@type bio: M2Crypto.BIO.BIO
@param bio: M2Crypto.BIO.BIO object to save key to.
"""
return m2.ec_key_write_pubkey(self.ec, bio._ptr())
def save_pub_key(self, file):
"""
Save the public key to a file in PEM format.
@type file: string
@param file: Name of file to save key to.
"""
bio = BIO.openfile(file, 'wb')
return m2.ec_key_write_pubkey(self.ec, bio._ptr())
def _check_key_type(self):
return m2.ec_key_type_check(self.ec)
def check_key(self):
assert m2.ec_key_type_check(self.ec), "'ec' type error"
return m2.ec_key_check_key(self.ec)
class EC_pub(EC):
"""
Object interface to an EC public key.
((don't like this implementation inheritance))
"""
def __init__(self,ec,_pyfree=0):
EC.__init__(self,ec,_pyfree)
self.der = None
def get_der(self):
"""
Returns the public key in DER format as a buffer object.
"""
assert self.check_key(), 'key is not initialised'
if self.der is None:
self.der = m2.ec_key_get_public_der(self.ec)
return self.der
save_key = EC.save_pub_key
save_key_bio = EC.save_pub_key_bio
def gen_params(curve):
"""
Factory function that generates EC parameters and
instantiates an EC object from the output.
@param curve: This is the OpenSSL nid of the curve to use.
"""
return EC(m2.ec_key_new_by_curve_name(curve), 1)
def load_key(file, callback=util.passphrase_callback):
"""
Factory function that instantiates an EC object.
@param file: Names the file that contains the PEM representation
of the EC key pair.
@param callback: Python callback object that will be invoked
if the EC key pair is passphrase-protected.
"""
bio = BIO.openfile(file)
return load_key_bio(bio, callback)
def load_key_bio(bio, callback=util.passphrase_callback):
"""
Factory function that instantiates an EC object.
@param bio: M2Crypto.BIO object that contains the PEM
representation of the EC key pair.
@param callback: Python callback object that will be invoked
if the EC key pair is passphrase-protected.
"""
return EC(m2.ec_key_read_bio(bio._ptr(), callback), 1)
def load_pub_key(file):
"""
Load an EC public key from file.
@type file: string
@param file: Name of file containing EC public key in PEM format.
@rtype: M2Crypto.EC.EC_pub
@return: M2Crypto.EC.EC_pub object.
"""
bio = BIO.openfile(file)
return load_pub_key_bio(bio)
def load_pub_key_bio(bio):
"""
Load an EC public key from an M2Crypto.BIO.BIO object.
@type bio: M2Crypto.BIO.BIO
@param bio: M2Crypto.BIO.BIO object containing EC public key in PEM
format.
@rtype: M2Crypto.EC.EC_pub
@return: M2Crypto.EC.EC_pub object.
"""
ec = m2.ec_key_read_pubkey(bio._ptr())
if ec is None:
ec_error()
return EC_pub(ec, 1)
def ec_error():
raise ECError, m2.err_reason_error_string(m2.err_get_error())
def pub_key_from_der(der):
"""
Create EC_pub from DER.
"""
return EC_pub(m2.ec_key_from_pubkey_der(der), 1)
/tren-1.239.tar.gz/tren-1.239/tren.py
# Program Information
PROGNAME = "tren.py"
BASENAME = PROGNAME.split(".py")[0]
PROGENV = BASENAME.upper()
INCLENV = PROGENV + "INCL"
RCSID = "$Id: tren.py,v 1.239 2010/10/07 17:34:23 tundra Exp $"
VERSION = RCSID.split()[2]
# Copyright Information
CPRT = "(c)"
DATE = "2010"
OWNER = "TundraWare Inc."
RIGHTS = "All Rights Reserved."
COPYRIGHT = "Copyright %s %s, %s %s" % (CPRT, DATE, OWNER, RIGHTS)
PROGVER = PROGNAME + " " + VERSION + (" - %s" % COPYRIGHT)
HOMEPAGE = "http://www.tundraware.com/Software/%s\n" % BASENAME
#----------------------------------------------------------#
# Variables User Might Change #
#----------------------------------------------------------#
#------------------- Nothing Below Here Should Need Changing ------------------#
#----------------------------------------------------------#
# Imports #
#----------------------------------------------------------#
import copy
import getopt
import glob
import os
import random
import re
import shlex
from stat import *
import sys
import time
#####
# Imports conditional on OS
#####
# Set OS type - this allows us to trigger OS-specific code
# where needed.
OSNAME = os.name
POSIX = False
WINDOWS = False
if OSNAME == 'nt':
WINDOWS = True
elif OSNAME == 'posix':
POSIX = True
# Set up Windows-specific stuff
if WINDOWS:
# Try to load win32all stuff if it's available
try:
from win32api import GetFileAttributes, GetComputerName
import win32con
from win32file import GetDriveType
from win32wnet import WNetGetUniversalName
from win32security import *
WIN32HOST = GetComputerName()
WIN32ALL = True
except:
WIN32ALL = False
# Set up Unix-specific stuff
elif POSIX:
# Get Unix password and group features
import grp
import pwd
# Uh oh, this is not an OS we know about
else:
sys.stderr.write("Unsupported Operating System! Aborting ...\n")
sys.exit(1)
#----------------------------------------------------------#
# Aliases & Redefinitions #
#----------------------------------------------------------#
#----------------------------------------------------------#
# Constants & Literals #
#----------------------------------------------------------#
#####
# General Program Constants
#####
MAXINCLUDES = 1000 # Maximum number of includes allowed - used to catch circular references
MAXNAMELEN = 255 # Maximum file or directory name length
MINNAMELEN = 1 # Minimum file or directory name length
#####
# Message Formatting Constants
#####
# Make sure these make sense: ProgramOptions[MAXLINELEN] > PADWIDTH + WRAPINDENT
# because of the way line conditioning/wrap works.
PADCHAR = " " # Padding character
PADWIDTH = 30 # Column width
LSTPAD = 13 # Padding to use when dumping lists
WRAPINDENT = 8 # Extra indent on wrapped lines
MINLEN = PADWIDTH + WRAPINDENT + 1 # Minimum line length
#####
# Command Line Option Strings
#####
# List all legal command line options that will be processed by getopt() later.
# We exclude -I here because it is parsed manually before the getopt() call.
OPTIONSLIST = "A:abCcde:fhi:P:qR:r:S:T:tvw:Xx" # All legal command line options in getopt() format
#####
# Literals
#####
ARROW = "--->" # Used for formatting renaming messages
ASKDOREST = "!" # Do rest of renaming without asking
ASKNO = "N" # Do not rename current file
ASKQUIT = "q" # Quit renaming all further files
ASKYES = "y" # Rename current file
COMMENT = "#" # Comment character in include files
DEFINST = 0 # Default replacement instance
DEFLEN = 80 # Default output line length
DEFSEP = "=" # Default rename command separator: old=new
DEFSUFFIX = ".backup" # String used to rename existing targets
DEFESC = "\\" # Escape character
INCL = "I" # Include file command line option
INDENT = " " # Indent string for nested messages
NULLESC = "Escape string" # Cannot be null
NULLRENSEP = "Old/New separator string" # Cannot be null
NULLSUFFIX = "Forced renaming suffix string" # Cannot be null
OPTINTRO = "-" # Option introducer
PATHDELUNIX = ":" # Separates include path elements on Unix systems
PATHDELWIN = ";" # Separates include path elements on Windows systems
PATHSEP = os.path.sep # File path separator character
RANGESEP = ":" # Separator for instance ranges
SINGLEINST = "SINGLEINST" # Indicates a single, not range, in a slice
WINDOWSGROUP = "WindowsGroup" # Returned on Windows w/o win32all
WINDOWSUSER = "WindowsUser" # Returned on Windows w/o win32all
WINGROUPNOT = "GroupNotAvailable" # Returned when win32all can't get a group name
WINUSERNOT = "UserNotAvailable" # Returned when win32all can't get a user name
#####
# Replacement Token Literals
#####
# Sequence Alphabets
BINARY = "Binary"
DECIMAL = "Decimal"
OCTAL = "Octal"
HEXLOWER = "HexLower"
HEXUPPER = "HexUpper"
LOWER = "Lower"
LOWERUPPER = "LowerUpper"
UPPER = "Upper"
UPPERLOWER = "UpperLower"
# General Literals
ALPHADELIM = ":" # Delimits alphabet name in a Sequence renaming token
TOKDELIM = "/" # Delimiter for all renaming tokens
# Shared File Attribute And Sequence Renaming Tokens
TOKFILADATE = "ADATE"
TOKFILATIME = "ATIME"
TOKFILCMD = "CMDLINE"
TOKFILCDATE = "CDATE"
TOKFILCTIME = "CTIME"
TOKFILDEV = "DEV"
TOKFILFNAME = "FNAME"
TOKFILGID = "GID"
TOKFILGROUP = "GROUP"
TOKFILINODE = "INODE"
TOKFILMODE = "MODE"
TOKFILMDATE = "MDATE"
TOKFILMTIME = "MTIME"
TOKFILNLINK = "NLINK"
TOKFILSIZE = "SIZE"
TOKFILUID = "UID"
TOKFILUSER = "USER"
# File Time Renaming Tokens
TOKADAY = "ADAY" # mm replacement token
TOKAHOUR = "AHOUR" # hh replacement token
TOKAMIN = "AMIN" # mm replacement token
TOKAMON = "AMON" # MM replacement token
TOKAMONTH = "AMONTH" # Mmm replacement token
TOKASEC = "ASEC" # ss replacement token
TOKAWDAY = "AWDAY" # Ddd replacement token
TOKAYEAR = "AYEAR" # yyyy replacement token
TOKCDAY = "CDAY" # mm replacement token
TOKCHOUR = "CHOUR" # hh replacement token
TOKCMIN = "CMIN" # mm replacement token
TOKCMON = "CMON" # MM replacement token
TOKCMONTH = "CMONTH" # Mmm replacement token
TOKCSEC = "CSEC" # ss replacement token
TOKCWDAY = "CWDAY" # Ddd replacement token
TOKCYEAR = "CYEAR" # yyyy replacement token
TOKMDAY = "MDAY" # mm replacement token
TOKMHOUR = "MHOUR" # hh replacement token
TOKMMIN = "MMIN" # mm replacement token
TOKMMON = "MMON" # MM replacement token
TOKMMONTH = "MMONTH" # Mmm replacement token
TOKMSEC = "MSEC" # ss replacement token
TOKMWDAY = "MWDAY" # Ddd replacement token
TOKMYEAR = "MYEAR" # yyyy replacement token
# System Renaming Tokens
TOKCMDEXEC = "`" # Delimiter for command execution renaming tokens
TOKENV = "$" # Introducer for environment variable replacement tokens
TOKRAND = "RAND" # Random replacement token
TOKNAMESOFAR = "NAMESOFAR" # New name so far
# Sequence Renaming Tokens
TOKASCEND = "+" # Ascending order flag
TOKDESCEND = "-" # Descending order flag
#####
# Internal program state literals
#####
ASK = "ASK"
BACKUPS = "BACKUPS"
DEBUG = "DEBUG"
CASECONV = "CASECONV"
CASESENSITIVE = "CASESENSITIVE"
ESCAPE = "ESCAPE"
EXISTSUFFIX = "EXISTSUFFIX"
FORCERENAME = "FORCERENAME"
INSTANCESTART = "INSTANCESTART"
INSTANCEEND = "INSTANCEEND"
MAXLINELEN = "MAXLINELEN"
QUIET = "QUIET"
REGEX = "REGEX"
RENSEP = "RENSEP"
TARGETSTART = "TARGETSTART"
TARGETEND = "TARGETEND"
TESTMODE = "TESTMODE"
#####
# Renaming Literals
#####
# Rename target keys
BASE = "BASENAME"
PATHNAME = "PATHNAME"
STATS = "STATS"
# These literals serve two purposes:
#
# 1) They are used as the type indicator in a Sequence Renaming Token
# 2) They are keys to the SortViews and DateViews dictionaries that store the presorted views
ORDERBYADATE = TOKFILADATE
ORDERBYATIME = TOKFILATIME
ORDERBYCMDLINE = TOKFILCMD
ORDERBYCDATE = TOKFILCDATE
ORDERBYCTIME = TOKFILCTIME
ORDERBYDEV = TOKFILDEV
ORDERBYFNAME = TOKFILFNAME
ORDERBYGID = TOKFILGID
ORDERBYGROUP = TOKFILGROUP
ORDERBYINODE = TOKFILINODE
ORDERBYMODE = TOKFILMODE
ORDERBYMDATE = TOKFILMDATE
ORDERBYMTIME = TOKFILMTIME
ORDERBYNLINK = TOKFILNLINK
ORDERBYSIZE = TOKFILSIZE
ORDERBYUID = TOKFILUID
ORDERBYUSER = TOKFILUSER
# Rename string keys
NEW = "NEW"
OLD = "OLD"
#----------------------------------------------------------#
# Prompts, & Application Strings #
#----------------------------------------------------------#
#####
# Debug Messages
#####
DEBUGFLAG = "-d"
dALPHABETS = "Alphabets"
dCMDLINE = "Command Line"
dCURSTATE = "Current State Of Program Options"
dDATEVIEW = "Date View:"
dDEBUG = "DEBUG"
dDUMPOBJ = "Dumping Object %s"
dINCLFILES = "Included Files:"
dPROGENV = "$" + PROGENV
dRENREQ = "Renaming Request:"
dRENSEQ = "Renaming Sequence: %s"
dRENTARGET = "Rename Target:"
dRESOLVEDOPTS = "Resolved Command Line"
dSEPCHAR = "-" # Used for debug separator lines
dSORTVIEW = "Sort View:"
#####
# Error Messages
#####
eALPHABETEXIST = "Sequence renaming token '%s' specifies a non-existent alphabet!"
eALPHABETMISSING = "Sequence renaming token '%s' has a missing or incorrect alphabet specification!"
eALPHACMDBAD = "Alphabet specification '%s' malformed! Try \"Name:Alphabet\""
eALPHACMDLEN = "Alphabet '%s' too short! Must contain at least 2 symbols."
eARGLENGTH = "%s must contain exactly %s character(s)!"
eBADARG = "Invalid command line: %s!"
eBADCASECONV = "Invalid case conversion argument: %s! Must be one of: %s"
eBADINCL = "option -%s requires argument" % INCL
eBADLEN = "Bad line length '%s'!"
eBADNEWOLD = "Bad -r argument '%s'! Requires exactly one new, old string separator (Default: " + DEFSEP + ")"
eBADREGEX = "Invalid Regular Expression: %s"
eBADSLICE = "%s invalid slice format! Must be integer values in the form: n, :n, n:, start:end, or :"
eERROR = "ERROR"
eEXECFAIL = "Renaming token: '%s', command '%s' Failed To Execute!"
eFILEOPEN = "Cannot open file '%s': %s!"
eLINELEN = "Specified line length too short! Must be at least %s" % MINLEN
eNAMELONG = "Renaming '%s' to new name '%s' too long! (Maximum length is %s.)"
eNAMESHORT = "Renaming '%s' to new name '%s' too short! (Minimum length is %s.)"
eNOROOTRENAME = "Cannot rename root of file tree!"
eNULLARG = "%s cannot be empty!"
eRENAMEFAIL = "Attempt to rename '%s' to '%s' failed : %s!"
eTOKBADSEQ = "Unknown sequence renaming token, '%s'!"
eTOKDELIM = "Renaming token '%s' missing delimiter!"
eTOKRANDIG = "Renaming token: '%s' has invalid random precision! Must be integer > 0."
eTOKUNKNOWN = "Unknown renaming token, '%s'!"
eTOOMANYINC = "Too many includes! (Max is %d) Possible circular reference?" % MAXINCLUDES
#####
# Informational Messages
#####
iFORCEDNOBKU = "Forced renaming WITHOUT backups in effect!!! %s is overwriting %s."
iRENFORCED = "Target '%s' exists. Creating backup."
iRENSKIPPED = "Target '%s' exists. Renaming '%s' skipped."
iRENAMING = "Renaming '%s' " + ARROW + " '%s'"
iSEQTOOLONG = "Sequence number %s is longer than format string %s, rolling over!"
#####
# Usage Prompts
#####
uTable = [PROGVER,
HOMEPAGE,
"usage: " + PROGNAME + " [[-abCcdfhqtvwXx] [-e type] [-I file] [-i instance] [-P escape] [ -R separator] [-r old=new] [-S suffix] [-T target] [-w width]] ... file|dir file|dir ...",
" where,",
" -A alphabet Install \"alphabet\" for use by sequence renaming tokens",
" -a Ask interactively before renaming (Default: Off)",
" -b Turn off backups during forced renaming (Default: Do Backups)",
" -C Do case-sensitive renaming (Default)",
" -c Collapse case when doing string substitution (Default: False)",
" -d Dump debugging information (Default: False)",
" -e type Force case conversion (Default: None)",
" -f Force renaming even if target file or directory name already exists (Default: False)",
" -h Print help information (Default: False)",
" -I file Include command line arguments from file",
" -i num|range Specify which instance(s) to replace (Default: %s)" % DEFINST,
" -P char Use 'char' as the escape sequence (Default: %s)" % DEFESC,
" -q Quiet mode, do not show progress (Default: False)",
" -R char Separator character for -r rename arguments (Default: %s)" % DEFSEP,
" -r old=new Replace old with new in file or directory names",
" -S suffix Suffix to use when renaming existing filenames (Default: %s)" % DEFSUFFIX,
" -t Test mode, don't rename, just show what the program *would* do (Default: False)",
" -T num|range Specify which characters in file name are targeted for renaming (Default: Whole Name)",
" -v Print detailed program version information and continue (Default: False)",
" -w length Line length of diagnostic and error output (Default: %s)" % DEFLEN,
" -X Treat the renaming strings literally (Default)",
" -x Treat the old replacement string as a Python regular expression (Default: False)",
]
#----------------------------------------------------------#
# Lookup Tables #
#----------------------------------------------------------#
# Case Conversion
# Notice use of *unbound* string function methods from the class definition
CASETBL = {'c' : str.capitalize,
'l' : str.lower,
's' : str.swapcase,
't' : str.title,
'u' : str.upper
}
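# Example (illustrative): CASETBL['u']("my file") -> "MY FILE",
# CASETBL['t']("my file") -> "My File"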
CASEOPS = sorted(CASETBL.keys())
# Day And Month Conversion Tables
DAYS = {0:"Mon", 1:"Tue", 2:"Wed", 3:"Thu", 4:"Fri", 5:"Sat", 6:"Sun"}
MONTHS = {1:"Jan", 2:"Feb", 3:"Mar", 4:"Apr", 5:"May", 6:"Jun",
7:"Jul", 8:"Aug", 9:"Sep", 10:"Oct", 11:"Nov", 12:"Dec"}
# File Time Renaming Token Lookup Table
FILETIMETOKS = { TOKADAY : ("%02d", "ST_ATIME", "tm_mday"),
TOKAHOUR : ("%02d", "ST_ATIME", "tm_hour"),
TOKAMIN : ("%02d", "ST_ATIME", "tm_min"),
TOKAMON : ("%02d", "ST_ATIME", "tm_mon"),
TOKAMONTH : ("", "ST_ATIME", "tm_mon"),
TOKASEC : ("%02d", "ST_ATIME", "tm_sec"),
TOKAWDAY : ("", "ST_ATIME", "tm_wday"),
TOKAYEAR : ("%04d", "ST_ATIME", "tm_year"),
TOKCDAY : ("%02d", "ST_CTIME", "tm_mday"),
TOKCHOUR : ("%02d", "ST_CTIME", "tm_hour"),
TOKCMIN : ("%02d", "ST_CTIME", "tm_min"),
TOKCMON : ("%02d", "ST_CTIME", "tm_mon"),
TOKCMONTH : ("", "ST_CTIME", "tm_mon"),
TOKCSEC : ("%02d", "ST_CTIME", "tm_sec"),
TOKCWDAY : ("", "ST_CTIME", "tm_wday"),
TOKCYEAR : ("%04d", "ST_CTIME", "tm_year"),
TOKMDAY : ("%02d", "ST_MTIME", "tm_mday"),
TOKMHOUR : ("%02d", "ST_MTIME", "tm_hour"),
TOKMMIN : ("%02d", "ST_MTIME", "tm_min"),
TOKMMON : ("%02d", "ST_MTIME", "tm_mon"),
TOKMMONTH : ("", "ST_MTIME", "tm_mon"),
TOKMSEC : ("%02d", "ST_MTIME", "tm_sec"),
TOKMWDAY : ("", "ST_MTIME", "tm_wday"),
TOKMYEAR : ("%04d", "ST_MTIME", "tm_year")
}
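# Example (illustrative): for a file last modified on 2010-10-07, the renaming
# token /MYEAR/ expands to "2010", /MMON/ to "10", and /MMONTH/ to "Oct".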
# Alphabets - The user can add to these on the command line
ALPHABETS = {
BINARY : ["0", "1"],
DECIMAL : ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
HEXLOWER : ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"],
HEXUPPER : ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"],
LOWER : ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p",
"q", "r", "s", "t", "u", "v", "w", "x", "y", "z" ],
LOWERUPPER : ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p",
"q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F",
"G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
"W", "X", "Y", "Z"],
OCTAL : ["0", "1", "2", "3", "4", "5", "6", "7"],
UPPER : ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P",
"Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z" ],
UPPERLOWER : ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P",
"Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v",
"w", "x", "y", "z"]
}
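# Example (illustrative): the command line option -A "Vowels:aeiou" would add
# ALPHABETS["Vowels"] = ["a", "e", "i", "o", "u"] for use by sequence tokens.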
#----------------------------------------------------------#
# Global Variables & Data Structures #
#----------------------------------------------------------#
# List of all the included files
IncludedFiles = []
# Program toggle and option defaults
ProgramOptions = {
ASK : False, # Interactively ask user before renaming each file
BACKUPS : True, # Do backups during forced renaming
DEBUG : False, # Debugging off
CASECONV : None, # Forced case conversions
CASESENSITIVE : True, # Search is case-sensitive
ESCAPE : DEFESC, # Escape string
EXISTSUFFIX : DEFSUFFIX, # What to tack on when renaming existing targets
FORCERENAME : False, # Do not rename if target already exists
INSTANCESTART : DEFINST, # Replace first, leftmost instance by default
INSTANCEEND : SINGLEINST,
MAXLINELEN : DEFLEN, # Width of output messages
QUIET : False, # Display progress
REGEX : False, # Do not treat old string as a regex
RENSEP : DEFSEP, # Old, New string separator for -r
TARGETSTART : False, # Entire file name is renaming target by default
TARGETEND : False,
TESTMODE : False # Test mode off
}
# Used to track the sequence of name transformations as each renaming
# request is applied. The -1th entry is thus also the "name so far"
# used for the /NAMESOFAR/ renaming token.
RenSequence = []
#--------------------------- Code Begins Here ---------------------------------#
#----------------------------------------------------------#
# Object Base Class Definitions #
#----------------------------------------------------------#
#####
# Container For Holding Rename Targets And Renaming Requests
#####
class RenameTargets:
"""
This class is used to keep track of all the files and/or
directories we're renaming. After the class is constructed
and the command line fully parsed, this will contain:
self.RenNames = { fullname : {BASE : basename, PATHNAME : pathtofile, STATS : stats}
... (repeated for each rename target)
}
self.SortViews = {
ORDERBYATIME : [fullnames in atimes order],
ORDERBYCMDLINE : [fullnames in command line order],
ORDERBYCTIME : [fullnames in ctimes order],
ORDERBYDEV : [fullnames in devs order],
ORDERBYFNAME : [fullnames in alphabetic order],
ORDERBYGID : [fullnames in gids order],
ORDERBYGROUP : [fullnames in group name order],
ORDERBYINODE : [fullnames in inode order],
ORDERBYMODE : [fullnames in mode order],
ORDERBYMTIME : [fullnames in mtimes order],
ORDERBYNLINK : [fullnames in nlinks order],
ORDERBYSIZE : [fullnames in size order],
ORDERBYUID : [fullnames in uids order],
ORDERBYUSER : [fullnames in user name order]
}
self.DateViews = {
ORDERBYADATE-date... : [fullnames in order by atime within same 'date'] ... (repeated for each date),
ORDERBYCDATE-date... : [fullnames in order by ctime within same 'date'] ... (repeated for each date),
ORDERBYMDATE-date... : [fullnames in order by mtime within same 'date'] ... (repeated for each date)
}
self.RenRequests = [
{
ASK : interactive ask flag
BACKUPS : do backups during forced renaming flag,
OLD : old rename string,
NEW : new rename string,
DEBUG : debug flag,
CASECONV : type of case conversion,
CASESENSITIVE : case sensitivity flag,
FORCERENAME : force renaming flag,
INSTANCESTART : Replace first, leftmost instance by default,
INSTANCEEND : ,
MAXLINELEN : max output line length,
QUIET : quiet output flag,
REGEX : regular expression enable flag,
RENSEP : old/new rename separator string,
TARGETSTART : Entire file name target for renaming by default,
TARGETEND : ,
TESTMODE : testmode flag
} ... (repeated for each rename request)
]
"""
#####
# Constructor
#####
def __init__(self, targs):
# Keep track of all the new filenames we write (or would have)
# so test mode can correctly report just what the program
# *would* do. Without this, backup generation is not properly
# reported in test mode.
self.RenamedFiles = []
self.NewFiles = []
# Dictionary of all rename targets and their stat info
self.RenNames = {}
# Dictionary of all possible sort views
# We can load the first two right away since they're based
# only on the target names provided on the command line
i=0
while i < len(targs):
targs[i] = os.path.abspath(targs[i])
i += 1
alpha = targs[:]
alpha.sort()
self.SortViews = {ORDERBYCMDLINE : targs, ORDERBYFNAME : alpha}
del alpha
# Dictionary to hold all possible date views - files sorted
# by time *within* a common date.
self.DateViews = {}
# Dictionary of all the renaming requests - will be filled in
# by -r command line parsing.
self.RenRequests = []
# This data structure is used to build various sort views
# A null first field means the view requires special handling,
# otherwise it's just a stat structure lookup.
SeqTypes = [
[ST_ATIME, {}, ORDERBYATIME],
[ST_CTIME, {}, ORDERBYCTIME],
[ST_DEV, {}, ORDERBYDEV],
[ST_GID, {}, ORDERBYGID],
["", {}, ORDERBYGROUP],
[ST_INO, {}, ORDERBYINODE],
[ST_MODE, {}, ORDERBYMODE],
[ST_MTIME, {}, ORDERBYMTIME],
[ST_NLINK, {}, ORDERBYNLINK],
[ST_SIZE, {}, ORDERBYSIZE],
[ST_UID, {}, ORDERBYUID],
["", {}, ORDERBYUSER],
]
# Populate the data structures with each target's stat information
for fullname in targs:
try:
pathname, basename = os.path.split(fullname)
stats = os.stat(fullname)
except (IOError, OSError) as e:
ErrorMsg(eFILEOPEN % (fullname, e.args[1]))
# Some operating systems (Windows) terminate the path with
# a separator, some (Posix) do not.
if pathname[-1] != os.sep:
pathname += os.sep
# Store fullname, basename, and stat info for this file
if basename:
self.RenNames[fullname] = {BASE : basename, PATHNAME : pathname, STATS : stats}
# Catch the case where they're trying to rename the root of the directory tree
else:
ErrorMsg(eNOROOTRENAME)
# Incrementally build lists of keys that will later be
# used to create sequence renaming tokens
for seqtype in SeqTypes:
statflag, storage, order = seqtype
# Handle os.stat() values
if statflag:
sortkey = stats[statflag]
# Handle group name values
elif order == ORDERBYGROUP:
sortkey = self.__GetFileGroupname(fullname)
# Handle user name values
elif order == ORDERBYUSER:
sortkey = self.__GetFileUsername(fullname)
# Save into storage
if sortkey in storage:
storage[sortkey].append(fullname)
else:
storage[sortkey] = [fullname]
# Create the various sorted views we may need for sequence
# renaming tokens
for seqtype in SeqTypes:
statflag, storage, order = seqtype
vieworder = sorted(storage.keys())
# Sort alphabetically when multiple filenames
# map to the same key, creating overall
# ordering as we go.
t = []
for i in vieworder:
storage[i].sort()
for j in storage[i]:
t.append(j)
# Now store for future reference
self.SortViews[order] = t
# Release the working data structures
del SeqTypes
# Now build the special cases of ordering by time within date
# for each of the timestamp types.
for dateorder, timeorder, year, mon, day in ((ORDERBYADATE, ORDERBYATIME,
FILETIMETOKS[TOKAYEAR],
FILETIMETOKS[TOKAMON],
FILETIMETOKS[TOKADAY]),
(ORDERBYCDATE, ORDERBYCTIME,
FILETIMETOKS[TOKCYEAR],
FILETIMETOKS[TOKCMON],
FILETIMETOKS[TOKCDAY]),
(ORDERBYMDATE, ORDERBYMTIME,
FILETIMETOKS[TOKMYEAR],
FILETIMETOKS[TOKMMON],
FILETIMETOKS[TOKMDAY])):
lastdate = ""
for fullname in self.SortViews[timeorder]:
targettime = eval("time.localtime(self.RenNames[fullname][STATS][%s])" % year[1])
newdate = year[0] % eval("targettime.%s" % year[2]) + \
mon[0] % eval("targettime.%s" % mon[2]) + \
day[0] % eval("targettime.%s" % day[2])
key = dateorder+newdate
# New file date encountered
if newdate != lastdate:
self.DateViews[key] = [fullname]
lastdate = newdate
# Add file to existing list of others sharing that date
else:
self.DateViews[key].append(fullname)
# End of '__init__()'
#####
# Debug Dump
#####
def DumpObj(self):
SEPARATOR = dSEPCHAR * ProgramOptions[MAXLINELEN]
DebugMsg("\n")
DebugMsg(SEPARATOR)
DebugMsg(dDUMPOBJ % str(self))
DebugMsg(SEPARATOR)
# Dump the RenNames and SortView dictionaries
for i, msg in ((self.RenNames, dRENTARGET), (self.SortViews, dSORTVIEW), (self.DateViews, dDATEVIEW)):
for j in i:
DumpList(msg, j, i[j])
for rr in self.RenRequests:
DumpList(dRENREQ, "", rr)
DebugMsg(SEPARATOR + "\n\n")
# End of 'DumpObj()'
#####
# Determine File's Group Name
#####
def __GetFileGroupname(self, fullname):
if POSIX:
return grp.getgrgid(self.RenNames[fullname][STATS][ST_GID])[0]
else:
retval = WINDOWSGROUP
if WIN32ALL:
try:
# Get the internal Win32 security group information for this file.
hg = GetFileSecurity(fullname, GROUP_SECURITY_INFORMATION)
sidg = hg.GetSecurityDescriptorGroup()
# We have to know who is hosting the filesystem for this file
drive = fullname[0:3]
if GetDriveType(drive) == win32con.DRIVE_REMOTE:
fnhost = WNetGetUniversalName(drive, 1).split('\\')[2]
else:
fnhost = WIN32HOST
# Now we can translate the sids into names
retval = LookupAccountSid(fnhost, sidg)[0]
# On any error, just act like win32all isn't there
except:
retval = WINGROUPNOT
return retval
# End of 'GetFileGroupname()'
#####
# Determine File's User Name
#####
def __GetFileUsername(self, fullname):
if POSIX:
return pwd.getpwuid(self.RenNames[fullname][STATS][ST_UID])[0]
else:
retval = WINDOWSUSER
if WIN32ALL:
try:
# Get the internal Win32 security information for this file.
ho = GetFileSecurity(fullname, OWNER_SECURITY_INFORMATION)
sido = ho.GetSecurityDescriptorOwner()
# We have to know who is hosting the filesystem for this file
drive = fullname[0:3]
if GetDriveType(drive) == win32con.DRIVE_REMOTE:
fnhost = WNetGetUniversalName(drive, 1).split('\\')[2]
else:
fnhost = WIN32HOST
# Now we can translate the sids into names
retval = LookupAccountSid(fnhost, sido)[0]
# On any error, just act like win32all isn't there
except:
retval = WINUSERNOT
return retval
# End of 'GetFileUsername()'
#####
# Go Do The Requested Renaming
#####
def ProcessRenameRequests(self):
global RenSequence
self.indentlevel = -1
# Create a list of all renaming to be done.
# This includes the renaming of any existing targets.
for target in self.SortViews[ORDERBYCMDLINE]:
oldname, pathname = self.RenNames[target][BASE], self.RenNames[target][PATHNAME]
newname = oldname
# Keep track of incremental renaming for use by debug
RenSequence = [oldname]
for renrequest in self.RenRequests:
# Select portion of name targeted for renaming
lname = ""
rname = ""
tstart = renrequest[TARGETSTART]
tend = renrequest[TARGETEND]
# Condition values so that range slicing works properly below.
# This couldn't be done at the time the target range was
# saved initially, because the length of the name being processed
# isn't known until here.
if tstart is None:
tstart = 0
if tend is None:
tend = len(newname)
if tstart or tend:
bound = len(newname)
# Normalize negative refs so we can use consistent
# logic below
if tstart < 0:
tstart = bound + tstart
if (tend != SINGLEINST and tend < 0):
tend = bound + tend
# Condition and bounds check the target range as needed
# Handle single position references
if (tend == SINGLEINST):
# Select the desired position. Notice that
# out-of-bounds references are ignored and the
# name is left untouched. This is so the user
# can specify renaming operations on file
# names of varying lengths and have them apply
# only to those files long enough to
# accommodate the request without blowing up
# on the ones that are not long enough.
if 0 <= tstart < bound:
lname, newname, rname = newname[:tstart], newname[tstart], newname[tstart+1:]
# Reference is out of bounds - leave name untouched
else:
lname, newname, rname = newname, "", ""
# Handle slice range requests
else:
# Out-Of-Bounds or invalid slice ranges will
# cause renaming request to be ignored as above
if newname[tstart:tend]:
lname, newname, rname = newname[:tstart], newname[tstart:tend], newname[tend:]
else:
lname, newname, rname = newname, "", ""
# Handle conventional string replacement renaming requests
# An empty newname here means that the -T argument processing
# selected a new string and/or was out of bounds -> we ignore the request.
if newname and (renrequest[OLD] or renrequest[NEW]):
# Resolve any embedded renaming tokens
old = self.__ResolveRenameTokens(target, renrequest[OLD])
new = self.__ResolveRenameTokens(target, renrequest[NEW])
oldstrings = []
# Build a list of indexes to every occurrence of the old string,
# taking case sensitivity into account
# Handle the case when old = "". This means to
# *replace the entire* old name with new. More
# specifically, replace the entire old name *as
# modified so far by preceding rename commands*.
if not old:
old = newname
# Find every instance of the 'old' string in the
# current filename. 'Find' in this case can be either
# a regular expression pattern match or a literal
# string match.
#
# Either way, each 'hit' is recorded as a tuple:
#
# (index to beginning of hit, beginning of next non-hit text)
#
# This is so subsequent replacement logic knows:
#
# 1) Where the replacement begins
# 2) Where the replacement ends
#
# These two values are used during actual string
# replacement to properly replace the 'new' string
# into the requested locations.
# Handle regular expression pattern matching
if renrequest[REGEX]:
try:
# Do the match either case-insensitively or not
if renrequest[CASESENSITIVE]:
rematches = re.finditer(old, newname)
else:
rematches = re.finditer(old, newname, re.I)
# And save off the results
for match in rematches:
oldstrings.append(match.span())
except:
ErrorMsg(eBADREGEX % old)
# Handle literal string replacement
else:
searchtarget = newname
# Collapse case if requested
if not renrequest[CASESENSITIVE]:
searchtarget = searchtarget.lower()
old = old.lower()
oldlen = len(old)
i = searchtarget.find(old)
while i >= 0:
nextloc = i + oldlen
oldstrings.append((i, nextloc))
i = searchtarget.find(old, nextloc)
# If we found any matching strings, replace them
if oldstrings:
# But only process the instances the user asked for
todo = []
# Handle single instance requests doing bounds checking as we go
start = renrequest[INSTANCESTART]
end = renrequest[INSTANCEEND]
if (end == SINGLEINST):
# Compute bounds for positive and negative indices.
# This is necessary because positive indices are 0-based,
# but negative indices start at -1.
bound = len(oldstrings)
if start < 0:
bound += 1
# Now go get that entry
if abs(start) < bound:
todo.append(oldstrings[start])
# Handle instance range requests
else:
todo = oldstrings[start:end]
# Replace selected substring(s). Substitute from
# R->L in original string so as not to mess up the
# replacement indicies.
todo.reverse()
for i in todo:
newname = newname[:i[0]] + new + newname[i[1]:]
# Handle case conversion renaming requests
elif renrequest[CASECONV]:
newname = CASETBL[renrequest[CASECONV]](newname)
# Any subsequent replacements operate on the modified name
# which is reconstructed by combining what we've renamed
# with anything that was excluded from the rename operation.
newname = lname + newname + rname
# Keep track of incremental renaming for use by debug
RenSequence.append(newname)
# Show the incremental renaming steps if debug is on
if ProgramOptions[DEBUG]:
DebugMsg(dRENSEQ % ARROW.join(RenSequence))
# Nothing to do, if old- and new names are the same
if newname != oldname:
self.__RenameIt(pathname, oldname, newname)
# End of 'ProcessRenameRequests()'
#####
# Actually Rename A File
#####
def __RenameIt(self, pathname, oldname, newname):
self.indentlevel += 1
indent = self.indentlevel * INDENT
newlen = len(newname)
# First make sure the new name meets length constraints
if newlen < MINNAMELEN:
ErrorMsg(indent + eNAMESHORT% (oldname, newname, MINNAMELEN))
return
if newlen > MAXNAMELEN:
ErrorMsg(indent + eNAMELONG % (oldname, newname, MAXNAMELEN))
return
# Get names into absolute path form
fullold = pathname + oldname
fullnew = pathname + newname
# See if our proposed renaming is about to stomp on an
# existing file, and create a backup if forced renaming
# requested. We do such backups with a recursive call to
# ourselves so that filename length limits are observed and
# backups-of-backups are preserved.
doit = True
newexists = os.path.exists(fullnew)
if (not ProgramOptions[TESTMODE] and newexists) or \
(ProgramOptions[TESTMODE] and fullnew not in self.RenamedFiles and (newexists or fullnew in self.NewFiles)):
if ProgramOptions[FORCERENAME]:
# Create the backup unless we've been told not to
if ProgramOptions[BACKUPS]:
bkuname = newname + ProgramOptions[EXISTSUFFIX]
InfoMsg(indent + iRENFORCED % fullnew)
self.__RenameIt(pathname, newname, bkuname)
else:
InfoMsg(iFORCEDNOBKU % (fullold, fullnew))
else:
InfoMsg(indent + iRENSKIPPED % (fullnew, fullold))
doit = False
if doit:
if ProgramOptions[ASK]:
answer = ""
while answer.lower() not in [ASKNO.lower(), ASKYES.lower(), ASKDOREST.lower(), ASKQUIT.lower()]:
PrintStdout("Rename %s to %s? [%s]: " % (fullold, fullnew, ASKNO+ASKYES+ASKDOREST+ASKQUIT), TRAILING="")
answer = sys.stdin.readline().lower().strip()
# A blank line means take the default - do nothing.
if not answer:
answer = ASKNO.lower()
if answer == ASKNO.lower():
doit = False
if answer == ASKYES.lower():
doit = True
if answer == ASKDOREST.lower():
doit = True
ProgramOptions[ASK] = False
if answer == ASKQUIT.lower():
sys.exit(1)
if doit:
# In test mode, track file names that would be produced.
if ProgramOptions[TESTMODE]:
self.NewFiles.append(fullnew)
self.RenamedFiles.append(fullold)
if fullold in self.NewFiles:
self.NewFiles.remove(fullold)
if fullnew in self.RenamedFiles:
self.RenamedFiles.remove(fullnew)
InfoMsg(indent + iRENAMING % (fullold, fullnew))
if not ProgramOptions[TESTMODE]:
try:
os.rename(fullold, fullnew)
except OSError as e:
ErrorMsg(eRENAMEFAIL % (fullold, fullnew, e.args[1]))
self.indentlevel -= 1
# End of '__RenameIt()'
#####
# Resolve Rename Tokens
#####
""" This replaces all renaming token references in 'renstring'
with the appropriate content and returns the resolved string.
'target' is the name of the current file being renamed. We
need that as well because some renaming tokens refer to file
stat attributes or even the file name itself.
"""
def __ResolveRenameTokens(self, target, renstring):
# Find all token delimiters but ignore any that might appear
# inside a command execution replacement token string.
rentokens = []
odd = True
incmdexec = False
i=0
while i < len(renstring):
if renstring[i] == TOKCMDEXEC:
incmdexec = not incmdexec
elif renstring[i] == TOKDELIM:
if incmdexec:
pass
elif odd:
rentokens.append([i])
odd = not odd
else:
rentokens[-1].append(i)
odd = not odd
i += 1
# There must be an even number of token delimiters
# or the renaming token is malformed
if rentokens and len(rentokens[-1]) != 2:
ErrorMsg(eTOKDELIM % renstring)
# Now add the renaming token contents. This will be used to
# figure out what the replacement text should be.
i = 0
while i < len(rentokens):
rentokens[i].append(renstring[rentokens[i][0]+1 : rentokens[i][1]])
i += 1
# Process each token. Work left to right so as not to mess up
# the previously stored indexes.
rentokens.reverse()
for r in rentokens:
fullrentoken = "%s%s%s" % (TOKDELIM, r[2], TOKDELIM) # Need this for error messages.
###
# File Attribute Renaming Tokens
###
if r[2] == TOKFILDEV:
r[2] = str(self.RenNames[target][STATS][ST_DEV])
elif r[2] == TOKFILFNAME:
r[2] = os.path.basename(target)
elif r[2] == TOKFILGID:
r[2] = str(self.RenNames[target][STATS][ST_GID])
elif r[2] == TOKFILGROUP:
r[2] = self.__GetFileGroupname(target)
elif r[2] == TOKFILINODE:
r[2] = str(self.RenNames[target][STATS][ST_INO])
elif r[2] == TOKFILMODE:
r[2] = str(self.RenNames[target][STATS][ST_MODE])
elif r[2] == TOKFILNLINK:
r[2] = str(self.RenNames[target][STATS][ST_NLINK])
elif r[2] == TOKFILSIZE:
r[2] = str(self.RenNames[target][STATS][ST_SIZE])
elif r[2] == TOKFILUID:
r[2] = str(self.RenNames[target][STATS][ST_UID])
elif r[2] == TOKFILUSER:
r[2] = self.__GetFileUsername(target)
###
# File Time Renaming Tokens
###
elif r[2] in FILETIMETOKS:
parms = FILETIMETOKS[r[2]]
val = eval("time.localtime(self.RenNames[target][STATS][%s]).%s" % (parms[1], parms[2]))
# The first value of FILETIMETOKS table entry
# indicates the formatting string to use (if the entry
# is non null), or that we're doing a lookup for the
# name of a month (if the entry is null)
if parms[0]:
r[2] = parms[0] % val
elif parms[2] == "tm_mon":
r[2] = MONTHS[val]
elif parms[2] == "tm_wday":
r[2] = DAYS[val]
###
# System Renaming Tokens
###
# Environment Variable replacement token
elif r[2].startswith(TOKENV):
r[2] = os.getenv(r[2][1:])
# Handle case for nonexistent environment variable
if not r[2]:
r[2] = ""
# Command Run replacement token
elif r[2].startswith(TOKCMDEXEC) and r[2].endswith(TOKCMDEXEC):
command = r[2][1:-1]
# Handle Windows variants - they act differently
if not POSIX:
pipe = os.popen(command, 'r')
# Handle Unix variants
else:
pipe = os.popen('{ ' + command + '; } 2>&1', 'r')
output = pipe.read()
status = pipe.close()
if status is None:
status = 0
# Nonzero status means error attempting to execute the command
if status:
ErrorMsg(eEXECFAIL % (fullrentoken, command))
# Otherwise swap the command with its results, stripping newlines
else:
r[2] = output.replace("\n", "")
# Random Number Replacement token
elif r[2].startswith(TOKRAND):
random.seed()
# Figure out how many digits of randomness the user wants
try:
precision = r[2].split(TOKRAND)[1]
precision = int(precision)
except:
ErrorMsg(eTOKRANDIG % fullrentoken)
if precision < 1:
ErrorMsg(eTOKRANDIG % fullrentoken)
fmt = '"%0' + str(precision) + 'd" % random.randint(0, pow(10, precision)-1)'
r[2] = eval(fmt)
# Name So Far Replacement Token
elif r[2] == (TOKNAMESOFAR):
r[2] = RenSequence[-1]
###
# Sequence Renaming Tokens
###
elif r[2] and (r[2][0] == TOKASCEND or r[2][0] == TOKDESCEND):
# Parse the Sequence Renaming Token into the token itself
# and its corresponding formatting field.
# Note that a legal Sequence Renaming Token will either
# be one of the keys of the SortViews dictionary or one
# of the "ORDERBYnDATE" orderings.
token = r[2][1:]
found = False
for seqtoken in list(self.SortViews.keys()) + [ORDERBYADATE, ORDERBYCDATE, ORDERBYMDATE]:
if token.split(ALPHADELIM)[0] == (seqtoken):
token, field = token[:len(seqtoken)], token[len(seqtoken):]
found = True
break
if not found:
ErrorMsg(eTOKBADSEQ % fullrentoken)
# Now derive the name of the alphabet to use
if not field.startswith(ALPHADELIM):
ErrorMsg(eALPHABETMISSING % fullrentoken)
field = field[1:]
alphabet, alphadelim, field = field.partition(ALPHADELIM)
if not alphadelim:
ErrorMsg(eALPHABETMISSING % fullrentoken)
# Empty alphabet string means default to decimal counting
if not alphabet:
alphabet = DECIMAL
if alphabet not in ALPHABETS:
ErrorMsg(eALPHABETEXIST % fullrentoken)
# Retrieve the ordered list of the requested type,
# adjust for descending order, and plug in the
# sequence number for the current renaming target
# (which is just the index of that filename in the
# list).
# One of the standard sorted views requested
if token in self.SortViews:
orderedlist = self.SortViews[token][:]
# One of the views sorted within dates requested
else:
if token == ORDERBYADATE:
year, mon, day = FILETIMETOKS[TOKAYEAR], FILETIMETOKS[TOKAMON], FILETIMETOKS[TOKADAY]
elif token == ORDERBYCDATE:
year, mon, day = FILETIMETOKS[TOKCYEAR], FILETIMETOKS[TOKCMON], FILETIMETOKS[TOKCDAY]
elif token == ORDERBYMDATE:
year, mon, day = FILETIMETOKS[TOKMYEAR], FILETIMETOKS[TOKMMON], FILETIMETOKS[TOKMDAY]
targettime = eval("time.localtime(self.RenNames[target][STATS][%s])" % year[1])
key = token + \
year[0] % eval("targettime.%s" % year[2]) + \
mon[0] % eval("targettime.%s" % mon[2]) + \
day[0] % eval("targettime.%s" % day[2])
orderedlist = self.DateViews[key][:]
if r[2][0] == TOKDESCEND:
orderedlist.reverse()
r[2] = ComputeSeqString(field, orderedlist.index(target), ALPHABETS[alphabet])
###
# Unrecognized Renaming Token
###
else:
ErrorMsg(eTOKUNKNOWN % fullrentoken)
###
# Successful Lookup, Do the actual replacement
###
renstring = renstring[:r[0]] + r[2] + renstring[r[1]+1:]
return renstring
# End of '__ResolveRenameTokens()'
# End of class 'RenameTargets'
#----------------------------------------------------------#
# Supporting Function Definitions #
#----------------------------------------------------------#
#####
# Check For Correct Slice Syntax
#####
def CheckSlice(val):
try:
# Process ranges
if val.count(RANGESEP):
lhs, rhs = val.split(RANGESEP)
if not lhs:
lhs = None
else:
lhs = int(lhs)
if not rhs:
rhs = None
else:
rhs = int(rhs)
# Process single indexes
else:
lhs = int(val)
rhs = SINGLEINST
# Something about the argument was bogus
except:
ErrorMsg(eBADSLICE % val)
return (lhs, rhs)
# End Of 'CheckSlice()'
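# Examples (illustrative):
#   CheckSlice("3") -> (3, SINGLEINST)
#   CheckSlice("2:5") -> (2, 5)
#   CheckSlice(":-1") -> (None, -1)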
#####
# Turn A List Into Columns With Space Padding
#####
def ColumnPad(list, PAD=PADCHAR, WIDTH=PADWIDTH):
retval = ""
for l in list:
l = str(l)
retval += l + ((WIDTH - len(l)) * PAD)
return retval
# End of 'ColumnPad()'
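# Example (illustrative): ColumnPad(["Opt", True], WIDTH=6) -> "Opt   True  "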
def ComputeSeqString(fmt, incr, alphabet):
"""
fmt = A literal "format field" string
incr = A integer to be "added" to the field
alphabet = The alphabet of characters to use, in ascending order
Add 'incr' to 'fmt' in base(len(alphabet)). Characters in
'fmt' that are not in 'alphabet' are ignored in this addition.
The final result is limited to be no longer than 'fmt'. Any
result longer than fmt has MSD dropped, thereby effectively
rolling over the count. If 'fmt' is null on entry, the final
result length is unlimited.
"""
base = len(alphabet)
# Do position-wise "addition" via symbol substitution moving from
# right-to-left adjusting for the fact that not all symbols in the
# format string will be in the alphabet.
# First convert the increment into a string in the base of the
# alphabet
idigits = []
while incr > base-1:
incr, digit = divmod(incr, base)
idigits.append(alphabet[digit])
idigits.append(alphabet[incr])
idigits.reverse()
incr = "".join(idigits)
# Now do right-to-left digit addition with the format
# field.
# Do position-wise "addition" via symbol substitution moving from
# right-to-left. Take into account that the format pattern string
# may be a different length than the increment string and that not
# all characters in the format pattern are guaranteed to exist in
# the alphabet.
newval = ""
carry = None
fmtlen = len(fmt)
incrlen = len(incr)
calcsize = max(fmtlen, incrlen)
i = -1
done = False
while abs(i) <= calcsize and not done:
total = 0
if carry:
total += carry
if fmt and (abs(i) <= fmtlen) and fmt[i] in alphabet:
total += alphabet.index(fmt[i])
if abs(i) <= incrlen:
total += alphabet.index(incr[i])
# Do arithmetic modulo alphabet length (avoiding the 'sum' builtin)
carry, digit = divmod(total, base)
if not carry:
carry = None
# We're completely done if we're out of digits in incr and
# there's no carry to propagate. This prevents us from
# tacking on leading 0th characters which could overwrite
# out-of-alphabet characters in the format field.
if abs(i-1) > incrlen:
done = True
newval = alphabet[digit] + newval
i -= 1
if carry:
newval = alphabet[carry] + newval
# Constrain the results to the length of the original format
# string, rolling over and warning the user as necessary. The one
# exception to this is when a null format string is passed. This
# is understood to mean that sequences of any length are
# permitted.
# Result length constrained by format string
if fmtlen:
if len(newval) > fmtlen:
InfoMsg(iSEQTOOLONG % (newval,fmt))
newval = newval[-fmtlen:]
return fmt[:-len(newval)] + newval
# Any length result permitted
else:
return newval
# End of 'ComputeSeqString()'
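# Examples (illustrative):
#   ComputeSeqString("000", 5, ALPHABETS[DECIMAL]) -> "005"
#   ComputeSeqString("aa", 3, ALPHABETS[LOWER]) -> "ad"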
#####
# Condition Line Length With Fancy Wrap And Formatting
#####
def ConditionLine(msg,
PAD=PADCHAR, \
WIDTH=PADWIDTH, \
wrapindent=WRAPINDENT ):
retval = []
retval.append(msg[:ProgramOptions[MAXLINELEN]])
msg = msg[ProgramOptions[MAXLINELEN]:]
while msg:
msg = PAD * (WIDTH + wrapindent) + msg
retval.append(msg[:ProgramOptions[MAXLINELEN]])
msg = msg[ProgramOptions[MAXLINELEN]:]
return retval
# End of 'ConditionLine()'
#####
# Print A Debug Message
#####
def DebugMsg(msg):
l = ConditionLine(msg)
for msg in l:
PrintStderr(PROGNAME + " " + dDEBUG + ": " + msg)
# End of 'DebugMsg()'
#####
# Debug Dump Of A List
#####
def DumpList(msg, listname, content):
DebugMsg(msg)
itemarrow = ColumnPad([listname, " "], WIDTH=LSTPAD)
DebugMsg(ColumnPad([" ", " %s %s" % (itemarrow, content)]))
# End of 'DumpList()'
#####
# Dump The State Of The Program
#####
def DumpState():
SEPARATOR = dSEPCHAR * ProgramOptions[MAXLINELEN]
DebugMsg(SEPARATOR)
DebugMsg(dCURSTATE)
DebugMsg(SEPARATOR)
opts = sorted(ProgramOptions.keys())
for o in opts:
DebugMsg(ColumnPad([o, ProgramOptions[o]]))
DumpList(dALPHABETS, "", ALPHABETS)
DebugMsg(SEPARATOR)
# End of 'DumpState()'
#####
# Print An Error Message And Exit
#####
def ErrorMsg(emsg):
l = ConditionLine(emsg)
for emsg in l:
PrintStderr(PROGNAME + " " + eERROR + ": " + emsg)
sys.exit(1)
# End of 'ErrorMsg()'
#####
# Split -r Argument Into Separate Old And New Strings
#####
def GetOldNew(arg):
escaping = False
numseps = 0
sepindex = 0
oldnewsep = ProgramOptions[RENSEP]
i = 0
while i < len(arg):
# Scan string ignoring escaped separators
if arg[i:].startswith(oldnewsep):
if (i > 0 and (arg[i-1] != ProgramOptions[ESCAPE])) or i == 0:
sepindex = i
numseps += 1
i += len(oldnewsep)
else:
i += 1
if numseps != 1:
ErrorMsg(eBADNEWOLD % arg)
else:
old, new = arg[:sepindex], arg[sepindex + len(oldnewsep):]
old = old.replace(ProgramOptions[ESCAPE] + oldnewsep, oldnewsep)
new = new.replace(ProgramOptions[ESCAPE] + oldnewsep, oldnewsep)
return [old, new]
# End of 'GetOldNew()'
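# Examples (illustrative, assuming the default '=' separator and '\' escape):
#   GetOldNew("old=new") -> ["old", "new"]
#   GetOldNew("a\=b=c") -> ["a=b", "c"]   (the escaped separator is kept)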
#####
# Print An Informational Message
#####
def InfoMsg(imsg):
l = ConditionLine(imsg)
msgtype = ""
if ProgramOptions[TESTMODE]:
msgtype = TESTMODE
if not ProgramOptions[QUIET]:
for msg in l:
PrintStdout(PROGNAME + " " + msgtype + ": " + msg)
# End of 'InfoMsg()'
#####
# Print To stderr
#####
def PrintStderr(msg, TRAILING="\n"):
sys.stderr.write(msg + TRAILING)
# End of 'PrintStderr()'
#####
# Print To stdout
#####
def PrintStdout(msg, TRAILING="\n"):
sys.stdout.write(msg + TRAILING)
# End of 'PrintStdout()'
#####
# Process Include Files On The Command Line
#####
def ProcessIncludes(OPTIONS):
""" Resolve include file references allowing for nested includes.
This has to be done here separate from the command line
options so that normal getopt() processing below will "see"
the included statements.
This is a bit tricky because we have to handle every possible
legal command line syntax for option specification:
-I filename
-Ifilename
-....I filename
-....Ifilename
"""
# Build a list of all the options that take arguments. This is
# needed to determine whether the include symbol is an include
# option or part of an argument to a preceding option.
OPTIONSWITHARGS = ""
for i in re.finditer(":", OPTIONSLIST):
OPTIONSWITHARGS += OPTIONSLIST[i.start() - 1]
NUMINCLUDES = 0
FoundNewInclude = True
while FoundNewInclude:
FoundNewInclude = False
i = 0
while i < len(OPTIONS):
# Detect all possible command line include constructs,
# isolating the requested filename and replacing its
# contents at that position in the command line.
field = OPTIONS[i]
position = field.find(INCL)
if field.startswith(OPTINTRO) and (position > -1):
lhs = field[:position]
rhs = field[position+1:]
# Make sure the include symbol isn't part of some
# previous option argument
previousopt = False
for c in OPTIONSWITHARGS:
if c in lhs:
previousopt = True
break
# If the include symbol appears in the context of a
# previous option, skip this field, otherwise process
# it as an include.
if not previousopt:
FoundNewInclude = True
if lhs == OPTINTRO:
lhs = ""
if rhs == "":
if i < len(OPTIONS)-1:
inclfile = OPTIONS[i+1]
OPTIONS = OPTIONS[:i+1] + OPTIONS[i+2:]
# We have an include without a filename at the end
# of the command line which is bogus.
else:
ErrorMsg(eBADARG % eBADINCL)
else:
inclfile = rhs
# Before actually doing the include, make sure we've
# not exceeded the limit. This is here mostly to make
# sure we stop recursive/circular includes.
NUMINCLUDES += 1
if NUMINCLUDES > MAXINCLUDES:
ErrorMsg(eTOOMANYINC)
# Read the included file, stripping out comments
# Use include path if one was provided
inclpath = os.getenv(INCLENV)
if inclpath:
found = searchpath(inclfile, inclpath, PATHDEL)
if found:
inclfile = found[0]
try:
n = []
f = open(inclfile)
for line in f.readlines():
line = line.split(COMMENT)[0]
n += shlex.split(line)
f.close()
# Keep track of the filenames being included for debug output
IncludedFiles.append(os.path.abspath(inclfile))
# Insert content of included file at current
# command line position
# A non-null left hand side means that there were
# options before the include we need to preserve
if lhs:
n = [lhs] + n
OPTIONS = OPTIONS[:i] + n + OPTIONS[i+1:]
except IOError as e:
ErrorMsg(eFILEOPEN % (inclfile, e.args[1]))
i += 1
return OPTIONS
# End of 'ProcessIncludes()'
#####
# Search Path Looking For Include File
#####
def searchpath(filename, pathlist, pathdelim):
# What we'll return if we find nothing
retval = []
# Find all instances of filename in specified paths
paths = pathlist.split(pathdelim)
for path in paths:
if path and path[-1] != PATHSEP:
path += PATHSEP
path += filename
if os.path.exists(path):
retval.append(os.path.realpath(path))
return retval
# End of 'searchpath()'
#####
# Print Usage Information
#####
def Usage():
for line in uTable:
PrintStdout(line)
# End of 'Usage()'
#----------------------------------------------------------#
# Program Entry Point #
#----------------------------------------------------------#
# Set up proper include path delimiter
if WINDOWS:
PATHDEL = PATHDELWIN
else:
PATHDEL = PATHDELUNIX
#####
# Command Line Preprocessing
#
# Some things have to be done *before* the command line
# options can actually be processed. This includes:
#
# 1) Prepending any options specified in the environment variable.
#
# 2) Resolving any include file references
#
# 3) Building the data structures that depend on the file/dir names
# specified for renaming. We have to do this first, because
# -r renaming operations specified on the command line will
# need this information if they make use of renaming tokens.
#
#####
# Process any options set in the environment first, and then those
# given on the command line
OPTIONS = sys.argv[1:]
envopt = os.getenv(PROGENV)
if envopt:
OPTIONS = shlex.split(envopt) + OPTIONS
# Deal with include files
OPTIONS = ProcessIncludes(OPTIONS)
# And parse the command line
try:
opts, args = getopt.getopt(OPTIONS, OPTIONSLIST)
except getopt.GetoptError as e:
ErrorMsg(eBADARG % e.args[0])
# Create and populate an object with rename targets. This must be
# done here because this object also stores the -r renaming requests
# we may find in the options processing below. Also, this object must
# be fully populated before any actual renaming can take place since
# many of the renaming tokens derive information about the file being
# renamed.
# Do wildcard expansion on the rename targets because they may
# have come from an include file (where they are not expanded)
# or from a Windows shell that doesn't know how to handle globbing
# properly.
# If the glob expands to nothing, then supply the original string.
# That way an error will be thrown if either an explicitly named file
# does not exist, or if a wildcard expands to nothing.
expandedlist = []
for arg in args:
wc = glob.glob(arg)
if wc:
expandedlist += wc
else:
expandedlist.append(arg)
targs = RenameTargets(expandedlist)
# Now process the options
for opt, val in opts:
# Install new alphabet
if opt == "-A":
alphaname, delim, alpha = val.partition(ALPHADELIM)
if not delim:
ErrorMsg(eALPHACMDBAD % val)
if not alphaname:
ErrorMsg(eALPHACMDBAD % val)
if len(alpha) < 2:
ErrorMsg(eALPHACMDLEN % val)
a = []
for c in alpha:
a.append(c)
ALPHABETS[alphaname] = a
if opt == "-a":
ProgramOptions[ASK] = True
# Turn off backups during forced renaming
if opt == "-b":
ProgramOptions[BACKUPS] = False
# Select case-sensitivity for replacements (or not)
if opt == "-C":
ProgramOptions[CASESENSITIVE] = True
if opt == "-c":
ProgramOptions[CASESENSITIVE] = False
# Turn on debugging
if opt == "-d":
ProgramOptions[DEBUG] = True
DumpState()
# Force case conversion
if opt == "-e":
# Make sure we support the requested case conversion
if val in CASEOPS:
ProgramOptions[CASECONV] = val
# Construct a renaming request
req = {}
req[OLD], req[NEW] = None, None
for opt in ProgramOptions:
req[opt] = ProgramOptions[opt]
targs.RenRequests.append(req)
# Error out if we don't recognize it
else:
ErrorMsg(eBADCASECONV % (val, ", ".join(CASEOPS)))
# Force renaming of existing targets
if opt == "-f":
ProgramOptions[FORCERENAME] = True
# Output usage information
if opt == "-h":
Usage()
sys.exit(0)
# Specify which instances to replace
if opt == "-i":
ProgramOptions[INSTANCESTART], ProgramOptions[INSTANCEEND] = CheckSlice(val)
# Set the escape character
if opt == "-P":
if len(val) == 1:
ProgramOptions[ESCAPE] = val
else:
ErrorMsg(eARGLENGTH % (NULLESC, 1))
# Set quiet mode
if opt == "-q":
ProgramOptions[QUIET] = True
# Set the separator character for replacement specifications
if opt == '-R':
if len(val) == 1:
ProgramOptions[RENSEP] = val
else:
ErrorMsg(eARGLENGTH % (NULLRENSEP, 1))
# Specify a replacement command
if opt == "-r":
req = {}
req[OLD], req[NEW] = GetOldNew(val)
ProgramOptions[CASECONV] = None
for opt in ProgramOptions:
req[opt] = ProgramOptions[opt]
targs.RenRequests.append(req)
# Specify a renaming suffix
if opt == "-S":
if val:
ProgramOptions[EXISTSUFFIX] = val
else:
ErrorMsg(eNULLARG % NULLSUFFIX)
# Set substring targeted for renaming
if opt == "-T":
ProgramOptions[TARGETSTART], ProgramOptions[TARGETEND] = CheckSlice(val)
# Request test mode
if opt == "-t":
ProgramOptions[TESTMODE] = True
# Output program version info
if opt == "-v":
PrintStdout(RCSID)
# Set output width
if opt == "-w":
try:
l = int(val)
except:
ErrorMsg(eBADLEN % val)
if l < MINLEN:
ErrorMsg(eLINELEN)
ProgramOptions[MAXLINELEN] = l
# Select whether 'old' replacement string is a regex or not
if opt == "-X":
ProgramOptions[REGEX] = False
if opt == "-x":
ProgramOptions[REGEX] = True
# At this point, the command line has been fully processed and the
# container fully populated. Provide debug info about both if
# requested.
if ProgramOptions[DEBUG]:
# Dump what we know about the command line
DumpList(dCMDLINE, "", sys.argv)
DumpList(dPROGENV, "", envopt)
DumpList(dRESOLVEDOPTS, "", OPTIONS)
# Dump what we know about included files
DumpList(dINCLFILES, "", IncludedFiles)
# Dump what we know about the container
targs.DumpObj()
# Perform requested renamings
targs.ProcessRenameRequests()
# Release the target container if we created one
del targs
|
PypiClean
|
/coconut-3.0.3.tar.gz/coconut-3.0.3/HELP.md
|
# Coconut Tutorial
```{contents}
---
local:
---
```
## Introduction
Welcome to the tutorial for the [Coconut Programming Language](http://evhub.github.io/coconut/)! Coconut is a variant of [Python](https://www.python.org/) built for **simple, elegant, Pythonic functional programming**. But those are just words; what they mean in practice is that _all valid Python 3 is valid Coconut_ but Coconut builds on top of Python a suite of _simple, elegant utilities for functional programming_.
Why use Coconut? Coconut is built to be useful. Coconut enhances the repertoire of Python programmers to include the tools of modern functional programming, in such a way that those tools are _easy_ to use and immensely _powerful;_ that is, Coconut does to functional programming what Python did to imperative programming. And Coconut code runs the same on _any Python version_, making the Python 2/3 split a thing of the past.
Specifically, Coconut adds to Python _built-in, syntactical support_ for:
- pattern-matching
- algebraic data types
- destructuring assignment
- partial application
- lazy lists
- function composition
- prettier lambdas
- infix notation
- pipeline-style programming
- operator functions
- tail call optimization
- where statements
and much more!
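To give a quick taste before the case studies below, here is a small, illustrative snippet touching a few of these features (pipelines, operator functions, and partial application):
```coconut
5 |> range |> list |> print  # pipeline-style programming: prints [0, 1, 2, 3, 4]
double = (*)$(2)             # an operator function, partially applied
3 |> double |> print         # prints 6
```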
### Interactive Tutorial
This tutorial is non-interactive. To get an interactive tutorial instead, check out [Coconut's interactive tutorial](https://hmcfabfive.github.io/coconut-tutorial).
Note, however, that the interactive tutorial is less up-to-date than this one and thus may contain old, deprecated syntax (though Coconut will let you know if you encounter such a situation) as well as outdated idioms (meaning that the example code in the interactive tutorial is likely to be much less elegant than the example code here).
### Installation
At its very core, Coconut is a compiler that turns Coconut code into Python code. That means that anywhere where you can use a Python script, you can also use a compiled Coconut script. To access that core compiler, Coconut comes with a command-line utility, which can
- compile single Coconut files or entire Coconut projects,
- interpret Coconut code on-the-fly, and
- hook into existing Python applications like IPython/Jupyter and MyPy.
Installing Coconut, including all the features above, is drop-dead simple. Just
1. install [Python](https://www.python.org/downloads/),
2. open a command-line prompt,
3. and enter:
```
pip install coconut
```
_Note: If you are having trouble installing Coconut, try following the debugging steps in the [installation section of Coconut's documentation](./DOCS.md#installation)._
To check that your installation is functioning properly, try entering into the command line
```
coconut -h
```
which should display Coconut's command-line help.
_Note: If you're having trouble, or if anything mentioned in this tutorial doesn't seem to work for you, feel free to [ask for help on Gitter](https://gitter.im/evhub/coconut) and somebody will try to answer your question as soon as possible._
## Starting Out
### Using the Interpreter
Now that you've got Coconut installed, the obvious first thing to do is to play around with it. To launch the Coconut interpreter, just go to the command line and type
```
coconut
```
and you should see something like
```coconut
Coconut Interpreter vX.X.X:
(enter 'exit()' or press Ctrl-D to end)
>>>
```
which is Coconut's way of telling you you're ready to start entering code for it to evaluate. So let's do that!
In case you missed it earlier, _all valid Python 3 is valid Coconut_. That doesn't mean compiled Coconut will only run on Python 3—in fact, compiled Coconut will run the same on any Python version—but it does mean that only Python 3 code is guaranteed to compile as Coconut code.
That means that if you're familiar with Python, you're already familiar with a good deal of Coconut's core syntax and Coconut's entire standard library. To show that, let's try entering some basic Python into the Coconut interpreter. For example:
```coconut_pycon
>>> "hello, world!"
'hello, world!'
>>> 1 + 1
2
```
### Writing Coconut Files
Of course, while being able to interpret Coconut code on-the-fly is a great thing, it wouldn't be very useful without the ability to write and compile larger programs. To that end, it's time to write our first Coconut program: "hello, world!" Coconut-style.
First, we're going to need to create a file to put our code into. The file extension for Coconut source files is `.coco`, so let's create the new file `hello_world.coco`. After you do that, you should take the time now to set up your text editor to properly highlight Coconut code. For instructions on how to do that, see the documentation on [Coconut syntax highlighting](./DOCS.md#syntax-highlighting).
Now let's put some code in our `hello_world.coco` file. Unlike in Python, where headers like
```coconut_python
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function, absolute_import, unicode_literals, division
```
are common and often very necessary, the Coconut compiler will automatically take care of all of that for you, so all you need to worry about is your own code. To that end, let's add the code for our "hello, world!" program.
In pure Python 3, "hello, world!" is
```coconut_python
print("hello, world!")
```
and while that will work in Coconut, it's equally valid to use a pipeline-style approach, which is what we'll do, and write
```coconut
"hello, world!" |> print
```
which should let you see very clearly how Coconut's `|>` operator enables pipeline-style programming: it allows an object to be passed along from function to function, with a different operation performed at each step. In this case, we are piping the object `"hello, world!"` into the operation `print`. Now let's save our simple "hello, world!" program, and try to run it.
### Using the Compiler
Compiling Coconut files and projects with the Coconut command-line utility is incredibly simple. Just `cd` into the directory of your `hello_world.coco` file and type
```
coconut hello_world.coco
```
which should give the output
```
Coconut: Compiling hello_world.coco ...
Coconut: Compiled to hello_world.py .
```
and deposit a new `hello_world.py` file in the same directory as the `hello_world.coco` file. You should then be able to run that file with
```
python hello_world.py
```
which should produce `hello, world!` as the output.
_Note: You can compile and run your code all in one step if you use Coconut's `--run` option (`-r` for short)._
Compiling single files is not the only way to use the Coconut command-line utility, however. We can also compile all the Coconut files in a given directory simply by passing that directory as the first argument, which will get rid of the need to run the same Coconut header code in each file by storing it in a `__coconut__.py` file in the same directory.
The Coconut compiler supports a large variety of different compilation options, the help for which can always be accessed by entering `coconut -h` into the command line. One of the most useful of these is `--line-numbers` (or `-l` for short). Using `--line-numbers` will add the line numbers of your source code as comments in the compiled code, allowing you to see what line in your source code corresponds to a line in the compiled code where an error occurred, for ease of debugging.
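For example, assuming you still have the `hello_world.coco` file from above, compiling it with line-number comments enabled would look like:
```
coconut --line-numbers hello_world.coco
```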
_Note: If you don't need the full control of the Coconut compiler, you can also [access your Coconut code just by importing it](./DOCS.md#automatic-compilation), either from the Coconut interpreter, or in any Python file where you import [`coconut.api`](./DOCS.md#coconut-api)._
### Using IPython/Jupyter
Although all different types of programming can benefit from using more functional techniques, scientific computing, perhaps more than any other field, lends itself very well to functional programming, an observation the case studies in this tutorial illustrate well. That's why Coconut aims to provide extensive support for the established tools of scientific computing in Python.
To that end, Coconut provides [built-in IPython/Jupyter support](./DOCS.md#ipython-jupyter-support). To launch a Jupyter notebook with Coconut, just enter the command
```
coconut --jupyter notebook
```
_Alternatively, to launch the Jupyter interpreter with Coconut as the kernel, run `coconut --jupyter console` instead. Additionally, you can launch an interactive Coconut Jupyter console initialized from the current namespace by inserting `from coconut import embed; embed()` into your code, which can be a very useful debugging tool._
### Case Studies
Because Coconut is built to be useful, the best way to demo it is to show it in action. To that end, the majority of this tutorial will be showing how to apply Coconut to solve particular problems, which we'll call case studies.
These case studies are not intended to provide a complete picture of all of Coconut's features. For that, see Coconut's [documentation](./DOCS.md). Instead, they are intended to show how Coconut can actually be used to solve practical programming problems.
## Case Study 1: `factorial`
In the first case study we will be defining a `factorial` function, that is, a function that computes `n!` where `n` is an integer `>= 0`. This is somewhat of a toy example, since Python can fairly easily do this, but it will serve as a good showcase of some of the basic features of Coconut and how they can be used to great effect.
To start off with, we're going to have to decide what sort of an implementation of `factorial` we want. There are many different ways to tackle this problem, but for the sake of concision we'll split them into four major categories: imperative, recursive, iterative, and `addpattern`.
### Imperative Method
The imperative approach is the way you'd write `factorial` in a language like C. Imperative approaches involve lots of state change, where variables are regularly modified and loops are liberally used. In Coconut, the imperative approach to the `factorial` problem looks like this:
```coconut
def factorial(n):
"""Compute n! where n is an integer >= 0."""
if n `isinstance` int and n >= 0:
acc = 1
for x in range(1, n+1):
acc *= x
return acc
else:
raise TypeError("the argument to factorial must be an integer >= 0")
# Test cases:
-1 |> factorial |> print # TypeError
0.5 |> factorial |> print # TypeError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
Before we delve into what exactly is happening here, let's give it a run and make sure the test cases check out. If we were really writing a Coconut program, we'd want to save and compile an actual file, but since we're just playing around, let's try copy-pasting into the interpreter. Here, you should get two `TypeErrors`, then `1`, then `6`.
Now that we've verified it works, let's take a look at what's going on. Since the imperative approach is a fundamentally non-functional method, Coconut can't help us improve this example very much. Even here, though, the use of Coconut's infix notation (where the function is put in-between its arguments, surrounded in backticks) in `` n `isinstance` int `` makes the code slightly cleaner and easier to read.
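To see infix notation on its own, try entering these into the interpreter (a quick sketch, using only built-ins we've already seen):
```coconut_pycon
>>> 5 `isinstance` int
True
>>> "5" `isinstance` int
False
```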
### Recursive Method
The recursive approach is the first of the fundamentally functional approaches, in that it doesn't involve the state change and loops of the imperative approach. Recursive approaches avoid the need to change variables by making that variable change implicit in the recursive function call. Here's the recursive approach to the `factorial` problem in Coconut:
```coconut
def factorial(n):
"""Compute n! where n is an integer >= 0."""
match n:
case 0:
return 1
case x `isinstance` int if x > 0:
return x * factorial(x-1)
else:
raise TypeError("the argument to factorial must be an integer >= 0")
# Test cases:
-1 |> factorial |> print # TypeError
0.5 |> factorial |> print # TypeError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
Go ahead and copy and paste the code and tests into the interpreter. You should get the same test results as you got for the imperative version—but you can probably tell there's quite a lot more going on here than in the imperative version. That's intentional: Coconut is intended for functional programming, not imperative programming, and so its new features are built to be most useful when programming in a functional style.
Let's take a look at the specifics of the syntax in this example. The first thing we see is `match n`. This statement starts a `case` block, in which only `case` statements can occur. Each `case` statement will attempt to match its given pattern against the value in the `case` block. Only the first successful match inside of any given `case` block will be executed. When a match is successful, any variable bindings in that match will also be performed. Additionally, as is true in this case, `case` statements can also have `if` guards that will check the given condition before the match is considered final. Finally, after the `case` block, an `else` statement is allowed, which will only be executed if no `case` statement is.
Specifically, in this example, the first `case` statement checks whether `n` matches to `0`. If it does, it executes `return 1`. Then the second `case` statement checks whether `n` matches to `` x `isinstance` int ``, which checks that `n` is an `int` (using `isinstance`) and assigns `x = n` if so, then checks whether `x > 0`, and if so, executes `return x * factorial(x-1)`. If neither of those two statements are executed, the `else` statement triggers and executes `raise TypeError("the argument to factorial must be an integer >= 0")`.
Although this example is very basic, pattern-matching is both one of Coconut's most powerful and most complicated features. As a general intuitive guide, it is helpful to think _assignment_ whenever you see the keyword `match`. A good way to showcase this is that all `match` statements can be converted into equivalent destructuring assignment statements, which are also valid Coconut. In this case, the destructuring assignment equivalent to the `factorial` function above would be:
```coconut
def factorial(n):
"""Compute n! where n is an integer >= 0."""
try:
# The only value that can be assigned to 0 is 0, since 0 is an
# immutable constant; thus, this assignment fails if n is not 0.
0 = n
except MatchError:
pass
else:
return 1
try:
# This attempts to assign n to x, which has been declared to be
# an int; since only an int can be assigned to an int, this
# fails if n is not an int.
x `isinstance` int = n
except MatchError:
pass
else: if x > 0: # in Coconut, statements can be nested on the same line
return x * factorial(x-1)
raise TypeError("the argument to factorial must be an integer >= 0")
# Test cases:
-1 |> factorial |> print # TypeError
0.5 |> factorial |> print # TypeError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
First, copy and paste! While this destructuring assignment equivalent should work, it is much more cumbersome to write when you expect that the pattern-matching might fail, which is why `match` statement syntax exists. But the destructuring assignment equivalent illuminates what exactly the pattern-matching is doing, by making it clear that `match` statements are really just fancy destructuring assignment statements. In fact, to be explicit about using destructuring assignment instead of normal assignment, the `match` keyword can be put before a destructuring assignment statement to signify it as such.
It will be helpful to, as we continue to use Coconut's pattern-matching and destructuring assignment statements in further examples, think _assignment_ whenever you see the keyword `match`.
Next, we can make a couple of simple improvements to our `factorial` function. First, we don't actually need to assign `x` as a new variable, since it has the same value as `n`, so if we use `_` instead of `x`, Coconut won't ever actually assign the variable. Thus, we can rewrite our `factorial` function as:
```coconut
def factorial(n):
"""Compute n! where n is an integer >= 0."""
match n:
case 0:
return 1
case _ `isinstance` int if n > 0:
return n * factorial(n-1)
else:
raise TypeError("the argument to factorial must be an integer >= 0")
# Test cases:
-1 |> factorial |> print # TypeError
0.5 |> factorial |> print # TypeError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
Copy, paste! This new `factorial` function should behave exactly the same as before.
Second, we can replace the `` _ `isinstance` int `` pattern with the class pattern `int()`, which, when used with no arguments like that, is equivalent. Thus, we can again rewrite our `factorial` function to:
```coconut
def factorial(n):
"""Compute n! where n is an integer >= 0."""
match n:
case 0:
return 1
case int() if n > 0:
return n * factorial(n-1)
else:
raise TypeError("the argument to factorial must be an integer >= 0")
# Test cases:
-1 |> factorial |> print # TypeError
0.5 |> factorial |> print # TypeError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
Up until now, for the recursive method, we have only dealt with pattern-matching, but there's actually another way that Coconut allows us to improve our `factorial` function. Coconut performs automatic tail call optimization, which means that whenever a function directly returns a call to another function, Coconut will optimize away the additional call. Thus, we can improve our `factorial` function by rewriting it to use a tail call:
```coconut
def factorial(n, acc=1):
"""Compute n! where n is an integer >= 0."""
match n:
case 0:
return acc
case int() if n > 0:
return factorial(n-1, acc*n)
else:
raise TypeError("the argument to factorial must be an integer >= 0")
# Test cases:
-1 |> factorial |> print # TypeError
0.5 |> factorial |> print # TypeError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
Copy, paste! This new `factorial` function is equivalent to the original version, with the exception that it will never raise a `RecursionError` (a `RuntimeError` on older Python versions) due to reaching Python's maximum recursion depth, since Coconut will optimize away the tail call.
### Iterative Method
The other main functional approach is the iterative one. Iterative approaches avoid the need for state change and loops by using higher-order functions, those that take other functions as their arguments, like `map` and `reduce`, to abstract out the basic operations being performed. In Coconut, the iterative approach to the `factorial` problem is:
```coconut
def factorial(n):
"""Compute n! where n is an integer >= 0."""
match n:
case 0:
return 1
case int() if n > 0:
return range(1, n+1) |> reduce$(*)
else:
raise TypeError("the argument to factorial must be an integer >= 0")
# Test cases:
-1 |> factorial |> print # TypeError
0.5 |> factorial |> print # TypeError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
Copy, paste! This definition differs from the recursive definition only by one line. That's intentional: because both the iterative and recursive approaches are functional approaches, Coconut can provide a great assist in making the code cleaner and more readable. The one line that differs is this one:
```coconut
return range(1, n+1) |> reduce$(*)
```
Let's break down what's happening on this line. First, the `range` function constructs an iterator of all the numbers that need to be multiplied together. Then, it is piped into the function `reduce$(*)`, which does that multiplication. But how? What is `reduce$(*)`?
We'll start with the base, the `reduce` function. `reduce` used to exist as a built-in in Python 2, and Coconut brings it back. `reduce` is a higher-order function that takes a function of two arguments as its first argument and an iterator as its second argument. It applies that function across the iterator by starting with the first element, then repeatedly calling the function on the accumulated result so far and the next element, until the iterator is exhausted. Here's a visual representation:
```coconut
reduce(f, (a, b, c, d))

acc                    iter
                       (a, b, c, d)
a                      (b, c, d)
f(a, b)                (c, d)
f(f(a, b), c)          (d)
f(f(f(a, b), c), d)

return acc
```
Now let's take a look at what we do to `reduce` to make it multiply all the numbers we feed into it together. The Coconut code that we saw for that was `reduce$(*)`. There are two different Coconut constructs being used here: the operator function for multiplication in the form of `(*)`, and partial application in the form of `$`.
First, the operator function. In Coconut, a function form of any operator can be retrieved by surrounding that operator in parentheses. In this case, `(*)` is roughly equivalent to `lambda x, y: x*y`, but much cleaner and neater. In Coconut's lambda syntax, `(*)` is also equivalent to `(x, y) => x*y`, which is the form we will use from now on for all lambdas, even though both are legal Coconut, because Python's `lambda` syntax is too ugly and bulky to use regularly.
_Note: If Coconut's `--strict` mode is enabled, which will force your code to obey certain cleanliness standards, it will raise an error whenever Python `lambda` expressions are used._
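As a quick interpreter check of operator functions and Coconut's arrow lambdas (a sketch using only what was just described):
```coconut_pycon
>>> (*)(3, 4)
12
>>> ((x, y) => x*y)(3, 4)
12
```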
Second, the partial application. Think of partial application as _lazy function calling_, and `$` as the _lazy-ify_ operator, where lazy just means "don't evaluate this until you need to." In Coconut, if a function call is prefixed by a `$`, like in this example, instead of actually performing the function call, a new function is returned with the given arguments already provided to it, so that when it is then called, it will be called with both the partially-applied arguments and the new arguments, in that order. In this case, `reduce$(*)` is roughly equivalent to `(*args, **kwargs) => reduce((*), *args, **kwargs)`.
_You can partially apply arguments in any order using `?` in place of missing arguments, as in `to_binary = int$(?, 2)`._
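Here's a small sketch of partial application in action, including the `?` form mentioned above (`double_all` and `to_binary` are hypothetical names):
```coconut_pycon
>>> double_all = map$(x => x*2)
>>> [1, 2, 3] |> double_all |> list
[2, 4, 6]
>>> to_binary = int$(?, 2)
>>> to_binary("101")
5
```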
Putting it all together, we can see how the single line of code
```coconut
range(1, n+1) |> reduce$(*)
```
is able to compute the proper factorial, without using any state or loops, only higher-order functions, in true functional style. By supplying the tools we use here like partial application (`$`), pipeline-style programming (`|>`), higher-order functions (`reduce`), and operator functions (`(*)`), Coconut enables this sort of functional programming to be done cleanly, neatly, and easily.
### `addpattern` Method
While the iterative approach is very clean, there are still some bulky pieces—looking at the iterative version below, you can see that it takes three entire indentation levels to get from the function definition to the actual objects being returned:
```coconut
def factorial(n):
"""Compute n! where n is an integer >= 0."""
match n:
case 0:
return 1
case int() if n > 0:
return range(1, n+1) |> reduce$(*)
else:
raise TypeError("the argument to factorial must be an integer >= 0")
```
By making use of the [Coconut `addpattern` syntax](./DOCS.md#addpattern), we can take that from three indentation levels down to one. Take a look:
```coconut
def factorial(0) = 1
addpattern def factorial(int() as n if n > 0) =
"""Compute n! where n is an integer >= 0."""
range(1, n+1) |> reduce$(*)
# Test cases:
-1 |> factorial |> print # MatchError
0.5 |> factorial |> print # MatchError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
Copy, paste! This should work exactly like before, except now it raises `MatchError` as a fall through instead of `TypeError`. There are three major new concepts to talk about here: `addpattern`, of course, assignment function notation, and pattern-matching function definition—how both of the functions above are defined.
First, assignment function notation. This one's pretty straightforward. If a function is defined with an `=` instead of a `:`, the last line is required to be an expression, and is automatically returned.
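For example, a hypothetical `double` function written with assignment function notation:
```coconut
def double(n) = n * 2
3 |> double |> print  # 6
```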
Second, pattern-matching function definition. Pattern-matching function definition does exactly that—pattern-matches against all the arguments that are passed to the function. Unlike normal function definition, however, if the pattern doesn't match (if for example the wrong number of arguments are passed), your function will raise a `MatchError`. Finally, like destructuring assignment, if you want to be more explicit about using pattern-matching function definition, you can add a `match` before the `def`. In this case, we're also using one new pattern-matching construct, the `as` match, which matches against the pattern on the left and assigns the result to the name on the right.
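As a minimal sketch of pattern-matching function definition and the `as` match, here's a hypothetical `half` function that only accepts an `int`:
```coconut
match def half(int() as n) = n / 2
6 |> half |> print  # 3.0
"abc" |> half       # MatchError
```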
Third, `addpattern`. `addpattern` creates a new pattern-matching function by adding the new pattern as an additional case to the old pattern-matching function it is replacing. Thus, `addpattern` can be thought of as doing exactly what it says—it adds a new pattern to an existing pattern-matching function.
Finally, not only can we rewrite the iterative approach using `addpattern`, as we did above, we can also rewrite the recursive approach using `addpattern`, like so:
```coconut
def factorial(0) = 1
addpattern def factorial(int() as n if n > 0) =
"""Compute n! where n is an integer >= 0."""
n * factorial(n - 1)
# Test cases:
-1 |> factorial |> print # MatchError
0.5 |> factorial |> print # MatchError
0 |> factorial |> print # 1
3 |> factorial |> print # 6
```
Copy, paste! It should work exactly like before, except, as above, with `TypeError` replaced by `MatchError`.
## Case Study 2: `quick_sort`
In the second case study, we will be implementing the [quick sort algorithm](https://en.wikipedia.org/wiki/Quicksort). We will implement two versions: first, a `quick_sort` function that takes in a list and outputs a list, and second, a `quick_sort` function that takes in an iterator and outputs an iterator.
### Sorting a Sequence
First up is `quick_sort` for lists. We're going to use a recursive `addpattern`-based approach to tackle this problem—a similar approach to the very last `factorial` function we wrote, using `addpattern` to reduce the amount of indentation we're going to need. Without further ado, here's our implementation of `quick_sort` for lists:
```coconut
def quick_sort([]) = []
addpattern def quick_sort([head] + tail) =
"""Sort the input sequence using the quick sort algorithm."""
quick_sort(left) + [head] + quick_sort(right) where:
left = [x for x in tail if x < head]
right = [x for x in tail if x >= head]
# Test cases:
[] |> quick_sort |> print # []
[3] |> quick_sort |> print # [3]
[0,1,2,3,4] |> quick_sort |> print # [0,1,2,3,4]
[4,3,2,1,0] |> quick_sort |> print # [0,1,2,3,4]
[3,0,4,2,1] |> quick_sort |> print # [0,1,2,3,4]
```
Copy, paste! Two new features here: head-tail pattern-matching and `where` statements.
First, `where` statements are extremely straightforward. In fact, I bet you've already figured out what they do from the code above. A `where` statement is just a way to compute something in the context of some set of assignment statements.
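For instance, here's a standalone sketch of a `where` statement with hypothetical names:
```coconut
c = (a**2 + b**2)**0.5 where:
    a = 3
    b = 4
c |> print  # 5.0
```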
Second, head-tail pattern-matching, which you can see here as `[head] + tail`, simply follows the form of a list or tuple added to a variable. When this appears in any pattern-matching context, the value being matched against will be treated as a sequence, the list or tuple matched against the beginning of that sequence, and the rest of it bound to the variable. In this case, we use the head-tail pattern to remove the head so we can use it as the pivot for splitting the rest of the list.
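Since head-tail patterns work in any pattern-matching context, here's a quick sketch using the destructuring assignment we saw earlier:
```coconut
[head] + tail = [1, 2, 3]
print(head, tail)  # 1 [2, 3]
```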
### Sorting an Iterator
Now it's time to try `quick_sort` for iterators. Our method for tackling this problem is going to be a combination of the recursive and iterative approaches we used for the `factorial` problem, in that we're going to be lazily building up an iterator, and we're going to be doing it recursively. Here's the code:
```coconut
def quick_sort(l):
"""Sort the input iterator using the quick sort algorithm."""
match [head] :: tail in l:
tail = reiterable(tail)
yield from quick_sort(left) :: [head] :: quick_sort(right) where:
left = (x for x in tail if x < head)
right = (x for x in tail if x >= head)
# By yielding nothing if the match falls through, we implicitly return an empty iterator.
# Test cases:
[] |> quick_sort |> list |> print # []
[3] |> quick_sort |> list |> print # [3]
[0,1,2,3,4] |> quick_sort |> list |> print # [0,1,2,3,4]
[4,3,2,1,0] |> quick_sort |> list |> print # [0,1,2,3,4]
[3,0,4,2,1] |> quick_sort |> list |> print # [0,1,2,3,4]
```
Copy, paste! This `quick_sort` algorithm uses a bunch of new constructs, so let's go over them.
First, the `::` operator, which appears here both in pattern-matching and by itself. In essence, the `::` operator is lazy `+` for iterators. On its own, it takes two iterators and concatenates, or chains, them together, and it does this lazily, not evaluating anything until it's needed, so it can be used for making infinite iterators. In pattern-matching, it inverts that operation, destructuring the beginning of an iterator into a pattern, and binding the rest of that iterator to a variable.
Which brings us to the second new thing, `match ... in ...` notation. The notation
```coconut
match pattern in item:
<body>
else:
<else>
```
is shorthand for
```coconut
match item:
case pattern:
<body>
else:
<else>
```
that avoids the need for an additional level of indentation when only one `match` is being performed.
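Putting `::` chaining and `match ... in ...` together, a tiny sketch:
```coconut
(1, 2) :: (3, 4) |> list |> print  # [1, 2, 3, 4]
match [x] :: rest in (1, 2, 3):
    print(x, list(rest))  # 1 [2, 3]
```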
The third new construct is the [Coconut built-in `reiterable`](./DOCS.md#reiterable). There is a problem in doing immutable functional programming with Python iterators: whenever an element of an iterator is accessed, it's lost. `reiterable` solves this problem by allowing the iterable it's called on to be iterated over multiple times while still yielding the same result each time.
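Here's a quick sketch of both the problem and the fix:
```coconut
it = iter([1, 2, 3])
it |> list |> print   # [1, 2, 3]
it |> list |> print   # [] (the iterator is exhausted)
rit = reiterable(iter([1, 2, 3]))
rit |> list |> print  # [1, 2, 3]
rit |> list |> print  # [1, 2, 3] (safe to iterate again)
```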
Finally, although it's not a new construct, since it exists in Python 3, the use of `yield from` here deserves a mention. In Python, `yield` is the statement used to construct iterators, functioning much like `return`, with the exception that multiple `yield`s can be encountered, and each one will produce another element. `yield from` is very similar, except instead of adding a single element to the produced iterator, it adds another whole iterator.
Putting it all together, here's our `quick_sort` function again:
```coconut
def quick_sort(l):
"""Sort the input iterator using the quick sort algorithm."""
match [head] :: tail in l:
tail = reiterable(tail)
yield from quick_sort(left) :: [head] :: quick_sort(right) where:
left = (x for x in tail if x < head)
right = (x for x in tail if x >= head)
# By yielding nothing if the match falls through, we implicitly return an empty iterator.
```
The function first attempts to split `l` into an initial element and a remaining iterator. If `l` is the empty iterator, that match will fail, and it will fall through, yielding the empty iterator (that's how the function handles the base case). Otherwise, we make a copy of the rest of the iterator, and yield the join of (the quick sort of all the remaining elements less than the initial element), (the initial element), and (the quick sort of all the remaining elements greater than the initial element).
The advantages of the basic approach used here, heavy use of iterators and recursion, as opposed to the classical imperative approach, are numerous. First, our approach is more clear and more readable, since it is describing _what_ `quick_sort` is instead of _how_ `quick_sort` could be implemented. Second, our approach is _lazy_ in that our `quick_sort` won't evaluate any data until it needs it. Finally, and although this isn't relevant for `quick_sort`, it is relevant in many other cases, an example of which we'll see later in this tutorial, our approach allows for working with _infinite_ series just as if they were finite.
And Coconut makes programming in such an advantageous functional approach significantly easier. In this example, Coconut's pattern-matching lets us easily split the given iterator, and Coconut's `::` iterator joining operator lets us easily put it back together again in sorted order.
## Case Study 3: `vector` Part I
In the next case study, we'll be doing something slightly different—instead of defining a function, we'll be creating an object. Specifically, we're going to try to implement an immutable n-vector that supports all the basic vector operations.
In functional programming, it is often very desirable to define _immutable_ objects, those that can't be changed once created—like Python's strings or tuples. Like strings and tuples, immutable objects are useful for a wide variety of reasons:
- they're easier to reason about, since you can be guaranteed they won't change,
- they're hashable and pickleable, so they can be used as keys and serialized,
- they're significantly more efficient since they require much less overhead,
- and when combined with pattern-matching, they can be used as what are called _algebraic data types_ to build up and then match against large, complicated data structures very easily.
### 2-Vector
Coconut's `data` statement brings the power and utility of _immutable, algebraic data types_ to Python, and it is this that we will be using to construct our `vector` type. To demonstrate the syntax of `data` statements, we'll start by defining a simple 2-vector. Our vector will have one special method `__abs__` which will compute the vector's magnitude, defined as the square root of the sum of the squares of the elements. Here's our 2-vector:
```coconut
data vector2(x, y):
"""Immutable 2-vector."""
def __abs__(self) =
"""Return the magnitude of the 2-vector."""
(self.x**2 + self.y**2)**0.5
# Test cases:
vector2(1, 2) |> print # vector2(x=1, y=2)
vector2(3, 4) |> abs |> print # 5
vector2(1, 2) |> fmap$(x => x*2) |> print # vector2(x=2, y=4)
v = vector2(2, 3)
v.x = 7 # AttributeError
```
Copy, paste! This example shows the basic syntax of `data` statements:
```coconut
data <name>(<attributes>):
<body>
```
where `<name>` and `<body>` are the same as the equivalent `class` definition, but `<attributes>` are the different attributes of the data type, in order that the constructor should take them as arguments. In this case, `vector2` is a data type of two attributes, `x` and `y`, with one defined method, `__abs__`, that computes the magnitude. As the test cases show, we can then create, print, but _not modify_ instances of `vector2`.
One other thing to call attention to here is the use of the [Coconut built-in `fmap`](./DOCS.md#fmap). `fmap` allows you to map functions over algebraic data types. Coconut's `data` types do support iteration, so the standard `map` works on them, but it doesn't return another object of the same data type. In this case, `fmap` is simply `map` plus a call to the object's constructor.
### n-Vector Constructor
Now that we've got the 2-vector under our belt, let's move back to our original, more complicated problem: n-vectors, that is, vectors of arbitrary length. We're going to try to make our n-vector support all the basic vector operations, but we'll start out with just the `data` definition and the constructor:
```coconut
data vector(*pts):
"""Immutable n-vector."""
def __new__(cls, *pts):
"""Create a new vector from the given pts."""
match [v `isinstance` vector] in pts:
return v # vector(v) where v is a vector should return v
else:
return pts |*> makedata$(cls) # accesses base constructor
# Test cases:
vector(1, 2, 3) |> print # vector(*pts=(1, 2, 3))
vector(4, 5) |> vector |> print # vector(*pts=(4, 5))
```
Copy, paste! The big new thing here is how to write `data` constructors. Since `data` types are immutable, `__init__` construction won't work. Instead, a different special method `__new__` is used, which must return the newly constructed instance, and unlike most methods, takes the class not the object as the first argument. Since `__new__` needs to return a fully constructed instance, in almost all cases it will be necessary to access the underlying `data` constructor. To achieve this, Coconut provides the [built-in `makedata` function](./DOCS.md#makedata), which takes a data type and calls its underlying `data` constructor with the rest of the arguments.
In this case, the constructor checks whether nothing but another `vector` was passed, in which case it returns that, otherwise it returns the result of passing the arguments to the underlying constructor, the form of which is `vector(*pts)`, since that is how we declared the data type. We use sequence pattern-matching to determine whether we were passed a single vector, which is just a list or tuple of patterns to match against the contents of the sequence.
One important pitfall that's worth pointing out here: in this case, you must use `` v `isinstance` vector `` rather than `vector() as v`, since, as we'll see later, patterns like `vector()` behave differently for `data` types than normal classes. In this case, `vector()` would only match a _zero-length_ vector, not just any vector.
The other new construct used here is the `|*>`, or star-pipe, operator, which functions exactly like the normal pipe, except that instead of calling the function with one argument, it calls it with as many arguments as there are elements in the sequence passed into it. The difference between `|>` and `|*>` is exactly analogous to the difference between `f(args)` and `f(*args)`.
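To see `makedata` and the star-pipe outside of the `vector` class, here's a minimal sketch using a hypothetical two-field data type:
```coconut
data pair(x, y)
makedata(pair, 1, 2) |> print        # pair(x=1, y=2)
(1, 2) |*> makedata$(pair) |> print  # pair(x=1, y=2)
```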
### n-Vector Methods
Now that we have a constructor for our n-vector, it's time to write its methods. First up is `__abs__`, which should compute the vector's magnitude. This will be slightly more complicated than with the 2-vector, since we have to make it work over an arbitrary number of `pts`. Fortunately, we can use Coconut's pipeline-style programming and partial application to make it simple:
```coconut
def __abs__(self) =
"""Return the magnitude of the vector."""
self.pts |> map$(.**2) |> sum |> (.**0.5)
```
The basic algorithm here is: square each element, sum them all, then take the square root of the result. The one new construct here is the `(.**2)` and `(.**0.5)` syntax, which are effectively equivalent to `(x => x**2)` and `(x => x**0.5)`, respectively (though the `(.**2)` syntax produces a pickleable object). This syntax works for all [operator functions](./DOCS.md#operator-functions), so you can do things like `(1-.)` or `(cond() or .)`.
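For instance, a couple of quick sketches of these operator partials in action:
```coconut
[1, 2, 3] |> map$(.**2) |> list |> print  # [1, 4, 9]
5 |> (1-.) |> print                       # -4
```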
Next up is vector addition. The goal here is to add two vectors of equal length by adding their components. To do this, we're going to make use of Coconut's ability to perform pattern-matching, or in this case destructuring assignment, to data types, like so:
```coconut
def __add__(self, vector(*other_pts)
if len(other_pts) == len(self.pts)) =
"""Add two vectors together."""
map((+), self.pts, other_pts) |*> vector
```
There are a couple of new constructs here, but the main notable one is the pattern-matching `vector(*other_pts)` which showcases the syntax for pattern-matching against data types: it mimics exactly the original `data` declaration of that data type. In this case, `vector(*other_pts)` will only match a vector, raising a `MatchError` otherwise, and if it does match a vector, will assign the vector's `pts` attribute to the variable `other_pts`.
Next is vector subtraction, which is just like vector addition, but with `(-)` instead of `(+)`:
```coconut
def __sub__(self, vector(*other_pts)
if len(other_pts) == len(self.pts)) =
"""Subtract one vector from another."""
map((-), self.pts, other_pts) |*> vector
```
One thing to note here is that unlike the other operator functions, `(-)` can either mean negation or subtraction, the meaning of which will be inferred based on how many arguments are passed, 1 for negation, 2 for subtraction. To show this, we'll use the same `(-)` function to implement vector negation, which should simply negate each element:
```coconut
def __neg__(self) =
"""Retrieve the negative of the vector."""
self.pts |> map$(-) |*> vector
```
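Before moving on, you can check the one-argument versus two-argument behavior of `(-)` directly in the interpreter:
```coconut_pycon
>>> (-)(5)
-5
>>> (-)(5, 2)
3
```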
The last method we'll implement is multiplication. This one is a little bit tricky, since mathematically, there are a whole bunch of different ways to multiply vectors. For our purposes, we're just going to look at two: between two vectors of equal length, we want to compute the dot product, defined as the sum of the corresponding elements multiplied together, and between a vector and a scalar, we want to compute the scalar multiple, which is just each element multiplied by that scalar. Here's our implementation:
```coconut
def __mul__(self, other):
"""Scalar multiplication and dot product."""
match vector(*other_pts) in other:
assert len(other_pts) == len(self.pts)
return map((*), self.pts, other_pts) |> sum # dot product
else:
return self.pts |> map$(.*other) |*> vector # scalar multiple
def __rmul__(self, other) =
"""Necessary to make scalar multiplication commutative."""
self * other
```
The first thing to note here is that unlike with addition and subtraction, where we wanted to raise an error if the vector match failed, here, we want to do scalar multiplication if the match fails, so instead of using destructuring assignment, we use a `match` statement. The second thing to note here is the combination of pipeline-style programming, partial application, operator functions, and higher-order functions we're using to compute the dot product and scalar multiple. For the dot product, we map multiplication over the two vectors, then sum the result. For the scalar multiple, we take the original points, map multiplication by the scalar over them, then use them to make a new vector.
Finally, putting everything together:
```coconut
data vector(*pts):
"""Immutable n-vector."""
def __new__(cls, *pts):
"""Create a new vector from the given pts."""
match [v `isinstance` vector] in pts:
return v # vector(v) where v is a vector should return v
else:
return pts |*> makedata$(cls) # accesses base constructor
def __abs__(self) =
"""Return the magnitude of the vector."""
self.pts |> map$(.**2) |> sum |> (.**0.5)
def __add__(self, vector(*other_pts)
if len(other_pts) == len(self.pts)) =
"""Add two vectors together."""
map((+), self.pts, other_pts) |*> vector
def __sub__(self, vector(*other_pts)
if len(other_pts) == len(self.pts)) =
"""Subtract one vector from another."""
map((-), self.pts, other_pts) |*> vector
def __neg__(self) =
"""Retrieve the negative of the vector."""
self.pts |> map$(-) |*> vector
def __mul__(self, other):
"""Scalar multiplication and dot product."""
match vector(*other_pts) in other:
assert len(other_pts) == len(self.pts)
return map((*), self.pts, other_pts) |> sum # dot product
else:
return self.pts |> map$(.*other) |*> vector # scalar multiplication
def __rmul__(self, other) =
"""Necessary to make scalar multiplication commutative."""
self * other
# Test cases:
vector(1, 2, 3) |> print # vector(*pts=(1, 2, 3))
vector(4, 5) |> vector |> print # vector(*pts=(4, 5))
vector(3, 4) |> abs |> print # 5
vector(1, 2) + vector(2, 3) |> print # vector(*pts=(3, 5))
vector(2, 2) - vector(0, 1) |> print # vector(*pts=(2, 1))
-vector(1, 3) |> print # vector(*pts=(-1, -3))
(vector(1, 2) == "string") |> print # False
(vector(1, 2) == vector(3, 4)) |> print # False
(vector(2, 4) == vector(2, 4)) |> print # True
2*vector(1, 2) |> print # vector(*pts=(2, 4))
vector(1, 2) * vector(1, 3) |> print # 7
```
Copy, paste! Now that was a lot of code. But looking it over, it looks clean, readable, and concise, and it does precisely what we intended it to do: create an algebraic data type for an immutable n-vector that supports the basic vector operations. And we did the whole thing without needing any imperative constructs like state or loops—pure functional programming.
## Case Study 4: `vector_field`
For the final case study, instead of me writing the code, and you looking at it, you'll be writing the code—of course, I won't be looking at it, but I will show you how I would have done it after you give it a shot by yourself.
_The bonus challenge for this section is to write each of the functions we'll be defining in just one line. Try using assignment functions to help with that!_
First, let's introduce the general goal of this case study. We want to write a program that will allow us to produce infinite vector fields that we can iterate over and apply operations to. And in our case, we'll say we only care about vectors with positive components.
Our first step, therefore, is going to be creating a field of all the points with positive `x` and `y` values—that is, the first quadrant of the `x-y` plane, which looks something like this:
```
...
(0,2) ...
(0,1) (1,1) ...
(0,0) (1,0) (2,0) ...
```
But since we want to be able to iterate over that plane, we're going to need to linearize it somehow, and the easiest way to do that is to split it up into diagonals, and traverse the first diagonal, then the second diagonal, and so on, like this:
```
(0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (0, 2), ...
```
### `diagonal_line`
Thus, our first function `diagonal_line(n)` should construct an iterator of all the points, represented as coordinate tuples, in the `n`th diagonal, starting with `(0, 0)` as the `0`th diagonal. Like we said at the start of this case study, this is where I let go and you take over. Using all the tools of functional programming that Coconut provides, give `diagonal_line` a shot. When you're ready to move on, scroll down.
Here are some tests that you can use:
```coconut
diagonal_line(0) `isinstance` (list, tuple) |> print # False (should be an iterator)
diagonal_line(0) |> list |> print # [(0, 0)]
diagonal_line(1) |> list |> print # [(0, 1), (1, 0)]
```
_Hint: the `n`th diagonal should contain `n+1` elements, so try starting with `range(n+1)` and then transforming it in some way._
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
That wasn't so bad, now was it? Now, let's take a look at my solution:
```coconut
def diagonal_line(n) = range(n+1) |> map$(i => (i, n-i))
```
Pretty simple, huh? We take `range(n+1)`, and use `map` to transform it into the right sequence of tuples.
### `linearized_plane`
Now that we've created our diagonal lines, we need to join them together to make the full linearized plane, and to do that we're going to write the function `linearized_plane()`. `linearized_plane` should produce an iterator that goes through all the points in the plane, in order: all the points in `diagonal_line(0)`, then all the points in `diagonal_line(1)`, and so on. `linearized_plane` is going to be, by necessity, an infinite iterator, since it needs to loop through all the points in the plane, which have no end. To help you accomplish this, remember that the `::` operator is lazy, and won't evaluate its operands until they're needed, which means it can be used to construct infinite iterators. When you're ready to move on, scroll down.
Tests:
```coconut
# Note: these tests use $[] notation, which we haven't introduced yet
# but will introduce later in this case study; for now, just run the
# tests, and make sure you get the same result as is in the comment
linearized_plane()$[0] |> print # (0, 0)
linearized_plane()$[:3] |> list |> print # [(0, 0), (0, 1), (1, 0)]
```
_Hint: instead of defining the function as `linearized_plane()`, try defining it as `linearized_plane(n=0)`, where `n` is the diagonal to start at, and use recursion to build up from there._
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
That was a little bit rougher than the first one, but hopefully still not too bad. Let's compare to my solution:
```coconut
def linearized_plane(n=0) = diagonal_line(n) :: linearized_plane(n+1)
```
As you can see, it's a fundamentally simple solution: just use `::` and recursion to join all the diagonals together in order.
### `vector_field`
Now that we have a function that builds up all the points we need, it's time to turn them into vectors, and to do that we'll define the new function `vector_field()`, which should turn all the tuples in `linearized_plane` into vectors, using the n-vector class we defined earlier.
Tests:
```coconut
# You'll need to bring in the vector class from earlier to make these work
vector_field()$[0] |> print # vector(*pts=(0, 0))
vector_field()$[2:3] |> list |> print # [vector(*pts=(1, 0))]
```
_Hint: Remember, the way we defined vector it takes the components as separate arguments, not a single tuple. You may find the [Coconut built-in `starmap`](./DOCS.md#starmap) useful in dealing with that._
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
We're making good progress! Before we move on, check your solution against mine:
```coconut
def vector_field() = linearized_plane() |> starmap$(vector)
```
All we're doing is taking our `linearized_plane` and mapping `vector` over it, but using `starmap` instead of `map` so that `vector` gets called with each element of the tuple as a separate argument.
### Applications
Now that we've built all the functions we need for our vector field, it's time to put it all together and test it. Feel free to substitute in your versions of the functions below:
```coconut
data vector(*pts):
"""Immutable n-vector."""
def __new__(cls, *pts):
"""Create a new vector from the given pts."""
match [v `isinstance` vector] in pts:
return v # vector(v) where v is a vector should return v
else:
return pts |*> makedata$(cls) # accesses base constructor
def __abs__(self) =
"""Return the magnitude of the vector."""
self.pts |> map$(.**2) |> sum |> (.**0.5)
def __add__(self, vector(*other_pts)
if len(other_pts) == len(self.pts)) =
"""Add two vectors together."""
map((+), self.pts, other_pts) |*> vector
def __sub__(self, vector(*other_pts)
if len(other_pts) == len(self.pts)) =
"""Subtract one vector from another."""
map((-), self.pts, other_pts) |*> vector
def __neg__(self) =
"""Retrieve the negative of the vector."""
self.pts |> map$(-) |*> vector
def __mul__(self, other):
"""Scalar multiplication and dot product."""
match vector(*other_pts) in other:
assert len(other_pts) == len(self.pts)
return map((*), self.pts, other_pts) |> sum # dot product
else:
return self.pts |> map$(.*other) |*> vector # scalar multiplication
def __rmul__(self, other) =
"""Necessary to make scalar multiplication commutative."""
self * other
def diagonal_line(n) = range(n+1) |> map$(i => (i, n-i))
def linearized_plane(n=0) = diagonal_line(n) :: linearized_plane(n+1)
def vector_field() = linearized_plane() |> starmap$(vector)
# Test cases:
diagonal_line(0) `isinstance` (list, tuple) |> print # False (should be an iterator)
diagonal_line(0) |> list |> print # [(0, 0)]
diagonal_line(1) |> list |> print # [(0, 1), (1, 0)]
linearized_plane()$[0] |> print # (0, 0)
linearized_plane()$[:3] |> list |> print # [(0, 0), (0, 1), (1, 0)]
vector_field()$[0] |> print # vector(*pts=(0, 0))
vector_field()$[2:3] |> list |> print # [vector(*pts=(1, 0))]
```
Copy, paste! Once you've made sure everything is working correctly (especially if you substituted in your own functions), take a look at the last four tests. You'll notice that they use a new notation, similar to the notation for partial application we saw earlier, but with brackets instead of parentheses. This is the notation for iterator slicing. Similar to how partial application was lazy function calling, iterator slicing is _lazy sequence slicing_. Like with partial application, it is helpful to think of `$` as the _lazy-ify_ operator, in this case turning normal Python slicing, which is evaluated immediately, into lazy iterator slicing, which is evaluated only when the elements in the slice are needed.
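For a standalone sketch of iterator slicing, here's an example using Coconut's built-in `count` (assuming the standard built-in behavior of counting up from `0`):
```coconut
count()$[5:10] |> list |> print  # [5, 6, 7, 8, 9]
```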
With that in mind, now that we've built our vector field, it's time to use iterator slicing to play around with it. Try doing something cool to our vector fields like
- create a `magnitude_field` where each point is that vector's magnitude
- combine entire vector fields together with `map` and the vector addition and multiplication methods we wrote earlier
then use iterator slicing to take out portions and examine them.
## Case Study 5: `vector` Part II
For some of the applications you might want to use your `vector_field` for, it might be desirable to add some useful methods to our `vector`. In this case study, we're going to be focusing on one in particular: `.angle`.
`.angle` will take one argument, another vector, and compute the angle between the two vectors. Mathematically, the angle between two vectors is the arccosine of the dot product of the vectors' respective unit vectors. Thus, before we can implement `.angle`, we're going to need `.unit`. Mathematically, the formula for the unit vector of a given vector is that vector divided by its magnitude. Thus, before we can implement `.unit`, and by extension `.angle`, we'll need to start by implementing division.
### `__truediv__`
Vector division is just scalar division, so we're going to write a `__truediv__` method that takes `self` as the first argument and `other` as the second argument, and returns a new vector the same size as `self` with every element divided by `other`. For an extra challenge, try writing this one in one line using assignment function notation.
Tests:
```coconut
vector(3, 4) / 1 |> print # vector(*pts=(3.0, 4.0))
vector(2, 4) / 2 |> print # vector(*pts=(1.0, 2.0))
```
_Hint: Look back at how we implemented scalar multiplication._
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
Here's my solution for you to check against:
```coconut
def __truediv__(self, other) = self.pts |> map$(x => x/other) |*> vector
```
### `.unit`
Next up, `.unit`. We're going to write a `unit` method that takes just `self` as its argument and returns a new vector the same size as `self` with each element divided by the magnitude of `self`, which we can retrieve with `abs`. This should be a very simple one-line function.
Tests:
```coconut
vector(0, 1).unit() |> print # vector(*pts=(0.0, 1.0))
vector(5, 0).unit() |> print # vector(*pts=(1.0, 0.0))
```
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
Here's my solution:
```coconut
def unit(self) = self / abs(self)
```
### `.angle`
This one is going to be a little bit more complicated. For starters, the mathematical formula for the angle between two vectors is the `math.acos` of the dot product of those vectors' respective unit vectors, and recall that we already implemented the dot product of two vectors when we wrote `__mul__`. So, `.angle` should take `self` as the first argument and `other` as the second argument, and if `other` is a vector, use that formula to compute the angle between `self` and `other`, or if `other` is not a vector, `.angle` should raise a `MatchError`. To accomplish this, we're going to want to use destructuring assignment to check that `other` is indeed a `vector`.
Tests:
```coconut
import math
vector(2, 0).angle(vector(3, 0)) |> print # 0.0
print(vector(1, 0).angle(vector(0, 2)), math.pi/2) # should be the same
vector(1, 2).angle(5) # MatchError
```
_Hint: Look back at how we checked whether the argument to `factorial` was an integer using pattern-matching._
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
Here's my solution—take a look:
```coconut
def angle(self, other `isinstance` vector) = math.acos(self.unit() * other.unit())
```
And now it's time to put it all together. Feel free to substitute in your own versions of the methods we just defined.
```coconut
import math # necessary for math.acos in .angle
data vector(*pts):
"""Immutable n-vector."""
def __new__(cls, *pts):
"""Create a new vector from the given pts."""
match [v `isinstance` vector] in pts:
return v # vector(v) where v is a vector should return v
else:
return pts |*> makedata$(cls) # accesses base constructor
def __abs__(self) =
"""Return the magnitude of the vector."""
self.pts |> map$(.**2) |> sum |> (.**0.5)
def __add__(self, vector(*other_pts)
if len(other_pts) == len(self.pts)) =
"""Add two vectors together."""
map((+), self.pts, other_pts) |*> vector
def __sub__(self, vector(*other_pts)
if len(other_pts) == len(self.pts)) =
"""Subtract one vector from another."""
map((-), self.pts, other_pts) |*> vector
def __neg__(self) =
"""Retrieve the negative of the vector."""
self.pts |> map$(-) |*> vector
def __mul__(self, other):
"""Scalar multiplication and dot product."""
match vector(*other_pts) in other:
assert len(other_pts) == len(self.pts)
return map((*), self.pts, other_pts) |> sum # dot product
else:
return self.pts |> map$(.*other) |*> vector # scalar multiplication
def __rmul__(self, other) =
"""Necessary to make scalar multiplication commutative."""
self * other
# New one-line functions necessary for finding the angle between vectors:
def __truediv__(self, other) = self.pts |> map$(x => x/other) |*> vector
def unit(self) = self / abs(self)
def angle(self, other `isinstance` vector) = math.acos(self.unit() * other.unit())
# Test cases:
vector(3, 4) / 1 |> print # vector(*pts=(3.0, 4.0))
vector(2, 4) / 2 |> print # vector(*pts=(1.0, 2.0))
vector(0, 1).unit() |> print # vector(*pts=(0.0, 1.0))
vector(5, 0).unit() |> print # vector(*pts=(1.0, 0.0))
vector(2, 0).angle(vector(3, 0)) |> print # 0.0
print(vector(1, 0).angle(vector(0, 2)), math.pi/2) # should be the same
vector(1, 2).angle(5) # MatchError
```
_One note of warning here: be careful not to leave a blank line when substituting in your methods, or the interpreter will cut off the code for the `vector` there. This isn't a problem in normal Coconut code, only here because we're copy-and-pasting into the command line._
Copy, paste! If everything is working, you can try going back to playing around with `vector_field` [applications](#applications) using our new methods.
## Filling in the Gaps
And with that, this tutorial is out of case studies—but that doesn't mean Coconut is out of features! In this last section, we'll touch on some of the other useful features of Coconut that we managed to miss in the case studies.
### Lazy Lists
First up is lazy lists. Lazy lists are lazily-evaluated lists, similar in their laziness to Coconut's `::` operator, in that any expressions put inside a lazy list won't be evaluated until that element of the lazy list is needed. The syntax for lazy lists is exactly the same as the syntax for normal lists, but with "banana brackets" (`(|` and `|)`) instead of normal brackets, like so:
```coconut
abc = (| a, b, c |)
```
Unlike Python iterators, lazy lists can be iterated over multiple times and still return the same result.
Unlike Python lists, however, lazy lists defer evaluating their elements, so it is possible to reference values in a lazy list before those values are defined, without raising a `NameError`, as long as they are defined by the time the element is needed:
```coconut
abcd = (| d(a), d(b), d(c) |) # a, b, c, and d are not defined yet
def d(n) = n + 1
a = 1
abcd$[0]
b = 2
abcd$[1]
c = 3
abcd$[2]
```
### Function Composition
Next is function composition. In Coconut, this is primarily accomplished through the `f1 ..> f2` operator, which takes two functions and composes them, creating a new function equivalent to `(*args, **kwargs) => f2(f1(*args, **kwargs))`. This can be useful in combination with partial application for piecing together multiple higher-order functions, like so:
```coconut
zipsum = zip ..> map$(sum)
```
_While `..>` is generally preferred, if you'd rather use the more traditional mathematical function composition ordering, you can get that with the `<..` operator._
If the composed functions are wrapped in parentheses, arguments can be passed into them:
```coconut
def plus1(x) = x + 1
def square(x) = x * x
(square ..> plus1)(3) == 10 # True
```
Functions of different arities can be composed together, as long as they are in the correct order. If they are in the incorrect order, a `TypeError` will be raised. In this example we will compose a unary function with a binary function:
```coconut
def add(n, m) = n + m # binary function
def square(n) = n * n # unary function
(square ..> add)(3, 1) # Raises TypeError: square() takes exactly 1 argument (2 given)
(add ..> square)(3, 1) # 16
```
Another useful trick with function composition involves composing a function with a higher-order function:
```coconut
def inc_or_dec(t):
# Our higher-order function, which returns another function
if t:
return x => x+1
else:
return x => x-1
def square(n) = n * n
square_inc = inc_or_dec(True) ..> square
square_dec = inc_or_dec(False) ..> square
square_inc(4) # 25
square_dec(4) # 9
```
_Note: Coconut also supports the function composition operators `..`, `..*>`, `<*..`, `..**>`, and `<**..`._
### Implicit Partials
Another useful Coconut feature is implicit partials. Coconut supports a number of different "incomplete" expressions that will evaluate to a function that takes in the part necessary to complete them, that is, an implicit partial application function. The different allowable expressions are:
```coconut
.attr
.method(args)
func$
seq[]
iter$[]
.[slice]
.$[slice]
```
For a full explanation of what each implicit partial does, see Coconut's documentation on [implicit partials](./DOCS.md#implicit-partial-application).
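As a quick sketch, here are a couple of these implicit partials in action:
```coconut
"hello" |> .upper() |> print           # HELLO
range(10) |> .$[2:5] |> list |> print  # [2, 3, 4]
```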
### Type Annotations
For many people, one of the big downsides of Python is the fact that it is dynamically-typed. In Python, this problem is addressed by [MyPy](http://mypy-lang.org/), a static type analyzer for Python, which can check Python-3-style type annotations such as
```coconut_python
def plus1(x: int) -> int:
return x + 1
a: int = plus1(10)
```
Unfortunately, such type annotation syntax only exists in Python 3. Not to worry in Coconut, however, which compiles Python-3-style type annotations to universally compatible type comments. Not only that, but Coconut has built-in [MyPy integration](./DOCS.md#mypy-integration) for automatically type-checking your code, and its own [enhanced type annotation syntax](./DOCS.md#enhanced-type-annotation) for more easily expressing complex types, like so:
```coconut
def int_map(
f: int -> int,
xs: int[],
) -> int[] =
xs |> map$(f) |> list
```
### Further Reading
And that's it for this tutorial! But that's hardly it for Coconut. All of the features examined in this tutorial, as well as a bunch of others, are detailed in Coconut's [documentation](./DOCS.md).
Also, if you have any other questions not covered in this tutorial, feel free to ask around at Coconut's [Gitter](https://gitter.im/evhub/coconut), a GitHub-integrated chat room for Coconut developers.
Finally, Coconut is a new, growing language, and if you'd like to get involved in the development of Coconut, all the code is available completely open-source on Coconut's [GitHub](https://github.com/evhub/coconut). Contributing is as simple as forking the code, making your changes, and proposing a pull request! See Coconut's [contributing guidelines](./CONTRIBUTING.md) for more information.
|
PypiClean
|
/drf_shop_api-0.0.3-py3-none-any.whl/drf_shop_api/orders/models.py
|
from django.conf import settings
from django.core.validators import MinValueValidator
from django.db import models
from drf_shop_api.abstract_models import OwnershipMultipleModel, TimeStampedModel
from drf_shop_api.orders.constants import OrderStatus, PaymentStatus, ShipmentStatus
from drf_shop_api.products.models import Product
class Order(TimeStampedModel, OwnershipMultipleModel):
status = models.CharField(
max_length=255,
choices=[(v.name, v.value) for v in OrderStatus],
null=True,
blank=True,
default=OrderStatus.CREATED.name,
)
total = models.DecimalField(max_digits=6, default=0, decimal_places=2, validators=[MinValueValidator(0)])
class Meta:
db_table = "orders"
ordering = ("-id",)
class OrderShipping(models.Model):
order = models.OneToOneField(Order, models.CASCADE, related_name="shipping")
method = models.ForeignKey("drf_shop_api.ShippingMethod", models.CASCADE)
address = models.CharField(max_length=255)
status = models.CharField(
max_length=255,
choices=[(v.name, v.value) for v in ShipmentStatus],
null=True,
blank=True,
default=ShipmentStatus.CREATED.name,
)
class Meta:
db_table = "order-shippings"
ordering = ("-id",)
class OrderPayment(models.Model):
order = models.OneToOneField(Order, models.CASCADE, related_name="payment")
if getattr(settings, "DRF_SHOP_PAYMENT_MODEL", None):
payment = models.ForeignKey(settings.DRF_SHOP_PAYMENT_MODEL, models.CASCADE)
else:
status = models.CharField(
max_length=255,
choices=[(v.name, v.value) for v in PaymentStatus],
null=True,
blank=True,
default=PaymentStatus.UNCOMPLETED.name,
)
class OrderProduct(models.Model):
order = models.ForeignKey(Order, models.CASCADE, related_name="products")
product = models.ForeignKey(Product, models.CASCADE, related_name="order_products")
quantity = models.IntegerField(default=1)
class Meta:
db_table = "order-products"
ordering = ("-id",)
|
PypiClean
|
/Findex_GUI-0.2.18-py3-none-any.whl/findex_gui/static/js/findex/findex_core.js
|
$('#help').css('display', 'none');
function endsWith(str, suffix) {
return str.indexOf(suffix, str.length - suffix.length) !== -1;
}
function historyBack(){
history.back()
}
function file_icons(){
var file_icons = {
"bluray": ""
}
}
function errorBox(errors){
var text = '';
for(var i = 0; i != errors.length ; i++){
text += '<b>' + i + ':</b> ' + errors[i] + '<br>';
}
return '<div class=\"alert alert-danger\">'+text+'</div>';
}
function required_input(id){
$('#'+id).fadeTo(300,0.3);
setTimeout(function(){$('#'+id).fadeTo(200,1);}, 300);
}
function change_uri(uri){
window.history.pushState("", "", uri);
}
function goto_uri(uri){
window.location.href = uri;
}
function check_form(show_errors){
var warnings = [];
var data = {};
$('body *').each(function(){
var $this = $(this);
if($this.attr('data-req')){
var id = $this.attr('id');
var text = $this.html();
if($this.attr('data-req') == 'yes' && text == 'Empty'){
warnings.push('Property \'' + id + '\' cannot be empty.');
required_input(id);
}
else{
data[id] = text;
}
}
});
if(warnings.length == 0){
return data;
}
else{
if(show_errors) $('#errorbox').html(errorBox(warnings));
}
}
function chart_browse_pie_filedistribution_spawn(target, data, source_name) {
var c = $(target).highcharts({
chart: {
plotBackgroundColor: null,
plotBorderWidth: 0,
plotShadow: false,
margin: [0, 0, 0, 0],
spacingTop: 0,
spacingBottom: 0,
spacingLeft: 0,
spacingRight: 0,
reflow: false
},
title: {
text: '',
align: 'center',
verticalAlign: 'middle',
y: -116
},
tooltip: {
pointFormat: '{series.name}: <b>{point.percentage:.1f}%</b>'
},
plotOptions: {
pie: {
size: '100%',
dataLabels: {
enabled: true,
distance: -40,
style: {
fontWeight: 'bold',
color: 'white',
textShadow: '0px 1px 2px black'
}
},
startAngle: -90,
endAngle: 90,
center: ['50%', '58%']
}
},
credits: {
enabled: false
},
series: [{
type: 'pie',
name: source_name,
innerSize: '0%',
data: data
}]
});
return c;
}
function gets(){
console.log($('#form_filter').serialize());
}
$.fn.serializeObject = function()
{
var o = {};
var a = this.serializeArray();
$.each(a, function() {
if (o[this.name] !== undefined) {
if (!o[this.name].push) {
o[this.name] = [o[this.name]];
}
o[this.name].push(this.value || 'x2');
} else {
if(this.value){
o[this.name] = this.value;
}
}
});
return o;
};
function url_for(inp){
if(inp.startsWith("/")) inp = inp.slice(1);
return `${APPLICATION_ROOT}${inp}`;
}
|
PypiClean
|
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/workbox-build/node_modules/fs-extra/lib/move/index.js
|
'use strict'
// most of this code was written by Andrew Kelley
// licensed under the BSD license: see
// https://github.com/andrewrk/node-mv/blob/master/package.json
// this needs a cleanup
const u = require('universalify').fromCallback
const fs = require('graceful-fs')
const ncp = require('../copy/ncp')
const path = require('path')
const remove = require('../remove').remove
const mkdirp = require('../mkdirs').mkdirs
function move (src, dest, options, callback) {
if (typeof options === 'function') {
callback = options
options = {}
}
const overwrite = options.overwrite || options.clobber || false
isSrcSubdir(src, dest, (err, itIs) => {
if (err) return callback(err)
if (itIs) return callback(new Error(`Cannot move '${src}' to a subdirectory of itself, '${dest}'.`))
mkdirp(path.dirname(dest), err => {
if (err) return callback(err)
doRename()
})
})
function doRename () {
if (path.resolve(src) === path.resolve(dest)) {
fs.access(src, callback)
} else if (overwrite) {
fs.rename(src, dest, err => {
if (!err) return callback()
if (err.code === 'ENOTEMPTY' || err.code === 'EEXIST') {
remove(dest, err => {
if (err) return callback(err)
options.overwrite = false // just overwrote it, no need to do it again
move(src, dest, options, callback)
})
return
}
// weird Windows behavior
if (err.code === 'EPERM') {
setTimeout(() => {
remove(dest, err => {
if (err) return callback(err)
options.overwrite = false
move(src, dest, options, callback)
})
}, 200)
return
}
if (err.code !== 'EXDEV') return callback(err)
moveAcrossDevice(src, dest, overwrite, callback)
})
} else {
fs.link(src, dest, err => {
if (err) {
if (err.code === 'EXDEV' || err.code === 'EISDIR' || err.code === 'EPERM' || err.code === 'ENOTSUP') {
return moveAcrossDevice(src, dest, overwrite, callback)
}
return callback(err)
}
return fs.unlink(src, callback)
})
}
}
}
function moveAcrossDevice (src, dest, overwrite, callback) {
fs.stat(src, (err, stat) => {
if (err) return callback(err)
if (stat.isDirectory()) {
moveDirAcrossDevice(src, dest, overwrite, callback)
} else {
moveFileAcrossDevice(src, dest, overwrite, callback)
}
})
}
function moveFileAcrossDevice (src, dest, overwrite, callback) {
const flags = overwrite ? 'w' : 'wx'
const ins = fs.createReadStream(src)
const outs = fs.createWriteStream(dest, { flags })
ins.on('error', err => {
ins.destroy()
outs.destroy()
outs.removeListener('close', onClose)
// may want to create a directory but `out` line above
// creates an empty file for us: See #108
// don't care about error here
fs.unlink(dest, () => {
// note: `err` here is from the input stream error
if (err.code === 'EISDIR' || err.code === 'EPERM') {
moveDirAcrossDevice(src, dest, overwrite, callback)
} else {
callback(err)
}
})
})
outs.on('error', err => {
ins.destroy()
outs.destroy()
outs.removeListener('close', onClose)
callback(err)
})
outs.once('close', onClose)
ins.pipe(outs)
function onClose () {
fs.unlink(src, callback)
}
}
function moveDirAcrossDevice (src, dest, overwrite, callback) {
const options = {
overwrite: false
}
if (overwrite) {
remove(dest, err => {
if (err) return callback(err)
startNcp()
})
} else {
startNcp()
}
function startNcp () {
ncp(src, dest, options, err => {
if (err) return callback(err)
remove(src, callback)
})
}
}
// return true if dest is a subdir of src, otherwise false.
// extract dest base dir and check if that is the same as src basename
function isSrcSubdir (src, dest, cb) {
fs.stat(src, (err, st) => {
if (err) return cb(err)
if (st.isDirectory()) {
const baseDir = dest.split(path.dirname(src) + path.sep)[1]
if (baseDir) {
const destBasename = baseDir.split(path.sep)[0]
if (destBasename) return cb(null, src !== dest && dest.indexOf(src) > -1 && destBasename === path.basename(src))
return cb(null, false)
}
return cb(null, false)
}
return cb(null, false)
})
}
module.exports = {
move: u(move)
}
|
PypiClean
|
/bertserini_on_telegram-1.2.tar.gz/bertserini_on_telegram-1.2/bertserini_on_telegram/utils/base.py
|
from typing import List, Union, Optional, Mapping, Any
import abc
__all__ = ['Question', 'Context', 'Reader', 'Answer', 'TextType']
TextType = Union['Question', 'Context', 'Answer']
class Question:
"""
A wrapper around a question text. Can contain other metadata.
Args:
text (str): The question text.
id (Optional[str]): The question id. Defaults to None.
language (str): The language of the posed question. Defaults to "en".
"""
def __init__(self, text: str, id: Optional[str] = None, language: str = "en"):
self.text = text
self.id = id
self.language = language
class Context:
"""
A wrapper around a context text.
The text is unspecified with respect to its length; in principle, it could be a full-length document, a paragraph-sized passage, or
even a short phrase.
Args:
text (str): The context that contains potential answer.
language (str): The language of the posed question. Defaults to "en".
metadata (Mapping[str, Any]): Additional metadata and other annotations.
score (Optional[float]): The score of the context. For example, the score might be the BM25 score from an initial retrieval stage. Defaults to 0.
"""
def __init__(self,
text: str,
language: str = "en",
metadata: Mapping[str, Any] = None,
score: Optional[float] = 0):
self.text = text
self.language = language
if metadata is None:
metadata = dict()
self.metadata = metadata
self.score = score
class Answer:
"""
A wrapper around an answer text. Can contain other metadata.
Args:
text (str): The answer text.
language (str): The language of the posed question. Defaults to "en".
metadata (Mapping[str, Any]): Additional metadata and other annotations.
ans_score (Optional[float]): The score of the answer.
ctx_score (Optional[float]): The context score of the answer.
total_score (Optional[float]): The aggregated score of answer score and ctx_score.
"""
def __init__(self,
text: str,
language: str = "en",
metadata: Mapping[str, Any] = None,
ans_score: Optional[float] = 0,
ctx_score: Optional[float] = 0,
total_score: Optional[float] = 0):
self.text = text
self.language = language
if metadata is None:
metadata = dict()
self.metadata = metadata
self.ans_score = ans_score
self.ctx_score = ctx_score
self.total_score = total_score
def aggregate_score(self, weight: float) -> float:
"""
Computes the aggregate score between ans_score and ctx_score as a linear interpolation given a weight.
Args:
weight (float): The weight to assign to ans_score and ctx_score.
Returns:
float: The aggregated score.
"""
self.total_score = weight*self.ans_score + (1-weight)*self.ctx_score
return self.total_score
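# A minimal usage sketch (illustrative values, not taken from the library):
if __name__ == "__main__":
    answer = Answer("Kathmandu", ans_score=0.9, ctx_score=0.5)
    answer.aggregate_score(weight=0.7)  # 0.7 * 0.9 + 0.3 * 0.5 = 0.78
    print(answer.total_score)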
|
PypiClean
|
/Wryten-1.42.10-py3-none-any.whl/suplemon/main.py
|
import os
import sys
from . import ui
from . import module_loader
from . import themes
from . import helpers
from .file import File
from .logger import logger
from .config import Config
from .editor import Editor
__version__ = "1.42.10"
class App:
def __init__(self, filenames=None, config_file=None):
"""
Handle App initialization
:param list filenames: Names of files to load initially
:param str filenames[*]: Path to a file to load
"""
self.version = __version__
self.inited = False
self.running = False
self.debug = True
self.block_rendering = False
# Set default variables
self.path = os.path.dirname(os.path.realpath(__file__))
self.files = []
self.current_file = 0
self.status_msg = ""
self.last_input = None
self.global_buffer = []
self.event_bindings = {}
self.config = None
self.ui = None
self.modules = None
self.themes = None
# Maximum amount of inputs to process at once
self.max_input = 100
# Save filenames for later
self.filenames = filenames
# Save config file path for later
self.config_file = config_file
# Define core operations
self.operations = {
"help": self.help,
"save_file": self.save_file,
"run_command": self.query_command,
"go_to": self.go_to,
"open": self.open,
"close_file": self.close_file,
"new_file": self.new_file,
"exit": self.ask_exit,
"ask_exit": self.ask_exit,
"prev_file": self.prev_file,
"next_file": self.next_file,
"save_file_as": self.save_file_as,
"reload_file": self.reload_file,
"toggle_mouse": self.toggle_mouse,
"toggle_fullscreen": self.toggle_fullscreen,
}
# Bind our logger
self.logger = logger
self.logger.debug("Starting Suplemon...")
def init(self):
"""Initialize the app."""
# Load core components
self.config = Config(self)
if self.config_file:
self.config.set_path(self.config_file)
if not self.config.init():
# Can't run without config
return False
self.config.load()
# Unicode symbols don't play nice with Python 2 so disable them
if sys.version_info[0] < 3:
self.config["app"]["use_unicode_symbols"] = False
# Configure logger
self.debug = self.config["app"]["debug"]
debug_level = self.config["app"]["debug_level"]
self.logger.debug("Setting debug_level to {0}.".format(debug_level))
self.logger.setLevel(debug_level)
[handler.setLevel(debug_level) for handler in self.logger.handlers]
# Load user interface
self.ui = ui.UI(self)
self.ui.init()
# Load extension modules
self.modules = module_loader.ModuleLoader(self)
self.modules.load()
# Load default module configs
self.config.load_module_configs()
# Load themes
self.themes = themes.ThemeLoader(self)
# Indicate that initialization is complete
self.inited = True
return True
def exit(self):
"""Stop the main loop and exit."""
self.trigger_event_before("app_exit")
self.running = False
def run(self):
"""Run the app via the ui wrapper."""
self.ui.run(self.run_wrapped)
def run_wrapped(self, *args):
"""Actually run the app and start mainloop.
This shouldn't be called directly. Instead it's passed to the UI which
calls it in a safe curses wrapper.
:param *args: Not used. Takes any args the wrapper might pass in.
"""
# Load ui and files etc
self.load()
# Initial render
self.get_editor().refresh()
self.ui.refresh()
# Start mainloop
self.main_loop()
self.trigger_event_after("app_exit")
# Unload ui
self.ui.unload()
def load(self):
"""Load the app.
Load the UI, open files in self.filenames and finally trigger
the 'app_loaded' event.
"""
self.ui.load()
ver = sys.version_info
if ver[0] < 3 or (ver[0] == 3 and ver[1] < 3):
ver = ".".join(map(str, sys.version_info[0:2]))
self.logger.warning("Running Suplemon with Python {version} "
"isn't officialy supported. Please use "
"Python 3.3 or higher."
.format(version=ver))
self.load_files()
self.running = True
self.trigger_event_after("app_loaded")
def on_input(self, event):
# Handle the input or give it to the editor
if not self.handle_input(event):
# Pass the input to the editor component
self.get_editor().handle_input(event)
def main_loop(self):
"""Run the terminal IO loop until exit() is called."""
while self.running:
# Update ui before refreshing it
self.ui.update()
self.block_rendering = True
got_input = False
# Run through max 100 inputs (so the view is updated at least every 100 characters)
for i in range(self.max_input):
event = self.ui.get_input(False) # non-blocking
if not event:
break # no more inputs to process at this time
got_input = True
self.on_input(event)
if not got_input:
# Wait for input, since there were none already available
event = self.ui.get_input(True) # blocking
if event:
got_input = True
self.on_input(event) # PERF: Up to 30% processing time
self.block_rendering = False
self.trigger_event_after("mainloop")
# Rendering happens here
# TODO: Optimize performance. Can make up 45% of processing time in the loop.
self.get_editor().refresh()
self.ui.refresh()
def get_status(self):
"""Get the current status message.
:return: Current status message.
:rtype: str
"""
return self.status_msg
def get_file_index(self, file_obj):
"""Return the index of file_obj in the file list.
:param file_obj: File instance.
:return: Index of file_obj.
:rtype: int
"""
return self.files.index(file_obj)
def get_key_bindings(self):
"""Return the list of key bindings."""
return self.config.key_bindings
def get_event_bindings(self):
"""Return the dict of event bindings."""
return self.event_bindings
def set_key_binding(self, key, operation):
"""Bind a key to an operation.
Bind operation to be run when key is pressed.
:param key: What key or key combination to bind.
:param str operation: Which operation to run.
"""
self.config.keymap.prepend({"keys": [key], "command": operation})
def set_event_binding(self, event, when, callback):
"""Bind a callback to be run before or after an event.
Bind callback to run before or after event occurs. The when parameter
should be 'before' or 'after'. If using 'before' the callback can
inhibit running the event if it returns True
:param str event: Event to bind to.
:param str when: String with 'before' or 'after'.
:param callback: Callback to bind.
"""
event_bindings = self.get_event_bindings()
if when not in event_bindings.keys():
event_bindings[when] = {}
if event in event_bindings[when].keys():
event_bindings[when][event].append(callback)
else:
event_bindings[when][event] = [callback]
def set_status(self, status):
"""Set app status message.
:param str status: Status message to show in status bar.
"""
self.status_msg = str(status)
def unsaved_changes(self):
"""Return True if there are unsaved changes in any file."""
for f in self.files:
if f.is_changed():
return True
return False
def reload_config(self):
"""Reload configuration."""
self.config.reload()
for f in self.files:
self.setup_editor(f.editor)
self.trigger_event_after("config_loaded")
self.ui.resize()
self.ui.refresh()
def handle_input(self, event):
"""Handle an input event.
Runs relevant actions based on the event received.
:param event: An event instance.
:return: Boolean indicating if the event was handled.
:rtype: boolean
"""
if not event:
return False
self.last_input = event
if event.type == "key":
return self.handle_key(event)
elif event.type == "mouse":
return self.handle_mouse(event)
return False
def handle_key(self, event):
"""Handle a key input event.
:param event: Event instance.
:return: Boolean indicating if event was handled.
:rtype: boolean
"""
key_bindings = self.get_key_bindings()
operation = None
if event.key_name in key_bindings.keys():
operation = key_bindings[event.key_name]
elif event.key_code in key_bindings.keys():
operation = key_bindings[event.key_code]
if operation in self.operations.keys():
self.run_operation(operation)
return True
elif operation in self.modules.modules.keys():
self.run_module(operation)
return False
def handle_mouse(self, event):
"""Handle a mouse input event.
:param event: Event instance.
:return: Boolean indicating if event was handled.
:rtype: boolean
"""
editor = self.get_editor()
if event.mouse_code == 1: # Left mouse button release
editor.set_single_cursor(event.mouse_pos)
elif event.mouse_code == 4096: # Right mouse button release
editor.add_cursor(event.mouse_pos)
elif event.mouse_code == 524288: # Wheel up
editor.jump_up()
elif event.mouse_code == 134217728: # Wheel down (and unfortunately left button drag)
editor.jump_down()
else:
return False
return True
###########################################################################
# User Interactions
###########################################################################
def help(self):
"""Toggle the help document.
- If current document is help, close it.
- Otherwise, if help is open, switch to it.
- Otherwise, open a new file with help text.
"""
if self.get_file().is_help:
self.close_file()
else:
idx = next((i for i, f in enumerate(self.files) if f.is_help), -1)
if idx == -1:
f = self.default_file()
from . import help
f.set_data(help.help_text)
f.is_help = True
self.files.append(f)
idx = self.last_file_index()
self.switch_to_file(idx)
def new_file(self, path=None):
"""Open a new empty file.
Open a new file and optionally set it's path.
:param str path: Optional. Path for file.
"""
new_file = self.default_file()
if path:
new_file.set_path(path)
self.files.append(new_file)
self.current_file = self.last_file_index()
return new_file
def ask_exit(self):
"""Exit if no unsaved changes, else make sure the user really wants to exit."""
if self.unsaved_changes():
yes = self.ui.query_bool("Exit?")
if yes:
self.exit()
return True
return False
self.exit()
return True
def switch_to_file(self, index):
"""Load a default file if no files specified."""
self.current_file = index
def next_file(self):
"""Switch to next file."""
if len(self.files) < 2:
return
cur = self.current_file
cur += 1
if cur > len(self.files)-1:
cur = 0
self.switch_to_file(cur)
def prev_file(self):
"""Switch to previous file."""
if len(self.files) < 2:
return
cur = self.current_file
cur -= 1
if cur < 0:
cur = len(self.files)-1
self.switch_to_file(cur)
def go_to(self):
"""Go to a line or a file (or a line in a specific file with 'name:lineno')."""
input_str = self.ui.query("Go to:")
lineno = None
fname = None
if input_str is False:
return False
if input_str.find(":") != -1:
parts = input_str.split(":")
fname = parts[0]
lineno = parts[1]
file_index = self.find_file(fname)
if file_index != -1:
self.switch_to_file(file_index)
try:
lineno = int(lineno)
self.get_editor().go_to_pos(lineno)
except ValueError:
pass
else:
try:
line_no = int(input_str)
self.get_editor().go_to_pos(line_no)
except ValueError:
file_index = self.find_file(input_str)
if file_index != -1:
self.switch_to_file(file_index)
def find_file(self, s):
"""Return index of file matching string."""
# REFACTOR: Move to a helper function or implement in a module
# Case insensitive matching
s = s.lower()
# First match files beginning with s
for i, file in enumerate(self.files):
if file.name.lower().startswith(s):
return i
# Then match any files that contain s
for i, file in enumerate(self.files):
if s in file.name.lower():
return i
return -1
def run_command(self, data):
"""Run editor commands."""
parts = data.split(" ")
cmd = parts[0].lower()
if cmd in self.operations.keys():
return self.run_operation(cmd)
args = " ".join(parts[1:])
self.logger.debug("Looking for command '{0}'".format(cmd))
if cmd in self.modules.modules.keys():
self.logger.debug("Trying to run command '{0}'".format(cmd))
self.get_editor().store_action_state(cmd)
if not self.run_module(cmd, args):
return False
else:
self.set_status("Command '{0}' not found.".format(cmd))
return False
return True
def run_module(self, module_name, args=""):
try:
self.modules.modules[module_name].run(self, self.get_editor(), args)
return True
except Exception:
# Catch any error when running a module just in case
self.set_status("Running command failed!")
self.logger.exception("Running command failed!")
return False
def run_operation(self, operation):
"""Run an app core operation."""
# Support arbitrary callables. TODO: deprecate
if hasattr(operation, "__call__"):
return operation()
if operation in self.operations.keys():
cancel = self.trigger_event_before(operation)
if not cancel:
result = self.operations[operation]()
self.trigger_event_after(operation)
return result
elif operation in self.modules.modules.keys():
cancel = self.trigger_event_before(operation)
if not cancel:
result = self.modules.modules[operation].run(self, self.get_editor(), "")
self.trigger_event_after(operation)
return result
return False
def trigger_event(self, event, when):
"""Triggers event and runs registered callbacks."""
status = False
bindings = self.get_event_bindings()
if when not in bindings.keys():
return False
if event in bindings[when].keys():
callbacks = bindings[when][event]
for cb in callbacks:
try:
val = cb(event)
except Exception:
# Catch all errors in callbacks just in case
self.logger.error("Failed running callback: {0}".format(cb), exc_info=True)
continue
if val:
status = True
return status
def trigger_event_before(self, event):
return self.trigger_event(event, "before")
def trigger_event_after(self, event):
return self.trigger_event(event, "after")
def toggle_fullscreen(self):
"""Toggle full screen editor."""
display = self.config["display"]
show_indicators = not display["show_top_bar"]
display["show_top_bar"] = show_indicators
display["show_bottom_bar"] = show_indicators
display["show_legend"] = show_indicators
# Virtual curses windows need to be resized
self.ui.resize()
def toggle_mouse(self):
"""Toggle mouse support."""
# Invert the boolean
self.config["editor"]["use_mouse"] = not self.config["editor"]["use_mouse"]
self.ui.setup_mouse()
if self.config["editor"]["use_mouse"]:
self.set_status("Mouse enabled")
else:
self.set_status("Mouse disabled")
def query_command(self):
"""Run editor commands."""
if sys.version_info[0] < 3:
modules = self.modules.modules.iteritems()
else:
modules = self.modules.modules.items()
# Get built in operations
completions = [oper for oper in self.operations.keys()]
# Add runnable modules
completions += [name for name, m in modules if m.is_runnable()]
data = self.ui.query_autocmp("Command:", completions=sorted(completions))
if not data:
return False
self.run_command(data)
###########################################################################
# Editor operations
###########################################################################
def new_editor(self):
"""Create a new editor instance."""
editor = Editor(self, self.ui.editor_win)
self.setup_editor(editor)
return editor
def get_editor(self):
"""Return the current editor."""
return self.files[self.current_file].editor
def setup_editor(self, editor):
"""Setup an editor instance with configuration."""
config = self.config["editor"]
editor.set_config(config)
editor.init()
###########################################################################
# File operations
###########################################################################
def open(self):
"""Ask for file name and try to open it."""
input_name = self.ui.query_file("Open file:")
if not input_name:
return False
name_row_col = helpers.get_filename_cursor_pos(input_name)
name = name_row_col["name"]
exists = self.file_is_open(name)
if exists:
self.switch_to_file(self.files.index(exists))
return True
if not self.open_file(**name_row_col):
self.set_status("Failed to load '{0}'".format(name))
return False
self.switch_to_file(self.last_file_index())
return True
def close_file(self):
"""Close current file if user confirms action."""
if self.get_file().is_changed():
if not self.ui.query_bool("Close file?"):
return False
self.files.pop(self.current_file)
if not len(self.files):
self.new_file()
return False
if self.current_file == len(self.files):
self.current_file -= 1
def save_file(self, file=False, overwrite=False):
"""Save current file."""
f = file or self.get_file()
# Make sure the file has a name
if not f.get_name():
return self.save_file_as(f)
# Warn if the file has changed on disk
if not overwrite and f.is_changed_on_disk():
if not self.ui.query_bool("The file was modified since you opened it, save anyway?"):
return False
# Save the file
if f.save():
self.set_status("Saved [{0}] '{1}'".format(helpers.curr_time_sec(), f.name))
if f.path() == self.config.path() or f.path() == self.config.keymap_path():
self.reload_config()
return True
self.set_status("Couldn't write to '{0}'".format(f.name))
return False
def save_file_as(self, file=False):
"""Save current file."""
f = file or self.get_file()
name_input = self.ui.query_file("Save as:", f.name)
if not name_input:
return False
target_dir, name = helpers.parse_path(name_input)
full_path = os.path.join(target_dir, name)
if os.path.exists(full_path):
if not self.ui.query_bool("A file or directory with that name already exists. Overwrite it?"):
return False
if target_dir and not os.path.exists(target_dir):
if self.ui.query_bool("The path doesn't exist, do you want to create it?"):
self.logger.debug("Creating missing folders in save path.")
os.makedirs(target_dir)
else:
return False
f.set_path(full_path)
# We can just overwrite the file since the user already confirmed
return self.save_file(f, overwrite=True)
def reload_file(self):
"""Reload the current file."""
if self.ui.query_bool("Reload '{0}'?".format(self.get_file().name)):
if self.get_file().reload():
return True
return False
def get_files(self):
"""Return list of open files."""
return self.files
def get_file(self):
"""Return the current file."""
return self.files[self.current_file]
def last_file_index(self):
"""Get index of last file."""
cur = len(self.files)-1
return cur
def current_file_index(self):
"""Get index of current file."""
return self.current_file
def open_file(self, name=None, row=0, col=0):
"""Open a file."""
file = File(self)
file.set_path(name)
file.set_editor(self.new_editor())
if not file.load():
return False
file.get_editor().set_single_cursor((col, row))
file.get_editor().scroll_to_line(row)
self.files.append(file)
return True
def load_files(self):
"""Try to load all files specified in arguments."""
if self.filenames:
for item in self.filenames:
name_row_col = helpers.get_filename_cursor_pos(item)
name = name_row_col["name"]
if os.path.isdir(name):
continue
# Avoid opening duplicate files
if self.file_is_open(name):
continue
if not self.open_file(**name_row_col):
self.new_file(name)
# If nothing was loaded
if not self.files:
self.load_default()
def file_is_open(self, path):
"""Check if file is open. Returns a File object or False."""
for file in self.files:
if file.path() == os.path.abspath(path):
return file
return False
def load_default(self):
"""Load a default file if no files specified."""
file = self.default_file()
self.files.append(file)
def default_file(self):
"""Create the default file."""
file = File(self)
file.set_editor(self.new_editor())
# Specify contents to avoid appearing as modified
file.set_data("")
# Set markdown as the default file type
file.editor.set_file_extension("md")
return file
|
PypiClean
|
/Django-Template-Preprocess-1.0.2.tar.gz/Django-Template-Preprocess-1.0.2/template_preprocess/test/extend_block.py
|
from django.test import TestCase
from django.test.utils import override_settings
from template_preprocess.processor import process_template_content
from template_preprocess.test import get_test_template_settings
template_settings = get_test_template_settings()
@override_settings(**template_settings)
class TestExtendBlock(TestCase):
def test_basic_block(self):
content = '{% include "extends/sub_template1.html" %}'
result = process_template_content(content)
correct = ('Before {% block inserted_content %}The Block'
'{%endblock inserted_content%} {% block block2 %}'
'Block 2{%endblock block2 %} {% block notreplaced %}'
'In wrapper{%endblock%} After ')
self.assertEquals(result, correct)
def test_extends_missing_template(self):
content = '{% include "extends/parent_is_missing.html" %}'
result = process_template_content(content)
self.assertEquals(result, content)
def test_recursive_extends(self):
content = '{% include "extends/recursive.html" %}'
result = process_template_content(content)
self.assertEquals(result, content)
def test_nested_blocks(self):
content = '{% include "extends/nested.html" %}'
result = process_template_content(content)
self.assertEquals(
result,
'{% block a %}{% block b %}{% endblock b %}{% endblock %} ')
def test_load_tag_outside_of_block(self):
content = '{% include "extends/load_tag_out_of_block.html" %}'
result = process_template_content(content)
correct = ('{% load another more from app.templatetags %}'
'{% load i18n %}Before {% block content %}'
'The content{% endblock %} After ')
self.assertEquals(result, correct)
def test_multiline_block(self):
content = '{% include "extends/multiline.html" %}'
result = process_template_content(content)
correct = 'Before {%block ok%}Line 1 Line 2{%endblock%} '
self.assertEquals(result, correct)
|
PypiClean
|
/robinhood_commons-1.0.44.tar.gz/robinhood_commons-1.0.44/robinhood_commons/entity/option.py
|
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, Optional
from robinhood_commons.entity.option_type import OptionType
from robinhood_commons.entity.state import State
from robinhood_commons.entity.tick import Tick, clean_tick
from robinhood_commons.entity.tradability import Tradability
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
EXAMPLE: Dict[str, Any] = {
"chain_id": "f7ed1d28-55c4-4c76-abf5-3b16cb68a2e7",
"chain_symbol": "MRO",
"created_at": "2020-06-10T00:13:05.407629Z",
"expiration_date": "2020-06-19",
"id": "7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb",
"issue_date": "1991-05-06",
"min_ticks": {"above_tick": "0.05", "below_tick": "0.01", "cutoff_price": "3.00"},
"rhs_tradability": "untradable",
"state": "active",
"strike_price": "13.0000",
"tradability": "tradable",
"type": "call",
"updated_at": "2020-06-10T00:13:05.407639Z",
"url": "https://api.robinhood.com/options/instruments/7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb/",
"sellout_datetime": "2020-06-19T18:45:00+00:00",
}
STATS_EXAMPLE: Dict[str, Any] = {
"chain_id": "f7ed1d28-55c4-4c76-abf5-3b16cb68a2e7",
"chain_symbol": "MRO",
"created_at": "2020-06-10T00:13:05.407629Z",
"expiration_date": "2020-06-19",
"id": "7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb",
"issue_date": "1991-05-06",
"min_ticks": {"above_tick": "0.05", "below_tick": "0.01", "cutoff_price": "3.00"},
"rhs_tradability": "untradable",
"state": "active",
"strike_price": "13.0000",
"tradability": "tradable",
"type": "call",
"updated_at": "2020-06-10T00:13:05.407639Z",
"url": "https://api.robinhood.com/options/instruments/7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb/",
"sellout_datetime": "2020-06-19T18:45:00+00:00",
"adjusted_mark_price": "0.010000",
"ask_price": "0.020000",
"ask_size": 10,
"bid_price": "0.000000",
"bid_size": 0,
"break_even_price": "13.010000",
"high_price": None,
"instrument": "https://api.robinhood.com/options/instruments/7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb/",
"last_trade_price": "0.040000",
"last_trade_size": 1,
"low_price": None,
"mark_price": "0.010000",
"open_interest": 1,
"previous_close_date": "2020-06-11",
"previous_close_price": "0.010000",
"volume": 0,
"chance_of_profit_long": "0.007166",
"chance_of_profit_short": "0.992834",
"delta": "0.015780",
"gamma": "0.020289",
"implied_volatility": "2.139463",
"rho": "0.000018",
"theta": "-0.005508",
"vega": "0.000360",
"high_fill_rate_buy_price": "0.020000",
"high_fill_rate_sell_price": "0.000000",
"low_fill_rate_buy_price": "0.000000",
"low_fill_rate_sell_price": "0.010000",
}
@dataclass(frozen=True)
class Option:
chain_id: str
chain_symbol: str
created_at: datetime
expiration_date: datetime
id: str
issue_date: datetime
min_ticks: Tick
rhs_tradability: Tradability
state: State
strike_price: float
tradability: Tradability
type: OptionType
updated_at: datetime
url: str
sellout_datetime: datetime
adjusted_mark_price: Optional[float] = None
ask_price: Optional[float] = None
ask_size: Optional[int] = None
bid_price: Optional[float] = None
bid_size: Optional[int] = None
break_even_price: Optional[float] = None
high_price: Optional[float] = None
instrument: Optional[str] = None
last_trade_price: Optional[float] = None
last_trade_size: Optional[int] = None
low_price: Optional[float] = None
mark_price: Optional[float] = None
open_interest: Optional[int] = None
previous_close_date: Optional[datetime] = None
previous_close_price: Optional[float] = None
volume: Optional[int] = None
chance_of_profit_long: Optional[float] = None
chance_of_profit_short: Optional[float] = None
delta: Optional[float] = None
gamma: Optional[float] = None
implied_volatility: Optional[float] = None
rho: Optional[float] = None
theta: Optional[float] = None
vega: Optional[float] = None
high_fill_rate_buy_price: Optional[float] = None
high_fill_rate_sell_price: Optional[float] = None
low_fill_rate_buy_price: Optional[float] = None
low_fill_rate_sell_price: Optional[float] = None
def clean_option(input_data: Dict[str, Any]) -> Dict[str, Any]:
data = deepcopy(input_data)
data["min_ticks"] = Tick(**clean_tick(data["min_ticks"]))
data["rhs_tradability"] = Tradability.to_enum(data["rhs_tradability"])
data["state"] = State.to_enum(data["state"])
data["tradability"] = Tradability.to_enum(data["tradability"])
data["type"] = OptionType.to_enum(data["type"])
data = convert_floats(
data,
[
"strike_price",
"adjusted_mark_price",
"ask_price",
"ask_size",
"bid_price",
"bid_size",
"break_even_price",
"high_price",
"last_trade_price",
"last_trade_size",
"low_price",
"mark_price",
"open_interest",
"previous_close_price",
"volume",
"chance_of_profit_long",
"chance_of_profit_short",
"delta",
"gamma",
"implied_volatility",
"rho",
"theta",
"vega",
"high_fill_rate_buy_price",
"high_fill_rate_sell_price",
"low_fill_rate_buy_price",
"low_fill_rate_sell_price",
],
)
data = convert_dates(data, ["expiration_date", "issue_date", "sellout_datetime", "previous_close_date"])
return data
def main() -> None:
for example in [EXAMPLE, STATS_EXAMPLE]:
option: Option = Option(**clean_option(example))
print(option)
if __name__ == "__main__":
main()
|
PypiClean
|
/PyOpenGL-3.1.7-py3-none-any.whl/OpenGL/GL/EXT/separate_shader_objects.py
|
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.separate_shader_objects import *
from OpenGL.raw.GL.EXT.separate_shader_objects import _EXTENSION_NAME
def glInitSeparateShaderObjectsEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glCreateShaderProgramvEXT.strings size not checked against count
glCreateShaderProgramvEXT=wrapper.wrapper(glCreateShaderProgramvEXT).setInputArraySize(
'strings', None
)
# INPUT glDeleteProgramPipelinesEXT.pipelines size not checked against n
glDeleteProgramPipelinesEXT=wrapper.wrapper(glDeleteProgramPipelinesEXT).setInputArraySize(
'pipelines', None
)
# INPUT glGenProgramPipelinesEXT.pipelines size not checked against n
glGenProgramPipelinesEXT=wrapper.wrapper(glGenProgramPipelinesEXT).setInputArraySize(
'pipelines', None
)
# INPUT glGetProgramPipelineInfoLogEXT.infoLog size not checked against bufSize
glGetProgramPipelineInfoLogEXT=wrapper.wrapper(glGetProgramPipelineInfoLogEXT).setInputArraySize(
'infoLog', None
).setInputArraySize(
'length', 1
)
# INPUT glProgramUniform1fvEXT.value size not checked against count
glProgramUniform1fvEXT=wrapper.wrapper(glProgramUniform1fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform1ivEXT.value size not checked against count
glProgramUniform1ivEXT=wrapper.wrapper(glProgramUniform1ivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2fvEXT.value size not checked against count*2
glProgramUniform2fvEXT=wrapper.wrapper(glProgramUniform2fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2ivEXT.value size not checked against count*2
glProgramUniform2ivEXT=wrapper.wrapper(glProgramUniform2ivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3fvEXT.value size not checked against count*3
glProgramUniform3fvEXT=wrapper.wrapper(glProgramUniform3fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3ivEXT.value size not checked against count*3
glProgramUniform3ivEXT=wrapper.wrapper(glProgramUniform3ivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4fvEXT.value size not checked against count*4
glProgramUniform4fvEXT=wrapper.wrapper(glProgramUniform4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4ivEXT.value size not checked against count*4
glProgramUniform4ivEXT=wrapper.wrapper(glProgramUniform4ivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2fvEXT.value size not checked against count*4
glProgramUniformMatrix2fvEXT=wrapper.wrapper(glProgramUniformMatrix2fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3fvEXT.value size not checked against count*9
glProgramUniformMatrix3fvEXT=wrapper.wrapper(glProgramUniformMatrix3fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4fvEXT.value size not checked against count*16
glProgramUniformMatrix4fvEXT=wrapper.wrapper(glProgramUniformMatrix4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform1uivEXT.value size not checked against count
glProgramUniform1uivEXT=wrapper.wrapper(glProgramUniform1uivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2uivEXT.value size not checked against count*2
glProgramUniform2uivEXT=wrapper.wrapper(glProgramUniform2uivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3uivEXT.value size not checked against count*3
glProgramUniform3uivEXT=wrapper.wrapper(glProgramUniform3uivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4uivEXT.value size not checked against count*4
glProgramUniform4uivEXT=wrapper.wrapper(glProgramUniform4uivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x3fvEXT.value size not checked against count*6
glProgramUniformMatrix2x3fvEXT=wrapper.wrapper(glProgramUniformMatrix2x3fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x2fvEXT.value size not checked against count*6
glProgramUniformMatrix3x2fvEXT=wrapper.wrapper(glProgramUniformMatrix3x2fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x4fvEXT.value size not checked against count*8
glProgramUniformMatrix2x4fvEXT=wrapper.wrapper(glProgramUniformMatrix2x4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x2fvEXT.value size not checked against count*8
glProgramUniformMatrix4x2fvEXT=wrapper.wrapper(glProgramUniformMatrix4x2fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x4fvEXT.value size not checked against count*12
glProgramUniformMatrix3x4fvEXT=wrapper.wrapper(glProgramUniformMatrix3x4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x3fvEXT.value size not checked against count*12
glProgramUniformMatrix4x3fvEXT=wrapper.wrapper(glProgramUniformMatrix4x3fvEXT).setInputArraySize(
'value', None
)
### END AUTOGENERATED SECTION
|
PypiClean
|
/featureform-enterprise-0.10.3.tar.gz/featureform-enterprise-0.10.3/src/featureform/dashboard/out/_next/static/chunks/react-syntax-highlighter_languages_refractor_racket.bee31c56faa733b2.js
|
"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[4213,5085],{32168:function(a,b,c){var d=c(9997);function e(a){a.register(d),a.languages.racket=a.languages.extend("scheme",{"lambda-parameter":{pattern:/([(\[]lambda\s+[(\[])[^()\[\]'\s]+/,lookbehind:!0}}),a.languages.insertBefore("racket","string",{lang:{pattern:/^#lang.+/m,greedy:!0,alias:"keyword"}}),a.languages.rkt=a.languages.racket}a.exports=e,e.displayName="racket",e.aliases=["rkt"]},9997:function(a){function b(a){var b;(b=a).languages.scheme={comment:/;.*|#;\s*(?:\((?:[^()]|\([^()]*\))*\)|\[(?:[^\[\]]|\[[^\[\]]*\])*\])|#\|(?:[^#|]|#(?!\|)|\|(?!#)|#\|(?:[^#|]|#(?!\|)|\|(?!#))*\|#)*\|#/,string:{pattern:/"(?:[^"\\]|\\.)*"/,greedy:!0},symbol:{pattern:/'[^()\[\]#'\s]+/,greedy:!0},char:{pattern:/#\\(?:[ux][a-fA-F\d]+\b|[-a-zA-Z]+\b|[\uD800-\uDBFF][\uDC00-\uDFFF]|\S)/,greedy:!0},"lambda-parameter":[{pattern:/((?:^|[^'`#])[(\[]lambda\s+)(?:[^|()\[\]'\s]+|\|(?:[^\\|]|\\.)*\|)/,lookbehind:!0},{pattern:/((?:^|[^'`#])[(\[]lambda\s+[(\[])[^()\[\]']+/,lookbehind:!0}],keyword:{pattern:/((?:^|[^'`#])[(\[])(?:begin|case(?:-lambda)?|cond(?:-expand)?|define(?:-library|-macro|-record-type|-syntax|-values)?|defmacro|delay(?:-force)?|do|else|except|export|guard|if|import|include(?:-ci|-library-declarations)?|lambda|let(?:rec)?(?:-syntax|-values|\*)?|let\*-values|only|parameterize|prefix|(?:quasi-?)?quote|rename|set!|syntax-(?:case|rules)|unless|unquote(?:-splicing)?|when)(?=[()\[\]\s]|$)/,lookbehind:!0},builtin:{pattern:/((?:^|[^'`#])[(\[])(?:abs|and|append|apply|assoc|ass[qv]|binary-port\?|boolean=?\?|bytevector(?:-append|-copy|-copy!|-length|-u8-ref|-u8-set!|\?)?|caar|cadr|call-with-(?:current-continuation|port|values)|call\/cc|car|cdar|cddr|cdr|ceiling|char(?:->integer|-ready\?|\?|<\?|<=\?|=\?|>\?|>=\?)|close-(?:input-port|output-port|port)|complex\?|cons|current-(?:error|input|output)-port|denominator|dynamic-wind|eof-object\??|eq\?|equal\?|eqv\?|error|error-object(?:-irritants|-message|\?)|eval|even\?|exact(?:-integer-sqrt|-integer\?|\?)?|expt|features|file-error\?|floor(?:-quotient|-remainder|\/)?|flush-output-port|for-each|gcd|get-output-(?:bytevector|string)|inexact\??|input-port(?:-open\?|\?)|integer(?:->char|\?)|lcm|length|list(?:->string|->vector|-copy|-ref|-set!|-tail|\?)?|make-(?:bytevector|list|parameter|string|vector)|map|max|member|memq|memv|min|modulo|negative\?|newline|not|null\?|number(?:->string|\?)|numerator|odd\?|open-(?:input|output)-(?:bytevector|string)|or|output-port(?:-open\?|\?)|pair\?|peek-char|peek-u8|port\?|positive\?|procedure\?|quotient|raise|raise-continuable|rational\?|rationalize|read-(?:bytevector|bytevector!|char|error\?|line|string|u8)|real\?|remainder|reverse|round|set-c[ad]r!|square|string(?:->list|->number|->symbol|->utf8|->vector|-append|-copy|-copy!|-fill!|-for-each|-length|-map|-ref|-set!|\?|<\?|<=\?|=\?|>\?|>=\?)?|substring|symbol(?:->string|\?|=\?)|syntax-error|textual-port\?|truncate(?:-quotient|-remainder|\/)?|u8-ready\?|utf8->string|values|vector(?:->list|->string|-append|-copy|-copy!|-fill!|-for-each|-length|-map|-ref|-set!|\?)?|with-exception-handler|write-(?:bytevector|char|string|u8)|zero\?)(?=[()\[\]\s]|$)/,lookbehind:!0},operator:{pattern:/((?:^|[^'`#])[(\[])(?:[-+*%/]|[<>]=?|=>?)(?=[()\[\]\s]|$)/,lookbehind:!0},number:{pattern:RegExp(function(a){for(var b in a)a[b]=a[b].replace(/<[\w\s]+>/g,function(b){return"(?:"+a[b].trim()+")"});return a[b]}({"<ureal dec>":/\d+(?:\/\d+)|(?:\d+(?:\.\d*)?|\.\d+)(?:[esfdl][+-]?\d+)?/.source,"<real dec>":/[+-]?<ureal 
dec>|[+-](?:inf|nan)\.0/.source,"<imaginary dec>":/[+-](?:<ureal dec>|(?:inf|nan)\.0)?i/.source,"<complex dec>":/<real dec>(?:@<real dec>|<imaginary dec>)?|<imaginary dec>/.source,"<num dec>":/(?:#d(?:#[ei])?|#[ei](?:#d)?)?<complex dec>/.source,"<ureal box>":/[0-9a-f]+(?:\/[0-9a-f]+)?/.source,"<real box>":/[+-]?<ureal box>|[+-](?:inf|nan)\.0/.source,"<imaginary box>":/[+-](?:<ureal box>|(?:inf|nan)\.0)?i/.source,"<complex box>":/<real box>(?:@<real box>|<imaginary box>)?|<imaginary box>/.source,"<num box>":/#[box](?:#[ei])?|(?:#[ei])?#[box]<complex box>/.source,"<number>":/(^|[()\[\]\s])(?:<num dec>|<num box>)(?=[()\[\]\s]|$)/.source}),"i"),lookbehind:!0},boolean:{pattern:/(^|[()\[\]\s])#(?:[ft]|false|true)(?=[()\[\]\s]|$)/,lookbehind:!0},function:{pattern:/((?:^|[^'`#])[(\[])(?:[^|()\[\]'\s]+|\|(?:[^\\|]|\\.)*\|)(?=[()\[\]\s]|$)/,lookbehind:!0},identifier:{pattern:/(^|[()\[\]\s])\|(?:[^\\|]|\\.)*\|(?=[()\[\]\s]|$)/,lookbehind:!0,greedy:!0},punctuation:/[()\[\]']/}}a.exports=b,b.displayName="scheme",b.aliases=[]}}])
|
PypiClean
|
/imerit_ango-1.2.3-py3-none-any.whl/imerit_ango/plugins.py
|
import datetime
import logging
import os
import time
from io import BytesIO
from typing import Callable, Tuple
import queue
from urllib.parse import urlparse
import requests
import socketio
from apscheduler.schedulers.background import BackgroundScheduler
from imerit_ango.plugin_logger import PluginLogger
from imerit_ango.sdk import SDK
try:
import asyncio
except ImportError:
import trollius as asyncio
LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
logging.basicConfig(level=LOGLEVEL)
class Plugin(socketio.ClientNamespace):
def __init__(self, id: str, secret: str, callback: Callable):
super().__init__('/plugin')
self.id = id
self.secret = secret
self.scheduler = BackgroundScheduler()
self.scheduler.add_job(self.heartbeat, 'interval', seconds=60)
self.scheduler.start()
self.logger = logging.getLogger("plugin")
self.logger.setLevel(LOGLEVEL)
self.callback = callback
self.loop = asyncio.get_event_loop()
def on_connect(self):
self.heartbeat()
self.logger.warning("Connected")
def on_disconnect(self):
self.logger.warning("Disconnected")
_connect(self, self.client.connection_url)
def heartbeat(self):
try:
self.emit('heartbeat', {"id": self.id, "secret": self.secret})
except Exception as e:
self.logger.critical(e)
os._exit(1)
self.logger.info("Heartbeat at %s" % str(time.time()))
def on_plugin(self, data):
data["logger"] = self._get_logger(data)
data["batches"] = data.get('tags', [])
response = {
"response": self.callback(**data),
"session": data.get("session", "")
}
self.emit('response', response)
def _get_logger(self, data):
org_id = data.get("orgId", "")
run_by = data.get("runBy", "")
session = data.get("session", "")
logger = PluginLogger("logger", self.id, org_id, run_by, session, self)
return logger
def start(self):
asyncio.get_event_loop().run_forever()
class ExportPlugin(Plugin):
def __init__(self, id: str, secret: str, callback: Callable[[str, dict], Tuple[str, BytesIO]],
host="https://imeritapi.ango.ai", version: str = "v2"):
super().__init__(id, secret, callback)
self.host = host
self.version = version
def on_plugin(self, data):
"""
:param data: {project_id: str, assignees: List[str] = None, completed_at: List[datetime.datetime] = None,
updated_at: List[datetime.datetime] = None, tags: List[str] = None}
:return:
"""
completed_at = None
updated_at = None
project_id = data.get('projectId')
logger = super()._get_logger(data)
api_key = data.get('apiKey')
sdk = SDK(api_key=api_key, host=self.host)
if data.get("completed_at", None):
completed_at = [datetime.datetime.fromisoformat(data.completed_at[0]),
datetime.datetime.fromisoformat(data.completed_at[1])]
if data.get("updated_at", None):
updated_at = [datetime.datetime.fromisoformat(data.updated_at[0]),
datetime.datetime.fromisoformat(data.updated_at[1])]
try:
if self.version == 'v3':
(json_export, num_lines) = sdk.exportV3(project_id, batches=data.get('batches', None),
stage=data.get('stage', None), format="ndjson")
data["numTasks"] = num_lines
else:
json_export = sdk.export(project_id, data.get('assignees', None), completed_at=completed_at,
updated_at=updated_at, batches=data.get('batches', None),
stage=data.get('stage', None))
except Exception as e:
logger.error(f"Error calling sdk.export: {e}")
return
data["jsonExport"] = json_export
data["logger"] = logger
file_name, export_bytes = self.callback(**data)
upload_url = sdk._get_upload_url(file_name)
signed_url = sdk._get_signed_url(upload_url)
try:
upload_resp = requests.put(upload_url, data=export_bytes.getvalue())
upload_resp.raise_for_status()
except requests.HTTPError as http_err:
logger.error(f"HTTP error occurred: {http_err}")
except Exception as err:
logger.error(f"Other error occurred: {err}")
else:
response = {
"export": True,
"response": signed_url,
"session": data.get("session", "")
}
self.emit('response', response)
class ModelPlugin(Plugin):
def __init__(self, id: str, secret: str, callback: Callable, host="https://imeritapi.ango.ai", concurrency=1):
super().__init__(id, secret, callback)
self.host = host
self.concurrency = concurrency
self.queue = queue.Queue()
async def work(self):
while True:
data = self.queue.get()
data["batches"] = data.get('tags', [])
api_key = data.get('apiKey')
task_id = data.get('taskId')
sdk = SDK(api_key=api_key, host=self.host)
answer = self.callback(**data)
sdk._annotate(task_id, answer)
def on_plugin(self, data):
workflow = data.get('workflow')
if not workflow:
return super().on_plugin(data)
self.queue.put(data)
def start(self):
tasks = [self.work() for i in range(self.concurrency)]
future = asyncio.gather(*tasks)
self.loop.run_until_complete(future)
class FileExplorerPlugin(Plugin):
def __init__(self, id: str, secret: str, callback: Callable):
super().__init__(id, secret, callback)
class BatchModelPlugin(Plugin):
def __init__(self, id: str, secret: str, callback: Callable):
super().__init__(id, secret, callback)
class InputPlugin(Plugin):
def __init__(self, id: str, secret: str, callback: Callable):
super().__init__(id, secret, callback)
class MarkdownPlugin(Plugin):
def __init__(self, id: str, secret: str, callback: Callable):
super().__init__(id, secret, callback)
def _connect(plugin, host):
try:
sio = socketio.Client(logger=logging.getLogger("plugin"), reconnection=False)
sio.register_namespace(plugin)
sio.connect(host, namespaces=["/plugin"], transports=["websocket"], wait=True)
except Exception as e:
logging.getLogger().critical(e)
os._exit(1)
def run(plugin, host="https://plugin.imerit.ango.ai"):
_connect(plugin, host)
try:
plugin.start()
except (KeyboardInterrupt, SystemExit):
logging.getLogger().warning("Plugin Stopped")
os._exit(1)
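# A minimal usage sketch (hypothetical plugin id/secret; the callback receives the
# keyword arguments that on_plugin assembles, including "logger" and "batches"):
#
#     def my_callback(**data):
#         data["logger"].info("plugin invoked")
#         return {"ok": True}
#
#     plugin = Plugin("PLUGIN_ID", "PLUGIN_SECRET", my_callback)
#     run(plugin)  # connects to the default host and blocks in the event loop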
|
PypiClean
|
/DeCAF-2.0.0.tar.gz/DeCAF-2.0.0/decaf/toolkits/ob.py
|
"""OpenBabel toolkit for DeCAF"""
from decaf import PHARS, Pharmacophore
import pybel
import openbabel as ob
import numpy as np
from collections import deque
import math
PATTERNS = {phar: pybel.Smarts(smarts) for (phar, smarts) in PHARS.items()}
def __count_bonds(a1, a2, exclude):
"""Count number of bonds between two pharmacophore points, if the shortest
path does not contain any other pharmacophore point.
Args:
a1, a2 (OBAtom): source and target atoms
exclude (list): atoms (ids) that cannot be in the shortest path
Returns:
int: number of bonds in path or -1 if there is no path between a1 and a2
"""
visited = []
bonds_nr = -1
queue = deque([(a1, 0)])
while queue:
atom, depth = queue.popleft()
idx = atom.GetIdx()
visited.append(idx)
if atom == a2:
bonds_nr = depth
break
else:
for neighbor in ob.OBAtomAtomIter(atom):
if neighbor.GetIdx() not in visited and neighbor.GetIdx() not in exclude:
queue.append((neighbor, depth+1))
return bonds_nr
def phar_from_mol(ligand):
"""Create Pharmacophore from given pybel.Molecule object."""
if not isinstance(ligand, pybel.Molecule):
raise TypeError("Invalid ligand! Expected pybel.Molecule object, got "
"%s instead" % type(ligand).__name__)
matches = {}
for (phar, pattern) in PATTERNS.items():
atoms = list(zip(*pattern.findall(ligand)))
if len(atoms) > 0:
matches[phar] = list(atoms[0])
else:
matches[phar] = []
points = {} # graph ids of matched atoms
nodes = []
idx = 0
for (phar, atoms) in matches.items():
for atom in atoms:
if atom in points:
nodes[points[atom]]["type"][phar] = 1.0
else:
nodes.append({"label": atom, "type": {phar: 1.0},
"freq": 1.0})
points[atom] = idx
idx += 1
edges = np.zeros((idx, idx))
keys = sorted(points.keys())
for i in range(len(keys)):
for j in range(i):
dist = float(__count_bonds(ligand.atoms[keys[i]-1].OBAtom,
ligand.atoms[keys[j]-1].OBAtom,
[keys[k] for k in range(len(keys)) if
k not in [i, j]]))
if dist > -1:
edges[points[keys[i]], points[keys[j]]] = dist
edges[points[keys[j]], points[keys[i]]] = dist
if ligand.title == "":
return Pharmacophore(nodes, edges, molecules=1.0)
else:
return Pharmacophore(nodes, edges, molecules=1.0, title=ligand.title)
def layout(p):
"""Calculate points positions for depiction of Pharmacophore p using OpenBabel."""
if not isinstance(p, Pharmacophore):
raise TypeError("Expected Pharmacophore object, got %s instead" %
type(p).__name__)
positions = np.zeros((p.numnodes, 2))
m = pybel.Molecule(ob.OBMol())
for i in range(p.numnodes):
m.OBMol.NewAtom()
idx = p.numnodes + 1
for i in range(p.numnodes):
for j in range(i):
if p.edges[i, j] > 0:
tmp = int(math.ceil(p.edges[i, j])) - 1
prev = i + 1
#add invisible atoms to get right distance
for k in range(tmp):
atom = m.OBMol.NewAtom(idx)
atom.SetHyb(1)
m.OBMol.AddBond(prev, idx, 1)
prev = idx
idx += 1
m.OBMol.AddBond(prev, j + 1, 1)
m.draw(show=False, update=True)
for i in range(p.numnodes):
positions[i][0] = m.atoms[i].coords[0]
positions[i][1] = m.atoms[i].coords[1]
return positions
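# A minimal usage sketch (assumes OpenBabel's pybel bindings are installed):
#
#     ligand = pybel.readstring("smi", "CCO")  # ethanol from a SMILES string
#     phar = phar_from_mol(ligand)
#     positions = layout(phar)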
|
PypiClean
|
/py_lav-1.12.1.tar.gz/py_lav-1.12.1/pylav/storage/migrations/low_level/v_1_0_0.py
|
from __future__ import annotations
import contextlib
from collections import defaultdict
from typing import TYPE_CHECKING
import asyncpg
from asyncpg import Connection
from dacite import from_dict
from packaging.version import parse
from pylav.compat import json
from pylav.constants.playlists import BUNDLED_PLAYLIST_IDS
from pylav.constants.versions import VERSION_1_0_0
from pylav.nodes.api.responses.track import Track
from pylav.players.tracks.decoder import decode_track
from pylav.storage.database.tables.config import LibConfigRow
from pylav.storage.database.tables.nodes import NodeRow
from pylav.storage.database.tables.players import PlayerRow
from pylav.storage.database.tables.playlists import PlaylistRow
from pylav.storage.database.tables.queries import QueryRow
from pylav.storage.database.tables.tracks import TrackRow
if TYPE_CHECKING:
from pylav.storage.controllers.config import ConfigController
async def run_playlist_migration_v_1_0_0(connection: Connection) -> list[asyncpg.Record] | None:
    """
    Runs the playlist migration for version 1.0.0.
    """
HAS_COLUMN = """
SELECT EXISTS (SELECT 1
FROM information_schema.columns
WHERE table_name='playlist' AND column_name='tracks')
"""
has_playlist_tracks = await connection.fetchval(HAS_COLUMN)
if has_playlist_tracks:
data = await connection.fetch("SELECT * FROM playlist;")
await connection.execute("DROP TABLE playlist;")
return data
async def run_query_migration_v_1_0_0(connection: Connection) -> list[asyncpg.Record] | None:
"""
    Runs the query migration for version 1.0.0.
"""
HAS_COLUMN = """
SELECT EXISTS (SELECT 1
FROM information_schema.columns
WHERE table_name='query' AND column_name='tracks')
"""
has_query_tracks = await connection.fetchval(HAS_COLUMN)
if has_query_tracks:
data = await connection.fetch("SELECT * FROM query;")
await connection.execute("DROP TABLE query;")
return data
async def run_player_config_v_1_0_0(connection: Connection) -> list[asyncpg.Record]:
"""
Migrates player config.
"""
has_column = """
SELECT EXISTS (SELECT 1
FROM information_schema.columns
WHERE table_name='version' AND column_name='version')
"""
has_version_column = await connection.fetchval(has_column)
if not has_version_column:
return []
version = await connection.fetchval("SELECT version from version;")
if version is None:
return []
version = parse(version)
if (not version) or version < VERSION_1_0_0:
return await connection.fetch("SELECT * FROM player;")
return []
async def run_node_config_v_1_0_0(connection: Connection) -> list[asyncpg.Record]:
"""
    Migrates node config.
"""
has_column = """
SELECT EXISTS (SELECT 1
FROM information_schema.columns
WHERE table_name='version' AND column_name='version')
"""
has_version_column = await connection.fetchval(has_column)
if not has_version_column:
return []
version = await connection.fetchval("SELECT version from version;")
if version is None:
return []
version = parse(version)
if (not version) or version < VERSION_1_0_0:
return await connection.fetch("SELECT * FROM node;")
return []
async def run_lib_config_v_1_0_0(connection: Connection) -> list[asyncpg.Record] | None:
    """
    Migrates lib config.
    """
has_column = """
SELECT EXISTS (SELECT 1
FROM information_schema.columns
WHERE table_name='version' AND column_name='version')
"""
has_version_column = await connection.fetchval(has_column)
if not has_version_column:
return []
version = await connection.fetchval("SELECT version from version;")
if version is None:
return []
version = parse(version)
if (not version) or version < VERSION_1_0_0:
return await connection.fetch("SELECT * FROM lib_config;")
async def migrate_playlists_v_1_0_0(playlists: list[asyncpg.Record]) -> None:
"""Runs playlist migration for version 1.0.0."""
for playlist in playlists:
if playlist["id"] in BUNDLED_PLAYLIST_IDS:
continue
defaults = {
PlaylistRow.name: playlist["name"],
PlaylistRow.scope: playlist["scope"],
PlaylistRow.author: playlist["author"],
PlaylistRow.url: playlist["url"],
}
playlist_row = await PlaylistRow.objects().get_or_create(PlaylistRow.id == playlist["id"], defaults)
# noinspection PyProtectedMember
if not playlist_row._was_created:
await PlaylistRow.update(defaults).where(PlaylistRow.id == playlist["id"])
new_tracks = []
# TODO: Optimize this, after https://github.com/piccolo-orm/piccolo/discussions/683 is answered or fixed
tracks = json.loads(playlist["tracks"]) if playlist["tracks"] else []
_temp = defaultdict(list)
for x in tracks:
_temp[type(x)].append(x)
for entry_type, entry_list in _temp.items():
if entry_type == str:
for track in entry_list:
with contextlib.suppress(Exception):
# TODO: Make an API call to the public node?
new_tracks.append(await TrackRow.get_or_create(decode_track(track)))
elif entry_type == dict:
for track_object in entry_list:
new_tracks.append(await TrackRow.get_or_create(from_dict(data_class=Track, data=track_object)))
else:
for track_object in entry_list:
new_tracks.append(await TrackRow.get_or_create(track_object))
if new_tracks:
await playlist_row.add_m2m(*new_tracks, m2m=PlaylistRow.tracks)
async def migrate_queries_v_1_0_0(queries: list[asyncpg.Record]) -> None:
"""Processes queries for migration to version 1.0.0."""
for query in queries:
defaults = {QueryRow.name: query["name"]}
query_row = await QueryRow.objects().get_or_create(QueryRow.identifier == query["identifier"], defaults)
# noinspection PyProtectedMember
if not query_row._was_created:
await QueryRow.update(defaults).where(QueryRow.identifier == query["identifier"])
new_tracks = []
# TODO: Optimize this, after https://github.com/piccolo-orm/piccolo/discussions/683 is answered or fixed
tracks = json.loads(query["tracks"]) if query["tracks"] else []
_temp = defaultdict(list)
for x in tracks:
_temp[type(x)].append(x)
for entry_type, entry_list in _temp.items():
if entry_type == str:
for track in entry_list:
with contextlib.suppress(Exception):
# TODO: Make an API call to the public node?
new_tracks.append(await TrackRow.get_or_create(decode_track(track)))
elif entry_type == dict:
for track_object in entry_list:
new_tracks.append(await TrackRow.get_or_create(from_dict(data_class=Track, data=track_object)))
else:
for track_object in entry_list:
new_tracks.append(await TrackRow.get_or_create(track_object))
if new_tracks:
await query_row.add_m2m(*new_tracks, m2m=QueryRow.tracks)
async def migrate_player_config_v_1_0_0(players: list[asyncpg.Record]) -> None:
"""Processes player config for migration to version 1.0.0."""
bulk_insert = []
for player in players:
data = {
"id": player["id"],
"bot": player["bot"],
"volume": player["volume"],
"max_volume": player["max_volume"],
"auto_play_playlist_id": player["auto_play_playlist_id"],
"text_channel_id": player["text_channel_id"],
"notify_channel_id": player["notify_channel_id"],
"forced_channel_id": player["forced_channel_id"],
"repeat_current": player["repeat_current"],
"repeat_queue": player["repeat_queue"],
"shuffle": player["shuffle"],
"auto_shuffle": player["auto_shuffle"],
"auto_play": player["auto_play"],
"self_deaf": player["self_deaf"],
"empty_queue_dc": json.loads(player["empty_queue_dc"]),
"alone_dc": json.loads(player["alone_dc"]),
"alone_pause": json.loads(player["alone_pause"]),
"extras": json.loads(player["extras"]),
"effects": json.loads(player["effects"]),
"dj_users": player["dj_users"],
"dj_roles": player["dj_roles"],
}
if player["id"] == 0:
data = {
PlayerRow.volume: player["volume"],
PlayerRow.max_volume: player["max_volume"],
PlayerRow.auto_play_playlist_id: player["auto_play_playlist_id"],
PlayerRow.text_channel_id: player["text_channel_id"],
PlayerRow.notify_channel_id: player["notify_channel_id"],
PlayerRow.forced_channel_id: player["forced_channel_id"],
PlayerRow.repeat_current: player["repeat_current"],
PlayerRow.repeat_queue: player["repeat_queue"],
PlayerRow.shuffle: player["shuffle"],
PlayerRow.auto_shuffle: player["auto_shuffle"],
PlayerRow.auto_play: player["auto_play"],
PlayerRow.self_deaf: player["self_deaf"],
PlayerRow.empty_queue_dc: json.loads(player["empty_queue_dc"]),
PlayerRow.alone_dc: json.loads(player["alone_dc"]),
PlayerRow.alone_pause: json.loads(player["alone_pause"]),
PlayerRow.extras: json.loads(player["extras"]),
PlayerRow.effects: json.loads(player["effects"]),
PlayerRow.dj_users: player["dj_users"],
PlayerRow.dj_roles: player["dj_roles"],
}
playerobj = await PlayerRow.objects().get_or_create(
(PlayerRow.id == player["id"]) & (PlayerRow.bot == player["bot"]), defaults=data
)
if not playerobj._was_created:
await PlayerRow.update(data).where((PlayerRow.id == player["id"]) & (PlayerRow.bot == player["bot"]))
else:
bulk_insert.append(PlayerRow(**data))
if bulk_insert:
await PlayerRow.insert(*bulk_insert)
async def migrate_node_config_v_1_0_0(nodes: list[asyncpg.Record]) -> None:
"""Processes node config for migration to version 1.0.0."""
for node in nodes:
data = {
NodeRow.name: node["name"],
NodeRow.ssl: node["ssl"],
NodeRow.resume_timeout: node["resume_timeout"],
NodeRow.reconnect_attempts: node["reconnect_attempts"],
NodeRow.search_only: node["search_only"],
NodeRow.managed: node["managed"],
NodeRow.disabled_sources: node["disabled_sources"],
NodeRow.extras: json.loads(node["extras"]),
NodeRow.yaml: json.loads(node["yaml"]),
}
node_obj = await NodeRow.objects().get_or_create(NodeRow.id == node["id"], defaults=data)
if not node_obj._was_created:
await NodeRow.update(data).where(NodeRow.id == node["id"])
async def migrate_lib_config_v_1_0_0(configs: list[asyncpg.Record]) -> None:
"""Processes lib config for migration to version 1.0.0."""
for config in configs:
data = {
LibConfigRow.config_folder: config["config_folder"],
LibConfigRow.java_path: config["java_path"],
LibConfigRow.enable_managed_node: config["enable_managed_node"],
LibConfigRow.auto_update_managed_nodes: config["auto_update_managed_nodes"],
LibConfigRow.localtrack_folder: config["localtrack_folder"],
LibConfigRow.download_id: config["download_id"],
LibConfigRow.update_bot_activity: config["update_bot_activity"],
LibConfigRow.use_bundled_pylav_external: config["use_bundled_pylav_external"],
LibConfigRow.use_bundled_lava_link_external: False,
LibConfigRow.extras: json.loads(config["extras"]),
LibConfigRow.next_execution_update_bundled_playlists: config["next_execution_update_bundled_playlists"],
LibConfigRow.next_execution_update_bundled_external_playlists: config[
"next_execution_update_bundled_external_playlists"
],
LibConfigRow.next_execution_update_external_playlists: config["next_execution_update_external_playlists"],
}
config_obj = await LibConfigRow.objects().get_or_create(
(LibConfigRow.id == config["id"]) & (LibConfigRow.bot == config["bot"]), defaults=data
)
if not config_obj._was_created:
await LibConfigRow.update(data).where(
(LibConfigRow.id == config["id"]) & (LibConfigRow.bot == config["bot"])
)
async def low_level_v_1_0_0_migration(
con: Connection, migration_data: dict[str, dict[str, list[asyncpg.Record]] | None], migrator: ConfigController
) -> None:
"""Runs the low level migration for version 1.0.0."""
version = "1.0.0"
migration_data[version] = {}
await low_level_v_1_0_0_playlists(con, migration_data, version)
await low_level_v_1_0_0_queries(con, migration_data, version)
await low_level_v_1_0_0_players(con, migration_data, version)
await low_level_v_1_0_0_lib(con, migration_data, version)
await low_level_v_1_0_0_nodes(con, migration_data, version)
if migration_data[version]:
await migrator.reset_database()
async def low_level_v_1_0_0_nodes(
con: Connection, migration_data: dict[str, dict[str, list[asyncpg.Record]] | None], version: str
) -> None:
"""Runs the low level migration of nodes for version 1.0.0."""
node_config_data_1000 = await run_node_config_v_1_0_0(con)
if node_config_data_1000:
migration_data[version]["node"] = node_config_data_1000
async def low_level_v_1_0_0_lib(
con: Connection, migration_data: dict[str, dict[str, list[asyncpg.Record]] | None], version: str
) -> None:
"""Runs the low level migration of lib config for version 1.0.0."""
lib_config_data_1000 = await run_lib_config_v_1_0_0(con)
if lib_config_data_1000:
migration_data[version]["lib"] = lib_config_data_1000
async def low_level_v_1_0_0_players(
con: Connection, migration_data: dict[str, dict[str, list[asyncpg.Record]] | None], version: str
) -> None:
"""Runs the low level migration of players for version 1.0.0."""
player_data_1000 = await run_player_config_v_1_0_0(con)
if player_data_1000:
migration_data[version]["player"] = player_data_1000
async def low_level_v_1_0_0_queries(
con: Connection, migration_data: dict[str, dict[str, list[asyncpg.Record]] | None], version: str
) -> None:
"""Runs the low level migration of queries for version 1.0.0."""
query_data_1000 = await run_query_migration_v_1_0_0(con)
if query_data_1000:
migration_data[version]["query"] = query_data_1000
async def low_level_v_1_0_0_playlists(
con: Connection, migration_data: dict[str, dict[str, list[asyncpg.Record]] | None], version: str
) -> None:
"""Runs the low level migration of playlists for version 1.0.0."""
playlist_data_1000 = await run_playlist_migration_v_1_0_0(con)
if playlist_data_1000:
migration_data[version]["playlist"] = playlist_data_1000
|
PypiClean
|
/injective_py-0.8rc0-py3-none-any.whl/pyinjective/proto/cosmos/autocli/v1/query_pb2_grpc.py
|
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from cosmos.autocli.v1 import query_pb2 as cosmos_dot_autocli_dot_v1_dot_query__pb2
class QueryStub(object):
"""RemoteInfoService provides clients with the information they need
    to dynamically build CLI clients for remote chains.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AppOptions = channel.unary_unary(
'/cosmos.autocli.v1.Query/AppOptions',
request_serializer=cosmos_dot_autocli_dot_v1_dot_query__pb2.AppOptionsRequest.SerializeToString,
response_deserializer=cosmos_dot_autocli_dot_v1_dot_query__pb2.AppOptionsResponse.FromString,
)
class QueryServicer(object):
"""RemoteInfoService provides clients with the information they need
    to dynamically build CLI clients for remote chains.
"""
def AppOptions(self, request, context):
"""AppOptions returns the autocli options for all of the modules in an app.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_QueryServicer_to_server(servicer, server):
rpc_method_handlers = {
'AppOptions': grpc.unary_unary_rpc_method_handler(
servicer.AppOptions,
request_deserializer=cosmos_dot_autocli_dot_v1_dot_query__pb2.AppOptionsRequest.FromString,
response_serializer=cosmos_dot_autocli_dot_v1_dot_query__pb2.AppOptionsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'cosmos.autocli.v1.Query', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Query(object):
"""RemoteInfoService provides clients with the information they need
    to dynamically build CLI clients for remote chains.
"""
@staticmethod
def AppOptions(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cosmos.autocli.v1.Query/AppOptions',
cosmos_dot_autocli_dot_v1_dot_query__pb2.AppOptionsRequest.SerializeToString,
cosmos_dot_autocli_dot_v1_dot_query__pb2.AppOptionsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
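# --- Hedged usage sketch (not part of the generated module) ---
# Querying AppOptions through the stub above; the endpoint is an illustrative
# assumption.
if __name__ == "__main__":
    with grpc.insecure_channel("localhost:9090") as channel:
        stub = QueryStub(channel)
        request = cosmos_dot_autocli_dot_v1_dot_query__pb2.AppOptionsRequest()
        print(stub.AppOptions(request))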
|
PypiClean
|
/qpid-python-1.36.0.tar.gz/qpid-python-1.36.0/qpid/messaging/driver.py
|
import socket, struct, sys, time
from logging import getLogger, DEBUG
from qpid import compat
from qpid import sasl
from qpid.concurrency import synchronized
from qpid.datatypes import RangedSet, Serial
from qpid.framing import OpEncoder, SegmentEncoder, FrameEncoder, \
FrameDecoder, SegmentDecoder, OpDecoder
from qpid.messaging import address, transports
from qpid.messaging.constants import UNLIMITED, REJECTED, RELEASED
from qpid.messaging.exceptions import *
from qpid.messaging.message import get_codec, Disposition, Message
from qpid.messaging.endpoints import MangledString
from qpid.ops import *
from qpid.selector import Selector
from qpid.util import URL, default, get_client_properties_with_defaults
from qpid.validator import And, Context, List, Map, Types, Values
from threading import Condition, Thread
log = getLogger("qpid.messaging")
rawlog = getLogger("qpid.messaging.io.raw")
opslog = getLogger("qpid.messaging.io.ops")
def addr2reply_to(addr):
name, subject, options = address.parse(addr)
if options:
type = options.get("node", {}).get("type")
else:
type = None
if type == "topic":
return ReplyTo(name, subject)
else:
return ReplyTo(None, name)
def reply_to2addr(reply_to):
if reply_to.exchange in (None, ""):
return reply_to.routing_key
elif reply_to.routing_key is None:
return "%s; {node: {type: topic}}" % reply_to.exchange
else:
return "%s/%s; {node: {type: topic}}" % (reply_to.exchange, reply_to.routing_key)
class Attachment:
def __init__(self, target):
self.target = target
# XXX
DURABLE_DEFAULT=False
# XXX
class Pattern:
"""
The pattern filter matches the supplied wildcard pattern against a
message subject.
"""
def __init__(self, value):
self.value = value
# XXX: this should become part of the driver
def _bind(self, sst, exchange, queue):
from qpid.ops import ExchangeBind
sst.write_cmd(ExchangeBind(exchange=exchange, queue=queue,
binding_key=self.value.replace("*", "#")))
SUBJECT_DEFAULTS = {
"topic": "#"
}
def noop(): pass
def sync_noop(): pass
class SessionState:
def __init__(self, driver, session, name, channel):
self.driver = driver
self.session = session
self.name = name
self.channel = channel
self.detached = False
self.committing = False
self.aborting = False
# sender state
self.sent = Serial(0)
self.acknowledged = RangedSet()
self.actions = {}
self.min_completion = self.sent
self.max_completion = self.sent
self.results = {}
self.need_sync = False
# receiver state
self.received = None
self.executed = RangedSet()
# XXX: need to periodically exchange completion/known_completion
self.destinations = {}
def write_query(self, query, handler, obj):
id = self.sent
self.write_cmd(query, lambda: handler(self.results.pop(id), obj))
def apply_overrides(self, cmd, overrides):
for k, v in overrides.items():
cmd[k.replace('-', '_')] = v
def write_cmd(self, cmd, action=noop, overrides=None, sync=True):
if overrides:
self.apply_overrides(cmd, overrides)
if action != noop:
cmd.sync = sync
if self.detached:
raise Exception("detached")
cmd.id = self.sent
self.sent += 1
self.actions[cmd.id] = action
self.max_completion = cmd.id
self.write_op(cmd)
self.need_sync = not cmd.sync
def write_cmds(self, cmds, action=noop):
if cmds:
for cmd in cmds[:-1]:
self.write_cmd(cmd)
self.write_cmd(cmds[-1], action)
else:
action()
def write_op(self, op):
op.channel = self.channel
self.driver.write_op(op)
POLICIES = Values("always", "sender", "receiver", "never")
RELIABILITY = Values("unreliable", "at-most-once", "at-least-once",
"exactly-once")
DECLARE = Map({}, restricted=False)
BINDINGS = List(Map({
"exchange": Types(basestring),
"queue": Types(basestring),
"key": Types(basestring),
"arguments": Map({}, restricted=False)
}))
COMMON_OPTS = {
"create": POLICIES,
"delete": POLICIES,
"assert": POLICIES,
"node": Map({
"type": Values("queue", "topic"),
"durable": Types(bool),
"x-declare": DECLARE,
"x-bindings": BINDINGS
}),
"link": Map({
"name": Types(basestring),
"durable": Types(bool),
"reliability": RELIABILITY,
"x-declare": DECLARE,
"x-bindings": BINDINGS,
"x-subscribe": Map({}, restricted=False)
})
}
RECEIVE_MODES = Values("browse", "consume")
SOURCE_OPTS = COMMON_OPTS.copy()
SOURCE_OPTS.update({
"mode": RECEIVE_MODES
})
TARGET_OPTS = COMMON_OPTS.copy()
class LinkIn:
ADDR_NAME = "source"
DIR_NAME = "receiver"
VALIDATOR = Map(SOURCE_OPTS)
def init_link(self, sst, rcv, _rcv):
_rcv.destination = str(rcv.id)
sst.destinations[_rcv.destination] = _rcv
_rcv.draining = False
_rcv.bytes_open = False
_rcv.on_unlink = []
def do_link(self, sst, rcv, _rcv, type, subtype, action):
link_opts = _rcv.options.get("link", {})
if type == "topic":
default_reliability = "unreliable"
else:
default_reliability = "at-least-once"
reliability = link_opts.get("reliability", default_reliability)
declare = link_opts.get("x-declare", {})
subscribe = link_opts.get("x-subscribe", {})
acq_mode = acquire_mode.pre_acquired
if reliability in ("unreliable", "at-most-once"):
rcv._accept_mode = accept_mode.none
else:
rcv._accept_mode = accept_mode.explicit
if type == "topic":
default_name = "%s.%s" % (rcv.session.name, _rcv.destination)
_rcv._queue = link_opts.get("name", default_name)
sst.write_cmd(QueueDeclare(queue=_rcv._queue,
durable=link_opts.get("durable", False),
exclusive=True,
auto_delete=(reliability == "unreliable")),
overrides=declare)
if declare.get("exclusive", True): _rcv.on_unlink = [QueueDelete(_rcv._queue)]
subject = _rcv.subject or SUBJECT_DEFAULTS.get(subtype)
bindings = get_bindings(link_opts, _rcv._queue, _rcv.name, subject)
if not bindings:
sst.write_cmd(ExchangeBind(_rcv._queue, _rcv.name, subject))
elif type == "queue":
_rcv._queue = _rcv.name
if _rcv.options.get("mode", "consume") == "browse":
acq_mode = acquire_mode.not_acquired
bindings = get_bindings(link_opts, queue=_rcv._queue)
sst.write_cmds(bindings)
sst.write_cmd(MessageSubscribe(queue=_rcv._queue,
destination=_rcv.destination,
acquire_mode = acq_mode,
accept_mode = rcv._accept_mode),
overrides=subscribe)
sst.write_cmd(MessageSetFlowMode(_rcv.destination, flow_mode.credit), action)
def do_unlink(self, sst, rcv, _rcv, action=noop):
link_opts = _rcv.options.get("link", {})
reliability = link_opts.get("reliability")
cmds = [MessageCancel(_rcv.destination)]
cmds.extend(_rcv.on_unlink)
    msgs = []  # release messages back to the broker for the closing receiver
msg = rcv.session._pop(rcv)
while msg is not None:
msgs.append(msg)
msg = rcv.session._pop(rcv)
if len(msgs) > 0:
ids = RangedSet(*[m._transfer_id for m in msgs])
log.debug("releasing back messages: %s, as receiver is closing", ids)
cmds.append(MessageRelease(ids, True))
sst.write_cmds(cmds, action)
def del_link(self, sst, rcv, _rcv):
del sst.destinations[_rcv.destination]
class LinkOut:
ADDR_NAME = "target"
DIR_NAME = "sender"
VALIDATOR = Map(TARGET_OPTS)
def init_link(self, sst, snd, _snd):
_snd.closing = False
_snd.pre_ack = False
def do_link(self, sst, snd, _snd, type, subtype, action):
link_opts = _snd.options.get("link", {})
reliability = link_opts.get("reliability", "at-least-once")
_snd.pre_ack = reliability in ("unreliable", "at-most-once")
if type == "topic":
_snd._exchange = _snd.name
_snd._routing_key = _snd.subject
bindings = get_bindings(link_opts, exchange=_snd.name, key=_snd.subject)
elif type == "queue":
_snd._exchange = ""
_snd._routing_key = _snd.name
bindings = get_bindings(link_opts, queue=_snd.name)
sst.write_cmds(bindings, action)
def do_unlink(self, sst, snd, _snd, action=noop):
action()
def del_link(self, sst, snd, _snd):
pass
class Cache:
def __init__(self, ttl):
self.ttl = ttl
self.entries = {}
def __setitem__(self, key, value):
self.entries[key] = time.time(), value
def __getitem__(self, key):
tstamp, value = self.entries[key]
if time.time() - tstamp >= self.ttl:
del self.entries[key]
raise KeyError(key)
else:
return value
def __delitem__(self, key):
del self.entries[key]
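# Hedged illustration of the Cache TTL semantics above: entries expire lazily
# on access; ttl is in seconds and the values below are arbitrary.
#   c = Cache(ttl=60)
#   c["amq.topic"] = ("topic", "topic")  # stored with a timestamp
#   c["amq.topic"]                       # returns the value while fresh
#   # >= 60s later the same lookup drops the entry and raises KeyError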
# XXX
HEADER="!4s4B"
EMPTY_DP = DeliveryProperties()
EMPTY_MP = MessageProperties()
SUBJECT = "qpid.subject"
CLOSED = "CLOSED"
READ_ONLY = "READ_ONLY"
WRITE_ONLY = "WRITE_ONLY"
OPEN = "OPEN"
class Driver:
def __init__(self, connection):
self.connection = connection
self.log_id = "%x" % id(self.connection)
self._lock = self.connection._lock
self._selector = Selector.default()
self._attempts = 0
self._delay = self.connection.reconnect_interval_min
self._reconnect_log = self.connection.reconnect_log
self._host = 0
self._retrying = False
self._next_retry = None
self._transport = None
self._timeout = None
self.engine = None
def _next_host(self):
urls = [URL(u) for u in self.connection.reconnect_urls]
hosts = [(self.connection.host, default(self.connection.port, 5672))] + \
[(u.host, default(u.port, 5672)) for u in urls]
if self._host >= len(hosts):
self._host = 0
self._last_host = hosts[self._host]
if self._host == 0:
self._attempts += 1
self._host = self._host + 1
return self._last_host
def _num_hosts(self):
return len(self.connection.reconnect_urls) + 1
@synchronized
def wakeup(self):
self.dispatch()
self._selector.wakeup()
def start(self):
self._selector.register(self)
def stop(self):
self._selector.unregister(self)
if self._transport:
self.st_closed()
def fileno(self):
return self._transport.fileno()
@synchronized
def reading(self):
"""Called by the Selector I/O thread to determine if the driver needs to
wait on the arrival of network data (call self.readable() callback)
"""
return self._transport is not None and \
self._transport.reading(True)
@synchronized
def writing(self):
"""Called by the Selector I/O thread to determine if it should block
waiting for output bandwidth (call the self.writeable() callback)
"""
return self._transport is not None and \
self._transport.writing(self.engine.pending())
@synchronized
def timing(self):
"""Called by the Selector I/O thread to determine if it should wake up the
    driver (call the timeout() callback)
"""
return self._timeout
@synchronized
def abort(self, exc, info):
"""Called if the Selector I/O thread hits an unrecoverable error and fails.
"""
try:
self.connection.error = exc
log.error("I/O Thread Fatal error: %s\n%s" % (str(exc), info))
except:
pass
def _check_retry_ok(self):
"""We consider a reconnect to have suceeded only when we have received
open-ok from the peer.
If we declared success as soon as the transport connected, then we could get
into an infinite heartbeat loop if the remote process is hung and never
sends us any data. We would fail the connection after 2 missed heartbeats,
reconnect the transport, declare the reconnect ok, then fail again after 2
missed heartbeats and so on.
"""
if self._retrying and self.engine._connected: # Means we have received open-ok.
if self._reconnect_log:
log.warn("reconnect succeeded: %s:%s", *self._last_host)
self._next_retry = None
self._attempts = 0
self._delay = self.connection.reconnect_interval_min
self._retrying = False
@synchronized
def readable(self):
try:
data = self._transport.recv(64*1024)
if data is None:
return
elif data:
rawlog.debug("READ[%s]: %r", self.log_id, data)
self.engine.write(data)
self._check_retry_ok()
else:
self.close_engine()
except socket.error, e:
self.close_engine(ConnectionError(text=str(e)))
self.update_status()
self._notify()
def _notify(self):
if self.connection.error:
self.connection._condition.gc()
self.connection._waiter.notifyAll()
def close_engine(self, e=None):
if e is None:
e = ConnectionError(text="connection aborted")
if (self.connection.reconnect and
(self.connection.reconnect_limit is None or
self.connection.reconnect_limit <= 0 or
self._attempts <= self.connection.reconnect_limit)):
if self._host < self._num_hosts():
delay = 0
else:
delay = self._delay
self._delay = min(2*self._delay,
self.connection.reconnect_interval_max)
self._next_retry = time.time() + delay
if self._reconnect_log:
log.warn("recoverable error[attempt %s]: %s" % (self._attempts, e))
if delay > 0:
log.warn("sleeping %s seconds" % delay)
self._retrying = True
self.engine.close()
else:
self.engine.close(e)
self.schedule()
def update_status(self):
if not self.engine: return False
status = self.engine.status()
return getattr(self, "st_%s" % status.lower())()
def st_closed(self):
# XXX: this log statement seems to sometimes hit when the socket is not connected
# XXX: rawlog.debug("CLOSE[%s]: %s", self.log_id, self._socket.getpeername())
if self._transport: self._transport.close()
self._transport = None
self.engine = None
return True
def st_open(self):
return False
@synchronized
def writeable(self):
notify = False
try:
n = self._transport.send(self.engine.peek())
if n == 0: return
sent = self.engine.read(n)
rawlog.debug("SENT[%s]: %r", self.log_id, sent)
except socket.error, e:
self.close_engine(e)
notify = True
if self.update_status() or notify:
self._notify()
@synchronized
def timeout(self):
self.dispatch()
self.update_status()
self._notify()
self.schedule()
def schedule(self):
times = []
if self.connection.heartbeat:
times.append(time.time() + self.connection.heartbeat)
if self._next_retry:
times.append(self._next_retry)
if times:
self._timeout = min(times)
else:
self._timeout = None
def dispatch(self):
try:
if self._transport is None:
if self.connection._connected and not self.connection.error:
self.connect()
else:
self.engine.dispatch()
except HeartbeatTimeout, e:
self.close_engine(e)
except ContentError, e:
msg = compat.format_exc()
self.connection.error = ContentError(text=msg)
except:
# XXX: Does socket get leaked if this occurs?
msg = compat.format_exc()
self.connection.error = InternalError(text=msg)
def connect(self):
if self._retrying and time.time() < self._next_retry:
return
try:
# XXX: should make this non blocking
host, port = self._next_host()
if self._retrying and self._reconnect_log:
log.warn("trying: %s:%s", host, port)
self.engine = Engine(self.connection)
self.engine.open()
rawlog.debug("OPEN[%s]: %s:%s", self.log_id, host, port)
trans = transports.TRANSPORTS.get(self.connection.transport)
if trans:
self._transport = trans(self.connection, host, port)
else:
raise ConnectError("no such transport: %s" % self.connection.transport)
self.schedule()
except socket.error, e:
self.close_engine(ConnectError(text=str(e)))
DEFAULT_DISPOSITION = Disposition(None)
def get_bindings(opts, queue=None, exchange=None, key=None):
bindings = opts.get("x-bindings", [])
cmds = []
for b in bindings:
exchange = b.get("exchange", exchange)
queue = b.get("queue", queue)
key = b.get("key", key)
args = b.get("arguments", {})
cmds.append(ExchangeBind(queue, exchange, key, args))
return cmds
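# Hedged example of get_bindings above (illustrative option dict); each
# x-bindings entry may override the queue/exchange/key defaults passed in:
#   opts = {"x-bindings": [{"exchange": "amq.match", "key": "sub.#",
#                           "arguments": {"x-match": "any"}}]}
#   get_bindings(opts, queue="my-queue")
#     -> [ExchangeBind("my-queue", "amq.match", "sub.#", {"x-match": "any"})]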
CONNECTION_ERRS = {
  # anything not here (i.e. everything right now) will default to
  # connection error
}
SESSION_ERRS = {
# anything not here will default to session error
error_code.unauthorized_access: UnauthorizedAccess,
error_code.not_found: NotFound,
error_code.resource_locked: ReceiverError,
error_code.resource_limit_exceeded: TargetCapacityExceeded,
error_code.internal_error: ServerError
}
class Engine:
def __init__(self, connection):
self.connection = connection
self.log_id = "%x" % id(self.connection)
self._closing = False
self._connected = False
self._reconnecting = bool(connection.sessions)
self._attachments = {}
self._in = LinkIn()
self._out = LinkOut()
    self.channel_max = 65536  # may be overridden by connection-tune
self._channels = 0
self._sessions = {}
self.address_cache = Cache(self.connection.address_ttl)
self._status = CLOSED
self._buf = ""
self._hdr = ""
# Set _last_in and _last_out here so heartbeats will be timed from the
# beginning of connection if no data is sent/received.
self._last_in = time.time()
self._last_out = time.time()
self._op_enc = OpEncoder()
self._seg_enc = SegmentEncoder()
self._frame_enc = FrameEncoder()
self._frame_dec = FrameDecoder()
self._seg_dec = SegmentDecoder()
self._op_dec = OpDecoder()
self._sasl = sasl.Client()
if self.connection.username:
self._sasl.setAttr("username", self.connection.username)
if self.connection.password:
self._sasl.setAttr("password", self.connection.password)
if self.connection.host:
self._sasl.setAttr("host", self.connection.host)
self._sasl.setAttr("service", self.connection.sasl_service)
if self.connection.sasl_min_ssf is not None:
self._sasl.setAttr("minssf", self.connection.sasl_min_ssf)
if self.connection.sasl_max_ssf is not None:
self._sasl.setAttr("maxssf", self.connection.sasl_max_ssf)
self._sasl.init()
self._sasl_encode = False
self._sasl_decode = False
def _reset(self):
self.connection._transport_connected = False
for ssn in self.connection.sessions.values():
for m in ssn.acked + ssn.unacked + ssn.incoming:
m._transfer_id = None
for snd in ssn.senders:
snd.linked = False
for rcv in ssn.receivers:
rcv.impending = rcv.received
rcv.linked = False
def status(self):
return self._status
def write(self, data):
self._last_in = time.time()
try:
if self._sasl_decode:
data = self._sasl.decode(data)
if len(self._hdr) < 8:
r = 8 - len(self._hdr)
self._hdr += data[:r]
data = data[r:]
if len(self._hdr) == 8:
self.do_header(self._hdr)
self._frame_dec.write(data)
self._seg_dec.write(*self._frame_dec.read())
self._op_dec.write(*self._seg_dec.read())
for op in self._op_dec.read():
self.assign_id(op)
opslog.debug("RCVD[%s]: %r", self.log_id, op)
op.dispatch(self)
self.dispatch()
except MessagingError, e:
self.close(e)
except:
self.close(InternalError(text=compat.format_exc()))
def close(self, e=None):
self._reset()
# We cannot re-establish transactional sessions, they must be aborted.
# We could re-do transactional enqueues, but not dequeues.
for ssn in self.connection.sessions.values():
if ssn.transactional:
if ssn.committing:
ssn.error = TransactionUnknown(text="Transaction outcome unknown due to transport failure")
else:
ssn.error = TransactionAborted(text="Transaction aborted due to transport failure")
ssn.closed = True
if e:
self.connection.error = e
self._status = CLOSED
def assign_id(self, op):
if isinstance(op, Command):
sst = self.get_sst(op)
op.id = sst.received
sst.received += 1
def pending(self):
return len(self._buf)
def read(self, n):
result = self._buf[:n]
self._buf = self._buf[n:]
return result
def peek(self):
return self._buf
def write_op(self, op):
opslog.debug("SENT[%s]: %r", self.log_id, op)
self._op_enc.write(op)
self._seg_enc.write(*self._op_enc.read())
self._frame_enc.write(*self._seg_enc.read())
bytes = self._frame_enc.read()
if self._sasl_encode:
bytes = self._sasl.encode(bytes)
self._buf += bytes
self._last_out = time.time()
def do_header(self, hdr):
cli_major = 0; cli_minor = 10
magic, _, _, major, minor = struct.unpack(HEADER, hdr)
if major != cli_major or minor != cli_minor:
raise VersionError(text="client: %s-%s, server: %s-%s" %
(cli_major, cli_minor, major, minor))
def do_connection_start(self, start):
if self.connection.sasl_mechanisms:
permitted = self.connection.sasl_mechanisms.split()
mechs = [m for m in start.mechanisms if m in permitted]
else:
mechs = start.mechanisms
try:
mech, initial = self._sasl.start(" ".join(mechs))
except sasl.SASLError, e:
if "ANONYMOUS" not in mechs and self.connection.username is None:
_text="Anonymous connections disabled, missing credentials"
else:
_text=str(e)
raise AuthenticationFailure(text=_text)
    client_properties = get_client_properties_with_defaults(provided_client_properties=self.connection.client_properties)
self.write_op(ConnectionStartOk(client_properties=client_properties,
mechanism=mech, response=initial))
def do_connection_secure(self, secure):
resp = self._sasl.step(secure.challenge)
self.write_op(ConnectionSecureOk(response=resp))
def do_connection_tune(self, tune):
# XXX: is heartbeat protocol specific?
if tune.channel_max is not None:
self.channel_max = tune.channel_max
self.write_op(ConnectionTuneOk(heartbeat=self.connection.heartbeat,
channel_max=self.channel_max))
self.write_op(ConnectionOpen())
self._sasl_encode = True
def do_connection_open_ok(self, open_ok):
self.connection.auth_username = self._sasl.auth_username()
self._connected = True
self._sasl_decode = True
self.connection._transport_connected = True
def do_connection_heartbeat(self, hrt):
pass
def do_connection_close(self, close):
self.write_op(ConnectionCloseOk())
if close.reply_code != close_code.normal:
exc = CONNECTION_ERRS.get(close.reply_code, ConnectionError)
self.connection.error = exc(close.reply_code, close.reply_text)
# XXX: should we do a half shutdown on the socket here?
# XXX: we really need to test this, we may end up reporting a
# connection abort after this, if we were to do a shutdown on read
# and stop reading, then we wouldn't report the abort, that's
# probably the right thing to do
def do_connection_close_ok(self, close_ok):
self.close()
def do_session_attached(self, atc):
pass
def do_session_command_point(self, cp):
sst = self.get_sst(cp)
sst.received = cp.command_id
def do_session_completed(self, sc):
sst = self.get_sst(sc)
for r in sc.commands:
sst.acknowledged.add(r.lower, r.upper)
if not sc.commands.empty():
while sst.min_completion in sc.commands:
if sst.actions.has_key(sst.min_completion):
sst.actions.pop(sst.min_completion)()
sst.min_completion += 1
  def do_session_known_completed(self, kcmp):
    sst = self.get_sst(kcmp)
    executed = RangedSet()
    for e in sst.executed.ranges:
      for ke in kcmp.ranges:
        if e.lower in ke and e.upper in ke:
          break
      else:
        executed.add_range(e)
    sst.executed = executed
def do_session_flush(self, sf):
sst = self.get_sst(sf)
if sf.expected:
if sst.received is None:
exp = None
else:
exp = RangedSet(sst.received)
sst.write_op(SessionExpected(exp))
if sf.confirmed:
sst.write_op(SessionConfirmed(sst.executed))
if sf.completed:
sst.write_op(SessionCompleted(sst.executed))
def do_session_request_timeout(self, rt):
sst = self.get_sst(rt)
sst.write_op(SessionTimeout(timeout=0))
def do_execution_result(self, er):
sst = self.get_sst(er)
sst.results[er.command_id] = er.value
sst.executed.add(er.id)
def do_execution_exception(self, ex):
sst = self.get_sst(ex)
exc = SESSION_ERRS.get(ex.error_code, SessionError)
sst.session.error = exc(ex.error_code, ex.description)
def dispatch(self):
if not self.connection._connected and not self._closing and self._status != CLOSED:
self.disconnect()
if self._connected and not self._closing:
for ssn in self.connection.sessions.values():
self.attach(ssn)
self.process(ssn)
# We need to check heartbeat even if not self._connected since we may have
# heartbeat timeout before receiving an open-ok
if self.connection.heartbeat and self._status != CLOSED and not self._closing:
now = time.time()
if now - self._last_in > 2*self.connection.heartbeat:
raise HeartbeatTimeout(text="heartbeat timeout")
# Only send heartbeats if we are connected.
if self._connected and now - self._last_out >= self.connection.heartbeat/2.0:
self.write_op(ConnectionHeartbeat())
def open(self):
self._reset()
self._status = OPEN
self._buf += struct.pack(HEADER, "AMQP", 1, 1, 0, 10)
def disconnect(self):
self.write_op(ConnectionClose(close_code.normal))
self._closing = True
def attach(self, ssn):
if ssn.closed: return
sst = self._attachments.get(ssn)
if sst is None:
for i in xrange(0, self.channel_max):
if not self._sessions.has_key(i):
ch = i
break
else:
raise RuntimeError("all channels used")
sst = SessionState(self, ssn, ssn.name, ch)
sst.write_op(SessionAttach(name=ssn.name, force=self._reconnecting))
sst.write_op(SessionCommandPoint(sst.sent, 0))
self._reconnecting = False
sst.outgoing_idx = 0
sst.acked = []
sst.acked_idx = 0
if ssn.transactional:
sst.write_cmd(TxSelect())
self._attachments[ssn] = sst
self._sessions[sst.channel] = sst
for snd in ssn.senders:
self.link(snd, self._out, snd.target)
for rcv in ssn.receivers:
self.link(rcv, self._in, rcv.source)
if sst is not None and ssn.closing and not sst.detached:
sst.detached = True
sst.write_op(SessionDetach(name=ssn.name))
def get_sst(self, op):
return self._sessions[op.channel]
def do_session_detached(self, dtc):
sst = self._sessions.pop(dtc.channel)
ssn = sst.session
del self._attachments[ssn]
ssn.closed = True
def do_session_detach(self, dtc):
sst = self.get_sst(dtc)
sst.write_op(SessionDetached(name=dtc.name))
self.do_session_detached(dtc)
def link(self, lnk, dir, addr):
sst = self._attachments.get(lnk.session)
_lnk = self._attachments.get(lnk)
if _lnk is None and not lnk.closed:
_lnk = Attachment(lnk)
_lnk.closing = False
dir.init_link(sst, lnk, _lnk)
err = self.parse_address(_lnk, dir, addr) or self.validate_options(_lnk, dir)
if err:
lnk.error = err
lnk.closed = True
return
def linked():
lnk.linked = True
def resolved(type, subtype):
dir.do_link(sst, lnk, _lnk, type, subtype, linked)
self.resolve_declare(sst, _lnk, dir.DIR_NAME, resolved)
self._attachments[lnk] = _lnk
if lnk.linked and lnk.closing and not lnk.closed:
if not _lnk.closing:
def unlinked():
dir.del_link(sst, lnk, _lnk)
del self._attachments[lnk]
lnk.closed = True
if _lnk.options.get("delete") in ("always", dir.DIR_NAME):
dir.do_unlink(sst, lnk, _lnk)
requested_type = _lnk.options.get("node", {}).get("type")
self.delete(sst, _lnk.name, unlinked, node_type=requested_type)
else:
dir.do_unlink(sst, lnk, _lnk, unlinked)
_lnk.closing = True
elif not lnk.linked and lnk.closing and not lnk.closed:
if lnk.error: lnk.closed = True
def parse_address(self, lnk, dir, addr):
if addr is None:
return MalformedAddress(text="%s is None" % dir.ADDR_NAME)
else:
try:
lnk.name, lnk.subject, lnk.options = address.parse(addr)
# XXX: subject
if lnk.options is None:
lnk.options = {}
if isinstance(addr, MangledString):
lnk.options['create'] = "always"
if 'node' not in lnk.options:
lnk.options['node'] = {}
if 'x-declare' not in lnk.options['node']:
lnk.options['node']['x-declare'] = {}
xdeclare = lnk.options['node']['x-declare']
if 'auto-delete' not in xdeclare:
xdeclare['auto-delete'] = "True"
if 'exclusive' not in xdeclare:
xdeclare['exclusive'] = "True"
except address.LexError, e:
return MalformedAddress(text=str(e))
except address.ParseError, e:
return MalformedAddress(text=str(e))
def validate_options(self, lnk, dir):
ctx = Context()
err = dir.VALIDATOR.validate(lnk.options, ctx)
if err: return InvalidOption(text="error in options: %s" % err)
def resolve_declare(self, sst, lnk, dir, action):
declare = lnk.options.get("create") in ("always", dir)
assrt = lnk.options.get("assert") in ("always", dir)
requested_type = lnk.options.get("node", {}).get("type")
def do_resolved(type, subtype):
err = None
if type is None:
if declare:
err = self.declare(sst, lnk, action, True)
else:
err = NotFound(text="no such %s: %s" % (requested_type or "queue", lnk.name))
else:
if assrt:
expected = lnk.options.get("node", {}).get("type")
if expected and type != expected:
if declare:
err = self.declare(sst, lnk, action, True)
else:
err = AssertionFailed(text="expected %s, got %s" % (expected, type))
if "node" in lnk.options and "x-bindings" in lnk.options["node"]:
err = self.declare(sst, lnk, action, False)
if err is None:
action(type, subtype)
if err:
tgt = lnk.target
tgt.error = err
del self._attachments[tgt]
tgt.closed = True
return
self.resolve(sst, lnk.name, do_resolved, node_type=requested_type, force=declare)
def resolve(self, sst, name, action, force=False, node_type=None, delete=False):
if not force and not node_type:
try:
type, subtype = self.address_cache[name]
action(type, subtype)
return
except KeyError:
pass
args = { "topic":None, "queue":None }
def do_result(r, obj):
args[obj] = r
def do_action():
er = args["topic"]
qr = args["queue"]
if node_type == "topic" and er and not er.not_found:
type, subtype = "topic", er.type
elif node_type == "queue" and qr and qr.queue:
type, subtype = "queue", None
elif (er and er.not_found) and qr and not qr.queue:
type, subtype = None, None
elif (qr and qr.queue):
if node_type == "topic" and force:
type, subtype = None, None
else:
type, subtype = "queue", None
elif (er and not er.not_found):
if node_type == "queue" and force:
type, subtype = None, None
else:
type, subtype = "topic", er.type
elif er:
if er.not_found:
type, subtype = None, None
else:
type, subtype = "topic", er.type
else:
type, subtype = None, None
if type is not None:
self.address_cache[name] = (type, subtype)
action(type, subtype)
def do_result_and_action(r, obj):
do_result(r, obj)
do_action()
    if (node_type is None): # we don't know the type, let's check the broker
      sst.write_query(ExchangeQuery(name), do_result, "topic")
      sst.write_query(QueueQuery(name), do_result_and_action, "queue")
    elif force and not delete: # we forcefully declare the known type, don't ask the broker
do_action()
elif node_type == "topic":
sst.write_query(ExchangeQuery(name), do_result_and_action, "topic")
else:
sst.write_query(QueueQuery(name), do_result_and_action, "queue")
def declare(self, sst, lnk, action, create_node):
name = lnk.name
props = lnk.options.get("node", {})
durable = props.get("durable", DURABLE_DEFAULT)
type = props.get("type", "queue")
declare = props.get("x-declare", {})
cmd = None
if type == "topic":
if create_node: cmd = ExchangeDeclare(exchange=name, durable=durable)
bindings = get_bindings(props, exchange=name)
elif type == "queue":
if create_node: cmd = QueueDeclare(queue=name, durable=durable)
bindings = get_bindings(props, queue=name)
else:
raise ValueError(type)
if cmd is not None:
sst.apply_overrides(cmd, declare)
if type == "topic":
if cmd.type is None:
cmd.type = "topic"
subtype = cmd.type
else:
subtype = None
cmds = [cmd]
else:
cmds = []
cmds.extend(bindings)
def declared():
if create_node:
self.address_cache[name] = (type, subtype)
action(type, subtype)
sst.write_cmds(cmds, declared)
def delete(self, sst, name, action, node_type=None):
def deleted():
del self.address_cache[name]
action()
def do_delete(type, subtype):
if type == "topic":
sst.write_cmd(ExchangeDelete(name), deleted)
elif type == "queue":
sst.write_cmd(QueueDelete(name), deleted)
elif type is None:
action()
else:
raise ValueError(type)
self.resolve(sst, name, do_delete, force=True, node_type=node_type, delete=True)
def process(self, ssn):
if ssn.closed or ssn.closing: return
sst = self._attachments[ssn]
while sst.outgoing_idx < len(ssn.outgoing):
msg = ssn.outgoing[sst.outgoing_idx]
snd = msg._sender
# XXX: should check for sender error here
_snd = self._attachments.get(snd)
if _snd and snd.linked:
self.send(snd, msg)
sst.outgoing_idx += 1
else:
break
for snd in ssn.senders:
      # XXX: should include snd.acked in this
if snd.synced >= snd.queued and sst.need_sync:
sst.write_cmd(ExecutionSync(), sync_noop)
for rcv in ssn.receivers:
self.process_receiver(rcv)
if ssn.acked:
messages = ssn.acked[sst.acked_idx:]
if messages:
ids = RangedSet()
disposed = [(DEFAULT_DISPOSITION, [])]
acked = []
for m in messages:
# XXX: we're ignoring acks that get lost when disconnected,
# could we deal this via some message-id based purge?
if m._transfer_id is None:
acked.append(m)
continue
ids.add(m._transfer_id)
if m._receiver._accept_mode is accept_mode.explicit:
disp = m._disposition or DEFAULT_DISPOSITION
last, msgs = disposed[-1]
if disp.type is last.type and disp.options == last.options:
msgs.append(m)
else:
disposed.append((disp, [m]))
else:
acked.append(m)
for range in ids:
sst.executed.add_range(range)
sst.write_op(SessionCompleted(sst.executed))
def ack_acker(msgs):
def ack_ack():
for m in msgs:
ssn.acked.remove(m)
sst.acked_idx -= 1
# XXX: should this check accept_mode too?
if not ssn.transactional:
sst.acked.remove(m)
return ack_ack
for disp, msgs in disposed:
if not msgs: continue
if disp.type is None:
op = MessageAccept
elif disp.type is RELEASED:
op = MessageRelease
elif disp.type is REJECTED:
op = MessageReject
sst.write_cmd(op(RangedSet(*[m._transfer_id for m in msgs]),
**disp.options),
ack_acker(msgs))
if log.isEnabledFor(DEBUG):
for m in msgs:
log.debug("SACK[%s]: %s, %s", ssn.log_id, m, m._disposition)
sst.acked.extend(messages)
sst.acked_idx += len(messages)
ack_acker(acked)()
if ssn.committing and not sst.committing:
def commit_ok():
del sst.acked[:]
ssn.committing = False
ssn.committed = True
ssn.aborting = False
ssn.aborted = False
sst.committing = False
sst.write_cmd(TxCommit(), commit_ok)
sst.committing = True
if ssn.aborting and not sst.aborting:
sst.aborting = True
def do_rb():
messages = sst.acked + ssn.unacked + ssn.incoming
ids = RangedSet(*[m._transfer_id for m in messages])
for range in ids:
sst.executed.add_range(range)
sst.write_op(SessionCompleted(sst.executed))
sst.write_cmd(MessageRelease(ids, True))
sst.write_cmd(TxRollback(), do_rb_ok)
def do_rb_ok():
del ssn.incoming[:]
del ssn.unacked[:]
del sst.acked[:]
for rcv in ssn.receivers:
rcv.impending = rcv.received
rcv.returned = rcv.received
# XXX: do we need to update granted here as well?
for rcv in ssn.receivers:
self.process_receiver(rcv)
ssn.aborting = False
ssn.aborted = True
ssn.committing = False
ssn.committed = False
sst.aborting = False
for rcv in ssn.receivers:
_rcv = self._attachments[rcv]
sst.write_cmd(MessageStop(_rcv.destination))
sst.write_cmd(ExecutionSync(), do_rb)
def grant(self, rcv):
sst = self._attachments[rcv.session]
_rcv = self._attachments.get(rcv)
if _rcv is None or not rcv.linked or _rcv.closing or _rcv.draining:
return
if rcv.granted is UNLIMITED:
if rcv.impending is UNLIMITED:
delta = 0
else:
delta = UNLIMITED
elif rcv.impending is UNLIMITED:
delta = -1
else:
delta = max(rcv.granted, rcv.received) - rcv.impending
if delta is UNLIMITED:
if not _rcv.bytes_open:
sst.write_cmd(MessageFlow(_rcv.destination, credit_unit.byte, UNLIMITED.value))
_rcv.bytes_open = True
sst.write_cmd(MessageFlow(_rcv.destination, credit_unit.message, UNLIMITED.value))
rcv.impending = UNLIMITED
elif delta > 0:
if not _rcv.bytes_open:
sst.write_cmd(MessageFlow(_rcv.destination, credit_unit.byte, UNLIMITED.value))
_rcv.bytes_open = True
sst.write_cmd(MessageFlow(_rcv.destination, credit_unit.message, delta))
rcv.impending += delta
elif delta < 0 and not rcv.draining:
_rcv.draining = True
def do_stop():
rcv.impending = rcv.received
_rcv.draining = False
_rcv.bytes_open = False
self.grant(rcv)
sst.write_cmd(MessageStop(_rcv.destination), do_stop)
if rcv.draining:
_rcv.draining = True
def do_flush():
rcv.impending = rcv.received
rcv.granted = rcv.impending
_rcv.draining = False
_rcv.bytes_open = False
rcv.draining = False
sst.write_cmd(MessageFlush(_rcv.destination), do_flush)
def process_receiver(self, rcv):
if rcv.closed: return
self.grant(rcv)
def send(self, snd, msg):
sst = self._attachments[snd.session]
_snd = self._attachments[snd]
if msg.subject is None or _snd._exchange == "":
rk = _snd._routing_key
else:
rk = msg.subject
if msg.subject is None:
subject = _snd.subject
else:
subject = msg.subject
# XXX: do we need to query to figure out how to create the reply-to interoperably?
if msg.reply_to:
rt = addr2reply_to(msg.reply_to)
else:
rt = None
content_encoding = msg.properties.get("x-amqp-0-10.content-encoding")
dp = DeliveryProperties(routing_key=rk)
mp = MessageProperties(message_id=msg.id,
user_id=msg.user_id,
reply_to=rt,
correlation_id=msg.correlation_id,
app_id = msg.properties.get("x-amqp-0-10.app-id"),
content_type=msg.content_type,
content_encoding=content_encoding,
application_headers=msg.properties)
if subject is not None:
if mp.application_headers is None:
mp.application_headers = {}
mp.application_headers[SUBJECT] = subject
if msg.durable is not None:
if msg.durable:
dp.delivery_mode = delivery_mode.persistent
else:
dp.delivery_mode = delivery_mode.non_persistent
if msg.priority is not None:
dp.priority = msg.priority
if msg.ttl is not None:
dp.ttl = long(msg.ttl*1000)
enc, dec = get_codec(msg.content_type)
try:
body = enc(msg.content)
except AttributeError, e:
# convert to non-blocking EncodeError
raise EncodeError(e)
# XXX: this is not safe for out of order, can this be triggered by pre_ack?
def msg_acked():
# XXX: should we log the ack somehow too?
snd.acked += 1
m = snd.session.outgoing.pop(0)
sst.outgoing_idx -= 1
log.debug("RACK[%s]: %s", sst.session.log_id, msg)
assert msg == m
xfr = MessageTransfer(destination=_snd._exchange, headers=(dp, mp),
payload=body)
if _snd.pre_ack:
sst.write_cmd(xfr)
else:
sst.write_cmd(xfr, msg_acked, sync=msg._sync)
log.debug("SENT[%s]: %s", sst.session.log_id, msg)
if _snd.pre_ack:
msg_acked()
def do_message_transfer(self, xfr):
sst = self.get_sst(xfr)
ssn = sst.session
msg = self._decode(xfr)
rcv = sst.destinations[xfr.destination].target
msg._receiver = rcv
if rcv.closing or rcv.closed: # release message to a closing receiver
ids = RangedSet(*[msg._transfer_id])
log.debug("releasing back %s message: %s, as receiver is closing", ids, msg)
sst.write_cmd(MessageRelease(ids, True))
return
if rcv.impending is not UNLIMITED:
assert rcv.received < rcv.impending, "%s, %s" % (rcv.received, rcv.impending)
rcv.received += 1
log.debug("RCVD[%s]: %s", ssn.log_id, msg)
ssn._notify_message_received(msg)
def _decode(self, xfr):
dp = EMPTY_DP
mp = EMPTY_MP
for h in xfr.headers:
if isinstance(h, DeliveryProperties):
dp = h
elif isinstance(h, MessageProperties):
mp = h
ap = mp.application_headers
enc, dec = get_codec(mp.content_type)
try:
content = dec(xfr.payload)
except Exception, e:
raise DecodeError(e)
msg = Message(content)
msg.id = mp.message_id
if ap is not None:
msg.subject = ap.get(SUBJECT)
msg.user_id = mp.user_id
if mp.reply_to is not None:
msg.reply_to = reply_to2addr(mp.reply_to)
msg.correlation_id = mp.correlation_id
if dp.delivery_mode is not None:
msg.durable = dp.delivery_mode == delivery_mode.persistent
msg.priority = dp.priority
if dp.ttl is not None:
msg.ttl = dp.ttl/1000.0
msg.redelivered = dp.redelivered
msg.properties = mp.application_headers or {}
if mp.app_id is not None:
msg.properties["x-amqp-0-10.app-id"] = mp.app_id
if mp.content_encoding is not None:
msg.properties["x-amqp-0-10.content-encoding"] = mp.content_encoding
if dp.routing_key is not None:
msg.properties["x-amqp-0-10.routing-key"] = dp.routing_key
if dp.timestamp is not None:
msg.properties["x-amqp-0-10.timestamp"] = dp.timestamp
msg.content_type = mp.content_type
msg._transfer_id = xfr.id
return msg
|
PypiClean
|
/langchain_xfyun-0.0.275b2-py3-none-any.whl/langchain_xfyun/document_loaders/sitemap.py
|
import itertools
import re
from typing import Any, Callable, Generator, Iterable, List, Optional
from langchain_xfyun.document_loaders.web_base import WebBaseLoader
from langchain_xfyun.schema import Document
def _default_parsing_function(content: Any) -> str:
return str(content.get_text())
def _default_meta_function(meta: dict, _content: Any) -> dict:
return {"source": meta["loc"], **meta}
def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]:
it = iter(iterable)
while item := list(itertools.islice(it, size)):
yield item
class SitemapLoader(WebBaseLoader):
"""Load a sitemap and its URLs."""
def __init__(
self,
web_path: str,
filter_urls: Optional[List[str]] = None,
parsing_function: Optional[Callable] = None,
blocksize: Optional[int] = None,
blocknum: int = 0,
meta_function: Optional[Callable] = None,
is_local: bool = False,
continue_on_failure: bool = False,
):
"""Initialize with webpage path and optional filter URLs.
Args:
            web_path: URL of the sitemap. Can also be a local path.
filter_urls: list of strings or regexes that will be applied to filter the
urls that are parsed and loaded
parsing_function: Function to parse bs4.Soup output
blocksize: number of sitemap locations per block
blocknum: the number of the block that should be loaded - zero indexed.
Default: 0
meta_function: Function to parse bs4.Soup output for metadata
remember when setting this method to also copy metadata["loc"]
to metadata["source"] if you are using this field
is_local: whether the sitemap is a local file. Default: False
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
"""
if blocksize is not None and blocksize < 1:
raise ValueError("Sitemap blocksize should be at least 1")
if blocknum < 0:
raise ValueError("Sitemap blocknum can not be lower then 0")
try:
import lxml # noqa:F401
except ImportError:
raise ImportError(
"lxml package not found, please install it with " "`pip install lxml`"
)
super().__init__(web_path)
self.filter_urls = filter_urls
self.parsing_function = parsing_function or _default_parsing_function
self.meta_function = meta_function or _default_meta_function
self.blocksize = blocksize
self.blocknum = blocknum
self.is_local = is_local
self.continue_on_failure = continue_on_failure
def parse_sitemap(self, soup: Any) -> List[dict]:
"""Parse sitemap xml and load into a list of dicts.
Args:
soup: BeautifulSoup object.
Returns:
List of dicts.
"""
els = []
for url in soup.find_all("url"):
loc = url.find("loc")
if not loc:
continue
# Strip leading and trailing whitespace and newlines
loc_text = loc.text.strip()
if self.filter_urls and not any(
re.match(r, loc_text) for r in self.filter_urls
):
continue
els.append(
{
tag: prop.text
for tag in ["loc", "lastmod", "changefreq", "priority"]
if (prop := url.find(tag))
}
)
for sitemap in soup.find_all("sitemap"):
loc = sitemap.find("loc")
if not loc:
continue
soup_child = self.scrape_all([loc.text], "xml")[0]
els.extend(self.parse_sitemap(soup_child))
return els
def load(self) -> List[Document]:
"""Load sitemap."""
if self.is_local:
try:
import bs4
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it"
" with `pip install beautifulsoup4`"
)
            # Use a context manager so the local sitemap file is always closed.
            with open(self.web_path) as fp:
                soup = bs4.BeautifulSoup(fp, "xml")
else:
soup = self.scrape("xml")
els = self.parse_sitemap(soup)
if self.blocksize is not None:
elblocks = list(_batch_block(els, self.blocksize))
blockcount = len(elblocks)
if blockcount - 1 < self.blocknum:
raise ValueError(
"Selected sitemap does not contain enough blocks for given blocknum"
)
else:
els = elblocks[self.blocknum]
results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el])
return [
Document(
page_content=self.parsing_function(results[i]),
metadata=self.meta_function(els[i], results[i]),
)
for i in range(len(results))
]
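# A minimal usage sketch (assumptions: lxml is installed; the URL and the
# filter pattern are illustrative placeholders, not taken from the original
# source).
#
#     loader = SitemapLoader(
#         "https://example.com/sitemap.xml",
#         filter_urls=[r"https://example\.com/blog/.*"],
#     )
#     docs = loader.load()  # one Document per sitemap entry that matched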
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/device_management/managed_devices/item/shut_down/shut_down_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .....models.o_data_errors import o_data_error
class ShutDownRequestBuilder():
"""
Provides operations to call the shutDown method.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new ShutDownRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/deviceManagement/managedDevices/{managedDevice%2Did}/shutDown"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
async def post(self,request_configuration: Optional[ShutDownRequestBuilderPostRequestConfiguration] = None) -> None:
"""
Shut down device
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
"""
request_info = self.to_post_request_information(
request_configuration
)
from .....models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_no_response_content_async(request_info, error_mapping)
def to_post_request_information(self,request_configuration: Optional[ShutDownRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Shut down device
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
return request_info
@dataclass
class ShutDownRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
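# A minimal sketch (not from the SDK documentation) of driving this builder
# directly; "adapter" stands for a fully configured kiota RequestAdapter and
# the device id is a placeholder.
#
#     builder = ShutDownRequestBuilder(adapter, {"managedDevice%2Did": "DEVICE-ID"})
#     request_info = builder.to_post_request_information()  # inspect offline
#     await builder.post()  # issues the actual shutDown POST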
|
PypiClean
|
/niconico.py-1.2.4-py3-none-any.whl/niconico/base.py
|
from __future__ import annotations
from typing import TYPE_CHECKING, Generic, TypeVar, Type, Optional
if TYPE_CHECKING:
from .niconico import NicoNico
__all__ = ("DictFromAttribute", "BaseClient")
SuperT = TypeVar("SuperT")
class DictFromAttribute(Generic[SuperT]):
"""辞書を属性からアクセスできるようにするものです。
属性からアクセスされた際に返すものもこのクラスのインスタンスです。
niconico.pyでのほとんどのニコニコのデータはこのクラスのインスタンスに格納されます。
Parameters
----------
data : dict
属性でアクセスされた際に返すべき値がある辞書です。
super_ : SuperT
属性からアクセスされた際に返すインスタンスに渡すものです。
Attributes
----------
__data__ : dict
インスタンス化時に引数の ``data`` に渡された辞書が入っています。
Notes
-----
データに属性からではない方法でアクセスしたい場合はこれを使用しましょう。
また、生のデータを取得したい場合はこちらを使用してください。
__super__ : SuperT
インスタンス化時に引数の ``super_`` に渡されたオブジェクトです。
ニコニコのデータの場合はそのデータの提供元(例:ニコニコ動画)のクライアント用クラスのインスタンスが入ります。"""
__dfa_class__: Type[DictFromAttribute]
def __init__(self, data: dict, super_: SuperT):
self.__data__, self.__super__ = data, super_
@staticmethod
def _get_extends(cls) -> dict[str, Type[DictFromAttribute]]:
return getattr(cls, "__extends__", {})
@classmethod
def _from_data(cls, data, super_: SuperT, key: str):
cls = cls._get_extends(cls).get(key, cls)
if isinstance(data, dict):
try:
return cls(data, super_)
except TypeError:
return cls.__dfa_class__(data, super_)
elif isinstance(data, list):
return [cls._from_data(item, super_, key) for item in data]
else:
return data
def __getattr__(self, key: str):
if key in self.__data__:
return self._from_data(self.__data__[key], self.__super__, key)
else:
raise AttributeError(
f"class '{self.__class__.__name__}' has no attributre '{key}'"
)
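# A minimal sketch (not in the original source) of the attribute access this
# class provides; the data dict is illustrative.
#
#     dfa = DictFromAttribute({"video": {"title": "t"}}, super_=None)
#     dfa.video.title  # -> "t" (each hop returns another DictFromAttribute)
#     dfa.__data__     # -> the raw dict passed in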
class BaseClient:
"""クライアントクラスのベースクラスです。
ここでいうベースクラスはニコニコの各サービスのために用意するクライアントに使われるもので、 :class:`niconico.niconico.NicoNico` では使われません。
Parameters
----------
cookies : Cookies, optional
リクエストを行う際に使用するクッキーです。
指定しない場合は :func:`niconico.cookies.Cookies.guest` を実行して返ってきたものが使われます。"""
if TYPE_CHECKING:
niconico: NicoNico
def log(self, type_: str, content: str, *args, **kwargs):
"""クラスの名前を使ってログ出力を行います。
:attr:`niconico.niconico.NicoNico.logger` が使用されます。
普通これは開発者が使います。
Parameters
----------
type_ : str
content : str
*args
**kwargs"""
return getattr(self.niconico.logger, type_)(content, *args, **kwargs)
def __init__(self, niconico: NicoNico):
self.niconico = niconico
|
PypiClean
|
/genpipes-suite-0.0a0.tar.gz/genpipes-suite-0.0a0/genpipes/bfx/picard2.py
|
# Python Standard Modules
import re
# MUGQIC Modules
from ..core.config import global_conf
from ..core.job import Job
from . import picard
from . import gatk4
def build_bam_index(input, output):
    # Module paths look like "<group>/picard/<version>"; the major version is
    # compared lexicographically, which works for single-digit majors.
    if global_conf.get('build_bam_index', 'module_picard').split("/")[2] < "2":
return picard.build_bam_index(input, output)
else:
return Job(
[input],
[output],
[
['build_bam_index', 'module_java'],
['build_bam_index', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar BuildBamIndex \\
VALIDATION_STRINGENCY=SILENT \\
INPUT={input} \\
OUTPUT={output} """.format(
tmp_dir=global_conf.get('build_bam_index', 'tmp_dir'),
java_other_options=global_conf.get('build_bam_index', 'java_other_options'),
ram=global_conf.get('build_bam_index', 'ram'),
input=input,
output=output,
)
)
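# A quick illustrative note (not in the original source): the dispatch above
# compares the version segment of the module path lexicographically. Assuming
# paths of the form "<group>/picard/<version>":
#
#     "mugqic/picard/2.9.0".split("/")[2] < "2"  # -> False, uses the Picard >= 2 command
#     "mugqic/picard/1.123".split("/")[2] < "2"  # -> True, falls back to picard.build_bam_index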
def calculate_hs_metrics(input, output, intervals, reference_sequence=None):
baits_intervals = ""
baits_intervals = global_conf.get('picard_calculate_hs_metrics', 'baits_intervals', required = False)
if global_conf.get('picard_calculate_hs_metrics', 'module_picard').split("/")[2] < "2":
return picard.calculate_hs_metrics(input, output, intervals, reference_sequence)
else:
return Job(
[input, intervals],
[output],
[
['picard_calculate_hs_metrics', 'module_java'],
['picard_calculate_hs_metrics', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar CollectHsMetrics \\
TMP_DIR={tmp_dir} \\
INPUT={input} \\
OUTPUT={output} \\
BAIT_INTERVALS={baits} \\
TARGET_INTERVALS={intervals} \\
REFERENCE_SEQUENCE={reference_sequence}""".format(
tmp_dir=global_conf.get('picard_calculate_hs_metrics', 'tmp_dir'),
java_other_options=global_conf.get('picard_calculate_hs_metrics', 'java_other_options'),
ram=global_conf.get('picard_calculate_hs_metrics', 'ram'),
input=input,
output=output,
intervals=intervals,
baits=baits_intervals if baits_intervals != "" else intervals,
reference_sequence=reference_sequence if reference_sequence else global_conf.get('picard_calculate_hs_metrics', 'genome_fasta', param_type='filepath')
)
)
def collect_multiple_metrics(input, output, reference_sequence=None, library_type="PAIRED_END"):
if library_type == "PAIRED_END" :
outputs = [
output + ".base_distribution_by_cycle_metrics",
output + ".base_distribution_by_cycle.pdf",
output + ".alignment_summary_metrics",
output + ".insert_size_histogram.pdf",
output + ".insert_size_metrics",
output + ".quality_by_cycle_metrics",
output + ".quality_by_cycle.pdf",
output + ".quality_distribution_metrics",
output + ".quality_distribution.pdf"
]
else :
outputs = [
output + ".quality_by_cycle.pdf",
output + ".alignment_summary_metrics",
output + ".quality_by_cycle_metrics",
output + ".quality_distribution_metrics",
output + ".quality_distribution.pdf"
]
if global_conf.get('picard_collect_multiple_metrics', 'module_picard').split("/")[2] < "2":
return picard.collect_multiple_metrics(input, output, reference_sequence, library_type)
else:
return Job(
[input],
outputs,
[
['picard_collect_multiple_metrics', 'module_java'],
['picard_collect_multiple_metrics', 'module_picard'],
['picard_collect_multiple_metrics', 'module_R']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar CollectMultipleMetrics \\
PROGRAM=CollectAlignmentSummaryMetrics PROGRAM=CollectInsertSizeMetrics VALIDATION_STRINGENCY=SILENT \\
TMP_DIR={tmp_dir} \\
REFERENCE_SEQUENCE={reference_sequence} \\
INPUT={input} \\
OUTPUT={output} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
tmp_dir=global_conf.get('picard_collect_multiple_metrics', 'tmp_dir'),
java_other_options=global_conf.get('picard_collect_multiple_metrics', 'java_other_options'),
ram=global_conf.get('picard_collect_multiple_metrics', 'ram'),
reference_sequence=reference_sequence if reference_sequence else global_conf.get('picard_collect_multiple_metrics', 'genome_fasta', param_type='filepath'),
input=input,
output=output,
max_records_in_ram=global_conf.get('picard_collect_multiple_metrics', 'max_records_in_ram', param_type='int')
)
)
def collect_sequencing_artifacts_metrics(input, output, annotation_flat=None,reference_sequence=None):
output_dep = output + ".bait_bias_summary_metrics"
return Job(
[input],
[output_dep],
[
['picard_collect_sequencing_artifacts_metrics', 'module_java'],
['picard_collect_sequencing_artifacts_metrics', 'module_picard'],
['picard_collect_sequencing_artifacts_metrics', 'module_R']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar CollectSequencingArtifactMetrics \\
VALIDATION_STRINGENCY=SILENT {options} \\
TMP_DIR={tmp_dir} \\
INPUT={input} \\
OUTPUT={output} \\
REFERENCE_SEQUENCE={reference} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
options=global_conf.get('picard_collect_sequencing_artifacts_metrics', 'options'),
tmp_dir=global_conf.get('picard_collect_sequencing_artifacts_metrics', 'tmp_dir'),
java_other_options=global_conf.get('picard_collect_sequencing_artifacts_metrics', 'java_other_options'),
ram=global_conf.get('picard_collect_sequencing_artifacts_metrics', 'ram'),
input=input,
output=output,
reference=reference_sequence if reference_sequence else global_conf.get('picard_collect_sequencing_artifacts_metrics', 'genome_fasta'),
max_records_in_ram=global_conf.get('picard_collect_sequencing_artifacts_metrics', 'max_records_in_ram', param_type='int')
)
)
def convert_sequencing_artifacts_metrics(input, output, annotation_flat=None,reference_sequence=None):
input_dep = input + ".bait_bias_summary_metrics"
return Job(
[input_dep],
[output],
[
['picard_convert_sequencing_artifacts_metrics', 'module_java'],
['picard_convert_sequencing_artifacts_metrics', 'module_picard'],
['picard_convert_sequencing_artifacts_metrics', 'module_R']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar ConvertSequencingArtifactToOxoG \\
VALIDATION_STRINGENCY=SILENT \\
TMP_DIR={tmp_dir} \\
INPUT_BASE={input} \\
OUTPUT_BASE={output} \\
REFERENCE_SEQUENCE={reference}""".format(
tmp_dir=global_conf.get('picard_convert_sequencing_artifacts_metrics', 'tmp_dir'),
java_other_options=global_conf.get('picard_convert_sequencing_artifacts_metrics', 'java_other_options'),
ram=global_conf.get('picard_convert_sequencing_artifacts_metrics', 'ram'),
input=input,
output=output,
reference=reference_sequence if reference_sequence else global_conf.get('picard_convert_sequencing_artifacts_metrics', 'genome_fasta'),
)
)
def collect_oxog_metrics(input, output, annotation_flat=None, reference_sequence=None):
return Job(
[input],
[output],
[
['picard_collect_sequencing_artifacts_metrics', 'module_java'],
['picard_collect_sequencing_artifacts_metrics', 'module_picard'],
['picard_collect_sequencing_artifacts_metrics', 'module_R']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar CollectOxoGMetrics \\
VALIDATION_STRINGENCY=SILENT \\
TMP_DIR={tmp_dir} \\
INPUT={input} \\
OUTPUT={output} \\
DB_SNP={dbsnp} \\
REFERENCE_SEQUENCE={reference} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
tmp_dir=global_conf.get('picard_collect_oxog_metrics', 'tmp_dir'),
java_other_options=global_conf.get('picard_collect_oxog_metrics', 'java_other_options'),
ram=global_conf.get('picard_collect_oxog_metrics', 'ram'),
input=input,
output=output,
dbsnp=global_conf.get('picard_collect_oxog_metrics', 'known_variants'),
reference=reference_sequence if reference_sequence else global_conf.get('picard_collect_oxog_metrics', 'genome_fasta'),
max_records_in_ram=global_conf.get('picard_collect_oxog_metrics', 'max_records_in_ram', param_type='int')
)
)
def collect_gcbias_metrics(input, output, chart, summary_file, annotation_flat=None,reference_sequence=None):
return Job(
[input],
[output],
[
['picard_collect_gcbias_metrics', 'module_java'],
['picard_collect_gcbias_metrics', 'module_picard'],
['picard_collect_gcbias_metrics', 'module_R']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar CollectGcBiasMetrics \\
VALIDATION_STRINGENCY=SILENT ALSO_IGNORE_DUPLICATES=TRUE \\
TMP_DIR={tmp_dir} \\
INPUT={input} \\
OUTPUT={output} \\
CHART={chart} \\
SUMMARY_OUTPUT={summary_file} \\
REFERENCE_SEQUENCE={reference} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
tmp_dir=global_conf.get('picard_collect_gcbias_metrics', 'tmp_dir'),
java_other_options=global_conf.get('picard_collect_gcbias_metrics', 'java_other_options'),
ram=global_conf.get('picard_collect_gcbias_metrics', 'ram'),
input=input,
output=output,
chart=chart,
summary_file=summary_file,
reference=reference_sequence if reference_sequence else global_conf.get('picard_collect_gcbias_metrics', 'genome_fasta'),
max_records_in_ram=global_conf.get('picard_collect_gcbias_metrics', 'max_records_in_ram', param_type='int')
)
)
def fix_mate_information(input, output):
if global_conf.get('fixmate', 'module_picard').split("/")[2] < "2":
return picard.fix_mate_information(input, output)
else:
return Job(
[input],
[output],
[
['fixmate', 'module_java'],
['fixmate', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar FixMateInformation \\
VALIDATION_STRINGENCY=SILENT CREATE_INDEX=true SORT_ORDER=coordinate \\
TMP_DIR={tmp_dir} \\
INPUT={input} \\
OUTPUT={output} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
tmp_dir=global_conf.get('picard_fix_mate_information', 'tmp_dir'),
java_other_options=global_conf.get('picard_fix_mate_information', 'java_other_options'),
ram=global_conf.get('picard_fix_mate_information', 'ram'),
input=input,
output=output,
max_records_in_ram=global_conf.get('picard_fix_mate_information', 'max_records_in_ram', param_type='int')
),
removable_files=[output, re.sub("\.([sb])am$", ".\\1ai", output), output + ".md5"]
)
def mark_duplicates(inputs, output, metrics_file, remove_duplicates="false"):
if not isinstance(inputs, list):
inputs=[inputs]
if global_conf.get('picard_mark_duplicates', 'module_picard').split("/")[2] < "2" and global_conf.get('picard_mark_duplicates', 'module_gatk').split("/")[2] < "4":
return picard.mark_duplicates(inputs, output, metrics_file, remove_duplicates)
elif global_conf.get('picard_mark_duplicates', 'module_gatk').split("/")[2] > "4":
return gatk4.mark_duplicates(inputs, output, metrics_file, remove_duplicates)
else:
return Job(
inputs,
[output, re.sub("\.([sb])am$", ".\\1ai", output), metrics_file],
[
['picard_mark_duplicates', 'module_java'],
['picard_mark_duplicates', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar MarkDuplicates \\
REMOVE_DUPLICATES={remove_duplicates} VALIDATION_STRINGENCY=SILENT CREATE_INDEX=true \\
TMP_DIR={tmp_dir} \\
{inputs} \\
OUTPUT={output} \\
METRICS_FILE={metrics_file} \\
MAX_RECORDS_IN_RAM={max_records_in_ram} {other_options}""".format(
tmp_dir=global_conf.get('picard_mark_duplicates', 'tmp_dir'),
java_other_options=global_conf.get('picard_mark_duplicates', 'java_other_options'),
ram=global_conf.get('picard_mark_duplicates', 'ram'),
remove_duplicates=remove_duplicates,
inputs=" \\\n ".join(["INPUT=" + str(input) for input in inputs]),
output=output,
metrics_file=metrics_file,
max_records_in_ram=global_conf.get('picard_mark_duplicates', 'max_records_in_ram', param_type='int'),
                other_options=global_conf.get('picard_mark_duplicates', 'other_options', required=False) if global_conf.get('picard_mark_duplicates', 'other_options', required=False) else ""
),
removable_files=[output, re.sub("\.([sb])am$", ".\\1ai", output), output + ".md5"]
)
def mark_duplicates_mate_cigar(inputs, output, metrics_file, remove_duplicates="false"):
if not isinstance(inputs, list):
inputs=[inputs]
if global_conf.get('picard_mark_duplicates_mate_cigar', 'module_gatk').split("/")[2] > "4":
return gatk4.mark_duplicates_mate_cigar(inputs, output, metrics_file, remove_duplicates)
else:
return Job(
inputs,
[output, re.sub("\.([sb])am$", ".\\1ai", output), metrics_file],
[
['picard_mark_duplicates_mate_cigar', 'module_java'],
['picard_mark_duplicates_mate_cigar', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar \\
MarkDuplicatesWithMateCigar \\
REMOVE_DUPLICATES={remove_duplicates} VALIDATION_STRINGENCY=SILENT CREATE_INDEX=true \\
TMP_DIR={tmp_dir} \\
{inputs} \\
OUTPUT={output} \\
METRICS_FILE={metrics_file} \\
MAX_RECORDS_IN_RAM={max_records_in_ram} {other_options}""".format(
tmp_dir=global_conf.get('picard_mark_duplicates_mate_cigar', 'tmp_dir'),
java_other_options=global_conf.get('picard_mark_duplicates_mate_cigar', 'java_other_options'),
ram=global_conf.get('picard_mark_duplicates_mate_cigar', 'ram'),
remove_duplicates=remove_duplicates,
inputs=" \\\n ".join(["INPUT=" + str(input) for input in inputs]),
output=output,
metrics_file=metrics_file,
max_records_in_ram=global_conf.get('picard_mark_duplicates_mate_cigar', 'max_records_in_ram', param_type='int'),
                other_options=global_conf.get('picard_mark_duplicates_mate_cigar', 'other_options', required=False) if global_conf.get('picard_mark_duplicates_mate_cigar', 'other_options', required=False) else ""),
            removable_files=[output, re.sub(r"\.([sb])am$", r".\1ai", output), output + ".md5"]
)
def merge_sam_files(inputs, output):
if not isinstance(inputs, list):
inputs=[inputs]
if global_conf.get('picard_merge_sam_files', 'module_picard').split("/")[2] < "2":
return picard.merge_sam_files(inputs, output)
else:
return Job(
inputs,
[output, re.sub("\.([sb])am$", ".\\1ai", output)],
[
['picard_merge_sam_files', 'module_java'],
['picard_merge_sam_files', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar MergeSamFiles \\
VALIDATION_STRINGENCY=SILENT ASSUME_SORTED=true CREATE_INDEX=true \\
TMP_DIR={tmp_dir} \\
{inputs} \\
OUTPUT={output} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
tmp_dir=global_conf.get('picard_merge_sam_files', 'tmp_dir'),
java_other_options=global_conf.get('picard_merge_sam_files', 'java_other_options'),
ram=global_conf.get('picard_merge_sam_files', 'ram'),
inputs=" \\\n ".join(["INPUT=" + input for input in inputs]),
output=output,
max_records_in_ram=global_conf.get('picard_merge_sam_files', 'max_records_in_ram', param_type='int')
),
removable_files=[output, re.sub("\.([sb])am$", ".\\1ai", output)]
)
# Reorder BAM/SAM files based on reference/dictionary
def reorder_sam(input, output):
if global_conf.get('reorder_sam', 'module_picard').split("/")[2] < "2":
return picard.reorder_sam(input, output)
else:
return Job(
[input],
[output],
[
['reorder_sam', 'module_java'],
['reorder_sam', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar ReorderSam \\
VALIDATION_STRINGENCY=SILENT CREATE_INDEX=true \\
TMP_DIR={tmp_dir} \\
INPUT={input} \\
OUTPUT={output} \\
REFERENCE={reference} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
tmp_dir=global_conf.get('picard_reorder_sam', 'tmp_dir'),
java_other_options=global_conf.get('picard_reorder_sam', 'java_other_options'),
ram=global_conf.get('picard_reorder_sam', 'ram'),
input=input,
output=output,
reference=global_conf.get('picard_reorder_sam', 'genome_fasta', param_type='filepath'),
max_records_in_ram=global_conf.get('picard_reorder_sam', 'max_records_in_ram', param_type='int')
),
removable_files=[output, re.sub("\.([sb])am$", ".\\1ai", output)]
)
# Convert SAM/BAM file to fastq format
def sam_to_fastq(input, fastq, second_end_fastq=None):
if global_conf.get('picard_sam_to_fastq', 'module_picard').split("/")[2] < "2":
return picard.sam_to_fastq(input, fastq, second_end_fastq)
else:
return Job(
[input],
[fastq, second_end_fastq],
[
['picard_sam_to_fastq', 'module_java'],
['picard_sam_to_fastq', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar SamToFastq \\
VALIDATION_STRINGENCY=LENIENT \\
CREATE_MD5_FILE=TRUE \\
INPUT={input} \\
FASTQ={fastq}{second_end_fastq}""".format(
tmp_dir=global_conf.get('picard_sam_to_fastq', 'tmp_dir'),
java_other_options=global_conf.get('picard_sam_to_fastq', 'java_other_options'),
ram=global_conf.get('picard_sam_to_fastq', 'ram'),
input=input,
fastq=fastq,
second_end_fastq=" \\\n SECOND_END_FASTQ=" + second_end_fastq if second_end_fastq else ""
),
removable_files=[fastq, second_end_fastq]
)
def sort_sam(input, output, sort_order="coordinate", ini_section='picard_sort_sam'):
if global_conf.get(ini_section, 'module_picard').split("/")[2] < "2":
return picard.sort_sam(input, output, sort_order, ini_section)
else:
return Job(
[input],
# Add SAM/BAM index as output only when writing a coordinate-sorted BAM file
[output, re.sub("\.([sb])am$", ".\\1ai", output) if sort_order == "coordinate" else None],
[
[ini_section, 'module_java'],
[ini_section, 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar SortSam \\
VALIDATION_STRINGENCY=SILENT CREATE_INDEX=true \\
TMP_DIR={tmp_dir} \\
INPUT={input} \\
OUTPUT={output} \\
SORT_ORDER={sort_order} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
tmp_dir=global_conf.get(ini_section, 'tmp_dir'),
java_other_options=global_conf.get(ini_section, 'java_other_options'),
ram=global_conf.get(ini_section, 'ram'),
input=input,
output=output,
sort_order=sort_order,
max_records_in_ram=global_conf.get(ini_section, 'max_records_in_ram', param_type='int')
),
removable_files=[output, re.sub("\.([sb])am$", ".\\1ai", output) if sort_order == "coordinate" else None]
)
def sort_vcfs(inputs, output, ini_section='picard_sort_vcf'):
if not isinstance(inputs, list):
inputs=[inputs]
if global_conf.get(ini_section, 'module_picard').split("/")[2] < "2":
return picard.sort_vcfs(inputs, output, ini_section)
else:
return Job(
inputs,
            [output],
[
[ini_section, 'module_java'],
[ini_section, 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar SortVcf \\
VALIDATION_STRINGENCY=SILENT \\
TMP_DIR={tmp_dir} \\
{inputs} \\
OUTPUT={output} \\
SEQUENCE_DICTIONARY={seq_dict}""".format(
tmp_dir=global_conf.get(ini_section, 'tmp_dir'),
java_other_options=global_conf.get(ini_section, 'java_other_options'),
ram=global_conf.get(ini_section, 'ram'),
inputs=" \\\n ".join(["INPUT=" + input for input in inputs]),
output=output,
seq_dict=global_conf.get(ini_section, 'genome_dictionary', param_type='filepath')
)
)
def mergeVcfs(variants, output):
return Job(
variants,
[output],
[
['picard_merge_vcfs', 'module_java'],
['picard_merge_vcfs', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar \\
MergeVcfs {options} \\
--TMP_DIR {tmp_dir} \\
--REFERENCE_SEQUENCE {reference}{variants} \\
--OUTPUT {output} \\
--MAX_RECORDS_IN_RAM {max_records_in_ram}""".format(
tmp_dir=global_conf.get('picard_merge_vcfs', 'tmp_dir'),
java_other_options=global_conf.get('picard_merge_vcfs', 'java_other_options'),
ram=global_conf.get('picard_merge_vcfs', 'ram'),
options=global_conf.get('picard_merge_vcfs', 'options'),
reference=global_conf.get('picard_merge_vcfs', 'genome_fasta', param_type='filepath'),
variants="".join(" \\\n --INPUT " + variant for variant in variants),
output=output,
max_records_in_ram=global_conf.get('picard_merge_vcfs', 'max_records_in_ram', param_type='int')
)
)
def collect_rna_metrics(input, output, annotation_flat=None,reference_sequence=None):
if global_conf.get('picard_collect_rna_metrics', 'module_picard').split("/")[2] < "2":
return picard.collect_rna_metrics(input, output, annotation_flat, reference_sequence)
else:
return Job(
[input],
# collect specific RNA metrics (exon rate, strand specificity, etc...)
[output],
[
['picard_collect_rna_metrics', 'module_java'],
['picard_collect_rna_metrics', 'module_picard'],
['picard_collect_rna_metrics', 'module_R']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar CollectRnaSeqMetrics \\
VALIDATION_STRINGENCY=SILENT \\
TMP_DIR={tmp_dir} \\
INPUT={input} \\
OUTPUT={output} \\
REF_FLAT={ref_flat} \\
STRAND_SPECIFICITY={strand_specificity} \\
MINIMUM_LENGTH={min_length} \\
REFERENCE_SEQUENCE={reference} \\
MAX_RECORDS_IN_RAM={max_records_in_ram}""".format(
tmp_dir=global_conf.get('picard_collect_rna_metrics', 'tmp_dir'),
java_other_options=global_conf.get('picard_collect_rna_metrics', 'java_other_options'),
ram=global_conf.get('picard_collect_rna_metrics', 'ram'),
input=input,
output=output,
ref_flat=annotation_flat if annotation_flat else global_conf.get('picard_collect_rna_metrics', 'annotation_flat'),
strand_specificity=global_conf.get('picard_collect_rna_metrics', 'strand_info'),
min_length=global_conf.get('picard_collect_rna_metrics', 'minimum_length', param_type='int'),
reference=reference_sequence if reference_sequence else global_conf.get('picard_collect_rna_metrics', 'genome_fasta'),
max_records_in_ram=global_conf.get('picard_collect_rna_metrics', 'max_records_in_ram', param_type='int')
)
)
def add_read_groups(input, output, readgroup, library, processing_unit, sample, sort_order="coordinate"):
if global_conf.get('picard_add_read_groups', 'module_picard').split("/")[2] < "2":
return picard.add_read_groups(input, output, readgroup, library, processing_unit, sample, sort_order)
else:
return Job(
[input],
            # Output BAM plus the index Picard creates alongside it
            [output, re.sub(r"\.([sb])am$", r".\1ai", output)],
[
['picard_add_read_groups', 'module_java'],
['picard_add_read_groups', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar AddOrReplaceReadGroups \\
VALIDATION_STRINGENCY=SILENT \\
TMP_DIR={tmp_dir} \\
CREATE_INDEX=true \\
INPUT={input} \\
OUTPUT={output} \\
SORT_ORDER=\"{sort_order}\" \\
RGID=\"{readgroup}\" \\
RGLB=\"{library}\" \\
RGPL=\"{platform}\" \\
RGPU=\"run{processing_unit}\" \\
RGSM=\"{sample}\" \\
{sequencing_center}""".format(
tmp_dir=global_conf.get('picard_add_read_groups', 'tmp_dir'),
java_other_options=global_conf.get('picard_add_read_groups', 'java_other_options'),
ram=global_conf.get('picard_add_read_groups', 'ram'),
input=input,
output=output,
sort_order=sort_order,
readgroup=readgroup,
library=library,
platform=global_conf.get('picard_add_read_groups', 'platform'),
processing_unit=processing_unit,
sample=sample,
sequencing_center=("RGCN=\"" + global_conf.get(
'picard_add_read_groups', 'sequencing_center') + "\""
if global_conf.get(
'picard_add_read_groups', 'sequencing_center', required=False) else "")
)
)
def bed2interval_list(
dictionary,
bed,
output
):
if global_conf.get('picard_bed2interval_list', 'module_picard').split("/")[2] < "2":
return picard.bed2interval_list(
dictionary,
bed,
output
)
return Job(
[dictionary, bed],
[output],
[
['picard_bed2interval_list', 'module_java'],
['picard_bed2interval_list', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar BedToIntervalList \\
INPUT={bed} \\
SEQUENCE_DICTIONARY={dictionary} \\
OUTPUT={output}""".format(
tmp_dir=global_conf.get('picard_bed2interval_list', 'tmp_dir'),
java_other_options=global_conf.get('picard_bed2interval_list', 'java_other_options'),
ram=global_conf.get('picard_bed2interval_list', 'ram'),
dictionary=dictionary if dictionary else global_conf.get('picard_bed2interval_list', 'genome_dictionary', param_type='filepath'),
bed=bed,
output=output,
)
)
def interval_list2bed(input, output):
return Job(
[input],
[output],
[
['picard_interval_list2bed', 'module_java'],
['picard_interval_list2bed', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar IntervalListToBed \\
INPUT={input} \\
OUTPUT={output}""".format(
tmp_dir=global_conf.get('picard_interval_list2bed', 'tmp_dir'),
java_other_options=global_conf.get('picard_interval_list2bed', 'java_other_options'),
ram=global_conf.get('picard_interval_list2bed', 'ram'),
input=input,
output=output
)
)
def scatterIntervalsByNs(reference, output, options=None):
# exclude_intervals=None):
return Job(
[reference],
[output],
[
['picard_ScatterIntervalsByNs', 'module_java'],
['picard_ScatterIntervalsByNs', 'module_picard']
],
command="""\
java -Djava.io.tmpdir={tmp_dir} {java_other_options} -Xmx{ram} -jar $PICARD_HOME/picard.jar \\
ScatterIntervalsByNs {options} \\
REFERENCE={reference} \\
OUTPUT={output}""".format(
tmp_dir=global_conf.get('picard_ScatterIntervalsByNs', 'tmp_dir'),
options=options if options else global_conf.get('picard_ScatterIntervalsByNs', 'options'),
java_other_options=global_conf.get('picard_ScatterIntervalsByNs', 'java_other_options'),
ram=global_conf.get('picard_ScatterIntervalsByNs', 'ram'),
reference=reference if reference else global_conf.get('picard_ScatterIntervalsByNs', 'genome_fasta', param_type='filepath'),
# exclude_intervals=exclude_intervals if exclude_intervals else "".join(" \\\n --excludeIntervals " + exclude_interval for exclude_interval in exclude_intervals),
output=output
)
)
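# A minimal usage sketch (assumption: global_conf has been loaded from a
# GenPipes ini that defines the relevant picard_* sections; file names are
# placeholders).
#
#     job = sort_sam("sample.bam", "sample.sorted.bam")
#     # The returned Job bundles the input/output files, the modules to load,
#     # and the shell command assembled above; the pipeline engine runs it.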
|
PypiClean
|
/virtual_assistant_generator-0.1.0-py3-none-any.whl/va_generator/back_end/node_modules/@types/node/worker_threads.d.ts
|
declare module "worker_threads" {
import { Context } from "vm";
import { EventEmitter } from "events";
import { Readable, Writable } from "stream";
const isMainThread: boolean;
const parentPort: null | MessagePort;
const threadId: number;
const workerData: any;
class MessageChannel {
readonly port1: MessagePort;
readonly port2: MessagePort;
}
class MessagePort extends EventEmitter {
close(): void;
postMessage(value: any, transferList?: Array<ArrayBuffer | MessagePort>): void;
ref(): void;
unref(): void;
start(): void;
addListener(event: "close", listener: () => void): this;
addListener(event: "message", listener: (value: any) => void): this;
addListener(event: string | symbol, listener: (...args: any[]) => void): this;
emit(event: "close"): boolean;
emit(event: "message", value: any): boolean;
emit(event: string | symbol, ...args: any[]): boolean;
on(event: "close", listener: () => void): this;
on(event: "message", listener: (value: any) => void): this;
on(event: string | symbol, listener: (...args: any[]) => void): this;
once(event: "close", listener: () => void): this;
once(event: "message", listener: (value: any) => void): this;
once(event: string | symbol, listener: (...args: any[]) => void): this;
prependListener(event: "close", listener: () => void): this;
prependListener(event: "message", listener: (value: any) => void): this;
prependListener(event: string | symbol, listener: (...args: any[]) => void): this;
prependOnceListener(event: "close", listener: () => void): this;
prependOnceListener(event: "message", listener: (value: any) => void): this;
prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this;
removeListener(event: "close", listener: () => void): this;
removeListener(event: "message", listener: (value: any) => void): this;
removeListener(event: string | symbol, listener: (...args: any[]) => void): this;
off(event: "close", listener: () => void): this;
off(event: "message", listener: (value: any) => void): this;
off(event: string | symbol, listener: (...args: any[]) => void): this;
}
interface WorkerOptions {
/**
* List of arguments which would be stringified and appended to
* `process.argv` in the worker. This is mostly similar to the `workerData`
* but the values will be available on the global `process.argv` as if they
* were passed as CLI options to the script.
*/
argv?: any[];
eval?: boolean;
workerData?: any;
stdin?: boolean;
stdout?: boolean;
stderr?: boolean;
execArgv?: string[];
resourceLimits?: ResourceLimits;
}
interface ResourceLimits {
maxYoungGenerationSizeMb?: number;
maxOldGenerationSizeMb?: number;
codeRangeSizeMb?: number;
}
class Worker extends EventEmitter {
readonly stdin: Writable | null;
readonly stdout: Readable;
readonly stderr: Readable;
readonly threadId: number;
readonly resourceLimits?: ResourceLimits;
constructor(filename: string, options?: WorkerOptions);
postMessage(value: any, transferList?: Array<ArrayBuffer | MessagePort>): void;
ref(): void;
unref(): void;
/**
* Stop all JavaScript execution in the worker thread as soon as possible.
* Returns a Promise for the exit code that is fulfilled when the `exit` event is emitted.
*/
terminate(): Promise<number>;
/**
* Transfer a `MessagePort` to a different `vm` Context. The original `port`
* object will be rendered unusable, and the returned `MessagePort` instance will
* take its place.
*
* The returned `MessagePort` will be an object in the target context, and will
* inherit from its global `Object` class. Objects passed to the
* `port.onmessage()` listener will also be created in the target context
* and inherit from its global `Object` class.
*
* However, the created `MessagePort` will no longer inherit from
* `EventEmitter`, and only `port.onmessage()` can be used to receive
* events using it.
*/
moveMessagePortToContext(port: MessagePort, context: Context): MessagePort;
/**
* Receive a single message from a given `MessagePort`. If no message is available,
* `undefined` is returned, otherwise an object with a single `message` property
* that contains the message payload, corresponding to the oldest message in the
* `MessagePort`’s queue.
*/
receiveMessageOnPort(port: MessagePort): {} | undefined;
addListener(event: "error", listener: (err: Error) => void): this;
addListener(event: "exit", listener: (exitCode: number) => void): this;
addListener(event: "message", listener: (value: any) => void): this;
addListener(event: "online", listener: () => void): this;
addListener(event: string | symbol, listener: (...args: any[]) => void): this;
emit(event: "error", err: Error): boolean;
emit(event: "exit", exitCode: number): boolean;
emit(event: "message", value: any): boolean;
emit(event: "online"): boolean;
emit(event: string | symbol, ...args: any[]): boolean;
on(event: "error", listener: (err: Error) => void): this;
on(event: "exit", listener: (exitCode: number) => void): this;
on(event: "message", listener: (value: any) => void): this;
on(event: "online", listener: () => void): this;
on(event: string | symbol, listener: (...args: any[]) => void): this;
once(event: "error", listener: (err: Error) => void): this;
once(event: "exit", listener: (exitCode: number) => void): this;
once(event: "message", listener: (value: any) => void): this;
once(event: "online", listener: () => void): this;
once(event: string | symbol, listener: (...args: any[]) => void): this;
prependListener(event: "error", listener: (err: Error) => void): this;
prependListener(event: "exit", listener: (exitCode: number) => void): this;
prependListener(event: "message", listener: (value: any) => void): this;
prependListener(event: "online", listener: () => void): this;
prependListener(event: string | symbol, listener: (...args: any[]) => void): this;
prependOnceListener(event: "error", listener: (err: Error) => void): this;
prependOnceListener(event: "exit", listener: (exitCode: number) => void): this;
prependOnceListener(event: "message", listener: (value: any) => void): this;
prependOnceListener(event: "online", listener: () => void): this;
prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this;
removeListener(event: "error", listener: (err: Error) => void): this;
removeListener(event: "exit", listener: (exitCode: number) => void): this;
removeListener(event: "message", listener: (value: any) => void): this;
removeListener(event: "online", listener: () => void): this;
removeListener(event: string | symbol, listener: (...args: any[]) => void): this;
off(event: "error", listener: (err: Error) => void): this;
off(event: "exit", listener: (exitCode: number) => void): this;
off(event: "message", listener: (value: any) => void): this;
off(event: "online", listener: () => void): this;
off(event: string | symbol, listener: (...args: any[]) => void): this;
}
}
|
PypiClean
|
/client-library-for-chaos-mesh-1.2.14.tar.gz/client-library-for-chaos-mesh-1.2.14/chaosmesh/experiments/base/k8s/stress/cpu.py
|
from abc import ABC
from dataclasses import asdict
from chaosmesh.experiments.base.k8s.stress import StressTest
from chaosmesh.k8s.selector import Selector
class BasePodStressCPUExperiment(StressTest, ABC):
"""
BasePodStressCPUExperiment is a class that implements the abstract base class StressTest and is used to perform a stress test on CPU resources of Pods.
Args:
**kwargs: The keyword arguments that are passed to the class constructor.
Attributes:
kwargs (dict): A dictionary of arguments passed to the class constructor.
"""
def __init__(self, **kwargs):
"""
The constructor for the BasePodStressCPUExperiment class. It initializes the attributes of the class by calling the superclass constructor.
Args:
**kwargs: The keyword arguments that are passed to the class constructor.
"""
super(BasePodStressCPUExperiment, self).__init__(**kwargs)
def validate(self) -> None:
"""
The validate method is used to validate the arguments passed to the class. It checks if the selector argument is not None and is of type `Selector`. It also checks if the workers and load arguments are not None.
Raises:
AssertionError: If the selector argument is None or is not of type `Selector`.
AssertionError: If the workers argument is None.
AssertionError: If the load argument is None.
"""
assert self.kwargs['selector'] is not None, "label selector cannot be None"
assert isinstance(self.kwargs['selector'], Selector), "check the selector type"
assert self.kwargs['workers'] is not None, "workers cannot be None"
        assert self.kwargs['load'] is not None, "load cannot be None"
def spec(self, namespace, name) -> dict:
"""
The spec method is used to create the specification for the experiment.
Args:
namespace (str): The namespace in which the experiment is to be run.
name (str): The name of the experiment.
Returns:
dict: A dictionary that represents the specification of the experiment.
"""
return {
"selector": asdict(self.kwargs['selector']),
"mode": self.kwargs.get('mode'),
"stressors": {
"cpu": {
"workers": self.kwargs.get('workers'),
"load": self.kwargs.get('load')
}
},
"duration": self.kwargs.get('duration')
}
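# A minimal sketch (not from the library documentation) of the spec this
# experiment renders; the Selector arguments and field values are placeholders.
#
#     kwargs = {"selector": Selector(...), "mode": "one",
#               "workers": 2, "load": 80, "duration": "60s"}
#     # spec(namespace, name) then returns roughly:
#     # {"selector": {...}, "mode": "one",
#     #  "stressors": {"cpu": {"workers": 2, "load": 80}},
#     #  "duration": "60s"}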
|
PypiClean
|
/react-frontend-20230406083236.tar.gz/react-frontend-20230406083236/react_frontend/f67ce0a0.js
|
"use strict";(self.webpackChunkreact_frontend=self.webpackChunkreact_frontend||[]).push([[1480],{81480:function(e,t,n){function r(e,t,n){return(t=d(t))in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),Object.defineProperty(e,"prototype",{writable:!1}),t&&o(e,t)}function o(e,t){return o=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(e,t){return e.__proto__=t,e},o(e,t)}function c(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var n,r=a(e);if(t){var i=a(this).constructor;n=Reflect.construct(r,arguments,i)}else n=r.apply(this,arguments);return function(e,t){if(t&&("object"===v(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return function(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}(e)}(this,n)}}function a(e){return a=Object.setPrototypeOf?Object.getPrototypeOf.bind():function(e){return e.__proto__||Object.getPrototypeOf(e)},a(e)}function s(e){return function(e){if(Array.isArray(e))return u(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(!e)return;if("string"==typeof e)return u(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);"Object"===n&&e.constructor&&(n=e.constructor.name);if("Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return u(e,t)}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function u(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n<t;n++)r[n]=e[n];return r}function h(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function l(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,d(r.key),r)}}function f(e,t,n){return t&&l(e.prototype,t),n&&l(e,n),Object.defineProperty(e,"prototype",{writable:!1}),e}function d(e){var t=function(e,t){if("object"!==v(e)||null===e)return e;var n=e[Symbol.toPrimitive];if(void 0!==n){var r=n.call(e,t||"default");if("object"!==v(r))return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"===v(t)?t:String(t)}function v(e){return v="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},v(e)}function g(e){return Array.isArray?Array.isArray(e):"[object Array]"===L(e)}n.d(t,{Z:function(){return ve}});var y=1/0;function p(e){return null==e?"":function(e){if("string"==typeof e)return e;var t=e+"";return"0"==t&&1/e==-y?"-0":t}(e)}function m(e){return"string"==typeof e}function k(e){return"number"==typeof e}function 
M(e){return!0===e||!1===e||function(e){return b(e)&&null!==e}(e)&&"[object Boolean]"==L(e)}function b(e){return"object"===v(e)}function x(e){return null!=e}function w(e){return!e.trim().length}function L(e){return null==e?void 0===e?"[object Undefined]":"[object Null]":Object.prototype.toString.call(e)}var _=function(e){return"Missing ".concat(e," property in key")},S=function(e){return"Property 'weight' in key '".concat(e,"' must be a positive integer")},C=Object.prototype.hasOwnProperty,A=function(){function e(t){var n=this;h(this,e),this._keys=[],this._keyMap={};var r=0;t.forEach((function(e){var t=I(e);r+=t.weight,n._keys.push(t),n._keyMap[t.id]=t,r+=t.weight})),this._keys.forEach((function(e){e.weight/=r}))}return f(e,[{key:"get",value:function(e){return this._keyMap[e]}},{key:"keys",value:function(){return this._keys}},{key:"toJSON",value:function(){return JSON.stringify(this._keys)}}]),e}();function I(e){var t=null,n=null,r=null,i=1,o=null;if(m(e)||g(e))r=e,t=O(e),n=E(e);else{if(!C.call(e,"name"))throw new Error(_("name"));var c=e.name;if(r=c,C.call(e,"weight")&&(i=e.weight)<=0)throw new Error(S(c));t=O(c),n=E(c),o=e.getFn}return{path:t,id:n,weight:i,src:r,getFn:o}}function O(e){return g(e)?e:e.split(".")}function E(e){return g(e)?e.join("."):e}var j={useExtendedSearch:!1,getFn:function(e,t){var n=[],r=!1;return function e(t,i,o){if(x(t))if(i[o]){var c=t[i[o]];if(!x(c))return;if(o===i.length-1&&(m(c)||k(c)||M(c)))n.push(p(c));else if(g(c)){r=!0;for(var a=0,s=c.length;a<s;a+=1)e(c[a],i,o+1)}else i.length&&e(c,i,o+1)}else n.push(t)}(e,m(t)?t.split("."):t,0),r?n:n[0]},ignoreLocation:!1,ignoreFieldNorm:!1,fieldNormWeight:1},$=Object.assign({},{isCaseSensitive:!1,includeScore:!1,keys:[],shouldSort:!0,sortFn:function(e,t){return e.score===t.score?e.idx<t.idx?-1:1:e.score<t.score?-1:1}},{includeMatches:!1,findAllMatches:!1,minMatchCharLength:1},{location:0,threshold:.6,distance:100},j),F=/[^ ]+/g;var R=function(){function e(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=t.getFn,r=void 0===n?$.getFn:n,i=t.fieldNormWeight,o=void 0===i?$.fieldNormWeight:i;h(this,e),this.norm=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:3,n=new Map,r=Math.pow(10,t);return{get:function(t){var i=t.match(F).length;if(n.has(i))return n.get(i);var o=1/Math.pow(i,.5*e),c=parseFloat(Math.round(o*r)/r);return n.set(i,c),c},clear:function(){n.clear()}}}(o,3),this.getFn=r,this.isCreated=!1,this.setIndexRecords()}return f(e,[{key:"setSources",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.docs=e}},{key:"setIndexRecords",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.records=e}},{key:"setKeys",value:function(){var e=this,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.keys=t,this._keysMap={},t.forEach((function(t,n){e._keysMap[t.id]=n}))}},{key:"create",value:function(){var e=this;!this.isCreated&&this.docs.length&&(this.isCreated=!0,m(this.docs[0])?this.docs.forEach((function(t,n){e._addString(t,n)})):this.docs.forEach((function(t,n){e._addObject(t,n)})),this.norm.clear())}},{key:"add",value:function(e){var t=this.size();m(e)?this._addString(e,t):this._addObject(e,t)}},{key:"removeAt",value:function(e){this.records.splice(e,1);for(var t=e,n=this.size();t<n;t+=1)this.records[t].i-=1}},{key:"getValueForItemAtKeyId",value:function(e,t){return e[this._keysMap[t]]}},{key:"size",value:function(){return 
this.records.length}},{key:"_addString",value:function(e,t){if(x(e)&&!w(e)){var n={v:e,i:t,n:this.norm.get(e)};this.records.push(n)}}},{key:"_addObject",value:function(e,t){var n=this,r={i:t,$:{}};this.keys.forEach((function(t,i){var o=t.getFn?t.getFn(e):n.getFn(e,t.path);if(x(o))if(g(o)){for(var c=[],a=[{nestedArrIndex:-1,value:o}];a.length;){var s=a.pop(),u=s.nestedArrIndex,h=s.value;if(x(h))if(m(h)&&!w(h)){var l={v:h,i:u,n:n.norm.get(h)};c.push(l)}else g(h)&&h.forEach((function(e,t){a.push({nestedArrIndex:t,value:e})}))}r.$[i]=c}else if(m(o)&&!w(o)){var f={v:o,n:n.norm.get(o)};r.$[i]=f}})),this.records.push(r)}},{key:"toJSON",value:function(){return{keys:this.keys,records:this.records}}}]),e}();function N(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.getFn,i=void 0===r?$.getFn:r,o=n.fieldNormWeight,c=void 0===o?$.fieldNormWeight:o,a=new R({getFn:i,fieldNormWeight:c});return a.setKeys(e.map(I)),a.setSources(t),a.create(),a}function P(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.errors,r=void 0===n?0:n,i=t.currentLocation,o=void 0===i?0:i,c=t.expectedLocation,a=void 0===c?0:c,s=t.distance,u=void 0===s?$.distance:s,h=t.ignoreLocation,l=void 0===h?$.ignoreLocation:h,f=r/e.length;if(l)return f;var d=Math.abs(a-o);return u?f+d/u:d?1:f}var W=32;function z(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},i=r.location,o=void 0===i?$.location:i,c=r.distance,a=void 0===c?$.distance:c,s=r.threshold,u=void 0===s?$.threshold:s,h=r.findAllMatches,l=void 0===h?$.findAllMatches:h,f=r.minMatchCharLength,d=void 0===f?$.minMatchCharLength:f,v=r.includeMatches,g=void 0===v?$.includeMatches:v,y=r.ignoreLocation,p=void 0===y?$.ignoreLocation:y;if(t.length>W)throw new Error("Pattern length exceeds max of ".concat(W,"."));for(var m,k=t.length,M=e.length,b=Math.max(0,Math.min(o,M)),x=u,w=b,L=d>1||g,_=L?Array(M):[];(m=e.indexOf(t,w))>-1;){var S=P(t,{currentLocation:m,expectedLocation:b,distance:a,ignoreLocation:p});if(x=Math.min(S,x),w=m+k,L)for(var C=0;C<k;)_[m+C]=1,C+=1}w=-1;for(var A=[],I=1,O=k+M,E=1<<k-1,j=0;j<k;j+=1){for(var F=0,R=O;F<R;){P(t,{errors:j,currentLocation:b+R,expectedLocation:b,distance:a,ignoreLocation:p})<=x?F=R:O=R,R=Math.floor((O-F)/2+F)}O=R;var N=Math.max(1,b-R+1),z=l?M:Math.min(b+R,M)+k,K=Array(z+2);K[z+1]=(1<<j)-1;for(var T=z;T>=N;T-=1){var q=T-1,B=n[e.charAt(q)];if(L&&(_[q]=+!!B),K[T]=(K[T+1]<<1|1)&B,j&&(K[T]|=(A[T+1]|A[T])<<1|1|A[T+1]),K[T]&E&&(I=P(t,{errors:j,currentLocation:q,expectedLocation:b,distance:a,ignoreLocation:p}))<=x){if(x=I,(w=q)<=b)break;N=Math.max(1,2*b-w)}}if(P(t,{errors:j+1,currentLocation:b,expectedLocation:b,distance:a,ignoreLocation:p})>x)break;A=K}var J={isMatch:w>=0,score:Math.max(.001,I)};if(L){var U=function(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:$.minMatchCharLength,n=[],r=-1,i=-1,o=0,c=e.length;o<c;o+=1){var a=e[o];a&&-1===r?r=o:a||-1===r||((i=o-1)-r+1>=t&&n.push([r,i]),r=-1)}return e[o-1]&&o-r>=t&&n.push([r,o-1]),n}(_,d);U.length?g&&(J.indices=U):J.isMatch=!1}return J}function K(e){for(var t={},n=0,r=e.length;n<r;n+=1){var i=e.charAt(n);t[i]=(t[i]||0)|1<<r-n-1}return t}var T=function(){function e(t){var n=this,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},i=r.location,o=void 0===i?$.location:i,c=r.threshold,a=void 0===c?$.threshold:c,s=r.distance,u=void 0===s?$.distance:s,l=r.includeMatches,f=void 0===l?$.includeMatches:l,d=r.findAllMatches,v=void 
0===d?$.findAllMatches:d,g=r.minMatchCharLength,y=void 0===g?$.minMatchCharLength:g,p=r.isCaseSensitive,m=void 0===p?$.isCaseSensitive:p,k=r.ignoreLocation,M=void 0===k?$.ignoreLocation:k;if(h(this,e),this.options={location:o,threshold:a,distance:u,includeMatches:f,findAllMatches:v,minMatchCharLength:y,isCaseSensitive:m,ignoreLocation:M},this.pattern=m?t:t.toLowerCase(),this.chunks=[],this.pattern.length){var b=function(e,t){n.chunks.push({pattern:e,alphabet:K(e),startIndex:t})},x=this.pattern.length;if(x>W){for(var w=0,L=x%W,_=x-L;w<_;)b(this.pattern.substr(w,W),w),w+=W;if(L){var S=x-W;b(this.pattern.substr(S),S)}}else b(this.pattern,0)}}return f(e,[{key:"searchIn",value:function(e){var t=this.options,n=t.isCaseSensitive,r=t.includeMatches;if(n||(e=e.toLowerCase()),this.pattern===e){var i={isMatch:!0,score:0};return r&&(i.indices=[[0,e.length-1]]),i}var o=this.options,c=o.location,a=o.distance,u=o.threshold,h=o.findAllMatches,l=o.minMatchCharLength,f=o.ignoreLocation,d=[],v=0,g=!1;this.chunks.forEach((function(t){var n=t.pattern,i=t.alphabet,o=t.startIndex,y=z(e,n,i,{location:c+o,distance:a,threshold:u,findAllMatches:h,minMatchCharLength:l,includeMatches:r,ignoreLocation:f}),p=y.isMatch,m=y.score,k=y.indices;p&&(g=!0),v+=m,p&&k&&(d=[].concat(s(d),s(k)))}));var y={isMatch:g,score:g?v/this.chunks.length:1};return g&&r&&(y.indices=d),y}}]),e}(),q=function(){function e(t){h(this,e),this.pattern=t}return f(e,[{key:"search",value:function(){}}],[{key:"isMultiMatch",value:function(e){return B(e,this.multiRegex)}},{key:"isSingleMatch",value:function(e){return B(e,this.singleRegex)}}]),e}();function B(e,t){var n=e.match(t);return n?n[1]:null}var J=function(e){i(n,e);var t=c(n);function n(e){return h(this,n),t.call(this,e)}return f(n,[{key:"search",value:function(e){var t=e===this.pattern;return{isMatch:t,score:t?0:1,indices:[0,this.pattern.length-1]}}}],[{key:"type",get:function(){return"exact"}},{key:"multiRegex",get:function(){return/^="(.*)"$/}},{key:"singleRegex",get:function(){return/^=(.*)$/}}]),n}(q),U=function(e){i(n,e);var t=c(n);function n(e){return h(this,n),t.call(this,e)}return f(n,[{key:"search",value:function(e){var t=-1===e.indexOf(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-exact"}},{key:"multiRegex",get:function(){return/^!"(.*)"$/}},{key:"singleRegex",get:function(){return/^!(.*)$/}}]),n}(q),V=function(e){i(n,e);var t=c(n);function n(e){return h(this,n),t.call(this,e)}return f(n,[{key:"search",value:function(e){var t=e.startsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,this.pattern.length-1]}}}],[{key:"type",get:function(){return"prefix-exact"}},{key:"multiRegex",get:function(){return/^\^"(.*)"$/}},{key:"singleRegex",get:function(){return/^\^(.*)$/}}]),n}(q),D=function(e){i(n,e);var t=c(n);function n(e){return h(this,n),t.call(this,e)}return f(n,[{key:"search",value:function(e){var t=!e.startsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-prefix-exact"}},{key:"multiRegex",get:function(){return/^!\^"(.*)"$/}},{key:"singleRegex",get:function(){return/^!\^(.*)$/}}]),n}(q),Q=function(e){i(n,e);var t=c(n);function n(e){return h(this,n),t.call(this,e)}return f(n,[{key:"search",value:function(e){var 
t=e.endsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[e.length-this.pattern.length,e.length-1]}}}],[{key:"type",get:function(){return"suffix-exact"}},{key:"multiRegex",get:function(){return/^"(.*)"\$$/}},{key:"singleRegex",get:function(){return/^(.*)\$$/}}]),n}(q),Z=function(e){i(n,e);var t=c(n);function n(e){return h(this,n),t.call(this,e)}return f(n,[{key:"search",value:function(e){var t=!e.endsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-suffix-exact"}},{key:"multiRegex",get:function(){return/^!"(.*)"\$$/}},{key:"singleRegex",get:function(){return/^!(.*)\$$/}}]),n}(q),G=function(e){i(n,e);var t=c(n);function n(e){var r,i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},o=i.location,c=void 0===o?$.location:o,a=i.threshold,s=void 0===a?$.threshold:a,u=i.distance,l=void 0===u?$.distance:u,f=i.includeMatches,d=void 0===f?$.includeMatches:f,v=i.findAllMatches,g=void 0===v?$.findAllMatches:v,y=i.minMatchCharLength,p=void 0===y?$.minMatchCharLength:y,m=i.isCaseSensitive,k=void 0===m?$.isCaseSensitive:m,M=i.ignoreLocation,b=void 0===M?$.ignoreLocation:M;return h(this,n),(r=t.call(this,e))._bitapSearch=new T(e,{location:c,threshold:s,distance:l,includeMatches:d,findAllMatches:g,minMatchCharLength:p,isCaseSensitive:k,ignoreLocation:b}),r}return f(n,[{key:"search",value:function(e){return this._bitapSearch.searchIn(e)}}],[{key:"type",get:function(){return"fuzzy"}},{key:"multiRegex",get:function(){return/^"(.*)"$/}},{key:"singleRegex",get:function(){return/^(.*)$/}}]),n}(q),H=function(e){i(n,e);var t=c(n);function n(e){return h(this,n),t.call(this,e)}return f(n,[{key:"search",value:function(e){for(var t,n=0,r=[],i=this.pattern.length;(t=e.indexOf(this.pattern,n))>-1;)n=t+i,r.push([t,n-1]);var o=!!r.length;return{isMatch:o,score:o?0:1,indices:r}}}],[{key:"type",get:function(){return"include"}},{key:"multiRegex",get:function(){return/^'"(.*)"$/}},{key:"singleRegex",get:function(){return/^'(.*)$/}}]),n}(q),X=[J,H,V,D,Z,Q,U,G],Y=X.length,ee=/ +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)/;var te=new Set([G.type,H.type]),ne=function(){function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=n.isCaseSensitive,i=void 0===r?$.isCaseSensitive:r,o=n.includeMatches,c=void 0===o?$.includeMatches:o,a=n.minMatchCharLength,s=void 0===a?$.minMatchCharLength:a,u=n.ignoreLocation,l=void 0===u?$.ignoreLocation:u,f=n.findAllMatches,d=void 0===f?$.findAllMatches:f,v=n.location,g=void 0===v?$.location:v,y=n.threshold,p=void 0===y?$.threshold:y,m=n.distance,k=void 0===m?$.distance:m;h(this,e),this.query=null,this.options={isCaseSensitive:i,includeMatches:c,minMatchCharLength:s,findAllMatches:d,ignoreLocation:l,location:g,threshold:p,distance:k},this.pattern=i?t:t.toLowerCase(),this.query=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return e.split("|").map((function(e){for(var n=e.trim().split(ee).filter((function(e){return e&&!!e.trim()})),r=[],i=0,o=n.length;i<o;i+=1){for(var c=n[i],a=!1,s=-1;!a&&++s<Y;){var u=X[s],h=u.isMultiMatch(c);h&&(r.push(new u(h,t)),a=!0)}if(!a)for(s=-1;++s<Y;){var l=X[s],f=l.isSingleMatch(c);if(f){r.push(new l(f,t));break}}}return r}))}(this.pattern,this.options)}return f(e,[{key:"searchIn",value:function(e){var t=this.query;if(!t)return{isMatch:!1,score:1};var n=this.options,r=n.includeMatches;e=n.isCaseSensitive?e:e.toLowerCase();for(var i=0,o=[],c=0,a=0,u=t.length;a<u;a+=1){var h=t[a];o.length=0,i=0;for(var l=0,f=h.length;l<f;l+=1){var 
d=h[l],v=d.search(e),g=v.isMatch,y=v.indices,p=v.score;if(!g){c=0,i=0,o.length=0;break}if(i+=1,c+=p,r){var m=d.constructor.type;te.has(m)?o=[].concat(s(o),s(y)):o.push(y)}}if(i){var k={isMatch:!0,score:c/i};return r&&(k.indices=o),k}}return{isMatch:!1,score:1}}}],[{key:"condition",value:function(e,t){return t.useExtendedSearch}}]),e}(),re=[];function ie(e,t){for(var n=0,r=re.length;n<r;n+=1){var i=re[n];if(i.condition(e,t))return new i(e,t)}return new T(e,t)}var oe="$and",ce="$or",ae="$path",se="$val",ue=function(e){return!(!e[oe]&&!e[ce])},he=function(e){return r({},oe,Object.keys(e).map((function(t){return r({},t,e[t])})))};function le(e,t){var n=(arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).auto,r=void 0===n||n;return ue(e)||(e=he(e)),function e(n){var i=Object.keys(n),o=function(e){return!!e[ae]}(n);if(!o&&i.length>1&&!ue(n))return e(he(n));if(function(e){return!g(e)&&b(e)&&!ue(e)}(n)){var c=o?n[ae]:i[0],a=o?n[se]:n[c];if(!m(a))throw new Error(function(e){return"Invalid value for key ".concat(e)}(c));var s={keyId:E(c),pattern:a};return r&&(s.searcher=ie(a,t)),s}var u={children:[],operator:i[0]};return i.forEach((function(t){var r=n[t];g(r)&&r.forEach((function(t){u.children.push(e(t))}))})),u}(e)}function fe(e,t){var n=e.matches;t.matches=[],x(n)&&n.forEach((function(e){if(x(e.indices)&&e.indices.length){var n={indices:e.indices,value:e.value};e.key&&(n.key=e.key.src),e.idx>-1&&(n.refIndex=e.idx),t.matches.push(n)}}))}function de(e,t){t.score=e.score}var ve=function(){function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=arguments.length>2?arguments[2]:void 0;h(this,e),this.options=Object.assign({},$,n),this.options.useExtendedSearch,this._keyStore=new A(this.options.keys),this.setCollection(t,r)}return f(e,[{key:"setCollection",value:function(e,t){if(this._docs=e,t&&!(t instanceof R))throw new Error("Incorrect 'index' type");this._myIndex=t||N(this.options.keys,this._docs,{getFn:this.options.getFn,fieldNormWeight:this.options.fieldNormWeight})}},{key:"add",value:function(e){x(e)&&(this._docs.push(e),this._myIndex.add(e))}},{key:"remove",value:function(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:function(){return!1},t=[],n=0,r=this._docs.length;n<r;n+=1){var i=this._docs[n];e(i,n)&&(this.removeAt(n),n-=1,r-=1,t.push(i))}return t}},{key:"removeAt",value:function(e){this._docs.splice(e,1),this._myIndex.removeAt(e)}},{key:"getIndex",value:function(){return this._myIndex}},{key:"search",value:function(e){var t=(arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}).limit,n=void 0===t?-1:t,r=this.options,i=r.includeMatches,o=r.includeScore,c=r.shouldSort,a=r.sortFn,s=r.ignoreFieldNorm,u=m(e)?m(this._docs[0])?this._searchStringList(e):this._searchObjectList(e):this._searchLogical(e);return function(e,t){var n=t.ignoreFieldNorm,r=void 0===n?$.ignoreFieldNorm:n;e.forEach((function(e){var t=1;e.matches.forEach((function(e){var n=e.key,i=e.norm,o=e.score,c=n?n.weight:null;t*=Math.pow(0===o&&c?Number.EPSILON:o,(c||1)*(r?1:i))})),e.score=t}))}(u,{ignoreFieldNorm:s}),c&&u.sort(a),k(n)&&n>-1&&(u=u.slice(0,n)),function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.includeMatches,i=void 0===r?$.includeMatches:r,o=n.includeScore,c=void 0===o?$.includeScore:o,a=[];return i&&a.push(fe),c&&a.push(de),e.map((function(e){var n=e.idx,r={item:t[n],refIndex:n};return a.length&&a.forEach((function(t){t(e,r)})),r}))}(u,this._docs,{includeMatches:i,includeScore:o})}},{key:"_searchStringList",value:function(e){var 
t=ie(e,this.options),n=this._myIndex.records,r=[];return n.forEach((function(e){var n=e.v,i=e.i,o=e.n;if(x(n)){var c=t.searchIn(n),a=c.isMatch,s=c.score,u=c.indices;a&&r.push({item:n,idx:i,matches:[{score:s,value:n,norm:o,indices:u}]})}})),r}},{key:"_searchLogical",value:function(e){var t=this,n=le(e,this.options),r=function e(n,r,i){if(!n.children){var o=n.keyId,c=n.searcher,a=t._findMatches({key:t._keyStore.get(o),value:t._myIndex.getValueForItemAtKeyId(r,o),searcher:c});return a&&a.length?[{idx:i,item:r,matches:a}]:[]}for(var u=[],h=0,l=n.children.length;h<l;h+=1){var f=e(n.children[h],r,i);if(f.length)u.push.apply(u,s(f));else if(n.operator===oe)return[]}return u},i=this._myIndex.records,o={},c=[];return i.forEach((function(e){var t=e.$,i=e.i;if(x(t)){var a=r(n,t,i);a.length&&(o[i]||(o[i]={idx:i,item:t,matches:[]},c.push(o[i])),a.forEach((function(e){var t,n=e.matches;(t=o[i].matches).push.apply(t,s(n))})))}})),c}},{key:"_searchObjectList",value:function(e){var t=this,n=ie(e,this.options),r=this._myIndex,i=r.keys,o=r.records,c=[];return o.forEach((function(e){var r=e.$,o=e.i;if(x(r)){var a=[];i.forEach((function(e,i){a.push.apply(a,s(t._findMatches({key:e,value:r[i],searcher:n})))})),a.length&&c.push({idx:o,item:r,matches:a})}})),c}},{key:"_findMatches",value:function(e){var t=e.key,n=e.value,r=e.searcher;if(!x(n))return[];var i=[];if(g(n))n.forEach((function(e){var n=e.v,o=e.i,c=e.n;if(x(n)){var a=r.searchIn(n),s=a.isMatch,u=a.score,h=a.indices;s&&i.push({score:u,key:t,value:n,idx:o,norm:c,indices:h})}}));else{var o=n.v,c=n.n,a=r.searchIn(o),s=a.isMatch,u=a.score,h=a.indices;s&&i.push({score:u,key:t,value:o,norm:c,indices:h})}return i}}]),e}();ve.version="6.6.2",ve.createIndex=N,ve.parseIndex=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.getFn,r=void 0===n?$.getFn:n,i=t.fieldNormWeight,o=void 0===i?$.fieldNormWeight:i,c=e.keys,a=e.records,s=new R({getFn:r,fieldNormWeight:o});return s.setKeys(c),s.setIndexRecords(a),s},ve.config=$,ve.parseQuery=le,function(){re.push.apply(re,arguments)}(ne)}}]);
|
PypiClean
|
/seaborn_image-0.7.0-py3-none-any.whl/seaborn_image/_grid.py
|
import itertools
import warnings
from typing import Iterable
import matplotlib.pyplot as plt
import numpy as np
from copy import copy
from ._filters import filterplot
from ._general import imgplot
from .utils import despine
__all__ = ["ParamGrid", "ImageGrid", "rgbplot", "FilterGrid"]
class ImageGrid:
"""
    Figure level : plot a collection of 2-D or 3-D images, or 3-D or 4-D image data,
    along a grid. This class also supports slicing of the 3-D and 4-D image data
    along different axes with variable step sizes and start/stop indices.
Parameters
----------
data :
3-D or 4-D Image data (array-like), or list of 2-D or 3-D image data. Supported array shapes are all
`matplotlib.pyplot.imshow` array shapes
slices : int or list, optional
        If `data` is 3-D or 4-D, `slices` will index the specified slices along `axis` and only plot
        the resulting images. If None, all slices along `axis` are plotted, by default None
axis : int, optional
Axis along which the data will be sliced, by default -1 for 3-D arrays and 0 for 4-D arrays
step : int, optional
Step along the given axis, by default 1
start : int, optional
        Starting index to select from the data, by default None
stop : int, optional
Stopping index to select from the data, by default None
    map_func : callable or list/tuple of callables, optional
        Transform input image data using this function. All function arguments must be passed via `map_func_kw`.
map_func_kw : dict or list/tuple of dict, optional
kwargs to pass on to `map_func`. Must be dict for a single `map_func` and a list/tuple of dicts for a list/tuple of `map_func`
col_wrap : int, optional
Number of columns to display. Defaults to None.
height : int or float, optional
Size of the individual images. Defaults to 3.
aspect : int or float, optional
Aspect ratio of individual images. Defaults to 1.
cmap : str or `matplotlib.colors.Colormap` or list, optional
Image colormap. If input data is a list of images,
`cmap` can be a list of colormaps. Defaults to None.
robust : bool or list, optional
If True, colormap range is calculated based on the percentiles
defined in `perc` parameter. If input data is a list of images,
robust can be a list of bools, by default False
perc : tuple or list, optional
If `robust` is True, colormap range is calculated based
on the percentiles specified instead of the extremes, by default (2, 98) -
2nd and 98th percentiles for min and max values. Can be a list of tuples, if
input data is a list of images
alpha : float or array-like, optional
`matplotlib.pyplot.imshow` alpha blending value from 0 (transparent) to 1 (opaque),
by default None
origin : str, optional
Image origin, by default None
vmin : float or list of floats, optional
Minimum data value that colormap covers, by default None
vmax : float or list of floats, optional
Maximum data value that colormap covers, by default None
interpolation : str, optional
`matplotlib.pyplot.imshow` interpolation method used, by default None
norm : `matplotlib.colors.Normalize` or list of `matplotlib.colors.Normalize`, optional
`matplotlib` Normalize instance used to scale scalar data before
mapping to colors using cmap.
dx : float or list, optional
Size per pixel of the image data. If scalebar
        is required, `dx` and `units` must be specified.
Can be a list of floats, if input data is a list of images.
Defaults to None.
units : str or list, optional
Units of `dx`.
Can be a list of str, if input data is a list of images.
Defaults to None.
dimension : str or list, optional
Dimension of `dx` and `units`.
Options include :
- "si" : scale bar showing km, m, cm, etc.
- "imperial" : scale bar showing in, ft, yd, mi, etc.
- "si-reciprocal" : scale bar showing 1/m, 1/cm, etc.
- "angle" : scale bar showing °, ʹ (minute of arc) or ʹʹ (second of arc).
- "pixel" : scale bar showing px, kpx, Mpx, etc.
Can be a list of str, if input data is a list of images.
Defaults to None.
cbar : bool or list, optional
Specify if a colorbar is required or not.
Can be a list of bools, if input data is a list of images.
Defaults to True.
orientation : str, optional
        Specify the orientation of the colorbar.
        Options include :
- 'h' or 'horizontal' for a horizontal colorbar to the bottom of the image.
- 'v' or 'vertical' for a vertical colorbar to the right of the image.
Defaults to 'v'.
cbar_log : bool, optional
Log scale colormap and colorbar
cbar_label : str or list, optional
Colorbar label.
Can be a list of str, if input data is a list of images.
Defaults to None.
cbar_ticks : list, optional
List of colorbar ticks. Defaults to None.
showticks : bool, optional
Show image x-y axis ticks. Defaults to False.
despine : bool, optional
Remove axes spines from image axes as well as colorbar axes.
Defaults to None.
Returns
-------
A `seaborn_image.ImageGrid` object
Raises
------
ValueError
If `data` is None
ValueError
If `data` has 1 dimension
ValueError
If `data` has more than 4 dimensions
ValueError
If `data` contains a 4D image within a list of images
ValueError
If `axis` is not 0, 1, 2 or -1
TypeError
If `map_func` is not a callable object or a list/tuple of callable objects
ValueError
If `map_func` is a list/tuple of callable objects when `data` is 3D or 4D
Examples
--------
Plot a collection of images
.. plot::
:context: close-figs
>>> import seaborn_image as isns
>>> pol = isns.load_image("polymer")
>>> pl = isns.load_image("fluorescence")
>>> g = isns.ImageGrid([pol, pl])
Common properties across images
.. plot::
:context: close-figs
>>> g = isns.ImageGrid([pol, pl], cmap="inferno")
Different scalebars for different images
.. plot::
:context: close-figs
>>> g = isns.ImageGrid([pol, pl], dx=[0.15, 0.1], units="um")
Specify properties only for specific images
.. plot::
:context: close-figs
>>> g = isns.ImageGrid([pol, pl], dx=[None, 100], units=[None, "nm"])
Different colormaps and colorbar titles
.. plot::
:context: close-figs
>>> g = isns.ImageGrid([pol, pl], cmap=["deep", "magma"], cbar_label=["Height (nm)", "PL Intensity"])
Correct colormap for outliers
.. plot::
:context: close-figs
>>> pol_out = isns.load_image("polymer outliers")
>>> g = isns.ImageGrid([pol, pl, pol_out], robust=[False, False, True], perc=[None, None, (0.5, 99.5)])
Plot 3-D images; control number of columns
.. plot::
:context: close-figs
>>> cells = isns.load_image("cells")
>>> g = isns.ImageGrid(cells, col_wrap=5, cbar=False)
Plot specific slices of the 3-D data cube
.. plot::
:context: close-figs
>>> g = isns.ImageGrid(cells, slices=[10, 20, 30], cbar=False)
Slice along different axis
.. plot::
:context: close-figs
>>> g = isns.ImageGrid(cells, slices=[0, 4, 10, 32], axis=0, cbar=False)
    Select indices with a specific step size
.. plot::
:context: close-figs
>>> g = isns.ImageGrid(cells, step=3, cbar=False)
    Visualize image intensities relative to other images on the grid
.. plot::
:context: close-figs
>>> g = isns.ImageGrid(cells, vmin=0, vmax=1, height=1, col_wrap=5)
Plot a list of 3-D images
.. plot::
:context: close-figs
>>> from skimage.data import astronaut, chelsea
>>> g = isns.ImageGrid([astronaut(), chelsea()], origin="upper")
Plot 4-D image data cube
.. plot::
:context: close-figs
>>> cifar = isns.load_image("cifar10")
>>> g = isns.ImageGrid(cifar, height=1, col_wrap=6)
Map a function to the image data
.. plot::
:context: close-figs
>>> from skimage.exposure import adjust_gamma
>>> g = isns.ImageGrid(
... cells,
... map_func=adjust_gamma,
... map_func_kw={"gamma" : 0.5},
... cbar=False,
... height=1,
... col_wrap=10)
Map a list of functions to the input data. Pass function kwargs to `map_func_kw`.
.. plot::
:context: close-figs
>>> from skimage.filters import meijering, sato, frangi, hessian
>>> retina = isns.load_image("retina-gray")
>>> g = isns.ImageGrid(
... retina,
... map_func=[meijering, sato, frangi, hessian],
... col_wrap=4,
... map_func_kw=[{"mode" : "reflect", "sigmas" : [1]} for _ in range(4)])
If no kwargs are required for one or more of the functions, use `None`.
.. plot::
:context: close-figs
>>> g = isns.ImageGrid(
... retina,
... map_func=[meijering, sato, frangi, hessian],
... col_wrap=4,
... map_func_kw=[{"mode" : "reflect", "sigmas" : [1]}, None, None, None])
Apply a list of filters to a list of input images.
.. plot::
:context: close-figs
>>> from skimage.filters import gaussian, median
>>> g = isns.ImageGrid(
... [pol, pl, retina],
... map_func=[gaussian, median, hessian],
... dx=[15, 100, None],
... units="nm")
Change colorbar orientation
.. plot::
:context: close-figs
>>> g = isns.ImageGrid([pol, pl], orientation="h")
Change figure size using height
.. plot::
:context: close-figs
>>> g = isns.ImageGrid([pol, pl], height=4.5)
"""
def __init__(
self,
data,
*,
slices=None,
axis=None,
step=1,
start=None,
stop=None,
map_func=None,
map_func_kw=None,
col_wrap=None,
height=3,
aspect=1,
cmap=None,
robust=False,
perc=(2, 98),
alpha=None,
origin=None,
vmin=None,
vmax=None,
interpolation=None,
norm=None,
dx=None,
units=None,
dimension=None,
cbar=True,
orientation="v",
cbar_log=False,
cbar_label=None,
cbar_ticks=None,
showticks=False,
despine=None,
):
if data is None:
raise ValueError("image data can not be None")
if isinstance(
data, (list, tuple)
        ):  # not using 'Iterable' here, since numpy arrays would also be picked up
# check the number of images to be plotted
_nimages = len(data)
# --- List/Tuple of 2D or 3D images with a List/Tuple of map_func ---
# change the number of images on the grid accordingly
map_func_type = self._check_map_func(map_func, map_func_kw)
if map_func_type == "list/tuple":
_nimages = len(data) * len(map_func)
# no of columns should either be len of data list or len of map_func list
# whichever is higher
if col_wrap is None:
col_wrap = (
len(map_func) if len(map_func) >= len(data) else len(data)
)
elif not isinstance(data, np.ndarray):
raise ValueError("image data must be a list of images or a 3d or 4d array.")
elif data.ndim == 2:
warnings.warn(
"The data inputed is a 2d array which contains a single image. "
"It is recomended that you use `imgplot` instead of `ImageGrid`.",
RuntimeWarning,
)
_nimages = 1
# ---- 2D image with a list/tuple of map_func or individual map_func ------
# check if map_func is a list/tuple of callables
# and assign the new number of images
map_func_type = self._check_map_func(map_func, map_func_kw)
if map_func_type == "list/tuple":
_nimages = len(map_func)
# no of columns should now be len of map_func list
col_wrap = len(map_func) if col_wrap is None else col_wrap
elif data.ndim in [3, 4]:
if data.ndim == 4 and data.shape[-1] not in [1, 3, 4]:
raise ValueError(
"The number of channels in the images must be 1, 3 or 4"
)
if axis is None:
if data.ndim == 3:
axis = -1
else:
axis = 0
if data.ndim == 3 and axis not in [0, 1, 2, -1]:
raise ValueError("Incorrect 'axis'; must be either 0, 1, 2, or -1")
if data.ndim == 4 and axis not in [0, 1, 2, 3, -1]:
raise ValueError("Incorrect 'axis'; must be either 0, 1, 2, 3, or -1")
if slices is None:
# slice the image array along specified axis;
# if start, stop and step are not provided, default is step=1
data = data[
(slice(None),) * (axis % data.ndim) + (slice(start, stop, step),)
]
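                # e.g. for a 3-D array with axis=-1 this builds the index tuple
                # (slice(None), slice(None), slice(start, stop, step)),
                # i.e. data[:, :, start:stop:step]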
slices = np.arange(data.shape[axis])
# if a single slice is provided and
            # it is not an iterable
elif not isinstance(slices, Iterable):
slices = [slices]
_nimages = len(slices)
# ---- 3D or 4D image with an individual map_func ----
map_func_type = self._check_map_func(map_func, map_func_kw)
# raise a ValueError if a list of map_func is provided for 3d image
# TODO - support multiple map_func if "chaining"?
if map_func_type == "list/tuple":
raise ValueError(
"Can not map multiple functions to 3D or 4D image. Please provide a single `map_func`"
)
else:
raise ValueError("Image data can not have more than 4 dimensions")
# if no column wrap specified
# set it to default 3
if col_wrap is None:
col_wrap = 3
# don't create extra columns when there aren't enough images
if col_wrap > _nimages:
col_wrap = _nimages
# Compute the grid shape if col_wrap is specified
ncol = col_wrap
nrow = int(np.ceil(_nimages / col_wrap))
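        # e.g. 7 images with col_wrap=3 give ncol=3, nrow=ceil(7/3)=3,
        # leaving 2 unused axes that are cleared in _cleanup_extra_axes()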
# Calculate the base figure size
figsize = (ncol * height * aspect, nrow * height)
fig = plt.figure(figsize=figsize)
axes = fig.subplots(nrow, ncol, squeeze=False)
# Public API
self.data = data
self.fig = fig
self.axes = axes
self.slices = slices
self.axis = axis
self.step = step
self.start = start
self.stop = stop
self.col_wrap = col_wrap
self.height = height
self.aspect = aspect
self.cmap = cmap
self.robust = robust
self.perc = perc
self.alpha = alpha
self.origin = origin
self.vmin = vmin
self.vmax = vmax
self.interpolation = interpolation
self.norm = norm
self.dx = dx
self.units = units
self.dimension = dimension
self.cbar = cbar
self.orientation = orientation
self.cbar_log = cbar_log
self.cbar_label = cbar_label
self.cbar_ticks = cbar_ticks
self.showticks = showticks
self.despine = despine
self._nrow = nrow
self._ncol = ncol
self._nimages = _nimages
# map function to input data
if map_func is not None:
self._map_func_to_data(map_func, map_func_kw)
self._map_img_to_grid()
self._cleanup_extra_axes()
self._finalize_grid()
def _check_map_func(self, map_func, map_func_kw):
"Check if `map_func` passed is a list/tuple of callables or individual callable"
if map_func is not None:
if isinstance(map_func, (list, tuple)):
for func in map_func:
if not callable(func):
raise TypeError(f"{func} must be a callable function object")
if map_func_kw is not None:
if not isinstance(map_func_kw, (list, tuple)):
raise TypeError(
"`map_func_kw` must be list/tuple of dictionaries"
)
if len(map_func_kw) != len(map_func):
raise ValueError(
"number of `map_func_kw` passed must be the same as the number of `map_func` objects"
)
return "list/tuple"
elif callable(map_func):
if map_func_kw is not None:
if not isinstance(map_func_kw, dict):
raise TypeError(
"`map_func_kw` must be a dictionary when a single `map_func` is passed as input"
)
return "callable"
else:
raise TypeError(
"`map_func` must either be a callable object or a list/tuple of callable objects"
)
def _map_img_to_grid(self):
"""Map image data cube to the image grid."""
_cmap = self.cmap
_robust = self.robust
_perc = self.perc
_vmin = self.vmin
_vmax = self.vmax
_norm = self.norm
_dx = self.dx
_units = self.units
_dimension = self.dimension
_cbar = self.cbar
_cbar_log = self.cbar_log
_cbar_label = self.cbar_label
for i in range(self._nimages):
ax = self.axes.flat[i]
if isinstance(self.data, (list, tuple)):
_d = self.data[i]
# check if the image has more than 2 dimensions
if _d.ndim > 3:
raise ValueError(
f"Image {i} in the list has more than 3 dimensions"
)
if _d.ndim == 3 and _d.shape[-1] not in [1, 3, 4]:
raise ValueError(f"Image {i} in the list has more than 4 channels")
elif self.data.ndim == 2:
_d = self.data
else:
_d = self.data.take(indices=self.slices[i], axis=self.axis)
if isinstance(self.cmap, (list, tuple)):
self._check_len_wrt_n_images(self.cmap)
_cmap = self.cmap[i]
if isinstance(self.robust, (list, tuple)):
self._check_len_wrt_n_images(self.robust)
_robust = self.robust[i]
if isinstance(self.vmin, (list, tuple)):
self._check_len_wrt_n_images(self.vmin)
_vmin = self.vmin[i]
if isinstance(self.vmax, (list, tuple)):
self._check_len_wrt_n_images(self.vmax)
_vmax = self.vmax[i]
if isinstance(self.perc, (list)):
self._check_len_wrt_n_images(self.perc)
_perc = self.perc[i]
if isinstance(self.norm, (list)):
self._check_len_wrt_n_images(self.norm)
_norm = self.norm[i]
if isinstance(self.dx, (list, tuple)):
self._check_len_wrt_n_images(self.dx)
_dx = self.dx[i]
if isinstance(self.units, (list, tuple)):
self._check_len_wrt_n_images(self.units)
_units = self.units[i]
if isinstance(self.dimension, (list, tuple)):
self._check_len_wrt_n_images(self.dimension)
_dimension = self.dimension[i]
if isinstance(self.cbar, (list, tuple)):
self._check_len_wrt_n_images(self.cbar)
_cbar = self.cbar[i]
if isinstance(self.cbar_log, (list, tuple)):
self._check_len_wrt_n_images(self.cbar_log)
_cbar_log = self.cbar_log[i]
if isinstance(self.cbar_label, (list, tuple)):
self._check_len_wrt_n_images(self.cbar_label)
_cbar_label = self.cbar_label[i]
_ = imgplot(
_d,
ax=ax,
cmap=_cmap,
robust=_robust,
perc=_perc,
vmin=_vmin,
vmax=_vmax,
alpha=self.alpha,
origin=self.origin,
interpolation=self.interpolation,
norm=_norm,
dx=_dx,
units=_units,
dimension=_dimension,
cbar=_cbar,
orientation=self.orientation,
cbar_log=_cbar_log,
cbar_label=_cbar_label,
cbar_ticks=self.cbar_ticks,
showticks=self.showticks,
despine=self.despine,
describe=False,
)
# FIXME - for common colorbar
# if self.cbar and self.vmin is not None and self.vmax is not None:
# print("here")
# self.fig.colorbar(_im.images[0], ax=list(self.axes.ravel()), orientation=self.orientation)
def _check_len_wrt_n_images(self, param_list):
"""If a specific parameter is supplied as a list/tuple, check that the
length of the parameter list is the same as the number of images that the parameter is mapped onto
"""
if len(param_list) != self._nimages:
raise AssertionError(
f"If supplying a list/tuple, length of {param_list} must be {self._nimages}."
)
def _adjust_param_list_len(self, map_func):
"""
If the input data and map_func are both list-like,
        modify the parameter lists such as dx, units, etc. so that
        the length of each new parameter list equals the number of images.
# For example -
# if data -> [img1, img2], map_func -> [func1, func2, func3]
# and dx = [dx1, dx2] # same as len(data)
# then for plotting, dx needs to be expanded such that the len(dx) == len(data) * len(map_func)
# so, new dx -> [dx1, dx2] * len(map_func)
"""
if isinstance(self.dx, (list, tuple)):
self.dx = self.dx * len(map_func)
if isinstance(self.units, (list, tuple)):
self.units = self.units * len(map_func)
if isinstance(self.dimension, (list, tuple)):
self.dimension = self.dimension * len(map_func)
if isinstance(self.cbar, (list, tuple)):
self.cbar = self.cbar * len(map_func)
if isinstance(self.cbar_label, (list, tuple)):
self.cbar_label = self.cbar_label * len(map_func)
if isinstance(self.cbar_log, (list, tuple)):
self.cbar_log = self.cbar_log * len(map_func)
def _map_func_to_data(self, map_func, map_func_kw):
"""Transform image data using the map_func callable object."""
self.data = copy(self.data)
# if data is a list or tuple of 2D or 3D images
if isinstance(self.data, (list, tuple)):
if self._check_map_func(map_func, map_func_kw) == "list/tuple":
self._adjust_param_list_len(map_func)
_d = self.data
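                # Note: the comprehensions below iterate over the functions in
                # the outer loop, so the transformed images come out grouped
                # per function: [f1(img1), f1(img2), ..., f2(img1), f2(img2), ...]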
# only pass on kwargs if not None
if map_func_kw is not None:
# check if one of the supplied kwargs in the list is None
# if None - change it to empty {}
map_func_kw = [{} if kw is None else kw for kw in map_func_kw]
self.data = [
func(img, **kwargs)
for func, kwargs in zip(map_func, map_func_kw)
for img in _d
]
else:
self.data = [func(img) for func in map_func for img in _d]
else: # if map_func is callable
for i in range(len(self.data)):
# only pass on kwargs if not None
if map_func_kw is not None:
self.data[i] = map_func(self.data[i], **map_func_kw)
else:
self.data[i] = map_func(self.data[i])
else:
# if data is 4D, 3D or 2D and map_func is single callable
if self._check_map_func(map_func, map_func_kw) == "callable":
if map_func_kw is not None:
self.data = map_func(self.data, **map_func_kw)
else:
self.data = map_func(self.data)
# list of callables -- only for list of 2D or list of 3D images
else:
_d = self.data
# only pass on kwargs if not None
if map_func_kw is not None:
# check if one of the supplied kwargs in the list is None
# if None - change it to empty {}
map_func_kw = [{} if kw is None else kw for kw in map_func_kw]
self.data = [
func(_d, **kwargs)
for func, kwargs in zip(map_func, map_func_kw)
]
else:
self.data = [func(_d) for func in map_func]
def _cleanup_extra_axes(self):
"""Clean extra axes that are generated if col_wrap is specified."""
        # check if there are any extra axes that need to be cleaned up
_rem = (self._ncol * self._nrow) - self._nimages
if _rem > 0:
rem_ax = self.axes.flat[-_rem:]
for i in range(len(rem_ax)):
rem_ax[i].set_yticks([])
rem_ax[i].set_xticks([])
rem_ax[i].set_ylabel("")
rem_ax[i].set_xlabel("")
despine(ax=rem_ax[i]) # remove axes spines for the extra generated axes
def _finalize_grid(self):
"""Finalize grid with tight layout."""
self.fig.tight_layout()
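# A minimal usage sketch for ImageGrid (illustrative only; assumes the bundled
# "cells" sample image is available through seaborn_image.load_image):
#
#   import seaborn_image as isns
#   cells = isns.load_image("cells")                  # 3-D data cube
#   g = isns.ImageGrid(cells, slices=[10, 20, 30], cbar=False)
#   for ax, s in zip(g.axes.flat, g.slices):          # fig, axes and slices are public attributes
#       ax.set_title(f"slice {s}")
#   g.fig.savefig("cells_grid.png")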
def rgbplot(
data,
*,
col_wrap=3,
height=3,
aspect=1,
cmap=None,
alpha=None,
origin=None,
vmin=None,
vmax=None,
interpolation=None,
dx=None,
units=None,
dimension=None,
cbar=True,
orientation="v",
cbar_label=None,
cbar_ticks=None,
showticks=False,
despine=None,
):
"""Split and plot the red, green and blue channels of an
RGB image.
Parameters
----------
data :
RGB image data as 3-D array.
col_wrap : int, optional
Number of columns to display. Defaults to 3.
height : int or float, optional
Size of the individual images. Defaults to 3.
aspect : int or float, optional
Aspect ratio of individual images. Defaults to 1.
cmap : str or `matplotlib.colors.Colormap` or list, optional
Image colormap or a list of colormaps. Defaults to None.
alpha : float or array-like, optional
`matplotlib.pyplot.imshow` alpha blending value from 0 (transparent) to 1 (opaque),
by default None
origin : str, optional
Image origin, by default None
vmin : float or list of floats, optional
Minimum data value that colormap covers, by default None
vmax : float or list of floats, optional
Maximum data value that colormap covers, by default None
interpolation : str, optional
`matplotlib.pyplot.imshow` interpolation method used, by default None
dx : float or list, optional
Size per pixel of the image data. If scalebar
        is required, `dx` and `units` must be specified.
Can be a list of floats.
Defaults to None.
units : str or list, optional
Units of `dx`.
Can be a list of str.
Defaults to None.
dimension : str or list, optional
Dimension of `dx` and `units`.
Options include :
- "si" : scale bar showing km, m, cm, etc.
- "imperial" : scale bar showing in, ft, yd, mi, etc.
- "si-reciprocal" : scale bar showing 1/m, 1/cm, etc.
- "angle" : scale bar showing °, ʹ (minute of arc) or ʹʹ (second of arc).
- "pixel" : scale bar showing px, kpx, Mpx, etc.
Can be a list of str.
Defaults to None.
cbar : bool or list, optional
Specify if a colorbar is required or not.
Can be a list of bools.
Defaults to True.
orientation : str, optional
        Specify the orientation of the colorbar.
        Options include :
- 'h' or 'horizontal' for a horizontal colorbar to the bottom of the image.
- 'v' or 'vertical' for a vertical colorbar to the right of the image.
Defaults to 'v'.
cbar_label : str or list, optional
Colorbar label.
Can be a list of str.
Defaults to None.
cbar_ticks : list, optional
List of colorbar ticks. Defaults to None.
showticks : bool, optional
Show image x-y axis ticks. Defaults to False.
despine : bool, optional
Remove axes spines from image axes as well as colorbar axes.
Defaults to None.
Returns
-------
`seaborn_image.ImageGrid`
Raises
------
ValueError
If `data` dimension is not 3
ValueError
If `data` channels are not 3
Examples
--------
Split and plot the channels of a RGB image
.. plot::
:context: close-figs
>>> import seaborn_image as isns; isns.set_image(origin="upper")
>>> from skimage.data import astronaut
>>> g = isns.rgbplot(astronaut())
Hide colorbar
.. plot::
:context: close-figs
>>> g = isns.rgbplot(astronaut(), cbar=False)
Change colormap
.. plot::
:context: close-figs
>>> g = isns.rgbplot(astronaut(), cmap="deep")
.. plot::
:context: close-figs
>>> g = isns.rgbplot(astronaut(), cmap=["inferno", "viridis", "ice"])
Horizontal colorbar
.. plot::
:context: close-figs
>>> g = isns.rgbplot(astronaut(), orientation="h")
"""
    if data.ndim != 3:
        raise ValueError("input image must be an RGB image")
    if data.shape[-1] != 3:
        raise ValueError("input image must be an RGB image")
# if no cmap, assign reds, greens and blues cmap
if cmap is None:
cmap = ["R", "G", "B"]
# split RGB channels
_d = [data[:, :, 0], data[:, :, 1], data[:, :, 2]]
g = ImageGrid(
_d,
height=height,
aspect=aspect,
col_wrap=col_wrap,
cmap=cmap,
alpha=alpha,
origin=origin,
vmin=vmin,
vmax=vmax,
interpolation=interpolation,
dx=dx,
units=units,
dimension=dimension,
cbar=cbar,
orientation=orientation,
cbar_label=cbar_label,
cbar_ticks=cbar_ticks,
showticks=showticks,
despine=despine,
)
return g
# TODO provide common cbar option
# TODO allow gridspec_kws and subplot_kws
class ParamGrid(object):
"""This class allows exploration of different parameters of
a function across the rows and columns of the grid. Additional function
parameters that are not to be varied can also be passed.
Generates a grid of images with the specific function applied to all
the images.
Parameters
----------
data :
Image data (array-like). Supported array shapes are all
`matplotlib.pyplot.imshow` array shapes
map_func : callable or str
Function to be applied/mapped to data. Can be any callable that accepts data
        as the first input parameter.
        If using a `str`, it must be one of the implemented filter functions in `seaborn_image`.
You can check implemented filters using `seaborn_image.implemented_filters()`.
row : str, optional
Parameter name that is to be displayed
along the row. Defaults to None.
col : str, optional
Parameter name that is to be displayed
along the column. Defaults to None.
col_wrap : int, optional
Number of columns to display if `col`
is not None and `row` is None. Defaults to None.
height : int or float, optional
Size of the individual images. Defaults to 3.
aspect : int or float, optional
Aspect ratio of individual images. Defaults to 1.
cmap : str or `matplotlib.colors.Colormap`, optional
Image colormap. Defaults to None.
alpha : float or array-like, optional
`matplotlib.pyplot.imshow` alpha blending value from 0 (transparent) to 1 (opaque),
by default None
origin : str, optional
Image origin, by default None
vmin : float or list of floats, optional
Minimum data value that colormap covers, by default None
vmax : float or list of floats, optional
Maximum data value that colormap covers, by default None
interpolation : str, optional
`matplotlib.pyplot.imshow` interpolation method used, by default None
dx : float, optional
Size per pixel of the image data. If scalebar
        is required, `dx` and `units` must be specified. Defaults to None.
units : str, optional
Units of `dx`. Defaults to None.
dimension : str, optional
Dimension of `dx` and `units`.
Options include :
- "si" : scale bar showing km, m, cm, etc.
- "imperial" : scale bar showing in, ft, yd, mi, etc.
- "si-reciprocal" : scale bar showing 1/m, 1/cm, etc.
- "angle" : scale bar showing °, ʹ (minute of arc) or ʹʹ (second of arc).
- "pixel" : scale bar showing px, kpx, Mpx, etc.
Defaults to None.
cbar : bool, optional
Specify if a colorbar is required or not.
Defaults to True.
orientation : str, optional
        Specify the orientation of the colorbar.
        Options include :
- 'h' or 'horizontal' for a horizontal colorbar to the bottom of the image.
- 'v' or 'vertical' for a vertical colorbar to the right of the image.
Defaults to 'v'.
cbar_log : bool, optional
Log scale colormap and colorbar
cbar_label : str, optional
Colorbar label. Defaults to None.
cbar_ticks : list, optional
List of colorbar ticks. Defaults to None.
showticks : bool, optional
Show image x-y axis ticks. Defaults to False.
despine : bool, optional
Remove axes spines from image axes as well as colorbar axes.
Defaults to None.
**kwargs : Additional parameters as keyword arguments to be passed to the underlying filter specified.
Returns
-------
    A `seaborn_image.ParamGrid` object
Raises
------
TypeError
If `row` is not a str
ValueError
If `row` is specified without passing the parameter as a keyword argument
TypeError
If `col` is not a str
ValueError
If `col` is specified without passing the parameter as a keyword argument
ValueError
If `col_wrap` is specified when `row` is not `None`
Examples
--------
Specify a filter with different parameters along the columns
.. plot::
:context: close-figs
>>> import seaborn_image as isns
>>> img = isns.load_image("polymer")
>>> g = isns.ParamGrid(img, "median", col="size", size=[2,3,4,5])
Or rows
.. plot::
:context: close-figs
>>> g = isns.ParamGrid(img, "median", row="size", size=[2,3,4,5])
Use `col_wrap` to control column display
.. plot::
:context: close-figs
>>> g = isns.ParamGrid(img, "median", col="size", size=[2,3,4,5], col_wrap=3)
Use `col` and `row` to display different parameters along the columns and rows
.. plot::
:context: close-figs
>>> g = isns.ParamGrid(img,
... "percentile",
... row="percentile",
... col="size",
... percentile=[10,20,30],
... size=[20,25,30],)
Specify additional keyword arguments for the filter
.. plot::
:context: close-figs
>>> g = isns.ParamGrid(img, "median", col="size", size=[2,3,4,5], mode="reflect")
General image controls such as changing colormap, scalebar, etc.
.. plot::
:context: close-figs
>>> g = isns.ParamGrid(
... img,
... "median",
... col="size",
... size=[2,3,4,5],
... cmap="inferno",
... dx=15,
... units="nm")
"""
def __init__(
self,
data,
map_func,
*,
row=None,
col=None,
col_wrap=None,
height=3,
aspect=1,
cmap=None,
alpha=None,
origin=None,
vmin=None,
vmax=None,
interpolation=None,
dx=None,
units=None,
dimension=None,
cbar=True,
orientation="v",
cbar_log=False,
cbar_label=None,
cbar_ticks=None,
showticks=False,
despine=None,
**kwargs,
):
if data is None:
raise ValueError("image data can not be None")
if map_func is None:
raise ValueError("'map_func' can not be None; must be a string or callable")
row_params = []
if row is not None:
if not isinstance(row, str):
raise TypeError("'row' parameter must be a string")
if row not in kwargs:
err = f"Specified '{row}' as 'row' without passing it as a kwargs"
raise ValueError(err)
else:
row_params.extend(kwargs[f"{row}"])
col_params = []
if col is not None:
if not isinstance(col, str):
raise TypeError("'col' parameter must be a string")
if col not in kwargs:
err = f"Specified '{col}' as 'col' without passing it as a kwargs"
raise ValueError(err)
else:
col_params.extend(kwargs[f"{col}"])
# Compute the grid shape like FacetGrid
nrow = 1 if row is None else len(kwargs[f"{row}"])
ncol = 1 if col is None else len(kwargs[f"{col}"])
# col_wrap can not be used with row option
if col_wrap is not None:
if row is not None:
err = "Cannot use `row` and `col_wrap` together"
raise ValueError(err)
# recompute the grid shape if col_wrap is specified
ncol = col_wrap
nrow = int(np.ceil(len(kwargs[f"{col}"]) / col_wrap))
# Calculate the base figure size
figsize = (ncol * height * aspect, nrow * height)
fig = plt.figure(figsize=figsize)
axes = fig.subplots(nrow, ncol, squeeze=False)
product_params = []
if row and col:
_p = itertools.product(row_params, col_params)
for _r, _c in _p:
product_params.append([_r, _c])
elif row:
for _r in row_params:
product_params.append([_r])
elif col:
for _c in col_params:
product_params.append([_c])
product_params = np.array(product_params, dtype=object)
# check if any additional kwargs are passed
# that need to be passed to the underlying filter
additional_kwargs = {}
for k, v in kwargs.items():
if row and col:
if k not in [row, col]:
additional_kwargs.update({k: v})
elif row:
                if k != row:
additional_kwargs.update({k: v})
elif col:
                if k != col:
additional_kwargs.update({k: v})
# Public API
self.data = data
self.map_func = map_func
self.fig = fig
self.axes = axes
self.row = row
self.col = col
self.col_wrap = col_wrap
self.param_product = product_params
self.additional_kwargs = additional_kwargs
self.height = height
self.aspect = aspect
self.cmap = cmap
self.alpha = alpha
self.origin = origin
self.vmin = vmin
self.vmax = vmax
self.interpolation = interpolation
self.dx = dx
self.units = units
self.dimension = dimension
self.cbar = cbar
self.orientation = orientation
self.cbar_log = cbar_log
self.cbar_label = cbar_label
self.cbar_ticks = cbar_ticks
self.showticks = showticks
self.despine = despine
self._nrow = nrow
self._ncol = ncol
self.map_filter_to_grid()
self._cleanup_extra_axes()
self._finalize_grid()
return
def map_filter_to_grid(self):
"""Map specified filter with row and col paramters
to the image grid.
"""
# any additional kwargs that need to be passed
# to the underlying filter
func_kwargs = self.additional_kwargs
if self.row is None and self.col is None:
imgplot(
self.data, ax=self.axes.flat[0]
) # since squeeze is False, array needs to be flattened and indexed
for i in range(len(self.param_product)):
ax = self.axes.flat[i]
p = self.param_product[i]
# plot only col vars
if self.row is None:
func_kwargs.update({self.col: p[0]})
self._plot(ax=ax, **func_kwargs)
ax.set_title(f"{self.col} : {p[0]}")
# plot only row vars
if self.col is None:
func_kwargs.update({self.row: p[0]})
self._plot(ax=ax, **func_kwargs)
ax.set_title(f"{self.row} : {p[0]}")
# when both row and col vars are specified
if self.row and self.col:
func_kwargs.update({self.row: p[0], self.col: p[1]})
self._plot(ax=ax, **func_kwargs)
# set row labels only to the outermost column
                if not i % self._ncol:
ax.set_ylabel(f"{self.row} : {p[0]}")
# set column labels only to the top row
if i < self._ncol:
ax.set_title(f"{self.col} : {p[1]}")
# FIXME - for common colorbar
# self.fig.colorbar(ax.images[0], ax=list(self.axes.flat), orientation=self.orientation)
return
def _plot(self, ax, **func_kwargs):
"""Helper function to call the underlying filterplot
Parameters
----------
ax : `matplotlib.axes.Axes`
Axis to plot filtered image
"""
filterplot(
self.data,
self.map_func,
ax=ax,
cmap=self.cmap,
alpha=self.alpha,
origin=self.origin,
vmin=self.vmin,
vmax=self.vmax,
interpolation=self.interpolation,
dx=self.dx,
units=self.units,
dimension=self.dimension,
cbar=self.cbar,
orientation=self.orientation,
cbar_log=self.cbar_log,
cbar_label=self.cbar_label,
cbar_ticks=self.cbar_ticks,
showticks=self.showticks,
despine=self.despine,
**func_kwargs,
)
return
def _cleanup_extra_axes(self):
"""Clean extra axes that are generated if col_wrap is specified."""
if self.col_wrap is not None:
            # check if there are any extra axes that need to be cleaned up
_rem = (self.col_wrap * self._nrow) - len(self.param_product)
if _rem > 0:
rem_ax = self.axes.flat[-_rem:]
for i in range(len(rem_ax)):
rem_ax[i].set_yticks([])
rem_ax[i].set_xticks([])
rem_ax[i].set_ylabel("")
rem_ax[i].set_xlabel("")
despine(
ax=rem_ax[i]
) # remove axes spines for the extra generated axes
def _finalize_grid(self):
"""Finalize grid with tight layout."""
self.fig.tight_layout()
class FilterGrid:
"""Deprecated - use `ParamGrid` instead."""
def __init__(self, *args, **kwargs):
warnings.warn(
"FilterGrid is depracted and will be removed in a future release."
"Use ParamGrid instead with the same arguments.",
UserWarning,
)
ParamGrid(*args, **kwargs)
|
PypiClean
|
/ressources/lib/node_modules/highcharts/modules/series-label.src.js
|
'use strict';
(function (factory) {
if (typeof module === 'object' && module.exports) {
module.exports = factory;
} else if (typeof define === 'function' && define.amd) {
define(function () {
return factory;
});
} else {
factory(Highcharts);
}
}(function (Highcharts) {
(function (H) {
/**
* (c) 2009-2017 Torstein Honsi
*
* License: www.highcharts.com/license
*/
/**
* Highcharts module to place labels next to a series in a natural position.
*
* TODO:
* - add column support (box collision detection, boxesToAvoid logic)
* - avoid data labels, when data labels above, show series label below.
* - add more options (connector, format, formatter)
*
* https://jsfiddle.net/highcharts/L2u9rpwr/
* https://jsfiddle.net/highcharts/y5A37/
* https://jsfiddle.net/highcharts/264Nm/
*/
var labelDistance = 3,
addEvent = H.addEvent,
each = H.each,
extend = H.extend,
isNumber = H.isNumber,
pick = H.pick,
Series = H.Series,
SVGRenderer = H.SVGRenderer,
Chart = H.Chart;
H.setOptions({
/**
* @optionparent plotOptions
*/
plotOptions: {
series: {
/**
* Series labels are placed as close to the series as possible in a
* natural way, seeking to avoid other series. The goal of this
                 * feature is to make the chart more easily readable, as if a
                 * human designer had placed the labels in the optimal position.
*
* The series labels currently work with series types having a
* `graph` or an `area`.
*
* Requires the `series-label.js` module.
*
* @sample highcharts/series-label/line-chart
* Line chart
* @sample highcharts/demo/streamgraph
* Stream graph
* @sample highcharts/series-label/stock-chart
* Stock chart
* @since 6.0.0
* @product highcharts highstock
*/
label: {
/**
* Enable the series label per series.
*/
enabled: true,
/**
* Allow labels to be placed distant to the graph if necessary,
* and draw a connector line to the graph. Setting this option
* to true may decrease the performance significantly, since the
                     * algorithm will systematically search for open spaces in the
                     * whole plot area. Visually, it may also result in a more
* cluttered chart, though more of the series will be labeled.
*/
connectorAllowed: false,
/**
* If the label is closer than this to a neighbour graph, draw a
* connector.
*/
connectorNeighbourDistance: 24,
/**
* For area-like series, allow the font size to vary so that
* small areas get a smaller font size. The default applies this
* effect to area-like series but not line-like series.
*
* @type {Number}
*/
minFontSize: null,
/**
* For area-like series, allow the font size to vary so that
* small areas get a smaller font size. The default applies this
* effect to area-like series but not line-like series.
*
* @type {Number}
*/
maxFontSize: null,
/**
* Draw the label on the area of an area series. By default it
* is drawn on the area. Set it to `false` to draw it next to
* the graph instead.
*
* @type {Boolean}
*/
onArea: null,
/**
* Styles for the series label. The color defaults to the series
* color, or a contrast color if `onArea`.
*/
style: {
fontWeight: 'bold'
},
/**
* An array of boxes to avoid when laying out the labels. Each
* item has a `left`, `right`, `top` and `bottom` property.
*
* @type {Array<Object>}
*/
boxesToAvoid: []
}
}
}
});
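        /**
         * Illustrative usage (a minimal sketch, not part of this module):
         * the options above can be overridden per chart or per series. The
         * 'container' id and the sample data are hypothetical.
         *
         *     Highcharts.chart('container', {
         *         plotOptions: {
         *             series: {
         *                 label: {
         *                     connectorAllowed: true
         *                 }
         *             }
         *         },
         *         series: [{ name: 'Installation', data: [1, 3, 2, 4] }]
         *     });
         */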
/**
* Counter-clockwise, part of the fast line intersection logic
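         * Uses the sign of the 2-D cross product of (p1->p2) and (p1->p3);
         * collinear triples are treated as counter-clockwise (returns true).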
*/
function ccw(x1, y1, x2, y2, x3, y3) {
var cw = ((y3 - y1) * (x2 - x1)) - ((y2 - y1) * (x3 - x1));
return cw > 0 ? true : cw < 0 ? false : true;
}
/**
* Detect if two lines intersect
*/
function intersectLine(x1, y1, x2, y2, x3, y3, x4, y4) {
return ccw(x1, y1, x3, y3, x4, y4) !== ccw(x2, y2, x3, y3, x4, y4) &&
ccw(x1, y1, x2, y2, x3, y3) !== ccw(x1, y1, x2, y2, x4, y4);
}
/**
* Detect if a box intersects with a line
*/
function boxIntersectLine(x, y, w, h, x1, y1, x2, y2) {
return (
intersectLine(x, y, x + w, y, x1, y1, x2, y2) || // top of label
intersectLine(x + w, y, x + w, y + h, x1, y1, x2, y2) || // right
intersectLine(x, y + h, x + w, y + h, x1, y1, x2, y2) || // bottom
intersectLine(x, y, x, y + h, x1, y1, x2, y2) // left of label
);
}
/**
* General symbol definition for labels with connector
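         * Registered on SVGRenderer.prototype.symbols, so 'connector' can be
         * passed as the shape name to renderer.label() (as done in
         * drawSeriesLabels below).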
*/
SVGRenderer.prototype.symbols.connector = function (x, y, w, h, options) {
var anchorX = options && options.anchorX,
anchorY = options && options.anchorY,
path,
yOffset,
lateral = w / 2;
if (isNumber(anchorX) && isNumber(anchorY)) {
path = ['M', anchorX, anchorY];
// Prefer 45 deg connectors
yOffset = y - anchorY;
if (yOffset < 0) {
yOffset = -h - yOffset;
}
if (yOffset < w) {
lateral = anchorX < x + (w / 2) ? yOffset : w - yOffset;
}
// Anchor below label
if (anchorY > y + h) {
path.push('L', x + lateral, y + h);
// Anchor above label
} else if (anchorY < y) {
path.push('L', x + lateral, y);
// Anchor left of label
} else if (anchorX < x) {
path.push('L', x, y + h / 2);
// Anchor right of label
} else if (anchorX > x + w) {
path.push('L', x + w, y + h / 2);
}
}
return path || [];
};
/**
* Points to avoid. In addition to actual data points, the label should avoid
* interpolated positions.
*/
Series.prototype.getPointsOnGraph = function () {
if (!this.xAxis && !this.yAxis) {
return;
}
var distance = 16,
points = this.points,
point,
last,
interpolated = [],
i,
deltaX,
deltaY,
delta,
len,
n,
j,
d,
graph = this.graph || this.area,
node = graph.element,
inverted = this.chart.inverted,
xAxis = this.xAxis,
yAxis = this.yAxis,
paneLeft = inverted ? yAxis.pos : xAxis.pos,
paneTop = inverted ? xAxis.pos : yAxis.pos,
onArea = pick(this.options.label.onArea, !!this.area),
translatedThreshold = yAxis.getThreshold(this.options.threshold);
// For splines, get the point at length (possible caveat: peaks are not
// correctly detected)
if (this.getPointSpline && node.getPointAtLength && !onArea) {
// If it is animating towards a path definition, use that briefly, and
// reset
if (graph.toD) {
d = graph.attr('d');
graph.attr({ d: graph.toD });
}
len = node.getTotalLength();
for (i = 0; i < len; i += distance) {
point = node.getPointAtLength(i);
interpolated.push({
chartX: paneLeft + point.x,
chartY: paneTop + point.y,
plotX: point.x,
plotY: point.y
});
}
if (d) {
graph.attr({ d: d });
}
// Last point
point = points[points.length - 1];
point.chartX = paneLeft + point.plotX;
point.chartY = paneTop + point.plotY;
interpolated.push(point);
// Interpolate
} else {
len = points.length;
for (i = 0; i < len; i += 1) {
point = points[i];
last = points[i - 1];
// Absolute coordinates so we can compare different panes
point.chartX = paneLeft + point.plotX;
point.chartY = paneTop + point.plotY;
if (onArea) {
// Vertically centered inside area
point.chartCenterY = paneTop + (
point.plotY +
pick(point.yBottom, translatedThreshold)
) / 2;
}
// Add interpolated points
if (i > 0) {
deltaX = Math.abs(point.chartX - last.chartX);
deltaY = Math.abs(point.chartY - last.chartY);
delta = Math.max(deltaX, deltaY);
if (delta > distance) {
n = Math.ceil(delta / distance);
for (j = 1; j < n; j += 1) {
interpolated.push({
chartX: last.chartX +
(point.chartX - last.chartX) * (j / n),
chartY: last.chartY +
(point.chartY - last.chartY) * (j / n),
chartCenterY: last.chartCenterY +
(point.chartCenterY - last.chartCenterY) *
(j / n),
plotX: last.plotX +
(point.plotX - last.plotX) * (j / n),
plotY: last.plotY +
(point.plotY - last.plotY) * (j / n)
});
}
}
}
// Add the real point in order to find positive and negative peaks
if (isNumber(point.plotY)) {
interpolated.push(point);
}
}
}
// Get the bounding box so we can do a quick check first if the bounding
// boxes overlap.
/*
interpolated.bBox = node.getBBox();
interpolated.bBox.x += paneLeft;
interpolated.bBox.y += paneTop;
*/
return interpolated;
};
/**
* Overridable function to return series-specific font sizes for the labels. By
* default it returns bigger font sizes for series with the greater sum of y
* values.
*/
Series.prototype.labelFontSize = function (minFontSize, maxFontSize) {
return minFontSize + (
(this.sum / this.chart.labelSeriesMaxSum) *
(maxFontSize - minFontSize)
) + 'px';
};
/**
* Check whether a proposed label position is clear of other elements
*/
Series.prototype.checkClearPoint = function (x, y, bBox, checkDistance) {
var distToOthersSquared = Number.MAX_VALUE, // distance to other graphs
distToPointSquared = Number.MAX_VALUE,
dist,
connectorPoint,
connectorEnabled = this.options.label.connectorAllowed,
onArea = pick(this.options.label.onArea, !!this.area),
chart = this.chart,
series,
points,
leastDistance = 16,
withinRange,
xDist,
yDist,
i,
j;
function intersectRect(r1, r2) {
return !(r2.left > r1.right ||
r2.right < r1.left ||
r2.top > r1.bottom ||
r2.bottom < r1.top);
}
/**
* Get the weight in order to determine the ideal position. Larger distance
* to other series gives more weight. Smaller distance to the actual point
* (connector points only) gives more weight.
*/
function getWeight(distToOthersSquared, distToPointSquared) {
return distToOthersSquared - distToPointSquared;
}
// First check for collision with existing labels
for (i = 0; i < chart.boxesToAvoid.length; i += 1) {
if (intersectRect(chart.boxesToAvoid[i], {
left: x,
right: x + bBox.width,
top: y,
bottom: y + bBox.height
})) {
return false;
}
}
// For each position, check if the lines around the label intersect with any
// of the graphs.
for (i = 0; i < chart.series.length; i += 1) {
series = chart.series[i];
points = series.interpolatedPoints;
if (series.visible && points) {
for (j = 1; j < points.length; j += 1) {
if (
// To avoid processing, only check intersection if the X
// values are close to the box.
points[j].chartX >= x - leastDistance &&
points[j - 1].chartX <= x + bBox.width + leastDistance
) {
// If any of the box sides intersect with the line, return.
if (boxIntersectLine(
x,
y,
bBox.width,
bBox.height,
points[j - 1].chartX,
points[j - 1].chartY,
points[j].chartX,
points[j].chartY
)) {
return false;
}
// But if it is too far away (a padded box doesn't
// intersect), also return.
if (this === series && !withinRange && checkDistance) {
withinRange = boxIntersectLine(
x - leastDistance,
y - leastDistance,
bBox.width + 2 * leastDistance,
bBox.height + 2 * leastDistance,
points[j - 1].chartX,
points[j - 1].chartY,
points[j].chartX,
points[j].chartY
);
}
}
// Find the squared distance from the center of the label. On
// area series, avoid its own graph.
if (
(connectorEnabled || withinRange) &&
(this !== series || onArea)
) {
xDist = x + bBox.width / 2 - points[j].chartX;
yDist = y + bBox.height / 2 - points[j].chartY;
distToOthersSquared = Math.min(
distToOthersSquared,
xDist * xDist + yDist * yDist
);
}
}
// Do we need a connector?
if (
!onArea &&
connectorEnabled &&
this === series &&
(
(checkDistance && !withinRange) ||
distToOthersSquared < Math.pow(
this.options.label.connectorNeighbourDistance,
2
)
)
) {
for (j = 1; j < points.length; j += 1) {
dist = Math.min(
(
Math.pow(x + bBox.width / 2 - points[j].chartX, 2) +
Math.pow(y + bBox.height / 2 - points[j].chartY, 2)
),
(
Math.pow(x - points[j].chartX, 2) +
Math.pow(y - points[j].chartY, 2)
),
(
Math.pow(x + bBox.width - points[j].chartX, 2) +
Math.pow(y - points[j].chartY, 2)
),
(
Math.pow(x + bBox.width - points[j].chartX, 2) +
Math.pow(y + bBox.height - points[j].chartY, 2)
),
(
Math.pow(x - points[j].chartX, 2) +
Math.pow(y + bBox.height - points[j].chartY, 2)
)
);
if (dist < distToPointSquared) {
distToPointSquared = dist;
connectorPoint = points[j];
}
}
withinRange = true;
}
}
}
return !checkDistance || withinRange ? {
x: x,
y: y,
weight: getWeight(
distToOthersSquared,
connectorPoint ? distToPointSquared : 0
),
connectorPoint: connectorPoint
} : false;
};
/**
* The main initiator method that runs on chart level after initiation and
* redraw. It runs in a timeout to prevent locking, and loops over all series,
* taking all series and labels into account when placing the labels.
*/
Chart.prototype.drawSeriesLabels = function () {
// console.time('drawSeriesLabels');
var chart = this,
labelSeries = this.labelSeries;
chart.boxesToAvoid = [];
// Build the interpolated points
each(labelSeries, function (series) {
series.interpolatedPoints = series.getPointsOnGraph();
each(series.options.label.boxesToAvoid || [], function (box) {
chart.boxesToAvoid.push(box);
});
});
each(chart.series, function (series) {
if (!series.xAxis && !series.yAxis) {
return;
}
var bBox,
x,
y,
results = [],
clearPoint,
i,
best,
labelOptions = series.options.label,
inverted = chart.inverted,
paneLeft = inverted ? series.yAxis.pos : series.xAxis.pos,
paneTop = inverted ? series.xAxis.pos : series.yAxis.pos,
paneWidth = chart.inverted ? series.yAxis.len : series.xAxis.len,
paneHeight = chart.inverted ? series.xAxis.len : series.yAxis.len,
points = series.interpolatedPoints,
onArea = pick(labelOptions.onArea, !!series.area),
label = series.labelBySeries,
minFontSize = labelOptions.minFontSize,
maxFontSize = labelOptions.maxFontSize;
function insidePane(x, y, bBox) {
return x > paneLeft && x <= paneLeft + paneWidth - bBox.width &&
y >= paneTop && y <= paneTop + paneHeight - bBox.height;
}
function destroyLabel() {
if (label) {
series.labelBySeries = label.destroy();
}
}
if (series.visible && !series.isSeriesBoosting && points) {
if (!label) {
series.labelBySeries = label = chart.renderer
.label(series.name, 0, -9999, 'connector')
.css(extend({
color: onArea ?
chart.renderer.getContrast(series.color) :
series.color
}, series.options.label.style));
// Adapt label sizes to the sum of the data
if (minFontSize && maxFontSize) {
label.css({
fontSize: series.labelFontSize(minFontSize, maxFontSize)
});
}
label
.attr({
padding: 0,
opacity: chart.renderer.forExport ? 1 : 0,
stroke: series.color,
'stroke-width': 1,
zIndex: 3
})
.add()
.animate({ opacity: 1 }, { duration: 200 });
}
bBox = label.getBBox();
bBox.width = Math.round(bBox.width);
// Ideal positions are centered above or below a point on right side
// of chart
for (i = points.length - 1; i > 0; i -= 1) {
if (onArea) {
// Centered
x = points[i].chartX - bBox.width / 2;
y = points[i].chartCenterY - bBox.height / 2;
if (insidePane(x, y, bBox)) {
best = series.checkClearPoint(
x,
y,
bBox
);
}
if (best) {
results.push(best);
}
} else {
// Right - up
x = points[i].chartX + labelDistance;
y = points[i].chartY - bBox.height - labelDistance;
if (insidePane(x, y, bBox)) {
best = series.checkClearPoint(
x,
y,
bBox
);
}
if (best) {
results.push(best);
}
// Right - down
x = points[i].chartX + labelDistance;
y = points[i].chartY + labelDistance;
if (insidePane(x, y, bBox)) {
best = series.checkClearPoint(
x,
y,
bBox
);
}
if (best) {
results.push(best);
}
// Left - down
x = points[i].chartX - bBox.width - labelDistance;
y = points[i].chartY + labelDistance;
if (insidePane(x, y, bBox)) {
best = series.checkClearPoint(
x,
y,
bBox
);
}
if (best) {
results.push(best);
}
// Left - up
x = points[i].chartX - bBox.width - labelDistance;
y = points[i].chartY - bBox.height - labelDistance;
if (insidePane(x, y, bBox)) {
best = series.checkClearPoint(
x,
y,
bBox
);
}
if (best) {
results.push(best);
}
}
}
// Brute force, try all positions on the chart in a 16x16 grid
if (labelOptions.connectorAllowed && !results.length && !onArea) {
for (
x = paneLeft + paneWidth - bBox.width;
x >= paneLeft;
x -= 16
) {
for (
y = paneTop;
y < paneTop + paneHeight - bBox.height;
y += 16
) {
clearPoint = series.checkClearPoint(x, y, bBox, true);
if (clearPoint) {
results.push(clearPoint);
}
}
}
}
if (results.length) {
results.sort(function (a, b) {
return b.weight - a.weight;
});
best = results[0];
chart.boxesToAvoid.push({
left: best.x,
right: best.x + bBox.width,
top: best.y,
bottom: best.y + bBox.height
});
// Move it if needed
var dist = Math.sqrt(
                        Math.pow(Math.abs(best.x - label.x), 2) +
                        Math.pow(Math.abs(best.y - label.y), 2)
);
if (dist) {
// Move fast and fade in - pure animation movement is
                    // distracting...
var attr = {
opacity: chart.renderer.forExport ? 1 : 0,
x: best.x,
y: best.y
},
anim = {
opacity: 1
};
// ... unless we're just moving a short distance
if (dist <= 10) {
anim = {
x: attr.x,
y: attr.y
};
attr = {};
}
series.labelBySeries
.attr(extend(attr, {
anchorX: best.connectorPoint &&
best.connectorPoint.plotX + paneLeft,
anchorY: best.connectorPoint &&
best.connectorPoint.plotY + paneTop
}))
.animate(anim);
// Record closest point to stick to for sync redraw
series.options.kdNow = true;
series.buildKDTree();
var closest = series.searchPoint({
chartX: best.x,
chartY: best.y
}, true);
label.closest = [
closest,
best.x - closest.plotX,
best.y - closest.plotY
];
}
} else {
destroyLabel();
}
} else {
destroyLabel();
}
});
// console.timeEnd('drawSeriesLabels');
};
/**
* Prepare drawing series labels
*/
function drawLabels() {
var chart = this,
delay = Math.max(
H.animObject(chart.renderer.globalAnimation).duration,
250
),
initial = !chart.hasRendered;
chart.labelSeries = [];
chart.labelSeriesMaxSum = 0;
H.clearTimeout(chart.seriesLabelTimer);
// Which series should have labels
each(chart.series, function (series) {
var options = series.options.label,
label = series.labelBySeries,
closest = label && label.closest;
if (
options.enabled &&
series.visible &&
(series.graph || series.area) &&
!series.isSeriesBoosting
) {
chart.labelSeries.push(series);
if (options.minFontSize && options.maxFontSize) {
series.sum = H.reduce(series.yData, function (pv, cv) {
return (pv || 0) + (cv || 0);
}, 0);
chart.labelSeriesMaxSum = Math.max(
chart.labelSeriesMaxSum,
series.sum
);
}
                // Label processing is heavy, so wait until the animation is done
if (initial) {
delay = Math.max(
delay,
H.animObject(series.options.animation).duration
);
}
// Keep the position updated to the axis while redrawing
if (closest) {
if (closest[0].plotX !== undefined) {
label.animate({
x: closest[0].plotX + closest[1],
y: closest[0].plotY + closest[2]
});
} else {
label.attr({ opacity: 0 });
}
}
}
});
chart.seriesLabelTimer = H.syncTimeout(function () {
if (chart.series && chart.labelSeries) { // #7931, chart destroyed
chart.drawSeriesLabels();
}
}, chart.renderer.forExport ? 0 : delay);
}
addEvent(Chart, 'render', drawLabels);
}(Highcharts));
return (function () {
}());
}));
|
PypiClean
|
/CLAchievements-0.1.0.tar.gz/CLAchievements-0.1.0/clachievements/reset/__main__.py
|
# Command Line Achievements
# Copyright 2016 Louis Paternault
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Reset progress."""
import argparse
import os
import sys
from clachievements.db import db_path
def commandline_parser():
"""Return a command line parser."""
parser = argparse.ArgumentParser(
prog="clachievements.reset",
description=(
"Reset progress."
),
formatter_class=argparse.RawTextHelpFormatter,
)
return parser
def confirm():
"""Ask for confirmation from user."""
for _ in range(3):
try:
answer = input("Any progress will be lost. Proceed? [yes/no] ")
except (KeyboardInterrupt, EOFError):
print()
return False
if answer == "yes":
return True
if answer == "no":
return False
print("Please answer 'yes' or 'no'.")
return False
def reset():
"""Reset progress"""
if confirm():
path = db_path()
if not os.path.exists(path):
return 0
try:
os.remove(path)
except OSError as error:
print("Error while removing '{}': {}.".format(
path,
str(error),
))
return 1
return 0
else:
print("Aborted.")
return 0
def main():
"""Main function"""
commandline_parser().parse_args()
sys.exit(reset())
if __name__ == "__main__":
main()
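# --- Illustrative usage sketch (not part of the original module) ---
# Assumption: the package is installed; the module name matches the argparse
# prog above, while the answer shown is hypothetical.
#
#   $ python -m clachievements.reset
#   Any progress will be lost. Proceed? [yes/no] yes
#
# reset() then removes the file returned by db_path() and exits with code 0.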
|
PypiClean
|
/plaid-python-15.5.0.tar.gz/plaid-python-15.5.0/plaid/model/liabilities_get_request.py
|
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from plaid.exceptions import ApiAttributeError
def lazy_import():
from plaid.model.liabilities_get_request_options import LiabilitiesGetRequestOptions
globals()['LiabilitiesGetRequestOptions'] = LiabilitiesGetRequestOptions
class LiabilitiesGetRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'access_token': (str,), # noqa: E501
'client_id': (str,), # noqa: E501
'secret': (str,), # noqa: E501
'options': (LiabilitiesGetRequestOptions,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'access_token': 'access_token', # noqa: E501
'client_id': 'client_id', # noqa: E501
'secret': 'secret', # noqa: E501
'options': 'options', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, access_token, *args, **kwargs): # noqa: E501
"""LiabilitiesGetRequest - a model defined in OpenAPI
Args:
            access_token (str): The access token associated with the Item for which data is being requested.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            client_id (str): Your Plaid API `client_id`. The `client_id` is required and may be provided either in the `PLAID-CLIENT-ID` header or as part of a request body. [optional] # noqa: E501
            secret (str): Your Plaid API `secret`. The `secret` is required and may be provided either in the `PLAID-SECRET` header or as part of a request body. [optional] # noqa: E501
options (LiabilitiesGetRequestOptions): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.access_token = access_token
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, access_token, *args, **kwargs): # noqa: E501
"""LiabilitiesGetRequest - a model defined in OpenAPI
Args:
            access_token (str): The access token associated with the Item for which data is being requested.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            client_id (str): Your Plaid API `client_id`. The `client_id` is required and may be provided either in the `PLAID-CLIENT-ID` header or as part of a request body. [optional] # noqa: E501
            secret (str): Your Plaid API `secret`. The `secret` is required and may be provided either in the `PLAID-SECRET` header or as part of a request body. [optional] # noqa: E501
options (LiabilitiesGetRequestOptions): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.access_token = access_token
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/caisson-0.0.4.tar.gz/caisson-0.0.4/README.rst
|
`caisson`: The Recursive Decompressor
=====================================
Extract all compressed files recursively using external decompressors
found in the system.
Installation
------------
.. code-block:: shell
$ pip3 install caisson
Usage
-----
.. code-block::
$ caisson --help
usage: caisson [-h] [-q | -v | -d] [--list] [-o {always,never,ask,rename}]
[source [source ...]] destination
Recursively decompress files
positional arguments:
source file or directory to be extracted
destination destination for extracted files
optional arguments:
-h, --help show this help message and exit
-q, --quiet don't print any log
-v, --verbose print a moderate log
-d, --debug print a debugging log
--list print list of available decompressors
-o {always,never,ask,rename}, --overwrite {always,never,ask,rename}
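Example
-------

A minimal run, with hypothetical paths (``archives/`` contains the compressed
files; extracted content is written to ``out/``):

.. code-block:: shell

    $ caisson archives/ out/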
|
PypiClean
|
/Django-4.2.4.tar.gz/Django-4.2.4/django/contrib/postgres/aggregates/general.py
|
import json
import warnings
from django.contrib.postgres.fields import ArrayField
from django.db.models import Aggregate, BooleanField, JSONField, TextField, Value
from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning
from .mixins import OrderableAggMixin
__all__ = [
"ArrayAgg",
"BitAnd",
"BitOr",
"BitXor",
"BoolAnd",
"BoolOr",
"JSONBAgg",
"StringAgg",
]
# RemovedInDjango50Warning
NOT_PROVIDED = object()
class DeprecatedConvertValueMixin:
def __init__(self, *expressions, default=NOT_PROVIDED, **extra):
if default is NOT_PROVIDED:
default = None
self._default_provided = False
else:
self._default_provided = True
super().__init__(*expressions, default=default, **extra)
def resolve_expression(self, *args, **kwargs):
resolved = super().resolve_expression(*args, **kwargs)
if not self._default_provided:
resolved.empty_result_set_value = getattr(
self, "deprecation_empty_result_set_value", self.deprecation_value
)
return resolved
def convert_value(self, value, expression, connection):
if value is None and not self._default_provided:
warnings.warn(self.deprecation_msg, category=RemovedInDjango50Warning)
return self.deprecation_value
return value
class ArrayAgg(DeprecatedConvertValueMixin, OrderableAggMixin, Aggregate):
function = "ARRAY_AGG"
template = "%(function)s(%(distinct)s%(expressions)s %(ordering)s)"
allow_distinct = True
# RemovedInDjango50Warning
deprecation_value = property(lambda self: [])
deprecation_msg = (
"In Django 5.0, ArrayAgg() will return None instead of an empty list "
"if there are no rows. Pass default=None to opt into the new behavior "
"and silence this warning or default=[] to keep the previous behavior."
)
@property
def output_field(self):
return ArrayField(self.source_expressions[0].output_field)
class BitAnd(Aggregate):
function = "BIT_AND"
class BitOr(Aggregate):
function = "BIT_OR"
class BitXor(Aggregate):
function = "BIT_XOR"
class BoolAnd(Aggregate):
function = "BOOL_AND"
output_field = BooleanField()
class BoolOr(Aggregate):
function = "BOOL_OR"
output_field = BooleanField()
class JSONBAgg(DeprecatedConvertValueMixin, OrderableAggMixin, Aggregate):
function = "JSONB_AGG"
template = "%(function)s(%(distinct)s%(expressions)s %(ordering)s)"
allow_distinct = True
output_field = JSONField()
# RemovedInDjango50Warning
deprecation_value = "[]"
deprecation_empty_result_set_value = property(lambda self: [])
deprecation_msg = (
"In Django 5.0, JSONBAgg() will return None instead of an empty list "
"if there are no rows. Pass default=None to opt into the new behavior "
"and silence this warning or default=[] to keep the previous "
"behavior."
)
# RemovedInDjango51Warning: When the deprecation ends, remove __init__().
#
# RemovedInDjango50Warning: When the deprecation ends, replace with:
# def __init__(self, *expressions, default=None, **extra):
def __init__(self, *expressions, default=NOT_PROVIDED, **extra):
super().__init__(*expressions, default=default, **extra)
if (
isinstance(default, Value)
and isinstance(default.value, str)
and not isinstance(default.output_field, JSONField)
):
value = default.value
try:
decoded = json.loads(value)
except json.JSONDecodeError:
warnings.warn(
"Passing a Value() with an output_field that isn't a JSONField as "
"JSONBAgg(default) is deprecated. Pass default="
f"Value({value!r}, output_field=JSONField()) instead.",
stacklevel=2,
category=RemovedInDjango51Warning,
)
self.default.output_field = self.output_field
else:
self.default = Value(decoded, self.output_field)
warnings.warn(
"Passing an encoded JSON string as JSONBAgg(default) is "
f"deprecated. Pass default={decoded!r} instead.",
stacklevel=2,
category=RemovedInDjango51Warning,
)
class StringAgg(DeprecatedConvertValueMixin, OrderableAggMixin, Aggregate):
function = "STRING_AGG"
template = "%(function)s(%(distinct)s%(expressions)s %(ordering)s)"
allow_distinct = True
output_field = TextField()
# RemovedInDjango50Warning
deprecation_value = ""
deprecation_msg = (
"In Django 5.0, StringAgg() will return None instead of an empty "
"string if there are no rows. Pass default=None to opt into the new "
'behavior and silence this warning or default="" to keep the previous behavior.'
)
def __init__(self, expression, delimiter, **extra):
delimiter_expr = Value(str(delimiter))
super().__init__(expression, delimiter_expr, **extra)
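# Illustrative usage sketch (not part of this module). Assumption: a
# hypothetical model ``Book`` with ``name`` and ``genre`` fields; the
# ``default`` values follow the deprecation messages above.
def _example_aggregates():
    from myapp.models import Book  # hypothetical app and model
    # Collect the names per genre, both as a list and as a joined string.
    return Book.objects.values("genre").annotate(
        names=ArrayAgg("name", default=[]),
        joined=StringAgg("name", delimiter=", ", default=""),
    )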
|
PypiClean
|
/fds.sdk.NewsAPIforDigitalPortals-0.10.9-py3-none-any.whl/fds/sdk/NewsAPIforDigitalPortals/model/inline_response2009_data.py
|
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.NewsAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.NewsAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.NewsAPIforDigitalPortals.model.inline_response2009_data_distributor import InlineResponse2009DataDistributor
globals()['InlineResponse2009DataDistributor'] = InlineResponse2009DataDistributor
class InlineResponse2009Data(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'name': (str, none_type,), # noqa: E501
'distributor': (InlineResponse2009DataDistributor,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'distributor': 'distributor', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""InlineResponse2009Data - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            name (str, none_type): Name of the publisher. [optional] # noqa: E501
distributor (InlineResponse2009DataDistributor): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InlineResponse2009Data - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            name (str, none_type): Name of the publisher. [optional] # noqa: E501
distributor (InlineResponse2009DataDistributor): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/flask-hsrpc-0.2.0.tar.gz/flask-hsrpc-0.2.0/flask_hsrpc/consul.py
|
from consul import Consul as BaseConsul
from consul.base import CB, Check
from uuid import uuid4 as uuid
import random
import json
import os
def _is_node_health(node):
checks = node["Checks"]
return len(list(filter(lambda c: c["Status"] != "passing", checks))) == 0
def random_balance(health_nodes):
return random.sample(health_nodes, 1)[0]
def _rr_balance_func():
Next_RR_Balance_Count = 0
def balance(health_nodes):
nonlocal Next_RR_Balance_Count
if Next_RR_Balance_Count >= len(health_nodes):
Next_RR_Balance_Count = 0
rel = health_nodes[Next_RR_Balance_Count]
Next_RR_Balance_Count += 1
return rel
return balance
rr_balance = _rr_balance_func()
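# Illustrative behavior sketch (not part of the original module): the closure
# above keeps its own counter, so successive calls walk the node list and wrap
# around once the end is reached.
def _example_round_robin():
    balance = _rr_balance_func()
    nodes = ["a", "b", "c"]  # hypothetical health nodes
    return [balance(nodes) for _ in range(4)]  # -> ["a", "b", "c", "a"]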
class Consul(BaseConsul):
def service_register(
self,
name,
service_id=None,
address=None,
port=None,
tags=None,
check=None,
token=None,
meta=None,
# *deprecated* use check parameter
script=None,
interval=None,
ttl=None,
http=None,
timeout=None,
enable_tag_override=False,
connect=None):
payload = {'name': name}
if enable_tag_override:
payload['enabletagoverride'] = enable_tag_override
if service_id:
payload['id'] = service_id
if address:
payload['address'] = address
if port:
payload['port'] = port
if tags:
payload['tags'] = tags
if meta:
payload['meta'] = meta
if connect:
payload['connect'] = connect
if check:
payload['check'] = check
else:
payload.update(Check._compat(
script=script,
interval=interval,
ttl=ttl,
http=http,
timeout=timeout))
params = {}
token = token or self.token
if token:
params['token'] = token
return self.http.put(
CB.bool(),
'/v1/agent/service/register',
params=params,
data=json.dumps(payload))
def service_deregister(self, service_id):
return self.http.put(
CB.bool(), '/v1/agent/service/deregister/%s' % service_id)
class ConsulRegister(object):
_service_id = ""
def __init__(self, app):
host = app.config.get("CONSUL_HOST", "127.0.0.1")
port = app.config.get("CONSUL_PORT", "8500")
self.name = app.config.get("APP_NAME")
        self.service_info = app.config.get("SERVICE_INFO") or {}  # guard against a missing SERVICE_INFO config
self.service_info["service_id"] = self.service_id
print(self.service_id)
# self.consul = BaseConsul(host=host, port=port)
self.consul = Consul(host=host, port=port)
if self.name:
print("register service...")
# self.consul.agent.service.register()
self.consul.service_register(name=self.name, **self.service_info)
@property
def service_id(self):
if not self._service_id:
if self.service_info and self.service_info.get("service_id"):
self._service_id = self.service_info.get("service_id")
elif os.path.exists("service_id"):
with open("service_id", "r", encoding="utf-8") as f:
self._service_id = f.readline()
else:
self._service_id = self.name + '-' + uuid().hex
with open("service_id", "w", encoding="utf-8") as f:
f.write(self._service_id)
return self._service_id
def get_service(self, name):
return self.consul.health.service(name)
def get_health_service_node_by_balance(self, name, balance=random_balance):
nodes = self.get_service(name)[1]
health_nodes = list(filter(lambda n: _is_node_health(n), nodes))
return balance(health_nodes) if health_nodes else None
def unregister(self):
print(self.service_id)
print("unregister service...")
self.consul.service_deregister(self.service_id)
# def consul_unregister(app):
# if app:
# host = app.config.get("CONSUL_HOST", "127.0.0.1")
# port = app.config.get("CONSUL_PORT", "8500")
# name = app.config.get("APP_NAME")
# service_info = app.config.get("SERVICE_INFO")
# service_id = service_info.get("service_id") if service_info and service_info.get("service_id") else name
# consul = Consul(host=host, port=port)
# print(service_id)
# print(consul.service_deregister(service_id))
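# Illustrative wiring sketch (not part of the original module). Assumption:
# a Flask app whose config provides the keys read in ConsulRegister.__init__;
# all values below are hypothetical and a Consul agent must be reachable.
def _example_register(app):
    app.config.setdefault("CONSUL_HOST", "127.0.0.1")
    app.config.setdefault("CONSUL_PORT", "8500")
    app.config.setdefault("APP_NAME", "demo-service")
    app.config.setdefault("SERVICE_INFO", {"address": "127.0.0.1", "port": 5000})
    register = ConsulRegister(app)  # registers the service on construction
    # Pick a healthy node with the round-robin balancer defined above.
    node = register.get_health_service_node_by_balance("demo-service", balance=rr_balance)
    return register, node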
|
PypiClean
|
/mmdet_taeuk4958-1.0.9-py3-none-any.whl/mmdet_taeuk4958/datasets/pipelines/formatting.py
|
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
Args:
data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
be converted.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
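# Illustrative sketch (not part of the original module): to_tensor maps each
# supported input type onto a torch.Tensor as documented above.
def _example_to_tensor():
    assert to_tensor(np.zeros((2, 2))).shape == (2, 2)
    assert to_tensor([1, 2, 3]).tolist() == [1, 2, 3]
    assert to_tensor(1).dtype == torch.int64      # int -> LongTensor
    assert to_tensor(1.0).dtype == torch.float32  # float -> FloatTensor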
@PIPELINES.register_module()
class ToTensor:
"""Convert some results to :obj:`torch.Tensor` by given keys.
Args:
keys (Sequence[str]): Keys that need to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert data in results to :obj:`torch.Tensor`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted
to :obj:`torch.Tensor`.
"""
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
"""Convert image to :obj:`torch.Tensor` by given keys.
The dimension order of input image is (H, W, C). The pipeline will convert
    it to (C, H, W). If only 2 dimensions (H, W) are given, the output will be
    (1, H, W).
Args:
keys (Sequence[str]): Key of images to be converted to Tensor.
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
"""Call function to convert image in results to :obj:`torch.Tensor` and
transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
dict: The result dict contains the image converted
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
"""
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = (to_tensor(img.transpose(2, 0, 1))).contiguous()
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
"""Transpose some results by given keys.
Args:
keys (Sequence[str]): Keys of results to be transposed.
order (Sequence[int]): Order of transpose.
"""
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
"""Call function to transpose the channel order of data in results.
Args:
results (dict): Result dict contains the data to transpose.
Returns:
dict: The result dict contains the data transposed to \
``self.order``.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@PIPELINES.register_module()
class ToDataContainer:
"""Convert results to :obj:`mmcv.DataContainer` by given fields.
Args:
fields (Sequence[dict]): Each field is a dict like
``dict(key='xxx', **kwargs)``. The ``key`` in result will
be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))``.
"""
def __init__(self,
fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))):
self.fields = fields
def __call__(self, results):
"""Call function to convert data in results to
:obj:`mmcv.DataContainer`.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data converted to \
:obj:`mmcv.DataContainer`.
"""
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
return self.__class__.__name__ + f'(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle:
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img",
"proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- proposals: (1)to tensor, (2)to DataContainer
- gt_bboxes: (1)to tensor, (2)to DataContainer
- gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
- gt_labels: (1)to tensor, (2)to DataContainer
- gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
(3)to DataContainer (stack=True)
Args:
img_to_float (bool): Whether to force the image to be converted to
float type. Default: True.
pad_val (dict): A dict for padding value in batch collating,
the default value is `dict(img=0, masks=0, seg=255)`.
Without this argument, the padding value of "gt_semantic_seg"
will be set to 0 by default, which should be 255.
"""
def __init__(self,
img_to_float=True,
pad_val=dict(img=0, masks=0, seg=255)):
self.img_to_float = img_to_float
self.pad_val = pad_val
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with \
default bundle.
"""
if 'img' in results:
img = results['img']
if self.img_to_float is True and img.dtype == np.uint8:
                # Normally, an image is of uint8 type without normalization.
                # In that case it must be forced to float32, otherwise model
                # training and inference will be wrong. Only used for YOLOX
                # currently.
img = img.astype(np.float32)
# add default meta keys
results = self._add_default_meta_keys(results)
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['img'] = DC(
to_tensor(img), padding_value=self.pad_val['img'], stack=True)
for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
if key not in results:
continue
results[key] = DC(to_tensor(results[key]))
if 'gt_masks' in results:
results['gt_masks'] = DC(
results['gt_masks'],
padding_value=self.pad_val['masks'],
cpu_only=True)
if 'gt_semantic_seg' in results:
results['gt_semantic_seg'] = DC(
to_tensor(results['gt_semantic_seg'][None, ...]),
padding_value=self.pad_val['seg'],
stack=True)
return results
def _add_default_meta_keys(self, results):
"""Add default meta keys.
We set default meta keys including `pad_shape`, `scale_factor` and
`img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
`Pad` are implemented during the whole pipeline.
Args:
results (dict): Result dict contains the data to convert.
Returns:
results (dict): Updated result dict contains the data to convert.
"""
img = results['img']
results.setdefault('pad_shape', img.shape)
results.setdefault('scale_factor', 1.0)
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results.setdefault(
'img_norm_cfg',
dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False))
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(img_to_float={self.img_to_float})'
@PIPELINES.register_module()
class Collect:
"""Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img", "proposals", "gt_bboxes",
"gt_bboxes_ignore", "gt_labels", and/or "gt_masks".
The "img_meta" item is always populated. The contents of the "img_meta"
dictionary depends on "meta_keys". By default this includes:
- "img_shape": shape of the image input to the network as a tuple \
(h, w, c). Note that images may be zero padded on the \
bottom/right if the batch tensor is larger than this shape.
- "scale_factor": a float indicating the preprocessing scale
- "flip": a boolean indicating if image flip transform was used
- "filename": path to the image file
- "ori_shape": original shape of the image as a tuple (h, w, c)
- "pad_shape": image shape after padding
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
Args:
keys (Sequence[str]): Keys of results to be collected in ``data``.
meta_keys (Sequence[str], optional): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
'pad_shape', 'scale_factor', 'flip', 'flip_direction',
'img_norm_cfg')``
"""
def __init__(self,
keys,
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
"""Call function to collect keys in results. The keys in ``meta_keys``
will be converted to :obj:mmcv.DataContainer.
Args:
results (dict): Result dict contains the data to collect.
Returns:
dict: The result dict contains the following keys
- keys in``self.keys``
- ``img_metas``
"""
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
@PIPELINES.register_module()
class WrapFieldsToLists:
"""Wrap fields of the data dictionary into lists for evaluation.
This class can be used as a last step of a test or validation
pipeline for single image evaluation or inference.
Example:
>>> test_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
>>> dict(type='Pad', size_divisor=32),
>>> dict(type='ImageToTensor', keys=['img']),
>>> dict(type='Collect', keys=['img']),
>>> dict(type='WrapFieldsToLists')
>>> ]
"""
def __call__(self, results):
"""Call function to wrap fields into lists.
Args:
results (dict): Result dict contains the data to wrap.
Returns:
dict: The result dict where value of ``self.keys`` are wrapped \
into list.
"""
# Wrap dict fields into lists
for key, val in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
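# Illustrative pipeline sketch (not part of the original module). Assumption:
# used inside an mmdet config after the usual loading/augmentation steps;
# DefaultFormatBundle and Collect are the classes registered above.
_example_train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]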
|
PypiClean
|
/musma_ray-1.0.0.2-py3-none-any.whl/release/ray_release/result.py
|
import enum
from dataclasses import dataclass
from typing import Optional, Dict, Tuple
@dataclass
class Result:
results: Optional[Dict] = None
status: str = "invalid"
return_code: int = 0
last_logs: Optional[str] = None
runtime: Optional[float] = None
stable: bool = True
buildkite_url: Optional[str] = None
wheels_url: Optional[str] = None
cluster_url: Optional[str] = None
class ExitCode(enum.Enum):
# If you change these, also change the `retry` section
# in `build_pipeline.py` and the `reason()` function in `run_e2e.sh`
SUCCESS = 0 # Do not set/return this manually
UNCAUGHT = 1 # Do not set/return this manually
UNSPECIFIED = 2
UNKNOWN = 3
# Hard infra errors (non-retryable)
CLI_ERROR = 10
CONFIG_ERROR = 11
SETUP_ERROR = 12
CLUSTER_RESOURCE_ERROR = 13
CLUSTER_ENV_BUILD_ERROR = 14
CLUSTER_STARTUP_ERROR = 15
LOCAL_ENV_SETUP_ERROR = 16
REMOTE_ENV_SETUP_ERROR = 17
# ANYSCALE_SDK_ERROR = 19
# Infra timeouts (retryable)
RAY_WHEELS_TIMEOUT = 30
CLUSTER_ENV_BUILD_TIMEOUT = 31
CLUSTER_STARTUP_TIMEOUT = 32
CLUSTER_WAIT_TIMEOUT = 33
# Command errors
COMMAND_ERROR = 40
COMMAND_ALERT = 41
COMMAND_TIMEOUT = 42
PREPARE_ERROR = 43
def handle_exception(e: Exception) -> Tuple[ExitCode, str, Optional[int]]:
from ray_release.exception import ReleaseTestError
if isinstance(e, ReleaseTestError):
exit_code = e.exit_code
# Legacy reporting
if 1 <= exit_code.value < 10:
error_type = "runtime_error"
runtime = None
elif 10 <= exit_code.value < 20:
error_type = "infra_error"
runtime = None
elif 30 <= exit_code.value < 40:
error_type = "infra_timeout"
runtime = None
elif exit_code == ExitCode.COMMAND_TIMEOUT:
error_type = "timeout"
runtime = 0
elif 40 <= exit_code.value < 50:
error_type = "error"
runtime = 0
else:
error_type = "error"
runtime = 0
else:
exit_code = ExitCode.UNKNOWN
error_type = "unknown error"
runtime = 0
return exit_code, error_type, runtime
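# Illustrative sketch (not part of the original module): any exception that
# is not a ReleaseTestError falls through to the UNKNOWN branch above.
def _example_handle_unknown():
    exit_code, error_type, runtime = handle_exception(ValueError("boom"))
    assert exit_code is ExitCode.UNKNOWN
    assert error_type == "unknown error" and runtime == 0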
|
PypiClean
|
/django-simple-permissions-0.7.0.tar.gz/django-simple-permissions-0.7.0/src/simple_permissions/utils/logics.py
|
from simple_permissions.logics import PermissionLogic
from simple_permissions.utils.filters import add_permission_filter
def add_permission_logic(model, permission_logic):
"""
Add permission logic to the model
Parameters
----------
model : django model class
A django model class which will be treated by the specified permission
logic
permission_logic : permission logic instance
A permission logic instance which will be used to determine permission
of the model
Examples
--------
>>> from django.db import models
>>> from simple_permissions.logics import PermissionLogic
>>> class Mock(models.Model):
... name = models.CharField('name', max_length=120)
>>> add_permission_logic(Mock, PermissionLogic())
"""
if not isinstance(permission_logic, PermissionLogic):
raise AttributeError(
'`permission_logic` must be an instance of PermissionLogic')
if not hasattr(model, '_permission_logics'):
model._permission_logics = set()
if not hasattr(model, '_permission_handler'):
from simple_permissions.utils.handlers import registry
# register default permission handler
registry.register(model, handler=None)
model._permission_logics.add(permission_logic)
# store target model to the permission_logic instance
permission_logic.model = model
add_permission_filter(model)
def remove_permission_logic(model, permission_logic, fail_silently=True):
"""
    Remove permission logic from the model
Parameters
----------
model : django model class
A django model class which will be treated by the specified permission
logic
permission_logic : permission logic class or instance
A permission logic class or instance which will be used to determine
permission of the model
    fail_silently : boolean
        If `True`, do not raise KeyError even if the specified permission logic
        has not been registered.
Examples
--------
>>> from django.db import models
>>> from simple_permissions.logics import PermissionLogic
>>> class Mock(models.Model):
... name = models.CharField('name', max_length=120)
>>> logic = PermissionLogic()
>>> add_permission_logic(Mock, logic)
>>> remove_permission_logic(Mock, logic)
"""
if not hasattr(model, '_permission_logics'):
model._permission_logics = set()
if not isinstance(permission_logic, PermissionLogic):
        # remove all registered logics of the given class
remove_set = set()
for _permission_logic in model._permission_logics:
if _permission_logic.__class__ == permission_logic:
remove_set.add(_permission_logic)
# difference
model._permission_logics = model._permission_logics.difference(remove_set)
else:
if fail_silently and permission_logic not in model._permission_logics:
pass
else:
model._permission_logics.remove(permission_logic)
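# Illustrative sketch (not part of the original module): passing the logic
# *class* rather than an instance removes every registered instance of that
# class, per the isinstance branch above. The model is assumed to be a
# regular Django model defined in a configured project.
def _example_remove_by_class(model):
    add_permission_logic(model, PermissionLogic())
    add_permission_logic(model, PermissionLogic())
    remove_permission_logic(model, PermissionLogic)  # class, not instance
    assert not model._permission_logics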
|
PypiClean
|
/parsec_cloud-2.16.0rc2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/parsec/core/fs/remote_loader.py
|
from __future__ import annotations
import math
from contextlib import contextmanager
from typing import TYPE_CHECKING, Awaitable, Callable, Iterable, Iterator
import trio
from trio import MemoryReceiveChannel, MemorySendChannel, open_memory_channel
from parsec import FEATURE_FLAGS
from parsec._parsec import AuthenticatedCmds as RsBackendAuthenticatedCmds
from parsec._parsec import (
BlockAccess,
BlockCreateRepAlreadyExists,
BlockCreateRepInMaintenance,
BlockCreateRepNotAllowed,
BlockCreateRepOk,
BlockCreateRepTimeout,
BlockReadRepInMaintenance,
BlockReadRepNotAllowed,
BlockReadRepNotFound,
BlockReadRepOk,
ChunkID,
CoreEvent,
CryptoError,
DateTime,
DeviceID,
EntryID,
HashDigest,
LocalDevice,
RealmCreateRepAlreadyExists,
RealmCreateRepOk,
RealmGetRoleCertificatesRepNotAllowed,
RealmGetRoleCertificatesRepOk,
RealmID,
RealmRole,
RemoteDevicesManagerBackendOfflineError,
RemoteDevicesManagerDeviceNotFoundError,
RemoteDevicesManagerError,
RemoteDevicesManagerInvalidTrustchainError,
RemoteDevicesManagerUserNotFoundError,
SequesterServiceID,
UserID,
VerifyKey,
VlobCreateRepAlreadyExists,
VlobCreateRepBadEncryptionRevision,
VlobCreateRepInMaintenance,
VlobCreateRepNotAllowed,
VlobCreateRepOk,
VlobCreateRepRejectedBySequesterService,
VlobCreateRepRequireGreaterTimestamp,
VlobCreateRepSequesterInconsistency,
VlobCreateRepTimeout,
VlobID,
VlobListVersionsRepInMaintenance,
VlobListVersionsRepNotAllowed,
VlobListVersionsRepNotFound,
VlobListVersionsRepOk,
VlobReadRepBadEncryptionRevision,
VlobReadRepBadVersion,
VlobReadRepInMaintenance,
VlobReadRepNotAllowed,
VlobReadRepNotFound,
VlobReadRepOk,
VlobUpdateRepBadEncryptionRevision,
VlobUpdateRepBadVersion,
VlobUpdateRepInMaintenance,
VlobUpdateRepNotAllowed,
VlobUpdateRepNotFound,
VlobUpdateRepOk,
VlobUpdateRepRejectedBySequesterService,
VlobUpdateRepRequireGreaterTimestamp,
VlobUpdateRepSequesterInconsistency,
VlobUpdateRepTimeout,
WorkspaceEntry,
)
from parsec.api.data import (
AnyRemoteManifest,
DataError,
DeviceCertificate,
RealmRoleCertificate,
RevokedUserCertificate,
SequesterAuthorityCertificate,
SequesterServiceCertificate,
UserCertificate,
)
from parsec.api.data.manifest import manifest_decrypt_verify_and_load
from parsec.core.backend_connection import BackendConnectionError, BackendNotAvailable
from parsec.core.fs.exceptions import (
FSBackendOfflineError,
FSBadEncryptionRevision,
FSDeviceNotFoundError,
FSError,
FSInvalidTrustchainError,
FSLocalMissError,
FSRemoteBlockNotFound,
FSRemoteManifestNotFound,
FSRemoteManifestNotFoundBadVersion,
FSRemoteOperationError,
FSRemoteSyncError,
FSSequesterServiceRejectedError,
FSServerUploadTemporarilyUnavailableError,
FSUserNotFoundError,
FSWorkspaceInMaintenance,
FSWorkspaceNoReadAccess,
FSWorkspaceNoWriteAccess,
)
from parsec.core.fs.storage.workspace_storage import AnyWorkspaceStorage
from parsec.core.remote_devices_manager import RemoteDevicesManager
from parsec.event_bus import EventBus
from parsec.utils import open_service_nursery
if TYPE_CHECKING:
from parsec.core.backend_connection import BackendAuthenticatedCmds
# This value is used to increment the timestamp provided by the backend
# when a manifest restamping is required. This value should be kept small
# compared to the certificate stamp ahead value, so the certificate updates have
# priority over manifest updates.
MANIFEST_STAMP_AHEAD_US = 100_000 # microseconds, or 0.1 seconds
# This value is used to increment the timestamp provided by the backend
# when a certificate restamping is required. This value should be kept big
# compared to the manifest stamp ahead value, so the certificate updates have
# priority over manifest updates.
ROLE_CERTIFICATE_STAMP_AHEAD_US = 500_000 # microseconds, or 0.5 seconds
class VlobRequireGreaterTimestampError(Exception):
@property
def strictly_greater_than(self) -> DateTime:
return self.args[0]
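# Illustrative sketch (not part of the original module): the property above
# simply exposes the first positional argument, so construction looks like
#
#   err = VlobRequireGreaterTimestampError(some_datetime)
#   assert err.strictly_greater_than is some_datetime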
class VlobSequesterInconsistencyError(Exception):
def __init__(
self,
sequester_authority_certificate: bytes,
sequester_services_certificates: Iterable[bytes],
):
self.sequester_authority_certificate = sequester_authority_certificate
self.sequester_services_certificates = sequester_services_certificates
def _validate_sequester_config(
root_verify_key: VerifyKey,
sequester_authority_certificate: bytes | None,
sequester_services_certificates: Iterable[bytes] | None,
) -> tuple[SequesterAuthorityCertificate | None, list[SequesterServiceCertificate] | None]:
if sequester_authority_certificate is None:
return None, None
try:
        # In theory `sequester_authority_certificate` and `sequester_services_certificates`
        # should both be None or both not None. However, this is a cheap check to
        # cover the case where the server made a mistake.
sequester_services_certificates = sequester_services_certificates or ()
# 1) Validate authority certificate
        # Sequester authority is always signed by the root key, hence `expected_author` is always None
authority = SequesterAuthorityCertificate.verify_and_load(
sequester_authority_certificate, author_verify_key=root_verify_key
)
# 2) Validate services certificates
services = []
for sc in sequester_services_certificates:
# Cannot use the regular `verify_and_load` here given authority key is
# not a regular `parsec.crypto.VerifyKey`
service = SequesterServiceCertificate.load(authority.verify_key_der.verify(sc))
services.append(service)
except (CryptoError, DataError) as exc:
raise FSInvalidTrustchainError(
f"Invalid sequester configuration returned by server: {exc}"
) from exc
return authority, services
@contextmanager
def translate_remote_devices_manager_errors() -> Iterator[None]:
try:
yield
except RemoteDevicesManagerBackendOfflineError as exc:
raise FSBackendOfflineError(str(exc)) from exc
except RemoteDevicesManagerUserNotFoundError as exc:
raise FSUserNotFoundError(str(exc)) from exc
except RemoteDevicesManagerDeviceNotFoundError as exc:
raise FSDeviceNotFoundError(str(exc)) from exc
except RemoteDevicesManagerInvalidTrustchainError as exc:
raise FSInvalidTrustchainError(str(exc)) from exc
except RemoteDevicesManagerError as exc:
raise FSRemoteOperationError(str(exc)) from exc
@contextmanager
def translate_backend_cmds_errors() -> Iterator[None]:
try:
yield
except BackendNotAvailable as exc:
raise FSBackendOfflineError(str(exc)) from exc
except BackendConnectionError as exc:
raise FSRemoteOperationError(str(exc)) from exc
class PyUserRemoteLoader:
def __init__(
self,
device: LocalDevice,
workspace_id: EntryID,
backend_cmds: BackendAuthenticatedCmds | RsBackendAuthenticatedCmds,
remote_devices_manager: RemoteDevicesManager,
):
self.device = device
self.workspace_id = workspace_id
self.backend_cmds = backend_cmds
self.remote_devices_manager = remote_devices_manager
self._realm_role_certificates_cache: list[RealmRoleCertificate] | None = None
def clear_realm_role_certificate_cache(self) -> None:
self._realm_role_certificates_cache = None
async def _load_realm_role_certificates(
self, realm_id: EntryID | None = None
) -> tuple[list[RealmRoleCertificate], dict[UserID, RealmRole]]:
with translate_backend_cmds_errors():
rep = await self.backend_cmds.realm_get_role_certificates(
RealmID.from_entry_id(realm_id or self.workspace_id)
)
if isinstance(rep, RealmGetRoleCertificatesRepNotAllowed):
            # Seems we have lost access to the realm
raise FSWorkspaceNoReadAccess("Cannot get workspace roles: no read access")
elif not isinstance(rep, RealmGetRoleCertificatesRepOk):
raise FSError(f"Cannot retrieve workspace roles: {rep}")
try:
# Must read unverified certificates to access metadata
unsecure_certifs = sorted(
[
(RealmRoleCertificate.unsecure_load(uv_role), uv_role)
for uv_role in rep.certificates
],
key=lambda x: x[0].timestamp,
)
current_roles: dict[UserID, RealmRole] = {}
owner_only = (RealmRole.OWNER,)
owner_or_manager = (RealmRole.OWNER, RealmRole.MANAGER)
# Now verify each certif
for unsecure_certif, raw_certif in unsecure_certifs:
certif_author = unsecure_certif.author
if certif_author is None:
raise FSError("Expected a certificate signed by a user")
with translate_remote_devices_manager_errors():
author = await self.remote_devices_manager.get_device(certif_author)
RealmRoleCertificate.verify_and_load(
raw_certif,
author_verify_key=author.verify_key,
expected_author=author.device_id,
)
# Make sure author had the right to do this
existing_user_role = current_roles.get(unsecure_certif.user_id)
if not current_roles and unsecure_certif.user_id == author.device_id.user_id:
# First user is auto-signed
needed_roles: tuple[RealmRole | None, ...] = (None,)
elif (
existing_user_role in owner_or_manager
or unsecure_certif.role in owner_or_manager
):
needed_roles = owner_only
else:
needed_roles = owner_or_manager
if current_roles.get(certif_author.user_id) not in needed_roles:
raise FSError(
f"Invalid realm role certificates: "
f"{unsecure_certif.author} has not right to give "
f"{unsecure_certif.role} role to {unsecure_certif.user_id.str} "
f"on {unsecure_certif.timestamp}"
)
if unsecure_certif.role is None:
current_roles.pop(unsecure_certif.user_id, None)
else:
current_roles[unsecure_certif.user_id] = unsecure_certif.role
# Decryption error
except DataError as exc:
raise FSError(f"Invalid realm role certificates: {exc}") from exc
# Now unsecure_certifs is no longer insecure given we have validated its items
return [c for c, _ in unsecure_certifs], current_roles
async def load_realm_role_certificates(
self, realm_id: EntryID | None = None
) -> list[RealmRoleCertificate]:
"""
Raises:
FSError
FSBackendOfflineError
FSRemoteOperationError
FSWorkspaceNoAccess
FSUserNotFoundError
FSDeviceNotFoundError
FSInvalidTrustchainError
"""
certificates, _ = await self._load_realm_role_certificates(realm_id)
return certificates
async def load_realm_current_roles(
self, realm_id: EntryID | None = None
) -> dict[UserID, RealmRole]:
"""
Raises:
FSError
FSBackendOfflineError
FSRemoteOperationError
FSWorkspaceNoAccess
FSUserNotFoundError
FSDeviceNotFoundError
FSInvalidTrustchainError
"""
_, current_roles = await self._load_realm_role_certificates(realm_id)
return current_roles
async def get_user(
self, user_id: UserID, no_cache: bool = False
) -> tuple[UserCertificate, RevokedUserCertificate | None]:
"""
Raises:
FSRemoteOperationError
FSBackendOfflineError
FSUserNotFoundError
FSInvalidTrustchainError
"""
with translate_remote_devices_manager_errors():
return await self.remote_devices_manager.get_user(user_id, no_cache=no_cache)
async def get_device(self, device_id: DeviceID, no_cache: bool = False) -> DeviceCertificate:
"""
Raises:
FSRemoteOperationError
FSBackendOfflineError
FSUserNotFoundError
FSDeviceNotFoundError
FSInvalidTrustchainError
"""
with translate_remote_devices_manager_errors():
return await self.remote_devices_manager.get_device(device_id, no_cache=no_cache)
async def list_versions(self, entry_id: EntryID) -> dict[int, tuple[DateTime, DeviceID]]:
"""
Raises:
FSError
FSRemoteOperationError
FSBackendOfflineError
FSWorkspaceInMaintenance
FSRemoteManifestNotFound
"""
with translate_backend_cmds_errors():
rep = await self.backend_cmds.vlob_list_versions(VlobID.from_entry_id(entry_id))
if isinstance(rep, VlobListVersionsRepNotAllowed):
            # Seems we have lost access to the realm
raise FSWorkspaceNoReadAccess("Cannot load manifest: no read access")
elif isinstance(rep, VlobListVersionsRepNotFound):
raise FSRemoteManifestNotFound(entry_id)
elif isinstance(rep, VlobListVersionsRepInMaintenance):
raise FSWorkspaceInMaintenance(
"Cannot download vlob while the workspace is in maintenance"
)
elif not isinstance(rep, VlobListVersionsRepOk):
raise FSError(f"Cannot fetch vlob {entry_id.hex}: {rep}")
return rep.versions
async def create_realm(self, realm_id: EntryID) -> None:
"""
Raises:
FSError
FSRemoteOperationError
FSBackendOfflineError
"""
timestamp = self.device.timestamp()
certif = RealmRoleCertificate.build_realm_root_certif(
author=self.device.device_id,
timestamp=timestamp,
realm_id=RealmID.from_entry_id(realm_id),
).dump_and_sign(self.device.signing_key)
with translate_backend_cmds_errors():
rep = await self.backend_cmds.realm_create(certif)
if isinstance(rep, RealmCreateRepAlreadyExists):
# It's possible a previous attempt to create this realm
# succeeded but we didn't receive the confirmation, hence
            # we behave idempotently here.
return
elif not isinstance(rep, RealmCreateRepOk):
raise FSError(f"Cannot create realm {realm_id.hex}: {rep}")
# The Rust binding is not accepted as a base class, hence subclassing the pure Python implementation
class RemoteLoader(PyUserRemoteLoader):
def __init__(
self,
device: LocalDevice,
workspace_id: EntryID,
get_workspace_entry: Callable[[], WorkspaceEntry],
get_previous_workspace_entry: Callable[[], Awaitable[WorkspaceEntry | None]],
backend_cmds: BackendAuthenticatedCmds | RsBackendAuthenticatedCmds,
remote_devices_manager: RemoteDevicesManager,
local_storage: AnyWorkspaceStorage,
event_bus: EventBus,
):
super().__init__(
device,
workspace_id,
backend_cmds,
remote_devices_manager,
)
self.get_workspace_entry = get_workspace_entry
self.get_previous_workspace_entry = get_previous_workspace_entry
self.local_storage = local_storage
self.event_bus = event_bus
self._sequester_services_cache: list[SequesterServiceCertificate] | None = None
async def _get_user_realm_role_at(
self, user_id: UserID, timestamp: DateTime, author_last_role_granted_on: DateTime
) -> RealmRole | None:
# Lazily iterate over user certificates from newest to oldest
def _get_user_certificates_from_cache() -> Iterator[RealmRoleCertificate]:
if self._realm_role_certificates_cache is None:
return
for certif in reversed(self._realm_role_certificates_cache):
if certif.user_id == user_id:
yield certif
# Reload cache certificates if necessary
last_certif = next(_get_user_certificates_from_cache(), None)
if last_certif is None or (
last_certif.timestamp < timestamp
and last_certif.timestamp < author_last_role_granted_on
):
self._realm_role_certificates_cache, _ = await self._load_realm_role_certificates()
# Find the corresponding role
assert self._realm_role_certificates_cache is not None
for certif in _get_user_certificates_from_cache():
if certif.timestamp <= timestamp:
return certif.role
else:
return None
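    # Hedged walk-through of the lookup above (illustrative only): with
    # certificates granting MANAGER at t1 and revoking the role (None) at t3,
    # a query for a timestamp between t1 and t3 returns MANAGER, while a
    # query before t1 exhausts the loop and falls through to None.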
async def load_blocks(self, accesses: list[BlockAccess]) -> None:
async with open_service_nursery() as nursery:
async with await self.receive_load_blocks(accesses, nursery) as receive_channel:
async for value in receive_channel:
pass
async def receive_load_blocks(
self, blocks: list[BlockAccess], nursery: trio.Nursery
) -> "MemoryReceiveChannel[BlockAccess]":
"""
Raises:
FSError
FSRemoteBlockNotFound
FSBackendOfflineError
FSWorkspaceInMaintenance
"""
blocks_iter = iter(blocks)
send_channel, receive_channel = open_memory_channel[BlockAccess](math.inf)
async def _loader(send_channel: "MemorySendChannel[BlockAccess]") -> None:
async with send_channel:
while True:
access = next(blocks_iter, None)
if not access:
break
await self.load_block(access)
await send_channel.send(access)
async with send_channel:
for _ in range(4):
nursery.start_soon(_loader, send_channel.clone())
return receive_channel
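    # Hedged consumption sketch for receive_load_blocks (illustrative; mirrors
    # load_blocks above, with a hypothetical per-block progress callback):
    #
    #     async with open_service_nursery() as nursery:
    #         channel = await loader.receive_load_blocks(accesses, nursery)
    #         async with channel:
    #             async for access in channel:
    #                 report_progress(access)  # hypothetical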
async def load_block(self, access: BlockAccess) -> None:
"""
Raises:
FSError
FSRemoteBlockNotFound
FSBackendOfflineError
FSRemoteOperationError
FSWorkspaceInMaintenance
FSWorkspaceNoAccess
"""
# Already present
if await self.local_storage.is_clean_block(access.id):
return
# Download
with translate_backend_cmds_errors():
rep = await self.backend_cmds.block_read(access.id)
if isinstance(rep, BlockReadRepNotFound):
raise FSRemoteBlockNotFound(access)
elif isinstance(rep, BlockReadRepNotAllowed):
# Seems we lost the access to the realm
raise FSWorkspaceNoReadAccess("Cannot load block: no read access")
elif isinstance(rep, BlockReadRepInMaintenance):
            raise FSWorkspaceInMaintenance(
                "Cannot download block while the workspace is in maintenance"
            )
elif not isinstance(rep, BlockReadRepOk):
raise FSError(f"Cannot download block: `{rep}`")
# Decryption
try:
block = access.key.decrypt(rep.block)
# Decryption error
except CryptoError as exc:
raise FSError(f"Cannot decrypt block: {exc}") from exc
# TODO: let encryption manager do the digest check ?
assert HashDigest.from_data(block) == access.digest, access
removed_block_ids = await self.local_storage.set_clean_block(access.id, block)
self.event_bus.send(
CoreEvent.FS_BLOCK_DOWNLOADED, workspace_id=self.workspace_id, block_access=access
)
if removed_block_ids:
self.event_bus.send(
CoreEvent.FS_BLOCK_PURGED,
workspace_id=self.workspace_id,
block_ids=removed_block_ids,
)
async def upload_blocks(self, blocks: list[BlockAccess]) -> None:
blocks_iter = iter(blocks)
async def _uploader() -> None:
while True:
access = next(blocks_iter, None)
if not access:
break
try:
data = await self.local_storage.get_dirty_block(access.id)
except FSLocalMissError:
continue
await self.upload_block(access, data)
async with open_service_nursery() as nursery:
for _ in range(4):
nursery.start_soon(_uploader)
async def upload_block(self, access: BlockAccess, data: bytes) -> None:
"""
Raises:
FSError
FSBackendOfflineError
FSRemoteOperationError
FSWorkspaceInMaintenance
FSWorkspaceNoAccess
"""
# Encryption
try:
ciphered = access.key.encrypt(data)
# Encryption error
except CryptoError as exc:
raise FSError(f"Cannot encrypt block: {exc}") from exc
# Upload block
with translate_backend_cmds_errors():
rep = await self.backend_cmds.block_create(
access.id, RealmID.from_entry_id(self.workspace_id), ciphered
)
if isinstance(rep, BlockCreateRepAlreadyExists):
# Ignore exception if the block has already been uploaded
# This might happen when a failure occurs before the local storage is updated
pass
elif isinstance(rep, BlockCreateRepNotAllowed):
# Seems we lost the access to the realm
raise FSWorkspaceNoWriteAccess("Cannot upload block: no write access")
elif isinstance(rep, BlockCreateRepInMaintenance):
            raise FSWorkspaceInMaintenance("Cannot upload block while the workspace is in maintenance")
elif isinstance(rep, BlockCreateRepTimeout):
raise FSServerUploadTemporarilyUnavailableError("Temporary failure during block upload")
elif not isinstance(rep, BlockCreateRepOk):
raise FSError(f"Cannot upload block: {rep}")
# Update local storage
removed_block_ids = await self.local_storage.set_clean_block(access.id, data)
await self.local_storage.clear_chunk(ChunkID.from_block_id(access.id), miss_ok=True)
if removed_block_ids:
self.event_bus.send(
CoreEvent.FS_BLOCK_PURGED,
workspace_id=self.workspace_id,
block_ids=removed_block_ids,
)
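    # Hedged flow note: a successful upload promotes the data to a clean block
    # locally (set_clean_block + clear_chunk above), so a subsequent
    # load_block() for the same access is served from local storage.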
async def load_manifest(
self,
entry_id: EntryID,
version: int | None = None,
timestamp: DateTime | None = None,
expected_backend_timestamp: DateTime | None = None,
workspace_entry: WorkspaceEntry | None = None,
) -> AnyRemoteManifest:
"""
Download a manifest.
        Only one of the version and timestamp parameters can be specified at a time.
        expected_backend_timestamp enables checking a timestamp against the one
        returned by the backend.
Raises:
FSError
FSBackendOfflineError
FSRemoteOperationError
FSWorkspaceInMaintenance
FSRemoteManifestNotFound
FSBadEncryptionRevision
FSWorkspaceNoAccess
FSUserNotFoundError
FSDeviceNotFoundError
FSInvalidTrustchainError
"""
        assert (
            timestamp is None or version is None
        ), "The timestamp and version arguments cannot both be provided"
# Get the current and requested workspace entry
# They're usually the same, except when loading from a workspace while it's in maintenance
current_workspace_entry = self.get_workspace_entry()
workspace_entry = current_workspace_entry if workspace_entry is None else workspace_entry
# Download the vlob
with translate_backend_cmds_errors():
rep = await self.backend_cmds.vlob_read(
workspace_entry.encryption_revision,
VlobID.from_entry_id(entry_id),
version=version,
timestamp=timestamp if version is None else None,
)
# Special case for loading manifest while in maintenance.
# This is done to allow users to fetch data from a workspace while it's being reencrypted.
# If the workspace is in maintenance for another reason (such as garbage collection),
# the recursive call to load manifest will simply also fail with an FSWorkspaceInMaintenance.
if (
isinstance(rep, VlobReadRepInMaintenance)
and workspace_entry.encryption_revision == current_workspace_entry.encryption_revision
):
# Getting the last workspace entry with the previous encryption revision
# requires one or several calls to the backend, meaning the following exceptions might get raised:
# - FSError
# - FSBackendOfflineError
# - FSWorkspaceInMaintenance
            # It is fine to let those exceptions bubble up, as they are all valid reasons for failing to load a manifest.
previous_workspace_entry = await self.get_previous_workspace_entry()
if previous_workspace_entry is not None:
# Make sure we don't fall into an infinite loop because of some other bug
assert (
previous_workspace_entry.encryption_revision
< self.get_workspace_entry().encryption_revision
)
# Recursive call to `load_manifest`, requiring an older encryption revision than the current one
return await self.load_manifest(
entry_id,
version=version,
timestamp=timestamp,
expected_backend_timestamp=expected_backend_timestamp,
workspace_entry=previous_workspace_entry,
)
if isinstance(rep, VlobReadRepNotFound):
raise FSRemoteManifestNotFound(entry_id)
elif isinstance(rep, VlobReadRepNotAllowed):
# Seems we lost the access to the realm
raise FSWorkspaceNoReadAccess("Cannot load manifest: no read access")
elif isinstance(rep, VlobReadRepBadVersion):
raise FSRemoteManifestNotFoundBadVersion(entry_id)
elif isinstance(rep, VlobReadRepBadEncryptionRevision):
raise FSBadEncryptionRevision(
f"Cannot fetch vlob {entry_id.hex}: Bad encryption revision provided"
)
elif isinstance(rep, VlobReadRepInMaintenance):
raise FSWorkspaceInMaintenance(
"Cannot download vlob while the workspace is in maintenance"
)
elif not isinstance(rep, VlobReadRepOk):
raise FSError(f"Cannot fetch vlob {entry_id.hex}: {rep}")
expected_version = rep.version
expected_author = rep.author
expected_timestamp = rep.timestamp
if version not in (None, expected_version):
raise FSError(
f"Backend returned invalid version for vlob {entry_id.hex} (expecting {version}, "
f"got {expected_version})"
)
if expected_backend_timestamp and expected_backend_timestamp != expected_timestamp:
raise FSError(
f"Backend returned invalid expected timestamp for vlob {entry_id.hex} at version "
f"{version} (expecting {expected_backend_timestamp}, got {expected_timestamp})"
)
with translate_remote_devices_manager_errors():
author = await self.remote_devices_manager.get_device(expected_author)
try:
remote_manifest = manifest_decrypt_verify_and_load(
rep.blob,
key=workspace_entry.key,
author_verify_key=author.verify_key,
expected_author=expected_author,
expected_timestamp=expected_timestamp,
expected_version=expected_version,
expected_id=entry_id,
)
except DataError as exc:
raise FSError(f"Cannot decrypt vlob: {exc}") from exc
# Get the timestamp of the last role for this particular user
author_last_role_granted_on = rep.author_last_role_granted_on
# Compatibility with older backends (best effort strategy)
if author_last_role_granted_on is None:
author_last_role_granted_on = self.device.timestamp()
# Finally make sure author was allowed to create this manifest
role_at_timestamp = await self._get_user_realm_role_at(
expected_author.user_id, expected_timestamp, author_last_role_granted_on
)
if role_at_timestamp is None:
raise FSError(
f"Manifest was created at {expected_timestamp} by `{expected_author.str}` "
"which had no right to access the workspace at that time"
)
elif role_at_timestamp == RealmRole.READER:
raise FSError(
f"Manifest was created at {expected_timestamp} by `{expected_author.str}` "
"which had no right to write on the workspace at that time"
)
return remote_manifest
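    # Hedged usage sketch (illustrative): version and timestamp are mutually
    # exclusive, so a caller either pins a version or asks for the state at a
    # point in time:
    #
    #     latest = await loader.load_manifest(entry_id)
    #     pinned = await loader.load_manifest(entry_id, version=3)
    #     dated = await loader.load_manifest(entry_id, timestamp=some_datetime)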
async def upload_manifest(
self,
entry_id: EntryID,
manifest: AnyRemoteManifest,
timestamp_greater_than: DateTime | None = None,
) -> AnyRemoteManifest:
"""
Raises:
FSError
FSRemoteSyncError
FSBackendOfflineError
FSWorkspaceInMaintenance
FSBadEncryptionRevision
            FSInvalidTrustchainError: if the backend sends an invalid sequester configuration
"""
assert manifest.author == self.device.device_id
# Restamp the manifest before uploading
timestamp = self.device.timestamp()
if timestamp_greater_than is not None:
timestamp = max(
timestamp, timestamp_greater_than.add(microseconds=MANIFEST_STAMP_AHEAD_US)
)
manifest = manifest.evolve(timestamp=timestamp)
workspace_entry = self.get_workspace_entry()
if self._sequester_services_cache is None:
# Regular mode: we only encrypt the blob with the workspace symmetric key
sequester_blob = None
try:
ciphered = manifest.dump_sign_and_encrypt(
key=workspace_entry.key, author_signkey=self.device.signing_key
)
except DataError as exc:
raise FSError(f"Cannot encrypt vlob: {exc}") from exc
else:
# Sequestered organization mode: we also encrypt the blob with each
# sequester services' asymmetric encryption key
try:
signed = manifest.dump_and_sign(author_signkey=self.device.signing_key)
except DataError as exc:
raise FSError(f"Cannot encrypt vlob: {exc}") from exc
ciphered = workspace_entry.key.encrypt(signed)
sequester_blob = {}
for service in self._sequester_services_cache:
sequester_blob[service.service_id] = service.encryption_key_der.encrypt(signed)
# Upload the vlob
try:
if manifest.version == 1:
await self._vlob_create(
workspace_entry.encryption_revision,
entry_id,
ciphered,
manifest.timestamp,
sequester_blob,
)
else:
await self._vlob_update(
workspace_entry.encryption_revision,
entry_id,
ciphered,
manifest.timestamp,
manifest.version,
sequester_blob,
)
# The backend notified us that some restamping is required
except VlobRequireGreaterTimestampError as exc:
return await self.upload_manifest(entry_id, manifest, exc.strictly_greater_than)
# The backend notified us that we didn't encrypt the blob for the right sequester
# services. This typically occurs for the first vlob update/create (since we lazily
# fetch sequester config) or if a sequester service has been created/disabled.
except VlobSequesterInconsistencyError as exc:
            # Ensure the config sent by the backend is valid
_, sequester_services = _validate_sequester_config(
root_verify_key=self.device.root_verify_key,
sequester_authority_certificate=exc.sequester_authority_certificate,
sequester_services_certificates=exc.sequester_services_certificates,
)
# Update our cache and retry the request
self._sequester_services_cache = sequester_services
return await self.upload_manifest(entry_id, manifest)
except FSSequesterServiceRejectedError as exc:
# Small hack to provide the manifest object that was lacking when the exception was raised
exc.manifest = manifest
raise exc
else:
return manifest
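    # Hedged restamping note (derived from the retry logic above): when the
    # backend answers "require greater timestamp T", the retry re-enters
    # upload_manifest with timestamp_greater_than=T, so the new manifest
    # timestamp becomes max(local_now, T + MANIFEST_STAMP_AHEAD_US
    # microseconds) and always moves strictly forward in time.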
async def _vlob_create(
self,
encryption_revision: int,
entry_id: EntryID,
ciphered: bytes,
now: DateTime,
sequester_blob: dict[SequesterServiceID, bytes] | None,
) -> None:
"""
Raises:
FSError
FSRemoteSyncError
FSBackendOfflineError
FSRemoteOperationError
FSWorkspaceInMaintenance
FSBadEncryptionRevision
FSWorkspaceNoAccess
"""
# Vlob upload
with translate_backend_cmds_errors():
rep = await self.backend_cmds.vlob_create(
RealmID.from_entry_id(self.workspace_id),
encryption_revision,
VlobID.from_entry_id(entry_id),
now,
ciphered,
sequester_blob,
)
if isinstance(rep, VlobCreateRepAlreadyExists):
raise FSRemoteSyncError(entry_id)
elif isinstance(rep, VlobCreateRepNotAllowed):
# Seems we lost the access to the realm
raise FSWorkspaceNoWriteAccess("Cannot upload manifest: no write access")
elif isinstance(rep, VlobCreateRepRequireGreaterTimestamp):
raise VlobRequireGreaterTimestampError(rep.strictly_greater_than)
elif isinstance(rep, VlobCreateRepBadEncryptionRevision):
raise FSBadEncryptionRevision(
f"Cannot create vlob {entry_id.hex}: Bad encryption revision provided"
)
elif isinstance(rep, VlobCreateRepInMaintenance):
raise FSWorkspaceInMaintenance(
"Cannot create vlob while the workspace is in maintenance"
)
elif isinstance(rep, VlobCreateRepSequesterInconsistency):
raise VlobSequesterInconsistencyError(
sequester_authority_certificate=rep.sequester_authority_certificate,
sequester_services_certificates=rep.sequester_services_certificates,
)
elif isinstance(rep, VlobCreateRepRejectedBySequesterService):
raise FSSequesterServiceRejectedError(
id=entry_id,
service_id=rep.service_id,
service_label=rep.service_label,
reason=rep.reason,
)
elif isinstance(rep, VlobCreateRepTimeout):
raise FSServerUploadTemporarilyUnavailableError("Temporary failure during vlob upload")
elif not isinstance(rep, VlobCreateRepOk):
raise FSError(f"Cannot create vlob {entry_id.hex}: {rep}")
async def _vlob_update(
self,
encryption_revision: int,
entry_id: EntryID,
ciphered: bytes,
now: DateTime,
version: int,
sequester_blob: dict[SequesterServiceID, bytes] | None,
) -> None:
"""
Raises:
FSError
FSRemoteSyncError
FSBackendOfflineError
FSRemoteOperationError
FSWorkspaceInMaintenance
FSBadEncryptionRevision
FSWorkspaceNoAccess
"""
# Vlob upload
with translate_backend_cmds_errors():
rep = await self.backend_cmds.vlob_update(
encryption_revision,
VlobID.from_entry_id(entry_id),
version,
now,
ciphered,
sequester_blob,
)
if isinstance(rep, VlobUpdateRepNotFound):
raise FSRemoteSyncError(entry_id)
elif isinstance(rep, VlobUpdateRepNotAllowed):
# Seems we lost the access to the realm
raise FSWorkspaceNoWriteAccess("Cannot upload manifest: no write access")
elif isinstance(rep, VlobUpdateRepRequireGreaterTimestamp):
raise VlobRequireGreaterTimestampError(rep.strictly_greater_than)
elif isinstance(rep, VlobUpdateRepBadVersion):
raise FSRemoteSyncError(entry_id)
elif isinstance(rep, VlobUpdateRepBadEncryptionRevision):
raise FSBadEncryptionRevision(
f"Cannot update vlob {entry_id.hex}: Bad encryption revision provided"
)
        elif isinstance(rep, VlobUpdateRepInMaintenance):
            raise FSWorkspaceInMaintenance(
                "Cannot update vlob while the workspace is in maintenance"
            )
elif isinstance(rep, VlobUpdateRepSequesterInconsistency):
raise VlobSequesterInconsistencyError(
sequester_authority_certificate=rep.sequester_authority_certificate,
sequester_services_certificates=rep.sequester_services_certificates,
)
elif isinstance(rep, VlobUpdateRepRejectedBySequesterService):
raise FSSequesterServiceRejectedError(
id=entry_id,
service_id=rep.service_id,
service_label=rep.service_label,
reason=rep.reason,
)
elif isinstance(rep, VlobUpdateRepTimeout):
raise FSServerUploadTemporarilyUnavailableError("Temporary failure during vlob upload")
elif not isinstance(rep, VlobUpdateRepOk):
raise FSError(f"Cannot update vlob {entry_id.hex}: {rep}")
def to_timestamped(self, timestamp: DateTime) -> "RemoteLoaderTimestamped":
return RemoteLoaderTimestamped(self, timestamp)
class RemoteLoaderTimestamped(RemoteLoader):
def __init__(self, remote_loader: RemoteLoader, timestamp: DateTime):
self.device = remote_loader.device
self.workspace_id = remote_loader.workspace_id
self.get_workspace_entry = remote_loader.get_workspace_entry
self.get_previous_workspace_entry = remote_loader.get_previous_workspace_entry
self.backend_cmds = remote_loader.backend_cmds
self.remote_devices_manager = remote_loader.remote_devices_manager
self.local_storage = remote_loader.local_storage.to_timestamped(timestamp)
self._realm_role_certificates_cache = None
self.timestamp = timestamp
self.event_bus = remote_loader.event_bus
async def upload_block(self, access: BlockAccess, data: bytes) -> None:
raise FSError("Cannot upload block through a timestamped remote loader")
async def load_manifest(
self,
entry_id: EntryID,
version: int | None = None,
timestamp: DateTime | None = None,
expected_backend_timestamp: DateTime | None = None,
workspace_entry: WorkspaceEntry | None = None,
) -> AnyRemoteManifest:
"""
        Allow manifests to be fetched at any timestamp, as needed by the
        versions method of both WorkspaceFS and WorkspaceFSTimestamped.
        Only one of the version and timestamp parameters can be specified at a time.
        expected_backend_timestamp enables checking a timestamp against the one
        returned by the backend.
Raises:
FSError
FSBackendOfflineError
FSWorkspaceInMaintenance
FSRemoteManifestNotFound
FSBadEncryptionRevision
FSWorkspaceNoAccess
"""
if timestamp is None and version is None:
timestamp = self.timestamp
return await super().load_manifest(
entry_id,
version=version,
timestamp=timestamp,
expected_backend_timestamp=expected_backend_timestamp,
workspace_entry=workspace_entry,
)
async def upload_manifest(
self,
entry_id: EntryID,
manifest: AnyRemoteManifest,
timestamp_greater_than: DateTime | None = None,
) -> AnyRemoteManifest:
raise FSError("Cannot upload manifest through a timestamped remote loader")
async def _vlob_create(
self,
encryption_revision: int,
entry_id: EntryID,
ciphered: bytes,
now: DateTime,
sequester_blob: dict[SequesterServiceID, bytes] | None,
) -> None:
raise FSError("Cannot create vlob through a timestamped remote loader")
async def _vlob_update(
self,
encryption_revision: int,
entry_id: EntryID,
ciphered: bytes,
now: DateTime,
version: int,
sequester_blob: dict[SequesterServiceID, bytes] | None,
) -> None:
raise FSError("Cannot update vlob through a timestamped remote loader")
if not TYPE_CHECKING and FEATURE_FLAGS["UNSTABLE_OXIDIZED_CLIENT_CONNECTION"]:
from parsec._parsec import UserRemoteLoader as RsUserRemoteLoader
UserRemoteLoader = RsUserRemoteLoader
else:
UserRemoteLoader = PyUserRemoteLoader
/certora-cli-alpha-mike-migration-script-20230516.16.51.788505.tar.gz/certora-cli-alpha-mike-migration-script-20230516.16.51.788505/certora_cli/EVMVerifier/certoraNodeFilters.py
from typing import Any, Dict
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from Shared.certoraUtils import NoValEnum
class NodeFilters:
class NodeType(NoValEnum):
def is_this_node_type(self, type_name_node: Dict[str, Any]) -> bool:
return type_name_node["nodeType"] == self.value
class TypeNameNode(NodeType):
ELEMENTARY = "ElementaryTypeName"
FUNCTION = "FunctionTypeName"
USER_DEFINED = "UserDefinedTypeName"
MAPPING = "Mapping"
ARRAY = "ArrayTypeName"
class UserDefinedTypeDefNode(NodeType):
ENUM = "EnumDefinition"
STRUCT = "StructDefinition"
VALUE_TYPE = "UserDefinedValueTypeDefinition"
CONTRACT = "ContractDefinition"
@staticmethod
def CERTORA_CONTRACT_NAME() -> str:
return "certora_contract_name"
@staticmethod
def is_enum_definition(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "EnumDefinition"
@staticmethod
def is_struct_definition(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "StructDefinition"
@staticmethod
def is_user_defined_value_type_definition(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "UserDefinedValueTypeDefinition"
@staticmethod
def is_contract_definition(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "ContractDefinition"
@staticmethod
def is_user_defined_type_definition(node: Dict[str, Any]) -> bool:
return NodeFilters.is_enum_definition(node) or NodeFilters.is_struct_definition(
node) or NodeFilters.is_user_defined_value_type_definition(node)
@staticmethod
def is_import(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "ImportDirective"
@staticmethod
def is_defined_in_a_contract_or_library(node: Dict[str, Any]) -> bool:
return NodeFilters.CERTORA_CONTRACT_NAME() in node
@staticmethod
def is_defined_in_contract(node: Dict[str, Any], contract_name: str) -> bool:
return node[NodeFilters.CERTORA_CONTRACT_NAME()] == contract_name
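# Hedged usage sketch (assumes a Solidity AST node dict as produced by the
# compiler, annotated with the Certora contract name key):
#
#     node = {"nodeType": "StructDefinition", "certora_contract_name": "Bank"}
#     NodeFilters.is_user_defined_type_definition(node)   # -> True
#     NodeFilters.is_defined_in_contract(node, "Bank")    # -> True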
/fasttest-1.0.0.tar.gz/fasttest-1.0.0/README.md
`fasttest` adds a keyword wrapping and parsing layer on top of `macaca`, `appium` and `selenium`: automated test cases are written in `yaml`, so even teammates with no coding background can quickly get started with automated testing.

#### What can it do
- Supports `IDEA` keyword auto-completion, so writing cases in a `yaml` file feels as smooth as writing code
- Supports real-time `debug` of test-case steps, with no need to re-run for verification
- Supports combining existing keywords and defining custom keywords, giving unlimited extensibility
- Supports the `PO` (Page Object) pattern and sharing a single set of cases between `iOS` and `Android`
- Supports `if`, `while`, `for` and similar syntax for building complex scenarios
- Supports `CLI` commands and `Jenkins` continuous integration
- Supports parallel execution on multiple devices
#### Demo ↓↓↓

For more, see [fasttest](https://www.yuque.com/jodeee/vt6gkg/oue9xb)
/openlmi-0.4.1.tar.gz/openlmi-0.4.1/lmi/providers/TimerManager.py
import ctypes
import threading
import Queue
from lmi.base import singletonmixin
from lmi.providers import cmpi_logging
LOG = cmpi_logging.get_logger(__name__)
class TimerException(Exception):
pass
class MonotonicClock(object):
"""
Monotonic clock, represented by clock_gettime() and CLOCK_MONOTONIC.
This clock is not influenced by NTP or administrator setting time or date.
"""
CLOCK_MONOTONIC = ctypes.c_int(1)
class timespec(ctypes.Structure):
_fields_ = [
("tv_sec", ctypes.c_long),
("tv_nsec", ctypes.c_long)]
def __init__(self):
libc = ctypes.CDLL("librt.so.1")
self._clock_gettime = libc.clock_gettime
def now(self):
"""
Return current time, i.e. float representing seconds with precision up
to nanoseconds (depends on glibc). The actual value of current time is
meaningless, it can be used only to measure time differences.
:returns: ``float`` with current time in seconds.
"""
t = MonotonicClock.timespec(0, 0)
ret = self._clock_gettime(self.CLOCK_MONOTONIC, ctypes.pointer(t))
if ret < 0:
raise TimerException("Cannot get clock time, clock_gettime() failed.")
return t.tv_sec + t.tv_nsec * 10 ** (-9)
class Timer(object):
"""
A class representing a timer. A timer has a timeout and after the timeout,
given callback is called and the timer is deleted.
"""
@cmpi_logging.trace_method
def __init__(self, timer_manager, name, callback=None, *args, **kwargs):
"""
Create a timer. If specified, given callback is registered.
The callback is called with *args and **kwargs.
:param timer_manager: (``TimerManager)`` Instance of the timer manager
which will manage the timer.
:param name: (``string``) Name of the timer, used for logging.
:param callback: (``function``) Callback to call when the timer expires.
:param *args, **kwargs: Parameters of the callback.
"""
self._mgr = timer_manager
self._name = name
self._callback = callback
self._args = args
self._kwargs = kwargs
LOG().trace_info("Timer: Timer %s created", name)
@cmpi_logging.trace_method
def set_callback(self, callback, *args, **kwargs):
"""
Set callback to call when the timer expires.
:param callback: (``function``) Callback to call when the timer expires.
:param *args, **kwargs: Parameters of the callback.
"""
self._callback = callback
self._args = args
self._kwargs = kwargs
@cmpi_logging.trace_method
def start(self, timeout):
"""
Start the timer with given timeout. After the timeout, the registered
callback will be called.
:param timeout: (``float``) Timeout in seconds.
"""
self._timeout = timeout
now = self._mgr.now()
self._end_time = now + timeout
LOG().trace_info("Timer: Timer %s started at %f for %f seconds",
self._name, now, self._timeout)
self._mgr._add_timer(self)
@cmpi_logging.trace_method
def cancel(self):
"""
        Cancel the timer. This method does not guarantee that the callback won't
        be called; the timer might be calling the callback right now.
"""
LOG().trace_info("Timer: Timer %s cancelled", self._name)
self._mgr._remove_timer(self)
@cmpi_logging.trace_method
def _expired(self, now):
"""
Returns True, if the timer is expired.
:param now: (``float``) Current time, as returned by MonotonicClock.now().
:returns: (``boolean``) ``True``, if the timer is expired.
"""
if self._end_time <= now:
LOG().trace_info("Timer: Timer %s has expired", self._name)
return True
return False
@cmpi_logging.trace_method
def _expire(self):
"""
Called when the timer expired. It calls the callback.
"""
LOG().trace_info("Timer: Calling callback for timer %s", self._name)
self._callback(*self._args, **self._kwargs)
class TimerManager(singletonmixin.Singleton):
"""
    Manages a set of timers.
    The standard Python Timer class creates a thread for each timer, which is
    inefficient. This class uses only one thread, which is registered at the
    CIMOM, i.e. it can log as usual.
    This class is a singleton; use TimerManager.get_instance() to get the
    instance.
Still, the singleton needs to be initialized with ProviderEnvironment to
enable logging in the timer thread. Use TimerManager.get_instance(env) in
    your provider initialization.
"""
# Commands to the timer thread
COMMAND_STOP = 1
COMMAND_RESCHEDULE = 2
@cmpi_logging.trace_method
def __init__(self, env=None):
"""
Initialize new thread manager.
:param env: (``ProviderEnvironment``) Environment to use for logging.
"""
self._clock = MonotonicClock()
self._lock = threading.RLock()
self._queue = Queue.Queue()
# Array of timers. Assumption: nr. of timers is relatively small,
# i.e. hundreds at the worst.
self._timers = []
new_broker = None
if env:
broker = env.get_cimom_handle()
new_broker = broker.PrepareAttachThread()
self._timer_thread = threading.Thread(
target=self._timer_loop, args=(new_broker,))
self._timer_thread.daemon = False
self._timer_thread.start()
def create_timer(self, name, callback=None, *args, **kwargs):
"""
Create new timer. If specified, given callback is registered.
The callback is called with *args and **kwargs.
:param name: (``string``) Name of the timer, used for logging.
:param callback: (``function``) Callback to call when the timer expires.
:param *args, **kwargs: Parameters of the callback.
"""
return Timer(self, name, callback, *args, **kwargs)
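    # Hedged usage sketch (assumes the singleton was already initialized with
    # TimerManager.get_instance(env) during provider initialization;
    # my_callback is a hypothetical function):
    #
    #     mgr = TimerManager.get_instance()
    #     timer = mgr.create_timer("poll", my_callback, "arg")
    #     timer.start(30.0)   # my_callback("arg") fires in ~30 seconds
    #     timer.cancel()      # best-effort cancellation (see Timer.cancel)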
def _timer_loop(self, broker):
"""
TimerManager thread main loop. It waits for timeout of all timers
and calls their callbacks.
:param broker: (``BrokerCIMOMHandle``) CIM broker handle, used for
logging.
"""
if broker:
broker.AttachThread()
LOG().info("Started Timer thread.")
while True:
self._handle_expired()
timeout = self._find_timeout()
if timeout != 0:
# Wait for the timeout or any change in timers.
try:
command = self._queue.get(timeout=timeout)
self._queue.task_done()
if command == self.COMMAND_STOP:
break # stop the thread
# process COMMAND_RESCHEDULE in next loop
except Queue.Empty:
# Timeout has happened, ignore the exception.
pass
LOG().info("Stopped Timer thread.")
@cmpi_logging.trace_method
def _handle_expired(self):
"""
Finds all expired timers, calls their callback and removes them from
list of timers.
"""
# Get list of expired timers.
with self._lock:
now = self.now()
LOG().trace_info("Timer: Checking for expired, now=%f.", now)
expired = [t for t in self._timers if t._expired(now)]
# Call the callbacks (unlocked!).
for t in expired:
t._expire()
# Remove the timers (locked).
with self._lock:
for t in expired:
try:
LOG().trace_info("Timer: Removing %s", t._name)
self._timers.remove(t)
except ValueError:
# The timer has already been removed.
pass
@cmpi_logging.trace_method
def _find_timeout(self):
"""
Return nearest timeout, in seconds (as float, i.e. subsecond timeout
is possible). If no timer is scheduled, None is returned.
If there are expired timers, 0 is returned.
:returns: Positive ``float``: Nearest timeout.
:returns: ``0``: Some timer has expired.
:returns: ``None``: No timer is scheduled.
"""
with self._lock:
if not self._timers:
LOG().trace_info("Timer: No timers scheduled, waiting forever.")
return None
closest = min(self._timers, key=lambda timer: timer._end_time)
now = self.now()
timeout = closest._end_time - now
if timeout > 0:
LOG().trace_info("Timer: Waiting for %f seconds, now=%f.",
timeout, now)
return timeout
LOG().trace_info(
"Timer: Some timer has already expired, no waiting.")
return 0
@cmpi_logging.trace_method
def _add_timer(self, timer):
"""
        Adds timer to the list of timers. The timer must be started, i.e. its
        timeout must be nonzero!
This is internal method called by Timer.start().
:param timer: (``Timer``) Timer to add.
"""
with self._lock:
self._timers.append(timer)
# Wake up the timer manager thread.
self._queue.put(self.COMMAND_RESCHEDULE)
LOG().trace_info("Timer: Timer %s added", timer._name)
@cmpi_logging.trace_method
def _remove_timer(self, timer):
"""
Remove timer from list of timers.
This is internal method called by Timer.cancel().
:param timer: (``Timer``) Timer to remove.
"""
with self._lock:
try:
self._timers.remove(timer)
except ValueError:
pass
# Wake up the timer manager thread.
self._queue.put(self.COMMAND_RESCHEDULE)
LOG().trace_info("Timer: Timer %s removed", timer._name)
def now(self):
"""
Return current time, not influenced by NTP or admin setting date or
time. The actual value of current time is meaningless, it can be used
only to measure time differences.
:returns: ``float`` Current time, in seconds.
"""
return self._clock.now()
@cmpi_logging.trace_method
def shutdown(self):
"""
Stop the thread. This method blocks until the thread is safely
destroyed.
"""
self._queue.put(self.COMMAND_STOP)
self._timer_thread.join()
if __name__ == "__main__":
LOG = cmpi_logging.CMPILogger("")
import time
class Env(object):
def AttachThread(self):
pass
def PrepareAttachThread(self):
return self
def get_cimom_handle(self):
return self
clock = MonotonicClock()
start = clock.now()
time.sleep(0.5)
print "Clock 0.5:", clock.now() - start
time.sleep(0.5)
print "Clock 1:", clock.now() - start
mgr = TimerManager.get_instance(Env())
def callback(msg):
if callback.first:
t = mgr.create_timer("internal 0.5")
t.set_callback(callback, "internal 0.5")
t.start(0.5)
callback.first = False
print clock.now(), msg
callback.first = True
t1 = mgr.create_timer("one second")
t1.set_callback(callback, "1")
t1.start(1)
t2 = mgr.create_timer("two seconds")
t2.set_callback(callback, "2")
t2.start(2)
t22 = mgr.create_timer("two seconds 2")
t22.set_callback(callback, "2 again")
t22.start(2)
t15 = mgr.create_timer("one+half seconds")
t15.set_callback(callback, "1.5")
t15.start(1.5)
time.sleep(4)
    mgr.shutdown()
/ONEmSDK-0.8.0.tar.gz/ONEmSDK-0.8.0/HISTORY.md
# History
---
## 0.8.0
- JSON Schema & Python API:
- make `FormItem.description` optional
- Rename `FormItem.confirmation_needed` to `FormItem.skip_confirmation`, which says the opposite and defaults to `false`
- Add `FormItem.pattern` attribute
- Add `FormItemType.regex` (used only with not null `FormItem.pattern`)
- HTML API:
- Rename `<form>`'s attribute `confirmation-needed` to `skip-confirmation`, which defaults to `false`
- Support new `<input>` standard attribute: `pattern`
- Bug fixes:
- `FormItem`s of type "form-menu" were having the options duplicated in description
---
## 0.7.0
- HTML API:
- Added new `<input>` types: "email", "location", "url".
- Python API:
- Added new `FormItem` types: "email", "location", "url".
- Added tests
- Bug fixes:
- input attribute `maxlength_error` was assigned to both
`FormItem.min_length_error` and `FormItem.max_length_error`
---
## 0.6.0
- Decreased the minimum Python version to 3.6.
- HTML API:
- Added a bunch of new attributes on `<input>`: `min`, `min_error`,
`minlength`, `minlength_error`, `max`, `max_error`, `maxlength`,
`maxlength_error`, `step`, `value`
- Added a bunch of new attributes on `<section>`: `chunking_footer`,
`confirmation_label`, `method`, `required`, `status_exclude`,
`status_prepend`, `url`, `validate_type_error`, `validate_type_error_footer`,
`validate_url`
- Added new input types: `number`, `hidden`
- Added `text-search` attribute on `<li>`
- Python API
- Removed `FormItemMenu` and `FormItemContent`. Use a single model instead -
`FormItem` which achieves the functionality of both old models
- A bunch of new properties were added on `FormItem`, taken from `<input>`
and `<section>` tags (see changes in HTML API above).
- Added `text_search` property on `MenuItemFormItem`
- Fixes:
- Fix some bad tests
---
## 0.5.0
- HTML API:
- Added `auto-select`, `multi-select` and `numbered` flags on `<section>`
tag. They take effect only if the `<section>` tag contains options
- Boolean attributes are evaluated according to HTML5 (if present, a boolean
attribute is true; if absent, it's false)
- Python API:
- Added `MenuMeta` and `FormItemMenuMeta` objects to describe `Menu` objects
and `FormItemMenu` objects respectively.
- `MenuMeta` can contain `auto_select`
- `FormItemMenuMeta` can contain `auto_select`, `multi_select` and `numbered`
- these attributes have origin in `<section>` tag
---
/apache-superset-johan078-0.35.2-patch8277.tar.gz/apache-superset-johan078-0.35.2/superset/migrations/versions/fb13d49b72f9_better_filters.py
import json
import logging
from alembic import op
from sqlalchemy import Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from superset import db
# revision identifiers, used by Alembic.
revision = "fb13d49b72f9"
down_revision = "de021a1ca60d"
Base = declarative_base()
class Slice(Base):
__tablename__ = "slices"
id = Column(Integer, primary_key=True)
params = Column(Text)
viz_type = Column(String(250))
slice_name = Column(String(250))
def upgrade_slice(slc):
params = json.loads(slc.params)
logging.info(f"Upgrading {slc.slice_name}")
cols = params.get("groupby")
metric = params.get("metric")
if cols:
flts = [
{
"column": col,
"metric": metric,
"asc": False,
"clearable": True,
"multiple": True,
}
for col in cols
]
params["filter_configs"] = flts
if "groupby" in params:
del params["groupby"]
if "metric" in params:
del params["metric"]
slc.params = json.dumps(params, sort_keys=True)
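# Hedged illustration of the transformation above (not part of the migration):
# a filter_box slice with params
#     {"groupby": ["region"], "metric": "count"}
# is rewritten to
#     {"filter_configs": [{"column": "region", "metric": "count",
#                          "asc": False, "clearable": True, "multiple": True}]}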
def upgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
filter_box_slices = session.query(Slice).filter_by(viz_type="filter_box")
for slc in filter_box_slices.all():
try:
upgrade_slice(slc)
except Exception as e:
logging.exception(e)
session.commit()
session.close()
def downgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
filter_box_slices = session.query(Slice).filter_by(viz_type="filter_box")
for slc in filter_box_slices.all():
try:
params = json.loads(slc.params)
logging.info(f"Downgrading {slc.slice_name}")
flts = params.get("filter_configs")
if not flts:
continue
params["metric"] = flts[0].get("metric")
params["groupby"] = [o.get("column") for o in flts]
slc.params = json.dumps(params, sort_keys=True)
except Exception as e:
logging.exception(e)
session.commit()
session.close()
/music_assistant-2.0.0b5.tar.gz/music_assistant-2.0.0b5/music_assistant/server/providers/url/__init__.py
from __future__ import annotations
import os
from collections.abc import AsyncGenerator
from music_assistant.common.models.enums import ContentType, ImageType, MediaType
from music_assistant.common.models.media_items import (
Artist,
MediaItemImage,
MediaItemType,
ProviderMapping,
Radio,
StreamDetails,
Track,
)
from music_assistant.server.helpers.audio import get_file_stream, get_http_stream, get_radio_stream
from music_assistant.server.helpers.playlists import fetch_playlist
from music_assistant.server.helpers.tags import AudioTags, parse_tags
from music_assistant.server.models.music_provider import MusicProvider
class URLProvider(MusicProvider):
    """Music Provider for manual URLs/files added to the queue."""
async def setup(self) -> None:
"""Handle async initialization of the provider.
Called when provider is registered.
"""
self._full_url = {}
async def get_track(self, prov_track_id: str) -> Track:
"""Get full track details by id."""
return await self.parse_item(prov_track_id)
async def get_radio(self, prov_radio_id: str) -> Radio:
"""Get full radio details by id."""
return await self.parse_item(prov_radio_id)
    async def get_artist(self, prov_artist_id: str) -> Artist:
"""Get full artist details by id."""
artist = prov_artist_id
# this is here for compatibility reasons only
return Artist(
artist,
self.domain,
artist,
provider_mappings={
ProviderMapping(artist, self.domain, self.instance_id, available=False)
},
)
async def get_item(self, media_type: MediaType, prov_item_id: str) -> MediaItemType:
"""Get single MediaItem from provider."""
if media_type == MediaType.ARTIST:
return await self.get_artist(prov_item_id)
if media_type == MediaType.TRACK:
return await self.get_track(prov_item_id)
if media_type == MediaType.RADIO:
return await self.get_radio(prov_item_id)
if media_type == MediaType.UNKNOWN:
return await self.parse_item(prov_item_id)
raise NotImplementedError
async def parse_item(self, item_id_or_url: str, force_refresh: bool = False) -> Track | Radio:
"""Parse plain URL to MediaItem of type Radio or Track."""
item_id, url, media_info = await self._get_media_info(item_id_or_url, force_refresh)
is_radio = media_info.get("icy-name") or not media_info.duration
if is_radio:
# treat as radio
media_item = Radio(
item_id=item_id,
provider=self.domain,
name=media_info.get("icy-name") or media_info.title,
)
else:
media_item = Track(
item_id=item_id,
provider=self.domain,
name=media_info.title,
duration=int(media_info.duration or 0),
artists=[await self.get_artist(artist) for artist in media_info.artists],
)
media_item.provider_mappings = {
ProviderMapping(
item_id=item_id,
provider_domain=self.domain,
provider_instance=self.instance_id,
content_type=ContentType.try_parse(media_info.format),
sample_rate=media_info.sample_rate,
bit_depth=media_info.bits_per_sample,
bit_rate=media_info.bit_rate,
)
}
if media_info.has_cover_image:
media_item.metadata.images = [MediaItemImage(ImageType.THUMB, url, True)]
return media_item
async def _get_media_info(
self, item_id_or_url: str, force_refresh: bool = False
) -> tuple[str, str, AudioTags]:
"""Retrieve (cached) mediainfo for url."""
# check if the radio stream is not a playlist
if (
item_id_or_url.endswith("m3u8")
or item_id_or_url.endswith("m3u")
or item_id_or_url.endswith("pls")
):
playlist = await fetch_playlist(self.mass, item_id_or_url)
url = playlist[0]
item_id = item_id_or_url
self._full_url[item_id] = url
elif "?" in item_id_or_url or "&" in item_id_or_url:
# store the 'real' full url to be picked up later
# this makes sure that we're not storing any temporary data like auth keys etc
# a request for an url mediaitem always passes here first before streamdetails
url = item_id_or_url
item_id = item_id_or_url.split("?")[0].split("&")[0]
self._full_url[item_id] = url
else:
url = self._full_url.get(item_id_or_url, item_id_or_url)
item_id = item_id_or_url
cache_key = f"{self.instance_id}.media_info.{item_id}"
# do we have some cached info for this url ?
cached_info = await self.mass.cache.get(cache_key)
if cached_info and not force_refresh:
media_info = AudioTags.parse(cached_info)
else:
# parse info with ffprobe (and store in cache)
media_info = await parse_tags(url)
if "authSig" in url:
media_info.has_cover_image = False
await self.mass.cache.set(cache_key, media_info.raw)
return (item_id, url, media_info)
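    # Hedged example of the id/url split above: for
    # "http://example.com/stream.mp3?authSig=abc" the stored item_id becomes
    # "http://example.com/stream.mp3", while the full signed url is kept in
    # self._full_url so later streamdetails lookups can resolve it again.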
async def get_stream_details(self, item_id: str) -> StreamDetails | None:
"""Get streamdetails for a track/radio."""
item_id, url, media_info = await self._get_media_info(item_id)
is_radio = media_info.get("icy-name") or not media_info.duration
return StreamDetails(
provider=self.domain,
item_id=item_id,
content_type=ContentType.try_parse(media_info.format),
media_type=MediaType.RADIO if is_radio else MediaType.TRACK,
sample_rate=media_info.sample_rate,
bit_depth=media_info.bits_per_sample,
direct=None if is_radio else url,
data=url,
)
async def get_audio_stream(
self, streamdetails: StreamDetails, seek_position: int = 0
) -> AsyncGenerator[bytes, None]:
"""Return the audio stream for the provider item."""
if streamdetails.media_type == MediaType.RADIO:
# radio stream url
async for chunk in get_radio_stream(self.mass, streamdetails.data, streamdetails):
yield chunk
elif os.path.isfile(streamdetails.data):
# local file
async for chunk in get_file_stream(
self.mass, streamdetails.data, streamdetails, seek_position
):
yield chunk
else:
# regular stream url (without icy meta)
async for chunk in get_http_stream(
self.mass, streamdetails.data, streamdetails, seek_position
):
yield chunk
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/services/ows/wps/processes/get_dem_processing.py
from uuid import uuid4
import json
import numpy as np
from eoxserver.core import Component
import eoxserver.render.browse.functions as functions
from eoxserver.contrib import gdal
from eoxserver.contrib.vsi import open as vsi_open
from eoxserver.services.ows.wps.parameters import (
LiteralData, ComplexData, FormatJSON, CDObject,
FormatBinaryRaw, FormatBinaryBase64, CDByteBuffer,
BoundingBoxData
)
from eoxserver.services.ows.wps.exceptions import InvalidInputValueError
from eoxserver.resources.coverages import models
from eoxserver.backends.access import gdal_open
from django.contrib.gis.geos import Polygon
import logging
logger = logging.getLogger(__name__)
class DemProcessingProcess(Component):
""" DemProcessing defines a WPS process that provides multiple
DEM processes """
identifier = "DemProcessing"
    title = "Dem Processing (hillshade, aspect, relief...) for coverage(s) that intersect with the input bbox"
    description = ("Provides processed results of all the coverages within a provided bounding box. "
                   "The process returns hillshade, aspect/ratio, slope and contour.")
metadata = {}
profiles = ['EOxServer:DemProcessing']
inputs = {
"coverage": LiteralData(
"coverage",
title="coverage identifier."),
"identifier": LiteralData(
"identifier",
optional=True,
title="identifier of the process to be implemented."
),
"bbox": BoundingBoxData(
"bbox",
            title="bounding box that intersects with the products."
),
"azimuth": LiteralData(
"azimuth",
optional=True,
title="azimuth of the light source",
abstract="Optional the azimuth of the light source, only for hillshade mode."
),
"altitude": LiteralData(
"altitude",
optional=True,
title="altitude of the light source",
abstract="Optional the altitude of the light source, only for hillshade mode."
),
"scale": LiteralData(
"scale",
optional=True,
title="Ratio of vertical units to horizontal.",
            abstract="Optional can be used to set the ratio of vertical units to horizontal, used for hillshade and slope"
),
"z_factor": LiteralData(
"z_factor",
optional=True,
title="Vertical exaggeration",
abstract="Optional Vertical exaggeration used to pre-multiply the elevations, only for hillshade mode."
),
"interval": LiteralData(
"interval",
optional=True,
title="Elevation interval between contours.",
abstract="Optional Elevation interval between contours., only for contour."
),
"algorithm": LiteralData(
"algorithm",
optional=True,
title="Dem Processing algorithm.",
abstract="Optional Dem Processing algorithm to be performed,it varies depending on the process."
),
}
outputs = {
"result": ComplexData(
"result",
title="output data",
abstract="Binary/geojson complex data output.",
formats=(
FormatBinaryRaw('image/png'),
FormatBinaryBase64('image/png'),
FormatBinaryRaw('image/jpeg'),
FormatBinaryBase64('image/jpeg'),
FormatBinaryRaw('image/tiff'),
FormatBinaryBase64('image/tiff'),
FormatJSON()
)
),
}
@staticmethod
def execute(coverage, identifier, bbox, result, z_factor, interval, scale, azimuth, altitude, algorithm):
""" The main execution function for the process.
"""
np_bbox = np.array(bbox)
flattened_bbox = np_bbox.flatten()
values = flattened_bbox.tolist()
data_format = "raster"
# output format selection
if result['mime_type'] == "image/png":
extension = "png"
driver = gdal.GetDriverByName("PNG")
elif result['mime_type'] == "image/jpeg":
extension = "jpg"
driver = gdal.GetDriverByName("JPEG")
elif result['mime_type'] == "image/tiff":
extension = "tif"
driver = gdal.GetDriverByName("GTiff")
else:
extension = "geojson"
data_format = "vector"
driver = gdal.GetDriverByName("GeoJSON")
# get the dataset series matching the requested ID
try:
model = models.Coverage.objects.get(
identifier=coverage)
except models.Coverage.DoesNotExist:
raise InvalidInputValueError(
"coverage", "Invalid coverage name '%s'!" % coverage
)
        data_items = model.arraydata_items.all()
        if len(data_items) > 1:
            raise InvalidInputValueError(
                "coverage", "coverage '%s' has more than one image; this process handles single images!" % coverage
            )
data_item = data_items[0]
original_ds = gdal_open(data_item, False)
# check if the provided box is compatible with the coverage
geoTransform = original_ds.GetGeoTransform()
minx = geoTransform[0]
maxy = geoTransform[3]
maxx = minx + geoTransform[1] * original_ds.RasterXSize
miny = maxy + geoTransform[5] * original_ds.RasterYSize
coverage_bbox = Polygon.from_bbox((minx, miny, maxx, maxy))
request_bbox = Polygon.from_bbox(values)
if coverage_bbox.contains(request_bbox):
values_bbox = coverage_bbox.intersection(request_bbox)
if values_bbox.area > 0:
values = list(values_bbox.extent)
else:
logger.error('The provided bbox is not inside or intersecting with the coverage')
output_filename = '/vsimem/%s.%s' % (uuid4().hex, extension)
tmp_ds = '/vsimem/%s.tif' % uuid4().hex
ds = gdal.Warp(tmp_ds, original_ds, dstSRS=original_ds.GetProjection(), outputBounds=values, format='Gtiff')
if identifier == 'hillshade':
args = [ds, z_factor, scale, azimuth, altitude, algorithm]
elif identifier == 'aspect':
args = [ds, False, False, algorithm]
elif identifier == 'slopeshade':
args = [ds, scale, algorithm]
elif identifier == 'contours':
interval = int(interval) if interval is not None else 100
args = [ds, 0, interval, -9999, data_format]
func = functions.get_function(identifier)
res_ds = func(*args)
out_ds = driver.CreateCopy(output_filename, res_ds, 0)
if extension == 'tif':
out_ds.SetGeoTransform(ds.GetGeoTransform())
out_ds.SetProjection(ds.GetProjection())
del out_ds
if extension == "geojson":
with vsi_open(output_filename) as f:
_output = CDObject(
json.load(f), format=FormatJSON(),
filename=("contours.json")
)
else:
with vsi_open(output_filename, 'rb') as fid:
_output = CDByteBuffer(
fid.read(), filename=output_filename,
)
if getattr(_output, 'mime_type', None) is None:
setattr(_output, 'mime_type', result['mime_type'])
gdal.Unlink(output_filename)
gdal.Unlink(tmp_ds)
return _output
/XStatic-DataTables-1.10.15.1.tar.gz/XStatic-DataTables-1.10.15.1/xstatic/pkg/datatables/data/js/dataTables.foundation.js
/*!
 * DataTables integration for Foundation. This requires Foundation 5 and
* DataTables 1.10 or newer.
*
* This file sets the defaults and adds options to DataTables to style its
* controls using Foundation. See http://datatables.net/manual/styling/foundation
* for further information.
*/
(function( factory ){
if ( typeof define === 'function' && define.amd ) {
// AMD
define( ['jquery', 'datatables.net'], function ( $ ) {
return factory( $, window, document );
} );
}
else if ( typeof exports === 'object' ) {
// CommonJS
module.exports = function (root, $) {
if ( ! root ) {
root = window;
}
if ( ! $ || ! $.fn.dataTable ) {
$ = require('datatables.net')(root, $).$;
}
return factory( $, root, root.document );
};
}
else {
// Browser
factory( jQuery, window, document );
}
}(function( $, window, document, undefined ) {
'use strict';
var DataTable = $.fn.dataTable;
// Detect Foundation 5 / 6 as they have different element and class requirements
var meta = $('<meta class="foundation-mq"/>').appendTo('head');
DataTable.ext.foundationVersion = meta.css('font-family').match(/small|medium|large/) ? 6 : 5;
meta.remove();
$.extend( DataTable.ext.classes, {
sWrapper: "dataTables_wrapper dt-foundation",
sProcessing: "dataTables_processing panel callout"
} );
/* Set the defaults for DataTables initialisation */
$.extend( true, DataTable.defaults, {
dom:
"<'row'<'small-6 columns'l><'small-6 columns'f>r>"+
"t"+
"<'row'<'small-6 columns'i><'small-6 columns'p>>",
renderer: 'foundation'
} );
/* Page button renderer */
DataTable.ext.renderer.pageButton.foundation = function ( settings, host, idx, buttons, page, pages ) {
var api = new DataTable.Api( settings );
var classes = settings.oClasses;
var lang = settings.oLanguage.oPaginate;
var aria = settings.oLanguage.oAria.paginate || {};
var btnDisplay, btnClass;
var tag;
var v5 = DataTable.ext.foundationVersion === 5;
var attach = function( container, buttons ) {
var i, ien, node, button;
var clickHandler = function ( e ) {
e.preventDefault();
if ( !$(e.currentTarget).hasClass('unavailable') && api.page() != e.data.action ) {
api.page( e.data.action ).draw( 'page' );
}
};
for ( i=0, ien=buttons.length ; i<ien ; i++ ) {
button = buttons[i];
if ( $.isArray( button ) ) {
attach( container, button );
}
else {
btnDisplay = '';
btnClass = '';
tag = null;
switch ( button ) {
case 'ellipsis':
btnDisplay = '…';
btnClass = 'unavailable disabled';
tag = null;
break;
case 'first':
btnDisplay = lang.sFirst;
btnClass = button + (page > 0 ?
'' : ' unavailable disabled');
tag = page > 0 ? 'a' : null;
break;
case 'previous':
btnDisplay = lang.sPrevious;
btnClass = button + (page > 0 ?
'' : ' unavailable disabled');
tag = page > 0 ? 'a' : null;
break;
case 'next':
btnDisplay = lang.sNext;
btnClass = button + (page < pages-1 ?
'' : ' unavailable disabled');
tag = page < pages-1 ? 'a' : null;
break;
case 'last':
btnDisplay = lang.sLast;
btnClass = button + (page < pages-1 ?
'' : ' unavailable disabled');
tag = page < pages-1 ? 'a' : null;
break;
default:
btnDisplay = button + 1;
btnClass = page === button ?
'current' : '';
tag = page === button ?
null : 'a';
break;
}
if ( v5 ) {
tag = 'a';
}
if ( btnDisplay ) {
node = $('<li>', {
'class': classes.sPageButton+' '+btnClass,
'aria-controls': settings.sTableId,
'aria-label': aria[ button ],
'tabindex': settings.iTabIndex,
'id': idx === 0 && typeof button === 'string' ?
settings.sTableId +'_'+ button :
null
} )
.append( tag ?
$('<'+tag+'/>', {'href': '#'} ).html( btnDisplay ) :
btnDisplay
)
.appendTo( container );
settings.oApi._fnBindAction(
node, {action: button}, clickHandler
);
}
}
}
};
attach(
$(host).empty().html('<ul class="pagination"/>').children('ul'),
buttons
);
};
return DataTable;
}));
/intel_tensorflow_avx512-2.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/tensorflow/python/ops/histogram_ops.py
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_assert
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
@tf_export('histogram_fixed_width_bins')
@dispatch.add_dispatch_support
def histogram_fixed_width_bins(values,
value_range,
nbins=100,
dtype=dtypes.int32,
name=None):
"""Bins the given values for use in a histogram.
Given the tensor `values`, this operation returns a rank 1 `Tensor`
representing the indices of a histogram into which each element
of `values` would be binned. The bins are equal width and
determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0],
values >= value_range[1] will be mapped to hist[-1].
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram_fixed_width').
Returns:
A `Tensor` holding the indices of the binned values whose shape matches
`values`.
Raises:
TypeError: If any unsupported dtype is provided.
tf.errors.InvalidArgumentError: If value_range does not
satisfy value_range[0] < value_range[1].
Examples:
>>> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
...
>>> nbins = 5
>>> value_range = [0.0, 5.0]
>>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
>>> indices = tf.histogram_fixed_width_bins(new_values, value_range, nbins=5)
>>> indices.numpy()
array([0, 0, 1, 2, 4, 4], dtype=int32)
"""
with ops.name_scope(name, 'histogram_fixed_width_bins',
[values, value_range, nbins]):
values = ops.convert_to_tensor(values, name='values')
shape = array_ops.shape(values)
values = array_ops.reshape(values, [-1])
value_range = ops.convert_to_tensor(value_range, name='value_range')
nbins = ops.convert_to_tensor(nbins, dtype=dtypes.int32, name='nbins')
check = control_flow_assert.Assert(
        math_ops.greater(nbins, 0), ['nbins %s must be > 0' % nbins])
nbins = control_flow_ops.with_dependencies([check], nbins)
nbins_float = math_ops.cast(nbins, values.dtype)
# Map tensor values that fall within value_range to [0, 1].
scaled_values = math_ops.truediv(
values - value_range[0],
value_range[1] - value_range[0],
name='scaled_values')
# map tensor values within the open interval value_range to {0,.., nbins-1},
# values outside the open interval will be zero or less, or nbins or more.
indices = math_ops.floor(nbins_float * scaled_values, name='indices')
# Clip edge cases (e.g. value = value_range[1]) or "outliers."
indices = math_ops.cast(
clip_ops.clip_by_value(indices, 0, nbins_float - 1), dtypes.int32)
return array_ops.reshape(indices, shape)
@tf_export('histogram_fixed_width')
@dispatch.add_dispatch_support
def histogram_fixed_width(values,
value_range,
nbins=100,
dtype=dtypes.int32,
name=None):
"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fell into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0],
values >= value_range[1] will be mapped to hist[-1].
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram_fixed_width').
Returns:
A 1-D `Tensor` holding histogram of values.
Raises:
TypeError: If any unsupported dtype is provided.
tf.errors.InvalidArgumentError: If value_range does not
satisfy value_range[0] < value_range[1].
Examples:
>>> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
...
>>> nbins = 5
>>> value_range = [0.0, 5.0]
>>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
>>> hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
>>> hist.numpy()
array([2, 1, 1, 0, 2], dtype=int32)
"""
with ops.name_scope(name, 'histogram_fixed_width',
[values, value_range, nbins]) as name:
# pylint: disable=protected-access
return gen_math_ops._histogram_fixed_width(
values, value_range, nbins, dtype=dtype, name=name)
# pylint: enable=protected-access
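# A minimal companion sketch (not part of the module; assumes TF 2.x eager
# mode): the histogram returned by `histogram_fixed_width` can be reproduced
# by bin-counting the indices from `histogram_fixed_width_bins`, reusing the
# docstring example values.
import tensorflow as tf

value_range = [0.0, 5.0]
new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15.0]

indices = tf.histogram_fixed_width_bins(new_values, value_range, nbins=5)
hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)

recomputed = tf.math.bincount(indices, minlength=5)
assert (recomputed.numpy() == hist.numpy()).all()  # both are [2, 1, 1, 0, 2]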
|
PypiClean
|
/klaviyo-sdk-beta-1.0.3.20220907.tar.gz/klaviyo-sdk-beta-1.0.3.20220907/src/openapi_client/model/included_variants_links.py
|
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from openapi_client.exceptions import ApiAttributeError
class IncludedVariantsLinks(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self; it must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; it must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'_self': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'_self': 'self', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, _self, *args, **kwargs): # noqa: E501
"""IncludedVariantsLinks - a model defined in OpenAPI
Args:
_self (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self._self = _self
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, _self, *args, **kwargs): # noqa: E501
"""IncludedVariantsLinks - a model defined in OpenAPI
Args:
_self (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self._self = _self
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/graphservices/v20230413/get_account.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
'get_account_output',
]
@pulumi.output_type
class GetAccountResult:
"""
Account details
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, system_data=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.AccountResourceResponseProperties':
"""
Property bag from billing account
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.AccountResourceResponseSystemData':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetAccountResult(GetAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_account(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
Returns account resource for a given name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:graphservices/v20230413:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_account)
def get_account_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccountResult]:
"""
Returns account resource for a given name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the resource.
"""
...
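# A minimal usage sketch inside a Pulumi program (assumes the usual package
# re-exports; the resource group and account names are placeholders):
import pulumi
from pulumi_azure_native.graphservices.v20230413 import get_account, get_account_output

account = get_account(resource_group_name="example-rg", resource_name="example-account")
pulumi.export("accountId", account.id)

# The *_output variant accepts unresolved Inputs and returns an Output:
account_out = get_account_output(resource_group_name="example-rg",
                                 resource_name="example-account")
pulumi.export("accountLocation", account_out.location)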
|
PypiClean
|
/pandas_utility-0.1.4.tar.gz/pandas_utility-0.1.4/pandas_utility/pandas_utility.py
|
import numpy as np
import pandas as pd
class PandasUtilities:
"""Some useful functions for dealing with Pandas DataFrames
"""
__version__ = pd.__version__
@staticmethod
def show_version():
return pd.show_versions()
@staticmethod
def create_random_df(rows, cols, cols_name=None):
"""Create a random dataframe with n-rows and n-columns
Parameters
----------
rows : int
number of rows
cols : int
number of columns
cols_name : list, optional
Column names; length must equal `cols`
Returns
-------
`pandas.core.frame.DataFrame`
Data frame containing random values
"""
if cols_name:
cols_name = list(cols_name)
assert len(cols_name) == cols
return pd.DataFrame(np.random.rand(rows, cols), columns=cols_name)
@staticmethod
def rename_cols(df, new_names=[], prefix=None, suffix=None):
"""Rename column names as well as add prefix and suffix
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
new_names : list, optional
list of new names matching the current length of cols
prefix : None, optional
Add prefix on column name
suffix : None, optional
Add suffix on column name
Returns
-------
`pandas.core.frame.DataFrame`
DataFrame with new column names.
"""
if new_names and (len(df.columns) == len(new_names)):
df.columns = new_names
df.columns = df.columns.str.replace(" ", "_")
if prefix:
df = df.add_prefix(prefix)
if suffix:
df = df.add_suffix(suffix)
return df
@staticmethod
def reverse_row_order(df, reset_index=False):
"""Reverse the order of the dataframe, and reset the indices (optional)
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
reset_index : bool, optional
Reset the index of the DataFrame to start at '0'
Returns
-------
`pandas.core.frame.DataFrame`
Reversed order of rows in DataFrame
"""
return df.loc[::-1].reset_index(drop=True) if reset_index else df.loc[::-1]
@staticmethod
def reverse_col_order(df):
"""Summary
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
Returns
-------
`pandas.core.frame.DataFrame`
Reversed order of cols in DataFrame
"""
return df.loc[:, ::-1]
@staticmethod
def select_by_datatype(df, include_datatype=[], exclude_datatype=[]):
"""
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
include_datatype : list, optional
A list containing data-type to include.
exclude_datatype : list, optional
A list containing data-type to exclude.
Returns
-------
`pandas.core.frame.DataFrame`
DataFrame containing included/excluded data-types
"""
return (
df.select_dtypes(include=include_datatype, exclude=exclude_datatype)
if include_datatype or exclude_datatype
else df
)
@staticmethod
def build_df_from_csvs(csv_files, axis, ignore_index=True):
"""Build a DataFrame from multiple files (row-wise)
Parameters
----------
csv_files : list
List of csv files
axis : int
Concatenate csv files according to columns or rows.
ignore_index : bool, optional
Resets indices
Returns
-------
`pandas.core.frame.DataFrame`
DataFrame containing data from CSV files(s)
"""
return pd.concat(
(pd.read_csv(file) for file in csv_files),
axis=axis,
ignore_index=ignore_index,
)
@staticmethod
def split_df_into_subsets(df, fraction=0.5, random_state=1234):
"""Return a random sample of items from an axis of object.
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
fraction : float, optional
Fraction of axis items to return.
Cannot be used with `n`.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int),
or numpy RandomState object.
Returns
-------
tuple
Two DataFrames: the sampled subset and the remaining rows
"""
df_1 = df.sample(frac=fraction, random_state=random_state)
df_2 = df.drop(df_1.index)
return df_1, df_2
@staticmethod
def filter_by_multiple_categories(df, column_name, filter_by=[], exclude=False):
"""Filter a DataFrame by multiple categories
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
column_name : str
Column name to filter
filter_by : list, optional
List of categories to filter
exclude : bool, optional
Exclude the filter
Returns
-------
`pandas.core.frame.DataFrame`
Rows whose `column_name` value is in `filter_by`
(or not in it, when `exclude` is True)
"""
_filtered = df[column_name].isin(filter_by)
return df[_filtered] if not exclude else df[~_filtered]
@staticmethod
def filter_by_large_categories(df, column_name, count=3):
"""Filter a DataFrame by largest categories
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
column_name : str
Column name to filter
count : int, optional
Number of largest values in the Series
Returns
-------
`pandas.core.frame.DataFrame`
Rows belonging to the `count` largest categories of `column_name`
"""
counts = df[column_name].value_counts()
_filtered = df[column_name].isin(counts.nlargest(count).index)
return df[_filtered]
@staticmethod
def drop_cols_with_NaNs(df, threshold=0.1):
"""Remove columns with missing values
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
threshold : float, optional
Keep only columns having at least this fraction of
non-missing values (i.e. drop a column when more than
1 - threshold of its values are missing).
Returns
-------
`pandas.core.frame.DataFrame`
DataFrame without NaN's
"""
return df.dropna(thresh=len(df) * threshold, axis="columns")
@staticmethod
def aggregate_by_functions(df, column_name, group_by, functions=["sum", "count"]):
"""Aggregate by multiple functions
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
column_name : str
Column whose values are aggregated
group_by : str
Column (or list of columns) to group by
functions : list, optional
Aggregation functions to apply, e.g. ["sum", "count"]
Returns
-------
`pandas.core.frame.DataFrame`
DataFrame
Example
-------
# Let's say you want the total price of each order as well as the
# number of items in each order.
>> Orders.head(5)
order_id | quantity | item_name | choice_description | item_price
1 | 1 | Chips and Fresh Tomato Salsa | NaN | 2.39
1 | 1 | Izze | [Clementine] | 3.39
1 | 1 | Nantucket Nectar | [Apple] | 3.39
1 | 1 | Chips and Tomatillo-Green Chili Salsa | NaN | 2.39
2 | 2 | Chicken Bowl | NaN | 16.98
>> aggregate_by_functions(orders, 'item_price', 'order_id').head()
order_id | sum | count
1 | 11.56 | 4
2 | 16.98 | 1
3 | 12.67 | 2
4 | 21.00 | 2
5 | 13.70 | 2
"""
return df.groupby(group_by)[column_name].agg(functions)
@staticmethod
def continuous_to_categorical_data(df, column_name, bins=[], labels=[]):
"""Convert a continuous column into labelled categorical bins
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
column_name : str
Column to discretize
bins : list
Bin edges passed to `pd.cut` (n edges define n - 1 intervals)
labels : list
Labels for the resulting intervals (length must be len(bins) - 1)
Returns
-------
`pandas.core.series.Series`
Categorical series with one label per row
Example
-------
>> data['age']
0 | 22.0
1 | 38.0
2 | 26.0
3 | 35.0
4 | 35.0
5 | NaN
6 | 54.0
7 | 2.0
8 | 27.0
9 | 14.0
Name: Age, dtype: float64
>> continuous_to_categorical_data(
data, 'age', bins=[0, 18, 25, 99], labels=['child', 'young adult', 'adult'])
0 | young adult
1 | adult
2 | adult
3 | adult
4 | adult
5 | NaN
6 | adult
7 | child
8 | adult
9 | child
Name: Age, dtype: category
Categories (3, object): [child < young adult < adult]
# This assigned each value to a bin with a label.
# Ages 0 to 18 were assigned the label "child", ages 18 to 25 were assigned the
# label "young adult", and ages 25 to 99 were assigned the label "adult".
"""
return pd.cut(df[column_name], bins=bins, labels=labels)
@staticmethod
def change_display_opt(
option="display.float_format", _format="{:.2f}".format, reset=False
):
"""Standardize the display of the DataFrame
Parameters
----------
option : str, optional
Regexp which should match a single option.
_format : str, optional
new formatting option
reset : bool, optional
Reset the option to its default value instead of setting it
Returns
-------
None
"""
return pd.set_option(option, _format) if not reset else pd.reset_option(option)
@staticmethod
def remove_rows_with_nan(df, column_name):
"""Remove all rows containing NaN values
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
column_name : str
Remove all the rows where column_name has NaN values.
Returns
-------
`pandas.core.frame.DataFrame`
DataFrame
"""
return df[pd.notna(df[column_name])]
@staticmethod
def col_to_datetime(df, column_name, date_format="%Y-%m-%d"):
"""
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
column_name : str
Column to convert to datetime
date_format : str, optional
Expected date format, passed to `pd.to_datetime`
Returns
-------
`pandas.core.series.Series`
Series of datetime values
"""
return pd.to_datetime(df[column_name], format=date_format)
@staticmethod
def binning_column_by_group_names(
df,
column_name,
num_samples,
group_names=["Low", "Medium", "High"],
include_lowest=True,
):
"""
Parameters
----------
df : pandas.core.frame.DataFrame
Two-dimensional size-mutable,
potentially heterogeneous tabular data
column_name : str
Column to bin
num_samples : int
Number of evenly spaced bin edges generated with
`np.linspace` (must be len(group_names) + 1)
group_names : list, optional
Labels for the resulting bins
include_lowest : bool, optional
Whether the first interval should be left-inclusive
Returns
-------
`pandas.core.series.Series`
Binned categorical series
"""
bins = np.linspace(min(df[column_name]), max(df[column_name]), num_samples)
return pd.cut(
df[column_name], bins, labels=group_names, include_lowest=include_lowest
)
@staticmethod
def open_google_sheet(token):
"""Google Spreadsheet CSV into A Pandas Dataframe
Parameters
----------
token : str
Google Spreadsheet token ID
Returns
-------
`pandas.core.frame.DataFrame`
DataFrame
"""
url = 'https://docs.google.com/spreadsheets/d/{}/export?format=csv'.format(token)
return pd.read_csv(url)
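# A short usage sketch of a few of the helpers above (column names and the
# split fraction are illustrative):
from pandas_utility.pandas_utility import PandasUtilities as pu

df = pu.create_random_df(5, 3, cols_name=["a", "b", "c"])
df = pu.rename_cols(df, prefix="col_")                # -> col_a, col_b, col_c
first, rest = pu.split_df_into_subsets(df, fraction=0.8)
reversed_df = pu.reverse_row_order(df, reset_index=True)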
|
PypiClean
|
/dash_grocery-0.0.6.tar.gz/dash_grocery-0.0.6/dash_grocery/Masonry.py
|
from dash.development.base_component import Component, _explicitize_args
class Masonry(Component):
"""A Masonry component.
Wrapped from [react-masonry-component](https://github.com/eiriklv/react-masonry-component).
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
children.
- id (string; optional):
The ID used to identify this component in Dash callbacks.
- class_name (string; optional):
Often used with CSS to style elements with common properties.
- disableImagesLoaded (boolean; optional):
default False.
- elementType (string; optional):
default 'div'.
- enableResizableChildren (boolean; optional):
enableResizableChildren.
- imagesLoadedOptions (dict; optional):
default {}.
- options (dict; optional):
masonry options.
`options` is a dict with keys:
- columnWidth (number | string; optional):
Aligns items to a horizontal grid.
- containerStyle (dict; optional):
CSS styles that are applied to the container element.
- fitWidth (boolean; optional):
Sets the width of the container to fit the available number of
columns, based the size of container's parent element. When
enabled, you can center the container with CSS.
- gutter (number; optional):
Adds horizontal space between item elements.
- horizontalOrder (boolean; optional):
Lays out items to (mostly) maintain horizontal left-to-right
order.
- initLayout (boolean; optional):
Enables layout on initialization. Enabled by default
initLayout: True.
- itemSelector (string; optional):
Specifies which child elements will be used as item elements
in the layout.
- originLeft (boolean; optional):
Controls the horizontal flow of the layout. By default, item
elements start positioning at the left, with originLeft: True.
Set originLeft: False for right-to-left layouts.
- originTop (boolean; optional):
Controls the vertical flow of the layout. By default, item
elements start positioning at the top, with originTop: True.
Set originTop: False for bottom-up layouts.
- percentPosition (boolean; optional):
Sets item positions in percent values, rather than pixel
values. percentPosition: True works well with percent-width
items, as items will not transition their position on resize.
- resize (boolean; optional):
Adjusts sizes and positions when window is resized. Enabled by
default resize: True.
- stagger (number | string; optional):
Staggers item transitions, so items transition incrementally
after one another. Set as a CSS time format, '0.03s', or as a
number in milliseconds, 30.
- stamp (string; optional):
Specifies which elements are stamped within the layout.
Masonry will layout items below stamped elements.
- transitionDuration (number | string; optional):
Duration of the transition when items change position or
appearance, set in a CSS time format. Default:
transitionDuration: '0.4s'.
- style (dict; optional):
style.
- updateOnEachImageLoad (boolean; optional):
default False and works only if disableImagesLoaded is False."""
_children_props = []
_base_nodes = ['children']
_namespace = 'dash_grocery'
_type = 'Masonry'
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, class_name=Component.UNDEFINED, elementType=Component.UNDEFINED, options=Component.UNDEFINED, disableImagesLoaded=Component.UNDEFINED, updateOnEachImageLoad=Component.UNDEFINED, imagesLoadedOptions=Component.UNDEFINED, onImagesLoaded=Component.UNDEFINED, enableResizableChildren=Component.UNDEFINED, onLayoutComplete=Component.UNDEFINED, onRemoveComplete=Component.UNDEFINED, style=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'class_name', 'disableImagesLoaded', 'elementType', 'enableResizableChildren', 'imagesLoadedOptions', 'options', 'style', 'updateOnEachImageLoad']
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'class_name', 'disableImagesLoaded', 'elementType', 'enableResizableChildren', 'imagesLoadedOptions', 'options', 'style', 'updateOnEachImageLoad']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs and excess named props
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Masonry, self).__init__(children=children, **args)
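# A minimal Dash usage sketch (assumes `dash` and `dash_grocery` are installed
# and that the package exports Masonry at top level):
from dash import Dash, html
from dash_grocery import Masonry

app = Dash(__name__)
app.layout = Masonry(
    id="wall",
    options={"columnWidth": 200, "gutter": 10},
    children=[html.Div(f"card {i}", style={"width": 200}) for i in range(6)],
)

if __name__ == "__main__":
    app.run(debug=True)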
|
PypiClean
|
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/google/protobuf/wrappers_pb2.py
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/wrappers.proto',
package='google.protobuf',
syntax='proto3',
serialized_options=_b('\n\023com.google.protobufB\rWrappersProtoP\001Z*github.com/golang/protobuf/ptypes/wrappers\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'),
serialized_pb=_b('\n\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\"\x1b\n\nFloatValue\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1b\n\nInt64Value\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1c\n\x0bUInt64Value\x12\r\n\x05value\x18\x01 \x01(\x04\"\x1b\n\nInt32Value\x12\r\n\x05value\x18\x01 \x01(\x05\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1b\n\nBytesValue\x12\r\n\x05value\x18\x01 \x01(\x0c\x42|\n\x13\x63om.google.protobufB\rWrappersProtoP\x01Z*github.com/golang/protobuf/ptypes/wrappers\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
)
_DOUBLEVALUE = _descriptor.Descriptor(
name='DoubleValue',
full_name='google.protobuf.DoubleValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.DoubleValue.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=79,
)
_FLOATVALUE = _descriptor.Descriptor(
name='FloatValue',
full_name='google.protobuf.FloatValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.FloatValue.value', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=81,
serialized_end=108,
)
_INT64VALUE = _descriptor.Descriptor(
name='Int64Value',
full_name='google.protobuf.Int64Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Int64Value.value', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=137,
)
_UINT64VALUE = _descriptor.Descriptor(
name='UInt64Value',
full_name='google.protobuf.UInt64Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.UInt64Value.value', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=167,
)
_INT32VALUE = _descriptor.Descriptor(
name='Int32Value',
full_name='google.protobuf.Int32Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Int32Value.value', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=169,
serialized_end=196,
)
_UINT32VALUE = _descriptor.Descriptor(
name='UInt32Value',
full_name='google.protobuf.UInt32Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.UInt32Value.value', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=198,
serialized_end=226,
)
_BOOLVALUE = _descriptor.Descriptor(
name='BoolValue',
full_name='google.protobuf.BoolValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.BoolValue.value', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=228,
serialized_end=254,
)
_STRINGVALUE = _descriptor.Descriptor(
name='StringValue',
full_name='google.protobuf.StringValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.StringValue.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=256,
serialized_end=284,
)
_BYTESVALUE = _descriptor.Descriptor(
name='BytesValue',
full_name='google.protobuf.BytesValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.BytesValue.value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=286,
serialized_end=313,
)
DESCRIPTOR.message_types_by_name['DoubleValue'] = _DOUBLEVALUE
DESCRIPTOR.message_types_by_name['FloatValue'] = _FLOATVALUE
DESCRIPTOR.message_types_by_name['Int64Value'] = _INT64VALUE
DESCRIPTOR.message_types_by_name['UInt64Value'] = _UINT64VALUE
DESCRIPTOR.message_types_by_name['Int32Value'] = _INT32VALUE
DESCRIPTOR.message_types_by_name['UInt32Value'] = _UINT32VALUE
DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE
DESCRIPTOR.message_types_by_name['StringValue'] = _STRINGVALUE
DESCRIPTOR.message_types_by_name['BytesValue'] = _BYTESVALUE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DoubleValue = _reflection.GeneratedProtocolMessageType('DoubleValue', (_message.Message,), {
'DESCRIPTOR' : _DOUBLEVALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.DoubleValue)
})
_sym_db.RegisterMessage(DoubleValue)
FloatValue = _reflection.GeneratedProtocolMessageType('FloatValue', (_message.Message,), {
'DESCRIPTOR' : _FLOATVALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.FloatValue)
})
_sym_db.RegisterMessage(FloatValue)
Int64Value = _reflection.GeneratedProtocolMessageType('Int64Value', (_message.Message,), {
'DESCRIPTOR' : _INT64VALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Int64Value)
})
_sym_db.RegisterMessage(Int64Value)
UInt64Value = _reflection.GeneratedProtocolMessageType('UInt64Value', (_message.Message,), {
'DESCRIPTOR' : _UINT64VALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.UInt64Value)
})
_sym_db.RegisterMessage(UInt64Value)
Int32Value = _reflection.GeneratedProtocolMessageType('Int32Value', (_message.Message,), {
'DESCRIPTOR' : _INT32VALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Int32Value)
})
_sym_db.RegisterMessage(Int32Value)
UInt32Value = _reflection.GeneratedProtocolMessageType('UInt32Value', (_message.Message,), {
'DESCRIPTOR' : _UINT32VALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.UInt32Value)
})
_sym_db.RegisterMessage(UInt32Value)
BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), {
'DESCRIPTOR' : _BOOLVALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.BoolValue)
})
_sym_db.RegisterMessage(BoolValue)
StringValue = _reflection.GeneratedProtocolMessageType('StringValue', (_message.Message,), {
'DESCRIPTOR' : _STRINGVALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.StringValue)
})
_sym_db.RegisterMessage(StringValue)
BytesValue = _reflection.GeneratedProtocolMessageType('BytesValue', (_message.Message,), {
'DESCRIPTOR' : _BYTESVALUE,
'__module__' : 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.BytesValue)
})
_sym_db.RegisterMessage(BytesValue)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
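# A minimal usage sketch of the generated wrapper messages:
from google.protobuf import wrappers_pb2

v = wrappers_pb2.Int32Value(value=42)
data = v.SerializeToString()                      # protobuf wire-format bytes
roundtrip = wrappers_pb2.Int32Value.FromString(data)
assert roundtrip.value == 42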
|
PypiClean
|
/pvpcbill-1.0.0.tar.gz/pvpcbill-1.0.0/README.md
|
[](https://pypi.org/project/pvpcbill/)
[](https://pypi.org/project/pvpcbill/)
[](https://travis-ci.org/azogue/pvpcbill)
[](https://codecov.io/gh/azogue/pvpcbill)
# pvpcbill
**Electrical billing simulation for small consumers in Spain using PVPC** (electricity hourly prices).
It uses **[`aiopvpc`](https://github.com/azogue/aiopvpc)** to download PVPC data, and _the usual suspects_ ([`pandas`](https://pandas.pydata.org) & [`matplotlib`](https://matplotlib.org)) to deal with time-series data and plotting.
<span class="badge-buymeacoffee"><a href="https://www.buymeacoffee.com/azogue" title="Donate to this project using Buy Me A Coffee"><img src="https://img.shields.io/badge/buy%20me%20a%20coffee-donate-yellow.svg" alt="Buy Me A Coffee donate button" /></a></span>
## Install
Install from pypi with **`pip install pvpcbill`**, or clone it to run tests or anything else ;-)
## Usage
From a _jupyter notebook_, just call the `create_bill` async helper to instantiate a new 'bill' object:
```python
from pvpcbill import create_bill
# Direct creation of the bill
factura = await create_bill(
path_csv_consumo="/path/to/elec_data/consumo_facturado18_02_2020-18_03_2020-R.csv",
potencia_contratada=4.6, # kW
tipo_peaje="NOC", # GEN / NOC / VHC
zona_impuestos="IVA", # IVA / IGIC / IPSI
)
print(factura)
```
**Note:** If using it from a non-async script,
use `asyncio.run(create_bill(**params))` to run the async coroutine, as shown below.
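For instance (a minimal sketch; the CSV path is a placeholder):
```python
import asyncio

from pvpcbill import create_bill

factura = asyncio.run(
    create_bill(
        path_csv_consumo="/path/to/elec_data/consumo.csv",
        potencia_contratada=4.6,
        tipo_peaje="NOC",
        zona_impuestos="IVA",
    )
)
print(factura)
```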
_Output:_
```text
FACTURA ELÉCTRICA:
--------------------------------------------------------------------------------
* CUPS ES0012345678901234SN
* Fecha inicio 17/02/2020
* Fecha final 18/03/2020
* Peaje de acceso 2.0DHA (Nocturna)
* Potencia contratada 4.60 kW
* Consumo periodo 472.93 kWh
* ¿Bono Social? No
* Equipo de medida 0.80 €
* Impuestos Península y Baleares (IVA)
* Días facturables 30
--------------------------------------------------------------------------------
- CÁLCULO DEL TÉRMINO FIJO POR POTENCIA CONTRATADA:
Peaje acceso potencia:
4.60 kW x 0.103944 €/kW/día x 30 días (366/2020) = 14.34 €
Comercialización:
4.60 kW x 0.008505 €/kW/día x 30 días (366/2020) = 1.17 €
==> Término fijo 15.51 €
- CÁLCULO DEL TÉRMINO VARIABLE POR ENERGÍA CONSUMIDA (TARIFA 2.0DHA):
Periodo 1: 0.111867 €/kWh ---> 19.02€(P1)
- Peaje de acceso: 170 kWh * 0.062012 €/kWh = 10.54€
- Coste de la energía: 170 kWh * 0.049855 €/kWh = 8.48€
Periodo 2: 0.045617 €/kWh ---> 13.82€(P2)
- Peaje de acceso: 303 kWh * 0.002215 €/kWh = 0.67€
- Coste de la energía: 303 kWh * 0.043402 €/kWh = 13.15€
==> Término de consumo 32.84 €
- IMPUESTO ELÉCTRICO:
5.11269632% x (15.51€ + 32.84€ = 48.35€) 2.47 €
==> Subtotal 50.82 €
- EQUIPO DE MEDIDA:
30 días x 0.026667 €/día 0.80 €
==> Importe total 51.62 €
- IVA O EQUIVALENTE:
21% de 51.62€ 10.84 €
################################################################################
# TOTAL FACTURA 62.46 €
################################################################################
Consumo medio diario en el periodo facturado: 2.08 €/día
```
But there is much more:
```python
# Reparto de costes en la factura
p_imp = (
+ factura.data.termino_impuesto_electrico
+ factura.data.termino_equipo_medida
+ factura.data.termino_iva_total
) / factura.data.total
p_ener = factura.data.termino_variable_total / factura.data.total
p_pot = factura.data.termino_fijo_total / factura.data.total
print(
f"El coste de la factura se reparte en:\n "
f"* un {100*p_ener:.1f} % por energía consumida,\n "
f"* un {100*p_pot:.1f} % por potencia contratada,\n "
f"* un {100*p_imp:.1f} % por impuestos aplicados\n\n"
)
print(factura.data.to_json())
```
_Output:_
```text
El coste de la factura se reparte en:
* un 52.6 % por energía consumida,
* un 24.8 % por potencia contratada,
* un 22.6 % por impuestos aplicados
```
```json
{
"config": {
"tipo_peaje": "NOC",
"potencia_contratada": 4.6,
"con_bono_social": false,
"zona_impuestos": "IVA",
"alquiler_anual": 9.72,
"impuesto_electrico": 0.0511269632,
"cups": "ES0012345678901234SN"
},
"num_dias_factura": 30,
"start": "2020-02-17 00:00:00",
"end": "2020-03-18 00:00:00",
"periodos_fact": [
{
"billed_days": 30,
"year": 2020,
"termino_fijo_peaje_acceso": 14.34,
"termino_fijo_comercializacion": 1.17,
"termino_fijo_total": 15.51,
"energy_periods": [
{
"name": "P1",
"coste_peaje_acceso_tea": 10.544458468,
"coste_energia_tcu": 8.477372039999999,
"energia_total": 170.03900000000002
},
{
"name": "P2",
"coste_peaje_acceso_tea": 0.67090578,
"coste_energia_tcu": 13.146024950000003,
"energia_total": 302.892
}
]
}
],
"descuento_bono_social": 0.0,
"termino_impuesto_electrico": 2.47,
"termino_equipo_medida": 0.8,
"termino_iva_gen": 10.6722,
"termino_iva_medida": 0.168,
"termino_iva_total": 10.84,
"total": 62.46
}
```
### Examples
- [Quick example to simulate a bill (jupyter notebook)](Notebooks/Ejemplo%20rápido.ipynb)
- [Detailed example to simulate a bill (jupyter notebook)](Notebooks/Ejemplo%20simulación%20de%20facturación%20eléctrica%20con%20PVPC.ipynb)
|
PypiClean
|
/Miscoto-3.1.2.tar.gz/Miscoto-3.1.2/miscoto/miscoto_mincom.py
|
# Copyright (C) 2018-2021 Clémence Frioux & Arnaud Belcour - Inria Dyliss - Pleiade
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import argparse
import sys
import os
import time
import logging
from miscoto import query, sbml, commons, utils
from os import listdir
from os.path import isfile, join
from clyngor.as_pyasp import TermSet, Atom
from xml.etree.ElementTree import ParseError
logger = logging.getLogger(__name__)
###############################################################################
pusage = """
**1** from SBML files
miscoto mincom [-m host.sbml] -b symbiont_directory -s seeds.sbml -t targets.sbml -o option [--intersection] [--union] [--enumeration] [--optsol] [--output]
\n
**2** from a pre-computed instance with possibly (additional) seeds or targets
miscoto mincom -a instance.lp -o option [-s seeds.sbml] [-t targets.sbml] [--intersection] [--union] [--enumeration] [--optsol] [--output]
\n
Option -o is either 'soup' or 'minexch' depending on the wanted modeling method
\n
"""
###############################################################################
def run_mincom(option=None, bacteria_dir=None, lp_instance_file=None, targets_file=None, seeds_file=None, host_file=None,
intersection=False, enumeration=False, union=False, optsol=False, output_json=None):
"""Computes community selections in microbiota
option (str, optional): Defaults to None. Modeling type: 'soup' for uncompartmentalized, 'minexch' for compartmentalized
bacteria_dir (str, optional): Defaults to None. directory with symbionts metabolic networks
lp_instance_file (str, optional): Defaults to None. ASP instance file
targets_file (str, optional): Defaults to None. targets file
seeds_file (str, optional): Defaults to None. seeds file
host_file (str, optional): Defaults to None. host metabolic network file
intersection (bool, optional): Defaults to False. compute intersection of solutions
enumeration (bool, optional): Defaults to False. compute enumeration of solutions
union (bool, optional): Defaults to False. compute union of solutions
optsol (bool, optional): Defaults to False. compute one optimal solution
output_json (str, optional): Defaults to None. path of a JSON file to export the results
"""
start_time = time.time()
results = {}
# checking option
if option == "soup":
encoding = commons.ASP_SRC_TOPO_SOUP
elif option == "minexch" and host_file == None:
# Check if there is an ASP instance file.
if lp_instance_file:
if not os.path.isfile(lp_instance_file) :
logger.critical('Instance file not found')
sys.exit(1)
with open(lp_instance_file, "r") as f:
draft_in_file = [line for line in f if 'draft' in line]
# Check if there is an host in the ASP instance file.
if len(draft_in_file) == 0:
encoding = commons.ASP_SRC_TOPO_RXN_MIN_EXCH_NOHOST
else:
encoding = commons.ASP_SRC_TOPO_RXN_MIN_EXCH
else:
encoding = commons.ASP_SRC_TOPO_RXN_MIN_EXCH_NOHOST
elif option == "minexch" and host_file != None:
encoding = commons.ASP_SRC_TOPO_RXN_MIN_EXCH
else:
logger.critical("invalid option choice")
logger.info(pusage)
sys.exit(1)
# case 1: instance is provided, just read targets and seeds if given
if lp_instance_file:
if not os.path.isfile(lp_instance_file) :
logger.critical('Instance file not found')
sys.exit(1)
delete_lp_instance = False
logger.info(
"Instance provided, only seeds and targets will be added if given")
if targets_file:
logger.info('Reading targets from ' + targets_file)
try:
targetsfacts = sbml.readSBMLspecies_clyngor(targets_file, 'target')
except FileNotFoundError:
logger.critical('Targets file not found')
sys.exit(1)
except ParseError:
logger.critical("Invalid syntax in SBML file: "+targets_file)
sys.exit(1)
else:
targetsfacts = TermSet()
if seeds_file:
logger.info('Reading seeds from ' + seeds_file)
try:
seedsfacts = sbml.readSBMLspecies_clyngor(seeds_file, 'seed')
except FileNotFoundError:
logger.critical('Seeds file not found')
sys.exit(1)
except ParseError:
logger.critical("Invalid syntax in SBML file: "+seeds_file)
sys.exit(1)
else:
seedsfacts = TermSet()
with open(lp_instance_file, "a") as f:
for elem in targetsfacts:
f.write(str(elem) + '.\n')
for elem in seedsfacts:
f.write(str(elem) + '.\n')
# case 2: read inputs from SBML files
elif bacteria_dir and seeds_file and targets_file:
if not os.path.isdir(bacteria_dir):
logger.critical("Symbiont directory not found")
sys.exit(1)
delete_lp_instance = True
if host_file:
logger.info('Reading host network from ' + host_file)
try:
draftnet = sbml.readSBMLnetwork_symbionts_clyngor(host_file, 'host_metab_mod')
except FileNotFoundError:
logger.critical('Host file not found')
sys.exit(1)
except ParseError:
logger.critical("Invalid syntax in SBML file: "+host_file)
sys.exit(1)
draftnet.add(Atom('draft', ["\"" + 'host_metab_mod' + "\""]))
else:
logger.warning('No host provided')
draftnet = TermSet()
draftnet.add(Atom('draft', ["\"" + 'host_metab_mod' + "\""]))
logger.info('Reading seeds from ' + seeds_file)
try:
seeds = sbml.readSBMLspecies_clyngor(seeds_file, 'seed')
except FileNotFoundError:
logger.critical('Seeds file not found')
sys.exit(1)
except ParseError:
logger.critical("Invalid syntax in SBML file: "+seeds_file)
sys.exit(1)
lp_instance = TermSet(draftnet.union(seeds))
logger.info('Reading targets from '+ targets_file)
try:
targets = sbml.readSBMLspecies_clyngor(targets_file, 'target')
except FileNotFoundError:
logger.critical('Targets file not found')
sys.exit(1)
except ParseError:
logger.critical("Invalid syntax in SBML file: "+targets_file)
sys.exit(1)
lp_instance = TermSet(lp_instance.union(targets))
lp_instance_file = utils.to_file(lp_instance)
logger.info('Reading bacterial networks from ' + bacteria_dir + '...')
onlyfiles = [f for f in listdir(bacteria_dir) if isfile(join(bacteria_dir, f))]
if len(onlyfiles) == 0:
logger.critical('No bacterial networks in ' + bacteria_dir)
sys.exit(1)
for bacteria_file in onlyfiles:
name = os.path.splitext(bacteria_file)[0]
bacteria_path = os.path.join(bacteria_dir, bacteria_file)
try:
one_bact_model = sbml.readSBMLnetwork_symbionts_clyngor(bacteria_path, name)
one_bact_model.add(Atom('bacteria', ["\"" + name + "\""]))
utils.to_file(one_bact_model, lp_instance_file)
logger.info('Done for ' + name)
except Exception:
logger.warning('Could not read file ' + name + ', will ignore it')
else:
logger.critical(
"Missing input: provide an instance file or symbionts/targets/seeds files")
logger.info(pusage)
sys.exit(1)
if not optsol and not union and not enumeration and not intersection:
logger.info(
"No choice of solution provided. Will compute one optimal solution by default"
)
optsol = True
logger.info('\nFinding optimal communities for target production...')
# Ground the instance with the selected encoding
grounded_instance = query.get_grounded_communities_from_file(lp_instance_file, encoding)
# one solution
if optsol:
logger.info('\n*** ONE MINIMAL SOLUTION ***')
one_model = query.get_communities_from_g(grounded_instance)
score = one_model[1]
optimum = ','.join(map(str, score))
one_model = one_model[0]
still_unprod = []
bacteria = []
newly_prod = []
prod_targets = []
exchanged = {}
target_producers = {}
for pred in one_model:
if pred == 'unproducible_target':
for a in one_model[pred, 1]:
still_unprod.append(a[0])
elif pred == 'newly_producible_target':
for a in one_model[pred, 1]:
newly_prod.append(a[0])
elif pred == 'producible_target':
for a in one_model[pred, 1]:
prod_targets.append(a[0])
elif pred == 'chosen_bacteria':
for a in one_model[pred, 1]:
bacteria.append(a[0])
elif pred == 'exchanged':
for a in one_model[pred, 4]:
if (a[2], a[3]) in exchanged: #exchanged[(from,to)]=[(what,compartto);(what,compartto)]
exchanged[(a[2], a[3])].append(a[0])
else:
exchanged[(a[2], a[3])] = []
exchanged[(a[2], a[3])].append(a[0])
elif pred == 'target_producer_coop_selectedcom':
for a in one_model[pred, 2]:
if not a[1] in target_producers:
target_producers[a[1]] = [a[0]]
else:
target_producers[a[1]].append(a[0])
logger.info(str(len(newly_prod)) + ' newly producible target(s):')
logger.info("\n".join(newly_prod))
logger.info('Still ' + str(len(still_unprod)) + ' unproducible target(s):')
logger.info("\n".join(still_unprod))
logger.info('Minimal set of bacteria of size ' + str(len(bacteria)))
logger.info("\n".join(bacteria))
if len(exchanged) >= 1:
logger.info('Minimal set of exchanges of size => ' +
str(sum(len(v) for v in exchanged.values())))
for fromto in exchanged:
logger.info("\texchange(s) from " + fromto[0] + ' to ' +
fromto[1] + " = " + ','.join(exchanged[fromto]))
results['one_model_targetsproducers'] = target_producers
results['one_model'] = one_model
results['exchanged'] = exchanged
results['bacteria'] = bacteria
results['still_unprod'] = still_unprod
results['newly_prod'] = newly_prod
results['producible'] = prod_targets
# union of solutions
if union:
logger.info('\n*** UNION OF MINIMAL SOLUTION ***')
try:
if optsol:
union_m = query.get_union_communities_from_g(grounded_instance, optimum)
else:
union_m = query.get_union_communities_from_g_noopti(grounded_instance)
except IndexError:
logger.error(
"No stable model was found. Possible troubleshooting: no harmony between names for identical metabolites among host and microbes"
)
sys.exit(1)
union_score = union_m[1]
optimum_union = ','.join(map(str, union_score))
union_m = union_m[0]
union_bacteria = []
union_exchanged = {}
union_target_producers = {}
for pred in union_m :
if pred == 'chosen_bacteria':
for a in union_m[pred, 1]:
union_bacteria.append(a[0])
elif pred == 'exchanged':
for a in union_m[pred, 4]:
if (a[2], a[3]) in union_exchanged: #union_exchanged[(from,to)]=[(what,compartto);(what,compartto)]
union_exchanged[(a[2], a[3])].append(a[0])
else:
union_exchanged[(a[2], a[3])] = []
union_exchanged[(a[2], a[3])].append( a[0])
elif pred == 'target_producer_coop_selectedcom':
for a in union_m[pred, 2]:
if not a[1] in union_target_producers:
union_target_producers[a[1]] = [a[0]]
else:
union_target_producers[a[1]].append(a[0])
logger.info('Union of minimal sets of bacteria, with optimum = ' +
optimum_union + ' comprises ' + str(len(union_bacteria)) +
' bacteria')
logger.info("\n".join(union_bacteria))
if len(union_exchanged) >= 1:
logger.info('\nExchanges in union => ' +
str(sum(len(v) for v in union_exchanged.values())))
for fromto in union_exchanged:
logger.info('\texchange(s) from ' + fromto[0] + ' to ' +
fromto[1] + " = " +
','.join(union_exchanged[fromto]))
results['union_exchanged'] = union_exchanged
results['union_bacteria'] = union_bacteria
results['score_optimum_union'] = optimum_union
results['union_targetsproducers'] = union_target_producers
# intersection of solutions
if intersection:
logger.info('\n*** INTERSECTION OF MINIMAL SOLUTION ***')
if optsol:
intersection_m = query.get_intersection_communities_from_g(grounded_instance, optimum)
else:
intersection_m = query.get_intersection_communities_from_g_noopti(grounded_instance)
intersection_score = intersection_m[1]
optimum_inter = ','.join(map(str, intersection_score))
intersection_m = intersection_m[0]
inter_bacteria = []
inter_exchanged = {}
inter_target_producers = {}
for pred in intersection_m :
if pred == 'chosen_bacteria':
for a in intersection_m[pred, 1]:
inter_bacteria.append(a[0])
elif pred == 'exchanged':
for a in intersection_m[pred, 4]:
if (a[2], a[3]) in inter_exchanged: #inter_exchanged[(from,to)]=[(what,compartto);(what,compartto)]
inter_exchanged[(a[2], a[3])].append(a[0])
else:
inter_exchanged[(a[2], a[3])] = []
inter_exchanged[(a[2], a[3])].append(a[0])
elif pred == 'target_producer_coop_selectedcom':
for a in intersection_m[pred, 2]:
if not a[1] in inter_target_producers:
inter_target_producers[a[1]] = [a[0]]
else:
inter_target_producers[a[1]].append(a[0])
logger.info('Intersection of minimal sets of bacteria, with optimum = '
+ optimum_inter + ' comprises ' +
str(len(inter_bacteria)) + ' bacteria')
logger.info("\n".join(inter_bacteria))
if len(inter_exchanged) >= 1:
logger.info('\nExchanges in intersection => ' +
str(sum(len(v) for v in inter_exchanged.values())))
for fromto in inter_exchanged:
logger.info('\texchange(s) from ' + fromto[0] + ' to ' +
fromto[1] + " = " +
','.join(inter_exchanged[fromto]))
results['inter_exchanged'] = inter_exchanged
results['inter_bacteria'] = inter_bacteria
results['score_optimum_inter'] = optimum_inter
results['inter_targetsproducers'] = inter_target_producers
# enumeration of all solutions
if enumeration:
    logger.info('\n*** ENUMERATION OF MINIMAL SOLUTIONS ***')
if optsol:
all_models = query.get_all_communities_from_g(grounded_instance, optimum)
else:
all_models = query.get_all_communities_from_g_noopti(grounded_instance)
count = 1
results['enum_bacteria'] = {}
results['enum_exchanged'] = {}
results['enum_targetsproducers'] = {}
for model in all_models:
enum_bacteria_this_sol = []
enum_exchanged_this_sol = {}
target_producers_this_sol = {}
logger.info('\nSolution ' + str(count))
for pred in model:
if pred == 'chosen_bacteria':
for a in model[pred, 1]:
enum_bacteria_this_sol.append(a[0])
elif pred == 'exchanged':
for a in model[pred, 4]:
if (a[2], a[3]) in enum_exchanged_this_sol: #enum_exchanged_this_sol[(from,to)]=[(what,compartto);(what,compartto)]
enum_exchanged_this_sol[(a[2], a[3])].append(a[0])
                    else:
                        enum_exchanged_this_sol[(a[2], a[3])] = [a[0]]
elif pred == 'target_producer_coop_selectedcom':
for a in model[pred, 2]:
if not a[1] in target_producers_this_sol:
target_producers_this_sol[a[1]] = [a[0]]
else:
target_producers_this_sol[a[1]].append(a[0])
logger.info("\t" + str(len(enum_bacteria_this_sol)) +
" bacterium(ia) in solution " + str(count))
for elem in enum_bacteria_this_sol:
logger.info("\t" + elem)
if len(enum_exchanged_this_sol) >= 1:
logger.info("\t" +
str(sum(len(v) for v in enum_exchanged_this_sol.values())) +
" exchange(s) in solution " + str(count))
for fromto in enum_exchanged_this_sol:
logger.info('\texchange(s) from ' + fromto[0] + ' to ' +
fromto[1] + " = " +
','.join(enum_exchanged_this_sol[fromto]))
results['enum_exchanged'][count] = enum_exchanged_this_sol
results['enum_bacteria'][count] = enum_bacteria_this_sol
results['enum_targetsproducers'][count] = target_producers_this_sol
        count += 1
if delete_lp_instance:
os.unlink(lp_instance_file)
if output_json:
utils.to_json(results, output_json)
logger.info(f"Export of results in {output_json}.")
logger.info("--- %s seconds ---" % (time.time() - start_time))
utils.clean_up()
return results
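# Hedged sketch (not part of the original module): the shape of the `results`
# dict returned above and how a caller might consume it. The keys mirror the
# assignments made in this code; the sample values are made up.
if __name__ == "__main__":
    sample_results = {
        'union_bacteria': ['bact_a', 'bact_b'],
        'score_optimum_union': '3',
        'enum_bacteria': {1: ['bact_a'], 2: ['bact_b']},
    }
    for bacterium in sample_results['union_bacteria']:
        print(bacterium)
    for sol_index, bacteria in sample_results['enum_bacteria'].items():
        print('solution ' + str(sol_index) + ': ' + str(len(bacteria)) + ' bacteria')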
|
PypiClean
|
/looking_glass-1.0.4-py3-none-any.whl/looking_glass/static/js/vis-4.21.0/lib/network/modules/components/nodes/shapes/Text.js
|
'use strict';
import NodeBase from '../util/NodeBase'
/**
* A text-based replacement for the default Node shape.
*
* @extends NodeBase
*/
class Text extends NodeBase {
/**
* @param {Object} options
* @param {Object} body
* @param {Label} labelModule
*/
constructor(options, body, labelModule) {
super(options, body, labelModule);
this._setMargins(labelModule);
}
/**
*
* @param {CanvasRenderingContext2D} ctx
* @param {boolean} selected
* @param {boolean} hover
*/
resize(ctx, selected, hover) {
if (this.needsRefresh(selected, hover)) {
this.textSize = this.labelModule.getTextSize(ctx, selected, hover);
this.width = this.textSize.width + this.margin.right + this.margin.left;
this.height = this.textSize.height + this.margin.top + this.margin.bottom;
this.radius = 0.5*this.width;
}
}
/**
*
* @param {CanvasRenderingContext2D} ctx
   * @param {number} x  horizontal position of the node center
   * @param {number} y  vertical position of the node center
* @param {boolean} selected
* @param {boolean} hover
   * @param {Object} values  resolved node styling values (shadow, color, width, arrow settings, ...)
*/
draw(ctx, x, y, selected, hover, values) {
this.resize(ctx, selected, hover);
this.left = x - this.width / 2;
this.top = y - this.height / 2;
// draw shadow if enabled
this.enableShadow(ctx, values);
this.labelModule.draw(ctx, this.left + this.textSize.width / 2 + this.margin.left,
this.top + this.textSize.height / 2 + this.margin.top, selected, hover);
// disable shadows for other elements.
this.disableShadow(ctx, values);
this.updateBoundingBox(x, y, ctx, selected, hover);
}
/**
*
* @param {CanvasRenderingContext2D} ctx
* @param {number} angle
* @returns {number}
*/
distanceToBorder(ctx, angle) {
return this._distanceToBorder(ctx,angle);
}
}
export default Text;
|
PypiClean
|
/customgpt_client-1.1.6.tar.gz/customgpt_client-1.1.6/customgpt_client/models/get_settings_response_401.py
|
from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.get_settings_response_401_status import GetSettingsResponse401Status
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.get_settings_response_401_data import GetSettingsResponse401Data
T = TypeVar("T", bound="GetSettingsResponse401")
@attr.s(auto_attribs=True)
class GetSettingsResponse401:
"""
Attributes:
status (Union[Unset, GetSettingsResponse401Status]): The status of the response Example: error.
url (Union[Unset, str]): The URL of the request Example: https://app.customgpt.ai/api/v1/projects/1.
data (Union[Unset, GetSettingsResponse401Data]):
"""
status: Union[Unset, GetSettingsResponse401Status] = UNSET
url: Union[Unset, str] = UNSET
data: Union[Unset, "GetSettingsResponse401Data"] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
status: Union[Unset, str] = UNSET
if not isinstance(self.status, Unset):
status = self.status.value
url = self.url
data: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.data, Unset):
data = self.data.to_dict()
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if status is not UNSET:
field_dict["status"] = status
if url is not UNSET:
field_dict["url"] = url
if data is not UNSET:
field_dict["data"] = data
return field_dict
@classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        from ..models.get_settings_response_401_data import GetSettingsResponse401Data
        d = src_dict.copy()
        _status = d.pop("status", UNSET)
        status: Union[Unset, GetSettingsResponse401Status]
        if isinstance(_status, Unset):
            status = UNSET
        else:
            status = GetSettingsResponse401Status(_status)
        url = d.pop("url", UNSET)
        _data = d.pop("data", UNSET)
        data: Union[Unset, GetSettingsResponse401Data]
        if isinstance(_data, Unset):
            data = UNSET
        else:
            data = GetSettingsResponse401Data.from_dict(_data)
        get_settings_response_401 = cls(
            status=status,
            url=url,
            data=data,
        )
        # keep only the unrecognized keys as additional properties instead of
        # duplicating the known fields
        get_settings_response_401.additional_properties = d
        return get_settings_response_401
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
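# Hedged round-trip sketch (not part of the generated model): the payload
# shape follows the Attributes docstring above; the values are illustrative.
if __name__ == "__main__":
    payload = {
        "status": "error",
        "url": "https://app.customgpt.ai/api/v1/projects/1",
        "extra_field": "kept as an additional property",
    }
    response = GetSettingsResponse401.from_dict(payload)
    assert response.url == payload["url"]
    assert "extra_field" in response.additional_keys
    assert response.to_dict()["status"] == "error"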
|
PypiClean
|
/mne-icalabel-0.4.tar.gz/mne-icalabel-0.4/mne_icalabel/features/topomap.py
|
from typing import Dict, Union
import numpy as np
from mne.channels.layout import _find_topomap_coords
from mne.defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT
from mne.io import Info
from mne.io.pick import _get_channel_types, _pick_data_channels, _picks_to_idx, pick_info
from mne.preprocessing import ICA
from mne.utils import _validate_type
from mne.viz.topomap import _check_extrapolate, _make_head_outlines, _setup_interp
from numpy.typing import NDArray
from ..utils._checks import _validate_ica
from ..utils._docs import fill_doc
@fill_doc
def get_topomaps(
ica: ICA,
picks=None,
res: int = 64,
image_interp: str = _INTERPOLATION_DEFAULT, # 'cubic'
border: Union[float, str] = _BORDER_DEFAULT, # 'mean'
extrapolate: str = _EXTRAPOLATE_DEFAULT, # 'auto' -> 'head' for EEG, 'local' for MEG
) -> Dict[str, NDArray[float]]:
"""Generate an array of scalp topographies (n_pixels, n_pixels) for the picked components.
Parameters
----------
ica : ICA
MNE `~mne.preprocessing.ICA` decomposition.
picks : int | list of int | slice | None
Indices of the independent components (ICs) to select.
If an integer, represents the index of the IC to pick.
Multiple ICs can be selected using a list of int or a slice.
The indices are 0-indexed, so ``picks=1`` will pick the second IC: ``ICA001``.
``None`` (default) will pick all independent components in the order fitted.
%(res_topomap)s
%(image_interp_topomap)s
%(border_topomap)s
%(extrapolate_topomap)s
Returns
-------
topomaps : dict of array of shape (n_components, n_pixels, n_pixels)
Dictionary of ICs topographic maps for each channel type.
"""
_validate_ica(ica)
if isinstance(picks, str):
raise TypeError(
"Argument 'picks' should be an integer or a list of integers to select the ICs. "
"Strings are not supported."
)
ic_picks = _picks_to_idx(ica.n_components_, picks)
_validate_type(res, "int", "res", "int")
if res <= 0:
raise ValueError(
f"Argument 'res' should be a strictly positive integer. Provided '{res}' is invalid."
)
# image_interp, border are validated by _setup_interp
# extrapolate is validated by _check_extrapolate
# prepare ICs
data = np.dot(
ica.mixing_matrix_.T,
ica.pca_components_[: ica.n_components_],
)
# list channel types
ch_picks = _pick_data_channels(ica.info, exclude=())
ch_types = _get_channel_types(pick_info(ica.info, ch_picks), unique=True)
# compute topomaps
topomaps = dict()
for ch_type in ch_types:
topomaps[ch_type] = np.zeros((ic_picks.size, res, res))
sel = _picks_to_idx(ica.info, picks=ch_type)
info = pick_info(ica.info, sel)
for k, component in enumerate(ic_picks):
topomaps[ch_type][k, :, :] = _get_topomap_array(
data[component, sel], info, res, image_interp, border, extrapolate
)
return topomaps
@fill_doc
def _get_topomap_array(
data: NDArray[float],
info: Info,
res: int = 64,
image_interp: str = _INTERPOLATION_DEFAULT, # 'cubic'
border: Union[float, str] = _BORDER_DEFAULT, # 'mean'
extrapolate: str = _EXTRAPOLATE_DEFAULT, # 'auto' -> 'head' for EEG, 'local' for MEG
) -> NDArray[float]:
"""Generate a scalp topographic map (n_pixels, n_pixels).
Parameters
----------
data : array of shape (n_channels,)
The data points used to generate the topographic map.
info : Info
Instance of `mne.Info` with the montage associated with the ``(n_channels,)`` points.
%(res_topomap)s
%(image_interp_topomap)s
%(border_topomap)s
%(extrapolate_topomap)s
Returns
-------
topomap : array of shape (n_pixels, n_pixels)
Topographic map array.
"""
ch_type = _get_channel_types(info, unique=True)
assert len(ch_type) == 1 # sanity-check
ch_type = ch_type[0]
picks = list(range(data.shape[0]))
sphere = np.array([0.0, 0.0, 0.0, 0.095])
    # inferring (x, y) coordinates from the mne.Info instance
pos = _find_topomap_coords(info, picks=picks, sphere=sphere, ignore_overlap=True)
extrapolate = _check_extrapolate(extrapolate, ch_type)
# interpolation, valid only for MNE ≥ 1.1
outlines = _make_head_outlines(sphere, pos, None, (0.0, 0.0))
extent, Xi, Yi, interp = _setup_interp(pos, res, image_interp, extrapolate, outlines, border)
interp.set_values(data)
topomap = np.flipud(interp.set_locations(Xi, Yi)()) # Zi, shape (n_pixels, n_pixels)
np.nan_to_num(topomap, nan=0.0, copy=False)
    topomap = topomap / np.max(np.abs(topomap))  # scale to the [-1, 1] range
return topomap # (n_pixels, n_pixels)
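# Hedged usage sketch (not part of the original module); the file name and
# ICA parameters below are illustrative, and running it requires MNE sample
# data on disk.
#
# from mne.io import read_raw_fif
# from mne.preprocessing import ICA
# from mne_icalabel.features.topomap import get_topomaps
#
# raw = read_raw_fif("sample_audvis_raw.fif", preload=True)
# raw.pick_types(eeg=True).filter(1.0, 100.0)
# ica = ICA(n_components=15).fit(raw)
# topomaps = get_topomaps(ica, picks=None, res=64)
# print(topomaps["eeg"].shape)  # expected: (15, 64, 64)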
|
PypiClean
|
/fast_rl-1.0.1.tar.gz/fast_rl-1.0.1/fast_rl/core/Interpreter.py
|
import io
from functools import partial
from typing import List, Tuple, Dict
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
import torch
from PIL import Image
from fastai.train import Interpretation, DatasetType, copy
from gym.spaces import Box
from itertools import product
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from moviepy.video.VideoClip import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
from torch import nn
from fast_rl.core import Learner
from fast_rl.core.data_block import MarkovDecisionProcessSliceAlpha, FEED_TYPE_IMAGE
class AgentInterpretationAlpha(Interpretation):
def __init__(self, learn: Learner, ds_type: DatasetType = DatasetType.Valid, base_chart_size=(20, 10)):
"""
        Handles converting a learner and its runs into useful, human-interpretable information.
Notes:
This class is called AgentInterpretationAlpha because it will overall get deprecated.
The final working version will be called AgentInterpretation.
Args:
learn:
"""
super().__init__(learn, None, None, None, ds_type=ds_type)
self.current_animation = None
plt.rcParams["figure.figsize"] = base_chart_size
def _get_items(self, ignore=True):
episodes = list(self.ds.x.info.keys())
if ignore or len(episodes) == 0: return self.ds.x.items
return [item for item in self.ds.x.items if item.episode in episodes]
@classmethod
def from_learner(cls, learn: Learner, ds_type: DatasetType = DatasetType.Valid, activ: nn.Module = None):
raise NotImplementedError
def normalize(self, item: np.array):
if np.max(item) - np.min(item) != 0:
            return np.divide(item - np.min(item), np.max(item) - np.min(item))  # min-max scaling
else:
item.fill(1)
return item
def top_losses(self, k: int = None, largest=True):
raise NotImplementedError
def reward_heatmap(self, episode_slices: List[MarkovDecisionProcessSliceAlpha], action=None):
"""
        Takes the episode slices and uses the agent to heat-map rewards over the state space.
        We first need to determine whether the state space is discrete or continuous.
        Args:
            episode_slices: Episode slices over which to evaluate the agent.
            action: Optional action index; if given, that action's Q value is
                mapped instead of the max Q value.
        Returns:
            A numpy array of expected rewards over the state space.
"""
if action is not None: action = torch.tensor(action).long()
current_state_slice = [p for p in product(
np.arange(min(self.ds.env.observation_space.low), max(self.ds.env.observation_space.high) + 1),
repeat=len(self.ds.env.observation_space.high))]
heat_map = np.zeros(np.add(self.ds.env.observation_space.high, 1))
with torch.no_grad():
for state in current_state_slice:
if action is not None:
heat_map[state] = self.learn.model(torch.from_numpy(np.array(state)).unsqueeze(0))[0].gather(0, action)
else:
self.learn.model.eval()
if self.learn.model.name == 'DDPG':
heat_map[state] = self.learn.model.critic_model(torch.cat((torch.from_numpy(np.array(state)).unsqueeze(0).float(), self.learn.model.action_model(torch.from_numpy(np.array(state)).unsqueeze(0).float())), 1))
else:
heat_map[state] = self.learn.model(torch.from_numpy(np.array(state)).unsqueeze(0))[0].max().numpy()
return heat_map
def plot_heatmapped_episode(self, episode, fig_size=(13, 5), action_index=None, return_heat_maps=False):
"""
        Generates plots of heat-mapped state spaces for analyzing reward distribution.
Currently only makes sense for grid based envs. Will be expecting gym_maze environments that are discrete.
Returns:
"""
        if 'maze' not in str(self.ds.env.spec):
            raise NotImplementedError('Currently only supports gym_maze envs that have discrete state spaces')
        if not isinstance(self.ds.state_size, Box):
            raise NotImplementedError('Currently only supports Box based state spaces with 2 dimensions')
items = self._get_items()
heat_maps = []
# For each episode
buffer = []
episode = episode if episode != -1 else list(set([i.episode for i in items]))[-1]
for item in [i for i in items if i.episode == episode]:
buffer.append(item)
heat_map = self.reward_heatmap(buffer, action=action_index)
heat_maps.append((copy(heat_map), copy(buffer[-1]), copy(episode)))
plots = []
for single_heatmap in [heat_maps[-1]]:
fig, ax = plt.subplots(1, 2, figsize=fig_size)
fig.suptitle(f'Episode {episode}')
ax[0].imshow(single_heatmap[1].to_one().data)
im = ax[1].imshow(single_heatmap[0])
ax[0].grid(False)
ax[1].grid(False)
ax[0].set_title('Final State Snapshot')
ax[1].set_title('State Space Heatmap')
fig.colorbar(im, ax=ax[1])
buf = io.BytesIO()
fig.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(fig)
buf.seek(0)
# Create Image object
plots.append(np.array(Image.open(buf))[:, :, :3])
for plot in plots:
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.imshow(plot)
plt.show()
if return_heat_maps: return heat_maps
def plot_episode(self, episode):
items = self._get_items(False) # type: List[MarkovDecisionProcessSliceAlpha]
episode_counter = 0
# For each episode
buffer = []
for item in items:
buffer.append(item)
if item.done:
if episode_counter == episode:
break
episode_counter += 1
buffer = []
plots = []
with torch.no_grad():
agent_reward_plots = [self.learn.model(torch.from_numpy(np.array(i.current_state))).max().numpy() for i in
buffer]
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
fig.suptitle(f'Episode {episode}')
ax.plot(agent_reward_plots)
ax.set_xlabel('Time Steps')
ax.set_ylabel('Max Expected Reward from Agent')
buf = io.BytesIO()
fig.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(fig)
buf.seek(0)
# Create Image object
plots.append(np.array(Image.open(buf))[:, :, :3])
for plot in plots:
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.imshow(plot)
plt.show()
def get_agent_accuracy_density(self, items, episode_num=None):
x = None
y = None
for episode in [_ for _ in list(set(mdp.episode for mdp in items)) if episode_num is None or episode_num == _]:
subset = [item for item in items if item.episode == episode]
state = np.array([_.current_state for _ in subset])
result_state = np.array([_.result_state for _ in subset])
prim_q_pred = self.learn.model(torch.from_numpy(state))
target_q_pred = self.learn.model.target_net(torch.from_numpy(state).float())
state_difference = (prim_q_pred - target_q_pred).sum(1)
prim_q_pred = self.learn.model(torch.from_numpy(result_state))
target_q_pred = self.learn.model.target_net(torch.from_numpy(result_state).float())
result_state_difference = (prim_q_pred - target_q_pred).sum(1)
x = state_difference if x is None else np.hstack((x, state_difference))
y = result_state_difference if y is None else np.hstack((y, result_state_difference))
return x, y
def plot_agent_accuracy_density(self, episode_num=None):
"""
        Plots the deviation between the primary and target networks' Q values. A good reference for density plots is at [1].
References:
[1] "Simple Example Of 2D Density Plots In Python." Medium. N. p., 2019. Web. 31 Aug. 2019.
https://towardsdatascience.com/simple-example-of-2d-density-plots-in-python-83b83b934f67
Returns:
"""
items = self._get_items(False) # type: List[MarkovDecisionProcessSliceAlpha]
x, y = self.get_agent_accuracy_density(items, episode_num)
fig = plt.figure(figsize=(8, 8))
ax = fig.gca()
fig.suptitle(f'{self.learn.model.name} for {self.ds.env.spec._env_name}')
ax.set_ylabel('State / State Prime Q Value Deviation')
ax.set_xlabel('Iterations')
ax.plot(np.hstack([x, y]))
plt.show()
def get_q_density(self, items, episode_num=None):
x = None
y = None
for episode in [_ for _ in list(set(mdp.episode for mdp in items)) if episode_num is None or episode_num == _]:
subset = [item for item in items if item.episode == episode]
r = np.array([_.reward for _ in subset])
# Gets the total accumulated r over a single markov chain
actual_returns = np.flip([np.cumsum(r)[i:][0] for i in np.flip(np.arange(len(r)))]).reshape(1, -1)
estimated_returns = self.learn.model.interpret_q(subset).view(1, -1).numpy()
x = actual_returns if x is None else np.hstack((x, actual_returns))
y = estimated_returns if y is None else np.hstack((y, estimated_returns))
return self.normalize(x), self.normalize(y)
def plot_q_density(self, episode_num=None):
"""
        Heat maps the density of actual vs estimated Q values. A good reference for this is at [1].
References:
[1] "Simple Example Of 2D Density Plots In Python." Medium. N. p., 2019. Web. 31 Aug. 2019.
https://towardsdatascience.com/simple-example-of-2d-density-plots-in-python-83b83b934f67
Returns:
"""
items = self._get_items(False) # type: List[MarkovDecisionProcessSliceAlpha]
x, y = self.get_q_density(items, episode_num)
# Define the borders
deltaX = (np.max(x) - np.min(x)) / 10
deltaY = (np.max(y) - np.min(y)) / 10
xmin = np.min(x) - deltaX
xmax = np.max(x) + deltaX
ymin = np.min(y) - deltaY
ymax = np.max(y) + deltaY
print(xmin, xmax, ymin, ymax)
# Create meshgrid
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
fig = plt.figure(figsize=(8, 8))
ax = fig.gca()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
cfset = ax.contourf(xx, yy, f, cmap='coolwarm')
ax.imshow(np.rot90(f), cmap='coolwarm', extent=[xmin, xmax, ymin, ymax])
cset = ax.contour(xx, yy, f, colors='k')
ax.clabel(cset, inline=1, fontsize=10)
ax.set_xlabel('Actual Returns')
ax.set_ylabel('Estimated Q')
if episode_num is None:
plt.title('2D Gaussian Kernel Q Density Estimation')
else:
plt.title(f'2D Gaussian Kernel Q Density Estimation for episode {episode_num}')
plt.show()
def plot_rewards_over_iterations(self, cumulative=False, return_rewards=False):
items = self._get_items()
        r_iter = [el.reward if np.ndim(el.reward) == 0 else np.average(el.reward) for el in items]  # scalar rewards used directly
if cumulative: r_iter = np.cumsum(r_iter)
fig = plt.figure(figsize=(8, 8))
ax = fig.gca()
fig.suptitle(f'{self.learn.model.name} for {self.ds.env.spec._env_name}')
ax.set_ylabel('Rewards' if not cumulative else 'Cumulative Rewards')
ax.set_xlabel('Iterations')
ax.plot(r_iter)
plt.show()
if return_rewards: return r_iter
def plot_rewards_over_episodes(self, cumulative=False, fig_size=(8, 8)):
items = self._get_items()
        r_iter = [(el.reward if np.ndim(el.reward) == 0 else np.average(el.reward), el.episode) for el in items]
rewards, episodes = zip(*r_iter)
if cumulative: rewards = np.cumsum(rewards)
fig = plt.figure(figsize=(8, 8))
ax = fig.gca()
fig.suptitle(f'{self.learn.model.name} for {self.ds.env.spec._env_name}')
ax.set_ylabel('Rewards' if not cumulative else 'Cumulative Rewards')
ax.set_xlabel('Episodes')
ax.xaxis.set_ticks([i for i, el in enumerate(episodes) if episodes[i - 1] != el or i == 0])
ax.xaxis.set_ticklabels([el for i, el in enumerate(episodes) if episodes[i - 1] != el or i == 0])
ax.plot(rewards)
plt.show()
def episode_video_frames(self, episode=None) -> Dict[str, np.array]:
""" Returns numpy arrays representing purely episode frames. """
items = self._get_items(False)
if episode is None: episode_frames = {key: None for key in list(set([_.episode for _ in items]))}
else: episode_frames = {episode: None}
for key in episode_frames:
if self.ds.feed_type == FEED_TYPE_IMAGE:
episode_frames[key] = np.array([_.current_state for _ in items if key == _.episode])
else:
episode_frames[key] = np.array([_.alternate_state for _ in items if key == _.episode])
return episode_frames
def episode_to_gif(self, episode=None, path='', fps=30):
frames = self.episode_video_frames(episode)
for ep in frames:
fig, ax = plt.subplots()
animation = VideoClip(partial(self._make_frame, frames=frames[ep], axes=ax, fig=fig, title=f'Episode {ep}'),
duration=frames[ep].shape[0])
animation.write_gif(path + f'episode_{ep}.gif', fps=fps)
def _make_frame(self, t, frames, axes, fig, title):
axes.clear()
fig.suptitle(title)
axes.imshow(frames[int(t)])
return mplfig_to_npimage(fig)
def iplot_episode(self, episode, fps=30):
if episode is None: raise ValueError('The episode cannot be None for jupyter display')
x = self.episode_video_frames(episode)[episode]
fig, ax = plt.subplots()
self.current_animation = VideoClip(partial(self._make_frame, frames=x, axes=ax, fig=fig,
title=f'Episode {episode}'), duration=x.shape[0])
self.current_animation.ipython_display(fps=fps, loop=True, autoplay=True)
def get_memory_samples(self, batch_size=None, key='reward'):
samples = self.learn.model.memory.sample(self.learn.model.batch_size if batch_size is None else batch_size)
if not samples: raise IndexError('Your tree seems empty.')
if batch_size is not None and batch_size > len(self.learn.model.memory):
raise IndexError(f'Your batch size {batch_size} > the tree\'s batch size {len(self.learn.model.memory)}')
if key not in samples[0].obj.keys(): raise ValueError(f'Key {key} not in {samples[0].obj.keys()}')
return [s.obj[key] for s in samples]
def plot_memory_samples(self, batch_size=None, key='reward', fig_size=(8, 8)):
values_of_interest = self.get_memory_samples(batch_size, key)
fig = plt.figure(figsize=fig_size)
ax = fig.gca()
fig.suptitle(f'{self.learn.model.name} for {self.ds.env.spec._env_name}')
ax.set_ylabel(key)
        ax.set_xlabel('Samples')
ax.plot(values_of_interest)
plt.show()
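# Hedged usage sketch (not part of the original module): `learn` is assumed
# to be a fast_rl Learner that has already been fit on a gym environment.
#
# interp = AgentInterpretationAlpha(learn, ds_type=DatasetType.Valid)
# interp.plot_rewards_over_iterations(cumulative=True)
# interp.plot_rewards_over_episodes()
# heat_maps = interp.plot_heatmapped_episode(-1, return_heat_maps=True)  # maze envs only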
|
PypiClean
|
/streamlit-code-editor-0.1.10.tar.gz/streamlit-code-editor-0.1.10/code_editor/frontend/build/2c3161ab5238bbc74841c24d33a784a0.js
|
ace.define("ace/mode/terraform_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],(function(e,t,r){"use strict";var n=e("../lib/oop"),o=e("./text_highlight_rules").TextHighlightRules,i=function(){this.$rules={start:[{token:["storage.function.terraform"],regex:"\\b(output|resource|data|variable|module|export)\\b"},{token:"variable.terraform",regex:"\\$\\s",push:[{token:"keyword.terraform",regex:"(-var-file|-var)"},{token:"variable.terraform",regex:"\\n|$",next:"pop"},{include:"strings"},{include:"variables"},{include:"operators"},{defaultToken:"text"}]},{token:"language.support.class",regex:"\\b(timeouts|provider|connection|provisioner|lifecycleprovider|atlas)\\b"},{token:"singleline.comment.terraform",regex:"#.*$"},{token:"singleline.comment.terraform",regex:"//.*$"},{token:"multiline.comment.begin.terraform",regex:/\/\*/,push:"blockComment"},{token:"storage.function.terraform",regex:"^\\s*(locals|terraform)\\s*{"},{token:"paren.lparen",regex:"[[({]"},{token:"paren.rparen",regex:"[\\])}]"},{include:"constants"},{include:"strings"},{include:"operators"},{include:"variables"}],blockComment:[{regex:/\*\//,token:"multiline.comment.end.terraform",next:"pop"},{defaultToken:"comment"}],constants:[{token:"constant.language.terraform",regex:"\\b(true|false|yes|no|on|off|EOF)\\b"},{token:"constant.numeric.terraform",regex:"(\\b([0-9]+)([kKmMgG]b?)?\\b)|(\\b(0x[0-9A-Fa-f]+)([kKmMgG]b?)?\\b)"}],variables:[{token:["variable.assignment.terraform","keyword.operator"],regex:"\\b([a-zA-Z_]+)(\\s*=)"}],interpolated_variables:[{token:"variable.terraform",regex:"\\b(var|self|count|path|local)\\b(?:\\.*[a-zA-Z_-]*)?"}],strings:[{token:"punctuation.quote.terraform",regex:"'",push:[{token:"punctuation.quote.terraform",regex:"'",next:"pop"},{include:"escaped_chars"},{defaultToken:"string"}]},{token:"punctuation.quote.terraform",regex:'"',push:[{token:"punctuation.quote.terraform",regex:'"',next:"pop"},{include:"interpolation"},{include:"escaped_chars"},{defaultToken:"string"}]}],escaped_chars:[{token:"constant.escaped_char.terraform",regex:"\\\\."}],operators:[{token:"keyword.operator",regex:"\\?|:|==|!=|>|<|>=|<=|&&|\\|\\||!|%|&|\\*|\\+|\\-|/|="}],interpolation:[{token:"punctuation.interpolated.begin.terraform",regex:"\\$?\\$\\{",push:[{token:"punctuation.interpolated.end.terraform",regex:"\\}",next:"pop"},{include:"interpolated_variables"},{include:"operators"},{include:"constants"},{include:"strings"},{include:"functions"},{include:"parenthesis"},{defaultToken:"punctuation"}]}],functions:[{token:"keyword.function.terraform",regex:"\\b(abs|basename|base64decode|base64encode|base64gzip|base64sha256|base64sha512|bcrypt|ceil|chomp|chunklist|cidrhost|cidrnetmask|cidrsubnet|coalesce|coalescelist|compact|concat|contains|dirname|distinct|element|file|floor|flatten|format|formatlist|indent|index|join|jsonencode|keys|length|list|log|lookup|lower|map|matchkeys|max|merge|min|md5|pathexpand|pow|replace|rsadecrypt|sha1|sha256|sha512|signum|slice|sort|split|substr|timestamp|timeadd|title|transpose|trimspace|upper|urlencode|uuid|values|zipmap)\\b"}],parenthesis:[{token:"paren.lparen",regex:"\\["},{token:"paren.rparen",regex:"\\]"}]},this.normalizeRules()};n.inherits(i,o),t.TerraformHighlightRules=i})),ace.define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"],(function(e,t,r){"use strict";var n=e("../../lib/oop"),o=e("../../range").Range,i=e("./fold_mode").FoldMode,a=t.FoldMode=function(e){e&&(this.foldingStartMarker=new 
RegExp(this.foldingStartMarker.source.replace(/\|[^|]*?$/,"|"+e.start)),this.foldingStopMarker=new RegExp(this.foldingStopMarker.source.replace(/\|[^|]*?$/,"|"+e.end)))};n.inherits(a,i),function(){this.foldingStartMarker=/([\{\[\(])[^\}\]\)]*$|^\s*(\/\*)/,this.foldingStopMarker=/^[^\[\{\(]*([\}\]\)])|^[\s\*]*(\*\/)/,this.singleLineBlockCommentRe=/^\s*(\/\*).*\*\/\s*$/,this.tripleStarBlockCommentRe=/^\s*(\/\*\*\*).*\*\/\s*$/,this.startRegionRe=/^\s*(\/\*|\/\/)#?region\b/,this._getFoldWidgetBase=this.getFoldWidget,this.getFoldWidget=function(e,t,r){var n=e.getLine(r);if(this.singleLineBlockCommentRe.test(n)&&!this.startRegionRe.test(n)&&!this.tripleStarBlockCommentRe.test(n))return"";var o=this._getFoldWidgetBase(e,t,r);return!o&&this.startRegionRe.test(n)?"start":o},this.getFoldWidgetRange=function(e,t,r,n){var o,i=e.getLine(r);if(this.startRegionRe.test(i))return this.getCommentRegionBlock(e,i,r);if(o=i.match(this.foldingStartMarker)){var a=o.index;if(o[1])return this.openingBracketBlock(e,o[1],r,a);var s=e.getCommentFoldRange(r,a+o[0].length,1);return s&&!s.isMultiLine()&&(n?s=this.getSectionRange(e,r):"all"!=t&&(s=null)),s}if("markbegin"!==t&&(o=i.match(this.foldingStopMarker))){a=o.index+o[0].length;return o[1]?this.closingBracketBlock(e,o[1],r,a):e.getCommentFoldRange(r,a,-1)}},this.getSectionRange=function(e,t){for(var r=e.getLine(t),n=r.search(/\S/),i=t,a=r.length,s=t+=1,l=e.getLength();++t<l;){var c=(r=e.getLine(t)).search(/\S/);if(-1!==c){if(n>c)break;var g=this.getFoldWidgetRange(e,"all",t);if(g){if(g.start.row<=i)break;if(g.isMultiLine())t=g.end.row;else if(n==c)break}s=t}}return new o(i,a,s,e.getLine(s).length)},this.getCommentRegionBlock=function(e,t,r){for(var n=t.search(/\s*$/),i=e.getLength(),a=r,s=/^\s*(?:\/\*|\/\/|--)#?(end)?region\b/,l=1;++r<i;){t=e.getLine(r);var c=s.exec(t);if(c&&(c[1]?l--:l++,!l))break}if(r>a)return new o(a,n,r,t.length)}}.call(a.prototype)})),ace.define("ace/mode/matching_brace_outdent",["require","exports","module","ace/range"],(function(e,t,r){"use strict";var n=e("../range").Range,o=function(){};(function(){this.checkOutdent=function(e,t){return!!/^\s+$/.test(e)&&/^\s*\}/.test(t)},this.autoOutdent=function(e,t){var r=e.getLine(t).match(/^(\s*\})/);if(!r)return 0;var o=r[1].length,i=e.findMatchingBracket({row:t,column:o});if(!i||i.row==t)return 0;var a=this.$getIndent(e.getLine(i.row));e.replace(new n(t,0,t,o-1),a)},this.$getIndent=function(e){return e.match(/^\s*/)[0]}}).call(o.prototype),t.MatchingBraceOutdent=o})),ace.define("ace/mode/terraform",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/terraform_highlight_rules","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle","ace/mode/matching_brace_outdent"],(function(e,t,r){"use strict";var n=e("../lib/oop"),o=e("./text").Mode,i=e("./terraform_highlight_rules").TerraformHighlightRules,a=e("./behaviour/cstyle").CstyleBehaviour,s=e("./folding/cstyle").FoldMode,l=e("./matching_brace_outdent").MatchingBraceOutdent,c=function(){o.call(this),this.HighlightRules=i,this.$outdent=new l,this.$behaviour=new a,this.foldingRules=new s};n.inherits(c,o),function(){this.lineCommentStart=["#","//"],this.blockComment={start:"/*",end:"*/"},this.$id="ace/mode/terraform"}.call(c.prototype),t.Mode=c})),ace.require(["ace/mode/terraform"],(function(e){"object"==typeof module&&"object"==typeof exports&&module&&(module.exports=e)}));
|
PypiClean
|
/refinitiv-dataplatform-1.0.0a15.tar.gz/refinitiv-dataplatform-1.0.0a15/refinitiv/dataplatform/legacy/tools.py
|
import json
from datetime import date, datetime, timedelta
from typing import Union, Tuple
import dateutil.parser
from dateutil import tz
__all__ = [
"get_default_session",
"set_default_session",
"close_session",
"set_app_key",
"set_log_level",
]
def is_string_type(value):
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
def get_json_value(json_data, name):
if name in json_data:
return json_data[name]
else:
return None
def to_datetime(
date_value: Union[str, timedelta, Tuple[datetime, date]]
) -> Union[tuple, datetime, None]:
if date_value is None:
return None
if isinstance(date_value, timedelta):
return datetime.now(tz.tzlocal()) + date_value
if isinstance(date_value, (datetime, date)):
return date_value
try:
return dateutil.parser.parse(date_value)
except ValueError as e:
raise e
except Exception as e:
raise ValueError(e)
def _to_utc(datetime_value):
if datetime_value is None:
return None
_value = to_datetime(datetime_value)
UTC = tz.gettz("UTC")
_value = _value.astimezone(UTC).replace(tzinfo=None)
return _value
def to_utc_datetime(datetime_value):
datetime_value = _to_utc(datetime_value)
if datetime_value is None:
return None
return datetime_value # .strftime("%Y-%m-%d %H:%M:%S")
def to_utc_date(date_value):
date_value = _to_utc(date_value)
if date_value is None:
return None
return date_value.date()
def to_utc_datetime_isofmt(datetime_value):
datetime_value = _to_utc(datetime_value)
if datetime_value is None:
return None
datetime_value = datetime_value.isoformat(timespec="microseconds") + "000Z"
return datetime_value
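# Worked example (not part of the original module) of the UTC helpers above:
#   to_utc_datetime_isofmt("2021-06-01T14:00:00+02:00")
#   -> "2021-06-01T12:00:00.000000000Z"
# _to_utc shifts the aware datetime to UTC and drops tzinfo, after which the
# microsecond ISO string is suffixed with "000Z" to mark UTC with nanosecond padding.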
def get_date_from_today(days_count):
    if not isinstance(days_count, int):
raise ValueError(
"The parameter {} should be an integer, found {}".format(
days_count, type(days_count)
)
)
return datetime.now(tz.tzlocal()) + timedelta(days=-days_count)
def is_list_of_string(values):
return all(is_string_type(value) for value in values)
def check_for_string(parameter, name):
if not is_string_type(parameter):
raise ValueError(
"The parameter {} should be a string, found {}".format(name, str(parameter))
)
def check_for_string_or_list_of_strings(parameter, name):
if type(parameter) != list and (not parameter or not is_string_type(parameter)):
raise ValueError(
"The parameter {} should be a string or a list of string, found {}".format(
name, type(parameter)
)
)
if type(parameter) == list and not is_list_of_string(parameter):
raise ValueError(
"All items in the parameter {} should be of data type string, found {}".format(
name, [type(v) for v in parameter]
)
)
def check_for_int(parameter, name):
if type(parameter) is not int:
raise ValueError(
"The parameter {} should be an int, found {} type value ({})".format(
name, type(parameter), str(parameter)
)
)
def build_list_with_params(values, name):
if values is None:
raise ValueError(name + " is None, it must be a string or a list of strings")
if is_string_type(values):
return [(v, None) for v in values.split()]
elif type(values) is list:
try:
return [
(value, None) if is_string_type(value) else (value[0], value[1])
for value in values
]
except Exception:
raise ValueError(
name
+ " must be a string or a list of strings or a tuple or a list of tuple"
)
else:
try:
return values[0], values[1]
except Exception:
raise ValueError(
name
+ " must be a string or a list of strings or a tuple or a list of tuple"
)
def build_list(values, name):
if values is None:
raise ValueError(name + " is None, it must be a string or a list of strings")
if is_string_type(values):
return [values.strip()]
elif type(values) is list:
if all(is_string_type(value) for value in values):
return [value for value in values]
else:
raise ValueError(name + " must be a string or a list of strings")
else:
raise ValueError(name + " must be a string or a list of strings")
def build_dictionary(dic, name):
if dic is None:
raise ValueError(
name + " is None, it must be a string or a dictionary of strings"
)
if is_string_type(dic):
return json.loads(dic)
elif type(dic) is dict:
return dic
else:
raise ValueError(name + " must be a string or a dictionary")
def tz_replacer(s):
if isinstance(s, str):
if s.endswith("Z"):
s = s[:-1]
elif s.endswith("-0000"):
s = s[:-5]
if s.endswith(".000"):
s = s[:-4]
return s
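# Worked examples (not part of the original module) of tz_replacer's trimming:
#   tz_replacer("2021-06-01T12:00:00.000Z")  -> "2021-06-01T12:00:00"
#   tz_replacer("2021-06-01T12:00:00-0000")  -> "2021-06-01T12:00:00"
#   tz_replacer(datetime(2021, 6, 1))        -> returned unchanged (not a str)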
def set_default_session(session):
DefaultSession.set_default_session(session)
def get_default_session(app_key=None):
return DefaultSession.get_default_session(app_key)
def close_session():
DefaultSession.get_default_session().close()
def set_app_key(app_key):
from refinitiv.dataplatform.core.session.session import Session
_session = get_default_session(app_key)
if _session.get_open_state() == Session.State.Closed:
_session.open()
def set_log_level(log_level):
    default_session = DefaultSession.get_default_session()
    default_session.set_log_level(log_level)
class DefaultSession(object):
# singleton session
__default_session = None
@classmethod
def set_default_session(cls, session):
from refinitiv.dataplatform.core.session.session import Session
if isinstance(session, Session):
cls.__default_session = session
@classmethod
def get_default_session(cls, app_key=None):
from refinitiv.dataplatform.core.session.desktop_session import DesktopSession
if cls.__default_session is None:
if app_key is None:
return None
cls.__default_session = DesktopSession(app_key)
elif app_key is not None:
if app_key != cls.__default_session.app_key:
cls.__default_session.close()
cls.__default_session = DesktopSession(app_key)
return cls.__default_session
@classmethod
def close_default_session(cls):
if cls.__default_session is not None:
cls.__default_session.close()
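# Hedged usage sketch (not part of the original module): the module-level
# helpers above wrap the DefaultSession singleton. "YOUR_APP_KEY" is a
# placeholder.
#
# set_app_key("YOUR_APP_KEY")      # creates and opens a DesktopSession
# session = get_default_session()  # returns the singleton session
# close_session()                  # closes it again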
|
PypiClean
|
/psat-server-web-0.1.9.tar.gz/psat-server-web-0.1.9/psat_server_web/atlas/atlas/urls.py
|
from django.urls import re_path as url
from django.urls import path, include
from django.contrib import admin
from atlas import views
from django.conf import settings
from django.conf.urls.static import static
admin.autodiscover()
urlpatterns = [
# 2016-02-24 KWS Introduced the Django Admin URL
path('admin/', admin.site.urls),
# 2016-02-26 KWS Add the authentication URLs
url(r'^accounts/login/', views.login, name="login"),
url(r'^accounts/logout/', views.logout, name="logout"),
url(r'^accounts/auth/', views.authView, name="auth"),
url(r'^accounts/loggedin/', views.loggedin, name="loggedin"),
url(r'^accounts/invalid/', views.invalidLogin, name="invalid"),
url(r'^$', views.homepage, name='home'),
# 2019-09-28 KWS New error page.
url(r'^error/$', views.errorpage, name='error'),
# 2017-06-16 KWS New ddc detections
# url(r'^lightcurve/(?P<tcs_transient_objects_id>\d+)/$', views.lightcurveplain, name='lightcurveplain'),
url(r'^lightcurve/(?P<tcs_transient_objects_id>\d+)/$', views.lightcurveplainddc, name='lightcurveplain'),
url(r'^lightcurveforced/(?P<tcs_transient_objects_id>\d+)/$', views.lightcurveforcedplain, name='lightcurveforcedplain'),
url(r'^lightcurvestackedforced/(?P<tcs_transient_objects_id>\d+)/$', views.lightcurvestackedforcedplain, name='lightcurvestackedforcedplain'),
url(r'^atel/(?P<tcs_transient_objects_id>\d+)/$', views.atel, name='atel'),
# 2017-06-16 KWS New ddc detections
# url(r'^candidate/(?P<atlas_diff_objects_id>\d+)/$', views.candidate, name='candidate'),
url(r'^candidate/(?P<atlas_diff_objects_id>\d+)/$', views.candidateddc, {'template_name':'candidate_plotly.html'}, name='candidate'),
url(r'^candidate_bs/(?P<atlas_diff_objects_id>\d+)/$', views.candidateddc, {'template_name':'candidate_plotly.html'}, name='candidate_bs'),
url(r'^candidate_old/(?P<atlas_diff_objects_id>\d+)/$', views.candidateddc, {'template_name':'candidate.html'}, name='candidate_old'),
url(r'^userlist_atel_discovery/(?P<userDefinedListNumber>\d+)/$', views.atelsDiscovery, name='ateldiscovery'),
url(r'^userlist_atel_fast/(?P<userDefinedListNumber>\d+)/$', views.atelsFast, name='atelfast'),
url(r'^userlist_visibility/(?P<userDefinedListNumber>\d+)/$', views.visibility, name='visibility'),
url(r'^userlist_iobserve/(?P<userDefinedListNumber>\d+)/$', views.iobserve, name='iobserve'),
url(r'^externalcrossmatches/$', views.displayExternalCrossmatches, name='externalcrossmatches'),
url(r'^followup/(?P<listNumber>\d+)/$', views.followupList, name='followup'),
url(r'^followuptxt/(?P<listNumber>\d+)/$', views.followuptxt, name='followuptxt'),
url(r'^followupsubsettxt/(?P<listNumber>\d+)/$', views.followupsubsettxt, name='followupsubsettxt'),
url(r'^pesstosummary/$', views.pesstosummary, name='pesstosummary'),
# url(r'^pesstorecurrences/$', views.pesstorecurrences, name='pesstorecurrences'),
url(r'^pesstorecurrences/$', views.pesstorecurrencesddc, name='pesstorecurrences'),
url(r'^summarycsv/(?P<listNumber>\d+)/$', views.summarycsv, name='summarycsv'),
# url(r'^followup2/(?P<listNumber>\d+)/$', views.followupList2, name='followup2'),
url(r'^followup3/(?P<listNumber>\d+)/$', views.followupList3, name='followup3'),
# Experiment!
url(r'^followup_bypass_django_tables/(?P<listNumber>\d+)/$', views.followup_bypass_django_tables, name='followup_bypass_django_tables'),
url(r'^userlist/$', views.userDefinedListDefinitions, name='userdefinedlistdefs'),
url(r'^userlist/(?P<userDefinedListNumber>\d+)/$', views.userDefinedLists, name='userdefinedlists'),
# 2016-06-15 KWS Added quickview URLs.
url(r'^followup_quickview/(?P<listNumber>\d+)/$', views.followupQuickView, name='followupquickview'),
url(r'^followup_quickview_bs/(?P<listNumber>\d+)/$', views.followupQuickViewBootstrapPlotly, name='followupquickviewbootstrapplotly'),
url(r'^followup_quickview/$', views.followupAllQuickView, name='followupallquickview'),
url(r'^userlist_quickview/(?P<userDefinedListNumber>\d+)/$', views.userDefinedListsQuickview, name='userdefinedlistsquickview'),
# 2016-08-27 KWS Search Results URL
url(r'^searchresults/$', views.searchResults, name='searchresults'),
url(r'^snejson/$', views.jsonSNe, name='snejson'),
# 2022-05-06 KWS Exposure heat maps
url(r'^heatmap/(?P<expname>.*)/$', views.heatmap, {'template_name':'heatmap.html'}, name='heatmap'),
# 2023-06-09 KWS GCN notification with custom lists combined with GW events
url(r'^userlist_gcn/(?P<userDefinedListNumber>\d+)/$', views.gcn, {'template_name':'gcn.txt'}, name='gcn'),
    url(r'^userlist_gcn_latex/(?P<userDefinedListNumber>\d+)/$', views.gcn, {'template_name':'gcn_latex.txt'}, name='gcn_latex'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
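# Hedged sketch (not part of the original module): resolving one of the named
# routes above with django.urls.reverse; the object id is illustrative.
#
# from django.urls import reverse
# reverse('candidate', kwargs={'atlas_diff_objects_id': 1051234})
# # -> '/candidate/1051234/'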
|
PypiClean
|
/alipay-python-3.3.17.tar.gz/alipay-python-3.3.17/alipay/aop/api/domain/AlipayMarketingCampaignDiscountOperateModel.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.DiscountDstCampPrizeModel import DiscountDstCampPrizeModel
from alipay.aop.api.domain.DstCampRuleModel import DstCampRuleModel
from alipay.aop.api.domain.DateAreaModel import DateAreaModel
from alipay.aop.api.domain.RandomDiscountDstCampPrizeModel import RandomDiscountDstCampPrizeModel
from alipay.aop.api.domain.ReduceDstCampPrizeModel import ReduceDstCampPrizeModel
from alipay.aop.api.domain.ReduceToDiscountDstCampPrizeModel import ReduceToDiscountDstCampPrizeModel
from alipay.aop.api.domain.ResetZeroDstCampPrizeModel import ResetZeroDstCampPrizeModel
from alipay.aop.api.domain.SingleDstCampPrizeModel import SingleDstCampPrizeModel
from alipay.aop.api.domain.StagedDiscountDstCampPrizeModel import StagedDiscountDstCampPrizeModel
class AlipayMarketingCampaignDiscountOperateModel(object):
def __init__(self):
self._camp_code = None
self._camp_desc = None
self._camp_id = None
self._camp_name = None
self._camp_slogan = None
self._discount_dst_camp_prize_model = None
self._dst_camp_rule_model = None
self._dst_camp_sub_time_model_list = None
self._gmt_end = None
self._gmt_start = None
self._operate_type = None
self._prize_type = None
self._random_discount_dst_camp_prize_model = None
self._reduce_dst_camp_prize_model = None
self._reduce_to_discount_dst_camp_prize_model = None
self._reset_zero_dst_camp_prize_model = None
self._single_dst_camp_prize_model = None
self._staged_discount_dst_camp_prize_model = None
@property
def camp_code(self):
return self._camp_code
@camp_code.setter
def camp_code(self, value):
self._camp_code = value
@property
def camp_desc(self):
return self._camp_desc
@camp_desc.setter
def camp_desc(self, value):
self._camp_desc = value
@property
def camp_id(self):
return self._camp_id
@camp_id.setter
def camp_id(self, value):
self._camp_id = value
@property
def camp_name(self):
return self._camp_name
@camp_name.setter
def camp_name(self, value):
self._camp_name = value
@property
def camp_slogan(self):
return self._camp_slogan
@camp_slogan.setter
def camp_slogan(self, value):
self._camp_slogan = value
@property
def discount_dst_camp_prize_model(self):
return self._discount_dst_camp_prize_model
@discount_dst_camp_prize_model.setter
def discount_dst_camp_prize_model(self, value):
if isinstance(value, DiscountDstCampPrizeModel):
self._discount_dst_camp_prize_model = value
else:
self._discount_dst_camp_prize_model = DiscountDstCampPrizeModel.from_alipay_dict(value)
@property
def dst_camp_rule_model(self):
return self._dst_camp_rule_model
@dst_camp_rule_model.setter
def dst_camp_rule_model(self, value):
if isinstance(value, DstCampRuleModel):
self._dst_camp_rule_model = value
else:
self._dst_camp_rule_model = DstCampRuleModel.from_alipay_dict(value)
@property
def dst_camp_sub_time_model_list(self):
return self._dst_camp_sub_time_model_list
@dst_camp_sub_time_model_list.setter
def dst_camp_sub_time_model_list(self, value):
if isinstance(value, list):
self._dst_camp_sub_time_model_list = list()
for i in value:
if isinstance(i, DateAreaModel):
self._dst_camp_sub_time_model_list.append(i)
else:
self._dst_camp_sub_time_model_list.append(DateAreaModel.from_alipay_dict(i))
@property
def gmt_end(self):
return self._gmt_end
@gmt_end.setter
def gmt_end(self, value):
self._gmt_end = value
@property
def gmt_start(self):
return self._gmt_start
@gmt_start.setter
def gmt_start(self, value):
self._gmt_start = value
@property
def operate_type(self):
return self._operate_type
@operate_type.setter
def operate_type(self, value):
self._operate_type = value
@property
def prize_type(self):
return self._prize_type
@prize_type.setter
def prize_type(self, value):
self._prize_type = value
@property
def random_discount_dst_camp_prize_model(self):
return self._random_discount_dst_camp_prize_model
@random_discount_dst_camp_prize_model.setter
def random_discount_dst_camp_prize_model(self, value):
if isinstance(value, RandomDiscountDstCampPrizeModel):
self._random_discount_dst_camp_prize_model = value
else:
self._random_discount_dst_camp_prize_model = RandomDiscountDstCampPrizeModel.from_alipay_dict(value)
@property
def reduce_dst_camp_prize_model(self):
return self._reduce_dst_camp_prize_model
@reduce_dst_camp_prize_model.setter
def reduce_dst_camp_prize_model(self, value):
if isinstance(value, ReduceDstCampPrizeModel):
self._reduce_dst_camp_prize_model = value
else:
self._reduce_dst_camp_prize_model = ReduceDstCampPrizeModel.from_alipay_dict(value)
@property
def reduce_to_discount_dst_camp_prize_model(self):
return self._reduce_to_discount_dst_camp_prize_model
@reduce_to_discount_dst_camp_prize_model.setter
def reduce_to_discount_dst_camp_prize_model(self, value):
if isinstance(value, ReduceToDiscountDstCampPrizeModel):
self._reduce_to_discount_dst_camp_prize_model = value
else:
self._reduce_to_discount_dst_camp_prize_model = ReduceToDiscountDstCampPrizeModel.from_alipay_dict(value)
@property
def reset_zero_dst_camp_prize_model(self):
return self._reset_zero_dst_camp_prize_model
@reset_zero_dst_camp_prize_model.setter
def reset_zero_dst_camp_prize_model(self, value):
if isinstance(value, ResetZeroDstCampPrizeModel):
self._reset_zero_dst_camp_prize_model = value
else:
self._reset_zero_dst_camp_prize_model = ResetZeroDstCampPrizeModel.from_alipay_dict(value)
@property
def single_dst_camp_prize_model(self):
return self._single_dst_camp_prize_model
@single_dst_camp_prize_model.setter
def single_dst_camp_prize_model(self, value):
if isinstance(value, SingleDstCampPrizeModel):
self._single_dst_camp_prize_model = value
else:
self._single_dst_camp_prize_model = SingleDstCampPrizeModel.from_alipay_dict(value)
@property
def staged_discount_dst_camp_prize_model(self):
return self._staged_discount_dst_camp_prize_model
@staged_discount_dst_camp_prize_model.setter
def staged_discount_dst_camp_prize_model(self, value):
if isinstance(value, StagedDiscountDstCampPrizeModel):
self._staged_discount_dst_camp_prize_model = value
else:
self._staged_discount_dst_camp_prize_model = StagedDiscountDstCampPrizeModel.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.camp_code:
if hasattr(self.camp_code, 'to_alipay_dict'):
params['camp_code'] = self.camp_code.to_alipay_dict()
else:
params['camp_code'] = self.camp_code
if self.camp_desc:
if hasattr(self.camp_desc, 'to_alipay_dict'):
params['camp_desc'] = self.camp_desc.to_alipay_dict()
else:
params['camp_desc'] = self.camp_desc
if self.camp_id:
if hasattr(self.camp_id, 'to_alipay_dict'):
params['camp_id'] = self.camp_id.to_alipay_dict()
else:
params['camp_id'] = self.camp_id
if self.camp_name:
if hasattr(self.camp_name, 'to_alipay_dict'):
params['camp_name'] = self.camp_name.to_alipay_dict()
else:
params['camp_name'] = self.camp_name
if self.camp_slogan:
if hasattr(self.camp_slogan, 'to_alipay_dict'):
params['camp_slogan'] = self.camp_slogan.to_alipay_dict()
else:
params['camp_slogan'] = self.camp_slogan
if self.discount_dst_camp_prize_model:
if hasattr(self.discount_dst_camp_prize_model, 'to_alipay_dict'):
params['discount_dst_camp_prize_model'] = self.discount_dst_camp_prize_model.to_alipay_dict()
else:
params['discount_dst_camp_prize_model'] = self.discount_dst_camp_prize_model
if self.dst_camp_rule_model:
if hasattr(self.dst_camp_rule_model, 'to_alipay_dict'):
params['dst_camp_rule_model'] = self.dst_camp_rule_model.to_alipay_dict()
else:
params['dst_camp_rule_model'] = self.dst_camp_rule_model
if self.dst_camp_sub_time_model_list:
if isinstance(self.dst_camp_sub_time_model_list, list):
for i in range(0, len(self.dst_camp_sub_time_model_list)):
element = self.dst_camp_sub_time_model_list[i]
if hasattr(element, 'to_alipay_dict'):
self.dst_camp_sub_time_model_list[i] = element.to_alipay_dict()
if hasattr(self.dst_camp_sub_time_model_list, 'to_alipay_dict'):
params['dst_camp_sub_time_model_list'] = self.dst_camp_sub_time_model_list.to_alipay_dict()
else:
params['dst_camp_sub_time_model_list'] = self.dst_camp_sub_time_model_list
if self.gmt_end:
if hasattr(self.gmt_end, 'to_alipay_dict'):
params['gmt_end'] = self.gmt_end.to_alipay_dict()
else:
params['gmt_end'] = self.gmt_end
if self.gmt_start:
if hasattr(self.gmt_start, 'to_alipay_dict'):
params['gmt_start'] = self.gmt_start.to_alipay_dict()
else:
params['gmt_start'] = self.gmt_start
if self.operate_type:
if hasattr(self.operate_type, 'to_alipay_dict'):
params['operate_type'] = self.operate_type.to_alipay_dict()
else:
params['operate_type'] = self.operate_type
if self.prize_type:
if hasattr(self.prize_type, 'to_alipay_dict'):
params['prize_type'] = self.prize_type.to_alipay_dict()
else:
params['prize_type'] = self.prize_type
if self.random_discount_dst_camp_prize_model:
if hasattr(self.random_discount_dst_camp_prize_model, 'to_alipay_dict'):
params['random_discount_dst_camp_prize_model'] = self.random_discount_dst_camp_prize_model.to_alipay_dict()
else:
params['random_discount_dst_camp_prize_model'] = self.random_discount_dst_camp_prize_model
if self.reduce_dst_camp_prize_model:
if hasattr(self.reduce_dst_camp_prize_model, 'to_alipay_dict'):
params['reduce_dst_camp_prize_model'] = self.reduce_dst_camp_prize_model.to_alipay_dict()
else:
params['reduce_dst_camp_prize_model'] = self.reduce_dst_camp_prize_model
if self.reduce_to_discount_dst_camp_prize_model:
if hasattr(self.reduce_to_discount_dst_camp_prize_model, 'to_alipay_dict'):
params['reduce_to_discount_dst_camp_prize_model'] = self.reduce_to_discount_dst_camp_prize_model.to_alipay_dict()
else:
params['reduce_to_discount_dst_camp_prize_model'] = self.reduce_to_discount_dst_camp_prize_model
if self.reset_zero_dst_camp_prize_model:
if hasattr(self.reset_zero_dst_camp_prize_model, 'to_alipay_dict'):
params['reset_zero_dst_camp_prize_model'] = self.reset_zero_dst_camp_prize_model.to_alipay_dict()
else:
params['reset_zero_dst_camp_prize_model'] = self.reset_zero_dst_camp_prize_model
if self.single_dst_camp_prize_model:
if hasattr(self.single_dst_camp_prize_model, 'to_alipay_dict'):
params['single_dst_camp_prize_model'] = self.single_dst_camp_prize_model.to_alipay_dict()
else:
params['single_dst_camp_prize_model'] = self.single_dst_camp_prize_model
if self.staged_discount_dst_camp_prize_model:
if hasattr(self.staged_discount_dst_camp_prize_model, 'to_alipay_dict'):
params['staged_discount_dst_camp_prize_model'] = self.staged_discount_dst_camp_prize_model.to_alipay_dict()
else:
params['staged_discount_dst_camp_prize_model'] = self.staged_discount_dst_camp_prize_model
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingCampaignDiscountOperateModel()
if 'camp_code' in d:
o.camp_code = d['camp_code']
if 'camp_desc' in d:
o.camp_desc = d['camp_desc']
if 'camp_id' in d:
o.camp_id = d['camp_id']
if 'camp_name' in d:
o.camp_name = d['camp_name']
if 'camp_slogan' in d:
o.camp_slogan = d['camp_slogan']
if 'discount_dst_camp_prize_model' in d:
o.discount_dst_camp_prize_model = d['discount_dst_camp_prize_model']
if 'dst_camp_rule_model' in d:
o.dst_camp_rule_model = d['dst_camp_rule_model']
if 'dst_camp_sub_time_model_list' in d:
o.dst_camp_sub_time_model_list = d['dst_camp_sub_time_model_list']
if 'gmt_end' in d:
o.gmt_end = d['gmt_end']
if 'gmt_start' in d:
o.gmt_start = d['gmt_start']
if 'operate_type' in d:
o.operate_type = d['operate_type']
if 'prize_type' in d:
o.prize_type = d['prize_type']
if 'random_discount_dst_camp_prize_model' in d:
o.random_discount_dst_camp_prize_model = d['random_discount_dst_camp_prize_model']
if 'reduce_dst_camp_prize_model' in d:
o.reduce_dst_camp_prize_model = d['reduce_dst_camp_prize_model']
if 'reduce_to_discount_dst_camp_prize_model' in d:
o.reduce_to_discount_dst_camp_prize_model = d['reduce_to_discount_dst_camp_prize_model']
if 'reset_zero_dst_camp_prize_model' in d:
o.reset_zero_dst_camp_prize_model = d['reset_zero_dst_camp_prize_model']
if 'single_dst_camp_prize_model' in d:
o.single_dst_camp_prize_model = d['single_dst_camp_prize_model']
if 'staged_discount_dst_camp_prize_model' in d:
o.staged_discount_dst_camp_prize_model = d['staged_discount_dst_camp_prize_model']
return o
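# Hedged round-trip sketch (not part of the generated model); field values
# are illustrative only.
if __name__ == "__main__":
    payload = {
        "camp_id": "20230801000001",
        "camp_name": "demo campaign",
        "operate_type": "create",
        "prize_type": "discount",
    }
    model = AlipayMarketingCampaignDiscountOperateModel.from_alipay_dict(payload)
    assert model.camp_name == "demo campaign"
    assert model.to_alipay_dict() == payload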
|
PypiClean
|
/increc-0.0.16.tar.gz/increc-0.0.16/algorithms/collaborative_filtering/matrix_factorization/explicit_feedback/matrix_factorization_prep.py
|
from algorithms.collaborative_filtering.matrix_factorization import SGD
from .preprocess_matrix import PreprocessMatrix
class MFExplicitPrepSGD(PreprocessMatrix, SGD):
"""
Description
The explicit matrix factorization algorithm
with matrix preprocessing and stochastic gradient descent
which extends PreprocessMatrix and SGD.
"""
def __init__(
self, matrix=[], u=[], v=[], user_avg=[],
item_avg=[], lf=2, prep=[], lr=0.01, reg=0.1):
"""
Description
MFExplicitPrepSGD's constructor.
Arguments
:param matrix: The ratings matrix.
:type matrix: list
:param u: U matrix.
:type u: DynamicArray
:param v: V matrix.
:type v: DynamicArray
            :param lf: The number of latent factors.
            :type lf: int
:param user_avg: The average ratings of users.
:type user_avg: defaultdict(int)
:param item_avg: The average ratings of items.
:type item_avg: defaultdict(int)
:param prep: The preprocessed matrix.
:type prep: DynamicArray
            :param lr: The learning rate.
            :type lr: float
            :param reg: The regularization factor.
            :type reg: float
"""
super().__init__(matrix, u, v, lf, user_avg, item_avg, prep)
SGD.__init__(self, lr, reg)
self._initial_training()
def _initial_training(self):
"""
Description
A function which updates the U, V matrices with
the ratings matrix.
"""
for user_id, ratings in enumerate(self.matrix):
for item_id, value in enumerate(ratings):
if value is not None:
raw_value = self.preprocessed_matrix[user_id][item_id]
error = raw_value - self.predict_prep(user_id, item_id)
self._update_factors(user_id, item_id, error)
def new_rating(self, rating):
"""
Description
            Processes a new rating. Expects a tuple
            (user_id, item_id, value).
Arguments
:param rating: The rating tuple.
:type rating: tuple
"""
user_id, item_id, value = rating
self.items.add(item_id)
self.users.add(user_id)
self.matrix[user_id][item_id] = value
self.inc_avg(user_id, item_id, value)
raw_value = value - 0.5*(self.user_avg[user_id] + self.item_avg[
item_id])
self.preprocessed_matrix[user_id][item_id] = raw_value
error = raw_value - self.predict_prep(user_id, item_id)
self._update_factors(user_id, item_id, error)
def recommend(self, user_id, n_rec=20, repeated=False):
"""
Description
A function which returns recommendations for a user.
Arguments
:param user_id: The user identifier.
:type user_id: int
:param n_rec: The number of items to recommend.
:type n_rec: int
:param repeated: Variable which defines if already rated products\
can be recommended.
:type repeated: boolean
"""
return super().recommend(user_id, n_rec, lambda item_id: self.predict(
user_id, item_id), repeated)
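# A minimal usage sketch (hedged: the tiny ratings matrix and the call
# arguments below are illustrative only; the empty defaults are materialized
# by the parent PreprocessMatrix/SGD constructors):
if __name__ == "__main__":
    model = MFExplicitPrepSGD(matrix=[[5, None], [None, 3]], lf=2)
    model.new_rating((0, 1, 4))         # user 0 rates item 1 with value 4
    print(model.recommend(0, n_rec=1))  # top recommendation for user 0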
|
PypiClean
|
/circuitpython-ticstepper-2.0.3.tar.gz/circuitpython-ticstepper-2.0.3/circuitpython_ticstepper/i2c.py
|
import struct
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_struct import Struct
from circuitpython_ticstepper import TicMotor
from circuitpython_ticstepper.constants import StepMode, OperationMode
try:
from typing import Optional, Type, List
from circuitpython_typing import ReadableBuffer
from busio import I2C
from circuitpython_ticstepper.constants import ( # pylint: disable=ungrouped-imports
StepModeValues,
)
except ImportError:
pass
_CMD_STEP_MODE = const(0x94)
_CMD_RESET = const(0xB0)
_CMD_CLEAR_ERROR = const(0x8A)
_CMD_MAX_SPEED = const(0xE6)
_CMD_HALT_SET = const(0xEC)
_CMD_MOVE = const(0xE0)
_CMD_DRIVE = const(0xE3)
_CMD_GET_VAR = const(0xA1)
_CMD_MAX_ACCEL = const(0xEA)
_CMD_MAX_DECEL = const(0xE9)
_CMD_CURRENT_LIMIT = const(0x91)
_CMD_ENERGIZE = const(0x85)
_CMD_DEENERGIZE = const(0x86)
_OFFSET_CURRENT_VELOCITY = const(0x26)
_OFFSET_STEP_MODE = const(0x49)
_OFFSET_UPTIME = const(0x35)
_OFFSET_MAX_SPEED = const(0x47)
_OFFSET_MAX_ACCEL = const(0x4F)
_OFFSET_MAX_DECEL = const(0x4B)
_OFFSET_CURRENT_LIMIT = const(0x40)
_OFFSET_ENERGIZED = const(0x00)
class ClearMSBByteStruct:
"""
    Single signed-byte register that is readable and writeable.
    Values are single-element sequences that map to the value in the defined
    struct. See the struct module documentation for format strings and their
    possible value types.
    :param int register_address: The register (command) address to read from and write to
"""
def __init__(self, register_address: int) -> None:
self.format = "<b"
self.buffer = bytearray(1 + struct.calcsize(self.format))
self.buffer[0] = register_address
def __get__(
self, obj: "TicMotorI2C", objtype: Optional[Type["TicMotorI2C"]] = None
) -> List[int]:
with obj.i2c_device as i2c:
i2c.write_then_readinto(self.buffer, self.buffer, out_end=1, in_start=1)
return struct.unpack_from(self.format, memoryview(self.buffer)[1:])
def __set__(self, obj: "TicMotorI2C", value: ReadableBuffer) -> None:
struct.pack_into(self.format, self.buffer, 1, *value)
with obj.i2c_device as i2c:
i2c.write(self.buffer)
# pylint: disable=too-many-instance-attributes
class TicMotorI2C(TicMotor):
"""Generic TIC motor driver contolled via I2C, this class is not intended
to be instanced directly due to various differences between motor controllers
- you should use a specific one instead (like ``TIC36v4``)
:param I2C i2c: The I2C bus object
:param int address: The I2C address of the motor driver
:param StepModeValues step_mode: The step mode to use
"""
_get_var_32bit_signed_reg = Struct(_CMD_GET_VAR, "<i")
_get_var_32bit_unsigned_reg = Struct(_CMD_GET_VAR, "<I")
_get_var_8bit_unsigned_reg = Struct(_CMD_GET_VAR, "<B")
_step_mode_reg = ClearMSBByteStruct(_CMD_STEP_MODE)
_max_speed_reg = Struct(_CMD_MAX_SPEED, "<I")
_halt_set_reg = Struct(_CMD_HALT_SET, "<i")
_move_reg = Struct(_CMD_MOVE, "<i")
_drive_reg = Struct(_CMD_DRIVE, "<i")
_max_accel_reg = Struct(_CMD_MAX_ACCEL, "<I")
_max_decel_reg = Struct(_CMD_MAX_DECEL, "<I")
_current_limit_reg = ClearMSBByteStruct(_CMD_CURRENT_LIMIT)
def __init__(
self, i2c: I2C, address: int = 0x0E, step_mode: StepModeValues = StepMode.FULL
) -> None:
self.i2c_device = I2CDevice(i2c, address)
super().__init__(step_mode)
self.clear()
@property
def step_mode(self) -> StepModeValues:
"""Gets and sets the stepper step mode"""
self._get_var_8bit_unsigned_reg = [_OFFSET_STEP_MODE]
return StepMode.get_by_enum(self._get_var_8bit_unsigned_reg[0])
@step_mode.setter
def step_mode(self, mode: StepModeValues) -> None:
self._step_mode = mode
self._step_mode_reg = [mode.value]
def clear(self) -> None:
"""Clears and reinits the stepper motor"""
self.reset()
def _quick_write(self, cmd: int) -> None:
packed_cmd = struct.pack("<B", cmd)
with self.i2c_device as i2c:
i2c.write(packed_cmd)
def reset(self) -> None:
"""Resets the motor driver"""
self._quick_write(_CMD_RESET)
def reinit(self) -> None:
"""Reinitializes the motor driver"""
self.step_mode = self._step_mode
def clear_error(self) -> None:
"""Clears errors for the motor driver"""
self._quick_write(_CMD_CLEAR_ERROR)
@property
def operation_mode(self) -> int:
"""Get the current operation mode"""
self._get_var_8bit_unsigned_reg = [_OFFSET_ENERGIZED]
return self._get_var_8bit_unsigned_reg[0]
@property
def energized(self) -> bool:
"""Whether the motor coils are energized"""
state = self.operation_mode
if state == OperationMode.DEENERGIZED:
return False
if state in (OperationMode.STARTING_UP, OperationMode.NORMAL):
return True
raise RuntimeError("Some other operation mode was detected")
@energized.setter
def energized(self, setting: bool) -> None:
cmd = _CMD_ENERGIZE if setting else _CMD_DEENERGIZE
self._quick_write(cmd)
@property
def max_speed(self) -> float:
"""Gets and sets the maximum speed for the motor"""
self._get_var_32bit_unsigned_reg = [_OFFSET_MAX_SPEED]
pps = self._get_var_32bit_unsigned_reg[0]
return self._pps_to_rpm(pps)
@max_speed.setter
def max_speed(self, rpm: float) -> None:
# if not -self.MAX_RPM <= rpm <= self.MAX_RPM:
# raise ValueError("Given speed is over the RPM threshold")
pulse_speed = self._rpm_to_pps(rpm)
self._max_speed_reg = [pulse_speed]
@property
def max_accel(self) -> float:
"""The maximum acceleration the motor can experience in rpm/s"""
self._get_var_32bit_unsigned_reg = [_OFFSET_MAX_ACCEL]
pps2 = self._get_var_32bit_unsigned_reg[0]
return self._pps_to_rpm(pps2)
@max_accel.setter
def max_accel(self, rpms: float) -> None:
pulse_accel = self._rpm_to_pps(rpms)
self._max_accel_reg = [pulse_accel]
@property
def max_decel(self) -> float:
"""The maximum deceleration the motor can experience in rpm/s"""
self._get_var_32bit_unsigned_reg = [_OFFSET_MAX_DECEL]
pps2 = self._get_var_32bit_unsigned_reg[0]
return self._pps_to_rpm(pps2)
@max_decel.setter
def max_decel(self, rpms: float) -> None:
pulse_decel = self._rpm_to_pps(rpms)
self._max_decel_reg = [pulse_decel]
def halt_and_set_position(self, position: int = 0) -> None:
"""Stops the motor and keeps coils energized"""
self._halt_set_reg = [position]
self.step_mode = self._step_mode
self._rpm = 0
def move(self, units: int) -> None:
"""Moves the given number of steps/microsteps
:param int units: The number of steps/microsteps to move
"""
self._move_reg = [units]
def drive(self, rpm: float) -> None:
"""Drives the motor at a given speed
:param float rpm: The speed to move the motor in RPM
"""
# if not -self.MAX_RPM <= rpm <= self.MAX_RPM:
# raise ValueError("Cannot set speed above {} RPM".format(self.MAX_RPM))
self._drive_reg = [self._rpm_to_pps(rpm)]
self._rpm = rpm
@property
def is_moving(self) -> bool:
"""Whether the stepper motor is actively moving"""
self._get_var_32bit_signed_reg = [_OFFSET_CURRENT_VELOCITY]
return self._get_var_32bit_signed_reg[0] != 0
@property
def uptime(self) -> float:
"""The number of seconds the motor controller has been up. This is
not affected by a reset command
"""
self._get_var_32bit_unsigned_reg = [_OFFSET_UPTIME]
return self._get_var_32bit_unsigned_reg[0] / 1000
@property
    def current_limit(self) -> float:
        """Gets and sets the current limit for the motor, in amps"""
self._get_var_8bit_unsigned_reg = [_OFFSET_CURRENT_LIMIT]
response = self._get_var_8bit_unsigned_reg[0]
return self.convert_current_enum(response)
@current_limit.setter
def current_limit(self, current: float) -> None:
self._current_limit_reg = [self.convert_current_value(current)]
def convert_current_value(self, current: float) -> int:
"""Converts the desired current into the TIC value, rounds down
to nearest acceptable value
"""
raise NotImplementedError("Must be implemented in a TIC motor subclass")
def convert_current_enum(self, enum_value: int) -> float:
"""Converts the desired TIC enumeration into the corresponding
current limit
"""
raise NotImplementedError("Must be implemented in a TIC motor subclass")
|
PypiClean
|
/future-nodefix-0.17.0.tar.gz/future-nodefix-0.17.0/src/past/builtins/misc.py
|
from __future__ import unicode_literals
import sys
import inspect
try:  # the ABCs moved to collections.abc in Python 3.3
    from collections.abc import Mapping
except ImportError:  # Python 2
    from collections import Mapping
from future.utils import PY3, exec_
if PY3:
import builtins
def apply(f, *args, **kw):
return f(*args, **kw)
from past.builtins import str as oldstr
def chr(i):
"""
        Return a byte-string of one character with ordinal i; 0 <= i <= 255
"""
return oldstr(bytes((i,)))
def cmp(x, y):
"""
cmp(x, y) -> integer
Return negative if x<y, zero if x==y, positive if x>y.
"""
return (x > y) - (x < y)
from sys import intern
def oct(number):
"""oct(number) -> string
Return the octal representation of an integer
"""
return '0' + builtins.oct(number)[2:]
raw_input = input
from imp import reload
unicode = str
unichr = chr
xrange = range
else:
import __builtin__
apply = __builtin__.apply
chr = __builtin__.chr
cmp = __builtin__.cmp
execfile = __builtin__.execfile
intern = __builtin__.intern
oct = __builtin__.oct
raw_input = __builtin__.raw_input
reload = __builtin__.reload
unicode = __builtin__.unicode
unichr = __builtin__.unichr
xrange = __builtin__.xrange
if PY3:
def execfile(filename, myglobals=None, mylocals=None):
"""
Read and execute a Python script from a file in the given namespaces.
The globals and locals are dictionaries, defaulting to the current
globals and locals. If only globals is given, locals defaults to it.
"""
if myglobals is None:
# There seems to be no alternative to frame hacking here.
caller_frame = inspect.stack()[1]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
elif mylocals is None:
# Only if myglobals is given do we set mylocals to it.
mylocals = myglobals
if not isinstance(myglobals, Mapping):
raise TypeError('globals must be a mapping')
if not isinstance(mylocals, Mapping):
raise TypeError('locals must be a mapping')
with open(filename, "rbU") as fin:
source = fin.read()
code = compile(source, filename, "exec")
exec_(code, myglobals, mylocals)
if PY3:
__all__ = ['apply', 'chr', 'cmp', 'execfile', 'intern', 'raw_input',
'reload', 'unichr', 'unicode', 'xrange']
else:
__all__ = []
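# A small behavioural self-check of the shims above, added purely as an
# illustration; it runs on both Python 2 and Python 3:
if __name__ == '__main__':
    assert cmp(1, 2) == -1 and cmp(2, 2) == 0 and cmp(3, 2) == 1
    assert oct(8) == '010'            # old-style '0'-prefixed octal string
    assert unichr(97) == u'a'
    assert list(xrange(3)) == [0, 1, 2]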
|
PypiClean
|
/cupy_cuda111-12.2.0-cp39-cp39-manylinux2014_x86_64.whl/cupy/_creation/matrix.py
|
import numpy
import cupy
from cupy import _core
def diag(v, k=0):
"""Returns a diagonal or a diagonal array.
Args:
v (array-like): Array or array-like object.
k (int): Index of diagonals. Zero indicates the main diagonal, a
positive value an upper diagonal, and a negative value a lower
diagonal.
Returns:
cupy.ndarray: If ``v`` indicates a 1-D array, then it returns a 2-D
array with the specified diagonal filled by ``v``. If ``v`` indicates a
        2-D array, then it returns the specified diagonal of ``v``. In the latter
case, if ``v`` is a :class:`cupy.ndarray` object, then its view is
returned.
.. seealso:: :func:`numpy.diag`
"""
if isinstance(v, cupy.ndarray):
ndim = v.ndim
else:
ndim = numpy.ndim(v)
if ndim == 1:
v = cupy.array(v)
if ndim == 2:
# to save bandwidth, don't copy non-diag elements to GPU
v = numpy.array(v)
if ndim == 1:
size = v.size + abs(k)
ret = cupy.zeros((size, size), dtype=v.dtype)
ret.diagonal(k)[:] = v
return ret
elif ndim == 2:
return cupy.array(v.diagonal(k))
else:
raise ValueError('Input must be 1- or 2-d.')
def diagflat(v, k=0):
"""Creates a diagonal array from the flattened input.
Args:
v (array-like): Array or array-like object.
k (int): Index of diagonals. See :func:`cupy.diag` for detail.
Returns:
cupy.ndarray: A 2-D diagonal array with the diagonal copied from ``v``.
.. seealso:: :func:`numpy.diagflat`
"""
if numpy.isscalar(v):
v = numpy.asarray(v)
return cupy.diag(v.ravel(), k)
_tri_kernel = _core.ElementwiseKernel(
'int32 m, int32 k',
'T out',
'''
int row = i / m;
int col = i % m;
out = (col <= row + k);
''',
'cupy_tri',
)
def tri(N, M=None, k=0, dtype=float):
"""Creates an array with ones at and below the given diagonal.
Args:
N (int): Number of rows.
M (int): Number of columns. ``M == N`` by default.
k (int): The sub-diagonal at and below which the array is filled. Zero
is the main diagonal, a positive value is above it, and a negative
value is below.
dtype: Data type specifier.
Returns:
cupy.ndarray: An array with ones at and below the given diagonal.
.. seealso:: :func:`numpy.tri`
"""
if M is None:
M = N
out = cupy.empty((N, M), dtype=dtype)
return _tri_kernel(M, k, out)
def tril(m, k=0):
"""Returns a lower triangle of an array.
Args:
m (array-like): Array or array-like object.
k (int): The diagonal above which to zero elements. Zero is the main
diagonal, a positive value is above it, and a negative value is
below.
Returns:
cupy.ndarray: A lower triangle of an array.
.. seealso:: :func:`numpy.tril`
"""
m = cupy.asarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return cupy.where(mask, m, m.dtype.type(0))
def triu(m, k=0):
"""Returns an upper triangle of an array.
Args:
m (array-like): Array or array-like object.
k (int): The diagonal below which to zero elements. Zero is the main
diagonal, a positive value is above it, and a negative value is
below.
Returns:
cupy.ndarray: An upper triangle of an array.
.. seealso:: :func:`numpy.triu`
"""
m = cupy.asarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return cupy.where(mask, m.dtype.type(0), m)
def vander(x, N=None, increasing=False):
"""Returns a Vandermonde matrix.
Args:
x (array-like): 1-D array or array-like object.
N (int, optional): Number of columns in the output.
``N = len(x)`` by default.
increasing (bool, optional): Order of the powers of the columns.
            If True, the powers increase from left to right,
if False (the default) they are reversed.
Returns:
cupy.ndarray: A Vandermonde matrix.
.. seealso:: :func:`numpy.vander`
"""
x = cupy.asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = cupy.empty((len(x), N), dtype=numpy.promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
cupy.power(x.reshape(-1, 1), cupy.arange(N), out=tmp)
return v
# TODO(okuta): Implement mat
# TODO(okuta): Implement bmat
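# Illustrative round-trip on small inputs (assumes CuPy with a working CUDA
# device; results mirror the NumPy equivalents):
if __name__ == '__main__':
    a = cupy.arange(9).reshape(3, 3)
    assert (diag(diag(a)).diagonal() == a.diagonal()).all()
    assert int(tri(3).sum()) == 6         # ones at and below the main diagonal
    print(vander(cupy.array([1, 2, 3])))  # decreasing powers by default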
|
PypiClean
|
/reportlab_x-3.4.0.2-cp34-cp34m-macosx_10_10_x86_64.whl/reportlab/pdfbase/_fontdata_enc_zapfdingbats.py
|
ZapfDingbatsEncoding = ( None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
'space', 'a1', 'a2', 'a202', 'a3', 'a4', 'a5', 'a119', 'a118', 'a117', 'a11', 'a12', 'a13', 'a14',
'a15', 'a16', 'a105', 'a17', 'a18', 'a19', 'a20', 'a21', 'a22', 'a23', 'a24', 'a25', 'a26', 'a27',
'a28', 'a6', 'a7', 'a8', 'a9', 'a10', 'a29', 'a30', 'a31', 'a32', 'a33', 'a34', 'a35', 'a36',
'a37', 'a38', 'a39', 'a40', 'a41', 'a42', 'a43', 'a44', 'a45', 'a46', 'a47', 'a48', 'a49', 'a50',
'a51', 'a52', 'a53', 'a54', 'a55', 'a56', 'a57', 'a58', 'a59', 'a60', 'a61', 'a62', 'a63', 'a64',
'a65', 'a66', 'a67', 'a68', 'a69', 'a70', 'a71', 'a72', 'a73', 'a74', 'a203', 'a75', 'a204', 'a76',
'a77', 'a78', 'a79', 'a81', 'a82', 'a83', 'a84', 'a97', 'a98', 'a99', 'a100', None, 'a89', 'a90',
'a93', 'a94', 'a91', 'a92', 'a205', 'a85', 'a206', 'a86', 'a87', 'a88', 'a95', 'a96', None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, 'a101', 'a102', 'a103', 'a104', 'a106', 'a107', 'a108', 'a112', 'a111', 'a110', 'a109',
'a120', 'a121', 'a122', 'a123', 'a124', 'a125', 'a126', 'a127', 'a128', 'a129', 'a130', 'a131',
'a132', 'a133', 'a134', 'a135', 'a136', 'a137', 'a138', 'a139', 'a140', 'a141', 'a142', 'a143',
'a144', 'a145', 'a146', 'a147', 'a148', 'a149', 'a150', 'a151', 'a152', 'a153', 'a154', 'a155',
'a156', 'a157', 'a158', 'a159', 'a160', 'a161', 'a163', 'a164', 'a196', 'a165', 'a192', 'a166',
'a167', 'a168', 'a169', 'a170', 'a171', 'a172', 'a173', 'a162', 'a174', 'a175', 'a176', 'a177',
'a178', 'a179', 'a193', 'a180', 'a199', 'a181', 'a200', 'a182', None, 'a201', 'a183', 'a184',
'a197', 'a185', 'a194', 'a198', 'a186', 'a195', 'a187', 'a188', 'a189', 'a190', 'a191', None)
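# Lookup sketch: the tuple maps single-byte character codes to ZapfDingbats
# glyph names, with ``None`` marking unencoded positions, e.g.:
#     ZapfDingbatsEncoding[0x20]  -> 'space'
#     ZapfDingbatsEncoding[0x61]  -> 'a60'
#     ZapfDingbatsEncoding[0x00]  -> None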
|
PypiClean
|
/django-url-sso-0.9.tar.gz/django-url-sso-0.9/url_sso/utils.py
|
from django.conf import settings as django_settings
try:  # django.utils.importlib was removed in Django 1.9
    from django.utils.importlib import import_module
except ImportError:
    from importlib import import_module
from django.core.cache import cache
from suds.cache import Cache
class Singleton(type):
"""
Singleton metaclass.
Source:
http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(
*args, **kwargs
)
return cls._instances[cls]
class SettingsBase(object):
"""
A settings object that proxies settings and handles defaults, inspired
by `django-appconf` and the way it works in `django-rest-framework`.
By default, a single instance of this class is created as `<app>_settings`,
from which `<APP>_SETTING_NAME` can be accessed as `SETTING_NAME`, i.e.::
from myapp.settings import myapp_settings
if myapp_settings.SETTING_NAME:
# DO FUNKY DANCE
If a setting has not been explicitly defined in Django's settings, defaults
can be specified as `DEFAULT_SETTING_NAME` class variable or property.
"""
__metaclass__ = Singleton
def __init__(self):
"""
Assert app-specific prefix.
"""
assert hasattr(self, 'settings_prefix'), 'No prefix specified.'
def __getattr__(self, attr):
"""
        Return Django setting `PREFIX_SETTING` if explicitly specified,
        otherwise fall back to the `DEFAULT_SETTING` class attribute if defined.
"""
if attr.isupper():
# Require settings to have uppercase characters
try:
setting = getattr(
django_settings,
'%s_%s' % (self.settings_prefix, attr),
)
except AttributeError:
if not attr.startswith('DEFAULT_'):
setting = getattr(self, 'DEFAULT_%s' % attr)
else:
raise
return setting
else:
# Default behaviour
raise AttributeError(
'No setting or default available for \'%s\'' % attr
)
class SudsDjangoCache(Cache):
"""
Implement the suds cache interface using Django caching.
Source: https://github.com/dpoirier/basket/blob/master/news/backends/exacttarget.py
"""
def __init__(self, days=None, *args, **kwargs):
if days:
self.timeout = 24 * 60 * 60 * days
else:
self.timeout = None
def _cache_key(self, id):
return "suds-%s" % id
def get(self, id):
return cache.get(self._cache_key(id))
def put(self, id, value):
cache.set(self._cache_key(id), value, self.timeout)
def purge(self, id):
cache.delete(self._cache_key(id))
def import_object(from_path):
""" Given an import path, return the object it represents. """
module, attr = from_path.rsplit(".", 1)
mod = import_module(module)
return getattr(mod, attr)
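# Hedged illustration (hypothetical names): an app defines its settings proxy
# by subclassing SettingsBase; ``URL_SSO_LOGIN_URL`` from Django's settings,
# or the default below, is then reachable as ``example_settings.LOGIN_URL``.
class _ExampleSettings(SettingsBase):
    settings_prefix = 'URL_SSO'
    DEFAULT_LOGIN_URL = '/login/'
example_settings = _ExampleSettings()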
|
PypiClean
|
/box_embeddings-0.1.0.tar.gz/box_embeddings-0.1.0/box_embeddings/initializations/uniform_boxes.py
|
from typing import Tuple
import numpy as np
import torch
from box_embeddings.parameterizations.box_tensor import BoxFactory, BoxTensor
from .initializer import BoxInitializer
def uniform_boxes(
dimensions: int,
num_boxes: int,
minimum: float = 0.0,
maximum: float = 1.0,
delta_min: float = 0.01,
delta_max: float = 0.5,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Creates uniform boxes such that each box is inside the
bounding box defined by (minimum,maximum) in each dimension.
Args:
dimensions: number of dimensions for the box
num_boxes: number of boxes to be created
minimum: min dimension of the bounding box in each dimension.
maximum: maximum dimension of the bounding box in each dimension.
        delta_min: minimum side length of each box, per dimension.
        delta_max: maximum side length of each box, per dimension.
Returns:
z, Z tensors
Raises:
        ValueError: if the delta and bounding-box constraints are
            mutually inconsistent (see the checks below).
"""
if delta_min <= 0:
raise ValueError(f"Delta min should be >0 but is {delta_min}")
if (delta_max - delta_min) <= 0:
raise ValueError(
f"Expected: delta_max {delta_max} > delta_min {delta_min} "
)
if delta_max > (maximum - minimum):
raise ValueError(
f"Expected: delta_max {delta_max} <= (max-min) {maximum-minimum}"
)
if maximum <= minimum:
raise ValueError(f"Expected: maximum {maximum} > minimum {minimum}")
centers = np.random.uniform(
minimum + delta_max / 2.0 + 1e-8,
maximum - delta_max / 2.0 - 1e-8,
size=(num_boxes, dimensions),
)
deltas = np.random.uniform(
delta_min, delta_max - 1e-8, size=(num_boxes, dimensions)
)
z = centers - deltas / 2.0 + 1e-8
Z = centers + deltas / 2.0 - 1e-8
assert (z >= minimum).all()
assert (Z <= maximum).all()
return torch.tensor(z), torch.tensor(Z)
class UniformBoxInitializer(BoxInitializer):
"""Docstring for UniformBoxInitializer."""
def __init__(
self,
dimensions: int,
num_boxes: int,
box_type_factory: BoxFactory,
minimum: float = 0.0,
maximum: float = 1.0,
delta_min: float = 0.01,
delta_max: float = 0.5,
) -> None:
"""TODO: Docstring for __init__.
Args:
dimensions: TODO
num_boxes: TODO
box_type_factory: TODO
minimum: TODO
maximum: TODO
delta_min: TODO
delta_max: TODO
Returns: (None)
"""
self.dimensions = dimensions
self.num_boxes = num_boxes
self.minimum = minimum
self.maximum = maximum
self.delta_min = delta_min
self.delta_max = delta_max
self.box_type_factory = box_type_factory
def __call__(self, t: torch.Tensor) -> None: # type:ignore
z, Z = uniform_boxes(
self.dimensions,
self.num_boxes,
self.minimum,
self.maximum,
self.delta_min,
self.delta_max,
)
with torch.no_grad():
W = self.box_type_factory.box_subclass.W(z, Z, **self.box_type_factory.kwargs_dict) # type: ignore
if W.shape == t.shape:
# print(t,W)
t.copy_(W)
else:
emb = self.box_type_factory.box_subclass.zZ_to_embedding( # type:ignore
z, Z, **self.box_type_factory.kwargs_dict
)
if emb.shape == t.shape:
t.copy_(emb)
else:
raise ValueError(
f"Shape of weights {t.shape} is not suitable "
"for assigning W or embedding"
)
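# Quick standalone check of uniform_boxes (illustrative sizes only):
if __name__ == "__main__":
    z, Z = uniform_boxes(dimensions=4, num_boxes=10)
    assert tuple(z.shape) == tuple(Z.shape) == (10, 4)
    assert bool((Z - z >= 0.01 - 1e-6).all())  # each side respects delta_min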
|
PypiClean
|