/auDeep-0.9.6a1.tar.gz/auDeep-0.9.6a1/audeep/backend/parsers/no_metadata.py
# You should have received a copy of the GNU General Public License
# along with auDeep. If not, see <http://www.gnu.org/licenses/>.
"""A parser which does not parse any metadata"""
from pathlib import Path
from typing import Optional, Mapping, Sequence
from audeep.backend.log import LoggingMixin
from audeep.backend.parsers.base import Parser, _InstanceMetadata
class NoMetadataParser(LoggingMixin, Parser):
"""
Parser which does not parse any metadata besides the filenames of instances.
As a result, this parser can process any directory structure containing audio files. It is useful for cases in which
only the basic feature learning capabilities of the auDeep application are required.
"""
def __init__(self, basedir: Path):
"""
Creates and initializes a new NoMetadataParser for the specified data set base directory.
Parameters
----------
basedir: pathlib.Path
The data set base directory
"""
super().__init__(basedir)
self._num_instances_cache = None
def can_parse(self) -> bool:
"""
Returns True, since this parser can process any directory structure.
Returns
-------
bool
True
"""
return True
@property
def num_instances(self) -> int:
"""
Returns the number of instances in the data set.
Simply counts the number of WAV files anywhere below the data set base directory.
Returns
-------
int
The number of instances in the data set
"""
if self._num_instances_cache is None:
self._num_instances_cache = len(list(self._basedir.rglob("*.wav")))
# noinspection PyTypeChecker
return self._num_instances_cache
@property
def num_folds(self) -> int:
"""
Returns zero, since this parser does not parse cross-validation information.
Returns
-------
int
Zero
"""
return 0
@property
def label_map(self) -> Optional[Mapping[str, int]]:
"""
Returns None, since this parser does not parse labels.
Returns
-------
map of str to int
None
"""
return None
def parse(self) -> Sequence[_InstanceMetadata]:
"""
Parses the instances contained in this data set.
Returns
-------
list of _InstanceMetadata
A list of _InstanceMetadata containing one entry for each parsed audio file
"""
meta_list = []
for file in self._basedir.rglob("*.wav"): # type: Path
filename = str(file.relative_to(self._basedir))
instance_metadata = _InstanceMetadata(path=file,
filename=filename,
label_nominal=None,
label_numeric=None,
cv_folds=[],
partition=None)
self.log.debug("parsed instance %s", filename)
meta_list.append(instance_metadata)
return meta_list
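# A minimal usage sketch; the corpus path below is hypothetical and only
# illustrates how the parser is typically driven.
def _example_usage(corpus_dir: Path = Path("/data/my_corpus")) -> None:
    parser = NoMetadataParser(corpus_dir)
    if parser.can_parse():
        print(parser.num_instances)        # number of *.wav files found
        for instance in parser.parse():    # one _InstanceMetadata per file
            print(instance)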
/python-gamelocker-0.5.0.tar.gz/python-gamelocker-0.5.0/gamelocker/janus.py
__author__ = "Bruno Hautzenberger"
__copyright__ = "Copyright 2015, xamoom GmbH"
__version__ = "0.4.3"
__maintainer__ = "Bruno Hautzenberger"
__email__ = "[email protected]"
__status__ = "Development"
__url__ = "https://github.com/xamoom/xamoom-janus"
"""
Copyright (c) 2015, xamoom GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
janus
A module to serialize python objects to json api compatible messages
and also deserialize json api messages back to python objects.
spec: http://jsonapi.org/
"""
import json
import copy
import collections
#--- exceptions ---
import hashlib
import time
class JanusException(Exception):
"""
adds additional information to exceptions in order to return as much information as possible
using an ErrorMessage object as specified in jsonapi.
"""
id = None #a unique identifier for this particular occurrence of the problem.
#the HTTP status code applicable to this problem, expressed as a string value.
#if we get no specific error code (derived exceptions will set their own) we use 503 (service unavailable).
status = 503
#an application-specific error code, expressed as a string value. (will be set by derived exceptions or by the one raising the exception)
code = -1
#gets set while raising the exception. a short, human-readable summary of the problem that SHOULD NOT change from occurrence to occurrence of the problem, except for purposes of localization.
title = ""
#gets set while raising the exception. a human-readable explanation specific to this occurrence of the problem.
detail = None
#gets set while raising the exception. a meta object containing non-standard meta-information about the error.
#this has to be none or a dict of primitive types.
#TODO verify that
meta = None
def __init__(self,title="",detail="",status="",code=-1,meta=None):
Exception.__init__(self,self.title)
self.title = title
self.detail = detail
self.status = status
self.code = code
self.meta = meta
#we use a string representation of all the details we got plus a timestamp, hashed, to identify this error.
#So we can search for it in the logs, if we need to.
self.id = hashlib.sha1((
    str(time.time()) +
    str(self.title) +
    str(self.detail) +
    str(self.status) +
    str(self.code) +
    str(self.meta)
).encode('utf-8')).hexdigest()
class BadRequestException(JanusException):
"""
represents a Bad Request exception (HTTP 400)
"""
def __init__(self, detail=None, code=-1, meta = None):
JanusException. __init__(self,
title="The web server was unable to understand the request and process it.",
detail=detail,
status=400,
code=code,
meta=meta)
class UnauthorizedException(JanusException):
"""
represents an Unauthorized exception (HTTP 401)
"""
def __init__(self, detail=None, code=-1, meta = None):
#just call super with some prefilled information fitting this special type of exception
JanusException. __init__(self,
title="The request can not be process, because authorization is missing.",
detail=detail,
status=401,
code=code,
meta=meta)
class ForbiddenException(JanusException):
"""
represents a Forbidden exception (HTTP 403)
"""
def __init__(self, detail=None, code=-1, meta = None):
#just call super with some prefilled information fitting this special type of exception
JanusException. __init__(self,
title="You are not allowed to access this resource.",
detail=detail,
status=403,
code=code,
meta=meta)
class NotFoundException(JanusException):
"""
represents a not found exception (HTTP 404)
"""
def __init__(self, detail=None, code=-1, meta = None):
#just call super with some prefilled information fitting this special type of exception
JanusException. __init__(self,
title="The requested resource could not be found but may be available again in the future. Subsequent requests by the client are permissible.",
detail=detail,
status=404,
code=code,
meta=meta)
class DeveloperException(JanusException):
"""
represents an Exception caused by Developer Error
"""
def __init__(self, title="Developer Error", detail=None, code=-1, meta = None, status = 500):
#just call super with some prefilled information fitting this special type of exception
JanusException. __init__(self,
title=title,
detail=detail,
status=status,
code=code,
meta=meta)
class InternalServerErrorException(JanusException):
"""
represents an Internal Server Error exception (HTTP 500)
"""
def __init__(self, detail=None, meta = None):
#just call super with some prefilled information fitting this special type of exception
JanusException. __init__(self,
title="Internal Server Error",
detail=detail,
status=500,
code="42", #this is always error 42, because this should never happen on production.
meta=meta)
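# A small sketch of how the HTTP-specific subclasses above pre-fill a
# JanusException; the detail, code and meta values are made up.
def _example_not_found():
    try:
        raise NotFoundException(detail='No article with id 42.', code=1001,
                                meta={'resource': 'article'})
    except JanusException as exc:
        # exc.status == 404, exc.code == 1001, exc.id is a sha1 hex digest
        return exc.status, exc.code, exc.title, exc.detail, exc.id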
#--- ---
class JanusResponse(object): #JSON API Message Object see: http://jsonapi.org/format/#document-structure
"""
Represents the response handed back to the janus decorator: the data payload,
the DataMessage subclass used to serialize it, and optional meta information.
spec: http://jsonapi.org/format/#document-structure
"""
message = None #the message type (a DataMessage subclass) to return
data = None #an object, or a list of objects, that should be returned from this message as data payload
meta = None #custom, non json api standard meta data as dict of simple types (no objects please)
include_relationships = None #flag to overrule the include_relationships setting of the decorator.
def __init__(self,data=None,meta=None,message=None,include_relationships=None):
self.data = data
self.meta = meta
self.message = message
self.include_relationships = include_relationships
#check data
if self.data == None:
raise Exception("JanusResponse data can't be None.")
#check message
if self.message == None:
raise Exception("JanusResponse message can't be None.")
#check message type
if issubclass(self.message,DataMessage) == False:
raise Exception("JanusResponse message must be subclass of janus.DataMessage.")
#check meta
if self.meta != None and isinstance(self.meta,dict) == False:
raise Exception('Meta has to be a dict with non jsonapi standard information.')
class JsonApiMessage(object): #JSON API Message Object see: http://jsonapi.org/format/#document-structure
"""
Represents a jsonapi compatible message.
This is the root type for all messages transmitted using jsonapi decorator
spec: http://jsonapi.org/format/#document-structure
"""
data = None #an object, or a list of objects, derived from janus.DataMessage or a list of such objects. Represents a json api data object.
meta = None #custom, non json api standard meta data as dict of simple types (no objects please)
errors = None #a list of objects derived from janus.ErrorMessage or a list of such objects. Represents a json api error object.
included = None #an array of resource objects that are related to the primary data and/or each other ("included resources").
def __init__(self,data=None,errors=None,included=None,meta=None):
"""
initializes the object
at least one of the three objects (data,errors,meta) has to be set.
"""
if errors == None and data == None and meta == None:
raise Exception('JSON Api message has to contain at least one of these members: data, errors and/or meta.')
self.errors = errors
self.data = data
self.meta = meta
self.included = included
def __setattr__(self, name, value):
"""
overrides __setattr__ to check if data and errors aren't set at the same time.
"""
if value != None:
if (name == "errors" and self.data != None) or (name == "data" and self.errors != None):
raise Exception('JSON Api message may only contain data or errors, not both.')
#call default __setattr__
object.__setattr__(self, name, value)
def to_json(self):
"""
returns a json representation of the message.
This is always a valid json api message according to http://jsonapi.org/format/#document-structure
"""
msg = {} #initializes a dict which will later be turned into json
if self.data != None: #if data is present add it to the message
if isinstance(self.data, (list, tuple)): #if data is list of objects transform these objects to a list of dicts
#call to_dict on all data objects to get a dict representation of the data object
#and write this as a list to the message
msg['data'] = [d.to_dict() for d in self.data]
else:
msg['data'] = self.data.to_dict() #set the data object dicts to the message
if self.errors != None:
if isinstance(self.errors, (list, tuple)):
msg['errors'] = [e.to_dict() for e in self.errors]
else:
msg['errors'] = [self.errors.to_dict(),]
if self.included != None: msg['included'] = self.included #if included is present add it to the message
if self.meta != None: msg['meta'] = self.meta #if meta is present add it to the message
json_msg = json.loads(json.dumps(msg)) #serialize dict to json and return
return json_msg
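# A minimal sketch of the smallest valid jsonapi document this class can
# produce: a message carrying only a (made-up) "meta" member.
def _example_meta_only_message():
    message = JsonApiMessage(meta={'total-count': 3})
    return message.to_json()   # {'meta': {'total-count': 3}}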
class Attribute(object): #Attribute Class to map Data from input Object to Message Object
"""
Represents an attribute in the DataMessage object that will be present
in the final json api message.
This is used to hold its value as well as all configuration for the
transformation to json.
"""
__primitive_types = (str,int,float,bool) #all allowed types for attribute values. (lists and dicts are also allowed, but treated differently)
value = None #holds the actual value of this attribute once it is set.
value_type = None #the value type of this attribute. Used for value verification.
name = "" #the name of this value in json.
required = True #indicates if this attribute is required or not.
mapping = None #tells the mapping function (DataMessage.map_object) how to get the value for this
key_mapping = None #tells the mapping function how to get type and id of a related entity without loading the whole entity. This is used only for relationships.
key_value = None #holds the actual key
read_only = False #only for request messages. If property is readonly it won't be serialized back. #TODO implement this.
write_only = False #only for request messages. If property is writeonly it won't be included in responses (passwords on users for example). #TODO implement this.
updated = False #only for request messages. True if property was present in the request and therefore has to be updated.
def __init__(self,value_type=value_type,name=name,required=False,mapping=None,key_mapping=None,read_only=False,write_only=False):
"""
initializes the object
sets all needed configurations and checks if value is a primitive type or list or dict.
"""
if issubclass(value_type, self.__primitive_types + (list, dict)) or issubclass(value_type,DataMessage):
self.value_type = value_type
self.name = name
self.required = required
self.mapping = mapping
self.read_only = read_only
self.write_only = write_only
if issubclass(value_type,DataMessage): #relationship
self.key_mapping = key_mapping
else:
raise Exception('Value type must either be a simple type such as ' + str(self.__primitive_types) + ', a subclass of DataMessage, or a list or dict containing these types.')
#TODO USE THIS WHEN OBJECT GETS FILLED WITH VALUES
def __check_list(self,list_value):
for item in list_value:
if not isinstance(item, self.__primitive_types):
raise Exception('Attribute ' + self.name + ' contains invalid object of type ' + str(type(item)) + ". Valid types are " + str(self.__primitive_types))
class DataMessage(object): #JSON API Data Object see: http://jsonapi.org/format/#document-structure
"""
Represents a DataMessage object that will be present in the final json api message.
This is used as a base class for all message template objects.
Do not initialize this or derived objects yourself. Always inherit from this object
and use the class method "from_object" to initialize it with an object whose
members match the Attribute mappings, so the Attribute values can be read from it.
spec: http://jsonapi.org/format/#document-structure
"""
id = None #the data object's id (has to be set for each json api data object)
__type_name = None #the data object's type (has to be set for each json api data object)
__data_object = None #the data object that holds the data for the message
def __init__(self):
"""
initializes the object
sets __type_name to the type name of the derived class.
__type_name can be overridden by creating a member "type_name" in the derived class
and setting a new string value to it.
This method also reinitializes all members containing Attribute objects with full
copies of those objects, so every instance gets its own Attribute objects.
"""
#set type name to class name (takes name of sub class)
self.__type_name = self.__class__.__name__
### START ATTRIBUTE COPY """
#get all members of the sub class containing Attribute objects
attributes = {attr:object.__getattribute__(self,attr).value
for attr in dir(self)
if not isinstance(object.__getattribute__(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and not attr.startswith("__")}
#reinitialize all members containing Attribute objects by full
#copies of the objects, so every instance gets its own Attribute objects.
#otherwise they would share these objects resulting in all instances having the same value,
#which is bad. ;-)
for attr in attributes:
object.__setattr__(self,attr,copy.deepcopy(object.__getattribute__(self,attr)))
def __get_id_attribute(self):
#check if there is an id attribute in the subclass
result = [attr for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == False
and object.__getattribute__(self,attr).mapping != None
and object.__getattribute__(self,attr).name == 'id'
and not attr.startswith("__")]
if len(result) == 1: #id attribute found
return result[0]
else:
raise Exception(self.__type_name + " is missing Attribute 'id'.")
def __convert_to_value_type(self,name,value):
if value == None:
return None
else:
#try to convert to desired type for simple types
if issubclass(object.__getattribute__(self,name).value_type,DataMessage) == False:
_type = object.__getattribute__(self,name).value_type
try:
return _type(value)
except:
raise AttributeError("Failed to convert " + str(value) + " to " + str(_type) + " in " + self.__type_name)
else:
return value
def __setattr__(self, name, value):
"""
Overrides the default behaviour of member assignment to make members containing
Attribute objects behave like members containing a simple type like int or str.
An assignment therefore sets the value of the Attribute object inside
the actual member instead of overriding the whole Attribute object on
assignment.
"""
if type(object.__getattribute__(self,name)) == Attribute or name == "id":
#if this sets id we also set id on the member that contains id in the subclass
is_id = False
if name == "id":
is_id = True
name = self.__get_id_attribute()
#convert value to defined value_type
value = self.__convert_to_value_type(name, value)
#set value
object.__getattribute__(self,name).value = value
object.__getattribute__(self,name).updated = True
if is_id:
object.__setattr__(self, "id", value) #set value to id
else: #if the member does not contain an Attribute object, act normal.
object.__setattr__(self, name, value)
def __getattribute__(self, name):
"""
Overrides the default behaviour of getting member values containing Attribute
objects, to return the value of the Attribute object instead of the whole
Attribute object.
"""
try:
if type(object.__getattribute__(self,name)) == Attribute:
return object.__getattribute__(self,name).value
else: #if the member does not contain an Attribute object, act normal.
return object.__getattribute__(self,name)
except AttributeError: #if message does not contain an Attribute, return None
return None
def to_dict(self):
"""
Returns a dict representation of this object's members containing Attribute
objects, with their configured name as key and their values.
The dict is already in a jsonapi format.
"""
#initialize the dict with id and type, because they are mandatory in json api.
msg = {
'id': str(self.id),
'type': self.__type_name
}
#get all members of the subclass containing Attribute members that are not relations, which do not contain
#None as value and whose name is not id, because id is treated differently, as
#a dict fitting the jsonapi specification for the data object's "attributes" member.
#key => attribute name as specified in the Attribute object
#value => the loaded value from the object(s) given to "from_object"
attributes = {object.__getattribute__(self,attr).name:object.__getattribute__(self,attr).value
for attr in dir(self)
if not isinstance(object.__getattribute__(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == False
and not attr.startswith("__")
and object.__getattribute__(self,attr).name != 'id'
and object.__getattribute__(self,attr).value != None}
#if there are attributes we add them to the dict using "attributes" as key.
if len(list(attributes.keys())) > 0: msg['attributes'] = attributes
#get all members of the subclass containing Attribute members that are relations, which do not contain
#None as key_value and whose name is not id, because id is treated differently, as
#a dict fitting the jsonapi specification for the data object's "relationships" member.
#key => attribute name as specified in the Attribute object
#value => the loaded relation key (type and id) from the object(s) given to "from_object"
relations = {object.__getattribute__(self,attr).name:object.__getattribute__(self,attr).key_value
for attr in dir(self)
if not isinstance(object.__getattribute__(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == True
and not attr.startswith("__")
and object.__getattribute__(self,attr).name != 'id'
and object.__getattribute__(self,attr).key_value != None}
#if there are relations we add them to the dict using "relations" as key.
if len(list(relations.keys())) > 0: msg['relationships'] = relations
return msg
def map_object(self,obj,include_relationships=True):
"""
Used to set values from a python object, as specified in the Attribute objects
of the sub class of this, to the values of the Attribute objects of the sub class.
So in other words, this is the data mapping from object to DataMessage object.
"""
self.__data_object = obj #remember the object this message is based on
#get all members of the subclass containing Attribute members that are not relations, as a dict.
#key => member name in the sub class.
#value => the Attribute inside of this member.
attributes = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == False
and object.__getattribute__(self,attr).mapping != None
and object.__getattribute__(self,attr).write_only == False
and not attr.startswith("__")}
#for each member containing an Attribute object that is not a relation, set its value
#to the value retrieved from the python object as specified in the
#Attribute mapping and set it to the Attribute objects value.
for attr in attributes:
value = obj #start in the object itself to search for value
value_path = attributes[attr].mapping.split('.') #get mapping and split by '.', because this indicates a deeper path to get it.
for path_element in value_path: #go down this path in the python object to find the value
try: #Did a simple try/except, because hasattr actually calls the member
current_value = getattr(value,path_element) #get the next value of current path element.
value = current_value() if isinstance(current_value, collections.Callable) else current_value #call the attribute if it is callable otherwise just read value
except AttributeError:
value = None
if value == None: #check if this field is required
if attributes[attr].required:
raise Exception('Missing required field ' + str(attributes[attr].name) + ".")
else:
if isinstance(value,attributes[attr].value_type) == False: #check if the actual value fits value_type
raise Exception('Expected ' + str(attributes[attr].value_type) + " got " + str(type(value)) + " for " + str(attributes[attr].name) + " of " + str(self.__type_name) + ".")
if attributes[attr].name == 'id': #if the attribute's name is id, set it to the object's id, because id is not inside "attributes"
setattr(self,'id',value)
else:
attributes[attr].value = value #set loaded value to the Attribute object's value.
if include_relationships:
#get all members of the subclass containing Attribute members that are relations as a dict.
#key => member name in the sub class.
#value => the Attribute inside of this member.
relations = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == True
and object.__getattribute__(self,attr).key_mapping != None
and object.__getattribute__(self,attr).write_only == False
and not attr.startswith("__")}
#for each member containing an Attribute object that is a relation, set its value
#to the value retrieved from the python object as specified in the
#Attribute mapping and set it to the Attribute objects value.
for attr in relations:
#load key first (for relations element)
key_id = obj
key_id_path = relations[attr].key_mapping.split('.') #get mapping to the keys of this relations and split by '.', because this indicates a deeper path to get it.
for path_element in key_id_path: #go down this path in the python object to find the value
if key_id == None:
if relations[attr].required:
raise InternalServerErrorException("Keypath: " + str(key_id_path) + " returned None for path element " + path_element + " on message type " + self.__type_name)
else:
key_id = None
continue # skip this not required relationship, because its value is None.
if hasattr(key_id,path_element):
current_key_id = getattr(key_id,path_element) #get the next value of current path element.
key_id = current_key_id() if isinstance(current_key_id, collections.Callable) else current_key_id #call the attribute if it is callable otherwise just read value
else:
if relations[attr].required:
raise InternalServerErrorException("Keypath: " + str(key_id_path) + " returned None for path element " + path_element + " on message type " + self.__type_name)
else:
key_id = None
continue # skip this not required relationship, because its value is None.
#now get type name for this relation
if key_id != None:
type_name = relations[attr].value_type.__name__
if hasattr(relations[attr].value_type(),'type_name') and relations[attr].value_type().type_name != None: #if sub class has a member "type_name"...
type_name = relations[attr].value_type().type_name #get this type name
if isinstance(key_id,list): #one-to-many relation
relations[attr].key_value = {'data':[]}
for k in key_id:
relations[attr].key_value['data'].append({'type':type_name,'id':str(k)})
else: #one-to-one relation
relations[attr].key_value = {'data':{'type':type_name,'id':str(key_id)}}
if hasattr(self,'type_name') and self.type_name != None: #if sub class has a member "type_name"...
self.__type_name = self.type_name #... override __type_name to set this to 'type' in the final data object.
return self
def get_included(self):
included = []
#get all members of the subclass containing Attribute members that are relations and have a mapping as a dict.
#key => member name in the sub class.
#value => the Attribute inside of this member.
relations = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == True
and object.__getattribute__(self,attr).mapping != None
and not attr.startswith("__")}
#for each member containing an Attribute object that is a relation, set its value
#to the value retrieved from the python object as specified in the
#Attribute mapping and set it to the Attribute objects value.
for attr in relations:
#load key first (for relations element)
value = self.__data_object
value_path = relations[attr].mapping.split('.') #get mapping to the keys of this relations and split by '.', because this indicates a deeper path to get it.
for path_element in value_path: #go down this path in the python object to find the value
if hasattr(value,path_element):
current_value = getattr(value,path_element) #get the next value of current path element.
value = current_value() if isinstance(current_value, collections.Callable) else current_value #call the attribute if it is callable otherwise just read value
else:
if relations[attr].required:
raise InternalServerErrorException("Keypath: " + str(value_path) + " returned None for path element " + path_element + " on message type " + self.__type_name)
else:
value = None
continue # skip this not required relationship, because its value is None.
#current_value = getattr(value,path_element) #get the next value of current path element.
#value = current_value() if callable(current_value) else current_value #call the attribute if it is callable otherwise just read value
if value == None:
if relations[attr].required:
raise InternalServerErrorException("Keypath: " + str(value_path) + " returned None for path element " + path_element + " on message type " + self.__type_name)
else:
continue # skip this not required relationship, because its value is None.
#data = DataMessage.from_object(value,object.__getattribute__(self,attr).value_type,include_relationships=False) #map but without relationships
data = DataMessage.from_object(value,object.__getattribute__(self,attr).value_type,include_relationships=True) #map now with relationships
if isinstance(data,list) == True:
for d in data: included.append(d.to_dict())
else:
included.append(data.to_dict())
return included
@classmethod
def from_object(cls,obj,msg_class,include_relationships=True):
"""
Used to get a DataMessage (an object derived from DataMessage) with values in its
Attribute members loaded from a python object according to Attribute objects mapping.
obj => the python object containing the data that should be mapped to the message object. If this is a list of objects a list of message objects is returned.
msg_class => the class (derived from DataMessage) which should be used as message class. (This class will be initialized and returned)
"""
if isinstance(obj, (list, tuple)):
messages = []
for o in obj: #map all objects to new meassage objects
msg = msg_class()
msg.map_object(o,include_relationships)
messages.append(msg)
return messages
else: #map a single object to a message object.
msg = msg_class()
msg.map_object(obj,include_relationships)
return msg
### REQUEST HANDLING ###
def map_message(self,message):
"""
Used to set values from a jsonapi request message, as specified in the Attribute objects
of the sub class of this, to the values of the Attribute objects of the sub class.
So in other words, this is the data mapping from message to DataMessage object.
"""
#get id
if 'id' in message:
self.id = message['id']
if 'attributes' in message:
#get attributes
attributes = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == False
and object.__getattribute__(self,attr).mapping != None
and object.__getattribute__(self,attr).name != 'id'
and not attr.startswith("__")}
for attr in attributes:
if attributes[attr].name in message['attributes']:
setattr(self,attr,message['attributes'][attributes[attr].name])
setattr(attributes[attr],'updated',True) #mark this attribute as updated for later updating the backend object
else:
if attributes[attr].required == True:
raise Exception('Missing required field ' + str(attributes[attr].name) + ".")
if 'relationships' in message:
#get relationships
relations = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == True
and object.__getattribute__(self,attr).key_mapping != None
and object.__getattribute__(self,attr).name != 'id'
and not attr.startswith("__")}
for attr in relations:
if relations[attr].name in message['relationships']:
rel_objects = []
if isinstance(message['relationships'][relations[attr].name]['data'], (list, tuple)):
for item in message['relationships'][relations[attr].name]['data']:
rel_object = relations[attr].value_type()
rel_object.id = item['id']
rel_objects.append(rel_object)
else:
rel_object = relations[attr].value_type()
rel_data = message['relationships'][relations[attr].name]['data']
#removed relationships result in a "None" data part. We use a None id to indicate this state internally.
if rel_data == None:
rel_object.id = None
else:
rel_object.id = message['relationships'][relations[attr].name]['data']['id']
rel_objects = rel_object
setattr(self,attr,rel_objects)
setattr(relations[attr],'updated',True) #mark this attribute as updated for later updating the backend object
else:
if relations[attr].required == True:
raise Exception('Missing required field ' + str(relations[attr].name) + ".")
@classmethod
def from_message(cls,raw_message,msg_class):
"""
Used to get a DataMessage (an object derived from DataMessage) with values in its
Attribute members loaded from a jsonapi request object (raw string request) according to Attribute objects mapping.
If the jsonapi request object is a list a list of message objects is returned.
msg_class => the class (derived from DataMessage) which should be used as message class. (This class will be initialized and returned)
"""
json_message = json.loads(raw_message) #parse raw_message to json
if json_message == None: #no request body
return None
#get data part
data = None
if 'data' in json_message:
data = json_message['data']
else:
raise Exception("Message is missing data.")
if isinstance(data, (list, tuple)):
messages = []
for d in data:
msg = msg_class()
msg.map_message(d)
messages.append(msg)
return messages
else:
msg = msg_class()
msg.map_message(data)
return msg
def update_object(self,obj,useids=True):
"""
Used to set values from a DataMessage that were updated (self.__updated == True),
as specified in the Attribute objects of the sub class of this, to the values of
the backend object that matches this DataMessage Object.
So in other words, this is the data mapping from DataMessage to backend object.
Read-Only Attributes are also skipped.
If useids==True (default), relations will be replaced by their respective IDs.
"""
attributes = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == False
and object.__getattribute__(self,attr).updated == True
and object.__getattribute__(self,attr).read_only == False
and object.__getattribute__(self,attr).mapping != None
and object.__getattribute__(self,attr).name != 'id'
and not attr.startswith("__")}
for attr in attributes:
attr_obj = obj
attr_path = attributes[attr].mapping.split('.') #get mapping and split by '.', because this indicates a deeper path to get it.
actual_attr = None
i = 1
for path_element in attr_path: #go down this path in the python object to find the value
if i < len(attr_path): #go down the path, but exclude the last element to get the parent object of the attribute
current_attr_obj = getattr(attr_obj,path_element) #get the next value of current path element.
attr_obj = current_attr_obj() if isinstance(current_attr_obj, collections.Callable) else current_attr_obj #call the attribute if it is callable otherwise just read value
else: #the last element is what we actually want to set
actual_attr = path_element
i = i + 1
#set the value to the attr in the subobject
setattr(attr_obj, actual_attr, object.__getattribute__(self,attr).value)
relations = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == True
and object.__getattribute__(self,attr).updated == True
and object.__getattribute__(self,attr).read_only == False
and object.__getattribute__(self,attr).key_mapping != None
and not attr.startswith("__")}
for attr in relations:
attr_obj = obj
attr_path = relations[attr].key_mapping.split('.') #get key_mapping and split by '.', because this indicates a deeper path to get it.
actual_attr = None
i = 1
for path_element in attr_path: #go down this path in the python object to find the value
if i < len(attr_path): #go down the path, but exclude the last element to get the parent object of the attribute
current_attr_obj = getattr(attr_obj,path_element) #get the next value of current path element.
attr_obj = current_attr_obj() if isinstance(current_attr_obj, collections.Callable) else current_attr_obj #call the attribute if it is callable otherwise just read value
else: #the last element is what we actually want to set
actual_attr = path_element
i = i + 1
#extract ids and set to object
if isinstance(object.__getattribute__(self,attr).value,(list,tuple)):
if useids:
vals = [r.id for r in object.__getattribute__(self,attr).value]
else:
vals = object.__getattribute__(self,attr).value
setattr(attr_obj, actual_attr, vals)
else:
if useids:
setattr(attr_obj, actual_attr, object.__getattribute__(self,attr).value.id)
else:
setattr(attr_obj, actual_attr, object.__getattribute__(self,attr).value)
return obj
def describe(self):
"""
Used to get a description of this message type (subclass of DataMessage), containing
all its attributes and relationships as a dict. This can be sent in a meta member of
a JsonApiMessage to describe the service entities to client developers.
"""
#read type name
if hasattr(self,'type_name') and self.type_name != None: #if sub class has a member "type_name"...
self.__type_name = self.type_name #... override __type_name to set this to 'type' in the final data object.
message_description = { "type":self.__type_name }
#initialize attribute and relationship lists
message_description['attributes'] = []
message_description['relationships'] = []
#get attributes
attributes = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == False
and object.__getattribute__(self,attr).mapping != None
and object.__getattribute__(self,attr).name != 'id'
and not attr.startswith("__")}
for attr in attributes:
attr_desription = {
"name": attributes[attr].name,
"value-type": type(attributes[attr].value_type()).__name__,
"is-required": str(attributes[attr].required),
"is-read-only": str(attributes[attr].read_only),
"is-write-only": str(attributes[attr].write_only)
}
message_description['attributes'].append(attr_desription)
#get relationships
relations = {attr:object.__getattribute__(self,attr)
for attr in dir(self)
if not isinstance(getattr(self,attr), collections.Callable)
and type(object.__getattribute__(self,attr)) == Attribute
and issubclass(object.__getattribute__(self,attr).value_type,DataMessage) == True
and object.__getattribute__(self,attr).key_mapping != None
and object.__getattribute__(self,attr).name != 'id'
and not attr.startswith("__")}
for attr in relations:
#get type name of relation
rel_object = relations[attr].value_type()
type_name = type(relations[attr].value_type()).__name__
if hasattr(rel_object,'type_name') and rel_object.type_name != None: #if sub class has a member "type_name"...
type_name = rel_object.type_name #... take this one
attr_desription = {
"name": relations[attr].name,
"value-type": type_name,
"is-required": str(relations[attr].required),
"is-read-only": str(relations[attr].read_only),
"is-write-only": str(relations[attr].write_only)
}
message_description['relationships'].append(attr_desription)
return message_description
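# A sketch of the typical DataMessage round trip; the Article/Tag classes,
# values and the raw request string are made up for illustration, and the
# module's reliance on collections.Callable limits it to Python < 3.10.
def _example_datamessage_roundtrip():
    class Article(object):                      # plain backend object
        def __init__(self, id, title, tag_ids):
            self.id = id
            self.title = title
            self.tag_ids = tag_ids

    class TagMessage(DataMessage):              # related resource
        id = Attribute(value_type=int, name='id', mapping='id')

    class ArticleMessage(DataMessage):          # message template
        id = Attribute(value_type=int, name='id', mapping='id')
        title = Attribute(value_type=str, name='title', mapping='title')
        tags = Attribute(value_type=TagMessage, name='tags',
                         mapping='tag_ids', key_mapping='tag_ids')

    article = Article(7, 'Hello janus', [1, 2])

    # backend object -> message -> jsonapi data dict
    msg = DataMessage.from_object(article, ArticleMessage)
    data = msg.to_dict()
    # data == {'id': '7', 'type': 'ArticleMessage',
    #          'attributes': {'title': 'Hello janus'},
    #          'relationships': {'tags': {'data': [
    #              {'type': 'TagMessage', 'id': '1'},
    #              {'type': 'TagMessage', 'id': '2'}]}}}

    # raw jsonapi request -> message -> update the backend object
    raw = ('{"data": {"id": "7", "type": "ArticleMessage",'
           ' "attributes": {"title": "Updated title"},'
           ' "relationships": {"tags": {"data": [{"type": "TagMessage", "id": "3"}]}}}}')
    incoming = DataMessage.from_message(raw, ArticleMessage)
    incoming.update_object(article)  # article.title == 'Updated title', article.tag_ids == [3]
    return data, article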
class ErrorMessage(object): #JSON API Error Object see: http://jsonapi.org/format/#errors
id = None #a unique identifier for this particular occurrence of the problem.
status = None #the HTTP status code applicable to this problem, expressed as a string value.
code = None #an application-specific error code, expressed as a string value.
title = None #a short, human-readable summary of the problem that SHOULD NOT change from occurrence to occurrence of the problem, except for purposes of localization.
detail = None #a human-readable explanation specific to this occurrence of the problem.
meta = None #a meta object containing non-standard meta-information about the error.
traceback = None #excepton traceback
@classmethod
def from_exception(cls, exception):
"""
Used to get an ErrorMessage filled with data based on the given exception.
Only if the exception is derived from JanusException will all fields be filled in.
Exceptions that do not derive from JanusException result in a 500 Internal Server Error.
exception => an Exception
"""
msg = cls()
if isinstance(exception,JanusException):
msg.id = exception.id
msg.status = exception.status
msg.code = exception.code
msg.title = exception.title
msg.detail = exception.detail
msg.meta = exception.meta
else:
msg.id = hashlib.sha1((str(time.time()) + str(exception)).encode('utf-8')).hexdigest()
msg.status = 500
msg.code = 500 #TODO add code for uncaught exception
msg.title = "Internal Server Error"
msg.detail = str(exception)
msg.meta = None
return msg
def to_dict(self):
"""
Returns a dict representation of this error message, with the member names as keys and their values.
The dict is already in a jsonapi format.
"""
msg = {
'id': self.id,
'status':self.status,
'code':self.code,
'title':self.title,
'detail':self.detail
}
if self.meta != None:
msg['meta'] = self.meta
if self.traceback != None:
if self.meta == None: self.meta = {}
msg['meta']['traceback'] = self.traceback
return msg
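# A sketch of turning an exception into a jsonapi error document; the detail
# text and error code are made up.
def _example_error_response():
    try:
        raise ForbiddenException(detail='API key lacks the "write" scope.', code=2001)
    except JanusException as exc:
        error = ErrorMessage.from_exception(exc)
        return JsonApiMessage(errors=error).to_json()
        # {'errors': [{'id': '...', 'status': 403, 'code': 2001,
        #              'title': 'You are not allowed to access this resource.',
        #              'detail': 'API key lacks the "write" scope.'}]}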
/aiida_fleur-2.0.0-py3-none-any.whl/aiida_fleur/tools/common_fleur_wf.py
import warnings
from aiida.orm import Node, load_node, Bool
from aiida.plugins import DataFactory, CalculationFactory
def get_inputs_fleur(code, remote, fleurinp, options, label='', description='', settings=None, add_comp_para=None):
'''
Assembles the input dictionary for a FLEUR calculation. It does not check whether the user gave
correct input types; that is the job of FleurCalculation.
:param code: FLEUR code of Code type
:param remote: remote_folder from the previous calculation of RemoteData type
:param fleurinp: FleurinpData object representing input files
:param options: calculation options that will be stored in metadata
:param label: a string setting a label of the CalcJob in the DB
:param description: a string setting a description of the CalcJob in the DB
:param settings: additional settings of Dict type
:param add_comp_para: dict with extra keys controlling the behaviour of the parallelization
of the FleurBaseWorkChain
Example of use::
inputs_build = get_inputs_fleur(code, remote, fleurinp, options, label,
                                description, settings=settings)
future = self.submit(inputs_build)
'''
Dict = DataFactory('core.dict')
inputs = {}
add_comp_para_default = {
'only_even_MPI': False,
'forbid_single_mpi': False,
'max_queue_nodes': 20,
'max_queue_wallclock_sec': 86400
}
if add_comp_para is None:
add_comp_para = {}
add_comp_para = {**add_comp_para_default, **add_comp_para}
if remote:
inputs['parent_folder'] = remote
if code:
inputs['code'] = code
if fleurinp:
inputs['fleurinp'] = fleurinp
if description:
inputs['description'] = description
else:
inputs['description'] = ''
if label:
inputs['label'] = label
else:
inputs['label'] = ''
if add_comp_para.get('serial', False):
warnings.warn('The serial input in add_comp_para is deprecated. Control the usage of '
'MPI with the withmpi key in the options input')
if not options:
options = {}
options['withmpi'] = False # for now
# TODO not every machine/scheduler type takes a number of machines
# lsf takes tot_num_mpiprocs, slurm and pbs take num_machines;
# a full node would also run MPI here, which is also not what we want
options['resources'] = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
if options:
options['withmpi'] = options.get('withmpi', True)
if not options['withmpi'] and 'resources' not in options:
# TODO not every machine/scheduler type takes a number of machines
# lsf takes tot_num_mpiprocs, slurm and pbs take num_machines;
# a full node would also run MPI here, which is also not what we want
options['resources'] = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
inputs['clean_workdir'] = Bool(add_comp_para.pop('clean_workdir', False))
inputs['add_comp_para'] = Dict(add_comp_para)
if settings:
if isinstance(settings, Dict):
inputs['settings'] = settings
else:
inputs['settings'] = Dict(settings)
if options:
inputs['options'] = Dict(options)
return inputs
def get_inputs_inpgen(structure, inpgencode, options, label='', description='', settings=None, params=None, **kwargs):
'''
Assembles the input dictionary for an inpgen calculation.
:param structure: input structure of StructureData type
:param inpgencode: inpgen code of Code type
:param options: calculation options that will be stored in metadata
:param label: a string setting a label of the CalcJob in the DB
:param description: a string setting a description of the CalcJob in the DB
:param params: input parameters for inpgen code of Dict type
Example of use::
inputs_build = get_inputs_inpgen(structure, inpgencode, options, label,
description, params=params)
future = self.submit(inputs_build)
'''
FleurinpProcess = CalculationFactory('fleur.inpgen')
inputs = FleurinpProcess.get_builder()
if structure:
inputs.structure = structure
if inpgencode:
inputs.code = inpgencode
if params:
inputs.parameters = params
if settings:
inputs.settings = settings
if description:
inputs.metadata.description = description
else:
inputs.metadata.description = ''
if label:
inputs.metadata.label = label
else:
inputs.metadata.label = ''
if not options:
options = {}
# inpgen run always serial
options['withmpi'] = False
options['resources'] = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
if options:
inputs.metadata.options = options
# Currently this does not work, find out howto...
# for key, val in kwargs.items():
# inputs[key] = val
return inputs
def test_and_get_codenode(codenode, expected_code_type):
"""
Pass a code node and an expected code (plugin) type. Check that the
code exists, is unique, and return the Code object.
:param codenode: the name of the code to load (in the form label@machine)
:param expected_code_type: a string with the plugin that is expected to
be loaded. In case the code has a different plugin, show all existing
codes of the expected type
:return: a Code object
"""
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm import Code, load_code
if not isinstance(codenode, Code):
codenode = load_code(codenode)
plugin_name = codenode.get_input_plugin_name()
if plugin_name != expected_code_type:
message = f'Expected Code of type {expected_code_type}. Got: {plugin_name}\n'
qb = QueryBuilder()
qb.append(Code, filters={'attributes.input_plugin': {'==': expected_code_type}}, project='*')
valid_code_labels = [f'{c.label}@{c.computer.label}' for c in qb.all(flat=True)]
if valid_code_labels:
message += f'Valid labels for a {expected_code_type} executable are:\n'
message += '\n'.join(f'* {l}' for l in valid_code_labels)
else:
message += f'No valid labels for a {expected_code_type} executable are available\n' \
'Configure at least one first using\n' \
' verdi code setup'
raise ValueError(message)
return codenode
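def _example_load_fleur_code():
    # Usage sketch; the code label is hypothetical and a configured AiiDA
    # profile with an installed FLEUR code is assumed.
    return test_and_get_codenode('fleur_mpi@mycluster', 'fleur.fleur')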
def get_kpoints_mesh_from_kdensity(structure, kpoint_density):
"""
params: structure, AiiDA StructureData node
params: kpoint_density, k-point density in 1/A
returns: tuple (mesh, offset)
returns: KpointsData node
"""
KpointsData = DataFactory('core.array.kpoints')
kp = KpointsData()
kp.set_cell_from_structure(structure)
density = kpoint_density # 1/A
kp.set_kpoints_mesh_from_density(density)
mesh = kp.get_kpoints_mesh()
return mesh, kp
# test
# print(get_kpoints_mesh_from_kdensity(load_node(structure(120)), 0.1))
# (([33, 33, 18], [0.0, 0.0, 0.0]), <KpointsData: uuid: cee9d05f-b31a-44d7-aa72-30a406712fba (unstored)>)
# mesh, kp = get_kpoints_mesh_from_kdensity(structuredata, 0.1)
# print mesh[0]
# TODO maybe allow lists of uuids in workchain dict, or write a second function for this,...
# The question is how to get the enthalpy for a reaction out of my database,
# where I have redundant calculations or calculations with different parameters...
# are total energies comparable?
# -> as long as the same scheme is used (all GGA or all GGA+U)
# total energies are comparable and the Gibbs enthalpy is approximately the
# total energy difference
# there are tricks to also compare mixed energies, with experimental fits
# for binary reactions, where both is needed
def determine_favorable_reaction(reaction_list, workchain_dict):
"""
Finds out which reaction is more favorable from a simple energy standpoint
# TODO check physics
reaction_list: list of reaction strings
workchain_dict = {'Be12W' : uuid_wc or output, 'Be2W' : uuid, ...}
returns a list that ranks the reactions by their enthalpy
TODO: refactor aiida part out of this, leaving an aiida independent part and one
more universal
"""
from aiida.engine import WorkChain
from aiida_fleur.tools.common_fleur_wf_util import get_enhalpy_of_equation
# for each reaction get the total energy sum
# make sure to use the right multipliers...
# then sort the given list from lowest (negative energies) to highest
energy_sorted_reactions = []
formenergy_dict = {}
for compound, uuid in workchain_dict.items():
# TODO ggf get formation energy from output node, or extras
if isinstance(uuid, float): # allow to give values
formenergy_dict[compound] = uuid
continue
n = load_node(uuid)
extras = n.get_extras() # sadly there is no get(,) method...
try:
formenergy = extras.get('formation_energy', None)
except KeyError:
formenergy = None
if not formenergy: # test if 0 case ok
if isinstance(n, WorkChain): # TODO: untested for aiida > 1.0
plabel = n.get_attr('_process_label')
if plabel == 'FleurInitialCLSWorkChain':
try:
ouputnode = n.out.output_initial_cls_wc_para.get_dict()
except AttributeError:
try:
ouputnode = n.out.output_inital_cls_wc_para.get_dict()
except (AttributeError, KeyError, ValueError): # TODO: Check this
ouputnode = None
formenergy = None
print(f'WARNING: output node of {n} not found. I skip')
continue
formenergy = ouputnode.get('formation_energy')
# TODO is this value per atom?
else: # check if corehole wc?
pass
formenergy_dict[compound] = formenergy
for reaction_string in reaction_list:
ent_peratom = get_enhalpy_of_equation(reaction_string, formenergy_dict)
print(ent_peratom)
energy_sorted_reactions.append([reaction_string, ent_peratom])
energy_sorted_reactions = sorted(energy_sorted_reactions, key=lambda ent: ent[1])
return energy_sorted_reactions
def performance_extract_calcs(calcs):
"""
Extracts some runtime and system data from given fleur calculations
:params calcs: list of calculation nodes/pks/or uuids. Fleur calc specific
:returns data_dict: dictionary of arrays with the same length,
from which a pandas DataFrame can be created.
Note: this is not the fastest for many (> 1000) calculations.
"""
data_dict = {
'n_symmetries': [],
'n_spin_components': [],
'n_kpoints': [],
'n_iterations': [],
'walltime_sec': [],
'walltime_sec_per_it': [],
'n_iterations_total': [],
'density_distance': [],
'computer': [],
'n_atoms': [],
'kmax': [],
'cost': [],
'costkonstant': [],
'walltime_sec_cor': [],
'total_cost': [],
'fermi_energy': [],
'bandgap': [],
'energy': [],
'force_largest': [],
'ncores': [],
'pk': [],
'uuid': [],
'serial': [],
'resources': []
}
count = 0
for calc in calcs:
if not isinstance(calc, Node):
calc = load_node(calc)
count = count + 1
pk = calc.pk
print((count, pk))
res = calc.res
res_keys = list(res)
try:
efermi = res.fermi_energy
except AttributeError:
print(f'skipping {pk}, {calc.uuid}')
efermi = -10000
continue # we skip these entries
try:
gap = res.bandgap
except AttributeError:
gap = -10000
print(f'skipping 2 {pk}, {calc.uuid}')
continue
try:
energy = res.energy
except AttributeError:
energy = 0.0
print(f'skipping 3 {pk}, {calc.uuid}')
continue
data_dict['bandgap'].append(gap)
data_dict['fermi_energy'].append(efermi)
data_dict['energy'].append(energy)
data_dict['force_largest'].append(res.force_largest)
data_dict['pk'].append(pk)
data_dict['uuid'].append(calc.uuid)
data_dict['n_symmetries'].append(res.number_of_symmetries)
nspins = res.number_of_spin_components
data_dict['n_spin_components'].append(nspins)
nkpt = res.number_of_kpoints
data_dict['n_kpoints'].append(nkpt)
niter = res.number_of_iterations
data_dict['n_iterations'].append(niter)
data_dict['n_iterations_total'].append(res.number_of_iterations_total)
if 'overall_density_convergence' not in res_keys:
data_dict['density_distance'].append(res.density_convergence)
else: # magnetic, old
data_dict['density_distance'].append(res.overall_density_convergence)
walltime = res.walltime
if walltime <= 0:
# date was not considered yet, we assume one day...
walltime_new = walltime + 86400
else:
walltime_new = walltime
walltime_periteration = walltime_new / niter
data_dict['walltime_sec'].append(walltime)
data_dict['walltime_sec_cor'].append(walltime_new)
data_dict['walltime_sec_per_it'].append(walltime_periteration)
cname = calc.computer.label
data_dict['computer'].append(cname)
natom = res.number_of_atoms
data_dict['n_atoms'].append(natom)
# fleurinp = calc.get_inputs_dict()['fleurinpdata']
# kmax = fleurinp.inp_dict['calculationSetup']['cutoffs']['Kmax']
kmax = res.kmax
data_dict['kmax'].append(kmax)
cost = calc_time_cost_function(natom, nkpt, kmax, nspins)
total_cost = cost * niter
serial = not calc.attributes['withmpi']
# codename = calc.get_code().label
# code_col.append(codename)
# if 'mpi' in codename:
# serial = False
# else:
# serial = True
data_dict['serial'].append(serial)
resources = calc.attributes['resources']
mpi_proc = get_mpi_proc(resources)
c_ratio = cost_ratio(cost, walltime_new, mpi_proc)
data_dict['resources'].append(resources)
data_dict['cost'].append(cost)
data_dict['costkonstant'].append(c_ratio)
data_dict['total_cost'].append(total_cost)
data_dict['ncores'].append(mpi_proc)
return data_dict
def get_mpi_proc(resources):
"""Determine number of total processes from given resource dict"""
nmachines = resources.get('num_machines', 0)
total_proc = resources.get('tot_num_mpiprocs', 0)
if not total_proc:
if nmachines:
total_proc = nmachines * resources.get('default_mpiprocs_per_machine', 12)
else:
total_proc = resources.get('tot_num_mpiprocs', 24)
return total_proc
def calc_time_cost_function(natom, nkpt, kmax, nspins=1):
"""Estimates the cost of simulating a single iteration of a system"""
costs = natom**3 * kmax**3 * nkpt * nspins
return costs
def calc_time_cost_function_total(natom, nkpt, kmax, niter, nspins=1):
"""Estimates the cost of simulating a all iteration of a system"""
costs = natom**3 * kmax**3 * nkpt * nspins * niter
return costs
def cost_ratio(total_costs, walltime_sec, ncores):
"""Estimates if simulation cost matches resources"""
ratio = total_costs / (walltime_sec * ncores)
return ratio
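def _example_cost_estimate():
    # A worked example with made-up numbers: a 64-atom cell, 60 k-points,
    # kmax=4.0, one spin, 30 iterations on 4 nodes with 24 MPI ranks each.
    resources = {'num_machines': 4, 'default_mpiprocs_per_machine': 24}
    ncores = get_mpi_proc(resources)                         # 4 * 24 = 96
    cost = calc_time_cost_function(64, 60, 4.0)              # 64**3 * 4.0**3 * 60 ~ 1.0e9
    total = calc_time_cost_function_total(64, 60, 4.0, 30)   # cost * 30
    ratio = cost_ratio(total, walltime_sec=3600, ncores=ncores)
    return ncores, cost, total, ratio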
def optimize_calc_options(nodes,
mpi_per_node,
omp_per_mpi,
use_omp,
mpi_omp_ratio,
fleurinpData=None,
kpts=None,
sacrifice_level=0.9,
only_even_MPI=False,
forbid_single_mpi=False):
"""
Makes a suggestion on parallelisation setup for a particular fleurinpData.
Only the total number of k-points is analysed: the function suggests ideal k-point
parallelisation + OMP parallelisation (if required). Note: the total number of used CPUs
per node will not exceed mpi_per_node * omp_per_mpi.
Sometimes a parallelisation that is perfect in terms of idle CPUs is not what the
user wanted, because it can harm the MPI/OMP ratio. Thus the function first chooses the top
parallelisations in terms of total CPUs used
(bigger than sacrifice_level * maximal_number_CPUs_possible). Then the parallelisation which is
the closest to the requested MPI/OMP ratio is chosen among them and returned.
:param nodes: maximal number of nodes that can be used
:param mpi_per_node: an input suggestion of MPI tasks per node
:param omp_per_mpi: an input suggestion for OMP tasks per MPI process
:param use_omp: False if OMP parallelisation is not needed
:param mpi_omp_ratio: requested MPI/OMP ratio
:param fleurinpData: FleurinpData to extract total number of kpts from
:param kpts: the total number of kpts
:param sacrifice_level: sets a level of performance sacrifice that a user can afford for better
MPI/OMP ratio.
:param only_even_MPI: if set to True, the function does not set MPI to an odd number (if possible)
:param forbid_single_mpi: if set to True, the configuration 1 node 1 MPI per node will be forbidden
:returns nodes, MPI_tasks, OMP_per_MPI, message: first three are parallelisation info and
the last one is an exit message.
"""
from sympy.ntheory.factor_ import divisors
import numpy as np
cpus_per_node = mpi_per_node * omp_per_mpi
if fleurinpData:
kpts = fleurinpData.get_nkpts()
elif not kpts:
        raise ValueError('You must specify either kpts or fleurinpData')
divisors_kpts = divisors(kpts)
possible_nodes = [x for x in divisors_kpts if x <= nodes]
suggestions = []
for n_n in possible_nodes:
advise_cpus = [x for x in divisors(kpts // n_n) if x <= cpus_per_node]
for advised_cpu_per_node in advise_cpus:
suggestions.append((n_n, advised_cpu_per_node))
def add_omp(suggestions, only_even_MPI_1):
"""
Also adds possibility of omp parallelisation
"""
final_suggestion = []
for suggestion in suggestions:
if use_omp:
omp = cpus_per_node // suggestion[1]
else:
omp = 1
# here we drop parallelisations having odd number of MPIs
if only_even_MPI_1 and suggestion[1] % 2 == 0 or not only_even_MPI_1:
final_suggestion.append([suggestion[0], suggestion[1], omp])
return final_suggestion
# all possible suggestions taking into account omp
suggestions_save = suggestions
suggestions = np.array(add_omp(suggestions, only_even_MPI))
if not len(suggestions): # only odd MPI parallelisations possible, ignore only_even_MPI
suggestions = np.array(add_omp(suggestions_save, False))
best_resources = max(np.prod(suggestions, axis=1))
top_suggestions = suggestions[np.prod(suggestions, axis=1) > sacrifice_level * best_resources]
if forbid_single_mpi:
top_suggestions = [s for s in top_suggestions if s[0] * s[1] != 1]
if len(top_suggestions) == 0:
        raise ValueError('A parallelization meeting the requirements could not be determined '
                         f'for the given number of k-points ({kpts})')
def best_criterion(suggestion):
if use_omp:
return -abs(suggestion[1] / suggestion[2] - mpi_omp_ratio)
return (suggestion[0] * suggestion[1], -suggestion[0])
best_suggestion = max(top_suggestions, key=best_criterion)
message = ''
if float(best_suggestion[1] * best_suggestion[2]) / cpus_per_node < 0.6:
message = ('WARNING: Changed the number of MPIs per node from {} to {} and OMP per MPI '
                   'from {} to {}. '
'Changed the number of nodes from {} to {}. '
                   'Computational setup needed for the given number of k-points ({})'
                   ' provides less than 60% of node load.'
''.format(mpi_per_node, best_suggestion[1], omp_per_mpi, best_suggestion[2], nodes,
best_suggestion[0], kpts))
raise ValueError(message)
if best_suggestion[1] * best_suggestion[2] == cpus_per_node:
if best_suggestion[0] != nodes:
message = f'WARNING: Changed the number of nodes from {nodes} to {best_suggestion[0]}'
else:
message = ('Computational setup is perfect! Nodes: {}, MPIs per node {}, OMP per MPI '
'{}. Number of k-points is {}'.format(best_suggestion[0], best_suggestion[1], best_suggestion[2],
kpts))
else:
message = ('WARNING: Changed the number of MPIs per node from {} to {} and OMP from {} to {}'
'. Changed the number of nodes from {} to {}. Number of k-points is {}.'
''.format(mpi_per_node, best_suggestion[1], omp_per_mpi, best_suggestion[2], nodes,
best_suggestion[0], kpts))
return int(best_suggestion[0]), int(best_suggestion[1]), int(best_suggestion[2]), message
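# Example (illustrative only): ask for a k-point parallelisation of 120 k-points
# on at most 4 nodes, with 24 MPI slots and 2 OMP threads per MPI process available.
#
#     >>> nodes, mpi_per_node, omp_per_mpi, msg = optimize_calc_options(
#     ...     nodes=4, mpi_per_node=24, omp_per_mpi=2, use_omp=True,
#     ...     mpi_omp_ratio=12, kpts=120)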
def find_last_submitted_calcjob(restart_wc):
"""
Finds the last CalcJob submitted in a higher-level workchain
    and returns its uuid
"""
from aiida.common.exceptions import NotExistent
from aiida.orm import CalcJobNode
calls = restart_wc.get_outgoing(node_class=CalcJobNode).all()
if calls:
calls = sorted(calls, key=lambda x: x.node.pk)
return calls[-1].node.uuid
raise NotExistent
def find_last_submitted_workchain(restart_wc):
"""
Finds the last WorkChain submitted in a higher-level workchain
    and returns its uuid
"""
from aiida.common.exceptions import NotExistent
from aiida.orm import WorkChainNode
calls = restart_wc.get_outgoing(node_class=WorkChainNode).all()
if calls:
calls = sorted(calls, key=lambda x: x.node.pk)
return calls[-1].node.uuid
raise NotExistent
def find_nested_process(wc_node, p_class):
'''
    This function recursively finds all nested child processes of type p_class
'''
child_process = []
lower = wc_node.get_outgoing().all()
for i in lower:
try:
if i.node.process_class is p_class:
child_process.append(i.node)
else:
child_process.extend(find_nested_process(i.node, p_class))
except: #pylint: disable=bare-except
pass
return child_process
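# Example (illustrative only; the FleurCalculation import path is an assumption):
#
#     >>> from aiida.orm import load_node
#     >>> from aiida_fleur.calculation.fleur import FleurCalculation
#     >>> wc_node = load_node(1234)          # pk of some finished workchain
#     >>> fleur_calcs = find_nested_process(wc_node, FleurCalculation)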
|
PypiClean
|
/kython-1.4.1.tar.gz/kython-1.4.1/.eggs/pytest-6.2.1-py3.8.egg/_pytest/assertion/__init__.py
|
import sys
from typing import Any
from typing import Generator
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from _pytest.assertion import rewrite
from _pytest.assertion import truncate
from _pytest.assertion import util
from _pytest.assertion.rewrite import assertstate_key
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item
if TYPE_CHECKING:
from _pytest.main import Session
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("debugconfig")
group.addoption(
"--assert",
action="store",
dest="assertmode",
choices=("rewrite", "plain"),
default="rewrite",
metavar="MODE",
help=(
"Control assertion debugging tools.\n"
"'plain' performs no assertion debugging.\n"
"'rewrite' (the default) rewrites assert statements in test modules"
" on import to provide assert expression information."
),
)
parser.addini(
"enable_assertion_pass_hook",
type="bool",
default=False,
help="Enables the pytest_assertion_pass hook."
"Make sure to delete any previously generated pyc cache files.",
)
def register_assert_rewrite(*names: str) -> None:
"""Register one or more module names to be rewritten on import.
This function will make sure that this module or all modules inside
the package will get their assert statements rewritten.
Thus you should make sure to call this before the module is
actually imported, usually in your __init__.py if you are a plugin
using a package.
:raises TypeError: If the given module names are not strings.
"""
for name in names:
if not isinstance(name, str):
msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable]
raise TypeError(msg.format(repr(names)))
for hook in sys.meta_path:
if isinstance(hook, rewrite.AssertionRewritingHook):
importhook = hook
break
else:
# TODO(typing): Add a protocol for mark_rewrite() and use it
# for importhook and for PytestPluginManager.rewrite_hook.
importhook = DummyRewriteHook() # type: ignore
importhook.mark_rewrite(*names)
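# Example (illustrative only, the module name is hypothetical): a plugin package
# would typically call the public pytest.register_assert_rewrite wrapper from its
# own __init__.py before importing the helpers whose asserts should be rewritten.
#
#     import pytest
#     pytest.register_assert_rewrite("myplugin.helpers")
#     from myplugin import helpers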
class DummyRewriteHook:
"""A no-op import hook for when rewriting is disabled."""
def mark_rewrite(self, *names: str) -> None:
pass
class AssertionState:
"""State for the assertion plugin."""
def __init__(self, config: Config, mode) -> None:
self.mode = mode
self.trace = config.trace.root.get("assertion")
self.hook: Optional[rewrite.AssertionRewritingHook] = None
def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
"""Try to install the rewrite hook, raise SystemError if it fails."""
config._store[assertstate_key] = AssertionState(config, "rewrite")
config._store[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config)
sys.meta_path.insert(0, hook)
config._store[assertstate_key].trace("installed rewrite import hook")
def undo() -> None:
hook = config._store[assertstate_key].hook
if hook is not None and hook in sys.meta_path:
sys.meta_path.remove(hook)
config.add_cleanup(undo)
return hook
def pytest_collection(session: "Session") -> None:
# This hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
# (which does not collect test modules).
assertstate = session.config._store.get(assertstate_key, None)
if assertstate:
if assertstate.hook is not None:
assertstate.hook.set_session(session)
@hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
"""Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks.
The rewrite module will use util._reprcompare if it exists to use custom
reporting via the pytest_assertrepr_compare hook. This sets up this custom
comparison for the test.
"""
ihook = item.ihook
def callbinrepr(op, left: object, right: object) -> Optional[str]:
"""Call the pytest_assertrepr_compare hook and prepare the result.
This uses the first result from the hook and then ensures the
following:
* Overly verbose explanations are truncated unless configured otherwise
(eg. if running in verbose mode).
* Embedded newlines are escaped to help util.format_explanation()
later.
* If the rewrite mode is used embedded %-characters are replaced
to protect later % formatting.
The result can be formatted by util.format_explanation() for
pretty printing.
"""
hook_result = ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right
)
for new_expl in hook_result:
if new_expl:
new_expl = truncate.truncate_if_required(new_expl, item)
new_expl = [line.replace("\n", "\\n") for line in new_expl]
res = "\n~".join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
res = res.replace("%", "%%")
return res
return None
saved_assert_hooks = util._reprcompare, util._assertion_pass
util._reprcompare = callbinrepr
if ihook.pytest_assertion_pass.get_hookimpls():
def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None:
ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl)
util._assertion_pass = call_assertion_pass_hook
yield
util._reprcompare, util._assertion_pass = saved_assert_hooks
def pytest_sessionfinish(session: "Session") -> None:
assertstate = session.config._store.get(assertstate_key, None)
if assertstate:
if assertstate.hook is not None:
assertstate.hook.set_session(None)
def pytest_assertrepr_compare(
config: Config, op: str, left: Any, right: Any
) -> Optional[List[str]]:
return util.assertrepr_compare(config=config, op=op, left=left, right=right)
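# Example (illustrative only, MyType is a hypothetical user class): a project can
# customise assertion explanations by defining the same hook in its conftest.py;
# the results are merged by callbinrepr above.
#
#     # conftest.py
#     def pytest_assertrepr_compare(config, op, left, right):
#         if isinstance(left, MyType) and isinstance(right, MyType) and op == "==":
#             return ["Comparing MyType instances:", f"   values: {left.val} != {right.val}"]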
|
PypiClean
|
/django-easy-reports-0.3.2.tar.gz/django-easy-reports-0.3.2/ereports/static/ereports/multiselect/js/jquery.blockUI.min.js
|
(function(g){if(/1\.(0|1|2)\.(0|1|2)/.test(g.fn.jquery)||/^1.1/.test(g.fn.jquery)){alert("blockUI requires jQuery v1.2.3 or later! You are using v"+g.fn.jquery);return}g.fn._fadeIn=g.fn.fadeIn;var d=(function(){if(!g.browser.msie){return false}var o=document.createElement("div");try{o.style.setExpression("width","0+0")}catch(n){return false}return true})();g.blockUI=function(n){c(window,n)};g.unblockUI=function(n){h(window,n)};g.growlUI=function(r,p,q,n){var o=g('<div class="growlUI"></div>');if(r){o.append("<h1>"+r+"</h1>")}if(p){o.append("<h2>"+p+"</h2>")}if(q==undefined){q=3000}g.blockUI({message:o,fadeIn:700,fadeOut:1000,centerY:false,timeout:q,showOverlay:false,onUnblock:n,css:g.blockUI.defaults.growlCSS})};g.fn.block=function(n){return this.unblock({fadeOut:0}).each(function(){if(g.css(this,"position")=="static"){this.style.position="relative"}if(g.browser.msie){this.style.zoom=1}c(this,n)})};g.fn.unblock=function(n){return this.each(function(){h(this,n)})};g.blockUI.version=2.2;g.blockUI.defaults={message:"<h1>Please wait...</h1>",css:{padding:0,margin:0,width:"30%",top:"40%",left:"35%",textAlign:"center",color:"#000",border:"3px solid #aaa",backgroundColor:"#fff",cursor:"wait"},overlayCSS:{backgroundColor:"#000",opacity:0.6,cursor:"wait"},growlCSS:{width:"350px",top:"10px",left:"",right:"10px",border:"none",padding:"5px",opacity:0.6,cursor:null,color:"#fff",backgroundColor:"#000","-webkit-border-radius":"10px","-moz-border-radius":"10px"},iframeSrc:/^https/i.test(window.location.href||"")?"javascript:false":"about:blank",forceIframe:false,baseZ:1000,centerX:true,centerY:true,allowBodyStretch:true,bindEvents:true,constrainTabKey:true,fadeIn:200,fadeOut:400,timeout:0,showOverlay:true,focusInput:true,applyPlatformOpacityRules:true,onUnblock:null,quirksmodeOffsetHack:4};var e=g.browser.msie&&/MSIE 6.0/.test(navigator.userAgent);var b=null;var f=[];function c(p,n){var A=(p==window);var q=n&&n.message!==undefined?n.message:undefined;n=g.extend({},g.blockUI.defaults,n||{});n.overlayCSS=g.extend({},g.blockUI.defaults.overlayCSS,n.overlayCSS||{});var y=g.extend({},g.blockUI.defaults.css,n.css||{});q=q===undefined?n.message:q;if(A&&b){h(window,{fadeOut:0})}if(q&&typeof q!="string"&&(q.parentNode||q.jquery)){var s=q.jquery?q[0]:q;var x={};g(p).data("blockUI.history",x);x.el=s;x.parent=s.parentNode;x.display=s.style.display;x.position=s.style.position;if(x.parent){x.parent.removeChild(s)}}var B=n.baseZ;var w=(g.browser.msie||n.forceIframe)?g('<iframe class="blockUI" style="z-index:'+(B++)+';display:none;border:none;margin:0;padding:0;position:absolute;width:100%;height:100%;top:0;left:0" src="'+n.iframeSrc+'"></iframe>'):g('<div class="blockUI" style="display:none"></div>');var v=g('<div class="blockUI blockOverlay" style="z-index:'+(B++)+';display:none;border:none;margin:0;padding:0;width:100%;height:100%;top:0;left:0"></div>');var r=A?g('<div class="blockUI blockMsg blockPage" style="z-index:'+B+';display:none;position:fixed"></div>'):g('<div class="blockUI blockMsg blockElement" style="z-index:'+B+';display:none;position:absolute"></div>');if(q){r.css(y)}if(!n.applyPlatformOpacityRules||!(g.browser.mozilla&&/Linux/.test(navigator.platform))){v.css(n.overlayCSS)}v.css("position",A?"fixed":"absolute");if(g.browser.msie||n.forceIframe){w.css("opacity",0)}g([w[0],v[0],r[0]]).appendTo(A?"body":p);var 
E=g.browser.msie&&(g.browser.version<8||!g.boxModel)&&(!g.boxModel||g("object,embed",A?null:p).length>0);if(e||(E&&d)){if(A&&n.allowBodyStretch&&g.boxModel){g("html,body").css("height","100%")}if((e||!g.boxModel)&&!A){var F=k(p,"borderTopWidth"),u=k(p,"borderLeftWidth");var D=F?"(0 - "+F+")":0;var o=u?"(0 - "+u+")":0}g.each([w,v,r],function(t,I){var z=I[0].style;z.position="absolute";if(t<2){A?z.setExpression("height","Math.max(document.body.scrollHeight, document.body.offsetHeight) - (jQuery.boxModel?0:"+n.quirksmodeOffsetHack+') + "px"'):z.setExpression("height",'this.parentNode.offsetHeight + "px"');A?z.setExpression("width",'jQuery.boxModel && document.documentElement.clientWidth || document.body.clientWidth + "px"'):z.setExpression("width",'this.parentNode.offsetWidth + "px"');if(o){z.setExpression("left",o)}if(D){z.setExpression("top",D)}}else{if(n.centerY){if(A){z.setExpression("top",'(document.documentElement.clientHeight || document.body.clientHeight) / 2 - (this.offsetHeight / 2) + (blah = document.documentElement.scrollTop ? document.documentElement.scrollTop : document.body.scrollTop) + "px"')}z.marginTop=0}else{if(!n.centerY&&A){var G=(n.css&&n.css.top)?parseInt(n.css.top):0;var H="((document.documentElement.scrollTop ? document.documentElement.scrollTop : document.body.scrollTop) + "+G+') + "px"';z.setExpression("top",H)}}}})}if(q){r.append(q);if(q.jquery||q.nodeType){g(q).show()}}if((g.browser.msie||n.forceIframe)&&n.showOverlay){w.show()}if(n.fadeIn){if(n.showOverlay){v._fadeIn(n.fadeIn)}if(q){r.fadeIn(n.fadeIn)}}else{if(n.showOverlay){v.show()}if(q){r.show()}}j(1,p,n);if(A){b=r[0];f=g(":input:enabled:visible",b);if(n.focusInput){setTimeout(m,20)}}else{a(r[0],n.centerX,n.centerY)}if(n.timeout){var C=setTimeout(function(){A?g.unblockUI(n):g(p).unblock(n)},n.timeout);g(p).data("blockUI.timeout",C)}}function h(q,r){var p=q==window;var o=g(q);var s=o.data("blockUI.history");var t=o.data("blockUI.timeout");if(t){clearTimeout(t);o.removeData("blockUI.timeout")}r=g.extend({},g.blockUI.defaults,r||{});j(0,q,r);var n=p?g("body").children().filter(".blockUI"):g(".blockUI",q);if(p){b=f=null}if(r.fadeOut){n.fadeOut(r.fadeOut);setTimeout(function(){i(n,s,r,q)},r.fadeOut)}else{i(n,s,r,q)}}function i(n,q,p,o){n.each(function(r,s){if(this.parentNode){this.parentNode.removeChild(this)}});if(q&&q.el){q.el.style.display=q.display;q.el.style.position=q.position;if(q.parent){q.parent.appendChild(q.el)}g(q.el).removeData("blockUI.history")}if(typeof p.onUnblock=="function"){p.onUnblock(o,p)}}function j(n,r,s){var q=r==window,p=g(r);if(!n&&(q&&!b||!q&&!p.data("blockUI.isBlocked"))){return}if(!q){p.data("blockUI.isBlocked",n)}if(!s.bindEvents||(n&&!s.showOverlay)){return}var o="mousedown mouseup keydown keypress";n?g(document).bind(o,s,l):g(document).unbind(o,l)}function l(q){if(q.keyCode&&q.keyCode==9){if(b&&q.data.constrainTabKey){var p=f;var o=!q.shiftKey&&q.target==p[p.length-1];var n=q.shiftKey&&q.target==p[0];if(o||n){setTimeout(function(){m(n)},10);return false}}}if(g(q.target).parents("div.blockMsg").length>0){return true}return g(q.target).parents().children().filter("div.blockUI").length==0}function m(n){if(!f){return}var o=f[n===true?f.length-1:0];if(o){o.focus()}}function a(u,n,w){var v=u.parentNode,r=u.style;var o=((v.offsetWidth-u.offsetWidth)/2)-k(v,"borderLeftWidth");var q=((v.offsetHeight-u.offsetHeight)/2)-k(v,"borderTopWidth");if(n){r.left=o>0?(o+"px"):"0"}if(w){r.top=q>0?(q+"px"):"0"}}function k(n,o){return parseInt(g.css(n,o))||0}})(jQuery);
|
PypiClean
|
/eti-page-cms-1.0.1.tar.gz/eti-page-cms-1.0.1/README.rst
|
django-page-cms
===============
.. image:: https://travis-ci.org/batiste/django-page-cms.svg?branch=master
:target: https://travis-ci.org/batiste/django-page-cms
.. image:: https://coveralls.io/repos/batiste/django-page-cms/badge.svg
:target: https://coveralls.io/r/batiste/django-page-cms
.. image:: https://img.shields.io/pypi/dm/django-page-cms.svg
:target: https://pypi.python.org/pypi/django-page-cms/
.. image:: https://codeclimate.com/github/batiste/django-page-cms/badges/gpa.svg
:target: https://codeclimate.com/github/batiste/django-page-cms
:alt: Code Climate
This Django CMS enables you to create and administrate hierarchical pages in a simple and powerful way.
* `Full documentation <http://django-page-cms.readthedocs.org/en/latest/>`_
* `How to contribute <doc/contributions.rst>`_
Django page CMS is based around a placeholder concept. A placeholder is a special template tag that
you use in your page templates. Every time you add a placeholder to your template, a corresponding
field dynamically appears in the page admin interface.
Each page can have a different template with different placeholders.
.. image:: https://github.com/batiste/django-page-cms/raw/master/doc/admin-screenshot1.png
|
PypiClean
|
/plone.patternslib-1.3.0-py3-none-any.whl/plone/patternslib/static/components/google-code-prettify/src/lang-vhdl.js
|
PR['registerLangHandler'](
PR['createSimpleLexer'](
[
// Whitespace
[PR['PR_PLAIN'], /^[\t\n\r \xA0]+/, null, '\t\n\r \xA0']
],
[
// String, character or bit string
[PR['PR_STRING'], /^(?:[BOX]?"(?:[^\"]|"")*"|'.')/i],
// Comment, from two dashes until end of line.
[PR['PR_COMMENT'], /^--[^\r\n]*/],
[PR['PR_KEYWORD'], /^(?:abs|access|after|alias|all|and|architecture|array|assert|attribute|begin|block|body|buffer|bus|case|component|configuration|constant|disconnect|downto|else|elsif|end|entity|exit|file|for|function|generate|generic|group|guarded|if|impure|in|inertial|inout|is|label|library|linkage|literal|loop|map|mod|nand|new|next|nor|not|null|of|on|open|or|others|out|package|port|postponed|procedure|process|pure|range|record|register|reject|rem|report|return|rol|ror|select|severity|shared|signal|sla|sll|sra|srl|subtype|then|to|transport|type|unaffected|units|until|use|variable|wait|when|while|with|xnor|xor)(?=[^\w-]|$)/i, null],
// Type, predefined or standard
[PR['PR_TYPE'], /^(?:bit|bit_vector|character|boolean|integer|real|time|string|severity_level|positive|natural|signed|unsigned|line|text|std_u?logic(?:_vector)?)(?=[^\w-]|$)/i, null],
// Predefined attributes
[PR['PR_TYPE'], /^\'(?:ACTIVE|ASCENDING|BASE|DELAYED|DRIVING|DRIVING_VALUE|EVENT|HIGH|IMAGE|INSTANCE_NAME|LAST_ACTIVE|LAST_EVENT|LAST_VALUE|LEFT|LEFTOF|LENGTH|LOW|PATH_NAME|POS|PRED|QUIET|RANGE|REVERSE_RANGE|RIGHT|RIGHTOF|SIMPLE_NAME|STABLE|SUCC|TRANSACTION|VAL|VALUE)(?=[^\w-]|$)/i, null],
// Number, decimal or based literal
[PR['PR_LITERAL'], /^\d+(?:_\d+)*(?:#[\w\\.]+#(?:[+\-]?\d+(?:_\d+)*)?|(?:\.\d+(?:_\d+)*)?(?:E[+\-]?\d+(?:_\d+)*)?)/i],
// Identifier, basic or extended
[PR['PR_PLAIN'], /^(?:[a-z]\w*|\\[^\\]*\\)/i],
// Punctuation
[PR['PR_PUNCTUATION'], /^[^\w\t\n\r \xA0\"\'][^\w\t\n\r \xA0\-\"\']*/]
]),
['vhdl', 'vhd']);
|
PypiClean
|
/pulumi_alicloud-3.44.0a1693632188.tar.gz/pulumi_alicloud-3.44.0a1693632188/pulumi_alicloud/vpn/get_gateway_vco_routes.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetGatewayVcoRoutesResult',
'AwaitableGetGatewayVcoRoutesResult',
'get_gateway_vco_routes',
'get_gateway_vco_routes_output',
]
@pulumi.output_type
class GetGatewayVcoRoutesResult:
"""
A collection of values returned by getGatewayVcoRoutes.
"""
def __init__(__self__, id=None, ids=None, output_file=None, page_number=None, page_size=None, route_entry_type=None, routes=None, status=None, vpn_connection_id=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
pulumi.set(__self__, "ids", ids)
if output_file and not isinstance(output_file, str):
raise TypeError("Expected argument 'output_file' to be a str")
pulumi.set(__self__, "output_file", output_file)
if page_number and not isinstance(page_number, int):
raise TypeError("Expected argument 'page_number' to be a int")
pulumi.set(__self__, "page_number", page_number)
if page_size and not isinstance(page_size, int):
raise TypeError("Expected argument 'page_size' to be a int")
pulumi.set(__self__, "page_size", page_size)
if route_entry_type and not isinstance(route_entry_type, str):
raise TypeError("Expected argument 'route_entry_type' to be a str")
pulumi.set(__self__, "route_entry_type", route_entry_type)
if routes and not isinstance(routes, list):
raise TypeError("Expected argument 'routes' to be a list")
pulumi.set(__self__, "routes", routes)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if vpn_connection_id and not isinstance(vpn_connection_id, str):
raise TypeError("Expected argument 'vpn_connection_id' to be a str")
pulumi.set(__self__, "vpn_connection_id", vpn_connection_id)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
return pulumi.get(self, "ids")
@property
@pulumi.getter(name="outputFile")
def output_file(self) -> Optional[str]:
return pulumi.get(self, "output_file")
@property
@pulumi.getter(name="pageNumber")
def page_number(self) -> Optional[int]:
return pulumi.get(self, "page_number")
@property
@pulumi.getter(name="pageSize")
def page_size(self) -> Optional[int]:
return pulumi.get(self, "page_size")
@property
@pulumi.getter(name="routeEntryType")
def route_entry_type(self) -> Optional[str]:
return pulumi.get(self, "route_entry_type")
@property
@pulumi.getter
def routes(self) -> Sequence['outputs.GetGatewayVcoRoutesRouteResult']:
return pulumi.get(self, "routes")
@property
@pulumi.getter
def status(self) -> Optional[str]:
return pulumi.get(self, "status")
@property
@pulumi.getter(name="vpnConnectionId")
def vpn_connection_id(self) -> str:
return pulumi.get(self, "vpn_connection_id")
class AwaitableGetGatewayVcoRoutesResult(GetGatewayVcoRoutesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGatewayVcoRoutesResult(
id=self.id,
ids=self.ids,
output_file=self.output_file,
page_number=self.page_number,
page_size=self.page_size,
route_entry_type=self.route_entry_type,
routes=self.routes,
status=self.status,
vpn_connection_id=self.vpn_connection_id)
def get_gateway_vco_routes(ids: Optional[Sequence[str]] = None,
output_file: Optional[str] = None,
page_number: Optional[int] = None,
page_size: Optional[int] = None,
route_entry_type: Optional[str] = None,
status: Optional[str] = None,
vpn_connection_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGatewayVcoRoutesResult:
"""
This data source provides the Vpn Gateway Vco Routes of the current Alibaba Cloud user.
> **NOTE:** Available in v1.183.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_instance = alicloud.cen.Instance("defaultInstance", cen_instance_name=var["name"])
default_transit_router = alicloud.cen.TransitRouter("defaultTransitRouter",
cen_id=default_instance.id,
transit_router_description="desd",
transit_router_name=var["name"])
default_transit_router_available_resources = alicloud.cen.get_transit_router_available_resources()
default_customer_gateway = alicloud.vpn.CustomerGateway("defaultCustomerGateway",
ip_address="42.104.22.210",
asn="45014",
description="testAccVpnConnectionDesc")
default_gateway_vpn_attachment = alicloud.vpn.GatewayVpnAttachment("defaultGatewayVpnAttachment",
customer_gateway_id=default_customer_gateway.id,
network_type="public",
local_subnet="0.0.0.0/0",
remote_subnet="0.0.0.0/0",
effect_immediately=False,
ike_config=alicloud.vpn.GatewayVpnAttachmentIkeConfigArgs(
ike_auth_alg="md5",
ike_enc_alg="des",
ike_version="ikev2",
ike_mode="main",
ike_lifetime=86400,
psk="tf-testvpn2",
ike_pfs="group1",
remote_id="testbob2",
local_id="testalice2",
),
ipsec_config=alicloud.vpn.GatewayVpnAttachmentIpsecConfigArgs(
ipsec_pfs="group5",
ipsec_enc_alg="des",
ipsec_auth_alg="md5",
ipsec_lifetime=86400,
),
bgp_config=alicloud.vpn.GatewayVpnAttachmentBgpConfigArgs(
enable=True,
local_asn=45014,
tunnel_cidr="169.254.11.0/30",
local_bgp_ip="169.254.11.1",
),
health_check_config=alicloud.vpn.GatewayVpnAttachmentHealthCheckConfigArgs(
enable=True,
sip="192.168.1.1",
dip="10.0.0.1",
interval=10,
retry=10,
policy="revoke_route",
),
enable_dpd=True,
enable_nat_traversal=True,
vpn_attachment_name=var["name"])
default_transit_router_vpn_attachment = alicloud.cen.TransitRouterVpnAttachment("defaultTransitRouterVpnAttachment",
auto_publish_route_enabled=False,
transit_router_attachment_description=var["name"],
transit_router_attachment_name=var["name"],
cen_id=default_transit_router.cen_id,
transit_router_id=default_transit_router.transit_router_id,
vpn_id=default_gateway_vpn_attachment.id,
zones=[alicloud.cen.TransitRouterVpnAttachmentZoneArgs(
zone_id=default_transit_router_available_resources.resources[0].master_zones[0],
)])
default_gateway_vco_route = alicloud.vpn.GatewayVcoRoute("defaultGatewayVcoRoute",
route_dest="192.168.12.0/24",
next_hop=default_transit_router_vpn_attachment.vpn_id,
vpn_connection_id=default_transit_router_vpn_attachment.vpn_id,
weight=100)
default_gateway_vco_routes = alicloud.vpn.get_gateway_vco_routes_output(vpn_connection_id=default_transit_router_vpn_attachment.vpn_id)
pulumi.export("vpnGatewayVcoRouteId1", data["alicloud_vpn_gateway_vco_routes"]["ids"]["routes"][0]["id"])
```
:param Sequence[str] ids: A list of Vco Route IDs.
:param str output_file: File name where to save data source results (after running `pulumi preview`).
:param str route_entry_type: The Routing input type. Valid values: `custom`, `bgp`.
:param str status: The status of the vpn route entry.
:param str vpn_connection_id: The id of the vpn connection.
"""
__args__ = dict()
__args__['ids'] = ids
__args__['outputFile'] = output_file
__args__['pageNumber'] = page_number
__args__['pageSize'] = page_size
__args__['routeEntryType'] = route_entry_type
__args__['status'] = status
__args__['vpnConnectionId'] = vpn_connection_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('alicloud:vpn/getGatewayVcoRoutes:getGatewayVcoRoutes', __args__, opts=opts, typ=GetGatewayVcoRoutesResult).value
return AwaitableGetGatewayVcoRoutesResult(
id=pulumi.get(__ret__, 'id'),
ids=pulumi.get(__ret__, 'ids'),
output_file=pulumi.get(__ret__, 'output_file'),
page_number=pulumi.get(__ret__, 'page_number'),
page_size=pulumi.get(__ret__, 'page_size'),
route_entry_type=pulumi.get(__ret__, 'route_entry_type'),
routes=pulumi.get(__ret__, 'routes'),
status=pulumi.get(__ret__, 'status'),
vpn_connection_id=pulumi.get(__ret__, 'vpn_connection_id'))
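# Example (illustrative only, the connection id is a placeholder): when the VPN
# connection id is already known as a plain string, the non-output variant can be
# invoked directly.
#
#     routes_result = get_gateway_vco_routes(vpn_connection_id="vco-xxxxxxxxxxxxxxxxx")
#     first_route_id = routes_result.routes[0].id if routes_result.routes else None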
@_utilities.lift_output_func(get_gateway_vco_routes)
def get_gateway_vco_routes_output(ids: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,
output_file: Optional[pulumi.Input[Optional[str]]] = None,
page_number: Optional[pulumi.Input[Optional[int]]] = None,
page_size: Optional[pulumi.Input[Optional[int]]] = None,
route_entry_type: Optional[pulumi.Input[Optional[str]]] = None,
status: Optional[pulumi.Input[Optional[str]]] = None,
vpn_connection_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGatewayVcoRoutesResult]:
"""
This data source provides the Vpn Gateway Vco Routes of the current Alibaba Cloud user.
> **NOTE:** Available in v1.183.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_instance = alicloud.cen.Instance("defaultInstance", cen_instance_name=var["name"])
default_transit_router = alicloud.cen.TransitRouter("defaultTransitRouter",
cen_id=default_instance.id,
transit_router_description="desd",
transit_router_name=var["name"])
default_transit_router_available_resources = alicloud.cen.get_transit_router_available_resources()
default_customer_gateway = alicloud.vpn.CustomerGateway("defaultCustomerGateway",
ip_address="42.104.22.210",
asn="45014",
description="testAccVpnConnectionDesc")
default_gateway_vpn_attachment = alicloud.vpn.GatewayVpnAttachment("defaultGatewayVpnAttachment",
customer_gateway_id=default_customer_gateway.id,
network_type="public",
local_subnet="0.0.0.0/0",
remote_subnet="0.0.0.0/0",
effect_immediately=False,
ike_config=alicloud.vpn.GatewayVpnAttachmentIkeConfigArgs(
ike_auth_alg="md5",
ike_enc_alg="des",
ike_version="ikev2",
ike_mode="main",
ike_lifetime=86400,
psk="tf-testvpn2",
ike_pfs="group1",
remote_id="testbob2",
local_id="testalice2",
),
ipsec_config=alicloud.vpn.GatewayVpnAttachmentIpsecConfigArgs(
ipsec_pfs="group5",
ipsec_enc_alg="des",
ipsec_auth_alg="md5",
ipsec_lifetime=86400,
),
bgp_config=alicloud.vpn.GatewayVpnAttachmentBgpConfigArgs(
enable=True,
local_asn=45014,
tunnel_cidr="169.254.11.0/30",
local_bgp_ip="169.254.11.1",
),
health_check_config=alicloud.vpn.GatewayVpnAttachmentHealthCheckConfigArgs(
enable=True,
sip="192.168.1.1",
dip="10.0.0.1",
interval=10,
retry=10,
policy="revoke_route",
),
enable_dpd=True,
enable_nat_traversal=True,
vpn_attachment_name=var["name"])
default_transit_router_vpn_attachment = alicloud.cen.TransitRouterVpnAttachment("defaultTransitRouterVpnAttachment",
auto_publish_route_enabled=False,
transit_router_attachment_description=var["name"],
transit_router_attachment_name=var["name"],
cen_id=default_transit_router.cen_id,
transit_router_id=default_transit_router.transit_router_id,
vpn_id=default_gateway_vpn_attachment.id,
zones=[alicloud.cen.TransitRouterVpnAttachmentZoneArgs(
zone_id=default_transit_router_available_resources.resources[0].master_zones[0],
)])
default_gateway_vco_route = alicloud.vpn.GatewayVcoRoute("defaultGatewayVcoRoute",
route_dest="192.168.12.0/24",
next_hop=default_transit_router_vpn_attachment.vpn_id,
vpn_connection_id=default_transit_router_vpn_attachment.vpn_id,
weight=100)
default_gateway_vco_routes = alicloud.vpn.get_gateway_vco_routes_output(vpn_connection_id=default_transit_router_vpn_attachment.vpn_id)
pulumi.export("vpnGatewayVcoRouteId1", data["alicloud_vpn_gateway_vco_routes"]["ids"]["routes"][0]["id"])
```
:param Sequence[str] ids: A list of Vco Route IDs.
:param str output_file: File name where to save data source results (after running `pulumi preview`).
:param str route_entry_type: The Routing input type. Valid values: `custom`, `bgp`.
:param str status: The status of the vpn route entry.
:param str vpn_connection_id: The id of the vpn connection.
"""
...
|
PypiClean
|
/graphormer-pretrained-0.2.3.tar.gz/graphormer-pretrained-0.2.3/graphormer_pretrained/tasks/graph_prediction.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import contextlib
import importlib
import numpy as np
import torch
import sys
import os
import math
from dataclasses import dataclass, field
from omegaconf import II, open_dict, OmegaConf
from fairseq.data import (
NestedDictionaryDataset,
NumSamplesDataset,
)
from fairseq.tasks import FairseqDataclass, FairseqTask, register_task
from graphormer_pretrained.utils.loader import load_pretrained_model
from graphormer_pretrained.utils.amp_optimizer import AMPOptimizer
from graphormer_pretrained.data.dataset import (
BatchedDataDataset,
TargetDataset,
GraphormerDataset,
EpochShuffleDataset,
)
from graphormer_pretrained.data import DATASET_REGISTRY
logger = logging.getLogger(__name__)
@dataclass
class GraphPredictionConfig(FairseqDataclass):
dataset_name: str = field(
default="pcqm4m",
metadata={"help": "name of the dataset"},
)
num_classes: int = field(
default=-1,
metadata={"help": "number of classes or regression targets"},
)
max_nodes: int = field(
default=128,
metadata={"help": "max nodes per graph"},
)
dataset_source: str = field(
default="pyg",
metadata={"help": "source of graph dataset, can be: pyg, ogb, smiles"},
)
num_atoms: int = field(
default=512 * 9,
metadata={"help": "number of atom types in the graph"},
)
num_edges: int = field(
default=512 * 3,
metadata={"help": "number of edge types in the graph"},
)
num_in_degree: int = field(
default=512,
metadata={"help": "number of in degree types in the graph"},
)
num_out_degree: int = field(
default=512,
metadata={"help": "number of out degree types in the graph"},
)
num_spatial: int = field(
default=512,
metadata={"help": "number of spatial types in the graph"},
)
num_edge_dis: int = field(
default=128,
metadata={"help": "number of edge dis types in the graph"},
)
multi_hop_max_dist: int = field(
default=5,
metadata={"help": "max distance of multi-hop edges"},
)
spatial_pos_max: int = field(
default=1024,
metadata={"help": "max distance of multi-hop edges"},
)
edge_type: str = field(
default="multi_hop",
metadata={"help": "edge type in the graph"},
)
seed: int = II("common.seed")
pretrained_model_name: str = field(
default="none",
metadata={"help": "name of used pretrained model"},
)
load_pretrained_model_output_layer: bool = field(
default=False,
metadata={"help": "whether to load the output layer of pretrained model"},
)
train_epoch_shuffle: bool = field(
default=False,
metadata={"help": "whether to shuffle the dataset at each epoch"},
)
user_data_dir: str = field(
default="",
metadata={"help": "path to the module of user-defined dataset"},
)
@register_task("graph_prediction", dataclass=GraphPredictionConfig)
class GraphPredictionTask(FairseqTask):
"""
Graph prediction (classification or regression) task.
"""
def __init__(self, cfg):
super().__init__(cfg)
if cfg.user_data_dir != "":
self.__import_user_defined_datasets(cfg.user_data_dir)
if cfg.dataset_name in DATASET_REGISTRY:
dataset_dict = DATASET_REGISTRY[cfg.dataset_name]
self.dm = GraphormerDataset(
dataset=dataset_dict["dataset"],
dataset_source=dataset_dict["source"],
train_idx=dataset_dict["train_idx"],
valid_idx=dataset_dict["valid_idx"],
test_idx=dataset_dict["test_idx"],
seed=cfg.seed,
)
else:
raise ValueError(
f"dataset {cfg.dataset_name} is not found in customized dataset module {cfg.user_data_dir}"
)
else:
self.dm = GraphormerDataset(
dataset_spec=cfg.dataset_name,
dataset_source=cfg.dataset_source,
seed=cfg.seed,
)
def __import_user_defined_datasets(self, dataset_dir):
dataset_dir = dataset_dir.strip("/")
module_parent, module_name = os.path.split(dataset_dir)
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
for file in os.listdir(dataset_dir):
path = os.path.join(dataset_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(module_name + "." + task_name)
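    # Illustrative sketch (not part of the original code): a module passed via
    # ``user_data_dir`` is imported above and is expected to leave an entry in
    # DATASET_REGISTRY carrying the keys read in __init__. Something along these
    # lines (registry key and dataset object are hypothetical):
    #
    #     from graphormer_pretrained.data import DATASET_REGISTRY
    #
    #     DATASET_REGISTRY["my_dataset"] = {
    #         "dataset": my_pyg_dataset,   # e.g. a torch_geometric dataset
    #         "source": "pyg",
    #         "train_idx": None,
    #         "valid_idx": None,
    #         "test_idx": None,
    #     }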
@classmethod
def setup_task(cls, cfg, **kwargs):
assert cfg.num_classes > 0, "Must set task.num_classes"
return cls(cfg)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
assert split in ["train", "valid", "test"]
if split == "train":
batched_data = self.dm.dataset_train
elif split == "valid":
batched_data = self.dm.dataset_val
elif split == "test":
batched_data = self.dm.dataset_test
batched_data = BatchedDataDataset(
batched_data,
max_node=self.max_nodes(),
multi_hop_max_dist=self.cfg.multi_hop_max_dist,
spatial_pos_max=self.cfg.spatial_pos_max,
)
data_sizes = np.array([self.max_nodes()] * len(batched_data))
target = TargetDataset(batched_data)
dataset = NestedDictionaryDataset(
{
"nsamples": NumSamplesDataset(),
"net_input": {"batched_data": batched_data},
"target": target,
},
sizes=data_sizes,
)
if split == "train" and self.cfg.train_epoch_shuffle:
dataset = EpochShuffleDataset(dataset, size=len(dataset), seed=self.cfg.seed)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, cfg):
from fairseq import models
with open_dict(cfg) if OmegaConf.is_config(cfg) else contextlib.ExitStack():
cfg.max_nodes = self.cfg.max_nodes
model = models.build_model(cfg, self)
return model
def max_nodes(self):
return self.cfg.max_nodes
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return None
@property
def label_dictionary(self):
return None
@dataclass
class GraphPredictionWithFlagConfig(GraphPredictionConfig):
flag_m: int = field(
default=3,
metadata={"help": "number of iterations to optimize the perturbations with flag objectives"},
)
flag_step_size: float = field(
default=1e-3,
metadata={"help": "learing rate of iterations to optimize the perturbations with flag objective"},
)
flag_mag: float = field(
default=1e-3,
metadata={"help": "magnitude bound for perturbations in flag objectives"},
)
@register_task("graph_prediction_with_flag", dataclass=GraphPredictionWithFlagConfig)
class GraphPredictionWithFlagTask(GraphPredictionTask):
"""
Graph prediction (classification or regression) task.
"""
def __init__(self, cfg):
super().__init__(cfg)
self.flag_m = cfg.flag_m
self.flag_step_size = cfg.flag_step_size
self.flag_mag = cfg.flag_mag
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
batched_data = sample["net_input"]["batched_data"]["x"]
n_graph, n_node = batched_data.shape[:2]
perturb_shape = n_graph, n_node, model.encoder_embed_dim
if self.flag_mag > 0:
perturb = torch.FloatTensor(*perturb_shape).uniform_(-1, 1).to(batched_data.device)
perturb = perturb * self.flag_mag / math.sqrt(perturb_shape[-1])
else:
perturb = (
torch.FloatTensor(*perturb_shape)
.uniform_(-self.flag_step_size, self.flag_step_size)
.to(batched_data.device)
)
perturb.requires_grad_()
sample["perturb"] = perturb
with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
loss /= self.flag_m
total_loss = 0
for _ in range(self.flag_m - 1):
optimizer.backward(loss)
total_loss += loss.detach()
perturb_data = perturb.detach() + self.flag_step_size * torch.sign(perturb.grad.detach())
if self.flag_mag > 0:
perturb_data_norm = torch.norm(perturb_data, dim=-1).detach()
exceed_mask = (perturb_data_norm > self.flag_mag).to(perturb_data)
reweights = (self.flag_mag / perturb_data_norm * exceed_mask + (1 - exceed_mask)).unsqueeze(
-1
)
perturb_data = (perturb_data * reweights).detach()
perturb.data = perturb_data.data
perturb.grad[:] = 0
sample["perturb"] = perturb
with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
loss /= self.flag_m
optimizer.backward(loss)
total_loss += loss.detach()
logging_output["loss"] = total_loss
return total_loss, sample_size, logging_output
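# Note (added for clarity, not part of the original code): the loop above appears to
# implement the FLAG adversarial augmentation scheme -- node-feature perturbations are
# refined over flag_m forward passes using a signed-gradient ascent step of size
# flag_step_size and, when flag_mag > 0, rescaled back onto an L2 ball of radius flag_mag.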
|
PypiClean
|
/baserow_open_api_client-0.0.6.tar.gz/baserow_open_api_client-0.0.6/baserow_open_api_client/api/role_assignments/batch_assign_role.py
|
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Union
import httpx
from ... import errors
from ...client import AuthenticatedClient, Client
from ...models.batch_assign_role_response_400 import BatchAssignRoleResponse400
from ...models.batch_assign_role_response_404 import BatchAssignRoleResponse404
from ...models.batch_create_role_assignment import BatchCreateRoleAssignment
from ...models.open_api_role_assignment import OpenApiRoleAssignment
from ...types import Response
def _get_kwargs(
workspace_id: int,
*,
client: AuthenticatedClient,
json_body: BatchCreateRoleAssignment,
) -> Dict[str, Any]:
url = "{}/api/role/{workspace_id}/batch/".format(client.base_url, workspace_id=workspace_id)
headers: Dict[str, str] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
json_json_body = json_body.to_dict()
result = {
"method": "post",
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"follow_redirects": client.follow_redirects,
"json": json_json_body,
}
if hasattr(client, "auth"):
result["auth"] = client.auth
return result
def _parse_response(
*, client: Client, response: httpx.Response
) -> Optional[Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List["OpenApiRoleAssignment"]]]:
if response.status_code == HTTPStatus.OK:
response_200 = []
_response_200 = response.json()
for response_200_item_data in _response_200:
response_200_item = OpenApiRoleAssignment.from_dict(response_200_item_data)
response_200.append(response_200_item)
return response_200
if response.status_code == HTTPStatus.BAD_REQUEST:
response_400 = BatchAssignRoleResponse400.from_dict(response.json())
return response_400
if response.status_code == HTTPStatus.NOT_FOUND:
response_404 = BatchAssignRoleResponse404.from_dict(response.json())
return response_404
if client.raise_on_unexpected_status:
raise errors.UnexpectedStatus(response.status_code, response.content)
else:
return None
def _build_response(
*, client: Client, response: httpx.Response
) -> Response[Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List["OpenApiRoleAssignment"]]]:
return Response(
status_code=HTTPStatus(response.status_code),
content=response.content,
headers=response.headers,
parsed=_parse_response(client=client, response=response),
)
def sync_detailed(
workspace_id: int, *, client: AuthenticatedClient, json_body: BatchCreateRoleAssignment, httpx_client=None
) -> Response[Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List["OpenApiRoleAssignment"]]]:
"""You can assign a role to a multiple subjects into the given workspace for the given scopes with this
endpoint. If you want to remove the role you can omit the role property.
Args:
workspace_id (int):
json_body (BatchCreateRoleAssignment):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Response[Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List['OpenApiRoleAssignment']]]
"""
kwargs = _get_kwargs(
workspace_id=workspace_id,
client=client,
json_body=json_body,
)
if httpx_client:
response = httpx_client.request(
**kwargs,
)
else:
response = httpx.request(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(client=client, response=response)
def sync(
workspace_id: int,
*,
client: AuthenticatedClient,
json_body: BatchCreateRoleAssignment,
) -> Optional[Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List["OpenApiRoleAssignment"]]]:
"""You can assign a role to a multiple subjects into the given workspace for the given scopes with this
endpoint. If you want to remove the role you can omit the role property.
Args:
workspace_id (int):
json_body (BatchCreateRoleAssignment):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List['OpenApiRoleAssignment']]
"""
return sync_detailed(
workspace_id=workspace_id,
client=client,
json_body=json_body,
).parsed
async def asyncio_detailed(
workspace_id: int,
*,
client: AuthenticatedClient,
json_body: BatchCreateRoleAssignment,
) -> Response[Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List["OpenApiRoleAssignment"]]]:
"""You can assign a role to a multiple subjects into the given workspace for the given scopes with this
endpoint. If you want to remove the role you can omit the role property.
Args:
workspace_id (int):
json_body (BatchCreateRoleAssignment):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Response[Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List['OpenApiRoleAssignment']]]
"""
kwargs = _get_kwargs(
workspace_id=workspace_id,
client=client,
json_body=json_body,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.request(**kwargs)
return _build_response(client=client, response=response)
async def asyncio(
workspace_id: int,
*,
client: AuthenticatedClient,
json_body: BatchCreateRoleAssignment,
) -> Optional[Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List["OpenApiRoleAssignment"]]]:
"""You can assign a role to a multiple subjects into the given workspace for the given scopes with this
endpoint. If you want to remove the role you can omit the role property.
Args:
workspace_id (int):
json_body (BatchCreateRoleAssignment):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Union[BatchAssignRoleResponse400, BatchAssignRoleResponse404, List['OpenApiRoleAssignment']]
"""
return (
await asyncio_detailed(
workspace_id=workspace_id,
client=client,
json_body=json_body,
)
).parsed
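# Example (illustrative only; constructor arguments and payload are placeholders):
#
#     from baserow_open_api_client.client import AuthenticatedClient
#     from baserow_open_api_client.models.batch_create_role_assignment import (
#         BatchCreateRoleAssignment,
#     )
#
#     client = AuthenticatedClient(base_url="https://api.baserow.io", token="<token>")
#     body = BatchCreateRoleAssignment.from_dict({"items": []})
#     assignments = sync(workspace_id=1, client=client, json_body=body)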
|
PypiClean
|
/lbrlabs_pulumi_scaleway-1.10.0a1687249042.tar.gz/lbrlabs_pulumi_scaleway-1.10.0a1687249042/pulumiverse_scaleway/__init__.py
|
from . import _utilities
import typing
# Export this package's modules as members:
from .account_ssh_key import *
from .apple_slicon_valley_server import *
from .baremetal_server import *
from .container import *
from .container_cron import *
from .container_namespace import *
from .database import *
from .database_acl import *
from .database_backup import *
from .database_instance import *
from .database_privilege import *
from .database_read_replica import *
from .database_user import *
from .domain_record import *
from .domain_zone import *
from .flexible_ip import *
from .function import *
from .function_cron import *
from .function_namespace import *
from .get_account_ssh_key import *
from .get_baremetal_offer import *
from .get_baremetal_os import *
from .get_baremetal_server import *
from .get_container import *
from .get_container_namespace import *
from .get_database import *
from .get_database_acl import *
from .get_database_backup import *
from .get_database_instance import *
from .get_database_privilege import *
from .get_domain_record import *
from .get_domain_zone import *
from .get_flexible_ip import *
from .get_function import *
from .get_function_namespace import *
from .get_instance_image import *
from .get_instance_ip import *
from .get_instance_security_group import *
from .get_instance_server import *
from .get_instance_servers import *
from .get_instance_volume import *
from .get_iot_device import *
from .get_iot_hub import *
from .get_kubernetes_cluster import *
from .get_kubernetes_node_pool import *
from .get_loadbalancer import *
from .get_loadbalancer_certificate import *
from .get_loadbalancer_ip import *
from .get_marketplace_image import *
from .get_object_bucket import *
from .get_redis_cluster import *
from .get_registry_image import *
from .get_registry_namespace import *
from .get_vpc_gateway_network import *
from .get_vpc_private_network import *
from .get_vpc_public_gateway import *
from .get_vpc_public_gateway_dhcp import *
from .get_vpc_public_gateway_dhcp_reservation import *
from .get_vpc_public_gateway_ip import *
from .get_vpc_public_pat_rule import *
from .instance_image import *
from .instance_ip import *
from .instance_ip_reverse_dns import *
from .instance_placement_group import *
from .instance_private_nic import *
from .instance_security_group import *
from .instance_security_group_rules import *
from .instance_server import *
from .instance_snapshot import *
from .instance_volume import *
from .iot_device import *
from .iot_hub import *
from .iot_network import *
from .kubernetes_cluster import *
from .kubernetes_node_pool import *
from .loadbalancer import *
from .loadbalancer_backend import *
from .loadbalancer_certificate import *
from .loadbalancer_frontend import *
from .loadbalancer_ip import *
from .loadbalancer_route import *
from .object_bucket import *
from .object_bucket_policy import *
from .object_bucket_website_configuration import *
from .provider import *
from .redis_cluster import *
from .registry_namespace import *
from .vpc_gateway_network import *
from .vpc_private_network import *
from .vpc_public_gateway import *
from .vpc_public_gateway_dhcp import *
from .vpc_public_gateway_dhcp_reservation import *
from .vpc_public_gateway_ip import *
from .vpc_public_gateway_pat_rule import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import lbrlabs_scaleway.config as __config
config = __config
else:
config = _utilities.lazy_import('lbrlabs_scaleway.config')
_utilities.register(
resource_modules="""
[
{
"pkg": "scaleway",
"mod": "index/accountSshKey",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/accountSshKey:AccountSshKey": "AccountSshKey"
}
},
{
"pkg": "scaleway",
"mod": "index/appleSliconValleyServer",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/appleSliconValleyServer:AppleSliconValleyServer": "AppleSliconValleyServer"
}
},
{
"pkg": "scaleway",
"mod": "index/baremetalServer",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/baremetalServer:BaremetalServer": "BaremetalServer"
}
},
{
"pkg": "scaleway",
"mod": "index/container",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/container:Container": "Container"
}
},
{
"pkg": "scaleway",
"mod": "index/containerCron",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/containerCron:ContainerCron": "ContainerCron"
}
},
{
"pkg": "scaleway",
"mod": "index/containerNamespace",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/containerNamespace:ContainerNamespace": "ContainerNamespace"
}
},
{
"pkg": "scaleway",
"mod": "index/database",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/database:Database": "Database"
}
},
{
"pkg": "scaleway",
"mod": "index/databaseAcl",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/databaseAcl:DatabaseAcl": "DatabaseAcl"
}
},
{
"pkg": "scaleway",
"mod": "index/databaseBackup",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/databaseBackup:DatabaseBackup": "DatabaseBackup"
}
},
{
"pkg": "scaleway",
"mod": "index/databaseInstance",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/databaseInstance:DatabaseInstance": "DatabaseInstance"
}
},
{
"pkg": "scaleway",
"mod": "index/databasePrivilege",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/databasePrivilege:DatabasePrivilege": "DatabasePrivilege"
}
},
{
"pkg": "scaleway",
"mod": "index/databaseReadReplica",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/databaseReadReplica:DatabaseReadReplica": "DatabaseReadReplica"
}
},
{
"pkg": "scaleway",
"mod": "index/databaseUser",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/databaseUser:DatabaseUser": "DatabaseUser"
}
},
{
"pkg": "scaleway",
"mod": "index/domainRecord",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/domainRecord:DomainRecord": "DomainRecord"
}
},
{
"pkg": "scaleway",
"mod": "index/domainZone",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/domainZone:DomainZone": "DomainZone"
}
},
{
"pkg": "scaleway",
"mod": "index/flexibleIp",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/flexibleIp:FlexibleIp": "FlexibleIp"
}
},
{
"pkg": "scaleway",
"mod": "index/function",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/function:Function": "Function"
}
},
{
"pkg": "scaleway",
"mod": "index/functionCron",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/functionCron:FunctionCron": "FunctionCron"
}
},
{
"pkg": "scaleway",
"mod": "index/functionNamespace",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/functionNamespace:FunctionNamespace": "FunctionNamespace"
}
},
{
"pkg": "scaleway",
"mod": "index/instanceImage",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instanceImage:InstanceImage": "InstanceImage"
}
},
{
"pkg": "scaleway",
"mod": "index/instanceIp",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instanceIp:InstanceIp": "InstanceIp"
}
},
{
"pkg": "scaleway",
"mod": "index/instanceIpReverseDns",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instanceIpReverseDns:InstanceIpReverseDns": "InstanceIpReverseDns"
}
},
{
"pkg": "scaleway",
"mod": "index/instancePlacementGroup",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instancePlacementGroup:InstancePlacementGroup": "InstancePlacementGroup"
}
},
{
"pkg": "scaleway",
"mod": "index/instancePrivateNic",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instancePrivateNic:InstancePrivateNic": "InstancePrivateNic"
}
},
{
"pkg": "scaleway",
"mod": "index/instanceSecurityGroup",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instanceSecurityGroup:InstanceSecurityGroup": "InstanceSecurityGroup"
}
},
{
"pkg": "scaleway",
"mod": "index/instanceSecurityGroupRules",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instanceSecurityGroupRules:InstanceSecurityGroupRules": "InstanceSecurityGroupRules"
}
},
{
"pkg": "scaleway",
"mod": "index/instanceServer",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instanceServer:InstanceServer": "InstanceServer"
}
},
{
"pkg": "scaleway",
"mod": "index/instanceSnapshot",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instanceSnapshot:InstanceSnapshot": "InstanceSnapshot"
}
},
{
"pkg": "scaleway",
"mod": "index/instanceVolume",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/instanceVolume:InstanceVolume": "InstanceVolume"
}
},
{
"pkg": "scaleway",
"mod": "index/iotDevice",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/iotDevice:IotDevice": "IotDevice"
}
},
{
"pkg": "scaleway",
"mod": "index/iotHub",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/iotHub:IotHub": "IotHub"
}
},
{
"pkg": "scaleway",
"mod": "index/iotNetwork",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/iotNetwork:IotNetwork": "IotNetwork"
}
},
{
"pkg": "scaleway",
"mod": "index/kubernetesCluster",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/kubernetesCluster:KubernetesCluster": "KubernetesCluster"
}
},
{
"pkg": "scaleway",
"mod": "index/kubernetesNodePool",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/kubernetesNodePool:KubernetesNodePool": "KubernetesNodePool"
}
},
{
"pkg": "scaleway",
"mod": "index/loadbalancer",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/loadbalancer:Loadbalancer": "Loadbalancer"
}
},
{
"pkg": "scaleway",
"mod": "index/loadbalancerBackend",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/loadbalancerBackend:LoadbalancerBackend": "LoadbalancerBackend"
}
},
{
"pkg": "scaleway",
"mod": "index/loadbalancerCertificate",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/loadbalancerCertificate:LoadbalancerCertificate": "LoadbalancerCertificate"
}
},
{
"pkg": "scaleway",
"mod": "index/loadbalancerFrontend",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/loadbalancerFrontend:LoadbalancerFrontend": "LoadbalancerFrontend"
}
},
{
"pkg": "scaleway",
"mod": "index/loadbalancerIp",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/loadbalancerIp:LoadbalancerIp": "LoadbalancerIp"
}
},
{
"pkg": "scaleway",
"mod": "index/loadbalancerRoute",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/loadbalancerRoute:LoadbalancerRoute": "LoadbalancerRoute"
}
},
{
"pkg": "scaleway",
"mod": "index/objectBucket",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/objectBucket:ObjectBucket": "ObjectBucket"
}
},
{
"pkg": "scaleway",
"mod": "index/objectBucketPolicy",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/objectBucketPolicy:ObjectBucketPolicy": "ObjectBucketPolicy"
}
},
{
"pkg": "scaleway",
"mod": "index/objectBucketWebsiteConfiguration",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/objectBucketWebsiteConfiguration:ObjectBucketWebsiteConfiguration": "ObjectBucketWebsiteConfiguration"
}
},
{
"pkg": "scaleway",
"mod": "index/redisCluster",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/redisCluster:RedisCluster": "RedisCluster"
}
},
{
"pkg": "scaleway",
"mod": "index/registryNamespace",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/registryNamespace:RegistryNamespace": "RegistryNamespace"
}
},
{
"pkg": "scaleway",
"mod": "index/vpcGatewayNetwork",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/vpcGatewayNetwork:VpcGatewayNetwork": "VpcGatewayNetwork"
}
},
{
"pkg": "scaleway",
"mod": "index/vpcPrivateNetwork",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork": "VpcPrivateNetwork"
}
},
{
"pkg": "scaleway",
"mod": "index/vpcPublicGateway",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/vpcPublicGateway:VpcPublicGateway": "VpcPublicGateway"
}
},
{
"pkg": "scaleway",
"mod": "index/vpcPublicGatewayDhcp",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/vpcPublicGatewayDhcp:VpcPublicGatewayDhcp": "VpcPublicGatewayDhcp"
}
},
{
"pkg": "scaleway",
"mod": "index/vpcPublicGatewayDhcpReservation",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/vpcPublicGatewayDhcpReservation:VpcPublicGatewayDhcpReservation": "VpcPublicGatewayDhcpReservation"
}
},
{
"pkg": "scaleway",
"mod": "index/vpcPublicGatewayIp",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/vpcPublicGatewayIp:VpcPublicGatewayIp": "VpcPublicGatewayIp"
}
},
{
"pkg": "scaleway",
"mod": "index/vpcPublicGatewayPatRule",
"fqn": "lbrlabs_scaleway",
"classes": {
"scaleway:index/vpcPublicGatewayPatRule:VpcPublicGatewayPatRule": "VpcPublicGatewayPatRule"
}
}
]
""",
resource_packages="""
[
{
"pkg": "scaleway",
"token": "pulumi:providers:scaleway",
"fqn": "lbrlabs_scaleway",
"class": "Provider"
}
]
"""
)
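# Usage sketch (an assumption for illustration, not part of the registration data above):
# the JSON blocks map each Pulumi type token, e.g. "scaleway:index/instanceIp:InstanceIp",
# to the Python class that should be instantiated for that resource. In a program this
# might look as follows; constructor arguments are illustrative and should be checked
# against the provider documentation.
#
#     import pulumi
#     import lbrlabs_scaleway as scaleway
#
#     ip = scaleway.InstanceIp("web-ip")
#     server = scaleway.InstanceServer("web", type="DEV1-S", image="ubuntu_jammy", ip_id=ip.id)
#     pulumi.export("public_ip", ip.address)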
/django_ace-1.24.1-py3-none-any.whl/django_ace/static/django_ace/ace/mode-plsql.js
define("ace/mode/plsql_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module){/* ***** BEGIN LICENSE BLOCK *****
* Distributed under the BSD license:
*
* Copyright (c) 2012, Ajax.org B.V.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Ajax.org B.V. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
"use strict";
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var plsqlHighlightRules = function () {
var keywords = ("all|alter|and|any|array|arrow|as|asc|at|begin|between|by|case|check|clusters|cluster|colauth|columns|compress|connect|crash|create|cross|current|database|declare|default|delete|desc|distinct|drop|else|end|exception|exclusive|exists|fetch|form|for|foreign|from|goto|grant|group|having|identified|if|in|inner|indexes|index|insert|intersect|into|is|join|key|left|like|lock|minus|mode|natural|nocompress|not|nowait|null|of|on|option|or|order,overlaps|outer|primary|prior|procedure|public|range|record|references|resource|revoke|right|select|share|size|sql|start|subtype|tabauth|table|then|to|type|union|unique|update|use|values|view|views|when|where|with");
var builtinConstants = ("true|false");
var builtinFunctions = ("abs|acos|add_months|ascii|asciistr|asin|atan|atan2|avg|bfilename|bin_to_num|bitand|cardinality|case|cast|ceil|chartorowid|chr|coalesce|compose|concat|convert|corr|cos|cosh|count|covar_pop|covar_samp|cume_dist|current_date|current_timestamp|dbtimezone|decode|decompose|dense_rank|dump|empty_blob|empty_clob|exp|extract|first_value|floor|from_tz|greatest|group_id|hextoraw|initcap|instr|instr2|instr4|instrb|instrc|lag|last_day|last_value|lead|least|length|length2|length4|lengthb|lengthc|listagg|ln|lnnvl|localtimestamp|log|lower|lpad|ltrim|max|median|min|mod|months_between|nanvl|nchr|new_time|next_day|nth_value|nullif|numtodsinterval|numtoyminterval|nvl|nvl2|power|rank|rawtohex|regexp_count|regexp_instr|regexp_replace|regexp_substr|remainder|replace|round|rownum|rpad|rtrim|sessiontimezone|sign|sin|sinh|soundex|sqrt|stddev|substr|sum|sys_context|sysdate|systimestamp|tan|tanh|to_char|to_clob|to_date|to_dsinterval|to_lob|to_multi_byte|to_nclob|to_number|to_single_byte|to_timestamp|to_timestamp_tz|to_yminterval|translate|trim|trunc|tz_offset|uid|upper|user|userenv|var_pop|var_samp|variance|vsize");
var dataTypes = ("char|nchar|nvarchar2|varchar2|long|raw|" +
"number|numeric|float|dec|decimal|integer|int|smallint|real|double|precision|" +
"date|timestamp|interval|year|day|" +
"bfile|blob|clob|nclob|" +
"rowid|urowid");
var keywordMapper = this.createKeywordMapper({
"support.function": builtinFunctions,
"keyword": keywords,
"constant.language": builtinConstants,
"storage.type": dataTypes
}, "identifier", true);
this.$rules = {
"start": [{
token: "comment",
regex: "--.*$"
}, {
token: "comment",
start: "/\\*",
end: "\\*/"
}, {
token: "string",
regex: '".*?"'
}, {
token: "string",
regex: "'.*?'"
}, {
token: "constant.numeric",
regex: "[+-]?\\d+(?:(?:\\.\\d*)?(?:[eE][+-]?\\d+)?)?\\b"
}, {
token: keywordMapper,
regex: "[a-zA-Z_$][a-zA-Z0-9_$]*\\b"
}, {
token: "keyword.operator",
regex: "\\+|\\-|\\/|\\/\\/|%|<@>|@>|<@|&|\\^|~|<|>|<=|=>|==|!=|<>|="
}, {
token: "paren.lparen",
regex: "[\\(]"
}, {
token: "paren.rparen",
regex: "[\\)]"
}, {
token: "text",
regex: "\\s+"
}]
};
this.normalizeRules();
};
oop.inherits(plsqlHighlightRules, TextHighlightRules);
exports.plsqlHighlightRules = plsqlHighlightRules;
});
define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"], function(require, exports, module){"use strict";
var oop = require("../../lib/oop");
var Range = require("../../range").Range;
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function (commentRegex) {
if (commentRegex) {
this.foldingStartMarker = new RegExp(this.foldingStartMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.start));
this.foldingStopMarker = new RegExp(this.foldingStopMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.end));
}
};
oop.inherits(FoldMode, BaseFoldMode);
(function () {
this.foldingStartMarker = /([\{\[\(])[^\}\]\)]*$|^\s*(\/\*)/;
this.foldingStopMarker = /^[^\[\{\(]*([\}\]\)])|^[\s\*]*(\*\/)/;
this.singleLineBlockCommentRe = /^\s*(\/\*).*\*\/\s*$/;
this.tripleStarBlockCommentRe = /^\s*(\/\*\*\*).*\*\/\s*$/;
this.startRegionRe = /^\s*(\/\*|\/\/)#?region\b/;
this._getFoldWidgetBase = this.getFoldWidget;
this.getFoldWidget = function (session, foldStyle, row) {
var line = session.getLine(row);
if (this.singleLineBlockCommentRe.test(line)) {
if (!this.startRegionRe.test(line) && !this.tripleStarBlockCommentRe.test(line))
return "";
}
var fw = this._getFoldWidgetBase(session, foldStyle, row);
if (!fw && this.startRegionRe.test(line))
return "start"; // lineCommentRegionStart
return fw;
};
this.getFoldWidgetRange = function (session, foldStyle, row, forceMultiline) {
var line = session.getLine(row);
if (this.startRegionRe.test(line))
return this.getCommentRegionBlock(session, line, row);
var match = line.match(this.foldingStartMarker);
if (match) {
var i = match.index;
if (match[1])
return this.openingBracketBlock(session, match[1], row, i);
var range = session.getCommentFoldRange(row, i + match[0].length, 1);
if (range && !range.isMultiLine()) {
if (forceMultiline) {
range = this.getSectionRange(session, row);
}
else if (foldStyle != "all")
range = null;
}
return range;
}
if (foldStyle === "markbegin")
return;
var match = line.match(this.foldingStopMarker);
if (match) {
var i = match.index + match[0].length;
if (match[1])
return this.closingBracketBlock(session, match[1], row, i);
return session.getCommentFoldRange(row, i, -1);
}
};
this.getSectionRange = function (session, row) {
var line = session.getLine(row);
var startIndent = line.search(/\S/);
var startRow = row;
var startColumn = line.length;
row = row + 1;
var endRow = row;
var maxRow = session.getLength();
while (++row < maxRow) {
line = session.getLine(row);
var indent = line.search(/\S/);
if (indent === -1)
continue;
if (startIndent > indent)
break;
var subRange = this.getFoldWidgetRange(session, "all", row);
if (subRange) {
if (subRange.start.row <= startRow) {
break;
}
else if (subRange.isMultiLine()) {
row = subRange.end.row;
}
else if (startIndent == indent) {
break;
}
}
endRow = row;
}
return new Range(startRow, startColumn, endRow, session.getLine(endRow).length);
};
this.getCommentRegionBlock = function (session, line, row) {
var startColumn = line.search(/\s*$/);
var maxRow = session.getLength();
var startRow = row;
var re = /^\s*(?:\/\*|\/\/|--)#?(end)?region\b/;
var depth = 1;
while (++row < maxRow) {
line = session.getLine(row);
var m = re.exec(line);
if (!m)
continue;
if (m[1])
depth--;
else
depth++;
if (!depth)
break;
}
var endRow = row;
if (endRow > startRow) {
return new Range(startRow, startColumn, endRow, line.length);
}
};
}).call(FoldMode.prototype);
});
define("ace/mode/folding/sql",["require","exports","module","ace/lib/oop","ace/mode/folding/cstyle"], function(require, exports, module){"use strict";
var oop = require("../../lib/oop");
var BaseFoldMode = require("./cstyle").FoldMode;
var FoldMode = exports.FoldMode = function () { };
oop.inherits(FoldMode, BaseFoldMode);
(function () {
}).call(FoldMode.prototype);
});
define("ace/mode/plsql",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/plsql_highlight_rules","ace/mode/folding/sql"], function(require, exports, module){/* ***** BEGIN LICENSE BLOCK *****
* Distributed under the BSD license:
*
* Copyright (c) 2012, Ajax.org B.V.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Ajax.org B.V. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
"use strict";
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var PLSqlHighlightRules = require("./plsql_highlight_rules").plsqlHighlightRules;
var FoldMode = require("./folding/sql").FoldMode;
var Mode = function () {
this.HighlightRules = PLSqlHighlightRules;
this.foldingRules = new FoldMode();
};
oop.inherits(Mode, TextMode);
(function () {
this.lineCommentStart = "--";
this.blockComment = { start: "/*", end: "*/" };
this.$id = "ace/mode/plsql";
}).call(Mode.prototype);
exports.Mode = Mode;
}); (function() {
window.require(["ace/mode/plsql"], function(m) {
if (typeof module == "object" && typeof exports == "object" && module) {
module.exports = m;
}
});
})();
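// Usage sketch (an assumption for illustration, not part of the bundled mode file above):
// with ace.js loaded and a container element on the page, the PL/SQL mode defined above
// can be attached to an editor session. The element id "editor" is hypothetical.
if (typeof ace !== "undefined" && typeof document !== "undefined" && document.getElementById("editor")) {
    var plsqlEditor = ace.edit("editor");           // attach Ace to the container
    plsqlEditor.session.setMode("ace/mode/plsql");  // enable the highlighter defined above
}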
/ui/docs/assets/plugins/popper/esm/popper.js
var isBrowser = typeof window !== 'undefined' && typeof document !== 'undefined' && typeof navigator !== 'undefined';
var timeoutDuration = function () {
var longerTimeoutBrowsers = ['Edge', 'Trident', 'Firefox'];
for (var i = 0; i < longerTimeoutBrowsers.length; i += 1) {
if (isBrowser && navigator.userAgent.indexOf(longerTimeoutBrowsers[i]) >= 0) {
return 1;
}
}
return 0;
}();
function microtaskDebounce(fn) {
var called = false;
return function () {
if (called) {
return;
}
called = true;
window.Promise.resolve().then(function () {
called = false;
fn();
});
};
}
function taskDebounce(fn) {
var scheduled = false;
return function () {
if (!scheduled) {
scheduled = true;
setTimeout(function () {
scheduled = false;
fn();
}, timeoutDuration);
}
};
}
var supportsMicroTasks = isBrowser && window.Promise;
/**
* Create a debounced version of a method, that's asynchronously deferred
* but called in the minimum time possible.
*
* @method
* @memberof Popper.Utils
* @argument {Function} fn
* @returns {Function}
*/
var debounce = supportsMicroTasks ? microtaskDebounce : taskDebounce;
/**
* Check if the given variable is a function
* @method
* @memberof Popper.Utils
* @argument {Any} functionToCheck - variable to check
* @returns {Boolean} answer to: is a function?
*/
function isFunction(functionToCheck) {
var getType = {};
return functionToCheck && getType.toString.call(functionToCheck) === '[object Function]';
}
/**
* Get CSS computed property of the given element
* @method
* @memberof Popper.Utils
 * @argument {Element} element
* @argument {String} property
*/
function getStyleComputedProperty(element, property) {
if (element.nodeType !== 1) {
return [];
}
// NOTE: 1 DOM access here
var window = element.ownerDocument.defaultView;
var css = window.getComputedStyle(element, null);
return property ? css[property] : css;
}
/**
* Returns the parentNode or the host of the element
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Element} parent
*/
function getParentNode(element) {
if (element.nodeName === 'HTML') {
return element;
}
return element.parentNode || element.host;
}
/**
* Returns the scrolling parent of the given element
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Element} scroll parent
*/
function getScrollParent(element) {
// Return body, `getScroll` will take care to get the correct `scrollTop` from it
if (!element) {
return document.body;
}
switch (element.nodeName) {
case 'HTML':
case 'BODY':
return element.ownerDocument.body;
case '#document':
return element.body;
}
// Firefox want us to check `-x` and `-y` variations as well
var _getStyleComputedProp = getStyleComputedProperty(element),
overflow = _getStyleComputedProp.overflow,
overflowX = _getStyleComputedProp.overflowX,
overflowY = _getStyleComputedProp.overflowY;
if (/(auto|scroll|overlay)/.test(overflow + overflowY + overflowX)) {
return element;
}
return getScrollParent(getParentNode(element));
}
/**
* Returns the reference node of the reference object, or the reference object itself.
* @method
* @memberof Popper.Utils
* @param {Element|Object} reference - the reference element (the popper will be relative to this)
* @returns {Element} parent
*/
function getReferenceNode(reference) {
return reference && reference.referenceNode ? reference.referenceNode : reference;
}
var isIE11 = isBrowser && !!(window.MSInputMethodContext && document.documentMode);
var isIE10 = isBrowser && /MSIE 10/.test(navigator.userAgent);
/**
* Determines if the browser is Internet Explorer
* @method
* @memberof Popper.Utils
* @param {Number} version to check
* @returns {Boolean} isIE
*/
function isIE(version) {
if (version === 11) {
return isIE11;
}
if (version === 10) {
return isIE10;
}
return isIE11 || isIE10;
}
/**
* Returns the offset parent of the given element
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Element} offset parent
*/
function getOffsetParent(element) {
if (!element) {
return document.documentElement;
}
var noOffsetParent = isIE(10) ? document.body : null;
// NOTE: 1 DOM access here
var offsetParent = element.offsetParent || null;
// Skip hidden elements which don't have an offsetParent
while (offsetParent === noOffsetParent && element.nextElementSibling) {
offsetParent = (element = element.nextElementSibling).offsetParent;
}
var nodeName = offsetParent && offsetParent.nodeName;
if (!nodeName || nodeName === 'BODY' || nodeName === 'HTML') {
return element ? element.ownerDocument.documentElement : document.documentElement;
}
// .offsetParent will return the closest TH, TD or TABLE in case
// no offsetParent is present, I hate this job...
if (['TH', 'TD', 'TABLE'].indexOf(offsetParent.nodeName) !== -1 && getStyleComputedProperty(offsetParent, 'position') === 'static') {
return getOffsetParent(offsetParent);
}
return offsetParent;
}
function isOffsetContainer(element) {
var nodeName = element.nodeName;
if (nodeName === 'BODY') {
return false;
}
return nodeName === 'HTML' || getOffsetParent(element.firstElementChild) === element;
}
/**
* Finds the root node (document, shadowDOM root) of the given element
* @method
* @memberof Popper.Utils
* @argument {Element} node
* @returns {Element} root node
*/
function getRoot(node) {
if (node.parentNode !== null) {
return getRoot(node.parentNode);
}
return node;
}
/**
* Finds the offset parent common to the two provided nodes
* @method
* @memberof Popper.Utils
* @argument {Element} element1
* @argument {Element} element2
* @returns {Element} common offset parent
*/
function findCommonOffsetParent(element1, element2) {
// This check is needed to avoid errors in case one of the elements isn't defined for any reason
if (!element1 || !element1.nodeType || !element2 || !element2.nodeType) {
return document.documentElement;
}
// Here we make sure to give as "start" the element that comes first in the DOM
var order = element1.compareDocumentPosition(element2) & Node.DOCUMENT_POSITION_FOLLOWING;
var start = order ? element1 : element2;
var end = order ? element2 : element1;
// Get common ancestor container
var range = document.createRange();
range.setStart(start, 0);
range.setEnd(end, 0);
var commonAncestorContainer = range.commonAncestorContainer;
// Both nodes are inside #document
if (element1 !== commonAncestorContainer && element2 !== commonAncestorContainer || start.contains(end)) {
if (isOffsetContainer(commonAncestorContainer)) {
return commonAncestorContainer;
}
return getOffsetParent(commonAncestorContainer);
}
// one of the nodes is inside shadowDOM, find which one
var element1root = getRoot(element1);
if (element1root.host) {
return findCommonOffsetParent(element1root.host, element2);
} else {
return findCommonOffsetParent(element1, getRoot(element2).host);
}
}
/**
* Gets the scroll value of the given element in the given side (top and left)
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @argument {String} side `top` or `left`
* @returns {number} amount of scrolled pixels
*/
function getScroll(element) {
var side = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'top';
var upperSide = side === 'top' ? 'scrollTop' : 'scrollLeft';
var nodeName = element.nodeName;
if (nodeName === 'BODY' || nodeName === 'HTML') {
var html = element.ownerDocument.documentElement;
var scrollingElement = element.ownerDocument.scrollingElement || html;
return scrollingElement[upperSide];
}
return element[upperSide];
}
/*
* Sum or subtract the element scroll values (left and top) from a given rect object
* @method
* @memberof Popper.Utils
* @param {Object} rect - Rect object you want to change
 * @param {HTMLElement} element - The element from which the function reads the scroll values
 * @param {Boolean} subtract - set to true if you want to subtract the scroll values
 * @return {Object} rect - The modified rect object
*/
function includeScroll(rect, element) {
var subtract = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
var scrollTop = getScroll(element, 'top');
var scrollLeft = getScroll(element, 'left');
var modifier = subtract ? -1 : 1;
rect.top += scrollTop * modifier;
rect.bottom += scrollTop * modifier;
rect.left += scrollLeft * modifier;
rect.right += scrollLeft * modifier;
return rect;
}
/*
* Helper to detect borders of a given element
* @method
* @memberof Popper.Utils
* @param {CSSStyleDeclaration} styles
* Result of `getStyleComputedProperty` on the given element
* @param {String} axis - `x` or `y`
* @return {number} borders - The borders size of the given axis
*/
function getBordersSize(styles, axis) {
var sideA = axis === 'x' ? 'Left' : 'Top';
var sideB = sideA === 'Left' ? 'Right' : 'Bottom';
return parseFloat(styles['border' + sideA + 'Width'], 10) + parseFloat(styles['border' + sideB + 'Width'], 10);
}
function getSize(axis, body, html, computedStyle) {
return Math.max(body['offset' + axis], body['scroll' + axis], html['client' + axis], html['offset' + axis], html['scroll' + axis], isIE(10) ? parseInt(html['offset' + axis]) + parseInt(computedStyle['margin' + (axis === 'Height' ? 'Top' : 'Left')]) + parseInt(computedStyle['margin' + (axis === 'Height' ? 'Bottom' : 'Right')]) : 0);
}
function getWindowSizes(document) {
var body = document.body;
var html = document.documentElement;
var computedStyle = isIE(10) && getComputedStyle(html);
return {
height: getSize('Height', body, html, computedStyle),
width: getSize('Width', body, html, computedStyle)
};
}
var classCallCheck = function (instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
};
var createClass = function () {
function defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
return function (Constructor, protoProps, staticProps) {
if (protoProps) defineProperties(Constructor.prototype, protoProps);
if (staticProps) defineProperties(Constructor, staticProps);
return Constructor;
};
}();
var defineProperty = function (obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true
});
} else {
obj[key] = value;
}
return obj;
};
var _extends = Object.assign || function (target) {
for (var i = 1; i < arguments.length; i++) {
var source = arguments[i];
for (var key in source) {
if (Object.prototype.hasOwnProperty.call(source, key)) {
target[key] = source[key];
}
}
}
return target;
};
/**
* Given element offsets, generate an output similar to getBoundingClientRect
* @method
* @memberof Popper.Utils
* @argument {Object} offsets
* @returns {Object} ClientRect like output
*/
function getClientRect(offsets) {
return _extends({}, offsets, {
right: offsets.left + offsets.width,
bottom: offsets.top + offsets.height
});
}
/**
* Get bounding client rect of given element
* @method
* @memberof Popper.Utils
* @param {HTMLElement} element
* @return {Object} client rect
*/
function getBoundingClientRect(element) {
var rect = {};
    // IE10 FIX: Please, don't ask, the element isn't
// considered in DOM in some circumstances...
// This isn't reproducible in IE10 compatibility mode of IE11
try {
if (isIE(10)) {
rect = element.getBoundingClientRect();
var scrollTop = getScroll(element, 'top');
var scrollLeft = getScroll(element, 'left');
rect.top += scrollTop;
rect.left += scrollLeft;
rect.bottom += scrollTop;
rect.right += scrollLeft;
} else {
rect = element.getBoundingClientRect();
}
} catch (e) {}
var result = {
left: rect.left,
top: rect.top,
width: rect.right - rect.left,
height: rect.bottom - rect.top
};
// subtract scrollbar size from sizes
var sizes = element.nodeName === 'HTML' ? getWindowSizes(element.ownerDocument) : {};
var width = sizes.width || element.clientWidth || result.width;
var height = sizes.height || element.clientHeight || result.height;
var horizScrollbar = element.offsetWidth - width;
var vertScrollbar = element.offsetHeight - height;
    // if a hypothetical scrollbar is detected, we must be sure it's not a `border`
// we make this check conditional for performance reasons
if (horizScrollbar || vertScrollbar) {
var styles = getStyleComputedProperty(element);
horizScrollbar -= getBordersSize(styles, 'x');
vertScrollbar -= getBordersSize(styles, 'y');
result.width -= horizScrollbar;
result.height -= vertScrollbar;
}
return getClientRect(result);
}
function getOffsetRectRelativeToArbitraryNode(children, parent) {
var fixedPosition = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
var isIE10 = isIE(10);
var isHTML = parent.nodeName === 'HTML';
var childrenRect = getBoundingClientRect(children);
var parentRect = getBoundingClientRect(parent);
var scrollParent = getScrollParent(children);
var styles = getStyleComputedProperty(parent);
var borderTopWidth = parseFloat(styles.borderTopWidth, 10);
var borderLeftWidth = parseFloat(styles.borderLeftWidth, 10);
// In cases where the parent is fixed, we must ignore negative scroll in offset calc
if (fixedPosition && isHTML) {
parentRect.top = Math.max(parentRect.top, 0);
parentRect.left = Math.max(parentRect.left, 0);
}
var offsets = getClientRect({
top: childrenRect.top - parentRect.top - borderTopWidth,
left: childrenRect.left - parentRect.left - borderLeftWidth,
width: childrenRect.width,
height: childrenRect.height
});
offsets.marginTop = 0;
offsets.marginLeft = 0;
// Subtract margins of documentElement in case it's being used as parent
// we do this only on HTML because it's the only element that behaves
// differently when margins are applied to it. The margins are included in
// the box of the documentElement, in the other cases not.
if (!isIE10 && isHTML) {
var marginTop = parseFloat(styles.marginTop, 10);
var marginLeft = parseFloat(styles.marginLeft, 10);
offsets.top -= borderTopWidth - marginTop;
offsets.bottom -= borderTopWidth - marginTop;
offsets.left -= borderLeftWidth - marginLeft;
offsets.right -= borderLeftWidth - marginLeft;
// Attach marginTop and marginLeft because in some circumstances we may need them
offsets.marginTop = marginTop;
offsets.marginLeft = marginLeft;
}
if (isIE10 && !fixedPosition ? parent.contains(scrollParent) : parent === scrollParent && scrollParent.nodeName !== 'BODY') {
offsets = includeScroll(offsets, parent);
}
return offsets;
}
function getViewportOffsetRectRelativeToArtbitraryNode(element) {
var excludeScroll = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
var html = element.ownerDocument.documentElement;
var relativeOffset = getOffsetRectRelativeToArbitraryNode(element, html);
var width = Math.max(html.clientWidth, window.innerWidth || 0);
var height = Math.max(html.clientHeight, window.innerHeight || 0);
var scrollTop = !excludeScroll ? getScroll(html) : 0;
var scrollLeft = !excludeScroll ? getScroll(html, 'left') : 0;
var offset = {
top: scrollTop - relativeOffset.top + relativeOffset.marginTop,
left: scrollLeft - relativeOffset.left + relativeOffset.marginLeft,
width: width,
height: height
};
return getClientRect(offset);
}
/**
* Check if the given element is fixed or is inside a fixed parent
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @argument {Element} customContainer
* @returns {Boolean} answer to "isFixed?"
*/
function isFixed(element) {
var nodeName = element.nodeName;
if (nodeName === 'BODY' || nodeName === 'HTML') {
return false;
}
if (getStyleComputedProperty(element, 'position') === 'fixed') {
return true;
}
var parentNode = getParentNode(element);
if (!parentNode) {
return false;
}
return isFixed(parentNode);
}
/**
* Finds the first parent of an element that has a transformed property defined
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Element} first transformed parent or documentElement
*/
function getFixedPositionOffsetParent(element) {
// This check is needed to avoid errors in case one of the elements isn't defined for any reason
if (!element || !element.parentElement || isIE()) {
return document.documentElement;
}
var el = element.parentElement;
while (el && getStyleComputedProperty(el, 'transform') === 'none') {
el = el.parentElement;
}
return el || document.documentElement;
}
/**
* Computed the boundaries limits and return them
* @method
* @memberof Popper.Utils
* @param {HTMLElement} popper
* @param {HTMLElement} reference
* @param {number} padding
* @param {HTMLElement} boundariesElement - Element used to define the boundaries
* @param {Boolean} fixedPosition - Is in fixed position mode
* @returns {Object} Coordinates of the boundaries
*/
function getBoundaries(popper, reference, padding, boundariesElement) {
var fixedPosition = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : false;
// NOTE: 1 DOM access here
var boundaries = { top: 0, left: 0 };
var offsetParent = fixedPosition ? getFixedPositionOffsetParent(popper) : findCommonOffsetParent(popper, getReferenceNode(reference));
// Handle viewport case
if (boundariesElement === 'viewport') {
boundaries = getViewportOffsetRectRelativeToArtbitraryNode(offsetParent, fixedPosition);
} else {
// Handle other cases based on DOM element used as boundaries
var boundariesNode = void 0;
if (boundariesElement === 'scrollParent') {
boundariesNode = getScrollParent(getParentNode(reference));
if (boundariesNode.nodeName === 'BODY') {
boundariesNode = popper.ownerDocument.documentElement;
}
} else if (boundariesElement === 'window') {
boundariesNode = popper.ownerDocument.documentElement;
} else {
boundariesNode = boundariesElement;
}
var offsets = getOffsetRectRelativeToArbitraryNode(boundariesNode, offsetParent, fixedPosition);
// In case of HTML, we need a different computation
if (boundariesNode.nodeName === 'HTML' && !isFixed(offsetParent)) {
var _getWindowSizes = getWindowSizes(popper.ownerDocument),
height = _getWindowSizes.height,
width = _getWindowSizes.width;
boundaries.top += offsets.top - offsets.marginTop;
boundaries.bottom = height + offsets.top;
boundaries.left += offsets.left - offsets.marginLeft;
boundaries.right = width + offsets.left;
} else {
// for all the other DOM elements, this one is good
boundaries = offsets;
}
}
// Add paddings
padding = padding || 0;
var isPaddingNumber = typeof padding === 'number';
boundaries.left += isPaddingNumber ? padding : padding.left || 0;
boundaries.top += isPaddingNumber ? padding : padding.top || 0;
boundaries.right -= isPaddingNumber ? padding : padding.right || 0;
boundaries.bottom -= isPaddingNumber ? padding : padding.bottom || 0;
return boundaries;
}
function getArea(_ref) {
var width = _ref.width,
height = _ref.height;
return width * height;
}
/**
* Utility used to transform the `auto` placement to the placement with more
* available space.
* @method
* @memberof Popper.Utils
 * @argument {String} placement - the requested placement (may be `auto` with a variation)
 * @argument {Object} refRect - the reference element offsets
 * @argument {Element} popper - the popper element
 * @argument {Element} reference - the reference element
 * @argument {Element|String} boundariesElement - element used to define the boundaries
 * @returns {String} the computed placement
*/
function computeAutoPlacement(placement, refRect, popper, reference, boundariesElement) {
var padding = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : 0;
if (placement.indexOf('auto') === -1) {
return placement;
}
var boundaries = getBoundaries(popper, reference, padding, boundariesElement);
var rects = {
top: {
width: boundaries.width,
height: refRect.top - boundaries.top
},
right: {
width: boundaries.right - refRect.right,
height: boundaries.height
},
bottom: {
width: boundaries.width,
height: boundaries.bottom - refRect.bottom
},
left: {
width: refRect.left - boundaries.left,
height: boundaries.height
}
};
var sortedAreas = Object.keys(rects).map(function (key) {
return _extends({
key: key
}, rects[key], {
area: getArea(rects[key])
});
}).sort(function (a, b) {
return b.area - a.area;
});
var filteredAreas = sortedAreas.filter(function (_ref2) {
var width = _ref2.width,
height = _ref2.height;
return width >= popper.clientWidth && height >= popper.clientHeight;
});
var computedPlacement = filteredAreas.length > 0 ? filteredAreas[0].key : sortedAreas[0].key;
var variation = placement.split('-')[1];
return computedPlacement + (variation ? '-' + variation : '');
}
/**
* Get offsets to the reference element
* @method
* @memberof Popper.Utils
* @param {Object} state
* @param {Element} popper - the popper element
* @param {Element} reference - the reference element (the popper will be relative to this)
 * @param {Boolean} fixedPosition - is in fixed position mode
* @returns {Object} An object containing the offsets which will be applied to the popper
*/
function getReferenceOffsets(state, popper, reference) {
var fixedPosition = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : null;
var commonOffsetParent = fixedPosition ? getFixedPositionOffsetParent(popper) : findCommonOffsetParent(popper, getReferenceNode(reference));
return getOffsetRectRelativeToArbitraryNode(reference, commonOffsetParent, fixedPosition);
}
/**
* Get the outer sizes of the given element (offset size + margins)
* @method
* @memberof Popper.Utils
* @argument {Element} element
* @returns {Object} object containing width and height properties
*/
function getOuterSizes(element) {
var window = element.ownerDocument.defaultView;
var styles = window.getComputedStyle(element);
var x = parseFloat(styles.marginTop || 0) + parseFloat(styles.marginBottom || 0);
var y = parseFloat(styles.marginLeft || 0) + parseFloat(styles.marginRight || 0);
var result = {
width: element.offsetWidth + y,
height: element.offsetHeight + x
};
return result;
}
/**
* Get the opposite placement of the given one
* @method
* @memberof Popper.Utils
* @argument {String} placement
* @returns {String} flipped placement
*/
function getOppositePlacement(placement) {
var hash = { left: 'right', right: 'left', bottom: 'top', top: 'bottom' };
return placement.replace(/left|right|bottom|top/g, function (matched) {
return hash[matched];
});
}
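// Illustrative note (not in the original source): the replacement simply mirrors the side
// keyword, so getOppositePlacement('left-start') returns 'right-start' and
// getOppositePlacement('top') returns 'bottom'.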
/**
* Get offsets to the popper
* @method
* @memberof Popper.Utils
* @param {Object} position - CSS position the Popper will get applied
* @param {HTMLElement} popper - the popper element
* @param {Object} referenceOffsets - the reference offsets (the popper will be relative to this)
* @param {String} placement - one of the valid placement options
* @returns {Object} popperOffsets - An object containing the offsets which will be applied to the popper
*/
function getPopperOffsets(popper, referenceOffsets, placement) {
placement = placement.split('-')[0];
// Get popper node sizes
var popperRect = getOuterSizes(popper);
// Add position, width and height to our offsets object
var popperOffsets = {
width: popperRect.width,
height: popperRect.height
};
// depending by the popper placement we have to compute its offsets slightly differently
var isHoriz = ['right', 'left'].indexOf(placement) !== -1;
var mainSide = isHoriz ? 'top' : 'left';
var secondarySide = isHoriz ? 'left' : 'top';
var measurement = isHoriz ? 'height' : 'width';
var secondaryMeasurement = !isHoriz ? 'height' : 'width';
popperOffsets[mainSide] = referenceOffsets[mainSide] + referenceOffsets[measurement] / 2 - popperRect[measurement] / 2;
if (placement === secondarySide) {
popperOffsets[secondarySide] = referenceOffsets[secondarySide] - popperRect[secondaryMeasurement];
} else {
popperOffsets[secondarySide] = referenceOffsets[getOppositePlacement(secondarySide)];
}
return popperOffsets;
}
/**
* Mimics the `find` method of Array
* @method
* @memberof Popper.Utils
* @argument {Array} arr
 * @argument {Function} check
 * @returns the first matching element, or undefined
*/
function find(arr, check) {
// use native find if supported
if (Array.prototype.find) {
return arr.find(check);
}
// use `filter` to obtain the same behavior of `find`
return arr.filter(check)[0];
}
/**
* Return the index of the matching object
* @method
* @memberof Popper.Utils
* @argument {Array} arr
* @argument prop
* @argument value
* @returns index or -1
*/
function findIndex(arr, prop, value) {
// use native findIndex if supported
if (Array.prototype.findIndex) {
return arr.findIndex(function (cur) {
return cur[prop] === value;
});
}
// use `find` + `indexOf` if `findIndex` isn't supported
var match = find(arr, function (obj) {
return obj[prop] === value;
});
return arr.indexOf(match);
}
/**
 * Loop through the list of modifiers and run them in order,
* each of them will then edit the data object.
* @method
* @memberof Popper.Utils
* @param {dataObject} data
* @param {Array} modifiers
* @param {String} ends - Optional modifier name used as stopper
* @returns {dataObject}
*/
function runModifiers(modifiers, data, ends) {
var modifiersToRun = ends === undefined ? modifiers : modifiers.slice(0, findIndex(modifiers, 'name', ends));
modifiersToRun.forEach(function (modifier) {
if (modifier['function']) {
// eslint-disable-line dot-notation
console.warn('`modifier.function` is deprecated, use `modifier.fn`!');
}
var fn = modifier['function'] || modifier.fn; // eslint-disable-line dot-notation
if (modifier.enabled && isFunction(fn)) {
// Add properties to offsets to make them a complete clientRect object
// we do this before each modifier to make sure the previous one doesn't
// mess with these values
data.offsets.popper = getClientRect(data.offsets.popper);
data.offsets.reference = getClientRect(data.offsets.reference);
data = fn(data, modifier);
}
});
return data;
}
/**
* Updates the position of the popper, computing the new offsets and applying
* the new style.<br />
* Prefer `scheduleUpdate` over `update` because of performance reasons.
* @method
* @memberof Popper
*/
function update() {
// if popper is destroyed, don't perform any further update
if (this.state.isDestroyed) {
return;
}
var data = {
instance: this,
styles: {},
arrowStyles: {},
attributes: {},
flipped: false,
offsets: {}
};
// compute reference element offsets
data.offsets.reference = getReferenceOffsets(this.state, this.popper, this.reference, this.options.positionFixed);
// compute auto placement, store placement inside the data object,
// modifiers will be able to edit `placement` if needed
// and refer to originalPlacement to know the original value
data.placement = computeAutoPlacement(this.options.placement, data.offsets.reference, this.popper, this.reference, this.options.modifiers.flip.boundariesElement, this.options.modifiers.flip.padding);
// store the computed placement inside `originalPlacement`
data.originalPlacement = data.placement;
data.positionFixed = this.options.positionFixed;
// compute the popper offsets
data.offsets.popper = getPopperOffsets(this.popper, data.offsets.reference, data.placement);
data.offsets.popper.position = this.options.positionFixed ? 'fixed' : 'absolute';
// run the modifiers
data = runModifiers(this.modifiers, data);
// the first `update` will call `onCreate` callback
// the other ones will call `onUpdate` callback
if (!this.state.isCreated) {
this.state.isCreated = true;
this.options.onCreate(data);
} else {
this.options.onUpdate(data);
}
}
/**
* Helper used to know if the given modifier is enabled.
* @method
* @memberof Popper.Utils
* @returns {Boolean}
*/
function isModifierEnabled(modifiers, modifierName) {
return modifiers.some(function (_ref) {
var name = _ref.name,
enabled = _ref.enabled;
return enabled && name === modifierName;
});
}
/**
* Get the prefixed supported property name
* @method
* @memberof Popper.Utils
* @argument {String} property (camelCase)
* @returns {String} prefixed property (camelCase or PascalCase, depending on the vendor prefix)
*/
function getSupportedPropertyName(property) {
var prefixes = [false, 'ms', 'Webkit', 'Moz', 'O'];
var upperProp = property.charAt(0).toUpperCase() + property.slice(1);
for (var i = 0; i < prefixes.length; i++) {
var prefix = prefixes[i];
var toCheck = prefix ? '' + prefix + upperProp : property;
if (typeof document.body.style[toCheck] !== 'undefined') {
return toCheck;
}
}
return null;
}
/**
* Destroys the popper.
* @method
* @memberof Popper
*/
function destroy() {
this.state.isDestroyed = true;
// touch DOM only if `applyStyle` modifier is enabled
if (isModifierEnabled(this.modifiers, 'applyStyle')) {
this.popper.removeAttribute('x-placement');
this.popper.style.position = '';
this.popper.style.top = '';
this.popper.style.left = '';
this.popper.style.right = '';
this.popper.style.bottom = '';
this.popper.style.willChange = '';
this.popper.style[getSupportedPropertyName('transform')] = '';
}
this.disableEventListeners();
// remove the popper if user explicitly asked for the deletion on destroy
// do not use `remove` because IE11 doesn't support it
if (this.options.removeOnDestroy) {
this.popper.parentNode.removeChild(this.popper);
}
return this;
}
/**
* Get the window associated with the element
* @argument {Element} element
* @returns {Window}
*/
function getWindow(element) {
var ownerDocument = element.ownerDocument;
return ownerDocument ? ownerDocument.defaultView : window;
}
function attachToScrollParents(scrollParent, event, callback, scrollParents) {
var isBody = scrollParent.nodeName === 'BODY';
var target = isBody ? scrollParent.ownerDocument.defaultView : scrollParent;
target.addEventListener(event, callback, { passive: true });
if (!isBody) {
attachToScrollParents(getScrollParent(target.parentNode), event, callback, scrollParents);
}
scrollParents.push(target);
}
/**
* Setup needed event listeners used to update the popper position
* @method
* @memberof Popper.Utils
* @private
*/
function setupEventListeners(reference, options, state, updateBound) {
// Resize event listener on window
state.updateBound = updateBound;
getWindow(reference).addEventListener('resize', state.updateBound, { passive: true });
// Scroll event listener on scroll parents
var scrollElement = getScrollParent(reference);
attachToScrollParents(scrollElement, 'scroll', state.updateBound, state.scrollParents);
state.scrollElement = scrollElement;
state.eventsEnabled = true;
return state;
}
/**
* It will add resize/scroll events and start recalculating
* position of the popper element when they are triggered.
* @method
* @memberof Popper
*/
function enableEventListeners() {
if (!this.state.eventsEnabled) {
this.state = setupEventListeners(this.reference, this.options, this.state, this.scheduleUpdate);
}
}
/**
* Remove event listeners used to update the popper position
* @method
* @memberof Popper.Utils
* @private
*/
function removeEventListeners(reference, state) {
// Remove resize event listener on window
getWindow(reference).removeEventListener('resize', state.updateBound);
// Remove scroll event listener on scroll parents
state.scrollParents.forEach(function (target) {
target.removeEventListener('scroll', state.updateBound);
});
// Reset state
state.updateBound = null;
state.scrollParents = [];
state.scrollElement = null;
state.eventsEnabled = false;
return state;
}
/**
* It will remove resize/scroll events and won't recalculate popper position
* when they are triggered. It also won't trigger `onUpdate` callback anymore,
* unless you call `update` method manually.
* @method
* @memberof Popper
*/
function disableEventListeners() {
if (this.state.eventsEnabled) {
cancelAnimationFrame(this.scheduleUpdate);
this.state = removeEventListeners(this.reference, this.state);
}
}
/**
* Tells if a given input is a number
* @method
* @memberof Popper.Utils
* @param {*} input to check
* @return {Boolean}
*/
function isNumeric(n) {
return n !== '' && !isNaN(parseFloat(n)) && isFinite(n);
}
/**
* Set the style to the given popper
* @method
* @memberof Popper.Utils
* @argument {Element} element - Element to apply the style to
* @argument {Object} styles
* Object with a list of properties and values which will be applied to the element
*/
function setStyles(element, styles) {
Object.keys(styles).forEach(function (prop) {
var unit = '';
// add unit if the value is numeric and is one of the following
if (['width', 'height', 'top', 'right', 'bottom', 'left'].indexOf(prop) !== -1 && isNumeric(styles[prop])) {
unit = 'px';
}
element.style[prop] = styles[prop] + unit;
});
}
/**
* Set the attributes to the given popper
* @method
* @memberof Popper.Utils
* @argument {Element} element - Element to apply the attributes to
* @argument {Object} styles
* Object with a list of properties and values which will be applied to the element
*/
function setAttributes(element, attributes) {
Object.keys(attributes).forEach(function (prop) {
var value = attributes[prop];
if (value !== false) {
element.setAttribute(prop, attributes[prop]);
} else {
element.removeAttribute(prop);
}
});
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} data.styles - List of style properties - values to apply to popper element
* @argument {Object} data.attributes - List of attribute properties - values to apply to popper element
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The same data object
*/
function applyStyle(data) {
// any property present in `data.styles` will be applied to the popper,
// in this way we can make the 3rd party modifiers add custom styles to it
// Be aware, modifiers could override the properties defined in the previous
// lines of this modifier!
setStyles(data.instance.popper, data.styles);
// any property present in `data.attributes` will be applied to the popper,
// they will be set as HTML attributes of the element
setAttributes(data.instance.popper, data.attributes);
// if arrowElement is defined and arrowStyles has some properties
if (data.arrowElement && Object.keys(data.arrowStyles).length) {
setStyles(data.arrowElement, data.arrowStyles);
}
return data;
}
/**
* Set the x-placement attribute before everything else because it could be used
 * to add margins to the popper; margins need to be calculated to get the
 * correct popper offsets.
* @method
* @memberof Popper.modifiers
* @param {HTMLElement} reference - The reference element used to position the popper
* @param {HTMLElement} popper - The HTML element used as popper
* @param {Object} options - Popper.js options
*/
function applyStyleOnLoad(reference, popper, options, modifierOptions, state) {
// compute reference element offsets
var referenceOffsets = getReferenceOffsets(state, popper, reference, options.positionFixed);
// compute auto placement, store placement inside the data object,
// modifiers will be able to edit `placement` if needed
// and refer to originalPlacement to know the original value
var placement = computeAutoPlacement(options.placement, referenceOffsets, popper, reference, options.modifiers.flip.boundariesElement, options.modifiers.flip.padding);
popper.setAttribute('x-placement', placement);
// Apply `position` to popper before anything else because
// without the position applied we can't guarantee correct computations
setStyles(popper, { position: options.positionFixed ? 'fixed' : 'absolute' });
return options;
}
/**
* @function
* @memberof Popper.Utils
* @argument {Object} data - The data object generated by `update` method
* @argument {Boolean} shouldRound - If the offsets should be rounded at all
* @returns {Object} The popper's position offsets rounded
*
* The tale of pixel-perfect positioning. It's still not 100% perfect, but as
* good as it can be within reason.
* Discussion here: https://github.com/FezVrasta/popper.js/pull/715
*
* Low DPI screens cause a popper to be blurry if not using full pixels (Safari
* as well on High DPI screens).
*
* Firefox prefers no rounding for positioning and does not have blurriness on
* high DPI screens.
*
* Only horizontal placement and left/right values need to be considered.
*/
function getRoundedOffsets(data, shouldRound) {
var _data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var round = Math.round,
floor = Math.floor;
var noRound = function noRound(v) {
return v;
};
var referenceWidth = round(reference.width);
var popperWidth = round(popper.width);
var isVertical = ['left', 'right'].indexOf(data.placement) !== -1;
var isVariation = data.placement.indexOf('-') !== -1;
var sameWidthParity = referenceWidth % 2 === popperWidth % 2;
var bothOddWidth = referenceWidth % 2 === 1 && popperWidth % 2 === 1;
var horizontalToInteger = !shouldRound ? noRound : isVertical || isVariation || sameWidthParity ? round : floor;
var verticalToInteger = !shouldRound ? noRound : round;
return {
left: horizontalToInteger(bothOddWidth && !isVariation && shouldRound ? popper.left - 1 : popper.left),
top: verticalToInteger(popper.top),
bottom: verticalToInteger(popper.bottom),
right: horizontalToInteger(popper.right)
};
}
var isFirefox = isBrowser && /Firefox/i.test(navigator.userAgent);
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function computeStyle(data, options) {
var x = options.x,
y = options.y;
var popper = data.offsets.popper;
// Remove this legacy support in Popper.js v2
var legacyGpuAccelerationOption = find(data.instance.modifiers, function (modifier) {
return modifier.name === 'applyStyle';
}).gpuAcceleration;
if (legacyGpuAccelerationOption !== undefined) {
console.warn('WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!');
}
var gpuAcceleration = legacyGpuAccelerationOption !== undefined ? legacyGpuAccelerationOption : options.gpuAcceleration;
var offsetParent = getOffsetParent(data.instance.popper);
var offsetParentRect = getBoundingClientRect(offsetParent);
// Styles
var styles = {
position: popper.position
};
var offsets = getRoundedOffsets(data, window.devicePixelRatio < 2 || !isFirefox);
var sideA = x === 'bottom' ? 'top' : 'bottom';
var sideB = y === 'right' ? 'left' : 'right';
// if gpuAcceleration is set to `true` and transform is supported,
// we use `translate3d` to apply the position to the popper we
// automatically use the supported prefixed version if needed
var prefixedProperty = getSupportedPropertyName('transform');
// now, let's make a step back and look at this code closely (wtf?)
// If the content of the popper grows once it's been positioned, it
// may happen that the popper gets misplaced because of the new content
// overflowing its reference element
// To avoid this problem, we provide two options (x and y), which allow
// the consumer to define the offset origin.
// If we position a popper on top of a reference element, we can set
// `x` to `top` to make the popper grow towards its top instead of
// its bottom.
var left = void 0,
top = void 0;
if (sideA === 'bottom') {
// when offsetParent is <html> the positioning is relative to the bottom of the screen (excluding the scrollbar)
// and not the bottom of the html element
if (offsetParent.nodeName === 'HTML') {
top = -offsetParent.clientHeight + offsets.bottom;
} else {
top = -offsetParentRect.height + offsets.bottom;
}
} else {
top = offsets.top;
}
if (sideB === 'right') {
if (offsetParent.nodeName === 'HTML') {
left = -offsetParent.clientWidth + offsets.right;
} else {
left = -offsetParentRect.width + offsets.right;
}
} else {
left = offsets.left;
}
if (gpuAcceleration && prefixedProperty) {
styles[prefixedProperty] = 'translate3d(' + left + 'px, ' + top + 'px, 0)';
styles[sideA] = 0;
styles[sideB] = 0;
styles.willChange = 'transform';
} else {
        // otherwise, we use the standard `top`, `left`, `bottom` and `right` properties
var invertTop = sideA === 'bottom' ? -1 : 1;
var invertLeft = sideB === 'right' ? -1 : 1;
styles[sideA] = top * invertTop;
styles[sideB] = left * invertLeft;
styles.willChange = sideA + ', ' + sideB;
}
// Attributes
var attributes = {
'x-placement': data.placement
};
// Update `data` attributes, styles and arrowStyles
data.attributes = _extends({}, attributes, data.attributes);
data.styles = _extends({}, styles, data.styles);
data.arrowStyles = _extends({}, data.offsets.arrow, data.arrowStyles);
return data;
}
/**
* Helper used to know if the given modifier depends from another one.<br />
* It checks if the needed modifier is listed and enabled.
* @method
* @memberof Popper.Utils
* @param {Array} modifiers - list of modifiers
* @param {String} requestingName - name of requesting modifier
* @param {String} requestedName - name of requested modifier
* @returns {Boolean}
*/
function isModifierRequired(modifiers, requestingName, requestedName) {
var requesting = find(modifiers, function (_ref) {
var name = _ref.name;
return name === requestingName;
});
var isRequired = !!requesting && modifiers.some(function (modifier) {
return modifier.name === requestedName && modifier.enabled && modifier.order < requesting.order;
});
if (!isRequired) {
var _requesting = '`' + requestingName + '`';
var requested = '`' + requestedName + '`';
console.warn(requested + ' modifier is required by ' + _requesting + ' modifier in order to work, be sure to include it before ' + _requesting + '!');
}
return isRequired;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function arrow(data, options) {
var _data$offsets$arrow;
// arrow depends on keepTogether in order to work
if (!isModifierRequired(data.instance.modifiers, 'arrow', 'keepTogether')) {
return data;
}
var arrowElement = options.element;
// if arrowElement is a string, suppose it's a CSS selector
if (typeof arrowElement === 'string') {
arrowElement = data.instance.popper.querySelector(arrowElement);
// if arrowElement is not found, don't run the modifier
if (!arrowElement) {
return data;
}
} else {
// if the arrowElement isn't a query selector we must check that the
// provided DOM node is child of its popper node
if (!data.instance.popper.contains(arrowElement)) {
console.warn('WARNING: `arrow.element` must be child of its popper element!');
return data;
}
}
var placement = data.placement.split('-')[0];
var _data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var isVertical = ['left', 'right'].indexOf(placement) !== -1;
var len = isVertical ? 'height' : 'width';
var sideCapitalized = isVertical ? 'Top' : 'Left';
var side = sideCapitalized.toLowerCase();
var altSide = isVertical ? 'left' : 'top';
var opSide = isVertical ? 'bottom' : 'right';
var arrowElementSize = getOuterSizes(arrowElement)[len];
//
// extends keepTogether behavior making sure the popper and its
// reference have enough pixels in conjunction
//
// top/left side
if (reference[opSide] - arrowElementSize < popper[side]) {
data.offsets.popper[side] -= popper[side] - (reference[opSide] - arrowElementSize);
}
// bottom/right side
if (reference[side] + arrowElementSize > popper[opSide]) {
data.offsets.popper[side] += reference[side] + arrowElementSize - popper[opSide];
}
data.offsets.popper = getClientRect(data.offsets.popper);
// compute center of the popper
var center = reference[side] + reference[len] / 2 - arrowElementSize / 2;
// Compute the sideValue using the updated popper offsets
// take popper margin in account because we don't have this info available
var css = getStyleComputedProperty(data.instance.popper);
  var popperMarginSide = parseFloat(css['margin' + sideCapitalized]);
  var popperBorderSide = parseFloat(css['border' + sideCapitalized + 'Width']);
var sideValue = center - data.offsets.popper[side] - popperMarginSide - popperBorderSide;
  // prevent arrowElement from being placed outside the bounds of its popper
sideValue = Math.max(Math.min(popper[len] - arrowElementSize, sideValue), 0);
data.arrowElement = arrowElement;
data.offsets.arrow = (_data$offsets$arrow = {}, defineProperty(_data$offsets$arrow, side, Math.round(sideValue)), defineProperty(_data$offsets$arrow, altSide, ''), _data$offsets$arrow);
return data;
}
/**
* Get the opposite placement variation of the given one
* @method
* @memberof Popper.Utils
* @argument {String} placement variation
* @returns {String} flipped placement variation
*/
function getOppositeVariation(variation) {
if (variation === 'end') {
return 'start';
} else if (variation === 'start') {
return 'end';
}
return variation;
}
/**
* List of accepted placements to use as values of the `placement` option.<br />
* Valid placements are:
* - `auto`
* - `top`
* - `right`
* - `bottom`
* - `left`
*
* Each placement can have a variation from this list:
* - `-start`
* - `-end`
*
 * Variations are easier to interpret if you think of them in terms of left-to-right
* written languages. Horizontally (`top` and `bottom`), `start` is left and `end`
* is right.<br />
* Vertically (`left` and `right`), `start` is top and `end` is bottom.
*
* Some valid examples are:
* - `top-end` (on top of reference, right aligned)
* - `right-start` (on right of reference, top aligned)
* - `bottom` (on bottom, centered)
 * - `auto-end` (on the side with more space available, alignment depends on placement)
*
* @static
* @type {Array}
* @enum {String}
* @readonly
* @method placements
* @memberof Popper
*/
var placements = ['auto-start', 'auto', 'auto-end', 'top-start', 'top', 'top-end', 'right-start', 'right', 'right-end', 'bottom-end', 'bottom', 'bottom-start', 'left-end', 'left', 'left-start'];
// Get rid of `auto` `auto-start` and `auto-end`
var validPlacements = placements.slice(3);
/**
* Given an initial placement, returns all the subsequent placements
* clockwise (or counter-clockwise).
*
* @method
* @memberof Popper.Utils
* @argument {String} placement - A valid placement (it accepts variations)
* @argument {Boolean} counter - Set to true to walk the placements counterclockwise
* @returns {Array} placements including their variations
*/
function clockwise(placement) {
var counter = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
var index = validPlacements.indexOf(placement);
var arr = validPlacements.slice(index + 1).concat(validPlacements.slice(0, index));
return counter ? arr.reverse() : arr;
}
var BEHAVIORS = {
FLIP: 'flip',
CLOCKWISE: 'clockwise',
COUNTERCLOCKWISE: 'counterclockwise'
};
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function flip(data, options) {
// if `inner` modifier is enabled, we can't use the `flip` modifier
if (isModifierEnabled(data.instance.modifiers, 'inner')) {
return data;
}
if (data.flipped && data.placement === data.originalPlacement) {
// seems like flip is trying to loop, probably there's not enough space on any of the flippable sides
return data;
}
var boundaries = getBoundaries(data.instance.popper, data.instance.reference, options.padding, options.boundariesElement, data.positionFixed);
var placement = data.placement.split('-')[0];
var placementOpposite = getOppositePlacement(placement);
var variation = data.placement.split('-')[1] || '';
var flipOrder = [];
switch (options.behavior) {
case BEHAVIORS.FLIP:
flipOrder = [placement, placementOpposite];
break;
case BEHAVIORS.CLOCKWISE:
flipOrder = clockwise(placement);
break;
case BEHAVIORS.COUNTERCLOCKWISE:
flipOrder = clockwise(placement, true);
break;
default:
flipOrder = options.behavior;
}
flipOrder.forEach(function (step, index) {
if (placement !== step || flipOrder.length === index + 1) {
return data;
}
placement = data.placement.split('-')[0];
placementOpposite = getOppositePlacement(placement);
var popperOffsets = data.offsets.popper;
var refOffsets = data.offsets.reference;
// using floor because the reference offsets may contain decimals we are not going to consider here
var floor = Math.floor;
var overlapsRef = placement === 'left' && floor(popperOffsets.right) > floor(refOffsets.left) || placement === 'right' && floor(popperOffsets.left) < floor(refOffsets.right) || placement === 'top' && floor(popperOffsets.bottom) > floor(refOffsets.top) || placement === 'bottom' && floor(popperOffsets.top) < floor(refOffsets.bottom);
var overflowsLeft = floor(popperOffsets.left) < floor(boundaries.left);
var overflowsRight = floor(popperOffsets.right) > floor(boundaries.right);
var overflowsTop = floor(popperOffsets.top) < floor(boundaries.top);
var overflowsBottom = floor(popperOffsets.bottom) > floor(boundaries.bottom);
var overflowsBoundaries = placement === 'left' && overflowsLeft || placement === 'right' && overflowsRight || placement === 'top' && overflowsTop || placement === 'bottom' && overflowsBottom;
// flip the variation if required
var isVertical = ['top', 'bottom'].indexOf(placement) !== -1;
// flips variation if reference element overflows boundaries
var flippedVariationByRef = !!options.flipVariations && (isVertical && variation === 'start' && overflowsLeft || isVertical && variation === 'end' && overflowsRight || !isVertical && variation === 'start' && overflowsTop || !isVertical && variation === 'end' && overflowsBottom);
// flips variation if popper content overflows boundaries
var flippedVariationByContent = !!options.flipVariationsByContent && (isVertical && variation === 'start' && overflowsRight || isVertical && variation === 'end' && overflowsLeft || !isVertical && variation === 'start' && overflowsBottom || !isVertical && variation === 'end' && overflowsTop);
var flippedVariation = flippedVariationByRef || flippedVariationByContent;
if (overlapsRef || overflowsBoundaries || flippedVariation) {
// this boolean to detect any flip loop
data.flipped = true;
if (overlapsRef || overflowsBoundaries) {
placement = flipOrder[index + 1];
}
if (flippedVariation) {
variation = getOppositeVariation(variation);
}
data.placement = placement + (variation ? '-' + variation : '');
// this object contains `position`, we want to preserve it along with
// any additional property we may add in the future
data.offsets.popper = _extends({}, data.offsets.popper, getPopperOffsets(data.instance.popper, data.offsets.reference, data.placement));
data = runModifiers(data.instance.modifiers, data, 'flip');
}
});
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function keepTogether(data) {
var _data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var placement = data.placement.split('-')[0];
var floor = Math.floor;
var isVertical = ['top', 'bottom'].indexOf(placement) !== -1;
var side = isVertical ? 'right' : 'bottom';
var opSide = isVertical ? 'left' : 'top';
var measurement = isVertical ? 'width' : 'height';
if (popper[side] < floor(reference[opSide])) {
data.offsets.popper[opSide] = floor(reference[opSide]) - popper[measurement];
}
if (popper[opSide] > floor(reference[side])) {
data.offsets.popper[opSide] = floor(reference[side]);
}
return data;
}
/**
* Converts a string containing value + unit into a px value number
* @function
* @memberof {modifiers~offset}
* @private
* @argument {String} str - Value + unit string
* @argument {String} measurement - `height` or `width`
* @argument {Object} popperOffsets
* @argument {Object} referenceOffsets
* @returns {Number|String}
* Value in pixels, or original string if no values were extracted
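 * @example
 * // Illustrative sketch (not part of the library): how units are resolved.
 * // toValue('10%', 'width', popperOffsets, referenceOffsets)  // -> 10% of the reference width, in px
 * // toValue('5vh', 'height', popperOffsets, referenceOffsets) // -> 5% of the viewport height, in px
 * // toValue('7', 'width', popperOffsets, referenceOffsets)    // -> 7 (treated as px)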
*/
function toValue(str, measurement, popperOffsets, referenceOffsets) {
// separate value from unit
var split = str.match(/((?:\-|\+)?\d*\.?\d*)(.*)/);
var value = +split[1];
var unit = split[2];
// If it's not a number it's an operator, I guess
if (!value) {
return str;
}
if (unit.indexOf('%') === 0) {
var element = void 0;
switch (unit) {
case '%p':
element = popperOffsets;
break;
case '%':
case '%r':
default:
element = referenceOffsets;
}
var rect = getClientRect(element);
return rect[measurement] / 100 * value;
} else if (unit === 'vh' || unit === 'vw') {
// if is a vh or vw, we calculate the size based on the viewport
var size = void 0;
if (unit === 'vh') {
size = Math.max(document.documentElement.clientHeight, window.innerHeight || 0);
} else {
size = Math.max(document.documentElement.clientWidth, window.innerWidth || 0);
}
return size / 100 * value;
} else {
// if is an explicit pixel unit, we get rid of the unit and keep the value
// if is an implicit unit, it's px, and we return just the value
return value;
}
}
/**
* Parse an `offset` string to extrapolate `x` and `y` numeric offsets.
* @function
* @memberof {modifiers~offset}
* @private
* @argument {String} offset
* @argument {Object} popperOffsets
* @argument {Object} referenceOffsets
* @argument {String} basePlacement
 * @returns {Array} a two-cell array with x and y offsets in numbers
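 * @example
 * // Illustrative sketch (not part of the library): unit-less values are treated as px.
 * // parseOffset('10, 20', popperOffsets, referenceOffsets, 'bottom') // -> [10, 20]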
*/
function parseOffset(offset, popperOffsets, referenceOffsets, basePlacement) {
var offsets = [0, 0];
  // Use height if placement is left or right and index is 0, otherwise use width
// in this way the first offset will use an axis and the second one
// will use the other one
var useHeight = ['right', 'left'].indexOf(basePlacement) !== -1;
// Split the offset string to obtain a list of values and operands
// The regex addresses values with the plus or minus sign in front (+10, -20, etc)
var fragments = offset.split(/(\+|\-)/).map(function (frag) {
return frag.trim();
});
// Detect if the offset string contains a pair of values or a single one
// they could be separated by comma or space
var divider = fragments.indexOf(find(fragments, function (frag) {
return frag.search(/,|\s/) !== -1;
}));
if (fragments[divider] && fragments[divider].indexOf(',') === -1) {
console.warn('Offsets separated by white space(s) are deprecated, use a comma (,) instead.');
}
  // If a divider is found, we split the list of values and operands
  // between the X and Y offsets.
var splitRegex = /\s*,\s*|\s+/;
var ops = divider !== -1 ? [fragments.slice(0, divider).concat([fragments[divider].split(splitRegex)[0]]), [fragments[divider].split(splitRegex)[1]].concat(fragments.slice(divider + 1))] : [fragments];
// Convert the values with units to absolute pixels to allow our computations
ops = ops.map(function (op, index) {
// Most of the units rely on the orientation of the popper
var measurement = (index === 1 ? !useHeight : useHeight) ? 'height' : 'width';
var mergeWithPrevious = false;
return op
// This aggregates any `+` or `-` sign that aren't considered operators
// e.g.: 10 + +5 => [10, +, +5]
.reduce(function (a, b) {
if (a[a.length - 1] === '' && ['+', '-'].indexOf(b) !== -1) {
a[a.length - 1] = b;
mergeWithPrevious = true;
return a;
} else if (mergeWithPrevious) {
a[a.length - 1] += b;
mergeWithPrevious = false;
return a;
} else {
return a.concat(b);
}
}, [])
// Here we convert the string values into number values (in px)
.map(function (str) {
return toValue(str, measurement, popperOffsets, referenceOffsets);
});
});
  // Loop through the offsets arrays and execute the operations
ops.forEach(function (op, index) {
op.forEach(function (frag, index2) {
if (isNumeric(frag)) {
offsets[index] += frag * (op[index2 - 1] === '-' ? -1 : 1);
}
});
});
return offsets;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @argument {Number|String} options.offset=0
* The offset value as described in the modifier description
* @returns {Object} The data object, properly modified
*/
function offset(data, _ref) {
var offset = _ref.offset;
var placement = data.placement,
_data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var basePlacement = placement.split('-')[0];
var offsets = void 0;
if (isNumeric(+offset)) {
offsets = [+offset, 0];
} else {
offsets = parseOffset(offset, popper, reference, basePlacement);
}
if (basePlacement === 'left') {
popper.top += offsets[0];
popper.left -= offsets[1];
} else if (basePlacement === 'right') {
popper.top += offsets[0];
popper.left += offsets[1];
} else if (basePlacement === 'top') {
popper.left += offsets[0];
popper.top -= offsets[1];
} else if (basePlacement === 'bottom') {
popper.left += offsets[0];
popper.top += offsets[1];
}
data.popper = popper;
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function preventOverflow(data, options) {
var boundariesElement = options.boundariesElement || getOffsetParent(data.instance.popper);
// If offsetParent is the reference element, we really want to
// go one step up and use the next offsetParent as reference to
  // avoid making this modifier completely useless and looking broken
if (data.instance.reference === boundariesElement) {
boundariesElement = getOffsetParent(boundariesElement);
}
// NOTE: DOM access here
// resets the popper's position so that the document size can be calculated excluding
// the size of the popper element itself
var transformProp = getSupportedPropertyName('transform');
var popperStyles = data.instance.popper.style; // assignment to help minification
var top = popperStyles.top,
left = popperStyles.left,
transform = popperStyles[transformProp];
popperStyles.top = '';
popperStyles.left = '';
popperStyles[transformProp] = '';
var boundaries = getBoundaries(data.instance.popper, data.instance.reference, options.padding, boundariesElement, data.positionFixed);
// NOTE: DOM access here
// restores the original style properties after the offsets have been computed
popperStyles.top = top;
popperStyles.left = left;
popperStyles[transformProp] = transform;
options.boundaries = boundaries;
var order = options.priority;
var popper = data.offsets.popper;
var check = {
primary: function primary(placement) {
var value = popper[placement];
if (popper[placement] < boundaries[placement] && !options.escapeWithReference) {
value = Math.max(popper[placement], boundaries[placement]);
}
return defineProperty({}, placement, value);
},
secondary: function secondary(placement) {
var mainSide = placement === 'right' ? 'left' : 'top';
var value = popper[mainSide];
if (popper[placement] > boundaries[placement] && !options.escapeWithReference) {
value = Math.min(popper[mainSide], boundaries[placement] - (placement === 'right' ? popper.width : popper.height));
}
return defineProperty({}, mainSide, value);
}
};
order.forEach(function (placement) {
var side = ['left', 'top'].indexOf(placement) !== -1 ? 'primary' : 'secondary';
popper = _extends({}, popper, check[side](placement));
});
data.offsets.popper = popper;
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function shift(data) {
var placement = data.placement;
var basePlacement = placement.split('-')[0];
var shiftvariation = placement.split('-')[1];
// if shift shiftvariation is specified, run the modifier
if (shiftvariation) {
var _data$offsets = data.offsets,
reference = _data$offsets.reference,
popper = _data$offsets.popper;
var isVertical = ['bottom', 'top'].indexOf(basePlacement) !== -1;
var side = isVertical ? 'left' : 'top';
var measurement = isVertical ? 'width' : 'height';
var shiftOffsets = {
start: defineProperty({}, side, reference[side]),
end: defineProperty({}, side, reference[side] + reference[measurement] - popper[measurement])
};
data.offsets.popper = _extends({}, popper, shiftOffsets[shiftvariation]);
}
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by update method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function hide(data) {
if (!isModifierRequired(data.instance.modifiers, 'hide', 'preventOverflow')) {
return data;
}
var refRect = data.offsets.reference;
var bound = find(data.instance.modifiers, function (modifier) {
return modifier.name === 'preventOverflow';
}).boundaries;
if (refRect.bottom < bound.top || refRect.left > bound.right || refRect.top > bound.bottom || refRect.right < bound.left) {
// Avoid unnecessary DOM access if visibility hasn't changed
if (data.hide === true) {
return data;
}
data.hide = true;
data.attributes['x-out-of-boundaries'] = '';
} else {
// Avoid unnecessary DOM access if visibility hasn't changed
if (data.hide === false) {
return data;
}
data.hide = false;
data.attributes['x-out-of-boundaries'] = false;
}
return data;
}
/**
* @function
* @memberof Modifiers
* @argument {Object} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {Object} The data object, properly modified
*/
function inner(data) {
var placement = data.placement;
var basePlacement = placement.split('-')[0];
var _data$offsets = data.offsets,
popper = _data$offsets.popper,
reference = _data$offsets.reference;
var isHoriz = ['left', 'right'].indexOf(basePlacement) !== -1;
var subtractLength = ['top', 'left'].indexOf(basePlacement) === -1;
popper[isHoriz ? 'left' : 'top'] = reference[basePlacement] - (subtractLength ? popper[isHoriz ? 'width' : 'height'] : 0);
data.placement = getOppositePlacement(placement);
data.offsets.popper = getClientRect(popper);
return data;
}
/**
* Modifier function, each modifier can have a function of this type assigned
* to its `fn` property.<br />
* These functions will be called on each update, this means that you must
* make sure they are performant enough to avoid performance bottlenecks.
*
* @function ModifierFn
* @argument {dataObject} data - The data object generated by `update` method
* @argument {Object} options - Modifiers configuration and options
* @returns {dataObject} The data object, properly modified
*/
/**
* Modifiers are plugins used to alter the behavior of your poppers.<br />
* Popper.js uses a set of 9 modifiers to provide all the basic functionalities
* needed by the library.
*
* Usually you don't want to override the `order`, `fn` and `onLoad` props.
* All the other properties are configurations that could be tweaked.
* @namespace modifiers
*/
var modifiers = {
/**
* Modifier used to shift the popper on the start or end of its reference
* element.<br />
* It will read the variation of the `placement` property.<br />
* It can be one either `-end` or `-start`.
* @memberof modifiers
* @inner
*/
shift: {
/** @prop {number} order=100 - Index used to define the order of execution */
order: 100,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: shift
},
/**
* The `offset` modifier can shift your popper on both its axis.
*
* It accepts the following units:
* - `px` or unit-less, interpreted as pixels
* - `%` or `%r`, percentage relative to the length of the reference element
* - `%p`, percentage relative to the length of the popper element
* - `vw`, CSS viewport width unit
* - `vh`, CSS viewport height unit
*
   * Here, "length" refers to the main axis relative to the placement of the popper.<br />
* This means that if the placement is `top` or `bottom`, the length will be the
* `width`. In case of `left` or `right`, it will be the `height`.
*
* You can provide a single value (as `Number` or `String`), or a pair of values
   * as `String` separated by a comma or one (or more) white spaces.<br />
* The latter is a deprecated method because it leads to confusion and will be
* removed in v2.<br />
* Additionally, it accepts additions and subtractions between different units.
* Note that multiplications and divisions aren't supported.
*
* Valid examples are:
* ```
* 10
* '10%'
* '10, 10'
* '10%, 10'
* '10 + 10%'
* '10 - 5vh + 3%'
* '-10px + 5vh, 5px - 6%'
* ```
* > **NB**: If you desire to apply offsets to your poppers in a way that may make them overlap
* > with their reference element, unfortunately, you will have to disable the `flip` modifier.
* > You can read more on this at this [issue](https://github.com/FezVrasta/popper.js/issues/373).
*
* @memberof modifiers
* @inner
*/
offset: {
/** @prop {number} order=200 - Index used to define the order of execution */
order: 200,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: offset,
/** @prop {Number|String} offset=0
* The offset value as described in the modifier description
*/
offset: 0
},
/**
* Modifier used to prevent the popper from being positioned outside the boundary.
*
* A scenario exists where the reference itself is not within the boundaries.<br />
* We can say it has "escaped the boundaries" — or just "escaped".<br />
* In this case we need to decide whether the popper should either:
*
* - detach from the reference and remain "trapped" in the boundaries, or
* - if it should ignore the boundary and "escape with its reference"
*
   * When `escapeWithReference` is set to `true` and reference is completely
* outside its boundaries, the popper will overflow (or completely leave)
* the boundaries in order to remain attached to the edge of the reference.
*
* @memberof modifiers
* @inner
*/
preventOverflow: {
/** @prop {number} order=300 - Index used to define the order of execution */
order: 300,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: preventOverflow,
/**
* @prop {Array} [priority=['left','right','top','bottom']]
* Popper will try to prevent overflow following these priorities by default,
* then, it could overflow on the left and on top of the `boundariesElement`
*/
priority: ['left', 'right', 'top', 'bottom'],
/**
* @prop {number} padding=5
* Amount of pixel used to define a minimum distance between the boundaries
* and the popper. This makes sure the popper always has a little padding
* between the edges of its container
*/
padding: 5,
/**
* @prop {String|HTMLElement} boundariesElement='scrollParent'
* Boundaries used by the modifier. Can be `scrollParent`, `window`,
* `viewport` or any DOM element.
*/
boundariesElement: 'scrollParent'
},
/**
* Modifier used to make sure the reference and its popper stay near each other
* without leaving any gap between the two. Especially useful when the arrow is
* enabled and you want to ensure that it points to its reference element.
* It cares only about the first axis. You can still have poppers with margin
* between the popper and its reference element.
* @memberof modifiers
* @inner
*/
keepTogether: {
/** @prop {number} order=400 - Index used to define the order of execution */
order: 400,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: keepTogether
},
/**
* This modifier is used to move the `arrowElement` of the popper to make
* sure it is positioned between the reference element and its popper element.
* It will read the outer size of the `arrowElement` node to detect how many
* pixels of conjunction are needed.
*
* It has no effect if no `arrowElement` is provided.
* @memberof modifiers
* @inner
*/
arrow: {
/** @prop {number} order=500 - Index used to define the order of execution */
order: 500,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: arrow,
/** @prop {String|HTMLElement} element='[x-arrow]' - Selector or node used as arrow */
element: '[x-arrow]'
},
/**
* Modifier used to flip the popper's placement when it starts to overlap its
* reference element.
*
* Requires the `preventOverflow` modifier before it in order to work.
*
* **NOTE:** this modifier will interrupt the current update cycle and will
* restart it if it detects the need to flip the placement.
* @memberof modifiers
* @inner
*/
flip: {
/** @prop {number} order=600 - Index used to define the order of execution */
order: 600,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: flip,
/**
* @prop {String|Array} behavior='flip'
* The behavior used to change the popper's placement. It can be one of
* `flip`, `clockwise`, `counterclockwise` or an array with a list of valid
* placements (with optional variations)
*/
behavior: 'flip',
/**
* @prop {number} padding=5
* The popper will flip if it hits the edges of the `boundariesElement`
*/
padding: 5,
/**
* @prop {String|HTMLElement} boundariesElement='viewport'
* The element which will define the boundaries of the popper position.
* The popper will never be placed outside of the defined boundaries
* (except if `keepTogether` is enabled)
*/
boundariesElement: 'viewport',
/**
* @prop {Boolean} flipVariations=false
* The popper will switch placement variation between `-start` and `-end` when
* the reference element overlaps its boundaries.
*
* The original placement should have a set variation.
*/
flipVariations: false,
/**
* @prop {Boolean} flipVariationsByContent=false
* The popper will switch placement variation between `-start` and `-end` when
* the popper element overlaps its reference boundaries.
*
* The original placement should have a set variation.
*/
flipVariationsByContent: false
},
/**
   * Modifier used to make the popper flow toward the inside of the reference element.
* By default, when this modifier is disabled, the popper will be placed outside
* the reference element.
* @memberof modifiers
* @inner
*/
inner: {
/** @prop {number} order=700 - Index used to define the order of execution */
order: 700,
/** @prop {Boolean} enabled=false - Whether the modifier is enabled or not */
enabled: false,
/** @prop {ModifierFn} */
fn: inner
},
/**
* Modifier used to hide the popper when its reference element is outside of the
   * popper boundaries. It will set an `x-out-of-boundaries` attribute which can
   * be used with a CSS selector to hide the popper when its reference is
   * out of boundaries.
*
* Requires the `preventOverflow` modifier before it in order to work.
* @memberof modifiers
* @inner
*/
hide: {
/** @prop {number} order=800 - Index used to define the order of execution */
order: 800,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: hide
},
/**
   * Computes the style that will be applied to the popper element to get it
   * properly positioned.
*
* Note that this modifier will not touch the DOM, it just prepares the styles
* so that `applyStyle` modifier can apply it. This separation is useful
* in case you need to replace `applyStyle` with a custom implementation.
*
* This modifier has `850` as `order` value to maintain backward compatibility
* with previous versions of Popper.js. Expect the modifiers ordering method
* to change in future major versions of the library.
*
* @memberof modifiers
* @inner
*/
computeStyle: {
/** @prop {number} order=850 - Index used to define the order of execution */
order: 850,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: computeStyle,
/**
* @prop {Boolean} gpuAcceleration=true
* If true, it uses the CSS 3D transformation to position the popper.
* Otherwise, it will use the `top` and `left` properties
*/
gpuAcceleration: true,
/**
* @prop {string} [x='bottom']
* Where to anchor the X axis (`bottom` or `top`). AKA X offset origin.
* Change this if your popper should grow in a direction different from `bottom`
*/
x: 'bottom',
/**
     * @prop {string} [y='right']
* Where to anchor the Y axis (`left` or `right`). AKA Y offset origin.
* Change this if your popper should grow in a direction different from `right`
*/
y: 'right'
},
/**
* Applies the computed styles to the popper element.
*
* All the DOM manipulations are limited to this modifier. This is useful in case
* you want to integrate Popper.js inside a framework or view library and you
* want to delegate all the DOM manipulations to it.
*
* Note that if you disable this modifier, you must make sure the popper element
* has its position set to `absolute` before Popper.js can do its work!
*
* Just disable this modifier and define your own to achieve the desired effect.
*
* @memberof modifiers
* @inner
*/
applyStyle: {
/** @prop {number} order=900 - Index used to define the order of execution */
order: 900,
/** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
enabled: true,
/** @prop {ModifierFn} */
fn: applyStyle,
/** @prop {Function} */
onLoad: applyStyleOnLoad,
/**
* @deprecated since version 1.10.0, the property moved to `computeStyle` modifier
* @prop {Boolean} gpuAcceleration=true
* If true, it uses the CSS 3D transformation to position the popper.
* Otherwise, it will use the `top` and `left` properties
*/
gpuAcceleration: undefined
}
};
/**
* The `dataObject` is an object containing all the information used by Popper.js.
* This object is passed to modifiers and to the `onCreate` and `onUpdate` callbacks.
* @name dataObject
* @property {Object} data.instance The Popper.js instance
* @property {String} data.placement Placement applied to popper
* @property {String} data.originalPlacement Placement originally defined on init
* @property {Boolean} data.flipped True if popper has been flipped by flip modifier
* @property {Boolean} data.hide True if the reference element is out of boundaries, useful to know when to hide the popper
* @property {HTMLElement} data.arrowElement Node used as arrow by arrow modifier
* @property {Object} data.styles Any CSS property defined here will be applied to the popper. It expects the JavaScript nomenclature (eg. `marginBottom`)
* @property {Object} data.arrowStyles Any CSS property defined here will be applied to the popper arrow. It expects the JavaScript nomenclature (eg. `marginBottom`)
* @property {Object} data.boundaries Offsets of the popper boundaries
* @property {Object} data.offsets The measurements of popper, reference and arrow elements
* @property {Object} data.offsets.popper `top`, `left`, `width`, `height` values
* @property {Object} data.offsets.reference `top`, `left`, `width`, `height` values
 * @property {Object} data.offsets.arrow `top` and `left` offsets, only one of them will be different from 0
*/
/**
* Default options provided to Popper.js constructor.<br />
* These can be overridden using the `options` argument of Popper.js.<br />
* To override an option, simply pass an object with the same
* structure of the `options` object, as the 3rd argument. For example:
* ```
* new Popper(ref, pop, {
* modifiers: {
* preventOverflow: { enabled: false }
* }
* })
* ```
* @type {Object}
* @static
* @memberof Popper
*/
var Defaults = {
/**
* Popper's placement.
* @prop {Popper.placements} placement='bottom'
*/
placement: 'bottom',
/**
   * Set this to true if you want popper to position itself in 'fixed' mode
* @prop {Boolean} positionFixed=false
*/
positionFixed: false,
/**
* Whether events (resize, scroll) are initially enabled.
* @prop {Boolean} eventsEnabled=true
*/
eventsEnabled: true,
/**
* Set to true if you want to automatically remove the popper when
* you call the `destroy` method.
* @prop {Boolean} removeOnDestroy=false
*/
removeOnDestroy: false,
/**
* Callback called when the popper is created.<br />
* By default, it is set to no-op.<br />
* Access Popper.js instance with `data.instance`.
* @prop {onCreate}
*/
onCreate: function onCreate() {},
/**
* Callback called when the popper is updated. This callback is not called
* on the initialization/creation of the popper, but only on subsequent
* updates.<br />
* By default, it is set to no-op.<br />
* Access Popper.js instance with `data.instance`.
* @prop {onUpdate}
*/
onUpdate: function onUpdate() {},
/**
* List of modifiers used to modify the offsets before they are applied to the popper.
* They provide most of the functionalities of Popper.js.
* @prop {modifiers}
*/
modifiers: modifiers
};
/**
* @callback onCreate
* @param {dataObject} data
*/
/**
* @callback onUpdate
* @param {dataObject} data
*/
// Utils
// Methods
var Popper = function () {
/**
* Creates a new Popper.js instance.
* @class Popper
* @param {Element|referenceObject} reference - The reference element used to position the popper
* @param {Element} popper - The HTML / XML element used as the popper
* @param {Object} options - Your custom options to override the ones defined in [Defaults](#defaults)
* @return {Object} instance - The generated Popper.js instance
*/
function Popper(reference, popper) {
var _this = this;
var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
classCallCheck(this, Popper);
this.scheduleUpdate = function () {
return requestAnimationFrame(_this.update);
};
// make update() debounced, so that it only runs at most once-per-tick
this.update = debounce(this.update.bind(this));
// with {} we create a new object with the options inside it
this.options = _extends({}, Popper.Defaults, options);
// init state
this.state = {
isDestroyed: false,
isCreated: false,
scrollParents: []
};
// get reference and popper elements (allow jQuery wrappers)
this.reference = reference && reference.jquery ? reference[0] : reference;
this.popper = popper && popper.jquery ? popper[0] : popper;
// Deep merge modifiers options
this.options.modifiers = {};
Object.keys(_extends({}, Popper.Defaults.modifiers, options.modifiers)).forEach(function (name) {
_this.options.modifiers[name] = _extends({}, Popper.Defaults.modifiers[name] || {}, options.modifiers ? options.modifiers[name] : {});
});
// Refactoring modifiers' list (Object => Array)
this.modifiers = Object.keys(this.options.modifiers).map(function (name) {
return _extends({
name: name
}, _this.options.modifiers[name]);
})
// sort the modifiers by order
.sort(function (a, b) {
return a.order - b.order;
});
    // modifiers have the ability to execute arbitrary code when Popper.js gets initialized
    // such code is executed in the same order as its modifier
// they could add new properties to their options configuration
// BE AWARE: don't add options to `options.modifiers.name` but to `modifierOptions`!
this.modifiers.forEach(function (modifierOptions) {
if (modifierOptions.enabled && isFunction(modifierOptions.onLoad)) {
modifierOptions.onLoad(_this.reference, _this.popper, _this.options, modifierOptions, _this.state);
}
});
// fire the first update to position the popper in the right place
this.update();
var eventsEnabled = this.options.eventsEnabled;
if (eventsEnabled) {
      // setup event listeners, they will take care of updating the position in specific situations
this.enableEventListeners();
}
this.state.eventsEnabled = eventsEnabled;
}
// We can't use class properties because they don't get listed in the
// class prototype and break stuff like Sinon stubs
createClass(Popper, [{
key: 'update',
value: function update$$1() {
return update.call(this);
}
}, {
key: 'destroy',
value: function destroy$$1() {
return destroy.call(this);
}
}, {
key: 'enableEventListeners',
value: function enableEventListeners$$1() {
return enableEventListeners.call(this);
}
}, {
key: 'disableEventListeners',
value: function disableEventListeners$$1() {
return disableEventListeners.call(this);
}
/**
* Schedules an update. It will run on the next UI update available.
* @method scheduleUpdate
* @memberof Popper
*/
/**
* Collection of utilities useful when writing custom modifiers.
* Starting from version 1.7, this method is available only if you
* include `popper-utils.js` before `popper.js`.
*
* **DEPRECATION**: This way to access PopperUtils is deprecated
* and will be removed in v2! Use the PopperUtils module directly instead.
* Due to the high instability of the methods contained in Utils, we can't
* guarantee them to follow semver. Use them at your own risk!
* @static
* @private
* @type {Object}
* @deprecated since version 1.8
* @member Utils
* @memberof Popper
*/
}]);
return Popper;
}();
/**
* The `referenceObject` is an object that provides an interface compatible with Popper.js
 * and lets you use it as a replacement for a real DOM node.<br />
* You can use this method to position a popper relatively to a set of coordinates
* in case you don't have a DOM node to use as reference.
*
* ```
* new Popper(referenceObject, popperNode);
* ```
*
* NB: This feature isn't supported in Internet Explorer 10.
* @name referenceObject
* @property {Function} data.getBoundingClientRect
* A function that returns a set of coordinates compatible with the native `getBoundingClientRect` method.
* @property {number} data.clientWidth
* An ES6 getter that will return the width of the virtual reference element.
* @property {number} data.clientHeight
* An ES6 getter that will return the height of the virtual reference element.
*/
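// Illustrative sketch (not part of the library source): a minimal virtual
// reference object matching the interface described above. The coordinates
// and the '.my-popper' selector are arbitrary placeholders.
//
//   var virtualReference = {
//     getBoundingClientRect: function () {
//       return { top: 10, left: 10, bottom: 30, right: 110, width: 100, height: 20 };
//     },
//     get clientWidth() { return 100; },
//     get clientHeight() { return 20; }
//   };
//   new Popper(virtualReference, document.querySelector('.my-popper'));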
Popper.Utils = (typeof window !== 'undefined' ? window : global).PopperUtils;
Popper.placements = placements;
Popper.Defaults = Defaults;
export default Popper;
//# sourceMappingURL=popper.js.map
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/request/AntLinkeAlcollectioncenterCreateRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AntLinkeAlcollectioncenterCreateModel import AntLinkeAlcollectioncenterCreateModel
class AntLinkeAlcollectioncenterCreateRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AntLinkeAlcollectioncenterCreateModel):
self._biz_content = value
else:
self._biz_content = AntLinkeAlcollectioncenterCreateModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'ant.linke.alcollectioncenter.create'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
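# Illustrative usage sketch (not part of the generated SDK). The biz_content
# keys below are hypothetical placeholders; only the request plumbing follows
# the class defined above.
if __name__ == '__main__':
    demo = AntLinkeAlcollectioncenterCreateRequest()
    demo.biz_content = {'center_name': 'demo-center'}  # dict is converted via from_alipay_dict
    demo.notify_url = 'https://example.com/notify'
    # get_params() assembles the gateway method name, version, the serialized
    # biz_content and any optional URLs into a flat parameter dict.
    print(demo.get_params())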
|
PypiClean
|
/pqnha_cozinha-0.1.1.tar.gz/pqnha_cozinha-0.1.1/README.md
|
## pqnha_cozinha
---
py_cozinha is part of a system I am developing to support the administrative tasks and the day-to-day
logistics of restaurants and snack bars.
The system is divided into three parts:
The name I gave the system as a whole is paçoquinha.
---
- Orders, customer service.
- Kitchen, bar.
- Administration/cashier.
---
## This part of the system is responsible for sending orders to the kitchen.
The system aims to use free software and Linux systems.
We also intend to build an integration with the Android platform.
* Among other purposes, this software serves an educational one.
* Academia do Software Livre (São Lourenço - Minas Gerais).
---
Visit <http://www.asl-sl.com.br>
Contributors:
Leonardo de Araújo Lima
<[email protected]>
PypiClean
|
/database/database.py
|
import abc
import pandas as pd
from contextlib import contextmanager
class Database(abc.ABC):
""" Abstract Base Class for EMAT data storage
    Database contains the design experiments, meta-model parameters,
and the core and meta-model results (performance measures)
"""
def __init__(self, readonly=False):
self.readonly = readonly
self.__locked = False
@property
@contextmanager
def lock(self):
"""Context manager to temporarily mark this database as locked."""
        self.__locked = True
        try:
            yield
        finally:
            self.__locked = False
@property
def is_locked(self):
return self.readonly or self.__locked
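    # Typical use of the `lock` context manager above (illustrative):
    #   with db.lock:
    #       ...  # bulk writes; db.is_locked reports True inside this block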
def get_db_info(self):
"""
Get a short string describing this Database
Returns:
str
"""
return "no info available"
@abc.abstractmethod
def init_xlm(self, parameter_list, measure_list):
"""
Initialize or extend set of experiment variables and measures
Initialize database with universe of risk variables,
policy variables, and performance measures. All variables and measures
defined in scopes must be defined in this set.
This method only needs to be run
once after creating a new database.
Args:
parameter_list (List[tuple]):
Experiment variable tuples (variable name, type)
where variable name is a string and
type is 'uncertainty', 'lever', or 'constant'
measure_list (List[tuple]):
Performance measure tuples (name, transform), where
name is a string and transform is a defined transformation
                used in metamodeling; currently supported transforms include {'log', None}.
"""
@abc.abstractmethod
def _write_scope(self, scope_name, sheet, scp_xl, scp_m, content):
"""
Save the emat scope information to the database.
Generally users should not call this function directly,
use `store_scope` instead.
Args:
scope_name (str):
The scope name, used to identify experiments,
performance measures, and results associated with this model.
Multiple scopes can be stored in the same database.
sheet (str):
Yaml file name with scope definition.
scp_xl (List[str]):
Scope parameter names - both uncertainties and policy levers
scp_m (List[str]):
Scope performance measure names
content (Scope, optional):
Scope object to be pickled and stored in the database.
Raises:
KeyError:
If scope name already exists, the scp_vars are not
available, or the performance measures are not initialized
in the database.
"""
@abc.abstractmethod
def update_scope(self, scope):
"""
Update the emat scope information in the database.
Args:
scope (Scope): scope to update
"""
@abc.abstractmethod
def store_scope(self, scope):
"""
Save an emat.Scope directly to the database.
Args:
scope (Scope): The scope object to store.
Raises:
KeyError: If scope name already exists.
"""
@abc.abstractmethod
def read_scope(self, scope_name=None):
"""
Load the pickled scope from the database.
Args:
scope_name (str, optional):
The name of the scope to load. If not
given and there is only one scope stored
in the database, that scope is loaded. If not
given and there are multiple scopes stored in
the database, a KeyError is raised.
Returns:
Scope
Raises:
KeyError: If a name is given but is not found in
the database, or if no name is given but there
is more than one scope stored.
"""
@abc.abstractmethod
def add_scope_meas(self, scope_name, scp_m):
"""Update the set of performance measures associated with the scope
Use this function when the core model runs are complete to add
performance measures to the scope and post-process against the
archived results
Args:
scope_name (str): scope name, used to identify experiments,
performance measures, and results associated with this run
scp_m (List[str]): scope performance measures
Raises:
KeyError: If scope name does not exist or the
performance measures are not initialized in the database.
"""
@abc.abstractmethod
def delete_scope(self, scope_name):
"""Delete the scope from the database
Deletes the scope as well as any experiments and results associated
with the scope
Args:
scope_name (str): scope name, used to identify experiments,
performance measures, and results associated with this run"""
@abc.abstractmethod
def write_experiment_parameters(
self,
scope_name,
design_name,
xl_df,
):
"""
Write experiment definitions the the database.
This method records values for each experiment parameter,
for each experiment in a design of one or more experiments.
Args:
scope_name (str):
A scope name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis. The scope with this name should
already have been stored in this database.
design_name (str):
An experiment design name. This name should be unique
within the named scope, and typically will include a
reference to the design sampler, for example:
'uni' - generated by univariate sensitivity test design
'lhs' - generated by latin hypercube sample design
The design_name is used primarily to load groups of
related experiments together.
xl_df (pandas.Dataframe):
The columns of this DataFrame are the experiment
parameters (i.e. policy levers, uncertainties, and
constants), and each row is an experiment.
Returns:
list: the experiment id's of the newly recorded experiments
Raises:
UserWarning: If scope name does not exist
TypeError: If not all scope variables are defined in the
exp_def
"""
def write_experiment_parameters_1(
self,
scope_name,
design_name: str,
*args,
**kwargs
):
"""
Write experiment definitions for a single experiment.
This method records values for each experiment parameter,
for a single experiment only.
Args:
scope_name (str):
A scope name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis. The scope with this name should
already have been stored in this database.
design_name (str):
An experiment design name. This name should be unique
within the named scope, and typically will include a
reference to the design sampler, for example:
'uni' - generated by univariate sensitivity test design
'lhs' - generated by latin hypercube sample design
The design_name is used primarily to load groups of
related experiments together.
*args, **kwargs (Mapping[s]):
A dictionary where the keys are experiment parameter names
(i.e. policy levers, uncertainties, and constants), and
                values are the parameter values for this experiment.
Subsequent positional or keyword arguments are used to update
the parameters.
Returns:
int: The experiment id of the newly recorded experiments
Raises:
UserWarning: If scope name does not exist
TypeError: If not all scope variables are defined in the
exp_def
"""
parameters = {}
for a in args:
if a is not None:
parameters.update(a)
parameters.update(kwargs)
xl_df = pd.DataFrame(parameters, index=[0])
result = self.write_experiment_parameters(scope_name, design_name, xl_df)
return result[0]
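    # Illustrative call on a concrete Database subclass (scope, design and
    # parameter names here are hypothetical):
    #   exp_id = db.write_experiment_parameters_1(
    #       'my_scope', 'adhoc', {'lever_a': 1.0}, uncertainty_b=2.5)
    #   # -> integer id of the newly stored single-row experiment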
@abc.abstractmethod
def read_experiment_parameters(
self,
scope_name,
design_name=None,
only_pending=False,
design=None,
*,
experiment_ids=None,
ensure_dtypes=True,
):
"""
Read experiment definitions from the database.
Read the values for each experiment parameter per experiment.
Args:
scope_name (str):
A scope name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis.
design_name (str, optional): If given, only experiments
associated with both the scope and the named design
are returned, otherwise all experiments associated
with the scope are returned.
only_pending (bool, default False): If True, only pending
experiments (which have no performance measure results
stored in the database) are returned.
design (str, optional): Deprecated. Use design_name.
experiment_ids (Collection, optional):
A collection of experiment id's to load. If given,
both `design_name` and `only_pending` are ignored.
ensure_dtypes (bool, default True): If True, the scope
associated with these experiments is also read out
of the database, and that scope file is used to
format experimental data consistently (i.e., as
float, integer, bool, or categorical).
Returns:
emat.ExperimentalDesign:
The experiment parameters are returned in a subclass
of a normal pandas.DataFrame, which allows attaching
the `design_name` as meta-data to the DataFrame.
Raises:
ValueError: if `scope_name` is not stored in this database
"""
@abc.abstractmethod
def write_experiment_measures(
self,
scope_name,
source,
m_df,
run_ids=None,
experiment_id=None,
):
"""
Write experiment results to the database.
Write the performance measure results for each experiment
in the scope - if the scope does not exist, nothing is recorded.
Note that the design_name is not required to write experiment
measures, as the individual experiments from any design are
uniquely identified by the experiment id's.
Args:
scope_name (str):
A scope name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis. The scope with this name should
already have been stored in this database.
source (int):
An indicator of performance measure source. This should
be 0 for a bona-fide run of the associated core models,
or some non-zero metamodel_id number.
m_df (pandas.DataFrame):
The columns of this DataFrame are the performance
measure names, and row indexes are the experiment id's.
run_ids (pandas.Index, optional):
Provide an optional index of universally unique run ids
(UUIDs) for these results. The UUIDs can be used to help
identify problems and organize model runs.
Raises:
UserWarning: If scope name does not exist
"""
def write_experiment_run_status(
self,
scope_name,
run_id,
experiment_id,
msg,
):
"""
Write experiment status to the database.
Parameters
----------
scope_name : str
run_id : UUID
experiment_id : int
msg : str
The status to write.
"""
def read_experiment_run_status(
self,
scope_name,
design_name=None,
*,
experiment_id=None,
experiment_ids=None,
):
"""
Read experiment definitions from the database.
Read the values for each experiment parameter per experiment.
Args:
scope_name (str):
A scope name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis.
design_name (str, optional): If given, only experiments
associated with both the scope and the named design
are returned, otherwise all experiments associated
with the scope are returned.
            experiment_id (int, optional):
A single experiment id to check. If given,
`design_name` is ignored.
experiment_ids (int or Collection[int], optional):
A collection of experiment id's to check. If given,
`design_name` is ignored.
Returns:
emat.ExperimentalDesign:
The experiment run statuses are returned in a subclass
of a normal pandas.DataFrame, which allows attaching
the `design_name` as meta-data to the DataFrame.
Raises:
ValueError: if `scope_name` is not stored in this database
"""
raise NotImplementedError
@abc.abstractmethod
def read_experiment_all(
self,
scope_name,
design_name=None,
source=None,
*,
only_pending=False,
only_incomplete=False,
only_complete=False,
only_with_measures=False,
ensure_dtypes=True,
with_run_ids=False,
runs=None,
):
"""
Read experiment definitions and results
Read the values from each experiment variable and the
results for each performance measure per experiment.
Args:
scope_name (str):
A scope name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis.
design_name (str or Collection[str], optional):
The experimental design name (a single `str`) or
a collection of design names to read.
source (int, optional): The source identifier of the
experimental outcomes to load. If not given, but
there are only results from a single source in the
database, those results are returned. If there are
results from multiple sources, an error is raised.
only_pending (bool, default False): If True, only pending
experiments (which have no performance measure results
stored in the database) are returned. Experiments that
have any results, even if only partial results, are
excluded.
only_incomplete (bool, default False): If True, only incomplete
experiments (which have at least one missing performance
measure result that is not stored in the database) are
returned. Only complete experiments (that have every
performance measure populated) are excluded.
only_complete (bool, default False): If True, only complete
experiments (which have no missing performance measure
results stored in the database) are returned.
only_with_measures (bool, default False): If True, only
experiments with at least one stored performance measure
are returned.
ensure_dtypes (bool, default True): If True, the scope
associated with these experiments is also read out
of the database, and that scope file is used to
format experimental data consistently (i.e., as
float, integer, bool, or categorical).
with_run_ids (bool, default False): Whether to use a
two-level pd.MultiIndex that includes both the
experiment_id (which always appears in the index)
as well as the run_id (which only appears in the
index if this argument is set to True).
runs ({None, 'all', 'valid', 'invalid'}, default None):
By default, this method returns the one and only
valid model run matching the given `design_name`
and `source` (if any) for any experiment, and fails
if there is more than one such valid run. Set this to
'valid' or 'invalid' to get all valid or invalid model
runs (instead of raising an exception). Set to 'all' to get
everything, including both valid and invalidated results.
Returns:
emat.ExperimentalDesign:
The experiment parameters are returned in a subclass
of a normal pandas.DataFrame, which allows attaching
the `design_name` as meta-data to the DataFrame.
Raises:
ValueError
When no source is given but the database contains
results from multiple sources.
"""
@abc.abstractmethod
def read_experiment_measures(
self,
scope_name,
design_name=None,
experiment_id=None,
source=None,
design=None,
runs=None,
formulas=True,
with_validity=False,
):
"""
Read experiment results from the database.
Args:
scope_name (str or Scope):
A scope or just its name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis.
design_name (str, optional): If given, only experiments
associated with both the scope and the named design
are returned, otherwise all experiments associated
with the scope are returned.
experiment_id (int, optional): The id of the experiment to
retrieve. If omitted, get all experiments matching the
named scope and design.
source (int, optional): The source identifier of the
experimental outcomes to load. If not given, but
there are only results from a single source in the
database, those results are returned. If there are
results from multiple sources, an error is raised.
design (str): Deprecated, use `design_name`.
runs ({None, 'all', 'valid', 'invalid'}, default None):
By default, this method fails if there is more than
one valid model run matching the given `design_name`
and `source` (if any) for any experiment. Set this to
'valid' or 'invalid' to get all valid or invalid model
runs (instead of raising an exception). Set to 'all' to get
everything, including both valid and invalidated results.
formulas (bool, default True): If the scope includes
formulaic measures (computed directly from other
measures) then compute these values and include them in
the results.
Returns:
results (pandas.DataFrame): performance measures
Raises:
ValueError
When the database contains multiple sets of results
matching the given `design_name` and/or `source`
(if any) for any experiment.
"""
@abc.abstractmethod
def read_experiment_measure_sources(
self,
scope_name,
design_name=None,
experiment_id=None,
design=None,
):
"""
Read all source ids from the results stored in the database.
Args:
scope_name (str):
A scope name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis.
design_name (str, optional): If given, only experiments
associated with both the scope and the named design
are returned, otherwise all experiments associated
with the scope are returned.
experiment_id (int, optional): The id of the experiment to
retrieve. If omitted, get all experiments matching the
named scope and design.
design (str): Deprecated, use `design_name`.
Returns:
List[Int]: performance measure source ids
"""
@abc.abstractmethod
def delete_experiments(self, scope_name, design_name=None, design=None):
"""
Delete experiment definitions and results.
The method removes the linkage between experiments and the
identified experimental design. Experiment parameters and results
are only removed if they are also not linked to any other experimental
design stored in the database.
Args:
scope_name (str): scope name, used to identify experiments,
performance measures, and results associated with this run
design_name (str): Only experiments
associated with both the scope and the named design
are deleted.
design (str): Deprecated, use `design_name`.
"""
@abc.abstractmethod
def delete_experiment_measures(
self,
experiment_ids=None,
):
"""
Delete experiment performance measure results.
The method removes only the performance measures, not the
parameters. This can be useful if a set of corrupted model
results was stored in the database.
Args:
experiment_ids (Collection, optional):
                A collection of experiment ids for which measures shall
                be deleted. Note that no scope or design is given here;
                experiments must be individually identified.
"""
@abc.abstractmethod
def write_experiment_all(self, scope_name, design_name, source, xlm_df):
"""
Write experiment definitions and results
Writes the values from each experiment variable and the
results for each performance measure per experiment
Args:
scope_name (str):
A scope name, used to identify experiments,
performance measures, and results associated with this
exploratory analysis. The scope with this name should
already have been stored in this database.
design_name (str):
An experiment design name. This name should be unique
within the named scope, and typically will include a
reference to the design sampler, for example:
'uni' - generated by univariate sensitivity test design
'lhs' - generated by latin hypercube sample design
The design_name is used primarily to load groups of
related experiments together.
source (int):
An indicator of performance measure source. This should
be 0 for a bona fide run of the associated core models,
or some non-zero metamodel_id number.
            xlm_df (pandas.DataFrame):
The columns of this DataFrame are the experiment
parameters (i.e. policy levers, uncertainties, and
constants) and performance measures, and each row
is an experiment.
Raises:
DesignExistsError: If scope and design already exist
TypeError: If not all scope variables are defined in the
experiment
"""
@abc.abstractmethod
def read_scope_names(self, design_name=None) -> list:
"""A list of all available scopes in the database.
Args:
            design_name (str, optional): If a design name is given, only
                scopes containing a design with this name are returned.
Returns:
list
"""
@abc.abstractmethod
def read_design_names(self, scope_name:str) -> list:
"""A list of all available designs for a given scope.
Args:
scope_name (str): scope name, used to identify experiments,
performance measures, and results associated with this run
"""
@abc.abstractmethod
def read_experiment_id(self, scope_name, *args, **kwargs):
"""
Read the experiment id previously defined in the database
Args:
scope_name (str): scope name, used to identify experiments,
performance measures, and results associated with this run
parameters (dict): keys are experiment parameters, values are the
experimental values to look up. Subsequent positional or keyword
arguments are used to update parameters.
Returns:
int: the experiment id of the identified experiment
Raises:
ValueError: If scope name does not exist
ValueError: If multiple experiments match an experiment definition.
This can happen, for example, if the definition is incomplete.
"""
@abc.abstractmethod
def read_experiment_ids(self, scope_name, xl_df):
"""
Read the experiment ids previously defined in the database.
This method is used to recover the experiment id, if the
set of parameter values is known but the id of the experiment
is not known.
Args:
scope_name (str): scope name, used to identify experiments,
performance measures, and results associated with this run
xl_df (pandas.DataFrame): columns are experiment parameters,
each row is a full experiment
Returns:
            list: the experiment ids of the identified experiments
Raises:
ValueError: If scope name does not exist
ValueError: If multiple experiments match an experiment definition.
This can happen, for example, if the definition is incomplete.
"""
@abc.abstractmethod
def read_uncertainties(self, scope_name:str) -> list:
"""A list of all uncertainties for a given scope.
Args:
scope_name (str): scope name
"""
@abc.abstractmethod
def read_levers(self, scope_name:str) -> list:
"""A list of all levers for a given scope.
Args:
scope_name (str): scope name
"""
@abc.abstractmethod
def read_constants(self, scope_name:str) -> list:
"""A list of all constants for a given scope.
Args:
scope_name (str): scope name
"""
@abc.abstractmethod
def read_measures(self, scope_name:str) -> list:
"""A list of all performance measures for a given scope.
Args:
scope_name (str): scope name
"""
@abc.abstractmethod
def write_metamodel(self, scope_name, metamodel, metamodel_id=None, metamodel_name=''):
"""Store a meta-model in the database
Args:
scope_name (str): scope name
metamodel (emat.MetaModel): The meta-model to be stored.
If a PythonCoreModel containing a MetaModel is given,
the MetaModel will be extracted.
metamodel_id (int, optional): A unique id number for this
metamodel. If no id number is given and it cannot be
inferred from `metamodel`, a unique id number
will be created.
metamodel_name (str, optional): A name for this meta-model.
If no name is given and it cannot be
inferred from `metamodel`, an empty string is used.
"""
@abc.abstractmethod
def read_metamodel(self, scope_name, metamodel_id=None):
"""Retrieve a meta-model from the database.
Args:
scope_name (str): scope name
metamodel_id (int, optional): A unique id number for this
metamodel. If not given but there is exactly one
metamodel stored for the given scope, that metamodel
will be returned.
Returns:
PythonCoreModel: The meta-model, ready to use
"""
@abc.abstractmethod
def read_metamodel_ids(self, scope_name):
"""A list of all metamodel id's for a given scope.
Args:
scope_name (str): scope name
"""
@abc.abstractmethod
def get_new_metamodel_id(self, scope_name):
"""Get a new unused metamodel id for a given scope.
Args:
scope_name (str): scope name
Returns:
int
"""
@abc.abstractmethod
def read_box(self, scope_name: str, box_name: str, scope=None):
"""
Read a Box from the database.
Args:
scope_name (str):
The name of the scope from which to read the box.
box_name (str):
The name of the box to read.
scope (Scope, optional):
The Scope to assign to the Box that is returned.
If not given, no Scope object is assigned to the
box.
Returns:
Box
"""
@abc.abstractmethod
def read_box_names(self, scope_name: str):
"""
Get the names of all boxes associated with a particular scope.
Args:
scope_name (str):
The name of the scope from which to read the Box names.
Returns:
list[str]
"""
@abc.abstractmethod
def read_box_parent_name(self, scope_name: str, box_name:str):
"""
Get the name of the parent box for a particular box in the database
Args:
scope_name (str):
The name of the scope from which to read the Box parent.
box_name (str):
The name of the box from which to read the parent.
Returns:
str or None:
If the identified box has a parent, this is the name of that
parent, otherwise None is returned.
"""
@abc.abstractmethod
def read_box_parent_names(self, scope_name: str):
"""
Get the name of the parent box for each box in the database.
Args:
scope_name (str):
The name of the scope from which to read Box parents.
Returns:
dict
A dictionary, with keys giving Box names and values
giving the respective Box parent names.
"""
@abc.abstractmethod
def read_boxes(self, scope_name: str=None, scope=None):
"""
Read Boxes from the database.
Args:
scope_name (str, optional):
The name of the scope from which to load Boxes. This
is used exclusively to identify the Boxes to load from
the database, and the scope by this name is not attached
to the Boxes, unless `scope` is given, in which case this
argument is ignored.
scope (Scope, optional):
The scope to assign to the Boxes. If not given,
no Scope object is assigned.
Returns:
Boxes
"""
@abc.abstractmethod
def write_box(self, box, scope_name=None):
"""
Write a single box to the database.
Args:
box (Box):
The Box to write to the database.
scope_name (str, optional):
                The scope name to use when writing to the database. If
                the `box` has a particular scope assigned, the name
                of that scope is used.
        Raises:
            ValueError:
                If the `box` has a particular scope assigned, and
                `scope_name` is given but does not match the name
                of the assigned scope.
"""
@abc.abstractmethod
def write_boxes(self, boxes, scope_name=None):
"""
Write Boxes to the database.
Args:
boxes (Boxes):
The collection of Boxes to write to the database.
scope_name (str, optional):
                The scope name to use when writing to the database. If
                the `boxes` collection has a particular scope assigned, the
                name of that scope is used.
        Raises:
            ValueError:
                If the `boxes` collection has a particular scope assigned,
                and `scope_name` is given but does not match the name
                of the assigned scope.
"""
@abc.abstractmethod
def new_run_id(
self,
scope_name=None,
parameters=None,
location=None,
experiment_id=None,
source=0,
**extra_attrs,
):
"""
Create a new run_id in the database.
Args:
scope_name (str): scope name, used to identify experiments,
performance measures, and results associated with this run
parameters (dict): keys are experiment parameters, values are the
experimental values to look up. Subsequent positional or keyword
arguments are used to update parameters.
location (str or True, optional): An identifier for this location
(i.e. this computer). If set to True, the name of this node
is found using the `platform` module.
experiment_id (int, optional): The experiment id associated
with this run. If given, the parameters are ignored.
source (int, default 0): The metamodel_id of the source for this
run, or 0 for a core model run.
Returns:
Tuple[Int,Int]:
The run_id and experiment_id of the identified experiment
Raises:
ValueError: If scope name does not exist
ValueError: If multiple experiments match an experiment definition.
This can happen, for example, if the definition is incomplete.
"""
def info(self, stream=None):
"""
Print info about scopes and designs in this database.
"""
if stream is None:
import sys
stream = sys.stdout
print(f"<emat.{self.__class__.__name__}>", file=stream)
scope_names = self.read_scope_names()
for scope_name in scope_names:
print(f"scope: {scope_name}:", file=stream)
design_names = self.read_design_names(scope_name)
if design_names:
print(f" designs:", file=stream)
for design_name in design_names:
print(f" - {design_name}", file=stream)
else:
print(f" no designs", file=stream)
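# ---------------------------------------------------------------------------
# Usage sketch (not part of the abstract interface above).  The concrete
# backend name ``emat.SQLiteDB`` and the file name "demo.sqlitedb" are
# assumptions for illustration only; substitute whatever Database subclass
# your installation actually provides.
if __name__ == "__main__":
    import emat

    # A real workflow would usually point at an existing database file; an
    # empty one is initialized here only so the sketch runs end to end.
    db = emat.SQLiteDB("demo.sqlitedb", initialize=True)

    db.info()  # print the scopes and designs stored in this database
    for scope_name in db.read_scope_names():
        for design_name in db.read_design_names(scope_name):
            # Parameters and measures together, as an ExperimentalDesign frame.
            xlm = db.read_experiment_all(scope_name, design_name)
            # Performance measures only.
            measures = db.read_experiment_measures(scope_name, design_name)
            print(scope_name, design_name, xlm.shape, measures.shape)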
|
PypiClean
|
/dealcloud-api-wrapper-0.0.5.tar.gz/dealcloud-api-wrapper-0.0.5/dealcloud_api_wrapper/dc_schema.py
|
class Schema():
""" Methods for calling the Schema endpoint """
def schema_get_allfields(self):
"""
Get all fields on all lists from site.
:returns: json response
"""
endpoint = "/api/rest/v4/schema/allfields"
response = self._get_response(method='get', endpoint=endpoint)
return response.json()
def schema_get_onefield(self, fieldId):
"""
Get information for one field.
:param fieldId: Field Id.
:type fieldId: integer
:returns: json response
"""
endpoint = f"/api/rest/v4/schema/fields/{fieldId}"
response = self._get_response(method='get', endpoint=endpoint)
return response.json()
def schema_get_entrytypes(self):
"""
Get a list of entry types (Contacts, Companies, etc.)
:returns: json response.
"""
endpoint = f"/api/rest/v4/schema/entrytypes"
response = self._get_response(method='get', endpoint=endpoint)
return response.json()
def schema_get_fieldtypes(self):
"""
Get field types supported by DealCloud (user fields)
:returns: json response.
"""
endpoint = f"/api/rest/v4/schema/fieldtypes"
response = self._get_response(method='get', endpoint=endpoint)
return response.json()
def schema_get_fields(self, fields: list):
"""
Get information on a list of fields.
:param fields: List of fieldIds to get information on.
:returns: json response.
"""
fields = '&fieldIds='.join([str(item) for item in fields])
endpoint = f"/api/rest/v4/schema/fields?fieldIds={fields}"
response = self._get_response(method='get', endpoint=endpoint)
return response.json()
def schema_get_systemfields(self):
"""
Get types of system fields.
:returns: json response.
"""
endpoint = "/api/rest/v4/schema/systemfieldtypes"
response = self._get_response(method='get', endpoint=endpoint)
return response.json()
def schema_get_entrytype_fields(self, entryTypeId):
"""
Get fields for an entry type.
:param entryTypeId: entryListID or entry apiName. Required.
:returns: json response.
"""
endpoint = f"/api/rest/v4/schema/entrytypes/{entryTypeId}/fields"
response = self._get_response(method='get', endpoint=endpoint)
return response.json()
def schema_get_entrytype_info(self, entryTypeId):
"""
Get information for one entry type
:param entryTypeId: entryListID or entry apiName. Required.
:returns: json response.
"""
endpoint = f"/api/rest/v4/schema/entrytypes/{entryTypeId}"
response = self._get_response(method = 'get', endpoint=endpoint)
return response.json()
def schema_get_filteroperations(self):
"""
Get list of filter operations supported by the DealCloud column API
:returns: json response.
"""
endpoint = f"/api/rest/v4/schema/filteroperations"
response = self._get_response(method = 'get', endpoint=endpoint)
return response.json()
def schema_post_choicefieldvalues(self, fieldId, choiceValuesToAppend):
"""
Add values to choice fields.
:param fieldId: Id of the existing choice field.
:type fieldId: int.
:param choiceValuesToAppend: Values which will be appended to choice values of field.
:type choiceValuesToAppend: list.
:returns: json response
"""
endpoint = f"/api/rest/v4/schema/choiceFieldValues/{fieldId}"
response = self._get_response(method='post', endpoint=endpoint, json=choiceValuesToAppend)
return response.json()
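# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module).  Schema is written as a
# mix-in: every method delegates to self._get_response(), which the package's
# client class is expected to supply (base URL, authentication, and the HTTP
# call itself).  The stub below fakes that helper so the mix-in's endpoint
# construction can be exercised without credentials; it is illustrative only
# and is not the package's real client class.
if __name__ == "__main__":
    class _FakeResponse:
        def __init__(self, payload):
            self._payload = payload
        def json(self):
            return self._payload

    class _StubClient(Schema):
        def _get_response(self, method, endpoint, json=None):
            # A real client would perform an authenticated HTTP request here.
            print(method.upper(), endpoint)
            return _FakeResponse({"endpoint": endpoint, "body": json})

    client = _StubClient()
    client.schema_get_entrytypes()
    client.schema_get_fields([101, 102, 103])
    client.schema_post_choicefieldvalues(2001, ["New Choice A", "New Choice B"])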
|
PypiClean
|
/Satchmo-0.9.2.tar.gz/Satchmo-0.9.2/satchmo/apps/payment/modules/authorizenet/processor.py
|
from datetime import datetime
from decimal import Decimal
from django.template import loader, Context
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from payment.modules.base import BasePaymentProcessor, ProcessorResult
from satchmo_store.shop.models import Config
from satchmo_utils.numbers import trunc_decimal
from tax.utils import get_tax_processor
from xml.dom import minidom
import random
import urllib2
class PaymentProcessor(BasePaymentProcessor):
"""
Authorize.NET payment processing module
You must have an account with authorize.net in order to use this module.
Additionally, you must have ARB enabled in your account to use recurring billing.
"""
def __init__(self, settings):
super(PaymentProcessor, self).__init__('authorizenet', settings)
self.arb_enabled = settings.ARB.value
def authorize_payment(self, order=None, amount=None, testing=False):
"""Authorize a single payment.
Returns: ProcessorResult
"""
if order:
self.prepare_data(order)
else:
order = self.order
if order.paid_in_full:
self.log_extra('%s is paid in full, no authorization attempted.', order)
results = ProcessorResult(self.key, True, _("No charge needed, paid in full."))
else:
self.log_extra('Authorizing payment of %s for %s', amount, order)
standard = self.get_standard_charge_data(authorize=True, amount=amount)
results = self.send_post(standard, testing)
return results
def can_authorize(self):
return True
def can_recur_bill(self):
return True
def capture_authorized_payment(self, authorization, testing=False, order=None, amount=None):
"""Capture a single payment"""
if order:
self.prepare_data(order)
else:
order = self.order
if order.authorized_remaining == Decimal('0.00'):
self.log_extra('No remaining authorizations on %s', order)
return ProcessorResult(self.key, True, _("Already complete"))
self.log_extra('Capturing Authorization #%i for %s', authorization.id, order)
data = self.get_prior_auth_data(authorization, amount=amount)
results = None
if data:
results = self.send_post(data, testing)
return results
def capture_payment(self, testing=False, order=None, amount=None):
"""Process payments without an authorization step."""
if order:
self.prepare_data(order)
else:
order = self.order
recurlist = self.get_recurring_charge_data()
if recurlist:
success, results = self.process_recurring_subscriptions(recurlist, testing)
if not success:
self.log_extra('recur payment failed, aborting the rest of the module')
return results
if order.paid_in_full:
self.log_extra('%s is paid in full, no capture attempted.', order)
results = ProcessorResult(self.key, True, _("No charge needed, paid in full."))
self.record_payment()
else:
self.log_extra('Capturing payment for %s', order)
standard = self.get_standard_charge_data(amount=amount)
results = self.send_post(standard, testing)
return results
def get_prior_auth_data(self, authorization, amount=None):
"""Build the dictionary needed to process a prior auth capture."""
settings = self.settings
trans = {'authorization' : authorization}
remaining = authorization.remaining()
if amount is None or amount > remaining:
amount = remaining
balance = trunc_decimal(amount, 2)
trans['amount'] = amount
if self.is_live():
conn = settings.CONNECTION.value
self.log_extra('Using live connection.')
else:
testflag = 'TRUE'
conn = settings.CONNECTION_TEST.value
self.log_extra('Using test connection.')
if self.settings.SIMULATE.value:
testflag = 'TRUE'
else:
testflag = 'FALSE'
trans['connection'] = conn
trans['configuration'] = {
'x_login' : settings.LOGIN.value,
'x_tran_key' : settings.TRANKEY.value,
'x_version' : '3.1',
'x_relay_response' : 'FALSE',
'x_test_request' : testflag,
'x_delim_data' : 'TRUE',
'x_delim_char' : '|',
'x_type': 'PRIOR_AUTH_CAPTURE',
'x_trans_id' : authorization.transaction_id
}
self.log_extra('prior auth configuration: %s', trans['configuration'])
trans['transactionData'] = {
'x_amount' : balance,
}
part1 = urlencode(trans['configuration'])
postdata = part1 + "&" + urlencode(trans['transactionData'])
trans['postString'] = postdata
self.log_extra('prior auth poststring: %s', postdata)
trans['logPostString'] = postdata
return trans
def get_void_auth_data(self, authorization):
"""Build the dictionary needed to process a prior auth release."""
settings = self.settings
trans = {
'authorization' : authorization,
'amount' : Decimal('0.00'),
}
if self.is_live():
conn = settings.CONNECTION.value
self.log_extra('Using live connection.')
else:
testflag = 'TRUE'
conn = settings.CONNECTION_TEST.value
self.log_extra('Using test connection.')
if self.settings.SIMULATE.value:
testflag = 'TRUE'
else:
testflag = 'FALSE'
trans['connection'] = conn
trans['configuration'] = {
'x_login' : settings.LOGIN.value,
'x_tran_key' : settings.TRANKEY.value,
'x_version' : '3.1',
'x_relay_response' : 'FALSE',
'x_test_request' : testflag,
'x_delim_data' : 'TRUE',
'x_delim_char' : '|',
'x_type': 'VOID',
'x_trans_id' : authorization.transaction_id
}
self.log_extra('void auth configuration: %s', trans['configuration'])
postdata = urlencode(trans['configuration'])
trans['postString'] = postdata
self.log_extra('void auth poststring: %s', postdata)
trans['logPostString'] = postdata
return trans
def get_recurring_charge_data(self, testing=False):
"""Build the list of dictionaries needed to process a recurring charge.
Because Authorize can only take one subscription at a time, we build a list
of the transaction dictionaries, for later sequential posting.
"""
if not self.arb_enabled:
return []
# get all subscriptions from the order
subscriptions = self.get_recurring_orderitems()
if len(subscriptions) == 0:
self.log_extra('No subscription items')
return []
settings = self.settings
# set up the base dictionary
trans = {}
if self.is_live():
conn = settings.ARB_CONNECTION.value
self.log_extra('Using live recurring charge connection.')
else:
conn = settings.ARB_CONNECTION_TEST.value
self.log_extra('Using test recurring charge connection.')
shop_config = Config.objects.get_current()
trans['connection'] = conn
trans['config'] = {
'merchantID' : settings.LOGIN.value,
'transactionKey' : settings.TRANKEY.value,
'shop_name' : shop_config.store_name,
}
trans['order'] = self.order
trans['card'] = self.order.credit_card
trans['card_expiration'] = "%4i-%02i" % (self.order.credit_card.expire_year, self.order.credit_card.expire_month)
translist = []
taxer = get_tax_processor(user = self.order.contact.user)
for subscription in subscriptions:
product = subscription.product
subtrans = trans.copy()
subtrans['subscription'] = subscription
subtrans['product'] = product
sub = product.subscriptionproduct
trial = sub.get_trial_terms(0)
if trial:
price = trunc_decimal(trial.price, 2)
trial_amount = price
if price and subscription.product.taxable:
trial_amount = taxer.by_price(subscription.product.taxClass, price)
#todo, maybe add shipping for trial?
amount = sub.recurring_price()
trial_occurrences = trial.occurrences
if not trial_occurrences:
self.log.warn("Trial expiration period is less than one recurring billing cycle. " +
"Authorize does not allow this, so the trial period has been adjusted to be equal to one recurring cycle.")
trial_occurrences = 1
else:
trial_occurrences = 0
trial_amount = Decimal('0.00')
amount = subscription.total_with_tax
occurrences = sub.recurring_times + trial_occurrences
if occurrences > 9999:
occurrences = 9999
subtrans['occurrences'] = occurrences
subtrans['trial_occurrences'] = trial_occurrences
subtrans['trial'] = trial
subtrans['trial_amount'] = trunc_decimal(trial_amount, 2)
subtrans['amount'] = trunc_decimal(amount, 2)
if trial:
charged_today = trial_amount
else:
charged_today = amount
charged_today = trunc_decimal(charged_today, 2)
subtrans['charged_today'] = charged_today
translist.append(subtrans)
return translist
def get_standard_charge_data(self, amount=None, authorize=False):
"""Build the dictionary needed to process a credit card charge"""
order = self.order
settings = self.settings
trans = {}
if amount is None:
amount = order.balance
balance = trunc_decimal(amount, 2)
trans['amount'] = balance
if self.is_live():
conn = settings.CONNECTION.value
self.log_extra('Using live connection.')
else:
testflag = 'TRUE'
conn = settings.CONNECTION_TEST.value
self.log_extra('Using test connection.')
if self.settings.SIMULATE.value:
testflag = 'TRUE'
else:
testflag = 'FALSE'
trans['connection'] = conn
trans['authorize_only'] = authorize
if not authorize:
transaction_type = 'AUTH_CAPTURE'
else:
transaction_type = 'AUTH_ONLY'
trans['configuration'] = {
'x_login' : settings.LOGIN.value,
'x_tran_key' : settings.TRANKEY.value,
'x_version' : '3.1',
'x_relay_response' : 'FALSE',
'x_test_request' : testflag,
'x_delim_data' : 'TRUE',
'x_delim_char' : '|',
'x_type': transaction_type,
'x_method': 'CC',
}
self.log_extra('standard charges configuration: %s', trans['configuration'])
trans['custBillData'] = {
'x_first_name' : order.contact.first_name,
'x_last_name' : order.contact.last_name,
'x_address': order.full_bill_street,
'x_city': order.bill_city,
'x_state' : order.bill_state,
'x_zip' : order.bill_postal_code,
'x_country': order.bill_country,
'x_phone' : order.contact.primary_phone.phone,
'x_email' : order.contact.email,
}
trans['custShipData'] = {
'x_ship_to_first_name' : order.ship_first_name,
'x_ship_to_last_name' : order.ship_last_name,
'x_ship_to_address' : order.full_ship_street,
'x_ship_to_city' : order.ship_city,
'x_ship_to_state' : order.ship_state,
'x_ship_to_zip' : order.ship_postal_code,
'x_ship_to_country' : order.ship_country,
}
self.log_extra('standard charges configuration: %s', trans['custBillData'])
invoice = "%s" % order.id
failct = order.paymentfailures.count()
if failct > 0:
invoice = "%s_%i" % (invoice, failct)
if not self.is_live():
# add random test id to this, for testing repeatability
invoice = "%s_test_%s_%i" % (invoice, datetime.now().strftime('%m%d%y'), random.randint(1,1000000))
cc = order.credit_card.decryptedCC
ccv = order.credit_card.ccv
if not self.is_live() and cc == '4222222222222':
if ccv == '222':
self.log_extra('Setting a bad ccv number to force an error')
ccv = '1'
else:
self.log_extra('Setting a bad credit card number to force an error')
cc = '1234'
trans['transactionData'] = {
'x_amount' : balance,
'x_card_num' : cc,
'x_exp_date' : order.credit_card.expirationDate,
'x_card_code' : ccv,
'x_invoice_num' : invoice
}
part1 = urlencode(trans['configuration']) + "&"
part2 = "&" + urlencode(trans['custBillData'])
part3 = "&" + urlencode(trans['custShipData'])
trans['postString'] = part1 + urlencode(trans['transactionData']) + part2 + part3
redactedData = {
'x_amount' : balance,
'x_card_num' : order.credit_card.display_cc,
'x_exp_date' : order.credit_card.expirationDate,
'x_card_code' : "REDACTED",
'x_invoice_num' : invoice
}
self.log_extra('standard charges transactionData: %s', redactedData)
trans['logPostString'] = part1 + urlencode(redactedData) + part2
return trans
def process_recurring_subscriptions(self, recurlist, testing=False):
"""Post all subscription requests."""
results = []
for recur in recurlist:
success, reason, response, subscription_id = self.process_recurring_subscription(recur, testing=testing)
if success:
if not testing:
payment = self.record_payment(order=self.order, amount=recur['charged_today'], transaction_id=subscription_id, reason_code=reason)
results.append(ProcessorResult(self.key, success, response, payment=payment))
else:
self.log.info("Failed to process recurring subscription, %s: %s", reason, response)
break
return success, results
def process_recurring_subscription(self, data, testing=False):
"""Post one subscription request."""
self.log_extra('Processing subscription: %s', data['product'].slug)
t = loader.get_template('shop/checkout/authorizenet/arb_create_subscription.xml')
ctx = Context(data)
request = t.render(ctx)
if self.settings.EXTRA_LOGGING.value:
data['redact'] = True
ctx = Context(data)
redacted = t.render(ctx)
self.log_extra('Posting data to: %s\n%s', data['connection'], redacted)
headers = {'Content-type':'text/xml'}
conn = urllib2.Request(data['connection'], request, headers)
try:
f = urllib2.urlopen(conn)
all_results = f.read()
except urllib2.URLError, ue:
self.log.error("error opening %s\n%s", data['connection'], ue)
return (False, 'ERROR', _('Could not talk to Authorize.net gateway'), None)
self.log_extra('Authorize response: %s', all_results)
subscriptionID = None
try:
response = minidom.parseString(all_results)
doc = response.documentElement
reason = doc.getElementsByTagName('code')[0].firstChild.nodeValue
response_text = doc.getElementsByTagName('text')[0].firstChild.nodeValue
result = doc.getElementsByTagName('resultCode')[0].firstChild.nodeValue
success = result == "Ok"
if success:
#refID = doc.getElementsByTagName('refId')[0].firstChild.nodeValue
subscriptionID = doc.getElementsByTagName('subscriptionId')[0].firstChild.nodeValue
except Exception, e:
self.log.error("Error %s\nCould not parse response: %s", e, all_results)
success = False
reason = "Parse Error"
response_text = "Could not parse response"
return success, reason, response_text, subscriptionID
def release_authorized_payment(self, order=None, auth=None, testing=False):
"""Release a previously authorized payment."""
if order:
self.prepare_data(order)
else:
order = self.order
self.log_extra('Releasing Authorization #%i for %s', auth.id, order)
data = self.get_void_auth_data(auth)
results = None
if data:
results = self.send_post(data, testing)
if results.success:
auth.complete = True
auth.save()
return results
def send_post(self, data, testing=False, amount=None):
"""Execute the post to Authorize Net.
Params:
- data: dictionary as returned by get_standard_charge_data
- testing: if true, then don't record the payment
Returns:
- ProcessorResult
"""
self.log.info("About to send a request to authorize.net: %(connection)s\n%(logPostString)s", data)
conn = urllib2.Request(url=data['connection'], data=data['postString'])
try:
f = urllib2.urlopen(conn)
all_results = f.read()
self.log_extra('Authorize response: %s', all_results)
except urllib2.URLError, ue:
self.log.error("error opening %s\n%s", data['connection'], ue)
return ProcessorResult(self.key, False, _('Could not talk to Authorize.net gateway'))
parsed_results = all_results.split(data['configuration']['x_delim_char'])
response_code = parsed_results[0]
reason_code = parsed_results[1]
response_text = parsed_results[3]
transaction_id = parsed_results[6]
success = response_code == '1'
if amount is None:
amount = data['amount']
payment = None
if success and not testing:
if data.get('authorize_only', False):
self.log_extra('Success, recording authorization')
payment = self.record_authorization(order=self.order, amount=amount,
transaction_id=transaction_id, reason_code=reason_code)
else:
if amount <= 0:
self.log_extra('Success, recording refund')
else:
self.log_extra('Success, recording payment')
authorization = data.get('authorization', None)
payment = self.record_payment(order=self.order, amount=amount,
transaction_id=transaction_id, reason_code=reason_code, authorization=authorization)
elif not testing:
payment = self.record_failure(amount=amount, transaction_id=transaction_id,
reason_code=reason_code, details=response_text)
self.log_extra("Returning success=%s, reason=%s, response_text=%s", success, reason_code, response_text)
return ProcessorResult(self.key, success, response_text, payment=payment)
if __name__ == "__main__":
"""
This is for testing - enabling you to run from the command line and make
sure everything is ok
"""
import os
from livesettings import config_get_group
# Set up some dummy classes to mimic classes being passed through Satchmo
class testContact(object):
pass
class testCC(object):
pass
class testOrder(object):
def __init__(self):
self.contact = testContact()
self.credit_card = testCC()
def order_success(self):
pass
if not os.environ.has_key("DJANGO_SETTINGS_MODULE"):
os.environ["DJANGO_SETTINGS_MODULE"]="satchmo_store.settings"
settings_module = os.environ['DJANGO_SETTINGS_MODULE']
settingsl = settings_module.split('.')
settings = __import__(settings_module, {}, {}, settingsl[-1])
sampleOrder = testOrder()
sampleOrder.contact.first_name = 'Chris'
sampleOrder.contact.last_name = 'Smith'
sampleOrder.contact.primary_phone = '801-555-9242'
sampleOrder.full_bill_street = '123 Main Street'
sampleOrder.bill_postal_code = '12345'
sampleOrder.bill_state = 'TN'
sampleOrder.bill_city = 'Some City'
sampleOrder.bill_country = 'US'
sampleOrder.total = "27.01"
sampleOrder.balance = "27.01"
sampleOrder.credit_card.decryptedCC = '6011000000000012'
sampleOrder.credit_card.expirationDate = "10/11"
sampleOrder.credit_card.ccv = "144"
authorize_settings = config_get_group('PAYMENT_AUTHORIZENET')
if authorize_settings.LIVE.value:
print "Warning. You are submitting a live order. AUTHORIZE.NET system is set LIVE."
processor = PaymentProcessor(authorize_settings)
processor.prepare_data(sampleOrder)
results = processor.process(testing=True)
print results
|
PypiClean
|
/django_opstasks-0.2.10-py3-none-any.whl/django_opstasks/common/requests.py
|
from json import dumps
from requests import request
from logging import getLogger
from opsadmin.settings import CONSUL_CONFIGS
LOGGER = getLogger('django')
class OpstasksRequests(object):
def __init__(self, timeout=5):
opstasks_hosts = CONSUL_CONFIGS.get(
'OPSTASKS_HOST', 'opstasks-api.devops.svc.cluster.local')
self.base_url = f'http://{opstasks_hosts}/'
self.headers = {'Content-Type': 'application/json'}
self.timeout = timeout
def _request(self, method, url, data=None):
try:
LOGGER.info('"%s %s"', method, url)
response = request(method, url, headers=self.headers, data=data, timeout=self.timeout)
# if response.status_code == 200:
# LOGGER.info("return: %s", response.text)
# return response
            # LOGGER.error('Failed to run tasks, status code is %s', response.status_code)
# return False
LOGGER.info('%s %s', response.status_code, response.text)
return response.status_code, response.text
except Exception as error:
            LOGGER.error('Failed to run tasks.')
LOGGER.exception(error)
return 1000, error
class TasksRequests(OpstasksRequests):
def __init__(self):
super().__init__()
self.base_url = self.base_url + 'tasks/'
def database(self):
kwargs = {
"method": "GET",
"url": self.base_url + 'database',
}
return self._request(**kwargs)
class NetworksRequests(OpstasksRequests):
def __init__(self):
super().__init__()
self.base_url = self.base_url + 'networks/'
def service_for_node(self, data):
kwargs = {
"method": "POST",
"url": self.base_url + 'service_for_node',
"data": dumps(data)
}
return self._request(**kwargs)
def ipsec_tunnel(self, data):
"""
- update the ipsec.conf、ipsec.secret file
- then restart strongswan
- finally check the status of all tunnel
"""
kwargs = {
"method": "POST",
"url": self.base_url + 'ipsec_tunnel',
"data": dumps(data)
}
return self._request(**kwargs)
def firewalld_rule(self, data):
kwargs = {
"method": "POST",
"url": self.base_url + 'firewalld_rule',
"data": dumps(data)
}
return self._request(**kwargs)
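# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module).  Both request helpers return
# a (status_code, text) tuple, or (1000, exception) if the call fails outright.
# This module reads CONSUL_CONFIGS from opsadmin.settings, so the sketch only
# runs inside that Django project; the firewalld payload fields below are
# hypothetical placeholders, not a documented schema.
if __name__ == "__main__":
    tasks = TasksRequests()
    status, body = tasks.database()
    print(status, body)

    networks = NetworksRequests()
    status, body = networks.firewalld_rule({
        "node": "10.0.0.12",      # hypothetical payload, for illustration only
        "action": "add",
        "rule": "port=8080/tcp",
    })
    print(status, body)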
|
PypiClean
|
/ifb-openlink-1.0.0.tar.gz/ifb-openlink-1.0.0/openlink/core/static/ckeditor/ckeditor/lang/ja.js
|
/*
Copyright (c) 2003-2021, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['ja']={"editor":"リッチテキストエディタ","editorPanel":"リッチテキストエディタパネル","common":{"editorHelp":"ヘルプは ALT 0 を押してください","browseServer":"サーバブラウザ","url":"URL","protocol":"プロトコル","upload":"アップロード","uploadSubmit":"サーバーに送信","image":"イメージ","flash":"Flash","form":"フォーム","checkbox":"チェックボックス","radio":"ラジオボタン","textField":"1行テキスト","textarea":"テキストエリア","hiddenField":"不可視フィールド","button":"ボタン","select":"選択フィールド","imageButton":"画像ボタン","notSet":"<なし>","id":"Id","name":"Name属性","langDir":"文字表記の方向","langDirLtr":"左から右 (LTR)","langDirRtl":"右から左 (RTL)","langCode":"言語コード","longDescr":"longdesc属性(長文説明)","cssClass":"スタイルシートクラス","advisoryTitle":"Title属性","cssStyle":"スタイルシート","ok":"OK","cancel":"キャンセル","close":"閉じる","preview":"プレビュー","resize":"ドラッグしてリサイズ","generalTab":"全般","advancedTab":"高度な設定","validateNumberFailed":"値が数値ではありません","confirmNewPage":"変更内容を保存せず、 新しいページを開いてもよろしいでしょうか?","confirmCancel":"オプション設定を変更しました。ダイアログを閉じてもよろしいでしょうか?","options":"オプション","target":"ターゲット","targetNew":"新しいウインドウ (_blank)","targetTop":"最上部ウィンドウ (_top)","targetSelf":"同じウィンドウ (_self)","targetParent":"親ウィンドウ (_parent)","langDirLTR":"左から右 (LTR)","langDirRTL":"右から左 (RTL)","styles":"スタイル","cssClasses":"スタイルシートクラス","width":"幅","height":"高さ","align":"行揃え","left":"左","right":"右","center":"中央","justify":"両端揃え","alignLeft":"左揃え","alignRight":"右揃え","alignCenter":"Align Center","alignTop":"上","alignMiddle":"中央","alignBottom":"下","alignNone":"なし","invalidValue":"不正な値です。","invalidHeight":"高さは数値で入力してください。","invalidWidth":"幅は数値で入力してください。","invalidLength":"Value specified for the \"%1\" field must be a positive number with or without a valid measurement unit (%2).","invalidCssLength":"入力された \"%1\" 項目の値は、CSSの大きさ(px, %, in, cm, mm, em, ex, pt, または pc)が正しいものである/ないに関わらず、正の値である必要があります。","invalidHtmlLength":"入力された \"%1\" 項目の値は、HTMLの大きさ(px または %)が正しいものである/ないに関わらず、正の値である必要があります。","invalidInlineStyle":"入力されたインラインスタイルの値は、\"名前 : 値\" のフォーマットのセットで、複数の場合はセミコロンで区切られている形式である必要があります。","cssLengthTooltip":"ピクセル数もしくはCSSにセットできる数値を入力してください。(px,%,in,cm,mm,em,ex,pt,or pc)","unavailable":"%1<span class=\"cke_accessibility\">, 利用不可能</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Space","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"キーボードショートカット","optionDefault":"Default"},"about":{"copy":"Copyright © $1. 
All rights reserved.","dlgTitle":"CKEditorについて","moreInfo":"ライセンス情報の詳細はウェブサイトにて確認してください:"},"basicstyles":{"bold":"太字","italic":"斜体","strike":"打ち消し線","subscript":"下付き","superscript":"上付き","underline":"下線"},"bidi":{"ltr":"テキストの向き : 左から右へ","rtl":"テキストの向き : 右から左へ"},"blockquote":{"toolbar":"ブロック引用文"},"notification":{"closed":"通知を閉じました。"},"toolbar":{"toolbarCollapse":"ツールバーを閉じる","toolbarExpand":"ツールバーを開く","toolbarGroups":{"document":"Document","clipboard":"Clipboard/Undo","editing":"Editing","forms":"Forms","basicstyles":"Basic Styles","paragraph":"Paragraph","links":"Links","insert":"Insert","styles":"Styles","colors":"Colors","tools":"Tools"},"toolbars":"編集ツールバー"},"clipboard":{"copy":"コピー","copyError":"ブラウザーのセキュリティ設定によりエディタのコピー操作を自動で実行することができません。実行するには手動でキーボードの(Ctrl/Cmd+C)を使用してください。","cut":"切り取り","cutError":"ブラウザーのセキュリティ設定によりエディタの切り取り操作を自動で実行することができません。実行するには手動でキーボードの(Ctrl/Cmd+X)を使用してください。","paste":"貼り付け","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"貼り付け場所","pasteMsg":"Paste your content inside the area below and press OK."},"colorbutton":{"auto":"自動","bgColorTitle":"背景色","colors":{"000":"Black","800000":"Maroon","8B4513":"Saddle Brown","2F4F4F":"Dark Slate Gray","008080":"Teal","000080":"Navy","4B0082":"Indigo","696969":"Dark Gray","B22222":"Fire Brick","A52A2A":"Brown","DAA520":"Golden Rod","006400":"Dark Green","40E0D0":"Turquoise","0000CD":"Medium Blue","800080":"Purple","808080":"Gray","F00":"Red","FF8C00":"Dark Orange","FFD700":"Gold","008000":"Green","0FF":"Cyan","00F":"Blue","EE82EE":"Violet","A9A9A9":"Dim Gray","FFA07A":"Light Salmon","FFA500":"Orange","FFFF00":"Yellow","00FF00":"Lime","AFEEEE":"Pale Turquoise","ADD8E6":"Light Blue","DDA0DD":"Plum","D3D3D3":"Light Grey","FFF0F5":"Lavender Blush","FAEBD7":"Antique White","FFFFE0":"Light Yellow","F0FFF0":"Honeydew","F0FFFF":"Azure","F0F8FF":"Alice Blue","E6E6FA":"Lavender","FFF":"White","1ABC9C":"Strong Cyan","2ECC71":"Emerald","3498DB":"Bright Blue","9B59B6":"Amethyst","4E5F70":"Grayish Blue","F1C40F":"Vivid Yellow","16A085":"Dark Cyan","27AE60":"Dark Emerald","2980B9":"Strong Blue","8E44AD":"Dark Violet","2C3E50":"Desaturated Blue","F39C12":"Orange","E67E22":"Carrot","E74C3C":"Pale Red","ECF0F1":"Bright Silver","95A5A6":"Light Grayish Cyan","DDD":"Light Gray","D35400":"Pumpkin","C0392B":"Strong Red","BDC3C7":"Silver","7F8C8D":"Grayish Cyan","999":"Dark Gray"},"more":"その他の色...","panelTitle":"色","textColorTitle":"文字色"},"colordialog":{"clear":"クリア","highlight":"ハイライト","options":"カラーオプション","selected":"選択された色","title":"色選択"},"templates":{"button":"テンプレート","emptyListMsg":"(テンプレートが定義されていません)","insertOption":"現在のエディタの内容と置き換えます","options":"テンプレートオプション","selectPromptMsg":"エディターで使用するテンプレートを選択してください。<br>(現在のエディタの内容は失われます):","title":"内容テンプレート"},"contextmenu":{"options":"コンテキストメニューオプション"},"copyformatting":{"label":"フォーマットをコピー","notification":{"copied":"コピーされたフォーマット","applied":"フォーマットを適用しました","canceled":"フォーマットを取り消しました","failed":"フォーマットのコピーに失敗しました。最初にフォーマットをコピーしてから適用してください。"}},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Title属性","cssClassInputLabel":"スタイルシートクラス","edit":"Divコンテナを編集","inlineStyleInputLabel":"インラインスタイル","langDirLTRLabel":"左から右 (LTR)","langDirLabel":"文字表記の方向","langDirRTLLabel":"右から左 (RTL)","languageCodeInputLabel":" 言語コード","remove":"Divコンテナを削除","styleSelectLabel":"スタイル","title":"Divコンテナを作成","toolbar":"Divコンテナを作成"},"elementspath":{"eleLabel":"要素パス","eleTitle":"%1 要素"},"exportpdf":{"documentReady":"Document is ready!","error":"Error 
occurred.","processingDocument":"Processing PDF document...","toolbar":"Export to PDF"},"filetools":{"loadError":"ファイルの読み込み中にエラーが発生しました。","networkError":"ファイルのアップロード中にネットワークエラーが発生しました。","httpError404":"ファイルのアップロード中にHTTPエラーが発生しました。(404: File not found)","httpError403":"ファイルのアップロード中にHTTPエラーが発生しました。(403: Forbidden)","httpError":"ファイルのアップロード中にHTTPエラーが発生しました。(error status: %1)","noUrlError":"アップロードURLが定義されていません。","responseError":"サーバーの応答が不正です。"},"find":{"find":"検索","findOptions":"検索オプション","findWhat":"検索する文字列:","matchCase":"大文字と小文字を区別する","matchCyclic":"末尾に逹したら先頭に戻る","matchWord":"単語単位で探す","notFoundMsg":"指定された文字列は見つかりませんでした。","replace":"置換","replaceAll":"すべて置換","replaceSuccessMsg":"%1 個置換しました。","replaceWith":"置換後の文字列:","title":"検索と置換"},"fakeobjects":{"anchor":"アンカー","flash":"Flash Animation","hiddenfield":"不可視フィールド","iframe":"IFrame","unknown":"Unknown Object"},"flash":{"access":"スクリプトアクセス(AllowScriptAccess)","accessAlways":"すべての場合に通信可能(Always)","accessNever":"すべての場合に通信不可能(Never)","accessSameDomain":"同一ドメインのみに通信可能(Same domain)","alignAbsBottom":"下部(絶対的)","alignAbsMiddle":"中央(絶対的)","alignBaseline":"ベースライン","alignTextTop":"テキスト上部","bgcolor":"背景色","chkFull":"フルスクリーン許可","chkLoop":"ループ再生","chkMenu":"Flashメニュー可能","chkPlay":"再生","flashvars":"フラッシュに渡す変数(FlashVars)","hSpace":"横間隔","properties":"Flash プロパティ","propertiesTab":"プロパティ","quality":"画質","qualityAutoHigh":"自動/高","qualityAutoLow":"自動/低","qualityBest":"品質優先","qualityHigh":"高","qualityLow":"低","qualityMedium":"中","scale":"拡大縮小設定","scaleAll":"すべて表示","scaleFit":"上下左右にフィット","scaleNoBorder":"外が見えない様に拡大","title":"Flash プロパティ","vSpace":"縦間隔","validateHSpace":"横間隔は数値で入力してください。","validateSrc":"リンクURLを入力してください。","validateVSpace":"縦間隔は数値で入力してください。","windowMode":"ウィンドウモード","windowModeOpaque":"背景を不透明設定","windowModeTransparent":"背景を透過設定","windowModeWindow":"標準"},"font":{"fontSize":{"label":"サイズ","voiceLabel":"フォントサイズ","panelTitle":"フォントサイズ"},"label":"フォント","panelTitle":"フォント","voiceLabel":"フォント"},"forms":{"button":{"title":"ボタン プロパティ","text":"テキスト (値)","type":"タイプ","typeBtn":"ボタン","typeSbm":"送信","typeRst":"リセット"},"checkboxAndRadio":{"checkboxTitle":"チェックボックスのプロパティ","radioTitle":"ラジオボタンのプロパティ","value":"値","selected":"選択済み","required":"必須"},"form":{"title":"フォームのプロパティ","menu":"フォームのプロパティ","action":"アクション (action)","method":"メソッド (method)","encoding":"エンコード方式 (encoding)"},"hidden":{"title":"不可視フィールド プロパティ","name":"名前 (name)","value":"値 (value)"},"select":{"title":"選択フィールドのプロパティ","selectInfo":"情報","opAvail":"利用可能なオプション","value":"選択項目値","size":"サイズ","lines":"行","chkMulti":"複数選択を許可","required":"必須","opText":"選択項目名","opValue":"値","btnAdd":"追加","btnModify":"編集","btnUp":"上へ","btnDown":"下へ","btnSetValue":"選択した値を設定","btnDelete":"削除"},"textarea":{"title":"テキストエリア プロパティ","cols":"列","rows":"行"},"textfield":{"title":"1行テキスト プロパティ","name":"名前","value":"値","charWidth":"サイズ","maxChars":"最大長","required":"必須","type":"タイプ","typeText":"テキスト","typePass":"パスワード入力","typeEmail":"メール","typeSearch":"検索","typeTel":"電話番号","typeUrl":"URL"}},"format":{"label":"書式","panelTitle":"段落の書式","tag_address":"アドレス","tag_div":"標準 (DIV)","tag_h1":"見出し 1","tag_h2":"見出し 2","tag_h3":"見出し 3","tag_h4":"見出し 4","tag_h5":"見出し 5","tag_h6":"見出し 6","tag_p":"標準","tag_pre":"書式付き"},"horizontalrule":{"toolbar":"水平線"},"iframe":{"border":"フレームの枠を表示","noUrl":"iframeのURLを入力してください。","scrolling":"スクロールバーの表示を許可","title":"iFrameのプロパティ","toolbar":"IFrame","tabindex":"Remove from 
tabindex"},"image":{"alt":"代替テキスト","border":"枠線の幅","btnUpload":"サーバーに送信","button2Img":"選択した画像ボタンを画像に変換しますか?","hSpace":"水平間隔","img2Button":"選択した画像を画像ボタンに変換しますか?","infoTab":"画像情報","linkTab":"リンク","lockRatio":"比率を固定","menu":"画像のプロパティ","resetSize":"サイズをリセット","title":"画像のプロパティ","titleButton":"画像ボタンのプロパティ","upload":"アップロード","urlMissing":"画像のURLを入力してください。","vSpace":"垂直間隔","validateBorder":"枠線の幅は数値で入力してください。","validateHSpace":"水平間隔は数値で入力してください。","validateVSpace":"垂直間隔は数値で入力してください。"},"indent":{"indent":"インデント","outdent":"インデント解除"},"smiley":{"options":"絵文字オプション","title":"顔文字挿入","toolbar":"絵文字"},"language":{"button":"言語を設定","remove":"言語を削除"},"link":{"acccessKey":"アクセスキー","advanced":"高度な設定","advisoryContentType":"Content Type属性","advisoryTitle":"Title属性","anchor":{"toolbar":"アンカー挿入/編集","menu":"アンカーの編集","title":"アンカーのプロパティ","name":"アンカー名","errorName":"アンカー名を入力してください。","remove":"アンカーを削除"},"anchorId":"エレメントID","anchorName":"アンカー名","charset":"リンク先のcharset","cssClasses":"スタイルシートクラス","download":"強制的にダウンロード","displayText":"表示文字","emailAddress":"E-Mail アドレス","emailBody":"本文","emailSubject":"件名","id":"Id","info":"ハイパーリンク情報","langCode":"言語コード","langDir":"文字表記の方向","langDirLTR":"左から右 (LTR)","langDirRTL":"右から左 (RTL)","menu":"リンクを編集","name":"Name属性","noAnchors":"(このドキュメント内にアンカーはありません)","noEmail":"メールアドレスを入力してください。","noUrl":"リンクURLを入力してください。","noTel":"Please type the phone number","other":"<その他の>","phoneNumber":"Phone number","popupDependent":"開いたウィンドウに連動して閉じる (Netscape)","popupFeatures":"ポップアップウィンドウ特徴","popupFullScreen":"全画面モード(IE)","popupLeft":"左端からの座標で指定","popupLocationBar":"ロケーションバー","popupMenuBar":"メニューバー","popupResizable":"サイズ可変","popupScrollBars":"スクロールバー","popupStatusBar":"ステータスバー","popupToolbar":"ツールバー","popupTop":"上端からの座標で指定","rel":"関連リンク","selectAnchor":"アンカーを選択","styles":"スタイルシート","tabIndex":"タブインデックス","target":"ターゲット","targetFrame":"<フレーム>","targetFrameName":"ターゲットのフレーム名","targetPopup":"<ポップアップウィンドウ>","targetPopupName":"ポップアップウィンドウ名","title":"ハイパーリンク","toAnchor":"ページ内のアンカー","toEmail":"E-Mail","toUrl":"URL","toPhone":"Phone","toolbar":"リンク挿入/編集","type":"リンクタイプ","unlink":"リンクを削除","upload":"アップロード"},"list":{"bulletedlist":"番号無しリスト","numberedlist":"番号付きリスト"},"liststyle":{"bulletedTitle":"箇条書きのプロパティ","circle":"白丸","decimal":"数字 (1, 2, 3, etc.)","disc":"黒丸","lowerAlpha":"小文字アルファベット (a, b, c, d, e, etc.)","lowerRoman":"小文字ローマ数字 (i, ii, iii, iv, v, etc.)","none":"なし","notset":"<なし>","numberedTitle":"番号付きリストのプロパティ","square":"四角","start":"開始","type":"種類","upperAlpha":"大文字アルファベット (A, B, C, D, E, etc.)","upperRoman":"大文字ローマ数字 (I, II, III, IV, V, etc.)","validateStartNumber":"リストの開始番号は数値で入力してください。"},"magicline":{"title":"ここに段落を挿入"},"maximize":{"maximize":"最大化","minimize":"最小化"},"newpage":{"toolbar":"新しいページ"},"pagebreak":{"alt":"改ページ","toolbar":"印刷の為に改ページ挿入"},"pastetext":{"button":"プレーンテキストとして貼り付け","pasteNotification":"%1 を押して貼り付けます。 
ブラウザは、ツールバーボタンまたはコンテキストメニューオプションを使用した貼り付けをサポートしていません。","title":"プレーンテキストとして貼り付け"},"pastefromword":{"confirmCleanup":"貼り付けを行うテキストはワード文章からコピーされようとしています。貼り付ける前にクリーニングを行いますか?","error":"内部エラーにより貼り付けたデータをクリアできませんでした","title":"ワード文章から貼り付け","toolbar":"ワード文章から貼り付け"},"preview":{"preview":"プレビュー"},"print":{"toolbar":"印刷"},"removeformat":{"toolbar":"書式を解除"},"save":{"toolbar":"保存"},"selectall":{"toolbar":"すべて選択"},"showblocks":{"toolbar":"ブロック表示"},"sourcearea":{"toolbar":"ソース"},"specialchar":{"options":"特殊文字オプション","title":"特殊文字の選択","toolbar":"特殊文字を挿入"},"scayt":{"btn_about":"SCAYTバージョン","btn_dictionaries":"辞書","btn_disable":"SCAYT無効","btn_enable":"SCAYT有効","btn_langs":"言語","btn_options":"オプション","text_title":"スペルチェック設定(SCAYT)"},"stylescombo":{"label":"スタイル","panelTitle":"スタイル","panelTitle1":"ブロックスタイル","panelTitle2":"インラインスタイル","panelTitle3":"オブジェクトスタイル"},"table":{"border":"枠線の幅","caption":"キャプション","cell":{"menu":"セル","insertBefore":"セルを前に挿入","insertAfter":"セルを後に挿入","deleteCell":"セルを削除","merge":"セルを結合","mergeRight":"右に結合","mergeDown":"下に結合","splitHorizontal":"セルを水平方向に分割","splitVertical":"セルを垂直方向に分割","title":"セルのプロパティ","cellType":"セルの種類","rowSpan":"行の結合数","colSpan":"列の結合数","wordWrap":"単語の折り返し","hAlign":"水平方向の配置","vAlign":"垂直方向の配置","alignBaseline":"ベースライン","bgColor":"背景色","borderColor":"ボーダーカラー","data":"テーブルデータ (td)","header":"ヘッダ","yes":"はい","no":"いいえ","invalidWidth":"セル幅は数値で入力してください。","invalidHeight":"セル高さは数値で入力してください。","invalidRowSpan":"縦幅(行数)は数値で入力してください。","invalidColSpan":"横幅(列数)は数値で入力してください。","chooseColor":"色の選択"},"cellPad":"セル内間隔","cellSpace":"セル内余白","column":{"menu":"列","insertBefore":"列を左に挿入","insertAfter":"列を右に挿入","deleteColumn":"列を削除"},"columns":"列数","deleteTable":"表を削除","headers":"ヘッダ (th)","headersBoth":"両方","headersColumn":"最初の列のみ","headersNone":"なし","headersRow":"最初の行のみ","heightUnit":"height unit","invalidBorder":"枠線の幅は数値で入力してください。","invalidCellPadding":"セル内余白は数値で入力してください。","invalidCellSpacing":"セル間余白は数値で入力してください。","invalidCols":"列数は0より大きな数値を入力してください。","invalidHeight":"高さは数値で入力してください。","invalidRows":"行数は0より大きな数値を入力してください。","invalidWidth":"幅は数値で入力してください。","menu":"表のプロパティ","row":{"menu":"行","insertBefore":"行を上に挿入","insertAfter":"行を下に挿入","deleteRow":"行を削除"},"rows":"行数","summary":"表の概要","title":"表のプロパティ","toolbar":"表","widthPc":"パーセント","widthPx":"ピクセル","widthUnit":"幅の単位"},"undo":{"redo":"やり直す","undo":"元に戻す"},"widget":{"move":"ドラッグして移動","label":"%1 ウィジェット"},"uploadwidget":{"abort":"アップロードを中止しました。","doneOne":"ファイルのアップロードに成功しました。","doneMany":"%1個のファイルのアップロードに成功しました。","uploadOne":"ファイルのアップロード中 ({percentage}%)...","uploadMany":"{max} 個中 {current} 個のファイルをアップロードしました。 ({percentage}%)..."}};
|
PypiClean
|
/pytsdb-0.2.1.tar.gz/pytsdb-0.2.1/pytsdb.py
|
import datetime
import requests
class ConnectionError(Exception):
pass
class QueryError(Exception):
pass
class TimeoutError(Exception):
pass
class Connection(object):
def __init__(self, host="localhost", port=4242, timeout=1):
self.url = "http://{0}:{1}/api".format(host, port)
self.timeout = timeout
if not self.__valid():
raise ConnectionError()
def fetch_metric(self, metric, start, end, tags={}, aggregator="sum",
downsample=None, ms_resolution=True):
"""Fetch time series data from OpenTSDB
Parameters:
metric:
A string representing a valid OpenTSDB metric.
tags:
A dict mapping tag names to tag values. Tag names and values are
always strings.
{ 'user_id': '44' }
start:
A datetime.datetime-like object representing the start of the
range to query over.
end:
A datetime.datetime-like object representing the end of the
range to query over.
aggregator:
The function for merging multiple time series together. For
example, if the "user_id" tag is not specified, this aggregator
function is used to combine all heart rate time series into one
time series. (Yes, this isn't very useful.)
For queries that return only one time series, this parameter is
not relevant.
Valid values: "sum", "min", "max", "avg", "dev"
See: http://opentsdb.net/docs/build/html/user_guide/query/aggregators.html
            downsample:
A relative time interval to "downsample". This isn't true
downsampling; rather, if you specify a downsampling of "5m"
(five minutes), OpenTSDB will split data into five minute
intervals, and return one data point in the middle of each
interval whose value is the average of all data points within
that interval.
Valid relative time values are strings of the following format:
"<amount><time_unit>"
Valid time units: "ms", "s", "m", "h", "d", "w", "n", "y"
Date and time format: http://opentsdb.net/docs/build/html/user_guide/query/dates.html
ms_resolution:
Whether or not to output data point timestamps in milliseconds
or seconds. If this flag is false and there are multiple
data points within a second, those data points will be down
sampled using the query's aggregation function.
Returns:
A dict mapping timestamps to data points
"""
query = "{aggregator}:{downsample}{metric}{{{tags}}}".format(
aggregator=aggregator,
downsample=downsample + "-avg:" if downsample else "",
metric=metric,
tags=','.join("%s=%s" % (k, v) for k, v in tags.items())
)
params = {
'ms': ms_resolution,
'start': '{0:.3f}'.format(start.timestamp()),
'end': '{0:.3f}'.format(end.timestamp()),
'm': query
}
response = self.__request("/query", params)
if response.status_code == 200:
try:
return response.json()[0]['dps']
except IndexError:
# empty data set
return {}
raise QueryError(response.json())
def fetch_sorted_metric(self, *args, **kwargs):
"""Fetch and sort time series data from OpenTSDB
Takes the same parameters as `fetch_metric`, but returns a list of
(timestamp, value) tuples sorted by timestamp.
"""
return sorted(self.fetch_metric(*args, **kwargs).items(),
key=lambda x: float(x[0]))
def __valid(self):
return self.__request("/version").status_code == 200
def __request(self, path, params=None):
try:
return requests.get(self.url + path, params=params, timeout=self.timeout)
except requests.exceptions.Timeout:
raise TimeoutError()
def connect(*args, **kwargs):
return Connection(*args, **kwargs)
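# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): connect to a local OpenTSDB
# instance and pull one day of data.  The metric name "sys.cpu.user" and the
# "host" tag below are placeholders; query whatever series your TSD stores.
if __name__ == "__main__":
    tsdb = connect(host="localhost", port=4242, timeout=5)

    end = datetime.datetime.now()
    start = end - datetime.timedelta(days=1)

    points = tsdb.fetch_sorted_metric(
        "sys.cpu.user",
        start=start,
        end=end,
        tags={"host": "web01"},
        aggregator="avg",
        downsample="5m",   # one averaged point per five-minute interval
    )
    for timestamp, value in points[:10]:
        print(timestamp, value)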
|
PypiClean
|
/artifacts-20230723.tar.gz/artifacts-20230723/docs/sources/background/index.rst
|
##########
Background
##########
The first version of the artifact definitions originated from the
`GRR project <https://github.com/google/grr>`__, where it is used to describe
and quickly collect data of interest, for example specific files or Windows
Registry keys. The goal of the format is to provide a tool independent way to
describe the majority of forensic artifacts in a language that is readable by
humans and machines.
The format is designed to be simple and straightforward, so that a digital
forensic analyst is able to quickly write artifact definitions during an
investigation without having to rely on complex standards or tooling.
The format is intended to describe forensically-relevant data on a machine,
while being tool agnostic. In particular, we intentionally avoided adding
IOC-like logic, or describing how the data should be collected, since this
varies between tools.
For some background on the artifacts system and how we expect it to be used see
`this Blackhat presentation <https://www.blackhat.com/us-14/archives.html#grr-find-all-the-badness-collect-all-the-things>`__
and `YouTube video <https://www.youtube.com/watch?v=ren6QSvwFvg>`__ from the GRR team.
.. toctree::
:maxdepth: 2
Terminology <Terminology>
Statistics <Stats>
|
PypiClean
|
/certora_cli_alpha_uri_use_certora_cli_req_in_publish_script-20230517.18.55.375777-py3-none-any.whl/certora_cli/EVMVerifier/certoraNodeFilters.py
|
from typing import Any, Dict
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from Shared.certoraUtils import NoValEnum
class NodeFilters:
class NodeType(NoValEnum):
def is_this_node_type(self, type_name_node: Dict[str, Any]) -> bool:
return type_name_node["nodeType"] == self.value
class TypeNameNode(NodeType):
ELEMENTARY = "ElementaryTypeName"
FUNCTION = "FunctionTypeName"
USER_DEFINED = "UserDefinedTypeName"
MAPPING = "Mapping"
ARRAY = "ArrayTypeName"
class UserDefinedTypeDefNode(NodeType):
ENUM = "EnumDefinition"
STRUCT = "StructDefinition"
VALUE_TYPE = "UserDefinedValueTypeDefinition"
CONTRACT = "ContractDefinition"
@staticmethod
def CERTORA_CONTRACT_NAME() -> str:
return "certora_contract_name"
@staticmethod
def is_enum_definition(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "EnumDefinition"
@staticmethod
def is_struct_definition(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "StructDefinition"
@staticmethod
def is_user_defined_value_type_definition(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "UserDefinedValueTypeDefinition"
@staticmethod
def is_contract_definition(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "ContractDefinition"
@staticmethod
def is_user_defined_type_definition(node: Dict[str, Any]) -> bool:
return NodeFilters.is_enum_definition(node) or NodeFilters.is_struct_definition(
node) or NodeFilters.is_user_defined_value_type_definition(node)
@staticmethod
def is_import(node: Dict[str, Any]) -> bool:
return node["nodeType"] == "ImportDirective"
@staticmethod
def is_defined_in_a_contract_or_library(node: Dict[str, Any]) -> bool:
return NodeFilters.CERTORA_CONTRACT_NAME() in node
@staticmethod
def is_defined_in_contract(node: Dict[str, Any], contract_name: str) -> bool:
return node[NodeFilters.CERTORA_CONTRACT_NAME()] == contract_name
|
PypiClean
|
/matchkraft-0.0.2.tar.gz/matchkraft-0.0.2/LICENCE.md
|
Copyright 2020 Rene Tatua Castillo
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
PypiClean
|
/pomsets-gui-1.0.10.tar.gz/pomsets-gui-1.0.10/src/pomsets_app/gui/qt/license_agreement/controller.py
|
from __future__ import with_statement
import logging
import os
import StringIO
from PyQt4.QtCore import *
from PyQt4 import QtGui
import pomsets_app.utils as AppUtilsModule
import pomsets_app.gui.qt as QtModule
import pomsets_app.gui.qt.application as ApplicationModule
class Controller(QtModule.Controller, QtGui.QDialog):
LICENSE_FILES = ['gpl-2.0.txt',
'gpl-3.0.txt',
'commercial.txt']
LICENSE_LABELS = ['Non-commercial | GPL v2',
'Non-commercial | GPL v3',
'Commercial | 30 day evaluation']
def __init__(self, *args, **kwds):
QtGui.QDialog.__init__(self, *args, **kwds)
QtModule.Controller.__init__(self)
return
def populate(self):
widget = self.widget()
contextManager = self.contextManager()
comboBoxLicense = widget.comboBoxLicense
comboBoxLicense.addItems(Controller.LICENSE_LABELS)
self.populateLicense()
return
def populateLicense(self):
widget = self.widget()
resourcePath = ApplicationModule.getDefaultResourcePath()
licenseType = str(widget.comboBoxLicense.currentText())
licenseIndex = Controller.LICENSE_LABELS.index(licenseType)
licenseFile = Controller.LICENSE_FILES[licenseIndex]
licensePath = os.path.join(resourcePath, 'licenses', licenseFile)
textEditLicense = widget.textEditLicense
with open(licensePath, 'r') as f:
licenseText = ''.join(f.readlines())
textEditLicense.setText(licenseText)
pass
return
def userHasReadLicense(self):
checkBoxRead = self.widget().checkBoxRead
return checkBoxRead.isChecked()
def saveUserAgreement(self):
configDir = AppUtilsModule.getDefaultConfigDir()
if not os.path.exists(configDir):
os.makedirs(configDir)
licenseFile = self.contextManager().app().getLicenseFile()
if os.path.exists(licenseFile):
return
# this will create the file
with open(licenseFile, 'w') as f:
pass
return
@pyqtSlot()
def on_buttonAgree_clicked(self):
if not self.userHasReadLicense():
# show a message box
messageBox = QtGui.QMessageBox(self)
messageBox.setText('Please check the box to indicate that you have read the license')
messageBox.show()
return
self.saveUserAgreement()
self.accept()
return
@pyqtSlot()
def on_buttonDisagree_clicked(self):
self.reject()
return
@pyqtSlot(str)
def on_comboBoxLicense_currentIndexChanged(self, value):
self.populateLicense()
return
# END class Controller
pass
|
PypiClean
|
/sherpa-4.15.1.tar.gz/sherpa-4.15.1/docs/optimisers/index.rst
|
*******************************************************
Optimisers: How to improve the current parameter values
*******************************************************
The optimiser varies the model parameters in an attempt to find
the solution which minimises the chosen
:doc:`statistic <../statistics/index>`.
In general it is expected that the optimiser will be used by
a :py:class:`~sherpa.fit.Fit` object to
:doc:`perform the fit <../fit/index>`, but
it can be used directly using the
:py:meth:`~sherpa.optmethods.OptMethod.fit` method. The optimiser
object allows configuration values to be changed which can
tweak the behavior; for instance the tolerance to determine whether
the fit has converged, the maximum number of iterations to use,
or how much information to display whilst optimising a model.
As an example, the default parameter values for the
:py:class:`Levenberg-Marquardt <sherpa.optmethods.LevMar>`
optimiser are::
>>> from sherpa.optmethods import LevMar
>>> lm = LevMar()
>>> print(lm)
name = levmar
ftol = 1.19209289551e-07
xtol = 1.19209289551e-07
gtol = 1.19209289551e-07
maxfev = None
epsfcn = 1.19209289551e-07
factor = 100.0
verbose = 0
These settings are available both as fields of the object and via
the :py:attr:`~sherpa.optmethods.OptMethod.config` dictionary
field.
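As a hedged sketch of how these settings can be tweaked before a fit (the
names below are the fields shown in the output above)::

    >>> lm.ftol = 1.0e-6
    >>> lm.maxfev = 500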
Additional optimisers can be built by extending from the
:py:class:`sherpa.optmethods.OptMethod` class. This can be used
to provide access to external packages such as
`CERN's MINUIT optimisation library <https://iminuit.readthedocs.io>`_.
Choosing an optimiser
=====================
.. todo::
Need to work on this section.
.. warning::
The following may not correctly represent Sherpa's current capabilities,
so please take care when interpreting this section.
The following information is adapted from a memo written by
Mark Birkinshaw (1998).
The minimization of mathematical functions is a difficult operation. A general
function :math:`f({\bf x})` of the vector argument :math:`\bf x` may
have many isolated local minima, non-isolated minimum hypersurfaces, or
even more complicated topologies. No finite minimization routine can
guarantee to locate the unique, global, minimum of :math:`f({\bf x})`
without being fed intimate knowledge about the function by the user.
This does not mean that minimization is a hopeless task. For many problems
there are techniques which will locate a local minimum which may be "close
enough" to the global minimum, and there are techniques which will find the
global minimum a large fraction of the time (in a probabilistic
sense). However, the reader should be aware of my philosophy that there is
no "best" algorithm for finding the minimum of a general function. Instead,
Sherpa provides tools which will allow the user to look at the overall behavior
of the function and find plausible local minima, which will often contain
the physically-meaningful minimum in the types of problem with which Sherpa
deals.
In general, the best assurance that the correct minimum has been found in a
particular calculation is careful examination of the nature of the solution
(e.g., by plotting a fitted function over data), and some confidence that the
full region that the minimum may lie in has been well searched by the algorithm
used. This document seeks to give the reader some information about what the
different choices of algorithm will mean in terms of run-time and confidence of
locating a good minimum.
Some points to take away from the discussions in the rest of this document.
1. Never accept the result of a minimization using a single optimization run;
always test the minimum using a different method.
2. Check that the result of the minimization does not have parameter values
at the edges of the parameter space. If this happens, then the fit must be
disregarded since the minimum lies outside the space that has been
searched, or the minimization missed the minimum.
3. Get a feel for the range of values of the target function (in Sherpa this
is the fit statistic), and the stability of the solution, by starting the
minimization from several different parameter values.
4. Always check that the minimum "looks right" by visualizing the
model and the data.
Sherpa contains two types of routine for minimizing a fit statistic. I will
call them the "single-shot" routines, which start from a guessed set of
parameters, and then try to improve the parameters in a continuous fashion, and
the "scatter-shot" routines, which try to look at parameters over the entire
permitted hypervolume to see if there are better minima than near the starting
guessed set of parameters.
Single-shot techniques
----------------------
As the reader might expect, the single-shot routines are relatively quick, but
depend critically on the guessed initial parameter values :math:`{\bf x}_0`
being near (in some sense) to the minimum :math:`{\bf x}_{\rm min}`. All the
single-shot routines investigate the local behaviour of the function near
:math:`{\bf x}_0`, and then make a guess at the best direction and distance to
move to find a better minimum. After testing at the new point, they accept that
point as the next guess, :math:`{\bf x}_1`, if the fit statistic is smaller
than at the first point, and modify the search procedure if it isn't
smaller. The routines continue to run until one of the following occurs:
1. all search directions result in an increased value of the fit statistic;
2. an excessive number of steps have been taken; or
3. something strange happens to the fit statistic (e.g., it turns out to be
discontinuous in some horrible way).
This description indicates that for the single-shot routines, there is a
considerable emphasis on the initial search position, :math:`{\bf x}_0`, being
reasonable. It may also be apparent that the values of these parameters should
be moderate; neither too small (:math:`10^{-12}`, say), nor too large
(:math:`10^{12}`, say). This is because the initial choice of step size in
moving from :math:`{\bf x}_0` towards the next improved set of parameters,
:math:`{\bf x}_1`, is based on the change in the fit statistic, :math:`f({\bf
x})` as components of :math:`{\bf x}` are varied by amounts :math:`{\cal
O}(1)`. If :math:`f` varies little as :math:`{\bf x}` is varied by this
amount, then the calculation of the distance to move to reach the next root may
be inaccurate. On the other hand, if :math:`f` has a lot of structure (several
maxima and minima) as :math:`{\bf x}` is varied by the initial step size, then
these single-shot minimizers may mistakenly jump entirely over the
"interesting" region of parameter space.
These considerations suggest that the user should arrange that the search
vector is scaled so that the range of parameter space to be searched is neither
too large nor too small. To take a concrete example, it would not be a good
idea to have :math:`x_7` parameterize the Hydrogen column density
(:math:`N_{\rm H}`) in a spectral fit, with an initial guess of
:math:`10^{20}\ {\rm cm}^{-2}`, and a search range
(in units of :math:`{\rm cm}^{-2}`) of
:math:`10^{16}` to :math:`10^{24}`. The minimizers will look for variations in
the fit statistic as :math:`N_{\rm H}` is varied by
:math:`1\ {\rm cm}^{-2}`, and
finding none (to the rounding accuracy likely for the code), will conclude that
:math:`x_7` is close to being a null parameter and can be ignored in the
fitting. It would be much better to have :math:`x_7 = \log_{10}(N_{\rm H})`,
with a search range of 16 to 24. Significant variations in the fit statistic
will occur as :math:`x_7` is varied by :math:`\pm 1`, and the code has a
reasonable chance of finding a useful solution.
Bearing this in mind, the single-shot minimizers in Sherpa are listed below:
:py:class:`~sherpa.optmethods.NelderMead`
This technique - also known as Simplex - creates a polyhedral search element
around the initial position, :math:`{\bf x}_0`, and then grows or shrinks in
particular directions while crawling around parameter space, to try to place
a minimum within the final search polyhedron. This technique has some
hilarious ways of getting stuck in high-dimension parameter spaces (where the
polyhedron can become a strange shape), but is very good at finding minima in
regions where the fit statistic has a moderately well-defined topology. Since
it works in a different way than Levenberg-Marquardt minimization, a good
strategy is to combine both minimizers to test whether an apparent minimum
found by one technique is stable when searched by the other. I regard
NelderMead searching as good in smooth and simple parameter spaces,
particularly when looking at regions where the fit statistic depends on a
parameter in a linear or parabolic fashion, and bad where surfaces of equal
value of the fit statistic are complicated. In either case, it is essential
that the initial size of the polyhedron (with sides of length 1 unit) is a
smallish fraction of the search space.
:py:class:`Levenberg-Marquardt <sherpa.optmethods.LevMar>`
This can be considered to be a censored maximum-gradients technique which,
starting from a first guess, moves towards a minimum by finding a good
direction in which to move, and calculating a sensible distance to go.
Its principal drawback is that to calculate the distance to move it has to
make some assumptions about how large a step size to take, and hence there is
an implicit assumption that the search space is reasonably well scaled (to
:math:`\pm 10` units in each of the search directions, say). It is also
important that in finding these gradients, the steps do not miss a lot of
important structure; i.e. there should not be too many subsidiary minima.
The search directions and distances to move are based on the shape of the
target function near the initial guessed minimum, :math:`{\bf x}_0`,
with progressive movement towards the dominant local minimum. Since
this technique uses information about the local curvature of the fit
statistic as well as its local gradients, the approach tends to stabilize the
result in some cases. I regard the techniques implemented in Sherpa as being
good minimum-refiners for simple local topologies, since more assumptions
about topology are made than in the NelderMead approach, but bad at
finding global minima for target functions with complicated topologies.
Scatter-shot techniques
-----------------------
Although a bit ad hoc, these techniques attempt to locate a decent minimum over
the entire range of the search parameter space. Because they involve searching
a lot of the parameter space, they involve many function evaluations, and are
somewhere between quite slow and incredibly-tediously slow.
The routines are listed below:
:py:class:`~sherpa.optmethods.GridSearch`
This routine simply searches a grid in each of the search parameters,
where the spacing is uniform between the minimum and maximum
value of each parameter. There is an option to refine the fit
at each point, by setting the
:py:attr:`~sherpa.optmethods.GridSearch.method` attribute to one of the
single-shot optimisers, but this is not set by default, as it can
significantly increase the time required to fit the data.
The coarseness of the grid sets how precise a root will be found,
and if the fit statistic has significant structure on a
smaller scale, then the grid-searcher will miss it completely. This is a good
technique for finding an approximation to the minimum for a slowly-varying
function. It is a bad technique for getting accurate estimates of the
location of a minimum, or for examining a fit statistic with lots of
subsidiary maxima and minima within the search space. It is intended
for use with
:py:class:`template models <sherpa.models.template.TemplateModel>`.
:py:class:`Monte Carlo <sherpa.optmethods.MonCar>`
This is a simple population based, stochastic function minimizer. At
each iteration it combines population vectors - each containing a set of
parameter values - using a weighted difference. This optimiser can
be used to find solutions to complex search spaces but is not guaranteed
to find a global minimum. It is over-kill for relatively simple problems.
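As a minimal sketch of combining the two families when fitting - the exact
:py:class:`~sherpa.fit.Fit` constructor arguments should be checked against
its documentation, and ``data`` and ``model`` are assumed to exist already -
a scatter-shot search can be followed by a single-shot refinement::

    >>> from sherpa.fit import Fit
    >>> from sherpa.optmethods import MonCar, NelderMead
    >>> f = Fit(data, model, method=MonCar())
    >>> coarse_result = f.fit()
    >>> f.method = NelderMead()
    >>> refined_result = f.fit()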
Summary and best-buy strategies
===============================
Overall, the single-shot methods are best regarded as ways of refining minima
located in other ways: from good starting guesses, or from the scatter-shot
techniques. Using intelligence to come up with a good first-guess solution is
the best approach, when the single-shot refiners can be used to get accurate
values for the parameters at the minimum. However, I would certainly recommend
running at least a second single-shot minimizer after the first, to get some
indication that one set of assumptions about the shape of the minimum is not
compromising the solution. It is probably best if the code rescales the
parameter range between minimizations, so that a completely different sampling
of the function near the trial minimum is being made.
=================== ============ ========= ==========================
Optimiser Type Speed Commentary
=================== ============ ========= ==========================
NelderMead single-shot fast OK for refining minima
Levenberg-Marquardt single-shot fast OK for refining minima,
should **only** be used
with chi-square statistics
GridSearch scatter-shot slow OK for smooth functions
Monte Carlo scatter-shot very slow Good in many cases
=================== ============ ========= ==========================
Reference/API
=============
.. toctree::
:maxdepth: 2
optmethods
optfcts
|
PypiClean
|
/gamification-engine-0.4.0.tar.gz/gamification-engine-0.4.0/gengine/app/jsscripts/node_modules/prop-types/checkPropTypes.js
|
'use strict';
var printWarning = function() {};
if (process.env.NODE_ENV !== 'production') {
var ReactPropTypesSecret = require('./lib/ReactPropTypesSecret');
var loggedTypeFailures = {};
var has = Function.call.bind(Object.prototype.hasOwnProperty);
printWarning = function(text) {
var message = 'Warning: ' + text;
if (typeof console !== 'undefined') {
console.error(message);
}
try {
// --- Welcome to debugging React ---
// This error was thrown as a convenience so that you can use this stack
// to find the callsite that caused this warning to fire.
throw new Error(message);
} catch (x) {}
};
}
/**
* Assert that the values match with the type specs.
* Error messages are memoized and will only be shown once.
*
* @param {object} typeSpecs Map of name to a ReactPropType
* @param {object} values Runtime values that need to be type-checked
* @param {string} location e.g. "prop", "context", "child context"
* @param {string} componentName Name of the component for error messages.
* @param {?Function} getStack Returns the component stack.
* @private
*/
function checkPropTypes(typeSpecs, values, location, componentName, getStack) {
if (process.env.NODE_ENV !== 'production') {
for (var typeSpecName in typeSpecs) {
if (has(typeSpecs, typeSpecName)) {
var error;
// Prop type validation may throw. In case they do, we don't want to
// fail the render phase where it didn't fail before. So we log it.
// After these have been cleaned up, we'll let them throw.
try {
// This is intentionally an invariant that gets caught. It's the same
// behavior as without this statement except with a better message.
if (typeof typeSpecs[typeSpecName] !== 'function') {
var err = Error(
(componentName || 'React class') + ': ' + location + ' type `' + typeSpecName + '` is invalid; ' +
'it must be a function, usually from the `prop-types` package, but received `' + typeof typeSpecs[typeSpecName] + '`.'
);
err.name = 'Invariant Violation';
throw err;
}
error = typeSpecs[typeSpecName](values, typeSpecName, componentName, location, null, ReactPropTypesSecret);
} catch (ex) {
error = ex;
}
if (error && !(error instanceof Error)) {
printWarning(
(componentName || 'React class') + ': type specification of ' +
location + ' `' + typeSpecName + '` is invalid; the type checker ' +
'function must return `null` or an `Error` but returned a ' + typeof error + '. ' +
'You may have forgotten to pass an argument to the type checker ' +
'creator (arrayOf, instanceOf, objectOf, oneOf, oneOfType, and ' +
'shape all require an argument).'
);
}
if (error instanceof Error && !(error.message in loggedTypeFailures)) {
// Only monitor this failure once because there tends to be a lot of the
// same error.
loggedTypeFailures[error.message] = true;
var stack = getStack ? getStack() : '';
printWarning(
'Failed ' + location + ' type: ' + error.message + (stack != null ? stack : '')
);
}
}
}
}
}
/**
* Resets warning cache when testing.
*
* @private
*/
checkPropTypes.resetWarningCache = function() {
if (process.env.NODE_ENV !== 'production') {
loggedTypeFailures = {};
}
}
module.exports = checkPropTypes;
|
PypiClean
|
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/containerservice/v20230702preview/managed_cluster_snapshot.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ManagedClusterSnapshotArgs', 'ManagedClusterSnapshot']
@pulumi.input_type
class ManagedClusterSnapshotArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
creation_data: Optional[pulumi.Input['CreationDataArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
snapshot_type: Optional[pulumi.Input[Union[str, 'SnapshotType']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ManagedClusterSnapshot resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input['CreationDataArgs'] creation_data: CreationData to be used to specify the source resource ID to create this snapshot.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_name: The name of the managed cluster resource.
:param pulumi.Input[Union[str, 'SnapshotType']] snapshot_type: The type of a snapshot. The default is NodePool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if creation_data is not None:
pulumi.set(__self__, "creation_data", creation_data)
if location is not None:
pulumi.set(__self__, "location", location)
if resource_name is not None:
pulumi.set(__self__, "resource_name", resource_name)
if snapshot_type is not None:
pulumi.set(__self__, "snapshot_type", snapshot_type)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> Optional[pulumi.Input['CreationDataArgs']]:
"""
CreationData to be used to specify the source resource ID to create this snapshot.
"""
return pulumi.get(self, "creation_data")
@creation_data.setter
def creation_data(self, value: Optional[pulumi.Input['CreationDataArgs']]):
pulumi.set(self, "creation_data", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="snapshotType")
def snapshot_type(self) -> Optional[pulumi.Input[Union[str, 'SnapshotType']]]:
"""
The type of a snapshot. The default is NodePool.
"""
return pulumi.get(self, "snapshot_type")
@snapshot_type.setter
def snapshot_type(self, value: Optional[pulumi.Input[Union[str, 'SnapshotType']]]):
pulumi.set(self, "snapshot_type", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ManagedClusterSnapshot(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
creation_data: Optional[pulumi.Input[pulumi.InputType['CreationDataArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
snapshot_type: Optional[pulumi.Input[Union[str, 'SnapshotType']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
A managed cluster snapshot resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['CreationDataArgs']] creation_data: CreationData to be used to specify the source resource ID to create this snapshot.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] resource_name_: The name of the managed cluster resource.
:param pulumi.Input[Union[str, 'SnapshotType']] snapshot_type: The type of a snapshot. The default is NodePool.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ManagedClusterSnapshotArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A managed cluster snapshot resource.
:param str resource_name: The name of the resource.
:param ManagedClusterSnapshotArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ManagedClusterSnapshotArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
creation_data: Optional[pulumi.Input[pulumi.InputType['CreationDataArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
snapshot_type: Optional[pulumi.Input[Union[str, 'SnapshotType']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ManagedClusterSnapshotArgs.__new__(ManagedClusterSnapshotArgs)
__props__.__dict__["creation_data"] = creation_data
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["snapshot_type"] = snapshot_type
__props__.__dict__["tags"] = tags
__props__.__dict__["managed_cluster_properties_read_only"] = None
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:containerservice:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220202preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220302preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220402preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220502preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220602preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220702preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220802preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220803preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20220902preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20221002preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20221102preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20230102preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20230202preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20230302preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20230402preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20230502preview:ManagedClusterSnapshot"), pulumi.Alias(type_="azure-native:containerservice/v20230602preview:ManagedClusterSnapshot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ManagedClusterSnapshot, __self__).__init__(
'azure-native:containerservice/v20230702preview:ManagedClusterSnapshot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ManagedClusterSnapshot':
"""
Get an existing ManagedClusterSnapshot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ManagedClusterSnapshotArgs.__new__(ManagedClusterSnapshotArgs)
__props__.__dict__["creation_data"] = None
__props__.__dict__["location"] = None
__props__.__dict__["managed_cluster_properties_read_only"] = None
__props__.__dict__["name"] = None
__props__.__dict__["snapshot_type"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return ManagedClusterSnapshot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> pulumi.Output[Optional['outputs.CreationDataResponse']]:
"""
CreationData to be used to specify the source resource ID to create this snapshot.
"""
return pulumi.get(self, "creation_data")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedClusterPropertiesReadOnly")
def managed_cluster_properties_read_only(self) -> pulumi.Output['outputs.ManagedClusterPropertiesForSnapshotResponse']:
"""
What the properties will be showed when getting managed cluster snapshot. Those properties are read-only.
"""
return pulumi.get(self, "managed_cluster_properties_read_only")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="snapshotType")
def snapshot_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of a snapshot. The default is NodePool.
"""
return pulumi.get(self, "snapshot_type")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
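# --- usage sketch (illustrative, not part of the generated SDK) ---
# A Pulumi program would normally create the resource in its own __main__.py.
# The resource group name and source resource id below are placeholders, and
# the CreationDataArgs/source_resource_id field name is inferred from the
# creation_data documentation above; treat it as an assumption.
def _example_snapshot() -> 'ManagedClusterSnapshot':
    return ManagedClusterSnapshot(
        "exampleSnapshot",
        resource_group_name="example-rg",
        creation_data=CreationDataArgs(
            source_resource_id=(
                "/subscriptions/00000000-0000-0000-0000-000000000000"
                "/resourceGroups/example-rg/providers/Microsoft.ContainerService"
                "/managedClusters/example-cluster"
            ),
        ),
    )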
|
PypiClean
|
/hnzhu010507-0.0.9.tar.gz/hnzhu010507-0.0.9/airtest/utils/snippet.py
|
import os
import sys
import stat
import threading
from functools import wraps
from six import string_types
from six.moves import queue
def split_cmd(cmds):
"""
Split the commands to the list for subprocess
Args:
cmds: command(s)
Returns:
array commands
"""
# cmds = shlex.split(cmds) # disable auto removing \ on windows
return cmds.split() if isinstance(cmds, string_types) else list(cmds)
def get_std_encoding(stream):
"""
Get encoding of the stream
Args:
stream: stream
Returns:
encoding or file system encoding
"""
return getattr(stream, "encoding", None) or sys.getfilesystemencoding()
CLEANUP_CALLS = queue.Queue()
IS_EXITING = False
def reg_cleanup(func, *args, **kwargs):
"""
Register a function to be called during cleanup at exit
Args:
func: the function to register
*args: optional positional arguments passed to func
**kwargs: optional keyword arguments passed to func
Returns:
None
"""
CLEANUP_CALLS.put((func, args, kwargs))
def _cleanup():
# cleanup together to prevent atexit thread issue
while not CLEANUP_CALLS.empty():
(func, args, kwargs) = CLEANUP_CALLS.get()
func(*args, **kwargs)
def kill_proc(proc):
"""
Kill the process and close _io.BufferedWriter to avoid `ResourceWarning: unclosed file <_io.BufferedWriter name=6>`
Args:
proc: subprocess.Popen()
Returns:
"""
proc.kill()
# https://bugs.python.org/issue35182
# In some older Python versions, closing an already-closed I/O stream can raise an exception, so we additionally check `closed` here
if proc.stdout and not proc.stdout.closed:
proc.communicate()
# atexit.register(_cleanup)
_shutdown = threading._shutdown
def exitfunc():
global IS_EXITING
IS_EXITING = True
_cleanup()
_shutdown()
def is_exiting():
return IS_EXITING
# use threading._shutdown to run cleanup when the main thread exits
# atexit runs only after all threads have exited, so it would have to cooperate with daemon threads.
# daemon threads are terminated abruptly, which can cause unexpected errors
threading._shutdown = exitfunc
def on_method_ready(method_name):
"""
Wrapper for lazy initialization of some instance methods
Args:
method_name: instance method name
Returns:
wrapper
"""
def wrapper(func):
@wraps(func)
def ready_func(inst, *args, **kwargs):
key = "_%s_ready" % method_name
if not getattr(inst, key, None):
method = getattr(inst, method_name)
method()
setattr(inst, key, True)
return func(inst, *args, **kwargs)
return ready_func
return wrapper
def ready_method(func):
@wraps(func)
def wrapper(inst, *args, **kwargs):
ret = func(inst, *args, **kwargs)
key = "_%s_ready" % func.__name__
if not getattr(inst, key, None):
setattr(inst, key, True)
return ret
return wrapper
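# --- usage sketch (illustrative, not part of the original module) ---
# `ready_method` marks the wrapped initializer as done once it has run, and
# `on_method_ready("connect")` lazily calls that initializer before any
# decorated method executes. The class below is hypothetical.
class _LazyDevice(object):
    @ready_method
    def connect(self):
        self.handle = object()  # pretend to open a real connection here

    @on_method_ready("connect")
    def shell(self, cmd):
        # connect() has already been called by the wrapper when we get here
        return "ran %r with %r" % (cmd, self.handle)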
def make_file_executable(file_path):
"""
If the path does not have executable permissions, execute chmod +x
:param file_path:
:return:
"""
if os.path.isfile(file_path):
mode = os.lstat(file_path)[stat.ST_MODE]
executable = True if mode & stat.S_IXUSR else False
if not executable:
os.chmod(file_path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
return True
return False
|
PypiClean
|
/cdat-lite-6.0.1.tar.gz/cdat-lite-6.0.1/Packages/genutil/Lib/salstat.py
|
import numpy.ma,cdms2,array_indexing_emulate as array_indexing
from statistics import __checker
import numpy
## Short routines used in the functional constructs to reduce analysis time
add=numpy.ma.add
multiply=numpy.ma.multiply
sum=numpy.ma.sum
mean=numpy.ma.average # Shortcut
def _fixScalar(a):
if isinstance(a,(float,int)) or a.shape==():
a=numpy.ma.array([a,],copy=0)
return a
else:
return a
## Diference Squared
def _diffsquared(a,b): return numpy.ma.power(a-b,2)
def differencesquared(x,y,axis=0):
"""Computes the Squared differecne between 2 datasets
Usage:
diff=differencesquared(a,b)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
isvar=0
if cdms2.isVariable(y) :
isvar=1
xatt=y.attributes
ax=y.getAxisList()
if cdms2.isVariable(x) :
isvar=1
xatt=x.attributes
ax=x.getAxisList()
diff=_diffsquared(x,y)
if isvar:
diff=cdms2.createVariable(diff,axes=ax,id='differencesquared',copy=0)
if 'units' in xatt.keys(): diff.units=xatt['units']+'*'+xatt['units']
## in case we passed 2 numpy
if (not numpy.ma.isMA(x)) and (not numpy.ma.isMA(y)):
diff=diff.filled(1.e20)
return diff
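## --- usage sketch (illustrative, not part of the original module) ---
## With plain numpy inputs the result is a plain numpy array, e.g.
##   differencesquared(numpy.array([1., 2.]), numpy.array([3., 1.]))
## gives [4., 1.], i.e. (a - b) squared element by element.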
## No need to make it user available
def _shellsort(inlist):
""" _shellsort algorithm. Sorts a 1D-list.
Usage: _shellsort(inlist)
Returns: sorted-inlist, sorting-index-vector (for original list)
"""
return numpy.ma.sort(inlist,axis=0),numpy.ma.argsort(inlist,axis=0)
## Rankdata
def _rankdata(inlist):
"""
Ranks the data in inlist, dealing with ties appropriately.
Adapted from Gary Perlman's |Stat ranksort.
Usage: _rankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
"""
n = inlist.shape[0]
svec, ivec = _shellsort(inlist)
ivec=ivec.astype('i')
sumranks = numpy.ma.zeros(inlist.shape[1:])
dupcount = numpy.ma.zeros(inlist.shape[1:],'d')
newlist = numpy.ma.zeros(inlist.shape,'d')
newlist2 = numpy.ma.zeros(inlist.shape,'d')
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1.
if i!=n-1:
c1=numpy.ma.not_equal(svec[i],svec[i+1])
else:
c1=numpy.ma.ones(inlist.shape[1:]) # avoid referencing c1 before assignment when n == 1
if i==n-1 or (not numpy.ma.allequal(c1,0)):
averank = numpy.ma.array(sumranks / dupcount + 1)
maxdupcount=int(numpy.ma.maximum(dupcount))
for j in range(i-maxdupcount+1,i+1):
c2=numpy.ma.logical_and(c1,numpy.ma.greater_equal(j,maxdupcount-dupcount))
newlist[j]=numpy.ma.where(c2,averank,newlist[j])
sumranks = numpy.ma.where(c1,0.,sumranks)
dupcount = numpy.ma.where(c1,0,dupcount)
for i in range(n):
newlist2=array_indexing.set(newlist2,ivec[i],newlist[i])
return newlist2
def rankdata(x,axis=0):
"""
Ranks the data, dealing with ties appropriately.
Adapted from Gary Perlman's |Stat ranksort.
Further adapted to numpy.ma/numpy by PCMDI's team
Usage: rankdata(array, axis=axisoptions)
Returns: a list of length equal to inlist, containing rank scores
Option:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
even: 'xy': to do over 2 dimensions at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x2,y,weights,axis,ax=__checker(x,None,None,axis)
rk=_rankdata(x2)
if not ax is None:
rk=cdms2.createVariable(rk,id='ranked',copy=0)
if len(axis)>1:
ax.insert(0,rk.getAxis(0))
rk.setAxisList(ax)
else:
ax.insert(0,x.getAxis(axis[0]))
rk.setAxisList(ax)
rk=rk(order=x.getOrder(ids=1))
return rk
def _tiecorrect(rankvals):
"""
Corrects for ties in Mann Whitney U and Kruskal Wallis H tests. See
Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
New York: McGraw-Hill. Code adapted from |Stat rankind.c code.
Usage: _tiecorrect(rankvals)
Returns: T correction factor for U or H
"""
sorted=numpy.ma.sort(rankvals,axis=0)
n = sorted.shape[0]
T = numpy.ma.zeros(sorted.shape[1:])
i = 0
c0=numpy.ma.ones(sorted.shape[1:])
while (i<n-1):
nties = numpy.ma.ones(sorted.shape[1:])
c1=numpy.ma.logical_and(numpy.ma.equal(sorted[i],sorted[i+1]),c0)
c2=c1
j=i
while not numpy.ma.allequal(c2,0):
c2=numpy.ma.logical_and(c2,numpy.ma.equal(sorted[j],sorted[j+1]))
nties=nties+c2
j=j+1
if j>=n-1:
break
T = numpy.ma.where(c1,T + nties**3 - nties,T)
i = i+1
if i<n-1:
c0=numpy.ma.not_equal(sorted[i],sorted[i-1])
T = T / float(n**3-n)
return 1.0 - T
def tiecorrect(x,axis=0):
"""
Corrects for ties in Mann Whitney U and Kruskal Wallis H tests. See
Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
New York: McGraw-Hill. Code adapted from |Stat rankind.c code.
Usage: T = tiecorrect(rankvals,axis=axisoptions)
Option:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
even: 'xy': to do over 2 dimensions at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
T=_tiecorrect(x)
if not ax is None:
T=cdms2.createVariable(T,id='tiecorrect',copy=0,axes=ax)
## print axis,ax,T.shape
## if len(axis)>1:
## ax.insert(0,T.getAxis(0))
## T.setAxisList(ax)
## else:
## ax.insert(0,x.getAxis(axis[0]))
## T.setAxisList(ax)
## T=T(order=x.getOrder(ids=1))
return T
###########################
## Probability functions ##
###########################
def _chisqprob(chisq,df,Z_MAX=6.0):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: _chisqprob(chisq,df,Z_MAX=6.0)
"""
BIG = 20.0
def ex(x):
mask=x.mask
tmp=numpy.ma.masked_less(x,-BIG)
tmp=numpy.ma.exp(tmp).filled(0)
if mask is not None:
tmp=numpy.ma.masked_where(mask,tmp)
return tmp
c1=numpy.ma.logical_or(numpy.ma.less_equal(chisq,0.),numpy.ma.less(df,1))
result=c1*1.
a = 0.5 * chisq
even=numpy.ma.equal(0,numpy.ma.remainder(df,2.))
y=numpy.ma.where(numpy.ma.greater(df,1),ex(-a),0.)
s=numpy.ma.where(even,y,2.0 * _zprob(-numpy.ma.sqrt(chisq),Z_MAX))
## Part 1 df>2
c1=numpy.ma.logical_not(c1)
cdf2=numpy.ma.logical_and(numpy.ma.greater(df,2),c1)
chisq=numpy.ma.where(cdf2,.5*(df-1),chisq)
z=numpy.ma.where(even,1.,.5)
## Where a > BIG
c2=numpy.ma.greater(a,BIG)
e=numpy.ma.where(even,0.,numpy.ma.log(numpy.ma.sqrt(numpy.pi)))
c=numpy.ma.log(a)
e2=numpy.ma.where(even,1.,1.0 / numpy.ma.sqrt(numpy.pi) / numpy.ma.sqrt(a))
cc=numpy.ma.zeros(e.shape)
c3=numpy.ma.less_equal(z,chisq)
c2a=numpy.ma.logical_and(c2,cdf2)
c2b=numpy.ma.logical_and(numpy.ma.logical_not(c2),cdf2)
#c4=numpy.ma.logical_and(c3,c2b)
while not numpy.ma.allequal(numpy.ma.logical_and(c3,cdf2),0):
c4=numpy.ma.logical_and(c3,c2a)
e=numpy.ma.where(c4,numpy.ma.log(z)+e,e)
s=numpy.ma.where(c4,s+ex(c*z-a-e),s)
z=numpy.ma.where(c4,z+1.,z)
result=numpy.ma.where(c4,s,result)
c4=numpy.ma.logical_and(c3,c2b)
e2=numpy.ma.where(c4,e2*a/z,e2)
cc=cc+e2
z=numpy.ma.where(c4,z+1.,z)
c3=numpy.ma.less_equal(z,chisq)
result=numpy.ma.where(c4,cc*y+s,result)
result=numpy.ma.where(numpy.ma.logical_and(numpy.ma.logical_not(cdf2),c1),s,result)
return result
def chisqprob(chisq,df,Z_MAX=6.0):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: prob = chisqprob(chisq,df)
Options:
Z_MAX: Maximum meaningful value for z probability (default=6.0)
"""
chisq = _fixScalar(chisq)
df = _fixScalar(df)
isvar=0
if cdms2.isVariable(chisq) :
isvar=1
ax=chisq.getAxisList()
p=_chisqprob(chisq,df)
if isvar:
p=cdms2.createVariable(p,axes=ax,id='probability',copy=0)
## in case we passed 2 numpy
if not numpy.ma.isMA(chisq):
p=p.filled(1.e20)
return p
def _inversechi(prob, df):
"""This function calculates the inverse of the chi square function. Given
a p-value and a df, it should approximate the critical value needed to
achieve these functions. Adapted from Gary Perlmans critchi function in
C. Apologies if this breaks copyright, but no copyright notice was
attached to the relevant file.
"""
minchisq = numpy.ma.zeros(df.shape)
maxchisq = numpy.ma.ones(df.shape)*99999.0
chi_epsilon = 0.000001
c1=numpy.ma.less_equal(prob,0.)
chisqval=c1*maxchisq
chisqval=numpy.ma.masked_where(c1,chisqval)
chisqval=numpy.ma.masked_where(numpy.ma.greater_equal(prob,1.),chisqval)
c1=numpy.ma.logical_not(numpy.ma.logical_or(numpy.ma.greater_equal(prob,1.),c1)) ## slots left to be set
chisqval = numpy.ma.where(c1,df / numpy.ma.sqrt(prob),chisqval)
c2=numpy.ma.greater(maxchisq - minchisq,chi_epsilon)
while not numpy.ma.allequal(c2,0.):
c=numpy.ma.less(_chisqprob(chisqval, df),prob)
maxchisq=numpy.ma.where(c,chisqval,maxchisq)
minchisq=numpy.ma.where(numpy.ma.logical_not(c),chisqval,minchisq)
chisqval = numpy.ma.where(c2,(maxchisq + minchisq) * 0.5,chisqval)
c2=numpy.ma.greater(maxchisq - minchisq,chi_epsilon)
chisqval=numpy.ma.where(numpy.ma.less_equal(prob,0.),99999.0,chisqval)
chisqval=numpy.ma.where(numpy.ma.greater_equal(prob,1.),0.0,chisqval)
return chisqval
def inversechi(prob, df):
"""This function calculates the inverse of the chi square function. Given
a p-value and a df, it should approximate the critical value needed to
achieve this probability. Adapted from Gary Perlman's critchi function in
C. Apologies if this breaks copyright, but no copyright notice was
attached to the relevant file.
Usage invchi = inversechi(prob,df,axis=axisoptions)
"""
prob = _fixScalar(prob)
df = _fixScalar(df)
isvar=0
if cdms2.isVariable(prob) :
isvar=1
ax=prob.getAxisList()
invchi=_inversechi(prob,df)
if isvar:
invchi=cdms2.createVariable(invchi,axes=ax,id='inversechi',copy=0)
## in case we passed 2 numpy
if not numpy.ma.isMA(prob):
invchi=invchi.filled(1.e20)
return invchi
def _erfcc(x):
"""
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7. Adapted from Numerical Recipes.
Usage: _erfcc(x)
"""
z = numpy.ma.absolute(x)
t = 1.0 / (1.0+0.5*z)
ans = t * numpy.ma.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t* \
(0.09678418+t*(-0.18628806+t* \
(0.27886807+t*(-1.13520398+t* \
(1.48851587+t*(-0.82215223+t* \
0.17087277)))))))))
return numpy.ma.where(numpy.ma.greater_equal(x,0),ans,2.-ans)
def erfcc(x):
"""
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7. Adapted from Numerical Recipes.
Usage: err = erfcc(x)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
isvar=0
if cdms2.isVariable(x) :
isvar=1
ax=x.getAxisList()
err =_erfcc(x)
if isvar:
err = cdms2.createVariable(err,axes=ax,id='erfcc',copy=0)
## in case we passed a numpy
if not numpy.ma.isMA(x):
err=err.filled(1.e20)
return err
def _zprob(z,Z_MAX = 6.0):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, _zprob(z) = 1-tail probability
for z>0, 1.0-_zprob(z) = 1-tail probability
for any z, 2.0*(1.0-_zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat.
Usage: z = _zprob(z,Z_MAX = 6.0)
"""
## Z_MAX = 6.0 # maximum meaningful z-value
y=.5*numpy.ma.absolute(z)
c1=numpy.ma.greater_equal(y,Z_MAX*.5)
c2=numpy.ma.less(y,1.)
x=numpy.ma.not_equal(z,0)*1.
w=numpy.ma.where(c2,y*y,1.)
x=numpy.ma.where(c2,((((((((0.000124818987 * w
-0.001075204047) * w +0.005198775019) * w
-0.019198292004) * w +0.059054035642) * w
-0.151968751364) * w +0.319152932694) * w
-0.531923007300) * w +0.797884560593) * y * 2.0,x)
c2=numpy.ma.logical_not(numpy.ma.logical_or(c1,c2))
y=numpy.ma.where(c2,y-2.,y)
x=numpy.ma.where(c2, (((((((((((((-0.000045255659 * y
+0.000152529290) * y -0.000019538132) * y
-0.000676904986) * y +0.001390604284) * y
-0.000794620820) * y -0.002034254874) * y
+0.006549791214) * y -0.010557625006) * y
+0.011630447319) * y -0.009279453341) * y
+0.005353579108) * y -0.002141268741) * y
+0.000535310849) * y +0.999936657524,x)
prob=numpy.ma.where(numpy.ma.greater(z,0.),((x+1.0)*0.5),((1.0-x)*0.5))
return prob
def zprob(z,Z_MAX = 6.0):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat.
Z_MAX: Maximum meaningful value for z probability (default = 6)
Usage: z = zprob(z,Z_MAX=6.0 )
"""
z = _fixScalar(z)
isvar=0
if cdms2.isVariable(z) :
isvar=1
ax=z.getAxisList()
prob =_zprob(z, Z_MAX)
if isvar:
prob = cdms2.createVariable(prob,axes=ax,id='zprob',copy=0)
## in case we passed a numpy
if not numpy.ma.isMA(z):
prob=prob.filled(1.e20)
return prob
def _ksprob(alam):
"""
Computes a Kolmogorov-Smirnov t-test significance level. Adapted from
Numerical Recipes.
Usage: ks = _ksprob(alam)
"""
fac = 2.0
sum = 0.0
termbf = 0.0
a2 = -2.0*alam*alam
c=numpy.ma.not_equal(alam,0)
ans=numpy.ma.ones(alam.shape)
for j in range(1,201):
## Avoiding overflow....
ae=a2*j*j
ae=numpy.ma.where(numpy.ma.less(ae,-745),-745,ae)
term = fac*numpy.ma.exp(ae)
sum = sum + term
a=numpy.ma.absolute(term)
c1=numpy.ma.less_equal(a,.001*termbf)
c2=numpy.ma.less(a,1.E-8*sum).filled(0)
c2=numpy.ma.logical_or(c1,c2)
## To avoid overflow on exp....
a2=numpy.ma.masked_where(c2,a2)
c2=numpy.ma.logical_and(c2,c)
ans=numpy.ma.where(c2.filled(0),sum,ans)
c=numpy.ma.logical_and(c,numpy.ma.logical_not(c2))
fac = -fac
termbf = numpy.ma.absolute(term)
if numpy.ma.allequal(c.filled(0),0):
break
return ans # Get here only if fails to converge; was 0.0!!
def ksprob(x):
"""
Computes a Kolmogorov-Smirnov t-test significance level. Adapted from
Numerical Recipes.
Usage: ks = ksprob(x)
"""
x = _fixScalar(x)
isvar=0
if cdms2.isVariable(x) :
isvar=1
ax=x.getAxisList()
prob =_ksprob(x)
if isvar:
prob = cdms2.createVariable(prob,axes=ax,id='ksprob',copy=0)
## in case we passed a numpy
if not numpy.ma.isMA(x):
prob=prob.filled(1.e20)
return prob
def _fprob (dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: _fprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
return _betai(0.5*dfden, 0.5*dfnum, dfden/(dfden+dfnum*F))
def fprob (dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: prob = fprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
dfnum = _fixScalar(dfnum)
dfden = _fixScalar(dfden)
F = _fixScalar(F)
isvar=0
if cdms2.isVariable(F) :
isvar=1
ax=F.getAxisList()
prob =_fprob(dfnum, dfden, F)
if isvar:
prob = cdms2.createVariable(prob,axes=ax,id='fprob',copy=0)
## in case we passed a numpy
if not numpy.ma.isMA(F):
prob=prob.filled(1.e20)
return prob
def _tprob(df, t):
return _betai(0.5*df,numpy.ma.ones(df.shape)*0.5,df/(1.*df+t*t))
def tprob(df, t):
"""Returns t probabilty given degree of freedom and T statistic
Usage: prob = tprob(df,t)
"""
df = _fixScalar(df)
t = _fixScalar(t)
isvar=0
if cdms2.isVariable(t) :
isvar=1
ax=t.getAxisList()
prob =_tprob(df,t)
if isvar:
prob = cdms2.createVariable(prob,axes=ax,id='tprob',copy=0)
## in case we passed a numpy
if not numpy.ma.isMA(t):
prob=prob.filled(1.e20)
return prob
def _inversef(prob, df1, df2):
"""This function returns the f value for a given probability and 2 given
degrees of freedom. It is an approximation using the fprob function.
Adapted from Gary Perlmans critf function - apologies if copyright is
broken, but no copyright notice was attached """
f_epsilon = 0.000001
maxf = numpy.ma.ones(prob.shape)*9999.0
minf = numpy.ma.zeros(prob.shape)
c1=numpy.ma.logical_or(numpy.ma.less_equal(prob,0.),numpy.ma.greater_equal(prob,1.))
c1=numpy.ma.logical_not(c1).filled(0) # Takes the oppsite, means can be set
fval = numpy.ma.where(c1,1.0 / prob,0.)
c2=numpy.ma.greater(numpy.ma.absolute(maxf-minf),f_epsilon)
c2=numpy.ma.logical_and(c1,c2).filled(0)
while not numpy.ma.allequal(c2,0.):
c1=numpy.ma.less(_fprob(df1,df2,fval),prob).filled(0)
maxf=numpy.ma.where(numpy.ma.logical_and(c1,c2).filled(0),fval,maxf)
minf=numpy.ma.where(numpy.ma.logical_and(numpy.ma.logical_not(c1),c2).filled(0),fval,minf)
fval = numpy.ma.where(c2,(maxf + minf) * 0.5,fval)
c1=numpy.ma.greater(numpy.ma.absolute(maxf-minf),f_epsilon)
c2=numpy.ma.logical_and(c1,c2).filled(0)
return fval
def inversef(prob, df1, df2):
"""This function returns the f value for a given probability and 2 given
degrees of freedom. It is an approximation using the fprob function.
Adapted from Gary Perlmans critf function - apologies if copyright is
broken, but no copyright notice was attached
Usage: fval = inversef(prob, df1, df2)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
prob = _fixScalar(prob)
df1 = _fixScalar(df1)
df2 = _fixScalar(df2)
isvar=0
if cdms2.isVariable(prob) :
isvar=1
ax=prob.getAxisList()
fval =_inversef(prob, df1, df2)
if isvar:
fval = cdms2.createVariable(fval,axes=ax,id='inversef',copy=0)
## in case we passed a numpy
if not numpy.ma.isMA(prob):
fval=fval.filled(1.e20)
return fval
def _betacf(a,b,x,ITMAX=200,EPS=3.0E-7):
"""
This function evaluates the continued fraction form of the incomplete
Beta function, betai. (Adapted from: Numerical Recipes in C.)
Usage: _betacf(a,b,x,ITMAX=200,EPS=3.0E-7)
ITMAX: Maximum number of iteration
EPS: Epsilon number
"""
a=numpy.ma.array(a,copy=0)
bm = az = am = numpy.ma.ones(a.shape)
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
ans=numpy.ma.ones(a.shape)
ans=numpy.ma.masked_equal(ans,1.)
c1=numpy.ma.ones(a.shape)
for i in range(ITMAX+1):
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
c=numpy.ma.less(numpy.ma.absolute(az-aold),EPS*numpy.ma.absolute(az))
c=numpy.ma.logical_and(c,c1)
ans=numpy.ma.where(c,az,ans)
c1=numpy.ma.logical_and(c1,numpy.ma.logical_not(c))
if numpy.ma.allequal(c1,0):
break
return ans
#print 'a or b too big, or ITMAX too small in Betacf.'
def betacf(a,b,x,ITMAX=200,EPS=3.0E-7):
"""
This function evaluates the continued fraction form of the incomplete
Beta function, betai. (Adapted from: Numerical Recipes in C.)
Usage: beta = betacf(a,b,x,ITMAX=200,EPS=3.0E-7)
ITMAX: Maximum number of iteration
EPS: Epsilon number
"""
a = _fixScalar(a)
b = _fixScalar(b)
x = _fixScalar(x)
isvar=0
if cdms2.isVariable(b) :
isvar=1
ax=b.getAxisList()
if cdms2.isVariable(a) :
isvar=1
ax=a.getAxisList()
beta =_betacf(a,b,x,ITMAX,EPS)
if isvar:
beta = cdms2.createVariable(beta,axes=ax,id='betacf',copy=0)
## in case we passed a numpy
if (not numpy.ma.isMA(a)) and (not numpy.ma.isMA(b)):
beta=beta.filled(1.e20)
return beta
def _gammaln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipes in C.)
Usage: _gammaln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*numpy.ma.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + numpy.ma.log(2.50662827465*ser)
def gamma(x):
"""
Returns the gamma function of x.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipes in C.)
Usage: g = gamma(x)
"""
x = _fixScalar(x)
isvar=0
if cdms2.isVariable(x) :
isvar=1
ax=x.getAxisList()
g =_gammaln(x)
if isvar:
g = cdms2.createVariable(g,axes=ax,id='gamma',copy=0)
## in case we passed a numpy
if not numpy.ma.isMA(x):
g =g.filled(1.e20)
return g
def _betai(a,b,x,ITMAX=200,EPS=3.0E-7):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented here,
using the betacf function. (Adapted from: Numerical Recipes in C.)
Usage: b = _betai(a,b,x,ITMAX=200,EPS=3.0E-7)
ITMAX: Maximum number of iteration for betacf
EPS: Epsilon number
"""
a=numpy.ma.array(a,copy=0)
ans=numpy.ma.ones(a.shape)
ans=numpy.ma.masked_equal(ans,1)
c1=numpy.ma.logical_or(numpy.ma.equal(x,0),numpy.ma.equal(x,1.)).filled(0)
## Makes sure x is ok
x=numpy.ma.masked_less_equal(x,0.)
x=numpy.ma.masked_greater_equal(x,1.)
ans=numpy.ma.where(c1,0.,ans)
c1=numpy.ma.logical_not(c1)
bt = numpy.ma.exp(_gammaln(a+b)-_gammaln(a)-_gammaln(b)+a*numpy.ma.log(x)+b*
numpy.ma.log(1.0-x))
c2=numpy.ma.less(x,(a+1.0)/(a+b+2.0))
ans=numpy.ma.where(numpy.ma.logical_and(c2,c1),bt*_betacf(a,b,x,ITMAX,EPS)/a,ans)
ans=numpy.ma.where(numpy.ma.logical_and(numpy.ma.logical_not(c2),c1),1.0-bt*_betacf(b,a,1.0-x,ITMAX,EPS)/b,ans)
return ans
def betai(a,b,x,ITMAX=200,EPS=3.0E-7):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented here,
    using the betacf function. (Adapted from: Numerical Recipes in C.)
Usage: beta = betai(a,b,x,ITMAX=200,EPS=3.0E-7)
ITMAX: Maximum number of iteration for betacf
EPS: Epsilon number
"""
a = _fixScalar(a)
b = _fixScalar(b)
x = _fixScalar(x)
isvar=0
if cdms2.isVariable(x) :
isvar=1
ax=x.getAxisList()
if cdms2.isVariable(b) :
isvar=1
ax=b.getAxisList()
if cdms2.isVariable(a) :
isvar=1
ax=a.getAxisList()
beta =_betai(a,b,x,ITMAX,EPS)
if isvar:
beta = cdms2.createVariable(beta,axes=ax,id='betai',copy=0)
## in case we passed numpys only
if (not numpy.ma.isMA(a)) and (not numpy.ma.isMA(b)) and (not numpy.ma.isMA(x)):
beta=beta.filled(1.e20)
return beta
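## Illustration only (commented out), with hypothetical scalar arguments:
## for a = b = 1 the incomplete beta function reduces to I_x(1,1) = x, so the
## call below should return (approximately) 0.3.
## p = betai(1., 1., 0.3)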
###########################
## Test Classes ##
###########################
def _sumsquares(data,axis=0):
"""Return the sum of the squares
Usage:
    sq=_sumsquares(data)
"""
return numpy.ma.sum(data**2,axis=axis)
def sumsquares(x,axis=0):
"""Return the sum of the squares
Usage:
    sq=sumsquares(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
sq=_sumsquares(x)
if not ax is None:
sq=cdms2.createVariable(sq,axes=ax,id='sumsquares',copy=0)
if 'units' in xatt.keys() : sq.units=xatt['units']+'*'+xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
sq=sq.filled(1.e20)
return sq
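## Illustration only (commented out), on a small hypothetical masked array:
## import numpy
## data = numpy.ma.array([1., 2., 3.])
## sq = sumsquares(data)   # expected: 1 + 4 + 9 = 14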
def _Range(data):
"""Returns the range of the data
Usage:
rg=_Range(data)
"""
return numpy.ma.maximum.reduce(data)-numpy.ma.minimum.reduce(data)
def Range(x,axis=0):
"""Returns the range of the data
Usage:
rg=Range(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_Range(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='range',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def harmonicmean(x,axis=0):
"""Returns the harmonicmean of the data
Usage:
h=harmonicmean(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_harmonicmean(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='harmonicmean',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _harmonicmean(data):
"""Returns the harmonicmean of the data
Usage:
h=_harmonicmean(data)
"""
return 1./numpy.ma.average(1./data,axis=0)
def _median(data):
"""Not really sophisticated median, based of arrays dimension,
Not to use with missing values
Usage:
med=_median(data)
"""
N = data.shape[0]
if (N % 2)==1:
median = numpy.ma.sort(data,axis=0)[(N - 1) / 2]
else:
        median = numpy.ma.sort(data,axis=0)[N / 2] # not ideal, but works
return median
def median(x,axis=0):
"""Not really sophisticated median, based of arrays dimension,
Not to use with missing values
Usage:
med=_median(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_median(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='median',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
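## Illustration only (commented out): the median is taken along the first
## (default) axis of a small hypothetical array with no missing values.
## import numpy
## med = median(numpy.ma.array([3., 1., 4., 1., 5.]))   # expected: 3.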
def _medianranks(data):
""" Return the ranks of the median
Usage:
medrk=_medianranks(data)
"""
return _median(_rankdata(numpy.ma.sort(data,axis=0)))
def medianranks(x,axis=0):
""" Return the ranks of the median
Usage:
medrk=medianranks(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_medianranks(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='medianranks',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _mad(data):
""" return the sum of the deviation from the median
Usage:
md = mad(data)
"""
return numpy.ma.sum(data-_median(data),axis=0)
def mad(x,axis=0):
""" return the sum of the deviation from the median
Usage:
md=_mad(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_mad(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='mad',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _numberuniques(data):
"""Return the number of unique values
Usage:
uniques=numberuniques(data)
"""
Uniques = numpy.ma.zeros(data.shape[1:])
N=data.shape[0]
for i in range(N):
uniques = numpy.ma.ones(data.shape[1:])
for j in range(N):
if (i != j):
uniques = numpy.ma.where(numpy.ma.equal(data[i],data[j]),0.,uniques)
Uniques = numpy.ma.where(uniques,Uniques +1 ,Uniques)
return Uniques
def numberuniques(x,axis=0):
"""Return the number of unique values
Usage:
uniques=numberuniques(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_numberuniques(x)
if not ax is None:
        out=cdms2.createVariable(out,axes=ax,id='numberuniques',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _center(data):
"""Returns the deviation from the mean
Usage:
_centered=_center(data) # returns deviation from mean
"""
state=numpy.ma.average(data,axis=0)
return data-state
def center(x,axis=0):
"""Returns the deviation from the mean
Usage:
centered=center(data) # returns deviation from mean
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_center(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='center',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _ssdevs(data):
"""Return the sum of the square of the deviation from mean
Usage:
ss=_ssdevs(data)
"""
return numpy.ma.sum(_center(data)**2,axis=0)
def ssdevs(x,axis=0):
"""Return the sum of the square of the deviation from mean
Usage:
ss=_ssdevs(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_ssdevs(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='ssdevs',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
## def _geometricmean(data):
## """returns the geometric mean of the data, different form genutil !!!!
## Usage:
## g=geometricmean(data)
## """
## return reduce(numpy.ma.multiply, _center(data))
## def geometricmean(x,axis=0):
## """returns the geometric mean of the data, different form genutil !!!!
## Usage:
## g=geometricmean(data)
## Options:
## axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
## default value = 0. You can pass the name of the dimension or index
## (integer value 0...n) over which you want to compute the statistic.
## you can also pass 'xy' to work on both axes at once
## """
## if cdms2.isVariable(x) : xatt=x.attributes
## x,y,weights,axis,ax=__checker(x,None,None,axis)
## out=_geometricmean(x)
## if not ax is None:
## out=cdms2.createVariable(out,axes=ax,id='geometricmean',copy=0)
## if 'units' in xatt.keys() : out.units=xatt['units']+'*'+xatt['units']
## ## numpys only ?
## if not numpy.ma.isMA(x):
## out=out.filled(1.e20)
## return out
def _samplevariance(data):
"""Return the variance (Ssq/(N-1))
Usage:
svar=_samplevariance(data)
"""
return _ssdevs(data)/(numpy.ma.count(data,axis=0)-1.)
def unbiasedvariance(x,axis=0):
"""Return the variance (Ssq/(N-1))
Usage:
svar=unbiasedvariance(x,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_samplevariance(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='unbiasedvariance',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']+'*'+xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
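## Illustration only (commented out): for the hypothetical sample below the
## unbiased (N-1) estimate is 2.5, versus 2.0 for the biased estimate
## returned by variance().
## import numpy
## v = unbiasedvariance(numpy.ma.array([1., 2., 3., 4., 5.]))   # expected: 2.5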
def _variance(data):
"""Return the variance of data
Usage:
V=_variance(data)
"""
return numpy.ma.average(_center(data)**2,axis=0)
def variance(x,axis=0):
"""Return the variance of data
Usage:
V=variance(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_variance(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='variance',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']+'*'+xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _standarddeviation(data):
"""Returns stadard deviation of data
Usage:
std=_standarddeviation(data)
"""
    return numpy.ma.sqrt(_samplevariance(data))
def standarddeviation(x,axis=0):
"""Returns stadard deviation of data
Usage:
std=standarddeviation(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_standarddeviation(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='standarddeviation',copy=0)
        if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
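## Illustration only (commented out): standarddeviation() is the square root
## of the N-1 sample variance, so the same hypothetical sample gives
## sqrt(2.5) ~ 1.58.
## import numpy
## s = standarddeviation(numpy.ma.array([1., 2., 3., 4., 5.]))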
def _coefficentvariance(data):
"""Returns the coefficents variance of data
Usage:
coefvar=_coefficentvariance(data)
"""
return _standarddeviation(data)/numpy.ma.average(data,axis=0)
def coefficentvariance(x,axis=0):
"""Returns the coefficents variance of data
Usage:
coefvar=coefficentvariance(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_coefficentvariance(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='coefficentvariance',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _skewness(data):
"""Return the skewness of data
Usage:
skew=_skewness(data)
"""
moment2=_variance(data)
moment3=mean(numpy.ma.power(_center(data),3))
return moment3 / (moment2 * numpy.ma.sqrt(moment2))
def skewness(x,axis=0):
"""Return the skewness of data
Usage:
skew=skewness(data, axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_skewness(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='skewness',copy=0)
if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _kurtosis(data):
"""Return kurtosis value from dataset
Usage:
k=_kurtosis(data)
"""
moment2=_variance(data)
moment4=mean(numpy.ma.power(_center(data),4),axis=0)
return (moment4 / numpy.ma.power(moment2, 2)) - 3.0
def kurtosis(x,axis=0):
"""Return kurtosis value from dataset
Usage:
k=kurtosis(data, axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_kurtosis(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='kurtosis',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
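## Illustration only (commented out): kurtosis() returns the excess kurtosis
## (0 for a normal distribution); for the hypothetical sample below the
## expected value is about -1.3.
## import numpy
## k = kurtosis(numpy.ma.array([1., 2., 3., 4., 5.]))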
def _standarderror(data):
"""Returns the standard error from dataset
Usage:
stderr=_standarderror(data)
"""
    return _standarddeviation(data)/numpy.ma.sqrt(numpy.ma.count(data,axis=0))
def standarderror(x,axis=0):
"""Returns the standard error from dataset
Usage:
stderr=standarderror(data,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_standarderror(x)
if not ax is None:
out=cdms2.createVariable(out,axes=ax,id='standarderror',copy=0)
        if 'units' in xatt.keys() : out.units=xatt['units']
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
def _mode(data):
"""returns the mode of the data
Usage:
md=_mode(data)
"""
sortlist=numpy.ma.sort(data,axis=0)
mode=sortlist[0]
dupcount=numpy.ma.zeros(mode.shape)
dupmax=numpy.ma.zeros(mode.shape)
N=data.shape[0]
for i in range(1,N):
c=numpy.ma.equal(sortlist[i],sortlist[i-1])
dupcount=numpy.ma.where(c,dupcount+1,0.)
c2=numpy.ma.greater(dupcount,dupmax)
dupmax=numpy.ma.where(c2,dupcount,dupmax)
mode=numpy.ma.where(c2,sortlist[i],mode)
return mode
def mode(x,axis=0):
"""returns the mode of the data
Usage:
md=mode(data, axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,None,None,axis)
out=_mode(x)
if not ax is None:
        out=cdms2.createVariable(out,axes=ax,id='mode',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
out=out.filled(1.e20)
return out
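## Illustration only (commented out): mode() picks the value with the longest
## run of duplicates in the sorted data, here 2. for the hypothetical input.
## import numpy
## m = mode(numpy.ma.array([1., 2., 2., 3., 2., 4.]))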
def _OneSampleTTest(data, usermean):
"""
This performs a single factor t test for a set of data and a user
hypothesised mean value.
    Usage: t, df, prob = _OneSampleTTest(data, usermean)
Returns: t, df (degrees of freedom), prob (probability)
"""
df=numpy.ma.count(data,axis=0)-1
svar = (df * _samplevariance(data)) / df
t = (mean(data) - usermean) / numpy.ma.sqrt(svar*(1.0/(df+1.)))
prob = _betai(0.5*df,0.5,df/(df+ t**2))
return t,df,prob
def OneSampleTTest(x,y,axis=0,df=1):
"""
This performs a single factor t test for a set of data and a user
hypothesised mean value.
Usage: t, prob [,df] = OneSampleTTest(data, usermean, axis=axisoptions, df=1)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
df=1 : If set to 1 then the degrees of freedom are returned
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis,smally=1)
t,d,prob =_OneSampleTTest(x,y)
if not ax is None:
t=cdms2.createVariable(t,axes=ax,id='TTest',copy=0)
d=cdms2.createVariable(d,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
t=t.filled(1.e20)
d=d.filled(1.e20)
prob=prob.filled(1.e20)
out=[t,prob]
if df:
out.append(d)
return out
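## Illustration only (commented out): a hypothetical one-sample t-test of the
## null hypothesis that the data have a mean of 2.5; with the default df=1
## the returned list is [t, prob, df].
## import numpy
## t, prob, Df = OneSampleTTest(numpy.ma.array([1., 2., 3., 4., 5.]), 2.5)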
def _OneSampleSignTest(data, usermean):
"""
This method performs a single factor sign test. The data must be
supplied to this method along with a user hypothesised mean value.
Usage: nplus, nminus, ntotal, z, prob = OneSampleSignTest(data, usermean)
    Returns: nplus, nminus, ntotal, z, prob.
"""
nplus=numpy.ma.zeros(data.shape[1:])
nminus=numpy.ma.zeros(data.shape[1:])
for i in range(data.shape[0]):
c=numpy.ma.less(data[i],usermean)
nplus=numpy.ma.where(c,nplus+1,nplus)
c=numpy.ma.greater(data[i],usermean)
nminus=numpy.ma.where(c,nminus+1,nminus)
ntotal = add(nplus, nminus)
    z=(nplus-(ntotal/2))/numpy.ma.sqrt(ntotal/2)
prob=_erfcc(numpy.ma.absolute(z) / numpy.ma.sqrt(2))
return nplus,nminus,ntotal,z,prob
def OneSampleSignTest(x,y,axis=0):
"""
OneSampleSignTest
This method performs a single factor sign test. The data must be
supplied to this method along with a user hypothesised mean value.
Usage:
    nplus, nminus, ntotal, z, prob = OneSampleSignTest(data, usermean, axis=axisoptions)
    Returns: nplus, nminus, ntotal, z, prob.
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis,smally=1)
nplus, nminus, ntotal, z, prob =_OneSampleSignTest(x,y)
if not ax is None:
z=cdms2.createVariable(z,axes=ax,id='z',copy=0)
nminus=cdms2.createVariable(nminus,axes=ax,id='nminus',copy=0)
nplus=cdms2.createVariable(nplus,axes=ax,id='nplus',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
z=z.filled(1.e20)
nplus=nplus.filled(1.e20)
nminus=nminus.filled(1.e20)
prob=prob.filled()
return nplus, nminus, ntotal, z, prob
def _ChiSquareVariance(data, usermean):
"""
This method performs a Chi Square test for the variance ratio.
Usage: chisquare, df, prob = ChiSquareVariance(data, usermean)
Returns: chisquare, df, prob
"""
df = numpy.ma.count(data,axis=0) - 1
chisquare = (_standarderror(data) / usermean) * df
prob = _chisqprob(chisquare, df)
return chisquare, df, prob
def ChiSquareVariance(x,y,axis=0, df=1):
"""
This method performs a Chi Square test for the variance ratio.
Usage:
chisquare, prob, [df] = ChiSquareVariance(data, usermean, axis=axisoptions, df=1)
    Returns: chisquare, prob [, df]
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis,smally=1)
chisquare, Df, prob =_ChiSquareVariance(x,y)
if not ax is None:
chisquare = cdms2.createVariable(chisquare,axes=ax,id='chisquare',copy=0)
Df=cdms2.createVariable(Df,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
chisquare=chisquare.filled(1.e20)
Df=Df.filled(1.e20)
prob=prob.filled()
out=[chisquare, prob]
if df:
out.append(Df)
return out
# two sample tests - instantiates descriptives class for both
# data sets, then has each test as a method
def _TTestUnpaired(data1, data2):
"""
This performs an unpaired t-test.
Usage: t, df, prob = TTestUnpaired(data1, data2)
Returns: t, df, prob
"""
N1s=numpy.ma.count(data1,axis=0)
N2s=numpy.ma.count(data2,axis=0)
df = (N1s + N2s) - 2
svar = ((N1s-1)*_samplevariance(data1)+\
(N2s-1)*_samplevariance(data2)) / df
t = (mean(data1)-mean(data2)) \
/ numpy.ma.sqrt(svar* (1.0/N1s + 1.0/N2s))
prob = _betai(0.5*df,0.5,df/(df+t**2))
return t, df, prob
def TTestUnpaired(x,y,axis=0,df=1):
"""
This performs an unpaired t-test.
Usage: t, prob, [df] = TTestUnpaired(data1, data2,axis=axisoptions, df=1)
    Returns: t, prob [, df]
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1: if set to 1 the degrees of freedom are returned
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
t, Df, prob =_TTestUnpaired(x,y)
if not ax is None:
t = cdms2.createVariable(t,axes=ax,id='TTestUnpaired',copy=0)
Df=cdms2.createVariable(Df,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
t=t.filled(1.e20)
Df=Df.filled(1.e20)
prob=prob.filled()
out=[t, prob]
if df:
out.append(Df)
return out
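## Illustration only (commented out): unpaired t-test on two small
## hypothetical samples; with df=1 (the default) the degrees of freedom
## (N1+N2-2) are appended to the returned [t, prob] list.
## import numpy
## a = numpy.ma.array([1., 2., 3., 4., 5.])
## b = numpy.ma.array([2., 3., 4., 5., 6.])
## t, prob, Df = TTestUnpaired(a, b)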
def _TTestPaired(data1, data2):
"""
This method performs a paired t-test on two data sets.
Usage: t, df, prob = TTestPaired(data1, data2)
Returns: t, df, prob
"""
cov = 0.0
N1s=numpy.ma.count(data1,axis=0)
df = N1s - 1
cov=numpy.ma.sum((_center(data1) * _center(data2)), axis=0)
cov = cov / df
sd = numpy.ma.sqrt((_samplevariance(data1) + _samplevariance(data2) - 2.0 * \
cov) / N1s)
t = (mean(data1, axis=0) - mean(data2, axis=0)) / sd
prob = _betai(0.5*df,0.5,df/(df+ t**2))
return t, df, prob
def TTestPaired(x,y,axis=0,df=1):
"""
    This performs a paired t-test.
    Usage: t, prob, [df] = TTestPaired(data1, data2, axis=axisoptions, df=1)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1: if set to 1 the degrees of freedom are returned
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
t, Df, prob =_TTestPaired(x,y)
if not ax is None:
t = cdms2.createVariable(t,axes=ax,id='TTestPaired',copy=0)
Df=cdms2.createVariable(Df,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
t=t.filled(1.e20)
Df=Df.filled(1.e20)
prob=prob.filled()
out=[t, prob]
if df:
out.append(Df)
return out
def _PearsonsCorrelation(data1, data2):
"""
This method performs a Pearsons correlation upon two sets of data
Usage: r, t, df, prob = PearsonsCorrelation(data1, data2)
Returns: r, t, df, prob
"""
TINY = 1.0e-60
summult = reduce(add, map(multiply, data1, data2))
N1=numpy.ma.count(data1,axis=0)
N2=numpy.ma.count(data2,axis=0)
s1=numpy.ma.sum(data1,axis=0)
s2=numpy.ma.sum(data2,axis=0)
r_num = N1 * summult - s1 * s2
r_left = N1*_sumsquares(data1)-(s1**2)
r_right= N2*_sumsquares(data2)-(s2**2)
r_den = numpy.ma.sqrt(r_left*r_right)
r = r_num / r_den
df = N1 - 2
t = r*numpy.ma.sqrt(df/((1.0-r+TINY)* (1.0+r+TINY)))
prob = _betai(0.5*df,0.5,df/(df+t**2))
return r, t, df, prob
def PearsonsCorrelation(x,y,axis=0,df=1):
"""
This method performs a Pearsons correlation upon two sets of data
Usage: r, t, prob, [df] = PearsonsCorrelation(data1, data2,axis=0,df=1)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1: if set to 1 the degrees of freedom are returned
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
r, t, Df, prob =_PearsonsCorrelation(x,y)
if not ax is None:
r = cdms2.createVariable(r,axes=ax,id='PearsonsCorrelation',copy=0)
t = cdms2.createVariable(t,axes=ax,id='TTest',copy=0)
Df=cdms2.createVariable(Df,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
r=r.filled(1.e20)
t=t.filled(1.e20)
Df=Df.filled(1.e20)
prob=prob.filled()
out=[r, t, prob]
if df:
out.append(Df)
return out
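## Illustration only (commented out): Pearson correlation of two perfectly
## linearly related hypothetical samples; r should come out at essentially 1.
## import numpy
## a = numpy.ma.array([1., 2., 3., 4., 5.])
## r, t, prob, Df = PearsonsCorrelation(a, 2.*a + 1.)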
def _FTest(data1, data2, uservar):
"""
This method performs a F test for variance ratio and needs a user
hypothesised variance to be supplied.
    Usage: f, df1, df2, prob = _FTest(data1, data2, uservar)
Returns: f, df1, df2, prob
"""
f = (_samplevariance(data1) / _samplevariance(data2)) / uservar
df1 = numpy.ma.count(data1,axis=0) - 1
df2 = numpy.ma.count(data2,axis=0) - 1
prob=_fprob(df1, df2, f)
return f, df1, df2, prob
def FTest(data1, data2, uservar, axis=0, df=1):
"""
This method performs a F test for variance ratio and needs a user
hypothesised variance to be supplied.
Usage: f, prob [,df1, df2] = FTest(data1, data2, uservar, axis=axisoptions, df=1)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1: if set to 1 the degrees of freedom are returned
"""
    data1 = _fixScalar(data1)
data2 = _fixScalar(data2)
x,y,weights,axis,ax=__checker(data1,data2,None,axis)
x,z,weights,axis,ax=__checker(data1,uservar,None,axis,smally=1)
f, df1, df2, prob = _FTest(x,y,z)
if not ax is None:
f = cdms2.createVariable(f,axes=ax,id='Ftest',copy=0)
df1=cdms2.createVariable(df1,axes=ax,id='df1',copy=0)
df2=cdms2.createVariable(df2,axes=ax,id='df2',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
        f=f.filled(1.e20)
df1=df1.filled(1.e20)
df2=df2.filled(1.e20)
prob=prob.filled()
out=[f, prob]
if df:
out.append(df1)
out.append(df2)
return out
def _TwoSampleSignTest(data1, data2):
"""
This method performs a 2 sample sign test for matched samples on 2
supplied data sets
Usage: nplus, nminus, ntotal, z, prob = TwoSampleSignTest(data1, data2)
Returns: nplus, nminus, ntotal, z, prob
"""
nplus=numpy.ma.zeros(data1.shape[1:])
nminus=numpy.ma.zeros(data1.shape[1:])
for i in range(data1.shape[0]):
c=numpy.ma.greater(data1[i],data2[i])
nplus=numpy.ma.where(c,nplus+1,nplus)
c=numpy.ma.less(data1[i],data2[i])
nminus=numpy.ma.where(c,nminus+1,nminus)
ntotal=nplus-nminus
mean=numpy.ma.count(data1,axis=0) / 2
sd = numpy.ma.sqrt(mean)
z = (nplus-mean)/sd
prob = _erfcc(numpy.ma.absolute(z)/numpy.ma.sqrt(2.))
return nplus, nminus, ntotal, z, prob
def TwoSampleSignTest(x,y,axis=0):
"""
This method performs a 2 sample sign test for matched samples on 2
supplied data sets
Usage: nplus, nminus, ntotal, z, prob = TwoSampleSignTest(data1, data2)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
nplus,nminus,ntotal,z, prob = _TwoSampleSignTest(x,y)
if not ax is None:
z=cdms2.createVariable(z,axes=ax,id='z',copy=0)
nminus=cdms2.createVariable(nminus,axes=ax,id='nminus',copy=0)
nplus=cdms2.createVariable(nplus,axes=ax,id='nplus',copy=0)
ntotal=cdms2.createVariable(ntotal,axes=ax,id='ntotal',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
z=z.filled(1.e20)
nplus=nplus.filled(1.e20)
nminus=nminus.filled(1.e20)
ntotal=ntotal.filled(1.e20)
prob=prob.filled()
return nplus,nminus,ntotal,z,prob
def _KendallsTau(data1, data2):
"""
This method performs a Kendalls tau correlation upon 2 data sets.
Usage: tau, z, prob = KendallsTau(data1, data2)
Returns: tau, z, prob
"""
n1 = numpy.ma.zeros(data1.shape[1:])
n2 = numpy.ma.zeros(data1.shape[1:])
iss = numpy.ma.zeros(data1.shape[1:])
N1=data1.shape[0]
N2=data2.shape[0]
for j in range(N1-1):
for k in range(j,N2):
a1 = data1[j] - data1[k]
a2 = data2[j] - data2[k]
aa = a1 * a2
c=numpy.ma.not_equal(aa,0)
c2=numpy.ma.greater(aa,0)
n1=numpy.ma.where(c,n1+1,n1)
n2=numpy.ma.where(c,n2+1,n2)
iss=numpy.ma.where(c2,iss+1,iss)
c2=numpy.ma.less(aa,0)
iss=numpy.ma.where(c2,iss-1,iss)
c=numpy.ma.logical_not(c)
c1=numpy.ma.logical_and(c,numpy.ma.not_equal(a1,0))
n1=numpy.ma.where(c1,n1+1,n1)
c1=numpy.ma.logical_and(c,numpy.ma.equal(a1,0))
n2=numpy.ma.where(c1,n2+1,n2)
tau = iss / numpy.ma.sqrt(n1*n2)
N1s=numpy.ma.count(data1,axis=0)
svar = (4.0*N1s+10.0) / (9.0*N1s*(N1s-1))
z = tau / numpy.ma.sqrt(svar)
prob = _erfcc(numpy.ma.absolute(z)/numpy.ma.sqrt(2.))
return tau, z, prob
def KendallsTau(x,y,axis=0):
"""
This method performs a Kendalls tau correlation upon 2 data sets.
Usage: tau, z, prob = KendallsTau(data1, data2)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
tau, z, prob = _KendallsTau(x,y)
if not ax is None:
z=cdms2.createVariable(z,axes=ax,id='z',copy=0)
tau=cdms2.createVariable(tau,axes=ax,id='tau',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
z=z.filled(1.e20)
tau=tau.filled(1.e20)
prob=prob.filled()
return tau ,z,prob
def _KolmogorovSmirnov(data1,data2):
"""
This method performs a Kolmogorov-Smirnov test for unmatched samples
upon 2 data vectors.
Usage: d, prob = KolmogorovSmirnov(data1, data2)
Returns: d, prob
"""
data3 = numpy.ma.sort(data1,axis=0)
data4 = numpy.ma.sort(data2,axis=0)
j1 = numpy.ma.zeros(data3.shape[1:],'d')
j2 = numpy.ma.zeros(data3.shape[1:],'d')
fn1 = numpy.ma.zeros(data3.shape[1:],'d')
fn2 = numpy.ma.zeros(data3.shape[1:],'d')
d = numpy.ma.zeros(data3.shape[1:],'d')
N1s=numpy.ma.count(data1,axis=0)
N2s=numpy.ma.count(data2,axis=0)
c1=N1s-j1
c2=N2s-j2
cc=c1-c1
while not numpy.ma.allequal(cc,1):
tmpc=numpy.ma.less(j1,N1s)
jj=numpy.ma.where(numpy.ma.less(j1,N1s),j1,N1s-1.)
d1=array_indexing.extract(data3,jj)
jj=numpy.ma.where(numpy.ma.less(j2,N2s),j2,N2s-1.)
d2=array_indexing.extract(data4,jj)
c3=numpy.ma.logical_and(numpy.ma.less_equal(d1,d2),numpy.ma.less(j1,N1s))
fn1=numpy.ma.where(c3,j1/N1s,fn1)
j1=numpy.ma.where(c3,j1+1,j1)
c3=numpy.ma.logical_and(numpy.ma.less_equal(d2,d1),numpy.ma.less(j2,N2s))
fn2=numpy.ma.where(c3,j2/N2s,fn2)
j2=numpy.ma.where(c3,j2+1,j2)
dt = fn2-fn1
c3=numpy.ma.greater(numpy.ma.absolute(dt),numpy.ma.absolute(d))
d=numpy.ma.where(c3,dt,d)
c1=N1s-j1
c2=N2s-j2
cc1=numpy.ma.equal(c1,0)
cc2=numpy.ma.equal(c2,0)
cc=numpy.ma.logical_or(cc1,cc2)
en = numpy.ma.sqrt(N1s*N2s/(N1s.astype('d')+N2s))
prob = _ksprob((en+0.12+0.11/en)*numpy.ma.absolute(d))
return d, prob
def KolmogorovSmirnov(x,y,axis=0):
"""
This method performs a Kolmogorov-Smirnov test for unmatched samples
upon 2 data vectors.
Usage: ks, prob = KolmogorovSmirnov(data1, data2)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
d, prob = _KolmogorovSmirnov(x,y)
if not ax is None:
d=cdms2.createVariable(d,axes=ax,id='KSTest',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
d=d.filled(1.e20)
prob=prob.filled()
return d,prob
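## Illustration only (commented out): two-sample Kolmogorov-Smirnov test on
## hypothetical unmatched samples; d is the largest distance between the two
## empirical distribution functions.
## import numpy
## a = numpy.ma.array([1., 2., 3., 4., 5.])
## b = numpy.ma.array([2., 4., 6., 8., 10.])
## d, prob = KolmogorovSmirnov(a, b)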
def _SpearmansCorrelation(data1, data2):
"""
This method performs a Spearmans correlation upon 2 data sets
Usage: rho, t, df, prob = SpearmansCorrelation(data1, data2)
Returns: rho, t, df, prob
"""
TINY = 1e-30
rankx = _rankdata(data1)
ranky = _rankdata(data2)
dsq = numpy.ma.sum(map(_diffsquared, rankx, ranky),axis=0)
N1=numpy.ma.count(data1,axis=0)
rho = 1 - 6*dsq / (N1*(N1**2-1.))
t = rho * numpy.ma.sqrt((N1-2) / ((rho+1.0+TINY)*(1.0-rho+TINY)))
df = N1-2
prob = _betai(0.5*df,0.5,df/(df+t**2))
return rho, t, df, prob
def SpearmansCorrelation(x,y,axis=0,df=1):
"""
This method performs a Spearmans correlation upon 2 data sets
    Usage: rho, t, prob [, df] = SpearmansCorrelation(data1, data2, axis=0, df=1)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
df=1 : If set to 1 returns the degrees of freedom
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
rho, t, d, prob = _SpearmansCorrelation(x,y)
if not ax is None:
rho=cdms2.createVariable(rho,axes=ax,id='SpearmansCorrelation',copy=0)
t=cdms2.createVariable(t,axes=ax,id='t',copy=0)
d=cdms2.createVariable(d,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
d=d.filled(1.e20)
t=t.filled()
rho=rho.filled()
prob=prob.filled()
out=[rho,t,prob]
if df:
out.append(d)
return out
def _WilcoxonRankSums(data1, data2, Z_MAX = 6.0):
"""
This method performs a Wilcoxon rank sums test for unpaired designs
upon 2 data vectors.
Usage: z, prob = WilcoxonRankSums(data1, data2, Z_MAX = 6.0)
Returns: z, prob
"""
N=data1.shape[0]
alldata = numpy.ma.concatenate((data1,data2),axis=0)
ranked = _rankdata(alldata)
x = ranked[:N]
s = numpy.ma.sum(x, axis=0)
N1=numpy.ma.count(data1,axis=0)
N2=numpy.ma.count(data2,axis=0)
expected = N1*(N1+N2+1) / 2.0
    z = (s - expected) / numpy.ma.sqrt(N1*N2 * (N1+N2+1.)/12.0)
prob = 2*(1.0 -_zprob(numpy.ma.absolute(z),Z_MAX))
return z, prob
def WilcoxonRankSums(x,y, Z_MAX = 6.0, axis=0):
"""
This method performs a Wilcoxon rank sums test for unpaired designs
upon 2 data vectors.
Usage: z, prob = WilcoxonRankSums(data1, data2, Z_MAX = 6.0, axis=axisoption)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    Z_MAX: Maximum meaningful value for z probability (default = 6)
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x1,y,weights,axis,ax=__checker(x,y,None,axis)
if numpy.ma.isMA(Z_MAX):
x,Z_MAX,weights,axis,ax=__checker(x,Z_MAX,None,axis)
z, prob = _WilcoxonRankSums(x1,y,Z_MAX)
if not ax is None:
z=cdms2.createVariable(z,axes=ax,id='WilcoxonRankSumsTest',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
z=z.filled(1.e20)
prob=prob.filled()
return z,prob
def _WilcoxonSignedRanks(data1, data2, Z_MAX=6.):
"""
This method performs a Wilcoxon Signed Ranks test for matched samples
upon 2 data set.
Usage: wt, z, prob = WilcoxonSignedRanks(data1, data2, Z_MAX = 6.0)
Returns: wt, z, prob
"""
N=data1.shape[0]
d=data1-data2[:N]
d=numpy.ma.masked_equal(d,0.)
count = numpy.ma.count(d,axis=0)
absd = numpy.ma.absolute(d)
absranked = _rankdata(absd.filled(1.E20))
r_plus = numpy.ma.zeros(d.shape[1:])
r_minus = numpy.ma.zeros(d.shape[1:])
for i in range(len(absd)):
c=numpy.ma.less(d[i],0.)
r_minus=numpy.ma.where(c,r_minus + absranked[i],r_minus)
r_plus=numpy.ma.where(numpy.ma.logical_not(c),r_plus + absranked[i],r_plus)
wt = numpy.ma.where(numpy.ma.greater(r_plus,r_minus),r_minus,r_plus)
mn = count * (count+1) * 0.25
se = numpy.ma.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = numpy.ma.absolute(wt-mn) / se
prob = 2*(1.0 -_zprob(numpy.ma.absolute(z),Z_MAX))
return wt, z, prob
def WilcoxonSignedRanks(x,y, Z_MAX=6., axis=0):
"""
This method performs a Wilcoxon Signed Ranks test for matched samples
upon 2 data set.
Usage: wt, z, prob = WilcoxonSignedRanks(data1, data2, Z_MAX = 6.0, axis=0)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    Z_MAX: Maximum meaningful value for z probability (default = 6)
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x1,y,weights,axis,ax=__checker(x,y,None,axis)
if numpy.ma.isMA(Z_MAX):
x,Z_MAX,weights,axis,ax=__checker(x,Z_MAX,None,axis)
wt, z, prob = _WilcoxonSignedRanks(x1,y,Z_MAX)
if not ax is None:
wt=cdms2.createVariable(wt,axes=ax,id='W',copy=0)
z=cdms2.createVariable(z,axes=ax,id='Z',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
wt=wt.filled(1.e20)
z=z.filled(1.e20)
prob=prob.filled()
return wt,z,prob
def _MannWhitneyU(data1, data2, Z_MAX=6.0):
"""
This method performs a Mann Whitney U test for unmatched samples on
2 data vectors.
Usage: bigu, smallu, z, prob = MannWhitneyU(data1, data2, Z_MAX=6.0)
Returns: bigu, smallu, z, prob
"""
N=data1.shape[0]
N1=numpy.ma.count(data1,axis=0)
N2=numpy.ma.count(data2,axis=0)
ranked = _rankdata(numpy.ma.concatenate((data1,data2),axis=0))
rankx = ranked[0:N]
u1 = N1*N2+(N1*(N1+1))/2.0-numpy.ma.sum(rankx,axis=0)
u2 = N1*N2 - u1
bigu = numpy.ma.maximum(u1,u2)
smallu = numpy.ma.minimum(u1,u2)
T = numpy.ma.sqrt(_tiecorrect(ranked))
sd = numpy.ma.sqrt(T*N1*N2*(N1+N2+1)/12.0)
z = numpy.ma.absolute((bigu-N1*N2/2.0) / sd)
prob = 1.0-_zprob(z,Z_MAX)
return bigu, smallu, z, prob
def MannWhitneyU(x, y, Z_MAX=6.0, axis=0):
"""
This method performs a Mann Whitney U test for unmatched samples on
2 data vectors.
Usage: bigu, smallu, z, prob = MannWhitneyU(data1, data2, Z_MAX=6.0, axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    Z_MAX: Maximum meaningful value for z probability (default = 6)
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x1,y,weights,axis,ax=__checker(x,y,None,axis)
if numpy.ma.isMA(Z_MAX):
x,Z_MAX,weights,axis,ax=__checker(x,Z_MAX,None,axis)
bigu, smallu, z, prob = _MannWhitneyU(x1,y,Z_MAX)
if not ax is None:
bigu=cdms2.createVariable(bigu,axes=ax,id='bigU',copy=0)
smallu=cdms2.createVariable(smallu,axes=ax,id='smallU',copy=0)
z=cdms2.createVariable(z,axes=ax,id='z',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
bigu=bigu.filled(1.e20)
smallu=smallu.filled(1.e20)
z=z.filled(1.e20)
prob=prob.filled()
return bigu, smallu, z, prob
def _LinearRegression(x, y):
"""
This method performs a linear regression upon 2 data vectors.
Usage: r, df, t, prob, slope, intercept, sterrest = LinearRegression(x,y)
Returns: r, df, t, prob, slope, intercept, sterrest
"""
TINY = 1.0e-20
summult = numpy.ma.sum(x*y,axis=0)
N1=numpy.ma.count(x,axis=0)
N2=numpy.ma.count(y,axis=0)
s1=numpy.ma.sum(x,axis=0)
s2=numpy.ma.sum(y,axis=0)
sq1=_sumsquares(x)
r_num = N1*summult - s1*s2
r_den = numpy.ma.sqrt((N1*sq1 - (s1**2))*(N2*_sumsquares(y) - (s2**2)))
r = r_num / r_den
#[] warning - z not used - is there a line missing here?
## z = 0.5*math.log((1.0+self.r+TINY)/(1.0-self.r+TINY))
df = N1 - 2
t = r*numpy.ma.sqrt(df/((1.0-r+TINY)*(1.0+ r+TINY)))
prob = _betai(0.5*df,0.5,df/(df+t**2))
slope = r_num / (N1*sq1 - (s1**2))
intercept = mean(y, axis=0) - slope*mean(x, axis=0)
sterrest = numpy.ma.sqrt(1-r**2)*numpy.ma.sqrt(_variance(y))
return r, df, t, prob, slope, intercept, sterrest
def LinearRegression(x, y, df=1, axis=0):
"""
This method performs a linear regression upon 2 data vectors.
Usage: r, t, prob, slope, intercept, sterrest [,df] = LinearRegression(x,y,df=1,axis=axisoptions)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
df=1: If set to 1 then df is returned
"""
x = _fixScalar(x)
y = _fixScalar(y)
x,y,weights,axis,ax=__checker(x,y,None,axis)
r, d, t, prob, slope, intercept, sterrest = _LinearRegression(x,y)
if not ax is None:
r=cdms2.createVariable(r,axes=ax,id='r',copy=0)
d=cdms2.createVariable(d,axes=ax,id='df',copy=0)
t=cdms2.createVariable(t,axes=ax,id='t',copy=0)
slope=cdms2.createVariable(slope,axes=ax,id='slope',copy=0)
intercept=cdms2.createVariable(intercept,axes=ax,id='intercept',copy=0)
sterrest=cdms2.createVariable(sterrest,axes=ax,id='standarderror',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
r=r.filled(1.e20)
t=t.filled(1.e20)
slope=slope.filled(1.e20)
intercept=intercept.filled(1.e20)
sterrest=sterrest.filled(1.e20)
d=d.filled(1.e20)
prob=prob.filled()
out = [ r, t, prob, slope, intercept, sterrest]
if df:
out.append(d)
return out
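## Illustration only (commented out): least-squares fit of y on x for a
## hypothetical exact line y = 2x + 1; slope and intercept should recover
## (approximately) 2. and 1.
## import numpy
## xs = numpy.ma.array([1., 2., 3., 4., 5.])
## r, t, prob, slope, intercept, sterr, Df = LinearRegression(xs, 2.*xs + 1.)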
def _PairedPermutation(x, y, nperm=None):
"""
This method performs a permutation test for matched samples upon 2 set
    This code was modified from Segal and further modified by C. Doutriaux
Usage: utail, crit, prob = PairedPermutation(x,y,nperm=None)
nperm is the number of permutation wanted, default len(x)+1
Returns: utail, crit, prob
"""
utail = numpy.ma.zeros(x.shape[1:],'d')
sh=list(x.shape)
sh.insert(0,1)
## Figures out how many permutation we want to do
if nperm is None:
nperm = x.shape[0]+1
xy=numpy.ma.resize(x-y,sh)
yx=numpy.ma.resize(y-x,sh)
xy=numpy.ma.concatenate((xy,yx),axis=0)
crit = numpy.ma.sum(xy[0],axis=0)
for i in range(nperm):
index=numpy.random.randint(0,2,x.shape)
tmp=array_indexing.extract(xy,index)
sum=numpy.ma.sum(tmp,axis=0)
utail=numpy.ma.where(numpy.ma.greater_equal(sum,crit),utail+1.,utail)
prob = utail / nperm
return utail, crit, prob
def PairedPermutation(x, y, nperm=None, axis=0):
"""
This method performs a permutation test for matched samples upon 2 set
    This code was modified from Segal and further modified by C. Doutriaux
Usage: utail, crit, prob = PairedPermutation(x,y,nperm=None)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
nperm is the number of permutation wanted, default len(axis)+1
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
utail, crit, prob = _PairedPermutation(x,y,nperm)
if not ax is None:
utail=cdms2.createVariable(utail,axes=ax,id='utail',copy=0)
crit=cdms2.createVariable(crit,axes=ax,id='crit',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
crit=crit.filled(1.e20)
utail=utail.filled(1.e20)
prob=prob.filled(1.e20)
return utail, crit, prob
## def PointBiserialr(self, x, y):
## TINY = 1e-30
## if len(x) <> len(y):
## return -1.0, -1.0
## data = pstat.abut(x,y) # [] pstat module not available!
## categories = pstat.unique(x)
## if len(categories) <> 2:
## return -1.0, -2.0
## else: # [] there are 2 categories, continue
## codemap = pstat.abut(categories,range(2))
## recoded = pstat.recode(data,codemap,0) # [] can prob delete this line
## x = pstat.linexand(data,0,categories[0])
## y = pstat.linexand(data,0,categories[1])
## xmean = mean(pstat.colex(x,1)) # [] use descriptives!
## ymean = mean(pstat.colex(y,1)) # [] use descriptives!
## n = len(data)
## adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
## rpb = (ymean - xmean)/samplestdev(pstat.colex(data,1))*adjust
## df = n-2
## t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
## prob = _betai(0.5*df,0.5,df/(df+t*t)) # t already a float
## return rpb, prob
def _ChiSquare(x, y):
"""
    This method performs a chi square on 2 data sets.
Usage: chisq, df, prob = ChiSquare(x,y)
Returns: chisq, df, prob
"""
df = numpy.ma.count(x,axis=0)
chisq=numpy.ma.sum((x-y)**2/y,axis=0)
prob = _chisqprob(chisq, df)
return chisq, df, prob
def ChiSquare(x, y, axis=0, df=1):
"""
    This method performs a chi square on 2 data sets.
    Usage: chisq, prob [, df] = ChiSquare(x, y, axis=axisoptions, df=1)
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
"""
x = _fixScalar(x)
y = _fixScalar(y)
if cdms2.isVariable(x) : xatt=x.attributes
if cdms2.isVariable(y) : yatt=y.attributes
x,y,weights,axis,ax=__checker(x,y,None,axis)
chisq, d, prob = _ChiSquare(x,y)
if not ax is None:
chisq=cdms2.createVariable(chisq,axes=ax,id='chisq',copy=0)
d=cdms2.createVariable(d,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
## numpys only ?
if not numpy.ma.isMA(x):
chisq=chisq.filled(1.e20)
d=d.filled(1.e20)
prob=prob.filled(1.e20)
out=[chisq,prob]
if df:
out.append(d)
return out
## 2 or more datasets from here
def pack(arrays):
""" Pack a list of arrays into one array
"""
k=len(arrays)
sh=list(arrays[0].shape)
sh.insert(0,k)
data=numpy.ma.zeros(sh,dtype='d')
for i in range(k):
data[i]=arrays[i]
return data
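## Illustration only (commented out): pack() simply stacks same-shaped arrays
## along a new leading axis; this is how the multi-sample tests below receive
## their input internally.
## import numpy
## stacked = pack([numpy.ma.array([1., 2.]), numpy.ma.array([3., 4.])])   # shape (2, 2)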
def _anovaWithin( *inlist):
"""
This method is specialised for SalStat, and is best left alone.
For the brave:
Usage:
SSint, SSres, SSbet, SStot, dfbet, dfwit, dfres, dftot, MSbet, MSwit, MSres, F, prob = anovaWithin(*inlist).
inlist, being as many arrays as you wish
"""
inlist=pack(inlist)
k = inlist.shape[0]
sums=numpy.ma.sum(inlist,axis=1)
Nlist=numpy.ma.count(inlist,axis=1)
meanlist=numpy.ma.average(inlist,axis=1)
GN=numpy.ma.sum(Nlist,axis=0)
GS=numpy.ma.sum(sums,axis=0)
GM=numpy.ma.average(meanlist,axis=0)
SSwit=inlist-meanlist[:,None,...]
SSwit=numpy.ma.sum(SSwit**2,axis=0)
SSwit=numpy.ma.sum(SSwit,axis=0)
SStot=inlist-GM
SStot=numpy.ma.sum(SStot**2,axis=0)
SStot=numpy.ma.sum(SStot,axis=0)
SSbet=meanlist-GM
SSbet=numpy.ma.sum(SSbet**2,axis=0)*GN/float(k)
SSint = 0.0
sh=range(len(inlist.shape))
sh[0]=1
sh[1]=0
mean=numpy.ma.average(inlist,axis=0)
SSint=numpy.ma.sum((mean-GM)**2, axis=0)*k
SSres = SSwit - SSint
dfbet = (k - 1)*numpy.ma.ones(GN.shape)
dfwit = GN - k
dfres = (Nlist[0] - 1) * (k - 1)
dftot = dfbet + dfwit + dfres
MSbet = SSbet / dfbet
MSwit = SSwit / dfwit
MSres = SSres / dfres
F = MSbet / MSres
prob = _fprob(dfbet, dfres, F)
return SSint, SSres, SSbet, SStot, dfbet, dfwit, dfres, dftot, MSbet, MSwit, MSres, F, prob
def anovaWithin( *inlist,**kw):
"""
This method is specialised for SalStat, and is best left alone.
For the brave:
Usage:
SSint, SSres, SSbet, SStot, MSbet, MSwit, MSres, F, prob [, dfbet, dfwit, dfres, dftot] = anovaWithin(*inlist,axis=axisoptions).
inlist, being as many arrays as you wish
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1 : if 1 then degrees of freedom are returned
WARNING: axis and df MUST be passed as keyword, as all arguments are considered as arrays
"""
    if len(inlist)<2:
        raise ValueError('Error must have at least 2 arrays!')
if not 'axis' in kw.keys():
axis=0
else:
axis=kw['axis']
if not 'df' in kw.keys():
df=1
else:
df=kw['df']
for i in range(1,len(inlist)):
x,y,weights,axis,ax=__checker(inlist[0],inlist[i],None,axis)
if i==1:
newlist=[x,y]
else:
newlist.append(y)
SSint, SSres, SSbet, SStot, dfbet, dfwit, dfres, dftot, MSbet, MSwit, MSres, F, prob = apply(_anovaWithin,newlist)
if not ax is None:
SSint=cdms2.createVariable(SSint,axes=ax,id='SSint',copy=0)
SSres=cdms2.createVariable(SSres,axes=ax,id='SSres',copy=0)
SSbet=cdms2.createVariable(SSbet,axes=ax,id='SSbet',copy=0)
SStot=cdms2.createVariable(SStot,axes=ax,id='SStot',copy=0)
dfbet=cdms2.createVariable(dfbet,axes=ax,id='dfbet',copy=0)
dfwit=cdms2.createVariable(dfwit,axes=ax,id='dfwit',copy=0)
dfres=cdms2.createVariable(dfres,axes=ax,id='dfres',copy=0)
dftot=cdms2.createVariable(dftot,axes=ax,id='dftot',copy=0)
MSbet=cdms2.createVariable(MSbet,axes=ax,id='MSbet',copy=0)
MSwit=cdms2.createVariable(MSwit,axes=ax,id='MSwit',copy=0)
MSres=cdms2.createVariable(MSres,axes=ax,id='MSres',copy=0)
F=cdms2.createVariable(F,axes=ax,id='F',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
out= [SSint, SSres, SSbet, SStot, MSbet, MSwit, MSres, F, prob]
if df:
out.append(dfbet)
out.append(dfwit)
out.append(dfres)
out.append(dftot)
return out
## To be tested
def _anovaBetween(*descs):
"""
This method performs a univariate single factor between-subjects
analysis of variance on a list of lists (or a numpy matrix). It is
specialised for SalStat and best left alone.
Usage: SSbet, SSwit, SStot, dfbet, dferr, dftot, MSbet, MSerr, F, prob = anovaBetween(*arrays).
"""
descs=pack(descs)
k = descs.shape[0]
M=numpy.ma.average(descs,axis=1)
ssdev=numpy.ma.sum((descs-M[:,None,...])**2,axis=1)
SSwit=numpy.ma.sum(ssdev,axis=0)
Ns=numpy.ma.count(descs,axis=1)
GN=numpy.ma.sum(Ns,axis=0)
GM=numpy.ma.average(M,axis=0)
SSbet=numpy.ma.sum((M-GM)**2,axis=0)
SSbet = SSbet * Ns[0]
SStot = SSwit + SSbet
dfbet = numpy.ma.ones(SSbet.shape)*(k - 1)
dferr = GN - k
dftot = dfbet + dferr
MSbet = SSbet / dfbet
MSerr = SSwit / dferr
F = MSbet / MSerr
prob = _fprob(dfbet, dferr, F)
return SSbet, SSwit, SStot, dfbet, dferr, dftot, MSbet, MSerr, F, prob
def anovaBetween(*inlist,**kw):
"""
This method performs a univariate single factor between-subjects
analysis of variance on a list of lists (or a numpy matrix). It is
specialised for SalStat and best left alone.
Usage: SSbet, SSwit, SStot, MSbet, MSerr, F, prob [, dfbet, dferr, dftot] = anovaBetween(*arrays,axis=axisoptions).
inlist, being as many arrays as you wish
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1 : if 1 then degrees of freedom are returned
WARNING: axis and df MUST be passed as keyword, as all arguments are considered as arrays
"""
    if len(inlist)<2:
        raise ValueError('Error must have at least 2 arrays!')
if not 'axis' in kw.keys():
axis=0
else:
axis=kw['axis']
if not 'df' in kw.keys():
df=1
else:
df=kw['df']
for i in range(1,len(inlist)):
x,y,weights,axis,ax=__checker(inlist[0],inlist[i],None,axis)
if i==1:
newlist=[x,y]
else:
newlist.append(y)
SSbet, SSwit, SStot, dfbet, dferr, dftot, MSbet, MSerr, F, prob = apply(_anovaBetween,newlist)
if not ax is None:
SSbet=cdms2.createVariable(SSbet,axes=ax,id='SSbet',copy=0)
SSwit=cdms2.createVariable(SSwit,axes=ax,id='SSwit',copy=0)
SStot=cdms2.createVariable(SStot,axes=ax,id='SStot',copy=0)
dfbet=cdms2.createVariable(dfbet,axes=ax,id='dfbet',copy=0)
dferr=cdms2.createVariable(dferr,axes=ax,id='dferr',copy=0)
dftot=cdms2.createVariable(dftot,axes=ax,id='dftot',copy=0)
MSbet=cdms2.createVariable(MSbet,axes=ax,id='MSbet',copy=0)
MSerr=cdms2.createVariable(MSerr,axes=ax,id='MSerr',copy=0)
F=cdms2.createVariable(F,axes=ax,id='F',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
out= [SSbet, SSwit, SStot, MSbet, MSerr, F, prob]
if df:
out.append(dfbet)
out.append(dferr)
out.append(dftot)
return out
def _KruskalWallisH(*args):
"""
This method performs a Kruskal Wallis test (like a nonparametric
    between subjects anova) on a series of arrays.
Usage: h, df, prob = KruskalWallisH(*args).
"""
narrays=len(args)
args = pack(args)
n = numpy.ma.count(args,axis=1)
all=numpy.ma.array(args[0],copy=1)
for i in range(1,narrays):
all = numpy.ma.concatenate((all,args[i]),axis=0)
ranked = _rankdata(all)
del(all)
T = _tiecorrect(ranked)
offset=0
for i in range(narrays):
nn=args[i].shape[0]
args[i] = ranked[offset:offset+nn]
offset+=nn
del(ranked)
rsums = numpy.ma.zeros(args[0].shape,'d')
ssbn=numpy.ma.zeros(args[0].shape[1:],'d')
totaln=numpy.ma.sum(n,axis=0)
rsums=numpy.ma.sum(args,axis=1)**2/n
ssbn = numpy.ma.sum(rsums,axis=0)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
h = h / T
df=numpy.ma.ones(h.shape)*(narrays-1.)
prob = _chisqprob(h,df)
return h, df, prob
def KruskalWallisH(*inlist,**kw):
"""
This method performs a Kruskal Wallis test (like a nonparametric
    between subjects anova) on a series of arrays.
    Usage: h, prob [, df] = KruskalWallisH(*args, axis=axisoptions, df=1).
inlist, being as many arrays as you wish
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1 : if 1 then degrees of freedom are returned
WARNING: axis and df MUST be passed as keyword, as all arguments are considered as arrays
"""
if len(inlist)<2:
        raise ValueError('Error: must have at least 2 arrays!')
if not 'axis' in kw.keys():
axis=0
else:
axis=kw['axis']
if not 'df' in kw.keys():
df=1
else:
df=kw['df']
for i in range(1,len(inlist)):
x,y,weights,axis,ax=__checker(inlist[0],inlist[i],None,axis)
if i==1:
newlist=[x,y]
else:
newlist.append(y)
    h, d, prob = _KruskalWallisH(*newlist)
if not ax is None:
h=cdms2.createVariable(h,axes=ax,id='KruskalWallisH',copy=0)
d=cdms2.createVariable(d,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
out=[h,prob]
if df:
out.append(d)
return out
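# Illustrative sketch (not part of the original module): a Kruskal Wallis H test on
# three hypothetical samples, under the same assumption about __checker as above.
def _example_kruskal_wallis():
    import numpy
    s1 = numpy.ma.array([1.0, 2.0, 3.0, 4.0])
    s2 = numpy.ma.array([2.5, 3.5, 4.5, 5.5])
    s3 = numpy.ma.array([5.0, 6.0, 7.0, 8.0])
    # Returns [h, prob] plus the degrees of freedom because df defaults to 1.
    h, prob, d = KruskalWallisH(s1, s2, s3, axis=0)
    return h, prob, d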
def _FriedmanChiSquare( *args):
"""
This method performs a Friedman chi square (like a nonparametric
within subjects anova) on a list of lists.
    Usage: sumranks, chisq, df, prob = FriedmanChiSquare(*args).
"""
## First put it all in a big array
data=pack(args)
k=data.shape[0]
n=data.shape[1]
## Transpose the data (nargs/0axis, rest is left identical)
    tr = list(range(numpy.ma.rank(data[0]) + 1))
tr[0]=1
tr[1]=0
data=numpy.ma.transpose(data,tr)
data2=data*1.
## ranks it
for i in range(n):
data[i] = _rankdata(data[i])
sumranks = numpy.ma.sum(data,axis=0)
tmp=numpy.ma.sum(data,axis=0)
ssbn=numpy.ma.sum(tmp**2, axis=0)
sums=tmp/numpy.ma.count(data,axis=0)
chisq = (12.0 / (k*n*(k+1))) * ssbn - 3*n*(k+1)
df = numpy.ma.ones(chisq.shape)*(k-1)
prob = _chisqprob(chisq,df)
return sumranks, chisq, df, prob
def FriedmanChiSquare( *inlist, **kw):
"""
This method performs a Friedman chi square (like a nonparametric
within subjects anova) on a list of lists.
    Usage: sumranks, chisq, df, prob = FriedmanChiSquare(*args, axis=axisoptions, df=1).
inlist, being as many arrays as you wish
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1 : if 1 then degrees of freedom are returned
WARNING: axis and df MUST be passed as keyword, as all arguments are considered as arrays
"""
if len(inlist)<2:
        raise ValueError('Error: must have at least 2 arrays!')
if not 'axis' in kw.keys():
axis=0
else:
axis=kw['axis']
if not 'df' in kw.keys():
df=1
else:
df=kw['df']
for i in range(1,len(inlist)):
x,y,weights,axis,ax=__checker(inlist[0],inlist[i],None,axis)
if i==1:
newlist=[x,y]
else:
newlist.append(y)
    sumranks, h, d, prob = _FriedmanChiSquare(*newlist)
if not ax is None:
h=cdms2.createVariable(h,axes=ax,id='FriedmanChiSquare',copy=0)
d=cdms2.createVariable(d,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
sumranks=cdms2.createVariable(sumranks,id='sumranks',copy=0)
ax.insert(0,sumranks.getAxis(0))
sumranks.setAxisList(ax)
out=[sumranks,h,prob]
if df:
out.append(d)
return out
def _CochranesQ( *inlist):
"""
    This method performs a Cochran's Q test upon a list of lists.
Usage: q, df, prob = CochranesQ(*inlist)
Returns: q, df, prob
"""
## First put it all in a big array
data=pack(inlist)
k=data.shape[0]
n=data.shape[1]
g=numpy.ma.sum(data,axis=1)
gtot=numpy.ma.sum(g**2,axis=0)
rowsum=numpy.ma.sum(data,axis=0)
l=numpy.ma.sum(rowsum,axis=0)
lsq=numpy.ma.sum(rowsum**2,axis=0)
q = ((k-1)*((k*gtot)-(l**2)))/((k*l)-lsq)
df = numpy.ma.ones(q.shape)*(k - 1)
prob = _chisqprob(q, df)
return q, df, prob
def CochranesQ( *inlist,**kw):
"""
    This method performs a Cochran's Q test upon a list of lists.
Usage: q, df, prob = CochranesQ(*inlist)
inlist, being as many arrays as you wish
Options:
axisoptions 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n
default value = 0. You can pass the name of the dimension or index
(integer value 0...n) over which you want to compute the statistic.
you can also pass 'xy' to work on both axes at once
    df=1 : if 1 then degrees of freedom are returned
WARNING: axis and df MUST be passed as keyword, as all arguments are considered as arrays
"""
if len(inlist)<2:
        raise ValueError('Error: must have at least 2 arrays!')
if not 'axis' in kw.keys():
axis=0
else:
axis=kw['axis']
if not 'df' in kw.keys():
df=1
else:
df=kw['df']
for i in range(1,len(inlist)):
x,y,weights,axis,ax=__checker(inlist[0],inlist[i],None,axis)
if i==1:
newlist=[x,y]
else:
newlist.append(y)
    h, d, prob = _CochranesQ(*newlist)
if not ax is None:
h=cdms2.createVariable(h,axes=ax,id='CochranesQ',copy=0)
d=cdms2.createVariable(d,axes=ax,id='df',copy=0)
prob=cdms2.createVariable(prob,axes=ax,id='probability',copy=0)
out=[h,prob]
if df:
out.append(d)
return out
## class FriedmanComp:
## """This class performs multiple comparisons on a Friedman's
## test. Passed values are the medians, k (# conditions), n
## (# samples), and the alpha value. Currently, all comparisons
## are performed regardless. Assumes a balanced design.
## ALSO: does not work yet!
## """
## def __init__(self, medians, k, n, p):
## crit = _inversechi(p, k-1)
## value = crit * math.sqrt((k * (k + 1)) / (6 * n * k))
## self.outstr = '<p>Multiple Comparisons for Friedmans test:</p>'
## self.outstr=self.outstr+'<br>Critical Value (>= for sig) = '+str(crit)
## for i in range(len(medians)):
## for j in range(i+1, len(medians)):
## if (i != j):
## self.outstr = self.outstr+'<br>'+str(i+1)+' against '+str(j+1)
## diff = abs(medians[i] - medians[j])
## self.outstr = self.outstr+' = '+str(diff)
## class KWComp:
## """This class performs multiple comparisons on a Kruskal Wallis
## test. Passed values are the medians, k (# conditions), n
## (# samples), and the alpha value. Currently, all comparisons
## are performed regardless. Assumes a balanced design.
## Further note - not completed by any means! DO NOT USE THIS YET!"""
## def __init__(self, medians, k, n, p):
## crit = _inversechi(p, k-1)
## value = crit * math.sqrt((k * (k + 1)) / (6 * n * k))
## self.outstr = '<p>Multiple Comparisons for Friedmans test:</p>'
## self.outstr=self.outstr+'<br>Critical Value (>= for sig) = '+str(crit)
## for i in range(len(medians)):
## for j in range(i+1, len(medians)):
## if (i != j):
## self.outstr = self.outstr+'<br>'+str(i+1)+' against '+str(j+1)
## diff = abs(medians[i] - medians[j])
## self.outstr = self.outstr+' = '+str(diff)
|
PypiClean
|
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/network/list_network_manager_deployment_status.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ListNetworkManagerDeploymentStatusResult',
'AwaitableListNetworkManagerDeploymentStatusResult',
'list_network_manager_deployment_status',
'list_network_manager_deployment_status_output',
]
@pulumi.output_type
class ListNetworkManagerDeploymentStatusResult:
"""
A list of Network Manager Deployment Status
"""
def __init__(__self__, skip_token=None, value=None):
if skip_token and not isinstance(skip_token, str):
raise TypeError("Expected argument 'skip_token' to be a str")
pulumi.set(__self__, "skip_token", skip_token)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="skipToken")
def skip_token(self) -> Optional[str]:
"""
When present, the value can be passed to a subsequent query call (together with the same query and scopes used in the current request) to retrieve the next page of data.
"""
return pulumi.get(self, "skip_token")
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.NetworkManagerDeploymentStatusResponse']]:
"""
Gets a page of Network Manager Deployment Status
"""
return pulumi.get(self, "value")
class AwaitableListNetworkManagerDeploymentStatusResult(ListNetworkManagerDeploymentStatusResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListNetworkManagerDeploymentStatusResult(
skip_token=self.skip_token,
value=self.value)
def list_network_manager_deployment_status(deployment_types: Optional[Sequence[Union[str, 'ConfigurationType']]] = None,
network_manager_name: Optional[str] = None,
regions: Optional[Sequence[str]] = None,
resource_group_name: Optional[str] = None,
skip_token: Optional[str] = None,
top: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListNetworkManagerDeploymentStatusResult:
"""
Post to List of Network Manager Deployment Status.
Azure REST API version: 2023-02-01.
:param Sequence[Union[str, 'ConfigurationType']] deployment_types: List of deployment types.
:param str network_manager_name: The name of the network manager.
:param Sequence[str] regions: List of locations.
:param str resource_group_name: The name of the resource group.
:param str skip_token: Continuation token for pagination, capturing the next page size and offset, as well as the context of the query.
:param int top: An optional query parameter which specifies the maximum number of records to be returned by the server.
"""
__args__ = dict()
__args__['deploymentTypes'] = deployment_types
__args__['networkManagerName'] = network_manager_name
__args__['regions'] = regions
__args__['resourceGroupName'] = resource_group_name
__args__['skipToken'] = skip_token
__args__['top'] = top
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network:listNetworkManagerDeploymentStatus', __args__, opts=opts, typ=ListNetworkManagerDeploymentStatusResult).value
return AwaitableListNetworkManagerDeploymentStatusResult(
skip_token=pulumi.get(__ret__, 'skip_token'),
value=pulumi.get(__ret__, 'value'))
@_utilities.lift_output_func(list_network_manager_deployment_status)
def list_network_manager_deployment_status_output(deployment_types: Optional[pulumi.Input[Optional[Sequence[Union[str, 'ConfigurationType']]]]] = None,
network_manager_name: Optional[pulumi.Input[str]] = None,
regions: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
skip_token: Optional[pulumi.Input[Optional[str]]] = None,
top: Optional[pulumi.Input[Optional[int]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListNetworkManagerDeploymentStatusResult]:
"""
Post to List of Network Manager Deployment Status.
Azure REST API version: 2023-02-01.
:param Sequence[Union[str, 'ConfigurationType']] deployment_types: List of deployment types.
:param str network_manager_name: The name of the network manager.
:param Sequence[str] regions: List of locations.
:param str resource_group_name: The name of the resource group.
:param str skip_token: Continuation token for pagination, capturing the next page size and offset, as well as the context of the query.
:param int top: An optional query parameter which specifies the maximum number of records to be returned by the server.
"""
...
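# Illustrative sketch (not part of the generated SDK): inside a Pulumi program the
# plain invoke form might be called as below. The resource group, network manager
# name and region are hypothetical placeholders.
#
#   status = list_network_manager_deployment_status(
#       resource_group_name="example-rg",
#       network_manager_name="example-network-manager",
#       regions=["eastus"],
#       deployment_types=["Connectivity"],
#   )
#   for entry in status.value or []:
#       pulumi.log.info(str(entry))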
|
PypiClean
|
/commonmeta_py-0.7.1-py3-none-any.whl/commonmeta/readers/cff_reader.py
|
from typing import Optional
import requests
import yaml
from urllib.parse import urlparse
from ..utils import (
normalize_id,
name_to_fos,
dict_to_spdx,
normalize_orcid,
github_as_cff_url,
github_as_repo_url,
)
from ..base_utils import compact, wrap, presence, sanitize, parse_attributes
from ..date_utils import get_iso8601_date
from ..doi_utils import doi_from_url
from ..constants import Commonmeta
def get_cff(pid: str, **kwargs) -> dict:
"""get_cff"""
url = github_as_cff_url(pid)
response = requests.get(url, kwargs, timeout=10)
if response.status_code != 200:
return {"state": "not_found"}
text = response.text
repo_url = github_as_repo_url(url)
data = yaml.safe_load(text)
# collect metadata not included in the CFF file
if data.get("repository-code", None) is None:
data["repository-code"] = repo_url
return data
def read_cff(data: Optional[dict], **kwargs) -> Commonmeta:
"""read_cff"""
if data is None:
return {"state": "not_found"}
meta = data
read_options = kwargs or {}
# read_options = ActiveSupport::HashWithIndifferentAccess.new(options.except(:doi, :id, :url, :sandbox, :validate, :ra))
# identifiers = Array.wrap(meta.fetch('identifiers', nil)).map do |r|
# r = normalize_id(r) if r.is_a?(String)
# if r.is_a?(String) && URI(r).host != 'doi.org'
# { 'identifierType' => 'URL', 'identifier' => r }
# elsif r.is_a?(Hash)
# { 'identifierType' => get_identifier_type(r['propertyID']), 'identifier' => r['value'] }
# end
# end.compact.uniq
id_ = normalize_id(kwargs.get("doi", None) or meta.get("doi", None))
# Array.wrap(meta.fetch('identifiers', nil)).find do |i|
# i['type'] == 'doi'
# end.fetch('value', nil))
type_ = "Software"
url = normalize_id(meta.get("repository-code", None))
creators = cff_creators(wrap(meta.get("authors", None)))
if meta.get("title", None):
titles = [{"title": meta.get("title", None)}]
else:
titles = []
date = {
"published": get_iso8601_date(meta.get("date-released")) if meta.get("date-released", None) else None
}
publisher = {"name": "GitHub"} if url and urlparse(url).hostname == "github.com" else None
if meta.get("abstract", None):
descriptions = [
{
"description": sanitize(meta.get("abstract")),
"descriptionType": "Abstract",
}
]
else:
descriptions = []
subjects = [name_to_fos(i) for i in wrap(meta.get("keywords", None))]
license_ = meta.get("licenseId", None)
if license_ is not None:
license_ = dict_to_spdx({"id": meta.get("licenseId")})
references = cff_references(wrap(meta.get("references", None)))
state = "findable" if meta or read_options else "not_found"
return {
"id": id_,
"type": type_,
# 'identifiers' => identifiers,
"url": url,
"titles": titles,
"creators": creators,
"publisher": publisher,
"references": presence(references),
"date": date,
"descriptions": presence(descriptions),
"license": license_,
"version": meta.get("version", None),
"subjects": presence(subjects),
"provider": "DataCite" if id_ else "GitHub",
"state": state,
} | read_options
def cff_creators(creators):
"""cff_creators"""
def format_affiliation(affiliation):
"""format_affiliation"""
if isinstance(affiliation, str):
return {"name": affiliation}
if isinstance(affiliation, dict):
return compact(affiliation)
return None
# if a.is_a?(Hash)
# a
# elsif a.is_a?(Hash) && a.key?('#text_') && a['#text'].strip.blank?
# nil
# elsif a.is_a?(Hash) && a.key?('#text')
# { 'name' => a['#text'] }
# elsif a.strip.blank
def format_element(i):
"""format_element"""
if normalize_orcid(parse_attributes(i.get("orcid", None))):
id_ = normalize_orcid(parse_attributes(i.get("orcid", None)))
else:
id_ = None
if (
i.get("given-names", None)
or i.get("family-names", None)
or id_
):
given_name = parse_attributes(i.get("given-names", None))
family_name = parse_attributes(i.get("family-names", None))
affiliation = compact(
[format_affiliation(a) for a in wrap(i.get("affiliation", None))]
)
return compact(
{
"id": id_,
"type": "Person",
"givenName": given_name,
"familyName": family_name,
"affiliation": affiliation,
}
)
return {
"type": "Organization",
"name": i.get("name", None) or i.get("#text", None),
}
return [format_element(i) for i in creators]
def cff_references(references):
"""cff_references"""
def is_reference(i):
"""is_reference"""
return (
next(
(
item
                    for item in wrap(i.get("identifiers", None))
if item.get("type", None) == "doi"
),
None,
)
is not None
)
def map_reference(i):
"""map_element"""
identifier = next(
(
item
                for item in wrap(i.get("identifiers", None))
if item.get("type", None) == "doi"
),
None,
)
return compact(
{"doi": normalize_id(parse_attributes(identifier.get("value", None)))}
)
return [map_reference(i) for i in references if is_reference(i)]
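# Illustrative sketch (not part of the original module): fetching and converting the
# CITATION.cff of a public GitHub repository. The repository URL is hypothetical.
def _example_read_cff():
    data = get_cff("https://github.com/example-org/example-repo")
    if data.get("state") == "not_found":
        return None
    return read_cff(data)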
|
PypiClean
|
/dibrowse-0.0.1-py3-none-any.whl/dib/cli.py
|
# python std lib
import logging
import os
import pdb
import sys
import traceback
# 3rd party imports
from docopt import docopt, extras, Option, DocoptExit
base_args = """
Usage:
dib [<command>] [options] [<args> ...]
Commands:
cd Change to the specified directory
ls List all files in the specified directory
find Print absolute path for the specified file/directory
Options:
--help Show this help message and exit
--version Display the version number and exit
"""
sub_cd_args = """
Usage:
dib cd [options] [<path>]
Options:
-h, --help Show this help message and exit
"""
sub_ls_args = """
Usage:
dib ls [options] [<path>]
Options:
-a, --all List all files in a directory, includes hidden "." files (as ls -a)
-l Use a long list format (as ls -l)
-h, --help Show this help message and exit
"""
sub_find_args = """
Usage:
dib find (--type=<t>) [<path>]
Options:
    -t=<t>, --type=<t>    Either 'f' (for file) or 'd' (for dir)
-h, --help Show this help message and exit
"""
def parse_cli():
"""
Parse the CLI arguments and options
"""
import dib
try:
cli_args = docopt(
base_args,
options_first=True,
version=dib.__version__,
help=True,
)
except DocoptExit:
extras(
True,
dib.__version__,
[Option("-h", "--help", 0, True)],
base_args,
)
    # Use DEBUG log level (5) when the DEBUG env var is set, otherwise INFO (4)
dib.init_logging(5 if "DEBUG" in os.environ else 4)
argv = [cli_args["<command>"]] + cli_args["<args>"]
if cli_args["<command>"] == "cd":
sub_args = docopt(sub_cd_args, argv=argv)
elif cli_args["<command>"] == "ls":
sub_args = docopt(sub_ls_args, argv=argv)
elif cli_args["<command>"] == "find":
sub_args = docopt(sub_find_args, argv=argv)
else:
extras(
True,
dib.__version__,
[Option("-h", "--help", 0, True)],
base_args,
)
sys.exit(1)
# In some cases there is no additional sub args of things to extract
if cli_args["<args>"]:
sub_args["<sub_command>"] = cli_args["<args>"][0]
return (cli_args, sub_args)
def run(cli_args, sub_args):
"""
Execute the CLI
"""
log = logging.getLogger(__name__)
retcode = 0
log.debug(cli_args)
log.debug(sub_args)
from dib.core import DIB
core = DIB(current_dir=sub_args.get("<path>"))
if cli_args["<command>"] == "cd":
retcode = core.cd()
if cli_args["<command>"] == "ls":
flags = ""
if sub_args["--all"] and sub_args["-l"]:
flags = "-al"
elif sub_args["--all"]:
flags = "-a"
elif sub_args["-l"]:
flags = "-l"
retcode = core.ls(flags)
if cli_args["<command>"] == "find":
flag = ""
if sub_args["--type"] == "f":
flag = "f"
elif sub_args["--type"] == "d":
flag = "d"
else:
log.error("Option: '--type' must be either 'f' for 'file', or 'd' for 'directory'")
log.error("Exiting with exitcode 1")
return 1
if flag:
retcode = core.find(flag)
return retcode
def cli_entrypoint():
"""
Used by setup.py to create a cli entrypoint script
"""
try:
cli_args, sub_args = parse_cli()
exit_code = run(cli_args, sub_args)
sys.exit(exit_code)
except Exception:
ex_type, ex_value, ex_traceback = sys.exc_info()
if "DEBUG" in os.environ:
extype, value, tb = sys.exc_info()
traceback.print_exc()
if "PDB" in os.environ:
pdb.post_mortem(tb)
raise
else:
print(f"Exception type : {ex_type.__name__}")
print(f"EXCEPTION MESSAGE: {ex_value}")
print("To get more detailed exception set environment variable 'DEBUG=1'")
print("To PDB debug set environment variable 'PDB=1'")
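# Example invocations implied by the usage strings at the top of this module
# (illustrative only; paths and file names are hypothetical):
#   dib cd ~/projects
#   dib ls -al /tmp
#   dib find --type=f setup.py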
|
PypiClean
|
/mmkg_stvg_bert-0.1.0-cp39-cp39-manylinux1_x86_64.whl/mmkg_stvg_bert/models/networks/dlav0.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from os.path import join
import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
import numpy as np
BatchNorm = nn.BatchNorm2d
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(planes)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = BatchNorm(out_channels)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_channels)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
block=BasicBlock, residual_root=False, return_levels=False,
pool_size=7, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.return_levels = return_levels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
self.avgpool = nn.AvgPool2d(pool_size)
self.fc = nn.Conv2d(channels[-1], num_classes, kernel_size=1,
stride=1, padding=0, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_level(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
BatchNorm(planes),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(planes),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def forward(self, x):
y = []
x = self.base_layer(x)
for i in range(6):
x = getattr(self, 'level{}'.format(i))(x)
y.append(x)
if self.return_levels:
return y
else:
x = self.avgpool(x)
x = self.fc(x)
x = x.view(x.size(0), -1)
return x
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
fc = self.fc
if name.endswith('.pth'):
model_weights = torch.load(data + name)
else:
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
num_classes = len(model_weights[list(model_weights.keys())[-1]])
self.fc = nn.Conv2d(
self.channels[-1], num_classes,
kernel_size=1, stride=1, padding=0, bias=True)
self.load_state_dict(model_weights)
self.fc = fc
def dla34(pretrained, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
return model
def dla46_c(pretrained=None, **kwargs): # DLA-46-C
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla46_c')
return model
def dla46x_c(pretrained=None, **kwargs): # DLA-X-46-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla46x_c')
return model
def dla60x_c(pretrained, **kwargs): # DLA-X-60-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
return model
def dla60(pretrained=None, **kwargs): # DLA-60
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla60')
return model
def dla60x(pretrained=None, **kwargs): # DLA-X-60
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla60x')
return model
def dla102(pretrained=None, **kwargs): # DLA-102
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102')
return model
def dla102x(pretrained=None, **kwargs): # DLA-X-102
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102x')
return model
def dla102x2(pretrained=None, **kwargs): # DLA-X-102 64
BottleneckX.cardinality = 64
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla102x2')
return model
def dla169(pretrained=None, **kwargs): # DLA-169
Bottleneck.expansion = 2
model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(pretrained, 'dla169')
return model
def set_bn(bn):
    global BatchNorm
    BatchNorm = bn
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
class IDAUp(nn.Module):
def __init__(self, node_kernel, out_dim, channels, up_factors):
super(IDAUp, self).__init__()
self.channels = channels
self.out_dim = out_dim
for i, c in enumerate(channels):
if c == out_dim:
proj = Identity()
else:
proj = nn.Sequential(
nn.Conv2d(c, out_dim,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_dim),
nn.ReLU(inplace=True))
f = int(up_factors[i])
if f == 1:
up = Identity()
else:
up = nn.ConvTranspose2d(
out_dim, out_dim, f * 2, stride=f, padding=f // 2,
output_padding=0, groups=out_dim, bias=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
for i in range(1, len(channels)):
node = nn.Sequential(
nn.Conv2d(out_dim * 2, out_dim,
kernel_size=node_kernel, stride=1,
padding=node_kernel // 2, bias=False),
BatchNorm(out_dim),
nn.ReLU(inplace=True))
setattr(self, 'node_' + str(i), node)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, layers):
assert len(self.channels) == len(layers), \
'{} vs {} layers'.format(len(self.channels), len(layers))
layers = list(layers)
for i, l in enumerate(layers):
upsample = getattr(self, 'up_' + str(i))
project = getattr(self, 'proj_' + str(i))
layers[i] = upsample(project(l))
x = layers[0]
y = []
for i in range(1, len(layers)):
node = getattr(self, 'node_' + str(i))
x = node(torch.cat([x, layers[i]], 1))
y.append(x)
return x, y
class DLAUp(nn.Module):
def __init__(self, channels, scales=(1, 2, 4, 8, 16), in_channels=None):
super(DLAUp, self).__init__()
if in_channels is None:
in_channels = channels
self.channels = channels
channels = list(channels)
scales = np.array(scales, dtype=int)
for i in range(len(channels) - 1):
j = -i - 2
setattr(self, 'ida_{}'.format(i),
IDAUp(3, channels[j], in_channels[j:],
scales[j:] // scales[j]))
scales[j + 1:] = scales[j]
in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
def forward(self, layers):
layers = list(layers)
assert len(layers) > 1
for i in range(len(layers) - 1):
ida = getattr(self, 'ida_{}'.format(i))
x, y = ida(layers[-i - 2:])
layers[-i - 1:] = y
return x
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class DLASeg(nn.Module):
def __init__(self, base_name, heads,
pretrained=True, down_ratio=4, head_conv=256):
super(DLASeg, self).__init__()
assert down_ratio in [2, 4, 8, 16]
self.heads = heads
self.first_level = int(np.log2(down_ratio))
self.base = globals()[base_name](
pretrained=pretrained, return_levels=True)
channels = self.base.channels
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.dla_up = DLAUp(channels[self.first_level:], scales=scales)
'''
self.fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], classes, kernel_size=1,
stride=1, padding=0, bias=True)
)
'''
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(channels[self.first_level], classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
'''
up_factor = 2 ** self.first_level
if up_factor > 1:
up = nn.ConvTranspose2d(classes, classes, up_factor * 2,
stride=up_factor, padding=up_factor // 2,
output_padding=0, groups=classes,
bias=False)
fill_up_weights(up)
up.weight.requires_grad = False
else:
up = Identity()
self.up = up
self.softmax = nn.LogSoftmax(dim=1)
for m in self.fc.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
'''
def forward(self, x):
x = self.base(x)
x = self.dla_up(x[self.first_level:])
# x = self.fc(x)
# y = self.softmax(self.up(x))
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(x)
return [ret]
'''
def optim_parameters(self, memo=None):
for param in self.base.parameters():
yield param
for param in self.dla_up.parameters():
yield param
for param in self.fc.parameters():
yield param
'''
'''
def dla34up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla34', classes, pretrained_base=pretrained_base, **kwargs)
return model
def dla60up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla60', classes, pretrained_base=pretrained_base, **kwargs)
return model
def dla102up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla102', classes,
pretrained_base=pretrained_base, **kwargs)
return model
def dla169up(classes, pretrained_base=None, **kwargs):
model = DLASeg('dla169', classes,
pretrained_base=pretrained_base, **kwargs)
return model
'''
def get_pose_net(num_layers, heads, head_conv=256, bert_config_path='', bert_pretrained_model_path='', down_ratio=4):
model = DLASeg('dla{}'.format(num_layers), heads,
pretrained=True,
down_ratio=down_ratio,
head_conv=head_conv)
return model
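# Illustrative sketch (not part of the original module): building a DLA-34 based
# network with a hypothetical set of heads and running a dummy forward pass.
# Note that pretrained=True makes the base download ImageNet weights on first use.
def _example_get_pose_net():
    heads = {'hm': 80, 'wh': 2, 'reg': 2}   # hypothetical head sizes
    net = get_pose_net(34, heads, head_conv=256, down_ratio=4)
    dummy = torch.randn(1, 3, 512, 512)
    out = net(dummy)[0]   # forward() returns a one-element list holding a dict of head outputs
    return {name: tensor.shape for name, tensor in out.items()}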
|
PypiClean
|
/sequelize_to_excel-1.0.10.tar.gz/sequelize_to_excel-1.0.10/sequelize_to_excel.py
|
import re
import json;
import pandas as pd;
import logging
from pandas.io.json import json_normalize
class SequelizeToExcel(object):
def __init__(self,node_model_filename=None):
self.node_model_filename = node_model_filename
"""
This module will help you extract info from your standard
        sequelize model file into Excel format, consisting of column name, field name,
        auto increment, etc.
"""
def extract_and_export(self,export_type="excel"):
"""
        This function accepts a filename, removes noise, and
        maps fields to Excel columns
Args:
filename : command line input for file name
Returns :
Saves excel at the same location
"""
try:
# request for filename
if self.node_model_filename == None:
filename = input("pls enter the filename w/o extn :-> ")
else:
filename = self.node_model_filename
# Reads the file
f = open(filename+".js","r")
txt = f.read()
# uses regex to clear noise
x = re.sub("^\/*.*\n","",txt)
x2 = re.sub("^module.*\n..*',","[",x.strip())
x3 = x2.replace(");","")
x3 = x3.replace("};","]")
x3 = re.sub(r'DataTypes.',r'',x3)
x3 = re.sub("'",r'',x3)
x3 = re.sub(r'(\w+)',r'"\1"',x3)
x3 = x3.replace(" ","")
jsonObj = json.loads(x3)
listRows = []
# defines excel header and maps value
lstColumnHeader = ["name","type","allowNull","primaryKey","autoIncrement","field","defaultValue","references.model","references.key","tableName","version"]
for (k,v) in jsonObj[0].items():
listColumns = []
listColumns.append(k)
listColumns.append(v.get("type","na"))
listColumns.append(v.get("allowNull","na"))
listColumns.append(v.get("primaryKey","na"))
listColumns.append(v.get("autoIncrement","na"))
listColumns.append(v.get("field","na"))
listColumns.append(v.get("defaultValue","na"))
#listColumns.append(v.get("references","na"))
if v.get("references","na") != "na":
listColumns.append(v.get("references").get("model","na"))
listColumns.append(v.get("references").get("key","na"))
else:
listColumns.append("na")
listColumns.append("na")
listColumns.append(jsonObj[1].get("tableName","na"))
listColumns.append(jsonObj[1].get("version","na"))
listRows.append(listColumns)
if export_type == "excel":
df = pd.DataFrame(listRows,columns=lstColumnHeader)
df.to_excel(filename+".xlsx")
            else:
return listRows
except FileNotFoundError as fe:
logging.error(" File name passed is not present at the location ",stack_info=True)
except ValueError as ve:
logging.error(" attributes in the model are unknown ",stack_info=True)
except Exception as e:
logging.error(e)
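# Illustrative usage sketch (hypothetical model file name):
#   converter = SequelizeToExcel("user_model")    # expects ./user_model.js
#   converter.extract_and_export()                # writes ./user_model.xlsx
#   rows = SequelizeToExcel("user_model").extract_and_export(export_type="list")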
|
PypiClean
|
/hbp_archive-1.1.1.tar.gz/hbp_archive-1.1.1/README.md
|
A high-level API for interacting with the Human Brain Project archival storage at CSCS.
Authors: Andrew Davison (CNRS), Shailesh Appukuttan (CNRS) and Eszter Agnes Papp (University of Oslo)
Documentation: https://hbp-archive.readthedocs.io
Installation: `pip install hbp_archive`
Usage:
```python
from hbp_archive import Container, PublicContainer, Project, Archive
# Working with a public container
container = PublicContainer("https://object.cscs.ch/v1/AUTH_id/my_container")
files = container.list()
local_file = container.download("README.txt")
print(container.read("README.txt"))
number_of_files = container.count()
size_in_MB = container.size("MB")
# Working with a private container
container = Container("MyContainer", username="xyzabc") # you will be prompted for your password
files = container.list()
local_file = container.download("README.txt", overwrite=True) # default is not to overwrite existing files
print(container.read("README.txt"))
number_of_files = container.count()
size_in_MB = container.size("MB")
container.move("my_file.dat", "a_subdirectory", "new_name.dat") # move/rename file within a container
# Reading a file directly, without downloading it
with container.open("my_data.txt") as fp:
data = np.loadtxt(fp)
# Working with a project
my_proj = Project('MyProject', username="xyzabc")
container = my_proj.get_container("MyContainer")
# Listing all your projects
archive = Archive(username="xyzabc")
projects = archive.projects
container = archive.find_container("MyContainer") # will search through all projects
```
<div><img src="https://raw.githubusercontent.com/HumanBrainProject/hbp-validation-client/master/eu_logo.jpg" alt="EU Logo" width="15%" align="right"></div>
### ACKNOWLEDGEMENTS
This open source software code was developed in part or in whole in the Human Brain Project, funded from the European Union's Horizon 2020 Framework Programme for Research and Innovation under Specific Grant Agreements No. 720270, No. 785907 and No. 945539 (Human Brain Project SGA1, SGA2 and SGA3).
|
PypiClean
|
/solarcurtailment-2.0.0-py3-none-any.whl/solarcurtailment-2.0.0.dist-info/LICENSE.md
|
MIT License
Copyright (c) University of New South Wales-UNSW (please contact project lead Baran Yildiz at [email protected])
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
PypiClean
|
/python_odata-0.5.4-py3-none-any.whl/odata/navproperty.py
|
import copy
import importlib
from typing import Union
from odata.exceptions import ODataReflectionError
try:
# noinspection PyUnresolvedReferences
from urllib.parse import urljoin
except ImportError:
# noinspection PyUnresolvedReferences
from urlparse import urljoin
class NavigationProperty(object):
"""
A Property-like object for marking relationships between entities, but does
not inherit from PropertyBase.
"""
def __init__(self, name, entitycls: Union[type, str], entity_package: str = None, collection=False, foreign_key=None):
from odata.property import PropertyBase
self.name = name
self.class_package = entity_package
self.entityclass = entitycls
self.is_collection = collection
if isinstance(foreign_key, PropertyBase):
self.foreign_key = foreign_key.name
else:
self.foreign_key = foreign_key
def __repr__(self):
return u'<NavigationProperty to {0}>'.format(self.entitycls)
def __populate_entity(self, data, connection, parent_navigation_url):
result = self.entitycls.__new__(self.entitycls, from_data=data, connection=connection)
es = result.__odata__
es.parent_navigation_url = parent_navigation_url
return result
@property
def entitycls(self):
# if we've been given the type as a string
# we need to look for the actual type now, at runtime
if isinstance(self.entityclass, str):
if not self.class_package:
raise ODataReflectionError("Entitycls is a string, if you specify the entity class as a string you also need to specify "
"the class_package where that class can be imported from at runtime")
module = importlib.import_module(self.class_package)
self.entityclass = getattr(module, self.entityclass)
return self.entityclass
def instances_from_data(self, raw_data, connection, parent_navigation_url):
if self.is_collection:
return [self.__populate_entity(d, connection, parent_navigation_url) for d in raw_data]
else:
return self.__populate_entity(raw_data, connection, parent_navigation_url)
def _get_parent_cache(self, instance):
es = instance.__odata__
ic = es.nav_cache
if self.name not in ic:
cache = {}
ic[self.name] = cache
else:
cache = ic[self.name]
return cache
def __set__(self, instance, value):
"""
:type instance: odata.entity.EntityBase
"""
cache = self._get_parent_cache(instance)
if self.is_collection:
cache['collection'] = value
else:
cache['single'] = value
instance.__odata__.set_property_dirty(self)
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(f"Skipping recursive check for {item}")
if self.entitycls:
# we're doing a recursive query here
cpy = copy.copy(getattr(self.entitycls, item))
cpy.name = f"{self.name}/{item}"
return cpy
else:
raise Exception(f"Couldn't find {item} in {self.name}")
def navigation_url(self, instance):
es = instance.__odata__
parent_url = es.instance_url
if not parent_url:
parent_url = es.parent_navigation_url
if parent_url:
url = parent_url
if not url.endswith("/"):
url += "/"
url = urljoin(url, self.name)
return url
return None
def __get__(self, instance, owner):
"""
:type instance: odata.entity.EntityBase
"""
if instance is None:
return self
es = instance.__odata__
connection = es.connection
nav_url = self.navigation_url(instance)
new_object = nav_url is None
cache = self._get_parent_cache(instance)
if new_object:
if self.is_collection:
return cache.get('collection', [])
return cache.get('single', None)
if self.is_collection:
if 'collection' not in cache:
raw_data = connection.execute_get(nav_url)
if raw_data:
cache['collection'] = self.instances_from_data(raw_data['value'], connection, nav_url)
else:
cache['collection'] = []
return cache['collection']
else:
if 'single' not in cache:
raw_data = connection.execute_get(nav_url)
if raw_data:
value = self.instances_from_data(raw_data, connection, nav_url)
cache['single'] = value
else:
cache['single'] = None
return cache['single']
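# Illustrative sketch (not part of this module): declaring navigation properties on
# hypothetical entity classes. Passing the target class as a string together with
# entity_package defers the import of that class until it is first needed.
#
#   class Order(EntityBase):   # EntityBase stands in for the odata entity base class
#       Shipper = NavigationProperty('Shipper', 'Shipper',
#                                    entity_package='myapp.entities')
#       Details = NavigationProperty('OrderDetails', OrderDetail, collection=True)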
|
PypiClean
|
/tensorflow_fedora28-1.9.0rc01-cp27-cp27mu-manylinux1_x86_64.whl/tensorflow_fedora28-1.9.0rc0.data/purelib/tensorflow/contrib/framework/python/ops/prettyprint_ops.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
__all__ = ["print_op"]
def _get_tensor_repr(t,
print_tensor_name=True,
print_tensor_type=True,
print_shape=True,
summarize_indicator_vector=True):
"""Return a list of Tensors that summarize the given tensor t."""
tensor_list = []
if print_tensor_name and isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Name: " + t.name))
if print_tensor_type:
if isinstance(t, ops.Tensor):
t_type_str = "Type: Tensor ({})".format(t.dtype.name)
elif isinstance(t, sparse_tensor.SparseTensor):
t_type_str = "Type: SparseTensor ({})".format(t.dtype.name)
elif isinstance(t, tensor_array_ops.TensorArray):
t_type_str = "Type: TensorArray ({})".format(t.dtype.name)
elif isinstance(t, variables.Variable):
t_type_str = "Type: Variable ({})".format(t.dtype.name)
else:
raise ValueError("t must be a Tensor, SparseTensor, TensorArray or "
"Variable.")
tensor_list.append(constant_op.constant(t_type_str))
if print_shape:
if isinstance(t, sparse_tensor.SparseTensor):
tensor_list.append(constant_op.constant("Shape:"))
tensor_list.append(t.dense_shape)
elif isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Shape: " + str(t.get_shape(
).dims)))
elif isinstance(t, tensor_array_ops.TensorArray):
tensor_list.append(constant_op.constant("Size:"))
tensor_list.append(t.size())
if summarize_indicator_vector and t.dtype == dtypes.bool:
int_tensor = math_ops.cast(t, dtypes.uint8)
tensor_list.append(constant_op.constant("First True in Boolean tensor at:"))
tensor_list.append(math_ops.argmax(int_tensor, 0))
if isinstance(t, sparse_tensor.SparseTensor):
tensor_list.append(constant_op.constant("Sparse indices:"))
tensor_list.append(t.indices)
tensor_list.append(constant_op.constant("Sparse values:"))
tensor_list.append(t.values)
elif isinstance(t, ops.Tensor):
tensor_list.append(constant_op.constant("Value:"))
tensor_list.append(t)
elif isinstance(t, tensor_array_ops.TensorArray):
tensor_list.append(constant_op.constant("Value:"))
tensor_list.append(t.stack())
return tensor_list
def print_op(input_,
data=None,
message=None,
first_n=None,
summarize=20,
print_tensor_name=True,
print_tensor_type=True,
print_shape=True,
summarize_indicator_vector=True,
name=None):
"""Creates a print op that will print when a tensor is accessed.
Wraps the tensor passed in so that whenever that tensor is accessed,
the message `message` is printed, along with the current value of the
tensor `t` and an optional list of other tensors.
Args:
input_: A Tensor/SparseTensor/TensorArray to print when it is evaluated.
data: A list of other tensors to print.
message: A string message to print as a prefix.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Print this number of elements in the tensor.
print_tensor_name: Print the tensor name.
print_tensor_type: Print the tensor type.
print_shape: Print the tensor's shape.
summarize_indicator_vector: Whether to print the index of the first true
value in an indicator vector (a Boolean tensor).
name: The name to give this op.
Returns:
A Print op. The Print op returns `input_`.
Raises:
ValueError: If the tensor `input_` is not a Tensor, SparseTensor or
TensorArray.
"""
message = message or ""
if input_ is None:
raise ValueError("input_ must be of type "
"Tensor, SparseTensor or TensorArray")
tensor_list = _get_tensor_repr(input_, print_tensor_name, print_tensor_type,
print_shape, summarize_indicator_vector)
if data is not None:
for t in data:
tensor_list.extend(_get_tensor_repr(t, print_tensor_name,
print_tensor_type, print_shape,
summarize_indicator_vector))
if isinstance(input_, ops.Tensor) or isinstance(input_, variables.Variable):
input_ = logging_ops.Print(input_, tensor_list, message, first_n, summarize,
name)
elif isinstance(input_, sparse_tensor.SparseTensor):
p = logging_ops.Print(
constant_op.constant([]), tensor_list, message, first_n, summarize,
name)
with ops.control_dependencies([p]):
input_ = sparse_tensor.SparseTensor(
array_ops.identity(input_.indices),
array_ops.identity(input_.values),
array_ops.identity(input_.dense_shape))
elif isinstance(input_, tensor_array_ops.TensorArray):
p = logging_ops.Print(
constant_op.constant([]), tensor_list, message, first_n, summarize,
name)
with ops.control_dependencies([p]):
input_ = tensor_array_ops.TensorArray(dtype=input_.dtype,
handle=input_.handle,
flow=input_.flow)
else:
raise ValueError("input_ must be of type "
"Tensor, SparseTensor or TensorArray")
return input_
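# Illustrative usage sketch (not part of the original module), for TF1-style graphs:
#   x = constant_op.constant([1.0, 2.0, 3.0])
#   x = print_op(x, message="x summary: ", summarize=3)
#   # Evaluating x in a session then also prints its name, type, shape and value.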
|
PypiClean
|
/seekret.apitest-0.3.1-py3-none-any.whl/seekret/apitest/context/context.py
|
import contextlib
import logging
from typing import Any, Optional, Union, NewType, cast
from seekret.apitest.context.session import Session
logger = logging.getLogger(__name__)
_NotSet = NewType("_NotSet", object)
NOT_SET = cast(_NotSet, object())
User = Optional[Union[str, _NotSet]]
class Context(object):
"""
Seekret context and functions.
This class is the interface from the test function to the Seekret testing infrastructure. It is intended to be used
as a fixture and returned from the `seekret` and `seekret_module` fixtures.
"""
def __init__(self, session: Session, scope: str):
"""
Initialize the context.
:param session: Seekret session associated with this context.
:param scope: The scope of this context. The scope is used for logging only, and is
specified in the stage start log.
"""
self.session = session
self._scope = scope
self._current_stage_index = 1 # 1-based.
self.default_user: User = 'default'
@contextlib.contextmanager
def stage(self, method: str, path: str):
"""
Declare the next test stage targets the given endpoint.
The purpose of this function is to create a readable structure to tests.
:param method: Method of the stage target endpoint.
:param path: Path of the stage target endpoint.
:return: Callable value for performing requests to the target endpoint.
"""
if self._scope == 'function':
# Special case: in function scope don't print a prefix at all.
prefix = ''
else:
prefix = self._scope.title() + ' '
logger.info(
prefix +
f'Test Stage #{self._current_stage_index}: {method} {path}')
try:
yield _StageWrapper(self, method, path)
finally:
self._current_stage_index += 1
def request(self, *args, user: User = NOT_SET, **kwargs):
return self.session.request(
*args,
user=(self.default_user if user is NOT_SET else user),
**kwargs)
class _StageWrapper(object):
def __init__(self, context: Context, method: str, path: str):
self._context = context
self.method = method
self.path = path
def __call__(self, json: Optional[Any] = None, **kwargs):
return self._context.request(self.method,
self.path,
json=json,
**kwargs)
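# Illustrative sketch (not part of this module): how a test might use a Context
# instance exposed through a hypothetical 'seekret' fixture.
#
#   def test_create_user(seekret):
#       with seekret.stage('POST', '/users') as create_user:
#           create_user(json={'name': 'alice'})
#       with seekret.stage('GET', '/users') as list_users:
#           list_users(user=None)   # override the default user for this request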
|
PypiClean
|
/netbluemind4-4.9.2993.tar.gz/netbluemind4-4.9.2993/netbluemind/system/api/DomainTemplateKind.py
|
import requests
from netbluemind.python import serder
class DomainTemplateKind:
def __init__(self):
self.description = None
self.tags = None
self.id = None
pass
class __DomainTemplateKindSerDer__:
def __init__(self):
pass
def parse(self, value):
if (value == None):
return None
instance = DomainTemplateKind()
self.parseInternal(value, instance)
return instance
def parseInternal(self, value, instance):
from netbluemind.system.api.DomainTemplateDescription import DomainTemplateDescription
from netbluemind.system.api.DomainTemplateDescription import __DomainTemplateDescriptionSerDer__
descriptionValue = value['description']
instance.description = __DomainTemplateDescriptionSerDer__().parse(descriptionValue)
from netbluemind.system.api.DomainTemplateTag import DomainTemplateTag
from netbluemind.system.api.DomainTemplateTag import __DomainTemplateTagSerDer__
tagsValue = value['tags']
instance.tags = serder.ListSerDer(
__DomainTemplateTagSerDer__()).parse(tagsValue)
idValue = value['id']
instance.id = serder.STRING.parse(idValue)
return instance
def encode(self, value):
if (value == None):
return None
instance = dict()
self.encodeInternal(value, instance)
return instance
def encodeInternal(self, value, instance):
from netbluemind.system.api.DomainTemplateDescription import DomainTemplateDescription
from netbluemind.system.api.DomainTemplateDescription import __DomainTemplateDescriptionSerDer__
descriptionValue = value.description
instance["description"] = __DomainTemplateDescriptionSerDer__().encode(
descriptionValue)
from netbluemind.system.api.DomainTemplateTag import DomainTemplateTag
from netbluemind.system.api.DomainTemplateTag import __DomainTemplateTagSerDer__
tagsValue = value.tags
instance["tags"] = serder.ListSerDer(
__DomainTemplateTagSerDer__()).encode(tagsValue)
idValue = value.id
instance["id"] = serder.STRING.encode(idValue)
return instance
|
PypiClean
|
/node_managment_application-0.0.1.tar.gz/node_managment_application-0.0.1/nms_app/static/admin/js/vendor/xregexp/xregexp.min.js
|
(function(H){"object"===typeof exports&&"undefined"!==typeof module?module.exports=H():"function"===typeof define&&define.amd?define([],H):("undefined"!==typeof window?window:"undefined"!==typeof global?global:"undefined"!==typeof self?self:this).XRegExp=H()})(function(){return function c(d,g,p){function A(l,b){if(!g[l]){if(!d[l]){var k="function"==typeof require&&require;if(!b&&k)return k(l,!0);if(B)return B(l,!0);b=Error("Cannot find module '"+l+"'");throw b.code="MODULE_NOT_FOUND",b;}b=g[l]={exports:{}};
d[l][0].call(b.exports,function(b){var c=d[l][1][b];return A(c?c:b)},b,b.exports,c,d,g,p)}return g[l].exports}for(var B="function"==typeof require&&require,z=0;z<p.length;z++)A(p[z]);return A}({1:[function(d,g,p){g.exports=function(c){function A(b){var c=/^(?:\(\?:\))*\^/,l=/\$(?:\(\?:\))*$/;return c.test(b)&&l.test(b)&&l.test(b.replace(/\\[\s\S]/g,""))?b.replace(c,"").replace(l,""):b}function B(b,l){l=l?"x":"";return c.isRegExp(b)?b.xregexp&&b.xregexp.captureNames?b:c(b.source,l):c(b,l)}var z=/(\()(?!\?)|\\([1-9]\d*)|\\[\s\S]|\[(?:[^\\\]]|\\[\s\S])*\]/g,
l=c.union([/\({{([\w$]+)}}\)|{{([\w$]+)}}/,z],"g",{conjunction:"or"});c.build=function(b,k,g){g=g||"";var y=-1<g.indexOf("x"),m=/^\(\?([\w$]+)\)/.exec(b);m&&(g=c._clipDuplicates(g+m[1]));var h={},w;for(w in k)k.hasOwnProperty(w)&&(m=B(k[w],y),h[w]={pattern:A(m.source),names:m.xregexp.captureNames||[]});b=B(b,y);var x=0,v,q=0,f=[0],d=b.xregexp.captureNames||[];b=b.source.replace(l,function(b,c,m,l,y){var n=c||m;if(n){if(!h.hasOwnProperty(n))throw new ReferenceError("Undefined property "+b);if(c){var k=
d[q];f[++q]=++x;b="(?<"+(k||n)+">"}else b="(?:";v=x;return b+h[n].pattern.replace(z,function(f,b,c){if(b){if(k=h[n].names[x-v],++x,k)return"(?<"+k+">"}else if(c)return g=+c-1,h[n].names[g]?"\\k<"+h[n].names[g]+">":"\\"+(+c+v);return f})+")"}if(l){if(k=d[q],f[++q]=++x,k)return"(?<"+k+">"}else if(y){var g=+y-1;return d[g]?"\\k<"+d[g]+">":"\\"+f[+y]}return b});return c(b,g)}}},{}],2:[function(d,g,p){g.exports=function(c){function g(c,g,l,b){return{name:c,value:g,start:l,end:b}}c.matchRecursive=function(d,
p,l,b,k){b=b||"";k=k||{};var A=-1<b.indexOf("g"),y=-1<b.indexOf("y"),m=b.replace(/y/g,""),h=k.escapeChar;k=k.valueNames;var w=[],x=0,v=0,q=0,f=0;p=c(p,m);l=c(l,m);if(h){if(1<h.length)throw Error("Cannot use more than one escape character");h=c.escape(h);var z=new RegExp("(?:"+h+"[\\S\\s]|(?:(?!"+c.union([p,l],"",{conjunction:"or"}).source+")[^"+h+"])+)+",b.replace(/[^imu]+/g,""))}for(;;){h&&(q+=(c.exec(d,z,q,"sticky")||[""])[0].length);b=c.exec(d,p,q);m=c.exec(d,l,q);b&&m&&(b.index<=m.index?m=null:
b=null);if(b||m)v=(b||m).index,q=v+(b||m)[0].length;else if(!x)break;if(y&&!x&&v>f)break;if(b){if(!x){var n=v;var r=q}++x}else if(m&&x){if(!--x&&(k?(k[0]&&n>f&&w.push(g(k[0],d.slice(f,n),f,n)),k[1]&&w.push(g(k[1],d.slice(n,r),n,r)),k[2]&&w.push(g(k[2],d.slice(r,v),r,v)),k[3]&&w.push(g(k[3],d.slice(v,q),v,q))):w.push(d.slice(r,v)),f=q,!A))break}else throw Error("Unbalanced delimiter found in string");v===q&&++q}A&&!y&&k&&k[0]&&d.length>f&&w.push(g(k[0],d.slice(f),f,d.length));return w}}},{}],3:[function(d,
g,p){g.exports=function(c){function g(b){return b.replace(/[- _]+/g,"").toLowerCase()}function d(c){var m=/^\\[xu](.+)/.exec(c);return m?b(m[1]):c.charCodeAt("\\"===c.charAt(0)?1:0)}function p(b){var m="",h=-1;c.forEach(b,/(\\x..|\\u....|\\?[\s\S])(?:-(\\x..|\\u....|\\?[\s\S]))?/,function(b){var c=d(b[1]);c>h+1&&(m+="\\u"+C(k(h+1)),c>h+2&&(m+="-\\u"+C(k(c-1))));h=d(b[2]||b[1])});65535>h&&(m+="\\u"+C(k(h+1)),65534>h&&(m+="-\\uFFFF"));return m}var l={},b=c._dec,k=c._hex,C=c._pad4;c.addToken(/\\([pP])(?:{(\^?)([^}]*)}|([A-Za-z]))/,
function(b,c,h){var m="P"===b[1]||!!b[2],d=-1<h.indexOf("A");h=g(b[4]||b[3]);var k=l[h];if("P"===b[1]&&b[2])throw new SyntaxError("Invalid double negation "+b[0]);if(!l.hasOwnProperty(h))throw new SyntaxError("Unknown Unicode token "+b[0]);if(k.inverseOf){h=g(k.inverseOf);if(!l.hasOwnProperty(h))throw new ReferenceError("Unicode token missing data "+b[0]+" -> "+k.inverseOf);k=l[h];m=!m}if(!k.bmp&&!d)throw new SyntaxError("Astral mode required for Unicode token "+b[0]);if(d){if("class"===c)throw new SyntaxError("Astral mode does not support Unicode tokens within character classes");
b=m?"a!":"a=";(c=l[h][b])||(c=l[h],h=l[h],d="",h.bmp&&!h.isBmpLast&&(d="["+h.bmp+"]"+(h.astral?"|":"")),h.astral&&(d+=h.astral),h.isBmpLast&&h.bmp&&(d+=(h.astral?"|":"")+"["+h.bmp+"]"),c=c[b]=m?"(?:(?!"+d+")(?:[\ud800-\udbff][\udc00-\udfff]|[\x00-\uffff]))":"(?:"+d+")");return c}return"class"===c?m?l[h]["b!"]||(l[h]["b!"]=p(l[h].bmp)):k.bmp:(m?"[^":"[")+k.bmp+"]"},{scope:"all",optionalFlags:"A",leadChar:"\\"});c.addUnicodeData=function(b){for(var d,h=0;h<b.length;++h){d=b[h];if(!d.name)throw Error("Unicode token requires name");
if(!(d.inverseOf||d.bmp||d.astral))throw Error("Unicode token has no character data "+d.name);l[g(d.name)]=d;d.alias&&(l[g(d.alias)]=d)}c.cache.flush("patterns")};c._getUnicodeProperty=function(b){b=g(b);return l[b]}}},{}],4:[function(d,g,p){g.exports=function(c){if(!c.addUnicodeData)throw new ReferenceError("Unicode Base must be loaded before Unicode Blocks");c.addUnicodeData([{name:"InAdlam",astral:"\ud83a[\udd00-\udd5f]"},{name:"InAegean_Numbers",astral:"\ud800[\udd00-\udd3f]"},{name:"InAhom",
astral:"\ud805[\udf00-\udf3f]"},{name:"InAlchemical_Symbols",astral:"\ud83d[\udf00-\udf7f]"},{name:"InAlphabetic_Presentation_Forms",bmp:"\ufb00-\ufb4f"},{name:"InAnatolian_Hieroglyphs",astral:"\ud811[\udc00-\ude7f]"},{name:"InAncient_Greek_Musical_Notation",astral:"\ud834[\ude00-\ude4f]"},{name:"InAncient_Greek_Numbers",astral:"\ud800[\udd40-\udd8f]"},{name:"InAncient_Symbols",astral:"\ud800[\udd90-\uddcf]"},{name:"InArabic",bmp:"\u0600-\u06ff"},{name:"InArabic_Extended_A",bmp:"\u08a0-\u08ff"},{name:"InArabic_Mathematical_Alphabetic_Symbols",
astral:"\ud83b[\ude00-\udeff]"},{name:"InArabic_Presentation_Forms_A",bmp:"\ufb50-\ufdff"},{name:"InArabic_Presentation_Forms_B",bmp:"\ufe70-\ufeff"},{name:"InArabic_Supplement",bmp:"\u0750-\u077f"},{name:"InArmenian",bmp:"\u0530-\u058f"},{name:"InArrows",bmp:"\u2190-\u21ff"},{name:"InAvestan",astral:"\ud802[\udf00-\udf3f]"},{name:"InBalinese",bmp:"\u1b00-\u1b7f"},{name:"InBamum",bmp:"\ua6a0-\ua6ff"},{name:"InBamum_Supplement",astral:"\ud81a[\udc00-\ude3f]"},{name:"InBasic_Latin",bmp:"\x00-\u007f"},
{name:"InBassa_Vah",astral:"\ud81a[\uded0-\udeff]"},{name:"InBatak",bmp:"\u1bc0-\u1bff"},{name:"InBengali",bmp:"\u0980-\u09ff"},{name:"InBhaiksuki",astral:"\ud807[\udc00-\udc6f]"},{name:"InBlock_Elements",bmp:"\u2580-\u259f"},{name:"InBopomofo",bmp:"\u3100-\u312f"},{name:"InBopomofo_Extended",bmp:"\u31a0-\u31bf"},{name:"InBox_Drawing",bmp:"\u2500-\u257f"},{name:"InBrahmi",astral:"\ud804[\udc00-\udc7f]"},{name:"InBraille_Patterns",bmp:"\u2800-\u28ff"},{name:"InBuginese",bmp:"\u1a00-\u1a1f"},{name:"InBuhid",
bmp:"\u1740-\u175f"},{name:"InByzantine_Musical_Symbols",astral:"\ud834[\udc00-\udcff]"},{name:"InCJK_Compatibility",bmp:"\u3300-\u33ff"},{name:"InCJK_Compatibility_Forms",bmp:"\ufe30-\ufe4f"},{name:"InCJK_Compatibility_Ideographs",bmp:"\uf900-\ufaff"},{name:"InCJK_Compatibility_Ideographs_Supplement",astral:"\ud87e[\udc00-\ude1f]"},{name:"InCJK_Radicals_Supplement",bmp:"\u2e80-\u2eff"},{name:"InCJK_Strokes",bmp:"\u31c0-\u31ef"},{name:"InCJK_Symbols_and_Punctuation",bmp:"\u3000-\u303f"},{name:"InCJK_Unified_Ideographs",
bmp:"\u4e00-\u9fff"},{name:"InCJK_Unified_Ideographs_Extension_A",bmp:"\u3400-\u4dbf"},{name:"InCJK_Unified_Ideographs_Extension_B",astral:"[\ud840-\ud868][\udc00-\udfff]|\ud869[\udc00-\udedf]"},{name:"InCJK_Unified_Ideographs_Extension_C",astral:"\ud869[\udf00-\udfff]|[\ud86a-\ud86c][\udc00-\udfff]|\ud86d[\udc00-\udf3f]"},{name:"InCJK_Unified_Ideographs_Extension_D",astral:"\ud86d[\udf40-\udfff]|\ud86e[\udc00-\udc1f]"},{name:"InCJK_Unified_Ideographs_Extension_E",astral:"\ud86e[\udc20-\udfff]|[\ud86f-\ud872][\udc00-\udfff]|\ud873[\udc00-\udeaf]"},
{name:"InCarian",astral:"\ud800[\udea0-\udedf]"},{name:"InCaucasian_Albanian",astral:"\ud801[\udd30-\udd6f]"},{name:"InChakma",astral:"\ud804[\udd00-\udd4f]"},{name:"InCham",bmp:"\uaa00-\uaa5f"},{name:"InCherokee",bmp:"\u13a0-\u13ff"},{name:"InCherokee_Supplement",bmp:"\uab70-\uabbf"},{name:"InCombining_Diacritical_Marks",bmp:"\u0300-\u036f"},{name:"InCombining_Diacritical_Marks_Extended",bmp:"\u1ab0-\u1aff"},{name:"InCombining_Diacritical_Marks_Supplement",bmp:"\u1dc0-\u1dff"},{name:"InCombining_Diacritical_Marks_for_Symbols",
bmp:"\u20d0-\u20ff"},{name:"InCombining_Half_Marks",bmp:"\ufe20-\ufe2f"},{name:"InCommon_Indic_Number_Forms",bmp:"\ua830-\ua83f"},{name:"InControl_Pictures",bmp:"\u2400-\u243f"},{name:"InCoptic",bmp:"\u2c80-\u2cff"},{name:"InCoptic_Epact_Numbers",astral:"\ud800[\udee0-\udeff]"},{name:"InCounting_Rod_Numerals",astral:"\ud834[\udf60-\udf7f]"},{name:"InCuneiform",astral:"\ud808[\udc00-\udfff]"},{name:"InCuneiform_Numbers_and_Punctuation",astral:"\ud809[\udc00-\udc7f]"},{name:"InCurrency_Symbols",bmp:"\u20a0-\u20cf"},
{name:"InCypriot_Syllabary",astral:"\ud802[\udc00-\udc3f]"},{name:"InCyrillic",bmp:"\u0400-\u04ff"},{name:"InCyrillic_Extended_A",bmp:"\u2de0-\u2dff"},{name:"InCyrillic_Extended_B",bmp:"\ua640-\ua69f"},{name:"InCyrillic_Extended_C",bmp:"\u1c80-\u1c8f"},{name:"InCyrillic_Supplement",bmp:"\u0500-\u052f"},{name:"InDeseret",astral:"\ud801[\udc00-\udc4f]"},{name:"InDevanagari",bmp:"\u0900-\u097f"},{name:"InDevanagari_Extended",bmp:"\ua8e0-\ua8ff"},{name:"InDingbats",bmp:"\u2700-\u27bf"},{name:"InDomino_Tiles",
astral:"\ud83c[\udc30-\udc9f]"},{name:"InDuployan",astral:"\ud82f[\udc00-\udc9f]"},{name:"InEarly_Dynastic_Cuneiform",astral:"\ud809[\udc80-\udd4f]"},{name:"InEgyptian_Hieroglyphs",astral:"\ud80c[\udc00-\udfff]|\ud80d[\udc00-\udc2f]"},{name:"InElbasan",astral:"\ud801[\udd00-\udd2f]"},{name:"InEmoticons",astral:"\ud83d[\ude00-\ude4f]"},{name:"InEnclosed_Alphanumeric_Supplement",astral:"\ud83c[\udd00-\uddff]"},{name:"InEnclosed_Alphanumerics",bmp:"\u2460-\u24ff"},{name:"InEnclosed_CJK_Letters_and_Months",
bmp:"\u3200-\u32ff"},{name:"InEnclosed_Ideographic_Supplement",astral:"\ud83c[\ude00-\udeff]"},{name:"InEthiopic",bmp:"\u1200-\u137f"},{name:"InEthiopic_Extended",bmp:"\u2d80-\u2ddf"},{name:"InEthiopic_Extended_A",bmp:"\uab00-\uab2f"},{name:"InEthiopic_Supplement",bmp:"\u1380-\u139f"},{name:"InGeneral_Punctuation",bmp:"\u2000-\u206f"},{name:"InGeometric_Shapes",bmp:"\u25a0-\u25ff"},{name:"InGeometric_Shapes_Extended",astral:"\ud83d[\udf80-\udfff]"},{name:"InGeorgian",bmp:"\u10a0-\u10ff"},{name:"InGeorgian_Supplement",
bmp:"\u2d00-\u2d2f"},{name:"InGlagolitic",bmp:"\u2c00-\u2c5f"},{name:"InGlagolitic_Supplement",astral:"\ud838[\udc00-\udc2f]"},{name:"InGothic",astral:"\ud800[\udf30-\udf4f]"},{name:"InGrantha",astral:"\ud804[\udf00-\udf7f]"},{name:"InGreek_Extended",bmp:"\u1f00-\u1fff"},{name:"InGreek_and_Coptic",bmp:"\u0370-\u03ff"},{name:"InGujarati",bmp:"\u0a80-\u0aff"},{name:"InGurmukhi",bmp:"\u0a00-\u0a7f"},{name:"InHalfwidth_and_Fullwidth_Forms",bmp:"\uff00-\uffef"},{name:"InHangul_Compatibility_Jamo",bmp:"\u3130-\u318f"},
{name:"InHangul_Jamo",bmp:"\u1100-\u11ff"},{name:"InHangul_Jamo_Extended_A",bmp:"\ua960-\ua97f"},{name:"InHangul_Jamo_Extended_B",bmp:"\ud7b0-\ud7ff"},{name:"InHangul_Syllables",bmp:"\uac00-\ud7af"},{name:"InHanunoo",bmp:"\u1720-\u173f"},{name:"InHatran",astral:"\ud802[\udce0-\udcff]"},{name:"InHebrew",bmp:"\u0590-\u05ff"},{name:"InHigh_Private_Use_Surrogates",bmp:"\udb80-\udbff"},{name:"InHigh_Surrogates",bmp:"\ud800-\udb7f"},{name:"InHiragana",bmp:"\u3040-\u309f"},{name:"InIPA_Extensions",bmp:"\u0250-\u02af"},
{name:"InIdeographic_Description_Characters",bmp:"\u2ff0-\u2fff"},{name:"InIdeographic_Symbols_and_Punctuation",astral:"\ud81b[\udfe0-\udfff]"},{name:"InImperial_Aramaic",astral:"\ud802[\udc40-\udc5f]"},{name:"InInscriptional_Pahlavi",astral:"\ud802[\udf60-\udf7f]"},{name:"InInscriptional_Parthian",astral:"\ud802[\udf40-\udf5f]"},{name:"InJavanese",bmp:"\ua980-\ua9df"},{name:"InKaithi",astral:"\ud804[\udc80-\udccf]"},{name:"InKana_Supplement",astral:"\ud82c[\udc00-\udcff]"},{name:"InKanbun",bmp:"\u3190-\u319f"},
{name:"InKangxi_Radicals",bmp:"\u2f00-\u2fdf"},{name:"InKannada",bmp:"\u0c80-\u0cff"},{name:"InKatakana",bmp:"\u30a0-\u30ff"},{name:"InKatakana_Phonetic_Extensions",bmp:"\u31f0-\u31ff"},{name:"InKayah_Li",bmp:"\ua900-\ua92f"},{name:"InKharoshthi",astral:"\ud802[\ude00-\ude5f]"},{name:"InKhmer",bmp:"\u1780-\u17ff"},{name:"InKhmer_Symbols",bmp:"\u19e0-\u19ff"},{name:"InKhojki",astral:"\ud804[\ude00-\ude4f]"},{name:"InKhudawadi",astral:"\ud804[\udeb0-\udeff]"},{name:"InLao",bmp:"\u0e80-\u0eff"},{name:"InLatin_Extended_Additional",
bmp:"\u1e00-\u1eff"},{name:"InLatin_Extended_A",bmp:"\u0100-\u017f"},{name:"InLatin_Extended_B",bmp:"\u0180-\u024f"},{name:"InLatin_Extended_C",bmp:"\u2c60-\u2c7f"},{name:"InLatin_Extended_D",bmp:"\ua720-\ua7ff"},{name:"InLatin_Extended_E",bmp:"\uab30-\uab6f"},{name:"InLatin_1_Supplement",bmp:"\u0080-\u00ff"},{name:"InLepcha",bmp:"\u1c00-\u1c4f"},{name:"InLetterlike_Symbols",bmp:"\u2100-\u214f"},{name:"InLimbu",bmp:"\u1900-\u194f"},{name:"InLinear_A",astral:"\ud801[\ude00-\udf7f]"},{name:"InLinear_B_Ideograms",
astral:"\ud800[\udc80-\udcff]"},{name:"InLinear_B_Syllabary",astral:"\ud800[\udc00-\udc7f]"},{name:"InLisu",bmp:"\ua4d0-\ua4ff"},{name:"InLow_Surrogates",bmp:"\udc00-\udfff"},{name:"InLycian",astral:"\ud800[\ude80-\ude9f]"},{name:"InLydian",astral:"\ud802[\udd20-\udd3f]"},{name:"InMahajani",astral:"\ud804[\udd50-\udd7f]"},{name:"InMahjong_Tiles",astral:"\ud83c[\udc00-\udc2f]"},{name:"InMalayalam",bmp:"\u0d00-\u0d7f"},{name:"InMandaic",bmp:"\u0840-\u085f"},{name:"InManichaean",astral:"\ud802[\udec0-\udeff]"},
{name:"InMarchen",astral:"\ud807[\udc70-\udcbf]"},{name:"InMathematical_Alphanumeric_Symbols",astral:"\ud835[\udc00-\udfff]"},{name:"InMathematical_Operators",bmp:"\u2200-\u22ff"},{name:"InMeetei_Mayek",bmp:"\uabc0-\uabff"},{name:"InMeetei_Mayek_Extensions",bmp:"\uaae0-\uaaff"},{name:"InMende_Kikakui",astral:"\ud83a[\udc00-\udcdf]"},{name:"InMeroitic_Cursive",astral:"\ud802[\udda0-\uddff]"},{name:"InMeroitic_Hieroglyphs",astral:"\ud802[\udd80-\udd9f]"},{name:"InMiao",astral:"\ud81b[\udf00-\udf9f]"},
{name:"InMiscellaneous_Mathematical_Symbols_A",bmp:"\u27c0-\u27ef"},{name:"InMiscellaneous_Mathematical_Symbols_B",bmp:"\u2980-\u29ff"},{name:"InMiscellaneous_Symbols",bmp:"\u2600-\u26ff"},{name:"InMiscellaneous_Symbols_and_Arrows",bmp:"\u2b00-\u2bff"},{name:"InMiscellaneous_Symbols_and_Pictographs",astral:"\ud83c[\udf00-\udfff]|\ud83d[\udc00-\uddff]"},{name:"InMiscellaneous_Technical",bmp:"\u2300-\u23ff"},{name:"InModi",astral:"\ud805[\ude00-\ude5f]"},{name:"InModifier_Tone_Letters",bmp:"\ua700-\ua71f"},
{name:"InMongolian",bmp:"\u1800-\u18af"},{name:"InMongolian_Supplement",astral:"\ud805[\ude60-\ude7f]"},{name:"InMro",astral:"\ud81a[\ude40-\ude6f]"},{name:"InMultani",astral:"\ud804[\ude80-\udeaf]"},{name:"InMusical_Symbols",astral:"\ud834[\udd00-\uddff]"},{name:"InMyanmar",bmp:"\u1000-\u109f"},{name:"InMyanmar_Extended_A",bmp:"\uaa60-\uaa7f"},{name:"InMyanmar_Extended_B",bmp:"\ua9e0-\ua9ff"},{name:"InNKo",bmp:"\u07c0-\u07ff"},{name:"InNabataean",astral:"\ud802[\udc80-\udcaf]"},{name:"InNew_Tai_Lue",
bmp:"\u1980-\u19df"},{name:"InNewa",astral:"\ud805[\udc00-\udc7f]"},{name:"InNumber_Forms",bmp:"\u2150-\u218f"},{name:"InOgham",bmp:"\u1680-\u169f"},{name:"InOl_Chiki",bmp:"\u1c50-\u1c7f"},{name:"InOld_Hungarian",astral:"\ud803[\udc80-\udcff]"},{name:"InOld_Italic",astral:"\ud800[\udf00-\udf2f]"},{name:"InOld_North_Arabian",astral:"\ud802[\ude80-\ude9f]"},{name:"InOld_Permic",astral:"\ud800[\udf50-\udf7f]"},{name:"InOld_Persian",astral:"\ud800[\udfa0-\udfdf]"},{name:"InOld_South_Arabian",astral:"\ud802[\ude60-\ude7f]"},
{name:"InOld_Turkic",astral:"\ud803[\udc00-\udc4f]"},{name:"InOptical_Character_Recognition",bmp:"\u2440-\u245f"},{name:"InOriya",bmp:"\u0b00-\u0b7f"},{name:"InOrnamental_Dingbats",astral:"\ud83d[\ude50-\ude7f]"},{name:"InOsage",astral:"\ud801[\udcb0-\udcff]"},{name:"InOsmanya",astral:"\ud801[\udc80-\udcaf]"},{name:"InPahawh_Hmong",astral:"\ud81a[\udf00-\udf8f]"},{name:"InPalmyrene",astral:"\ud802[\udc60-\udc7f]"},{name:"InPau_Cin_Hau",astral:"\ud806[\udec0-\udeff]"},{name:"InPhags_pa",bmp:"\ua840-\ua87f"},
{name:"InPhaistos_Disc",astral:"\ud800[\uddd0-\uddff]"},{name:"InPhoenician",astral:"\ud802[\udd00-\udd1f]"},{name:"InPhonetic_Extensions",bmp:"\u1d00-\u1d7f"},{name:"InPhonetic_Extensions_Supplement",bmp:"\u1d80-\u1dbf"},{name:"InPlaying_Cards",astral:"\ud83c[\udca0-\udcff]"},{name:"InPrivate_Use_Area",bmp:"\ue000-\uf8ff"},{name:"InPsalter_Pahlavi",astral:"\ud802[\udf80-\udfaf]"},{name:"InRejang",bmp:"\ua930-\ua95f"},{name:"InRumi_Numeral_Symbols",astral:"\ud803[\ude60-\ude7f]"},{name:"InRunic",
bmp:"\u16a0-\u16ff"},{name:"InSamaritan",bmp:"\u0800-\u083f"},{name:"InSaurashtra",bmp:"\ua880-\ua8df"},{name:"InSharada",astral:"\ud804[\udd80-\udddf]"},{name:"InShavian",astral:"\ud801[\udc50-\udc7f]"},{name:"InShorthand_Format_Controls",astral:"\ud82f[\udca0-\udcaf]"},{name:"InSiddham",astral:"\ud805[\udd80-\uddff]"},{name:"InSinhala",bmp:"\u0d80-\u0dff"},{name:"InSinhala_Archaic_Numbers",astral:"\ud804[\udde0-\uddff]"},{name:"InSmall_Form_Variants",bmp:"\ufe50-\ufe6f"},{name:"InSora_Sompeng",
astral:"\ud804[\udcd0-\udcff]"},{name:"InSpacing_Modifier_Letters",bmp:"\u02b0-\u02ff"},{name:"InSpecials",bmp:"\ufff0-\uffff"},{name:"InSundanese",bmp:"\u1b80-\u1bbf"},{name:"InSundanese_Supplement",bmp:"\u1cc0-\u1ccf"},{name:"InSuperscripts_and_Subscripts",bmp:"\u2070-\u209f"},{name:"InSupplemental_Arrows_A",bmp:"\u27f0-\u27ff"},{name:"InSupplemental_Arrows_B",bmp:"\u2900-\u297f"},{name:"InSupplemental_Arrows_C",astral:"\ud83e[\udc00-\udcff]"},{name:"InSupplemental_Mathematical_Operators",bmp:"\u2a00-\u2aff"},
{name:"InSupplemental_Punctuation",bmp:"\u2e00-\u2e7f"},{name:"InSupplemental_Symbols_and_Pictographs",astral:"\ud83e[\udd00-\uddff]"},{name:"InSupplementary_Private_Use_Area_A",astral:"[\udb80-\udbbf][\udc00-\udfff]"},{name:"InSupplementary_Private_Use_Area_B",astral:"[\udbc0-\udbff][\udc00-\udfff]"},{name:"InSutton_SignWriting",astral:"\ud836[\udc00-\udeaf]"},{name:"InSyloti_Nagri",bmp:"\ua800-\ua82f"},{name:"InSyriac",bmp:"\u0700-\u074f"},{name:"InTagalog",bmp:"\u1700-\u171f"},{name:"InTagbanwa",
bmp:"\u1760-\u177f"},{name:"InTags",astral:"\udb40[\udc00-\udc7f]"},{name:"InTai_Le",bmp:"\u1950-\u197f"},{name:"InTai_Tham",bmp:"\u1a20-\u1aaf"},{name:"InTai_Viet",bmp:"\uaa80-\uaadf"},{name:"InTai_Xuan_Jing_Symbols",astral:"\ud834[\udf00-\udf5f]"},{name:"InTakri",astral:"\ud805[\ude80-\udecf]"},{name:"InTamil",bmp:"\u0b80-\u0bff"},{name:"InTangut",astral:"[\ud81c-\ud821][\udc00-\udfff]"},{name:"InTangut_Components",astral:"\ud822[\udc00-\udeff]"},{name:"InTelugu",bmp:"\u0c00-\u0c7f"},{name:"InThaana",
bmp:"\u0780-\u07bf"},{name:"InThai",bmp:"\u0e00-\u0e7f"},{name:"InTibetan",bmp:"\u0f00-\u0fff"},{name:"InTifinagh",bmp:"\u2d30-\u2d7f"},{name:"InTirhuta",astral:"\ud805[\udc80-\udcdf]"},{name:"InTransport_and_Map_Symbols",astral:"\ud83d[\ude80-\udeff]"},{name:"InUgaritic",astral:"\ud800[\udf80-\udf9f]"},{name:"InUnified_Canadian_Aboriginal_Syllabics",bmp:"\u1400-\u167f"},{name:"InUnified_Canadian_Aboriginal_Syllabics_Extended",bmp:"\u18b0-\u18ff"},{name:"InVai",bmp:"\ua500-\ua63f"},{name:"InVariation_Selectors",
bmp:"\ufe00-\ufe0f"},{name:"InVariation_Selectors_Supplement",astral:"\udb40[\udd00-\uddef]"},{name:"InVedic_Extensions",bmp:"\u1cd0-\u1cff"},{name:"InVertical_Forms",bmp:"\ufe10-\ufe1f"},{name:"InWarang_Citi",astral:"\ud806[\udca0-\udcff]"},{name:"InYi_Radicals",bmp:"\ua490-\ua4cf"},{name:"InYi_Syllables",bmp:"\ua000-\ua48f"},{name:"InYijing_Hexagram_Symbols",bmp:"\u4dc0-\u4dff"}])}},{}],5:[function(d,g,p){g.exports=function(c){if(!c.addUnicodeData)throw new ReferenceError("Unicode Base must be loaded before Unicode Categories");
c.addUnicodeData([{name:"C",alias:"Other",isBmpLast:!0,bmp:"\x00-\u001f\u007f-\u009f\u00ad\u0378\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557\u0558\u0560\u0588\u058b\u058c\u0590\u05c8-\u05cf\u05eb-\u05ef\u05f5-\u0605\u061c\u061d\u06dd\u070e\u070f\u074b\u074c\u07b2-\u07bf\u07fb-\u07ff\u082e\u082f\u083f\u085c\u085d\u085f-\u089f\u08b5\u08be-\u08d3\u08e2\u0984\u098d\u098e\u0991\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba\u09bb\u09c5\u09c6\u09c9\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4\u09e5\u09fc-\u0a00\u0a04\u0a0b-\u0a0e\u0a11\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a\u0a3b\u0a3d\u0a43-\u0a46\u0a49\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a76-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba\u0abb\u0ac6\u0aca\u0ace\u0acf\u0ad1-\u0adf\u0ae4\u0ae5\u0af2-\u0af8\u0afa-\u0b00\u0b04\u0b0d\u0b0e\u0b11\u0b12\u0b29\u0b31\u0b34\u0b3a\u0b3b\u0b45\u0b46\u0b49\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c04\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64\u0c65\u0c70-\u0c77\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4\u0ce5\u0cf0\u0cf3-\u0d00\u0d04\u0d0d\u0d11\u0d3b\u0d3c\u0d45\u0d49\u0d50-\u0d53\u0d64\u0d65\u0d80\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85\u0e86\u0e89\u0e8b\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8\u0ea9\u0eac\u0eba\u0ebe\u0ebf\u0ec5\u0ec7\u0ece\u0ecf\u0eda\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce\u10cf\u1249\u124e\u124f\u1257\u1259\u125e\u125f\u1289\u128e\u128f\u12b1\u12b6\u12b7\u12bf\u12c1\u12c6\u12c7\u12d7\u1311\u1316\u1317\u135b\u135c\u137d-\u137f\u139a-\u139f\u13f6\u13f7\u13fe\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de\u17df\u17ea-\u17ef\u17fa-\u17ff\u180e\u180f\u181a-\u181f\u1878-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c\u1a1d\u1a5f\u1a7d\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1cbf\u1cc8-\u1ccf\u1cf7\u1cfa-\u1cff\u1df6-\u1dfa\u1f16\u1f17\u1f1e\u1f1f\u1f46\u1f47\u1f4e\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e\u1f7f\u1fb5\u1fc5\u1fd4\u1fd5\u1fdc\u1ff0\u1ff1\u1ff5\u1fff\u200b-\u200f\u202a-\u202e\u2060-\u206f\u2072\u2073\u208f\u209d-\u209f\u20bf-\u20cf\u20f1-\u20ff\u218c-\u218f\u23ff\u2427-\u243f\u244b-\u245f\u2b74\u2b75\u2b96\u2b97\u2bba-\u2bbc\u2bc9\u2bd2-\u2beb\u2bf0-\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e45-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097\u3098\u3100-\u3104\u312e-\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9fd6-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7af\ua7b8-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua8fe\ua8ff\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e\uaa4f\uaa5a\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07\uab08\uab0f\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\uf8ff\ufa6e\ufa6f\ufada-\ufa
ff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90\ufd91\ufdc8-\ufdef\ufdfe\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\uff00\uffbf-\uffc1\uffc8\uffc9\uffd0\uffd1\uffd8\uffd9\uffdd-\uffdf\uffe7\uffef-\ufffb\ufffe\uffff",
astral:"\ud800[\udc0c\udc27\udc3b\udc3e\udc4e\udc4f\udc5e-\udc7f\udcfb-\udcff\udd03-\udd06\udd34-\udd36\udd8f\udd9c-\udd9f\udda1-\uddcf\uddfe-\ude7f\ude9d-\ude9f\uded1-\udedf\udefc-\udeff\udf24-\udf2f\udf4b-\udf4f\udf7b-\udf7f\udf9e\udfc4-\udfc7\udfd6-\udfff]|\ud801[\udc9e\udc9f\udcaa-\udcaf\udcd4-\udcd7\udcfc-\udcff\udd28-\udd2f\udd64-\udd6e\udd70-\uddff\udf37-\udf3f\udf56-\udf5f\udf68-\udfff]|\ud802[\udc06\udc07\udc09\udc36\udc39-\udc3b\udc3d\udc3e\udc56\udc9f-\udca6\udcb0-\udcdf\udcf3\udcf6-\udcfa\udd1c-\udd1e\udd3a-\udd3e\udd40-\udd7f\uddb8-\uddbb\uddd0\uddd1\ude04\ude07-\ude0b\ude14\ude18\ude34-\ude37\ude3b-\ude3e\ude48-\ude4f\ude59-\ude5f\udea0-\udebf\udee7-\udeea\udef7-\udeff\udf36-\udf38\udf56\udf57\udf73-\udf77\udf92-\udf98\udf9d-\udfa8\udfb0-\udfff]|\ud803[\udc49-\udc7f\udcb3-\udcbf\udcf3-\udcf9\udd00-\ude5f\ude7f-\udfff]|\ud804[\udc4e-\udc51\udc70-\udc7e\udcbd\udcc2-\udccf\udce9-\udcef\udcfa-\udcff\udd35\udd44-\udd4f\udd77-\udd7f\uddce\uddcf\udde0\uddf5-\uddff\ude12\ude3f-\ude7f\ude87\ude89\ude8e\ude9e\udeaa-\udeaf\udeeb-\udeef\udefa-\udeff\udf04\udf0d\udf0e\udf11\udf12\udf29\udf31\udf34\udf3a\udf3b\udf45\udf46\udf49\udf4a\udf4e\udf4f\udf51-\udf56\udf58-\udf5c\udf64\udf65\udf6d-\udf6f\udf75-\udfff]|\ud805[\udc5a\udc5c\udc5e-\udc7f\udcc8-\udccf\udcda-\udd7f\uddb6\uddb7\uddde-\uddff\ude45-\ude4f\ude5a-\ude5f\ude6d-\ude7f\udeb8-\udebf\udeca-\udeff\udf1a-\udf1c\udf2c-\udf2f\udf40-\udfff]|\ud806[\udc00-\udc9f\udcf3-\udcfe\udd00-\udebf\udef9-\udfff]|\ud807[\udc09\udc37\udc46-\udc4f\udc6d-\udc6f\udc90\udc91\udca8\udcb7-\udfff]|\ud808[\udf9a-\udfff]|\ud809[\udc6f\udc75-\udc7f\udd44-\udfff]|[\ud80a\ud80b\ud80e-\ud810\ud812-\ud819\ud823-\ud82b\ud82d\ud82e\ud830-\ud833\ud837\ud839\ud83f\ud874-\ud87d\ud87f-\udb3f\udb41-\udbff][\udc00-\udfff]|\ud80d[\udc2f-\udfff]|\ud811[\ude47-\udfff]|\ud81a[\ude39-\ude3f\ude5f\ude6a-\ude6d\ude70-\udecf\udeee\udeef\udef6-\udeff\udf46-\udf4f\udf5a\udf62\udf78-\udf7c\udf90-\udfff]|\ud81b[\udc00-\udeff\udf45-\udf4f\udf7f-\udf8e\udfa0-\udfdf\udfe1-\udfff]|\ud821[\udfed-\udfff]|\ud822[\udef3-\udfff]|\ud82c[\udc02-\udfff]|\ud82f[\udc6b-\udc6f\udc7d-\udc7f\udc89-\udc8f\udc9a\udc9b\udca0-\udfff]|\ud834[\udcf6-\udcff\udd27\udd28\udd73-\udd7a\udde9-\uddff\ude46-\udeff\udf57-\udf5f\udf72-\udfff]|\ud835[\udc55\udc9d\udca0\udca1\udca3\udca4\udca7\udca8\udcad\udcba\udcbc\udcc4\udd06\udd0b\udd0c\udd15\udd1d\udd3a\udd3f\udd45\udd47-\udd49\udd51\udea6\udea7\udfcc\udfcd]|\ud836[\ude8c-\ude9a\udea0\udeb0-\udfff]|\ud838[\udc07\udc19\udc1a\udc22\udc25\udc2b-\udfff]|\ud83a[\udcc5\udcc6\udcd7-\udcff\udd4b-\udd4f\udd5a-\udd5d\udd60-\udfff]|\ud83b[\udc00-\uddff\ude04\ude20\ude23\ude25\ude26\ude28\ude33\ude38\ude3a\ude3c-\ude41\ude43-\ude46\ude48\ude4a\ude4c\ude50\ude53\ude55\ude56\ude58\ude5a\ude5c\ude5e\ude60\ude63\ude65\ude66\ude6b\ude73\ude78\ude7d\ude7f\ude8a\ude9c-\udea0\udea4\udeaa\udebc-\udeef\udef2-\udfff]|\ud83c[\udc2c-\udc2f\udc94-\udc9f\udcaf\udcb0\udcc0\udcd0\udcf6-\udcff\udd0d-\udd0f\udd2f\udd6c-\udd6f\uddad-\udde5\ude03-\ude0f\ude3c-\ude3f\ude49-\ude4f\ude52-\udeff]|\ud83d[\uded3-\udedf\udeed-\udeef\udef7-\udeff\udf74-\udf7f\udfd5-\udfff]|\ud83e[\udc0c-\udc0f\udc48-\udc4f\udc5a-\udc5f\udc88-\udc8f\udcae-\udd0f\udd1f\udd28-\udd2f\udd31\udd32\udd3f\udd4c-\udd4f\udd5f-\udd7f\udd92-\uddbf\uddc1-\udfff]|\ud869[\uded7-\udeff]|\ud86d[\udf35-\udf3f]|\ud86e[\udc1e\udc1f]|\ud873[\udea2-\udfff]|\ud87e[\ude1e-\udfff]|\udb40[\udc00-\udcff\uddf0-\udfff]"},
{name:"Cc",alias:"Control",bmp:"\x00-\u001f\u007f-\u009f"},{name:"Cf",alias:"Format",bmp:"\u00ad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb",astral:"\ud804\udcbd|\ud82f[\udca0-\udca3]|\ud834[\udd73-\udd7a]|\udb40[\udc01\udc20-\udc7f]"},{name:"Cn",alias:"Unassigned",bmp:"\u0378\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557\u0558\u0560\u0588\u058b\u058c\u0590\u05c8-\u05cf\u05eb-\u05ef\u05f5-\u05ff\u061d\u070e\u074b\u074c\u07b2-\u07bf\u07fb-\u07ff\u082e\u082f\u083f\u085c\u085d\u085f-\u089f\u08b5\u08be-\u08d3\u0984\u098d\u098e\u0991\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba\u09bb\u09c5\u09c6\u09c9\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4\u09e5\u09fc-\u0a00\u0a04\u0a0b-\u0a0e\u0a11\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a\u0a3b\u0a3d\u0a43-\u0a46\u0a49\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a76-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba\u0abb\u0ac6\u0aca\u0ace\u0acf\u0ad1-\u0adf\u0ae4\u0ae5\u0af2-\u0af8\u0afa-\u0b00\u0b04\u0b0d\u0b0e\u0b11\u0b12\u0b29\u0b31\u0b34\u0b3a\u0b3b\u0b45\u0b46\u0b49\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c04\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64\u0c65\u0c70-\u0c77\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4\u0ce5\u0cf0\u0cf3-\u0d00\u0d04\u0d0d\u0d11\u0d3b\u0d3c\u0d45\u0d49\u0d50-\u0d53\u0d64\u0d65\u0d80\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85\u0e86\u0e89\u0e8b\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8\u0ea9\u0eac\u0eba\u0ebe\u0ebf\u0ec5\u0ec7\u0ece\u0ecf\u0eda\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce\u10cf\u1249\u124e\u124f\u1257\u1259\u125e\u125f\u1289\u128e\u128f\u12b1\u12b6\u12b7\u12bf\u12c1\u12c6\u12c7\u12d7\u1311\u1316\u1317\u135b\u135c\u137d-\u137f\u139a-\u139f\u13f6\u13f7\u13fe\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1878-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c\u1a1d\u1a5f\u1a7d\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1cbf\u1cc8-\u1ccf\u1cf7\u1cfa-\u1cff\u1df6-\u1dfa\u1f16\u1f17\u1f1e\u1f1f\u1f46\u1f47\u1f4e\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e\u1f7f\u1fb5\u1fc5\u1fd4\u1fd5\u1fdc\u1ff0\u1ff1\u1ff5\u1fff\u2065\u2072\u2073\u208f\u209d-\u209f\u20bf-\u20cf\u20f1-\u20ff\u218c-\u218f\u23ff\u2427-\u243f\u244b-\u245f\u2b74\u2b75\u2b96\u2b97\u2bba-\u2bbc\u2bc9\u2bd2-\u2beb\u2bf0-\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e45-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097\u3098\u3100-\u3104\u312e-\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9fd6-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7af\ua7b8-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua8fe\ua8ff\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e\uaa
4f\uaa5a\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07\uab08\uab0f\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90\ufd91\ufdc8-\ufdef\ufdfe\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd\ufefe\uff00\uffbf-\uffc1\uffc8\uffc9\uffd0\uffd1\uffd8\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe\uffff",
astral:"\ud800[\udc0c\udc27\udc3b\udc3e\udc4e\udc4f\udc5e-\udc7f\udcfb-\udcff\udd03-\udd06\udd34-\udd36\udd8f\udd9c-\udd9f\udda1-\uddcf\uddfe-\ude7f\ude9d-\ude9f\uded1-\udedf\udefc-\udeff\udf24-\udf2f\udf4b-\udf4f\udf7b-\udf7f\udf9e\udfc4-\udfc7\udfd6-\udfff]|\ud801[\udc9e\udc9f\udcaa-\udcaf\udcd4-\udcd7\udcfc-\udcff\udd28-\udd2f\udd64-\udd6e\udd70-\uddff\udf37-\udf3f\udf56-\udf5f\udf68-\udfff]|\ud802[\udc06\udc07\udc09\udc36\udc39-\udc3b\udc3d\udc3e\udc56\udc9f-\udca6\udcb0-\udcdf\udcf3\udcf6-\udcfa\udd1c-\udd1e\udd3a-\udd3e\udd40-\udd7f\uddb8-\uddbb\uddd0\uddd1\ude04\ude07-\ude0b\ude14\ude18\ude34-\ude37\ude3b-\ude3e\ude48-\ude4f\ude59-\ude5f\udea0-\udebf\udee7-\udeea\udef7-\udeff\udf36-\udf38\udf56\udf57\udf73-\udf77\udf92-\udf98\udf9d-\udfa8\udfb0-\udfff]|\ud803[\udc49-\udc7f\udcb3-\udcbf\udcf3-\udcf9\udd00-\ude5f\ude7f-\udfff]|\ud804[\udc4e-\udc51\udc70-\udc7e\udcc2-\udccf\udce9-\udcef\udcfa-\udcff\udd35\udd44-\udd4f\udd77-\udd7f\uddce\uddcf\udde0\uddf5-\uddff\ude12\ude3f-\ude7f\ude87\ude89\ude8e\ude9e\udeaa-\udeaf\udeeb-\udeef\udefa-\udeff\udf04\udf0d\udf0e\udf11\udf12\udf29\udf31\udf34\udf3a\udf3b\udf45\udf46\udf49\udf4a\udf4e\udf4f\udf51-\udf56\udf58-\udf5c\udf64\udf65\udf6d-\udf6f\udf75-\udfff]|\ud805[\udc5a\udc5c\udc5e-\udc7f\udcc8-\udccf\udcda-\udd7f\uddb6\uddb7\uddde-\uddff\ude45-\ude4f\ude5a-\ude5f\ude6d-\ude7f\udeb8-\udebf\udeca-\udeff\udf1a-\udf1c\udf2c-\udf2f\udf40-\udfff]|\ud806[\udc00-\udc9f\udcf3-\udcfe\udd00-\udebf\udef9-\udfff]|\ud807[\udc09\udc37\udc46-\udc4f\udc6d-\udc6f\udc90\udc91\udca8\udcb7-\udfff]|\ud808[\udf9a-\udfff]|\ud809[\udc6f\udc75-\udc7f\udd44-\udfff]|[\ud80a\ud80b\ud80e-\ud810\ud812-\ud819\ud823-\ud82b\ud82d\ud82e\ud830-\ud833\ud837\ud839\ud83f\ud874-\ud87d\ud87f-\udb3f\udb41-\udb7f][\udc00-\udfff]|\ud80d[\udc2f-\udfff]|\ud811[\ude47-\udfff]|\ud81a[\ude39-\ude3f\ude5f\ude6a-\ude6d\ude70-\udecf\udeee\udeef\udef6-\udeff\udf46-\udf4f\udf5a\udf62\udf78-\udf7c\udf90-\udfff]|\ud81b[\udc00-\udeff\udf45-\udf4f\udf7f-\udf8e\udfa0-\udfdf\udfe1-\udfff]|\ud821[\udfed-\udfff]|\ud822[\udef3-\udfff]|\ud82c[\udc02-\udfff]|\ud82f[\udc6b-\udc6f\udc7d-\udc7f\udc89-\udc8f\udc9a\udc9b\udca4-\udfff]|\ud834[\udcf6-\udcff\udd27\udd28\udde9-\uddff\ude46-\udeff\udf57-\udf5f\udf72-\udfff]|\ud835[\udc55\udc9d\udca0\udca1\udca3\udca4\udca7\udca8\udcad\udcba\udcbc\udcc4\udd06\udd0b\udd0c\udd15\udd1d\udd3a\udd3f\udd45\udd47-\udd49\udd51\udea6\udea7\udfcc\udfcd]|\ud836[\ude8c-\ude9a\udea0\udeb0-\udfff]|\ud838[\udc07\udc19\udc1a\udc22\udc25\udc2b-\udfff]|\ud83a[\udcc5\udcc6\udcd7-\udcff\udd4b-\udd4f\udd5a-\udd5d\udd60-\udfff]|\ud83b[\udc00-\uddff\ude04\ude20\ude23\ude25\ude26\ude28\ude33\ude38\ude3a\ude3c-\ude41\ude43-\ude46\ude48\ude4a\ude4c\ude50\ude53\ude55\ude56\ude58\ude5a\ude5c\ude5e\ude60\ude63\ude65\ude66\ude6b\ude73\ude78\ude7d\ude7f\ude8a\ude9c-\udea0\udea4\udeaa\udebc-\udeef\udef2-\udfff]|\ud83c[\udc2c-\udc2f\udc94-\udc9f\udcaf\udcb0\udcc0\udcd0\udcf6-\udcff\udd0d-\udd0f\udd2f\udd6c-\udd6f\uddad-\udde5\ude03-\ude0f\ude3c-\ude3f\ude49-\ude4f\ude52-\udeff]|\ud83d[\uded3-\udedf\udeed-\udeef\udef7-\udeff\udf74-\udf7f\udfd5-\udfff]|\ud83e[\udc0c-\udc0f\udc48-\udc4f\udc5a-\udc5f\udc88-\udc8f\udcae-\udd0f\udd1f\udd28-\udd2f\udd31\udd32\udd3f\udd4c-\udd4f\udd5f-\udd7f\udd92-\uddbf\uddc1-\udfff]|\ud869[\uded7-\udeff]|\ud86d[\udf35-\udf3f]|\ud86e[\udc1e\udc1f]|\ud873[\udea2-\udfff]|\ud87e[\ude1e-\udfff]|\udb40[\udc00\udc02-\udc1f\udc80-\udcff\uddf0-\udfff]|[\udbbf\udbff][\udffe\udfff]"},
{name:"Co",alias:"Private_Use",bmp:"\ue000-\uf8ff",astral:"[\udb80-\udbbe\udbc0-\udbfe][\udc00-\udfff]|[\udbbf\udbff][\udc00-\udffd]"},{name:"Cs",alias:"Surrogate",bmp:"\ud800-\udfff"},{name:"L",alias:"Letter",bmp:"A-Za-z\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0af9\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2183\u2184\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005\u3006\u3031-\u3035\u303b\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5
\u4e00-\u9fd5\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6e5\ua717-\ua71f\ua722-\ua788\ua78b-\ua7ae\ua7b0-\ua7b7\ua7f7-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc",
astral:"\ud800[\udc00-\udc0b\udc0d-\udc26\udc28-\udc3a\udc3c\udc3d\udc3f-\udc4d\udc50-\udc5d\udc80-\udcfa\ude80-\ude9c\udea0-\uded0\udf00-\udf1f\udf30-\udf40\udf42-\udf49\udf50-\udf75\udf80-\udf9d\udfa0-\udfc3\udfc8-\udfcf]|\ud801[\udc00-\udc9d\udcb0-\udcd3\udcd8-\udcfb\udd00-\udd27\udd30-\udd63\ude00-\udf36\udf40-\udf55\udf60-\udf67]|\ud802[\udc00-\udc05\udc08\udc0a-\udc35\udc37\udc38\udc3c\udc3f-\udc55\udc60-\udc76\udc80-\udc9e\udce0-\udcf2\udcf4\udcf5\udd00-\udd15\udd20-\udd39\udd80-\uddb7\uddbe\uddbf\ude00\ude10-\ude13\ude15-\ude17\ude19-\ude33\ude60-\ude7c\ude80-\ude9c\udec0-\udec7\udec9-\udee4\udf00-\udf35\udf40-\udf55\udf60-\udf72\udf80-\udf91]|\ud803[\udc00-\udc48\udc80-\udcb2\udcc0-\udcf2]|\ud804[\udc03-\udc37\udc83-\udcaf\udcd0-\udce8\udd03-\udd26\udd50-\udd72\udd76\udd83-\uddb2\uddc1-\uddc4\uddda\udddc\ude00-\ude11\ude13-\ude2b\ude80-\ude86\ude88\ude8a-\ude8d\ude8f-\ude9d\ude9f-\udea8\udeb0-\udede\udf05-\udf0c\udf0f\udf10\udf13-\udf28\udf2a-\udf30\udf32\udf33\udf35-\udf39\udf3d\udf50\udf5d-\udf61]|\ud805[\udc00-\udc34\udc47-\udc4a\udc80-\udcaf\udcc4\udcc5\udcc7\udd80-\uddae\uddd8-\udddb\ude00-\ude2f\ude44\ude80-\udeaa\udf00-\udf19]|\ud806[\udca0-\udcdf\udcff\udec0-\udef8]|\ud807[\udc00-\udc08\udc0a-\udc2e\udc40\udc72-\udc8f]|\ud808[\udc00-\udf99]|\ud809[\udc80-\udd43]|[\ud80c\ud81c-\ud820\ud840-\ud868\ud86a-\ud86c\ud86f-\ud872][\udc00-\udfff]|\ud80d[\udc00-\udc2e]|\ud811[\udc00-\ude46]|\ud81a[\udc00-\ude38\ude40-\ude5e\uded0-\udeed\udf00-\udf2f\udf40-\udf43\udf63-\udf77\udf7d-\udf8f]|\ud81b[\udf00-\udf44\udf50\udf93-\udf9f\udfe0]|\ud821[\udc00-\udfec]|\ud822[\udc00-\udef2]|\ud82c[\udc00\udc01]|\ud82f[\udc00-\udc6a\udc70-\udc7c\udc80-\udc88\udc90-\udc99]|\ud835[\udc00-\udc54\udc56-\udc9c\udc9e\udc9f\udca2\udca5\udca6\udca9-\udcac\udcae-\udcb9\udcbb\udcbd-\udcc3\udcc5-\udd05\udd07-\udd0a\udd0d-\udd14\udd16-\udd1c\udd1e-\udd39\udd3b-\udd3e\udd40-\udd44\udd46\udd4a-\udd50\udd52-\udea5\udea8-\udec0\udec2-\udeda\udedc-\udefa\udefc-\udf14\udf16-\udf34\udf36-\udf4e\udf50-\udf6e\udf70-\udf88\udf8a-\udfa8\udfaa-\udfc2\udfc4-\udfcb]|\ud83a[\udc00-\udcc4\udd00-\udd43]|\ud83b[\ude00-\ude03\ude05-\ude1f\ude21\ude22\ude24\ude27\ude29-\ude32\ude34-\ude37\ude39\ude3b\ude42\ude47\ude49\ude4b\ude4d-\ude4f\ude51\ude52\ude54\ude57\ude59\ude5b\ude5d\ude5f\ude61\ude62\ude64\ude67-\ude6a\ude6c-\ude72\ude74-\ude77\ude79-\ude7c\ude7e\ude80-\ude89\ude8b-\ude9b\udea1-\udea3\udea5-\udea9\udeab-\udebb]|\ud869[\udc00-\uded6\udf00-\udfff]|\ud86d[\udc00-\udf34\udf40-\udfff]|\ud86e[\udc00-\udc1d\udc20-\udfff]|\ud873[\udc00-\udea1]|\ud87e[\udc00-\ude1d]"},
{name:"Ll",alias:"Lowercase_Letter",bmp:"a-z\u00b5\u00df-\u00f6\u00f8-\u00ff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0561-\u0587\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6\u1fc7\u1fd0-\u1fd3\u1fd6\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6\u1ff7\u210a\u210e\u210f\u2113\u212f\u2134\u2139\u213c\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua
685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7b5\ua7b7\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a",
astral:"\ud801[\udc28-\udc4f\udcd8-\udcfb]|\ud803[\udcc0-\udcf2]|\ud806[\udcc0-\udcdf]|\ud835[\udc1a-\udc33\udc4e-\udc54\udc56-\udc67\udc82-\udc9b\udcb6-\udcb9\udcbb\udcbd-\udcc3\udcc5-\udccf\udcea-\udd03\udd1e-\udd37\udd52-\udd6b\udd86-\udd9f\uddba-\uddd3\uddee-\ude07\ude22-\ude3b\ude56-\ude6f\ude8a-\udea5\udec2-\udeda\udedc-\udee1\udefc-\udf14\udf16-\udf1b\udf36-\udf4e\udf50-\udf55\udf70-\udf88\udf8a-\udf8f\udfaa-\udfc2\udfc4-\udfc9\udfcb]|\ud83a[\udd22-\udd43]"},{name:"Lm",alias:"Modifier_Letter",
bmp:"\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5\u06e6\u07f4\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c\ua69d\ua717-\ua71f\ua770\ua788\ua7f8\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3\uaaf4\uab5c-\uab5f\uff70\uff9e\uff9f",astral:"\ud81a[\udf40-\udf43]|\ud81b[\udf93-\udf9f\udfe0]"},
{name:"Lo",alias:"Other_Letter",bmp:"\u00aa\u00ba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05f0-\u05f2\u0620-\u063f\u0641-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0af9\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e45\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10d0-\u10fa\u10fd-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1877\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fd5\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc",
astral:"\ud800[\udc00-\udc0b\udc0d-\udc26\udc28-\udc3a\udc3c\udc3d\udc3f-\udc4d\udc50-\udc5d\udc80-\udcfa\ude80-\ude9c\udea0-\uded0\udf00-\udf1f\udf30-\udf40\udf42-\udf49\udf50-\udf75\udf80-\udf9d\udfa0-\udfc3\udfc8-\udfcf]|\ud801[\udc50-\udc9d\udd00-\udd27\udd30-\udd63\ude00-\udf36\udf40-\udf55\udf60-\udf67]|\ud802[\udc00-\udc05\udc08\udc0a-\udc35\udc37\udc38\udc3c\udc3f-\udc55\udc60-\udc76\udc80-\udc9e\udce0-\udcf2\udcf4\udcf5\udd00-\udd15\udd20-\udd39\udd80-\uddb7\uddbe\uddbf\ude00\ude10-\ude13\ude15-\ude17\ude19-\ude33\ude60-\ude7c\ude80-\ude9c\udec0-\udec7\udec9-\udee4\udf00-\udf35\udf40-\udf55\udf60-\udf72\udf80-\udf91]|\ud803[\udc00-\udc48]|\ud804[\udc03-\udc37\udc83-\udcaf\udcd0-\udce8\udd03-\udd26\udd50-\udd72\udd76\udd83-\uddb2\uddc1-\uddc4\uddda\udddc\ude00-\ude11\ude13-\ude2b\ude80-\ude86\ude88\ude8a-\ude8d\ude8f-\ude9d\ude9f-\udea8\udeb0-\udede\udf05-\udf0c\udf0f\udf10\udf13-\udf28\udf2a-\udf30\udf32\udf33\udf35-\udf39\udf3d\udf50\udf5d-\udf61]|\ud805[\udc00-\udc34\udc47-\udc4a\udc80-\udcaf\udcc4\udcc5\udcc7\udd80-\uddae\uddd8-\udddb\ude00-\ude2f\ude44\ude80-\udeaa\udf00-\udf19]|\ud806[\udcff\udec0-\udef8]|\ud807[\udc00-\udc08\udc0a-\udc2e\udc40\udc72-\udc8f]|\ud808[\udc00-\udf99]|\ud809[\udc80-\udd43]|[\ud80c\ud81c-\ud820\ud840-\ud868\ud86a-\ud86c\ud86f-\ud872][\udc00-\udfff]|\ud80d[\udc00-\udc2e]|\ud811[\udc00-\ude46]|\ud81a[\udc00-\ude38\ude40-\ude5e\uded0-\udeed\udf00-\udf2f\udf63-\udf77\udf7d-\udf8f]|\ud81b[\udf00-\udf44\udf50]|\ud821[\udc00-\udfec]|\ud822[\udc00-\udef2]|\ud82c[\udc00\udc01]|\ud82f[\udc00-\udc6a\udc70-\udc7c\udc80-\udc88\udc90-\udc99]|\ud83a[\udc00-\udcc4]|\ud83b[\ude00-\ude03\ude05-\ude1f\ude21\ude22\ude24\ude27\ude29-\ude32\ude34-\ude37\ude39\ude3b\ude42\ude47\ude49\ude4b\ude4d-\ude4f\ude51\ude52\ude54\ude57\ude59\ude5b\ude5d\ude5f\ude61\ude62\ude64\ude67-\ude6a\ude6c-\ude72\ude74-\ude77\ude79-\ude7c\ude7e\ude80-\ude89\ude8b-\ude9b\udea1-\udea3\udea5-\udea9\udeab-\udebb]|\ud869[\udc00-\uded6\udf00-\udfff]|\ud86d[\udc00-\udf34\udf40-\udfff]|\ud86e[\udc00-\udc1d\udc20-\udfff]|\ud873[\udc00-\udea1]|\ud87e[\udc00-\ude1d]"},
{name:"Lt",alias:"Titlecase_Letter",bmp:"\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc"},{name:"Lu",alias:"Uppercase_Letter",bmp:"A-Z\u00c0-\u00d6\u00d8-\u00de\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178\u0179\u017b\u017d\u0181\u0182\u0184\u0186\u0187\u0189-\u018b\u018e-\u0191\u0193\u0194\u0196-\u0198\u019c\u019d\u019f\u01a0\u01a2\u01a4\u01a6\u01a7\u01a9\u01ac\u01ae\u01af\u01b1-\u01b3\u01b5\u01b7\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a\u023b\u023d\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\u
a662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\uff21-\uff3a",
astral:"\ud801[\udc00-\udc27\udcb0-\udcd3]|\ud803[\udc80-\udcb2]|\ud806[\udca0-\udcbf]|\ud835[\udc00-\udc19\udc34-\udc4d\udc68-\udc81\udc9c\udc9e\udc9f\udca2\udca5\udca6\udca9-\udcac\udcae-\udcb5\udcd0-\udce9\udd04\udd05\udd07-\udd0a\udd0d-\udd14\udd16-\udd1c\udd38\udd39\udd3b-\udd3e\udd40-\udd44\udd46\udd4a-\udd50\udd6c-\udd85\udda0-\uddb9\uddd4-\udded\ude08-\ude21\ude3c-\ude55\ude70-\ude89\udea8-\udec0\udee2-\udefa\udf1c-\udf34\udf56-\udf6e\udf90-\udfa8\udfca]|\ud83a[\udd00-\udd21]"},{name:"M",
alias:"Mark",bmp:"\u0300-\u036f\u0483-\u0489\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d4-\u08e1\u08e3-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962\u0963\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09cb-\u09cd\u09d7\u09e2\u09e3\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a70\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2\u0ae3\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b62\u0b63\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0c00-\u0c03\u0c3e-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62\u0c63\u0c81-\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2\u0ce3\u0d01-\u0d03\u0d3e-\u0d44\u0d46-\u0d48\u0d4a-\u0d4d\u0d57\u0d62\u0d63\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb\u0ebc\u0ec8-\u0ecd\u0f18\u0f19\u0f35\u0f37\u0f39\u0f3e\u0f3f\u0f71-\u0f84\u0f86\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102b-\u103e\u1056-\u1059\u105e-\u1060\u1062-\u1064\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f\u109a-\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752\u1753\u1772\u1773\u17b4-\u17d3\u17dd\u180b-\u180d\u1885\u1886\u18a9\u1920-\u192b\u1930-\u193b\u1a17-\u1a1b\u1a55-\u1a5e\u1a60-\u1a7c\u1a7f\u1ab0-\u1abe\u1b00-\u1b04\u1b34-\u1b44\u1b6b-\u1b73\u1b80-\u1b82\u1ba1-\u1bad\u1be6-\u1bf3\u1c24-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce8\u1ced\u1cf2-\u1cf4\u1cf8\u1cf9\u1dc0-\u1df5\u1dfb-\u1dff\u20d0-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302f\u3099\u309a\ua66f-\ua672\ua674-\ua67d\ua69e\ua69f\ua6f0\ua6f1\ua802\ua806\ua80b\ua823-\ua827\ua880\ua881\ua8b4-\ua8c5\ua8e0-\ua8f1\ua926-\ua92d\ua947-\ua953\ua980-\ua983\ua9b3-\ua9c0\ua9e5\uaa29-\uaa36\uaa43\uaa4c\uaa4d\uaa7b-\uaa7d\uaab0\uaab2-\uaab4\uaab7\uaab8\uaabe\uaabf\uaac1\uaaeb-\uaaef\uaaf5\uaaf6\uabe3-\uabea\uabec\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f",
astral:"\ud800[\uddfd\udee0\udf76-\udf7a]|\ud802[\ude01-\ude03\ude05\ude06\ude0c-\ude0f\ude38-\ude3a\ude3f\udee5\udee6]|\ud804[\udc00-\udc02\udc38-\udc46\udc7f-\udc82\udcb0-\udcba\udd00-\udd02\udd27-\udd34\udd73\udd80-\udd82\uddb3-\uddc0\uddca-\uddcc\ude2c-\ude37\ude3e\udedf-\udeea\udf00-\udf03\udf3c\udf3e-\udf44\udf47\udf48\udf4b-\udf4d\udf57\udf62\udf63\udf66-\udf6c\udf70-\udf74]|\ud805[\udc35-\udc46\udcb0-\udcc3\uddaf-\uddb5\uddb8-\uddc0\udddc\udddd\ude30-\ude40\udeab-\udeb7\udf1d-\udf2b]|\ud807[\udc2f-\udc36\udc38-\udc3f\udc92-\udca7\udca9-\udcb6]|\ud81a[\udef0-\udef4\udf30-\udf36]|\ud81b[\udf51-\udf7e\udf8f-\udf92]|\ud82f[\udc9d\udc9e]|\ud834[\udd65-\udd69\udd6d-\udd72\udd7b-\udd82\udd85-\udd8b\uddaa-\uddad\ude42-\ude44]|\ud836[\ude00-\ude36\ude3b-\ude6c\ude75\ude84\ude9b-\ude9f\udea1-\udeaf]|\ud838[\udc00-\udc06\udc08-\udc18\udc1b-\udc21\udc23\udc24\udc26-\udc2a]|\ud83a[\udcd0-\udcd6\udd44-\udd4a]|\udb40[\udd00-\uddef]"},
{name:"Mc",alias:"Spacing_Mark",bmp:"\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e\u094f\u0982\u0983\u09be-\u09c0\u09c7\u09c8\u09cb\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb\u0acc\u0b02\u0b03\u0b3e\u0b40\u0b47\u0b48\u0b4b\u0b4c\u0b57\u0bbe\u0bbf\u0bc1\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7\u0cc8\u0cca\u0ccb\u0cd5\u0cd6\u0d02\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2\u0df3\u0f3e\u0f3f\u0f7f\u102b\u102c\u1031\u1038\u103b\u103c\u1056\u1057\u1062-\u1064\u1067-\u106d\u1083\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7\u17c8\u1923-\u1926\u1929-\u192b\u1930\u1931\u1933-\u1938\u1a19\u1a1a\u1a55\u1a57\u1a61\u1a63\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43\u1b44\u1b82\u1ba1\u1ba6\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2\u1bf3\u1c24-\u1c2b\u1c34\u1c35\u1ce1\u1cf2\u1cf3\u302e\u302f\ua823\ua824\ua827\ua880\ua881\ua8b4-\ua8c3\ua952\ua953\ua983\ua9b4\ua9b5\ua9ba\ua9bb\ua9bd-\ua9c0\uaa2f\uaa30\uaa33\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee\uaaef\uaaf5\uabe3\uabe4\uabe6\uabe7\uabe9\uabea\uabec",
astral:"\ud804[\udc00\udc02\udc82\udcb0-\udcb2\udcb7\udcb8\udd2c\udd82\uddb3-\uddb5\uddbf\uddc0\ude2c-\ude2e\ude32\ude33\ude35\udee0-\udee2\udf02\udf03\udf3e\udf3f\udf41-\udf44\udf47\udf48\udf4b-\udf4d\udf57\udf62\udf63]|\ud805[\udc35-\udc37\udc40\udc41\udc45\udcb0-\udcb2\udcb9\udcbb-\udcbe\udcc1\uddaf-\uddb1\uddb8-\uddbb\uddbe\ude30-\ude32\ude3b\ude3c\ude3e\udeac\udeae\udeaf\udeb6\udf20\udf21\udf26]|\ud807[\udc2f\udc3e\udca9\udcb1\udcb4]|\ud81b[\udf51-\udf7e]|\ud834[\udd65\udd66\udd6d-\udd72]"},
{name:"Me",alias:"Enclosing_Mark",bmp:"\u0488\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672"},{name:"Mn",alias:"Nonspacing_Mark",bmp:"\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d4-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2\u09e3\u0a01\u0a02\u0a3c\u0a41\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a70\u0a71\u0a75\u0a81\u0a82\u0abc\u0ac1-\u0ac5\u0ac7\u0ac8\u0acd\u0ae2\u0ae3\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc\u0ccd\u0ce2\u0ce3\u0d01\u0d41-\u0d44\u0d4d\u0d62\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb\u0ebc\u0ec8-\u0ecd\u0f18\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039\u103a\u103d\u103e\u1058\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752\u1753\u1772\u1773\u17b4\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885\u1886\u18a9\u1920-\u1922\u1927\u1928\u1932\u1939-\u193b\u1a17\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80\u1b81\u1ba2-\u1ba5\u1ba8\u1ba9\u1bab-\u1bad\u1be6\u1be8\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8\u1cf9\u1dc0-\u1df5\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099\u309a\ua66f\ua674-\ua67d\ua69e\ua69f\ua6f0\ua6f1\ua802\ua806\ua80b\ua825\ua826\ua8c4\ua8c5\ua8e0-\ua8f1\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31\uaa32\uaa35\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7\uaab8\uaabe\uaabf\uaac1\uaaec\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f",
astral:"\ud800[\uddfd\udee0\udf76-\udf7a]|\ud802[\ude01-\ude03\ude05\ude06\ude0c-\ude0f\ude38-\ude3a\ude3f\udee5\udee6]|\ud804[\udc01\udc38-\udc46\udc7f-\udc81\udcb3-\udcb6\udcb9\udcba\udd00-\udd02\udd27-\udd2b\udd2d-\udd34\udd73\udd80\udd81\uddb6-\uddbe\uddca-\uddcc\ude2f-\ude31\ude34\ude36\ude37\ude3e\udedf\udee3-\udeea\udf00\udf01\udf3c\udf40\udf66-\udf6c\udf70-\udf74]|\ud805[\udc38-\udc3f\udc42-\udc44\udc46\udcb3-\udcb8\udcba\udcbf\udcc0\udcc2\udcc3\uddb2-\uddb5\uddbc\uddbd\uddbf\uddc0\udddc\udddd\ude33-\ude3a\ude3d\ude3f\ude40\udeab\udead\udeb0-\udeb5\udeb7\udf1d-\udf1f\udf22-\udf25\udf27-\udf2b]|\ud807[\udc30-\udc36\udc38-\udc3d\udc3f\udc92-\udca7\udcaa-\udcb0\udcb2\udcb3\udcb5\udcb6]|\ud81a[\udef0-\udef4\udf30-\udf36]|\ud81b[\udf8f-\udf92]|\ud82f[\udc9d\udc9e]|\ud834[\udd67-\udd69\udd7b-\udd82\udd85-\udd8b\uddaa-\uddad\ude42-\ude44]|\ud836[\ude00-\ude36\ude3b-\ude6c\ude75\ude84\ude9b-\ude9f\udea1-\udeaf]|\ud838[\udc00-\udc06\udc08-\udc18\udc1b-\udc21\udc23\udc24\udc26-\udc2a]|\ud83a[\udcd0-\udcd6\udd44-\udd4a]|\udb40[\udd00-\uddef]"},
{name:"N",alias:"Number",bmp:"0-9\u00b2\u00b3\u00b9\u00bc-\u00be\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u09f4-\u09f9\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0b72-\u0b77\u0be6-\u0bf2\u0c66-\u0c6f\u0c78-\u0c7e\u0ce6-\u0cef\u0d58-\u0d5e\u0d66-\u0d78\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f33\u1040-\u1049\u1090-\u1099\u1369-\u137c\u16ee-\u16f0\u17e0-\u17e9\u17f0-\u17f9\u1810-\u1819\u1946-\u194f\u19d0-\u19da\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\u2070\u2074-\u2079\u2080-\u2089\u2150-\u2182\u2185-\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3007\u3021-\u3029\u3038-\u303a\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua620-\ua629\ua6e6-\ua6ef\ua830-\ua835\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19",
astral:"\ud800[\udd07-\udd33\udd40-\udd78\udd8a\udd8b\udee1-\udefb\udf20-\udf23\udf41\udf4a\udfd1-\udfd5]|\ud801[\udca0-\udca9]|\ud802[\udc58-\udc5f\udc79-\udc7f\udca7-\udcaf\udcfb-\udcff\udd16-\udd1b\uddbc\uddbd\uddc0-\uddcf\uddd2-\uddff\ude40-\ude47\ude7d\ude7e\ude9d-\ude9f\udeeb-\udeef\udf58-\udf5f\udf78-\udf7f\udfa9-\udfaf]|\ud803[\udcfa-\udcff\ude60-\ude7e]|\ud804[\udc52-\udc6f\udcf0-\udcf9\udd36-\udd3f\uddd0-\uddd9\udde1-\uddf4\udef0-\udef9]|\ud805[\udc50-\udc59\udcd0-\udcd9\ude50-\ude59\udec0-\udec9\udf30-\udf3b]|\ud806[\udce0-\udcf2]|\ud807[\udc50-\udc6c]|\ud809[\udc00-\udc6e]|\ud81a[\ude60-\ude69\udf50-\udf59\udf5b-\udf61]|\ud834[\udf60-\udf71]|\ud835[\udfce-\udfff]|\ud83a[\udcc7-\udccf\udd50-\udd59]|\ud83c[\udd00-\udd0c]"},
{name:"Nd",alias:"Decimal_Number",bmp:"0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19",
astral:"\ud801[\udca0-\udca9]|\ud804[\udc66-\udc6f\udcf0-\udcf9\udd36-\udd3f\uddd0-\uddd9\udef0-\udef9]|\ud805[\udc50-\udc59\udcd0-\udcd9\ude50-\ude59\udec0-\udec9\udf30-\udf39]|\ud806[\udce0-\udce9]|\ud807[\udc50-\udc59]|\ud81a[\ude60-\ude69\udf50-\udf59]|\ud835[\udfce-\udfff]|\ud83a[\udd50-\udd59]"},{name:"Nl",alias:"Letter_Number",bmp:"\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef",astral:"\ud800[\udd40-\udd74\udf41\udf4a\udfd1-\udfd5]|\ud809[\udc00-\udc6e]"},
{name:"No",alias:"Other_Number",bmp:"\u00b2\u00b3\u00b9\u00bc-\u00be\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835",astral:"\ud800[\udd07-\udd33\udd75-\udd78\udd8a\udd8b\udee1-\udefb\udf20-\udf23]|\ud802[\udc58-\udc5f\udc79-\udc7f\udca7-\udcaf\udcfb-\udcff\udd16-\udd1b\uddbc\uddbd\uddc0-\uddcf\uddd2-\uddff\ude40-\ude47\ude7d\ude7e\ude9d-\ude9f\udeeb-\udeef\udf58-\udf5f\udf78-\udf7f\udfa9-\udfaf]|\ud803[\udcfa-\udcff\ude60-\ude7e]|\ud804[\udc52-\udc65\udde1-\uddf4]|\ud805[\udf3a\udf3b]|\ud806[\udcea-\udcf2]|\ud807[\udc5a-\udc6c]|\ud81a[\udf5b-\udf61]|\ud834[\udf60-\udf71]|\ud83a[\udcc7-\udccf]|\ud83c[\udd00-\udd0c]"},
{name:"P",alias:"Punctuation",bmp:"!-#%-\\x2A,-/:;\\x3F@\\x5B-\\x5D_\\x7B}\u00a1\u00a7\u00ab\u00b6\u00b7\u00bb\u00bf\u037e\u0387\u055a-\u055f\u0589\u058a\u05be\u05c0\u05c3\u05c6\u05f3\u05f4\u0609\u060a\u060c\u060d\u061b\u061e\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964\u0965\u0970\u0af0\u0df4\u0e4f\u0e5a\u0e5b\u0f04-\u0f12\u0f14\u0f3a-\u0f3d\u0f85\u0fd0-\u0fd4\u0fd9\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u1400\u166d\u166e\u169b\u169c\u16eb-\u16ed\u1735\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u180a\u1944\u1945\u1a1e\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e\u1c7f\u1cc0-\u1cc7\u1cd3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205e\u207d\u207e\u208d\u208e\u2308-\u230b\u2329\u232a\u2768-\u2775\u27c5\u27c6\u27e6-\u27ef\u2983-\u2998\u29d8-\u29db\u29fc\u29fd\u2cf9-\u2cfc\u2cfe\u2cff\u2d70\u2e00-\u2e2e\u2e30-\u2e44\u3001-\u3003\u3008-\u3011\u3014-\u301f\u3030\u303d\u30a0\u30fb\ua4fe\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e\ua92f\ua95f\ua9c1-\ua9cd\ua9de\ua9df\uaa5c-\uaa5f\uaade\uaadf\uaaf0\uaaf1\uabeb\ufd3e\ufd3f\ufe10-\ufe19\ufe30-\ufe52\ufe54-\ufe61\ufe63\ufe68\ufe6a\ufe6b\uff01-\uff03\uff05-\uff0a\uff0c-\uff0f\uff1a\uff1b\uff1f\uff20\uff3b-\uff3d\uff3f\uff5b\uff5d\uff5f-\uff65",
astral:"\ud800[\udd00-\udd02\udf9f\udfd0]|\ud801\udd6f|\ud802[\udc57\udd1f\udd3f\ude50-\ude58\ude7f\udef0-\udef6\udf39-\udf3f\udf99-\udf9c]|\ud804[\udc47-\udc4d\udcbb\udcbc\udcbe-\udcc1\udd40-\udd43\udd74\udd75\uddc5-\uddc9\uddcd\udddb\udddd-\udddf\ude38-\ude3d\udea9]|\ud805[\udc4b-\udc4f\udc5b\udc5d\udcc6\uddc1-\uddd7\ude41-\ude43\ude60-\ude6c\udf3c-\udf3e]|\ud807[\udc41-\udc45\udc70\udc71]|\ud809[\udc70-\udc74]|\ud81a[\ude6e\ude6f\udef5\udf37-\udf3b\udf44]|\ud82f\udc9f|\ud836[\ude87-\ude8b]|\ud83a[\udd5e\udd5f]"},
{name:"Pc",alias:"Connector_Punctuation",bmp:"_\u203f\u2040\u2054\ufe33\ufe34\ufe4d-\ufe4f\uff3f"},{name:"Pd",alias:"Dash_Punctuation",bmp:"\\x2D\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a\u2e3b\u2e40\u301c\u3030\u30a0\ufe31\ufe32\ufe58\ufe63\uff0d"},{name:"Pe",alias:"Close_Punctuation",bmp:"\\x29\\x5D}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63"},
{name:"Pf",alias:"Final_Punctuation",bmp:"\u00bb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21"},{name:"Pi",alias:"Initial_Punctuation",bmp:"\u00ab\u2018\u201b\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20"},{name:"Po",alias:"Other_Punctuation",bmp:"!-#%-'\\x2A,\\x2E/:;\\x3F@\\x5C\u00a1\u00a7\u00b6\u00b7\u00bf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3\u05f4\u0609\u060a\u060c\u060d\u061b\u061e\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964\u0965\u0970\u0af0\u0df4\u0e4f\u0e5a\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d\u166e\u16eb-\u16ed\u1735\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944\u1945\u1a1e\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e\u1c7f\u1cc0-\u1cc7\u1cd3\u2016\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe\u2cff\u2d70\u2e00\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18\u2e19\u2e1b\u2e1e\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43\u2e44\u3001-\u3003\u303d\u30fb\ua4fe\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e\ua92f\ua95f\ua9c1-\ua9cd\ua9de\ua9df\uaa5c-\uaa5f\uaade\uaadf\uaaf0\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e\uff0f\uff1a\uff1b\uff1f\uff20\uff3c\uff61\uff64\uff65",
astral:"\ud800[\udd00-\udd02\udf9f\udfd0]|\ud801\udd6f|\ud802[\udc57\udd1f\udd3f\ude50-\ude58\ude7f\udef0-\udef6\udf39-\udf3f\udf99-\udf9c]|\ud804[\udc47-\udc4d\udcbb\udcbc\udcbe-\udcc1\udd40-\udd43\udd74\udd75\uddc5-\uddc9\uddcd\udddb\udddd-\udddf\ude38-\ude3d\udea9]|\ud805[\udc4b-\udc4f\udc5b\udc5d\udcc6\uddc1-\uddd7\ude41-\ude43\ude60-\ude6c\udf3c-\udf3e]|\ud807[\udc41-\udc45\udc70\udc71]|\ud809[\udc70-\udc74]|\ud81a[\ude6e\ude6f\udef5\udf37-\udf3b\udf44]|\ud82f\udc9f|\ud836[\ude87-\ude8b]|\ud83a[\udd5e\udd5f]"},
{name:"Ps",alias:"Open_Punctuation",bmp:"\\x28\\x5B\\x7B\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62"},{name:"S",
alias:"Symbol",bmp:"\\x24\\x2B<->\\x5E`\\x7C~\u00a2-\u00a6\u00a8\u00a9\u00ac\u00ae-\u00b1\u00b4\u00b8\u00d7\u00f7\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384\u0385\u03f6\u0482\u058d-\u058f\u0606-\u0608\u060b\u060e\u060f\u06de\u06e9\u06fd\u06fe\u07f6\u09f2\u09f3\u09fa\u09fb\u0af1\u0b70\u0bf3-\u0bfa\u0c7f\u0d4f\u0d79\u0e3f\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce\u0fcf\u0fd5-\u0fd8\u109e\u109f\u1390-\u1399\u17db\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd\u1ffe\u2044\u2052\u207a-\u207c\u208a-\u208c\u20a0-\u20be\u2100\u2101\u2103-\u2106\u2108\u2109\u2114\u2116-\u2118\u211e-\u2123\u2125\u2127\u2129\u212e\u213a\u213b\u2140-\u2144\u214a-\u214d\u214f\u218a\u218b\u2190-\u2307\u230c-\u2328\u232b-\u23fe\u2400-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u2767\u2794-\u27c4\u27c7-\u27e5\u27f0-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2b73\u2b76-\u2b95\u2b98-\u2bb9\u2bbd-\u2bc8\u2bca-\u2bd1\u2bec-\u2bef\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012\u3013\u3020\u3036\u3037\u303e\u303f\u309b\u309c\u3190\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua700-\ua716\ua720\ua721\ua789\ua78a\ua828-\ua82b\ua836-\ua839\uaa77-\uaa79\uab5b\ufb29\ufbb2-\ufbc1\ufdfc\ufdfd\ufe62\ufe64-\ufe66\ufe69\uff04\uff0b\uff1c-\uff1e\uff3e\uff40\uff5c\uff5e\uffe0-\uffe6\uffe8-\uffee\ufffc\ufffd",
astral:"\ud800[\udd37-\udd3f\udd79-\udd89\udd8c-\udd8e\udd90-\udd9b\udda0\uddd0-\uddfc]|\ud802[\udc77\udc78\udec8]|\ud805\udf3f|\ud81a[\udf3c-\udf3f\udf45]|\ud82f\udc9c|\ud834[\udc00-\udcf5\udd00-\udd26\udd29-\udd64\udd6a-\udd6c\udd83\udd84\udd8c-\udda9\uddae-\udde8\ude00-\ude41\ude45\udf00-\udf56]|\ud835[\udec1\udedb\udefb\udf15\udf35\udf4f\udf6f\udf89\udfa9\udfc3]|\ud836[\udc00-\uddff\ude37-\ude3a\ude6d-\ude74\ude76-\ude83\ude85\ude86]|\ud83b[\udef0\udef1]|\ud83c[\udc00-\udc2b\udc30-\udc93\udca0-\udcae\udcb1-\udcbf\udcc1-\udccf\udcd1-\udcf5\udd10-\udd2e\udd30-\udd6b\udd70-\uddac\udde6-\ude02\ude10-\ude3b\ude40-\ude48\ude50\ude51\udf00-\udfff]|\ud83d[\udc00-\uded2\udee0-\udeec\udef0-\udef6\udf00-\udf73\udf80-\udfd4]|\ud83e[\udc00-\udc0b\udc10-\udc47\udc50-\udc59\udc60-\udc87\udc90-\udcad\udd10-\udd1e\udd20-\udd27\udd30\udd33-\udd3e\udd40-\udd4b\udd50-\udd5e\udd80-\udd91\uddc0]"},
{name:"Sc",alias:"Currency_Symbol",bmp:"\\x24\u00a2-\u00a5\u058f\u060b\u09f2\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20be\ua838\ufdfc\ufe69\uff04\uffe0\uffe1\uffe5\uffe6"},{name:"Sk",alias:"Modifier_Symbol",bmp:"\\x5E`\u00a8\u00af\u00b4\u00b8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd\u1ffe\u309b\u309c\ua700-\ua716\ua720\ua721\ua789\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3",astral:"\ud83c[\udffb-\udfff]"},
{name:"Sm",alias:"Math_Symbol",bmp:"\\x2B<->\\x7C~\u00ac\u00b1\u00d7\u00f7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a\u219b\u21a0\u21a3\u21a6\u21ae\u21ce\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec",
astral:"\ud835[\udec1\udedb\udefb\udf15\udf35\udf4f\udf6f\udf89\udfa9\udfc3]|\ud83b[\udef0\udef1]"},{name:"So",alias:"Other_Symbol",bmp:"\u00a6\u00a9\u00ae\u00b0\u0482\u058d\u058e\u060e\u060f\u06de\u06e9\u06fd\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce\u0fcf\u0fd5-\u0fd8\u109e\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100\u2101\u2103-\u2106\u2108\u2109\u2114\u2116\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a\u213b\u214a\u214c\u214d\u214f\u218a\u218b\u2195-\u2199\u219c-\u219f\u21a1\u21a2\u21a4\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u23fe\u2400-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bb9\u2bbd-\u2bc8\u2bca-\u2bd1\u2bec-\u2bef\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012\u3013\u3020\u3036\u3037\u303e\u303f\u3190\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed\uffee\ufffc\ufffd",
astral:"\ud800[\udd37-\udd3f\udd79-\udd89\udd8c-\udd8e\udd90-\udd9b\udda0\uddd0-\uddfc]|\ud802[\udc77\udc78\udec8]|\ud805\udf3f|\ud81a[\udf3c-\udf3f\udf45]|\ud82f\udc9c|\ud834[\udc00-\udcf5\udd00-\udd26\udd29-\udd64\udd6a-\udd6c\udd83\udd84\udd8c-\udda9\uddae-\udde8\ude00-\ude41\ude45\udf00-\udf56]|\ud836[\udc00-\uddff\ude37-\ude3a\ude6d-\ude74\ude76-\ude83\ude85\ude86]|\ud83c[\udc00-\udc2b\udc30-\udc93\udca0-\udcae\udcb1-\udcbf\udcc1-\udccf\udcd1-\udcf5\udd10-\udd2e\udd30-\udd6b\udd70-\uddac\udde6-\ude02\ude10-\ude3b\ude40-\ude48\ude50\ude51\udf00-\udffa]|\ud83d[\udc00-\uded2\udee0-\udeec\udef0-\udef6\udf00-\udf73\udf80-\udfd4]|\ud83e[\udc00-\udc0b\udc10-\udc47\udc50-\udc59\udc60-\udc87\udc90-\udcad\udd10-\udd1e\udd20-\udd27\udd30\udd33-\udd3e\udd40-\udd4b\udd50-\udd5e\udd80-\udd91\uddc0]"},
{name:"Z",alias:"Separator",bmp:" \u00a0\u1680\u2000-\u200a\u2028\u2029\u202f\u205f\u3000"},{name:"Zl",alias:"Line_Separator",bmp:"\u2028"},{name:"Zp",alias:"Paragraph_Separator",bmp:"\u2029"},{name:"Zs",alias:"Space_Separator",bmp:" \u00a0\u1680\u2000-\u200a\u202f\u205f\u3000"}])}},{}],6:[function(d,g,p){g.exports=function(c){if(!c.addUnicodeData)throw new ReferenceError("Unicode Base must be loaded before Unicode Properties");var d=[{name:"ASCII",bmp:"\x00-\u007f"},{name:"Alphabetic",bmp:"A-Za-z\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0345\u0370-\u0374\u0376\u0377\u037a-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0561-\u0587\u05b0-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u05d0-\u05ea\u05f0-\u05f2\u0610-\u061a\u0620-\u0657\u0659-\u065f\u066e-\u06d3\u06d5-\u06dc\u06e1-\u06e8\u06ed-\u06ef\u06fa-\u06fc\u06ff\u0710-\u073f\u074d-\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0817\u081a-\u082c\u0840-\u0858\u08a0-\u08b4\u08b6-\u08bd\u08d4-\u08df\u08e3-\u08e9\u08f0-\u093b\u093d-\u094c\u094e-\u0950\u0955-\u0963\u0971-\u0983\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd-\u09c4\u09c7\u09c8\u09cb\u09cc\u09ce\u09d7\u09dc\u09dd\u09df-\u09e3\u09f0\u09f1\u0a01-\u0a03\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a3e-\u0a42\u0a47\u0a48\u0a4b\u0a4c\u0a51\u0a59-\u0a5c\u0a5e\u0a70-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd-\u0ac5\u0ac7-\u0ac9\u0acb\u0acc\u0ad0\u0ae0-\u0ae3\u0af9\u0b01-\u0b03\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d-\u0b44\u0b47\u0b48\u0b4b\u0b4c\u0b56\u0b57\u0b5c\u0b5d\u0b5f-\u0b63\u0b71\u0b82\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd0\u0bd7\u0c00-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4c\u0c55\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccc\u0cd5\u0cd6\u0cde\u0ce0-\u0ce3\u0cf1\u0cf2\u0d01-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4c\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d7a-\u0d7f\u0d82\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e01-\u0e3a\u0e40-\u0e46\u0e4d\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ecd\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f71-\u0f81\u0f88-\u0f97\u0f99-\u0fbc\u1000-\u1036\u1038\u103b-\u103f\u1050-\u1062\u1065-\u1068\u106e-\u1086\u108e\u109c\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135f\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1713\u1720-\u1733\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772\u1773\u1780-\u17b3\u17b6-\u17c8\u17d7\u17dc\u1820-\u1877\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u1938\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a1b\u1a20-\u1a5e\u1a61-\u1a74\u1aa7\u1b00-\u1b33\u1b35-
\u1b43\u1b45-\u1b4b\u1b80-\u1ba9\u1bac-\u1baf\u1bba-\u1be5\u1be7-\u1bf1\u1c00-\u1c35\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1ce9-\u1cec\u1cee-\u1cf3\u1cf5\u1cf6\u1d00-\u1dbf\u1de7-\u1df4\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u24b6-\u24e9\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fd5\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua674-\ua67b\ua67f-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7ae\ua7b0-\ua7b7\ua7f7-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua827\ua840-\ua873\ua880-\ua8c3\ua8c5\ua8f2-\ua8f7\ua8fb\ua8fd\ua90a-\ua92a\ua930-\ua952\ua960-\ua97c\ua980-\ua9b2\ua9b4-\ua9bf\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa60-\uaa76\uaa7a\uaa7e-\uaabe\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf5\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc",
astral:"\ud800[\udc00-\udc0b\udc0d-\udc26\udc28-\udc3a\udc3c\udc3d\udc3f-\udc4d\udc50-\udc5d\udc80-\udcfa\udd40-\udd74\ude80-\ude9c\udea0-\uded0\udf00-\udf1f\udf30-\udf4a\udf50-\udf7a\udf80-\udf9d\udfa0-\udfc3\udfc8-\udfcf\udfd1-\udfd5]|\ud801[\udc00-\udc9d\udcb0-\udcd3\udcd8-\udcfb\udd00-\udd27\udd30-\udd63\ude00-\udf36\udf40-\udf55\udf60-\udf67]|\ud802[\udc00-\udc05\udc08\udc0a-\udc35\udc37\udc38\udc3c\udc3f-\udc55\udc60-\udc76\udc80-\udc9e\udce0-\udcf2\udcf4\udcf5\udd00-\udd15\udd20-\udd39\udd80-\uddb7\uddbe\uddbf\ude00-\ude03\ude05\ude06\ude0c-\ude13\ude15-\ude17\ude19-\ude33\ude60-\ude7c\ude80-\ude9c\udec0-\udec7\udec9-\udee4\udf00-\udf35\udf40-\udf55\udf60-\udf72\udf80-\udf91]|\ud803[\udc00-\udc48\udc80-\udcb2\udcc0-\udcf2]|\ud804[\udc00-\udc45\udc82-\udcb8\udcd0-\udce8\udd00-\udd32\udd50-\udd72\udd76\udd80-\uddbf\uddc1-\uddc4\uddda\udddc\ude00-\ude11\ude13-\ude34\ude37\ude3e\ude80-\ude86\ude88\ude8a-\ude8d\ude8f-\ude9d\ude9f-\udea8\udeb0-\udee8\udf00-\udf03\udf05-\udf0c\udf0f\udf10\udf13-\udf28\udf2a-\udf30\udf32\udf33\udf35-\udf39\udf3d-\udf44\udf47\udf48\udf4b\udf4c\udf50\udf57\udf5d-\udf63]|\ud805[\udc00-\udc41\udc43-\udc45\udc47-\udc4a\udc80-\udcc1\udcc4\udcc5\udcc7\udd80-\uddb5\uddb8-\uddbe\uddd8-\udddd\ude00-\ude3e\ude40\ude44\ude80-\udeb5\udf00-\udf19\udf1d-\udf2a]|\ud806[\udca0-\udcdf\udcff\udec0-\udef8]|\ud807[\udc00-\udc08\udc0a-\udc36\udc38-\udc3e\udc40\udc72-\udc8f\udc92-\udca7\udca9-\udcb6]|\ud808[\udc00-\udf99]|\ud809[\udc00-\udc6e\udc80-\udd43]|[\ud80c\ud81c-\ud820\ud840-\ud868\ud86a-\ud86c\ud86f-\ud872][\udc00-\udfff]|\ud80d[\udc00-\udc2e]|\ud811[\udc00-\ude46]|\ud81a[\udc00-\ude38\ude40-\ude5e\uded0-\udeed\udf00-\udf36\udf40-\udf43\udf63-\udf77\udf7d-\udf8f]|\ud81b[\udf00-\udf44\udf50-\udf7e\udf93-\udf9f\udfe0]|\ud821[\udc00-\udfec]|\ud822[\udc00-\udef2]|\ud82c[\udc00\udc01]|\ud82f[\udc00-\udc6a\udc70-\udc7c\udc80-\udc88\udc90-\udc99\udc9e]|\ud835[\udc00-\udc54\udc56-\udc9c\udc9e\udc9f\udca2\udca5\udca6\udca9-\udcac\udcae-\udcb9\udcbb\udcbd-\udcc3\udcc5-\udd05\udd07-\udd0a\udd0d-\udd14\udd16-\udd1c\udd1e-\udd39\udd3b-\udd3e\udd40-\udd44\udd46\udd4a-\udd50\udd52-\udea5\udea8-\udec0\udec2-\udeda\udedc-\udefa\udefc-\udf14\udf16-\udf34\udf36-\udf4e\udf50-\udf6e\udf70-\udf88\udf8a-\udfa8\udfaa-\udfc2\udfc4-\udfcb]|\ud838[\udc00-\udc06\udc08-\udc18\udc1b-\udc21\udc23\udc24\udc26-\udc2a]|\ud83a[\udc00-\udcc4\udd00-\udd43\udd47]|\ud83b[\ude00-\ude03\ude05-\ude1f\ude21\ude22\ude24\ude27\ude29-\ude32\ude34-\ude37\ude39\ude3b\ude42\ude47\ude49\ude4b\ude4d-\ude4f\ude51\ude52\ude54\ude57\ude59\ude5b\ude5d\ude5f\ude61\ude62\ude64\ude67-\ude6a\ude6c-\ude72\ude74-\ude77\ude79-\ude7c\ude7e\ude80-\ude89\ude8b-\ude9b\udea1-\udea3\udea5-\udea9\udeab-\udebb]|\ud83c[\udd30-\udd49\udd50-\udd69\udd70-\udd89]|\ud869[\udc00-\uded6\udf00-\udfff]|\ud86d[\udc00-\udf34\udf40-\udfff]|\ud86e[\udc00-\udc1d\udc20-\udfff]|\ud873[\udc00-\udea1]|\ud87e[\udc00-\ude1d]"},
{name:"Any",isBmpLast:!0,bmp:"\x00-\uffff",astral:"[\ud800-\udbff][\udc00-\udfff]"},{name:"Default_Ignorable_Code_Point",bmp:"\u00ad\u034f\u061c\u115f\u1160\u17b4\u17b5\u180b-\u180e\u200b-\u200f\u202a-\u202e\u2060-\u206f\u3164\ufe00-\ufe0f\ufeff\uffa0\ufff0-\ufff8",astral:"\ud82f[\udca0-\udca3]|\ud834[\udd73-\udd7a]|[\udb40-\udb43][\udc00-\udfff]"},{name:"Lowercase",bmp:"a-z\u00aa\u00b5\u00ba\u00df-\u00f6\u00f8-\u00ff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02b8\u02c0\u02c1\u02e0-\u02e4\u0345\u0371\u0373\u0377\u037a-\u037d\u0390\u03ac-\u03ce\u03d0\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0561-\u0587\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1dbf\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6\u1fc7\u1fd0-\u1fd3\u1fd6\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6\u1ff7\u2071\u207f\u2090-\u209c\u210a\u210e\u210f\u2113\u212f\u2134\u2139\u213c\u213d\u2146-\u2149\u214e\u2170-\u217f\u2184\u24d0-\u24e9\u2c30-\u2c5e\u2c61\u2c65\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73\u2c74\u2c76-\u2c7d\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca
1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b-\ua69d\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7b5\ua7b7\ua7f8-\ua7fa\uab30-\uab5a\uab5c-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a",
astral:"\ud801[\udc28-\udc4f\udcd8-\udcfb]|\ud803[\udcc0-\udcf2]|\ud806[\udcc0-\udcdf]|\ud835[\udc1a-\udc33\udc4e-\udc54\udc56-\udc67\udc82-\udc9b\udcb6-\udcb9\udcbb\udcbd-\udcc3\udcc5-\udccf\udcea-\udd03\udd1e-\udd37\udd52-\udd6b\udd86-\udd9f\uddba-\uddd3\uddee-\ude07\ude22-\ude3b\ude56-\ude6f\ude8a-\udea5\udec2-\udeda\udedc-\udee1\udefc-\udf14\udf16-\udf1b\udf36-\udf4e\udf50-\udf55\udf70-\udf88\udf8a-\udf8f\udfaa-\udfc2\udfc4-\udfc9\udfcb]|\ud83a[\udd22-\udd43]"},{name:"Noncharacter_Code_Point",
bmp:"\ufdd0-\ufdef\ufffe\uffff",astral:"[\ud83f\ud87f\ud8bf\ud8ff\ud93f\ud97f\ud9bf\ud9ff\uda3f\uda7f\udabf\udaff\udb3f\udb7f\udbbf\udbff][\udffe\udfff]"},{name:"Uppercase",bmp:"A-Z\u00c0-\u00d6\u00d8-\u00de\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178\u0179\u017b\u017d\u0181\u0182\u0184\u0186\u0187\u0189-\u018b\u018e-\u0191\u0193\u0194\u0196-\u0198\u019c\u019d\u019f\u01a0\u01a2\u01a4\u01a6\u01a7\u01a9\u01ac\u01ae\u01af\u01b1-\u01b3\u01b5\u01b7\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a\u023b\u023d\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e\u213f\u2145\u2160-\u216f\u2183\u24b6-\u24cf\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654
\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\uff21-\uff3a",
astral:"\ud801[\udc00-\udc27\udcb0-\udcd3]|\ud803[\udc80-\udcb2]|\ud806[\udca0-\udcbf]|\ud835[\udc00-\udc19\udc34-\udc4d\udc68-\udc81\udc9c\udc9e\udc9f\udca2\udca5\udca6\udca9-\udcac\udcae-\udcb5\udcd0-\udce9\udd04\udd05\udd07-\udd0a\udd0d-\udd14\udd16-\udd1c\udd38\udd39\udd3b-\udd3e\udd40-\udd44\udd46\udd4a-\udd50\udd6c-\udd85\udda0-\uddb9\uddd4-\udded\ude08-\ude21\ude3c-\ude55\ude70-\ude89\udea8-\udec0\udee2-\udefa\udf1c-\udf34\udf56-\udf6e\udf90-\udfa8\udfca]|\ud83a[\udd00-\udd21]|\ud83c[\udd30-\udd49\udd50-\udd69\udd70-\udd89]"},
{name:"White_Space",bmp:"\t-\r \u0085\u00a0\u1680\u2000-\u200a\u2028\u2029\u202f\u205f\u3000"}];d.push({name:"Assigned",inverseOf:"Cn"});c.addUnicodeData(d)}},{}],7:[function(d,g,p){g.exports=function(c){if(!c.addUnicodeData)throw new ReferenceError("Unicode Base must be loaded before Unicode Scripts");c.addUnicodeData([{name:"Adlam",astral:"\ud83a[\udd00-\udd4a\udd50-\udd59\udd5e\udd5f]"},{name:"Ahom",astral:"\ud805[\udf00-\udf19\udf1d-\udf2b\udf30-\udf3f]"},{name:"Anatolian_Hieroglyphs",astral:"\ud811[\udc00-\ude46]"},
{name:"Arabic",bmp:"\u0600-\u0604\u0606-\u060b\u060d-\u061a\u061e\u0620-\u063f\u0641-\u064a\u0656-\u066f\u0671-\u06dc\u06de-\u06ff\u0750-\u077f\u08a0-\u08b4\u08b6-\u08bd\u08d4-\u08e1\u08e3-\u08ff\ufb50-\ufbc1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfd\ufe70-\ufe74\ufe76-\ufefc",astral:"\ud803[\ude60-\ude7e]|\ud83b[\ude00-\ude03\ude05-\ude1f\ude21\ude22\ude24\ude27\ude29-\ude32\ude34-\ude37\ude39\ude3b\ude42\ude47\ude49\ude4b\ude4d-\ude4f\ude51\ude52\ude54\ude57\ude59\ude5b\ude5d\ude5f\ude61\ude62\ude64\ude67-\ude6a\ude6c-\ude72\ude74-\ude77\ude79-\ude7c\ude7e\ude80-\ude89\ude8b-\ude9b\udea1-\udea3\udea5-\udea9\udeab-\udebb\udef0\udef1]"},
{name:"Armenian",bmp:"\u0531-\u0556\u0559-\u055f\u0561-\u0587\u058a\u058d-\u058f\ufb13-\ufb17"},{name:"Avestan",astral:"\ud802[\udf00-\udf35\udf39-\udf3f]"},{name:"Balinese",bmp:"\u1b00-\u1b4b\u1b50-\u1b7c"},{name:"Bamum",bmp:"\ua6a0-\ua6f7",astral:"\ud81a[\udc00-\ude38]"},{name:"Bassa_Vah",astral:"\ud81a[\uded0-\udeed\udef0-\udef5]"},{name:"Batak",bmp:"\u1bc0-\u1bf3\u1bfc-\u1bff"},{name:"Bengali",bmp:"\u0980-\u0983\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7\u09c8\u09cb-\u09ce\u09d7\u09dc\u09dd\u09df-\u09e3\u09e6-\u09fb"},
{name:"Bhaiksuki",astral:"\ud807[\udc00-\udc08\udc0a-\udc36\udc38-\udc45\udc50-\udc6c]"},{name:"Bopomofo",bmp:"\u02ea\u02eb\u3105-\u312d\u31a0-\u31ba"},{name:"Brahmi",astral:"\ud804[\udc00-\udc4d\udc52-\udc6f\udc7f]"},{name:"Braille",bmp:"\u2800-\u28ff"},{name:"Buginese",bmp:"\u1a00-\u1a1b\u1a1e\u1a1f"},{name:"Buhid",bmp:"\u1740-\u1753"},{name:"Canadian_Aboriginal",bmp:"\u1400-\u167f\u18b0-\u18f5"},{name:"Carian",astral:"\ud800[\udea0-\uded0]"},{name:"Caucasian_Albanian",astral:"\ud801[\udd30-\udd63\udd6f]"},
{name:"Chakma",astral:"\ud804[\udd00-\udd34\udd36-\udd43]"},{name:"Cham",bmp:"\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa5c-\uaa5f"},{name:"Cherokee",bmp:"\u13a0-\u13f5\u13f8-\u13fd\uab70-\uabbf"},{name:"Common",bmp:"\x00-@\\x5B-`\\x7B-\u00a9\u00ab-\u00b9\u00bb-\u00bf\u00d7\u00f7\u02b9-\u02df\u02e5-\u02e9\u02ec-\u02ff\u0374\u037e\u0385\u0387\u0589\u0605\u060c\u061b\u061c\u061f\u0640\u06dd\u08e2\u0964\u0965\u0e3f\u0fd5-\u0fd8\u10fb\u16eb-\u16ed\u1735\u1736\u1802\u1803\u1805\u1cd3\u1ce1\u1ce9-\u1cec\u1cee-\u1cf3\u1cf5\u1cf6\u2000-\u200b\u200e-\u2064\u2066-\u2070\u2074-\u207e\u2080-\u208e\u20a0-\u20be\u2100-\u2125\u2127-\u2129\u212c-\u2131\u2133-\u214d\u214f-\u215f\u2189-\u218b\u2190-\u23fe\u2400-\u2426\u2440-\u244a\u2460-\u27ff\u2900-\u2b73\u2b76-\u2b95\u2b98-\u2bb9\u2bbd-\u2bc8\u2bca-\u2bd1\u2bec-\u2bef\u2e00-\u2e44\u2ff0-\u2ffb\u3000-\u3004\u3006\u3008-\u3020\u3030-\u3037\u303c-\u303f\u309b\u309c\u30a0\u30fb\u30fc\u3190-\u319f\u31c0-\u31e3\u3220-\u325f\u327f-\u32cf\u3358-\u33ff\u4dc0-\u4dff\ua700-\ua721\ua788-\ua78a\ua830-\ua839\ua92e\ua9cf\uab5b\ufd3e\ufd3f\ufe10-\ufe19\ufe30-\ufe52\ufe54-\ufe66\ufe68-\ufe6b\ufeff\uff01-\uff20\uff3b-\uff40\uff5b-\uff65\uff70\uff9e\uff9f\uffe0-\uffe6\uffe8-\uffee\ufff9-\ufffd",
astral:"\ud800[\udd00-\udd02\udd07-\udd33\udd37-\udd3f\udd90-\udd9b\uddd0-\uddfc\udee1-\udefb]|\ud82f[\udca0-\udca3]|\ud834[\udc00-\udcf5\udd00-\udd26\udd29-\udd66\udd6a-\udd7a\udd83\udd84\udd8c-\udda9\uddae-\udde8\udf00-\udf56\udf60-\udf71]|\ud835[\udc00-\udc54\udc56-\udc9c\udc9e\udc9f\udca2\udca5\udca6\udca9-\udcac\udcae-\udcb9\udcbb\udcbd-\udcc3\udcc5-\udd05\udd07-\udd0a\udd0d-\udd14\udd16-\udd1c\udd1e-\udd39\udd3b-\udd3e\udd40-\udd44\udd46\udd4a-\udd50\udd52-\udea5\udea8-\udfcb\udfce-\udfff]|\ud83c[\udc00-\udc2b\udc30-\udc93\udca0-\udcae\udcb1-\udcbf\udcc1-\udccf\udcd1-\udcf5\udd00-\udd0c\udd10-\udd2e\udd30-\udd6b\udd70-\uddac\udde6-\uddff\ude01\ude02\ude10-\ude3b\ude40-\ude48\ude50\ude51\udf00-\udfff]|\ud83d[\udc00-\uded2\udee0-\udeec\udef0-\udef6\udf00-\udf73\udf80-\udfd4]|\ud83e[\udc00-\udc0b\udc10-\udc47\udc50-\udc59\udc60-\udc87\udc90-\udcad\udd10-\udd1e\udd20-\udd27\udd30\udd33-\udd3e\udd40-\udd4b\udd50-\udd5e\udd80-\udd91\uddc0]|\udb40[\udc01\udc20-\udc7f]"},
{name:"Coptic",bmp:"\u03e2-\u03ef\u2c80-\u2cf3\u2cf9-\u2cff"},{name:"Cuneiform",astral:"\ud808[\udc00-\udf99]|\ud809[\udc00-\udc6e\udc70-\udc74\udc80-\udd43]"},{name:"Cypriot",astral:"\ud802[\udc00-\udc05\udc08\udc0a-\udc35\udc37\udc38\udc3c\udc3f]"},{name:"Cyrillic",bmp:"\u0400-\u0484\u0487-\u052f\u1c80-\u1c88\u1d2b\u1d78\u2de0-\u2dff\ua640-\ua69f\ufe2e\ufe2f"},{name:"Deseret",astral:"\ud801[\udc00-\udc4f]"},{name:"Devanagari",bmp:"\u0900-\u0950\u0953-\u0963\u0966-\u097f\ua8e0-\ua8fd"},{name:"Duployan",
astral:"\ud82f[\udc00-\udc6a\udc70-\udc7c\udc80-\udc88\udc90-\udc99\udc9c-\udc9f]"},{name:"Egyptian_Hieroglyphs",astral:"\ud80c[\udc00-\udfff]|\ud80d[\udc00-\udc2e]"},{name:"Elbasan",astral:"\ud801[\udd00-\udd27]"},{name:"Ethiopic",bmp:"\u1200-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u137c\u1380-\u1399\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e"},
{name:"Georgian",bmp:"\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u10ff\u2d00-\u2d25\u2d27\u2d2d"},{name:"Glagolitic",bmp:"\u2c00-\u2c2e\u2c30-\u2c5e",astral:"\ud838[\udc00-\udc06\udc08-\udc18\udc1b-\udc21\udc23\udc24\udc26-\udc2a]"},{name:"Gothic",astral:"\ud800[\udf30-\udf4a]"},{name:"Grantha",astral:"\ud804[\udf00-\udf03\udf05-\udf0c\udf0f\udf10\udf13-\udf28\udf2a-\udf30\udf32\udf33\udf35-\udf39\udf3c-\udf44\udf47\udf48\udf4b-\udf4d\udf50\udf57\udf5d-\udf63\udf66-\udf6c\udf70-\udf74]"},{name:"Greek",
bmp:"\u0370-\u0373\u0375-\u0377\u037a-\u037d\u037f\u0384\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03e1\u03f0-\u03ff\u1d26-\u1d2a\u1d5d-\u1d61\u1d66-\u1d6a\u1dbf\u1f00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fc4\u1fc6-\u1fd3\u1fd6-\u1fdb\u1fdd-\u1fef\u1ff2-\u1ff4\u1ff6-\u1ffe\u2126\uab65",astral:"\ud800[\udd40-\udd8e\udda0]|\ud834[\ude00-\ude45]"},{name:"Gujarati",bmp:"\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0af1\u0af9"},
{name:"Gurmukhi",bmp:"\u0a01-\u0a03\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75"},{name:"Han",bmp:"\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u3005\u3007\u3021-\u3029\u3038-\u303b\u3400-\u4db5\u4e00-\u9fd5\uf900-\ufa6d\ufa70-\ufad9",astral:"[\ud840-\ud868\ud86a-\ud86c\ud86f-\ud872][\udc00-\udfff]|\ud869[\udc00-\uded6\udf00-\udfff]|\ud86d[\udc00-\udf34\udf40-\udfff]|\ud86e[\udc00-\udc1d\udc20-\udfff]|\ud873[\udc00-\udea1]|\ud87e[\udc00-\ude1d]"},
{name:"Hangul",bmp:"\u1100-\u11ff\u302e\u302f\u3131-\u318e\u3200-\u321e\u3260-\u327e\ua960-\ua97c\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc"},{name:"Hanunoo",bmp:"\u1720-\u1734"},{name:"Hatran",astral:"\ud802[\udce0-\udcf2\udcf4\udcf5\udcfb-\udcff]"},{name:"Hebrew",bmp:"\u0591-\u05c7\u05d0-\u05ea\u05f0-\u05f4\ufb1d-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufb4f"},{name:"Hiragana",bmp:"\u3041-\u3096\u309d-\u309f",astral:"\ud82c\udc01|\ud83c\ude00"},
{name:"Imperial_Aramaic",astral:"\ud802[\udc40-\udc55\udc57-\udc5f]"},{name:"Inherited",bmp:"\u0300-\u036f\u0485\u0486\u064b-\u0655\u0670\u0951\u0952\u1ab0-\u1abe\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8\u1cf9\u1dc0-\u1df5\u1dfb-\u1dff\u200c\u200d\u20d0-\u20f0\u302a-\u302d\u3099\u309a\ufe00-\ufe0f\ufe20-\ufe2d",astral:"\ud800[\uddfd\udee0]|\ud834[\udd67-\udd69\udd7b-\udd82\udd85-\udd8b\uddaa-\uddad]|\udb40[\udd00-\uddef]"},{name:"Inscriptional_Pahlavi",astral:"\ud802[\udf60-\udf72\udf78-\udf7f]"},
{name:"Inscriptional_Parthian",astral:"\ud802[\udf40-\udf55\udf58-\udf5f]"},{name:"Javanese",bmp:"\ua980-\ua9cd\ua9d0-\ua9d9\ua9de\ua9df"},{name:"Kaithi",astral:"\ud804[\udc80-\udcc1]"},{name:"Kannada",bmp:"\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1\u0cf2"},{name:"Katakana",bmp:"\u30a1-\u30fa\u30fd-\u30ff\u31f0-\u31ff\u32d0-\u32fe\u3300-\u3357\uff66-\uff6f\uff71-\uff9d",astral:"\ud82c\udc00"},
{name:"Kayah_Li",bmp:"\ua900-\ua92d\ua92f"},{name:"Kharoshthi",astral:"\ud802[\ude00-\ude03\ude05\ude06\ude0c-\ude13\ude15-\ude17\ude19-\ude33\ude38-\ude3a\ude3f-\ude47\ude50-\ude58]"},{name:"Khmer",bmp:"\u1780-\u17dd\u17e0-\u17e9\u17f0-\u17f9\u19e0-\u19ff"},{name:"Khojki",astral:"\ud804[\ude00-\ude11\ude13-\ude3e]"},{name:"Khudawadi",astral:"\ud804[\udeb0-\udeea\udef0-\udef9]"},{name:"Lao",bmp:"\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf"},
{name:"Latin",bmp:"A-Za-z\u00aa\u00ba\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02b8\u02e0-\u02e4\u1d00-\u1d25\u1d2c-\u1d5c\u1d62-\u1d65\u1d6b-\u1d77\u1d79-\u1dbe\u1e00-\u1eff\u2071\u207f\u2090-\u209c\u212a\u212b\u2132\u214e\u2160-\u2188\u2c60-\u2c7f\ua722-\ua787\ua78b-\ua7ae\ua7b0-\ua7b7\ua7f7-\ua7ff\uab30-\uab5a\uab5c-\uab64\ufb00-\ufb06\uff21-\uff3a\uff41-\uff5a"},{name:"Lepcha",bmp:"\u1c00-\u1c37\u1c3b-\u1c49\u1c4d-\u1c4f"},{name:"Limbu",bmp:"\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1940\u1944-\u194f"},
{name:"Linear_A",astral:"\ud801[\ude00-\udf36\udf40-\udf55\udf60-\udf67]"},{name:"Linear_B",astral:"\ud800[\udc00-\udc0b\udc0d-\udc26\udc28-\udc3a\udc3c\udc3d\udc3f-\udc4d\udc50-\udc5d\udc80-\udcfa]"},{name:"Lisu",bmp:"\ua4d0-\ua4ff"},{name:"Lycian",astral:"\ud800[\ude80-\ude9c]"},{name:"Lydian",astral:"\ud802[\udd20-\udd39\udd3f]"},{name:"Mahajani",astral:"\ud804[\udd50-\udd76]"},{name:"Malayalam",bmp:"\u0d01-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4f\u0d54-\u0d63\u0d66-\u0d7f"},
{name:"Mandaic",bmp:"\u0840-\u085b\u085e"},{name:"Manichaean",astral:"\ud802[\udec0-\udee6\udeeb-\udef6]"},{name:"Marchen",astral:"\ud807[\udc70-\udc8f\udc92-\udca7\udca9-\udcb6]"},{name:"Meetei_Mayek",bmp:"\uaae0-\uaaf6\uabc0-\uabed\uabf0-\uabf9"},{name:"Mende_Kikakui",astral:"\ud83a[\udc00-\udcc4\udcc7-\udcd6]"},{name:"Meroitic_Cursive",astral:"\ud802[\udda0-\uddb7\uddbc-\uddcf\uddd2-\uddff]"},{name:"Meroitic_Hieroglyphs",astral:"\ud802[\udd80-\udd9f]"},{name:"Miao",astral:"\ud81b[\udf00-\udf44\udf50-\udf7e\udf8f-\udf9f]"},
{name:"Modi",astral:"\ud805[\ude00-\ude44\ude50-\ude59]"},{name:"Mongolian",bmp:"\u1800\u1801\u1804\u1806-\u180e\u1810-\u1819\u1820-\u1877\u1880-\u18aa",astral:"\ud805[\ude60-\ude6c]"},{name:"Mro",astral:"\ud81a[\ude40-\ude5e\ude60-\ude69\ude6e\ude6f]"},{name:"Multani",astral:"\ud804[\ude80-\ude86\ude88\ude8a-\ude8d\ude8f-\ude9d\ude9f-\udea9]"},{name:"Myanmar",bmp:"\u1000-\u109f\ua9e0-\ua9fe\uaa60-\uaa7f"},{name:"Nabataean",astral:"\ud802[\udc80-\udc9e\udca7-\udcaf]"},{name:"New_Tai_Lue",bmp:"\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u19de\u19df"},
{name:"Newa",astral:"\ud805[\udc00-\udc59\udc5b\udc5d]"},{name:"Nko",bmp:"\u07c0-\u07fa"},{name:"Ogham",bmp:"\u1680-\u169c"},{name:"Ol_Chiki",bmp:"\u1c50-\u1c7f"},{name:"Old_Hungarian",astral:"\ud803[\udc80-\udcb2\udcc0-\udcf2\udcfa-\udcff]"},{name:"Old_Italic",astral:"\ud800[\udf00-\udf23]"},{name:"Old_North_Arabian",astral:"\ud802[\ude80-\ude9f]"},{name:"Old_Permic",astral:"\ud800[\udf50-\udf7a]"},{name:"Old_Persian",astral:"\ud800[\udfa0-\udfc3\udfc8-\udfd5]"},{name:"Old_South_Arabian",astral:"\ud802[\ude60-\ude7f]"},
{name:"Old_Turkic",astral:"\ud803[\udc00-\udc48]"},{name:"Oriya",bmp:"\u0b01-\u0b03\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5c\u0b5d\u0b5f-\u0b63\u0b66-\u0b77"},{name:"Osage",astral:"\ud801[\udcb0-\udcd3\udcd8-\udcfb]"},{name:"Osmanya",astral:"\ud801[\udc80-\udc9d\udca0-\udca9]"},{name:"Pahawh_Hmong",astral:"\ud81a[\udf00-\udf45\udf50-\udf59\udf5b-\udf61\udf63-\udf77\udf7d-\udf8f]"},{name:"Palmyrene",astral:"\ud802[\udc60-\udc7f]"},
{name:"Pau_Cin_Hau",astral:"\ud806[\udec0-\udef8]"},{name:"Phags_Pa",bmp:"\ua840-\ua877"},{name:"Phoenician",astral:"\ud802[\udd00-\udd1b\udd1f]"},{name:"Psalter_Pahlavi",astral:"\ud802[\udf80-\udf91\udf99-\udf9c\udfa9-\udfaf]"},{name:"Rejang",bmp:"\ua930-\ua953\ua95f"},{name:"Runic",bmp:"\u16a0-\u16ea\u16ee-\u16f8"},{name:"Samaritan",bmp:"\u0800-\u082d\u0830-\u083e"},{name:"Saurashtra",bmp:"\ua880-\ua8c5\ua8ce-\ua8d9"},{name:"Sharada",astral:"\ud804[\udd80-\uddcd\uddd0-\udddf]"},{name:"Shavian",
astral:"\ud801[\udc50-\udc7f]"},{name:"Siddham",astral:"\ud805[\udd80-\uddb5\uddb8-\udddd]"},{name:"SignWriting",astral:"\ud836[\udc00-\ude8b\ude9b-\ude9f\udea1-\udeaf]"},{name:"Sinhala",bmp:"\u0d82\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df4",astral:"\ud804[\udde1-\uddf4]"},{name:"Sora_Sompeng",astral:"\ud804[\udcd0-\udce8\udcf0-\udcf9]"},{name:"Sundanese",bmp:"\u1b80-\u1bbf\u1cc0-\u1cc7"},{name:"Syloti_Nagri",bmp:"\ua800-\ua82b"},
{name:"Syriac",bmp:"\u0700-\u070d\u070f-\u074a\u074d-\u074f"},{name:"Tagalog",bmp:"\u1700-\u170c\u170e-\u1714"},{name:"Tagbanwa",bmp:"\u1760-\u176c\u176e-\u1770\u1772\u1773"},{name:"Tai_Le",bmp:"\u1950-\u196d\u1970-\u1974"},{name:"Tai_Tham",bmp:"\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa0-\u1aad"},{name:"Tai_Viet",bmp:"\uaa80-\uaac2\uaadb-\uaadf"},{name:"Takri",astral:"\ud805[\ude80-\udeb7\udec0-\udec9]"},{name:"Tamil",bmp:"\u0b82\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bfa"},
{name:"Tangut",astral:"\ud81b\udfe0|[\ud81c-\ud820][\udc00-\udfff]|\ud821[\udc00-\udfec]|\ud822[\udc00-\udef2]"},{name:"Telugu",bmp:"\u0c00-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c78-\u0c7f"},{name:"Thaana",bmp:"\u0780-\u07b1"},{name:"Thai",bmp:"\u0e01-\u0e3a\u0e40-\u0e5b"},{name:"Tibetan",bmp:"\u0f00-\u0f47\u0f49-\u0f6c\u0f71-\u0f97\u0f99-\u0fbc\u0fbe-\u0fcc\u0fce-\u0fd4\u0fd9\u0fda"},
{name:"Tifinagh",bmp:"\u2d30-\u2d67\u2d6f\u2d70\u2d7f"},{name:"Tirhuta",astral:"\ud805[\udc80-\udcc7\udcd0-\udcd9]"},{name:"Ugaritic",astral:"\ud800[\udf80-\udf9d\udf9f]"},{name:"Vai",bmp:"\ua500-\ua62b"},{name:"Warang_Citi",astral:"\ud806[\udca0-\udcf2\udcff]"},{name:"Yi",bmp:"\ua000-\ua48c\ua490-\ua4c6"}])}},{}],8:[function(d,g,p){p=d("./xregexp");d("./addons/build")(p);d("./addons/matchrecursive")(p);d("./addons/unicode-base")(p);d("./addons/unicode-blocks")(p);d("./addons/unicode-categories")(p);
d("./addons/unicode-properties")(p);d("./addons/unicode-scripts")(p);g.exports=p},{"./addons/build":1,"./addons/matchrecursive":2,"./addons/unicode-base":3,"./addons/unicode-blocks":4,"./addons/unicode-categories":5,"./addons/unicode-properties":6,"./addons/unicode-scripts":7,"./xregexp":9}],9:[function(d,g,p){function c(a){var e=!0;try{RegExp("",a)}catch(u){e=!1}return e}function A(a,e,u,b,c){var J;a.xregexp={captureNames:e};if(c)return a;if(a.__proto__)a.__proto__=f.prototype;else for(J in f.prototype)a[J]=
f.prototype[J];a.xregexp.source=u;a.xregexp.flags=b?b.split("").sort().join(""):b;return a}function B(a){return n.replace.call(a,/([\s\S])(?=[\s\S]*\1)/g,"")}function z(a,e){if(!f.isRegExp(a))throw new TypeError("Type RegExp expected");var u=a.xregexp||{},b=Q?a.flags:n.exec.call(/\/([a-z]*)$/i,RegExp.prototype.toString.call(a))[1],c="",d="",E=null,h=null;e=e||{};e.removeG&&(d+="g");e.removeY&&(d+="y");d&&(b=n.replace.call(b,new RegExp("["+d+"]+","g"),""));e.addG&&(c+="g");e.addY&&(c+="y");c&&(b=B(b+
c));e.isInternalOnly||(void 0!==u.source&&(E=u.source),null!=u.flags&&(h=c?B(u.flags+c):u.flags));return a=A(new RegExp(e.source||a.source,b),a.xregexp&&a.xregexp.captureNames?u.captureNames.slice(0):null,E,h,e.isInternalOnly)}function l(a){return parseInt(a,16)}function b(a,e,b){(e="("===a.input.charAt(a.index-1)||")"===a.input.charAt(a.index+a[0].length))||(e=a.input,a=a.index+a[0].length,b=-1<b.indexOf("x")?["\\s","#[^#\\n]*","\\(\\?#[^)]*\\)"]:["\\(\\?#[^)]*\\)"],e=n.test.call(new RegExp("^(?:"+
b.join("|")+")*(?:[?*+]|{\\d+(?:,\\d*)?})"),e.slice(a)));return e?"":"(?:)"}function k(a){return parseInt(a,10).toString(16)}function C(a,e){var b=a.length,c;for(c=0;c<b;++c)if(a[c]===e)return c;return-1}function y(a,e){return L.call(a)==="[object "+e+"]"}function m(a){for(;4>a.length;)a="0"+a;return a}function h(a,e){var b;if(B(e)!==e)throw new SyntaxError("Invalid duplicate regex flag "+e);a=n.replace.call(a,/^\(\?([\w$]+)\)/,function(a,b){if(n.test.call(/[gy]/,b))throw new SyntaxError("Cannot use flag g or y in mode modifier "+
a);e=B(e+b);return""});for(b=0;b<e.length;++b)if(!N[e.charAt(b)])throw new SyntaxError("Unknown regex flag "+e.charAt(b));return{pattern:a,flags:e}}function w(a){var e={};return y(a,"String")?(f.forEach(a,/[^\s,]+/,function(a){e[a]=!0}),e):a}function x(a){if(!/^[\w$]$/.test(a))throw Error("Flag must be a single character A-Za-z0-9_$");N[a]=!0}function v(a){RegExp.prototype.exec=(a?r:n).exec;RegExp.prototype.test=(a?r:n).test;String.prototype.match=(a?r:n).match;String.prototype.replace=(a?r:n).replace;
String.prototype.split=(a?r:n).split;D.natives=a}function q(a){if(null==a)throw new TypeError("Cannot convert null or undefined to object");return a}function f(a,e){if(f.isRegExp(a)){if(void 0!==e)throw new TypeError("Cannot supply flags when copying a RegExp");return z(a)}a=void 0===a?"":String(a);e=void 0===e?"":String(e);f.isInstalled("astral")&&-1===e.indexOf("A")&&(e+="A");F[a]||(F[a]={});if(!F[a][e]){var b={hasNamedCapture:!1,captureNames:[]},c="default",d="",g=0,E=h(a,e),k=E.pattern;for(E=
E.flags;g<k.length;){do{for(var l,m=k,p=E,q=g,r=c,v=b,w=I.length,x=m.charAt(q),y=null;w--;){var t=I[w];if(!(t.leadChar&&t.leadChar!==x||t.scope!==r&&"all"!==t.scope||t.flag&&-1===p.indexOf(t.flag))&&(l=f.exec(m,t.regex,q,"sticky"))){y={matchLength:l[0].length,output:t.handler.call(v,l,r,p),reparse:t.reparse};break}}(t=y)&&t.reparse&&(k=k.slice(0,g)+t.output+k.slice(g+t.matchLength))}while(t&&t.reparse);t?(d+=t.output,g+=t.matchLength||1):(t=f.exec(k,O[c],g,"sticky")[0],d+=t,g+=t.length,"["===t&&"default"===
c?c="class":"]"===t&&"class"===c&&(c="default"))}F[a][e]={pattern:n.replace.call(d,/(?:\(\?:\))+/g,"(?:)"),flags:n.replace.call(E,/[^gimuy]+/g,""),captures:b.hasNamedCapture?b.captureNames:null}}b=F[a][e];return A(new RegExp(b.pattern,b.flags),b.captures,a,e)}var D={astral:!1,natives:!1},n={exec:RegExp.prototype.exec,test:RegExp.prototype.test,match:String.prototype.match,replace:String.prototype.replace,split:String.prototype.split},r={},G={},F={},I=[],O={"default":/\\(?:0(?:[0-3][0-7]{0,2}|[4-7][0-7]?)?|[1-9]\d*|x[\dA-Fa-f]{2}|u(?:[\dA-Fa-f]{4}|{[\dA-Fa-f]+})|c[A-Za-z]|[\s\S])|\(\?(?:[:=!]|<[=!])|[?*+]\?|{\d+(?:,\d*)?}\??|[\s\S]/,
"class":/\\(?:[0-3][0-7]{0,2}|[4-7][0-7]?|x[\dA-Fa-f]{2}|u(?:[\dA-Fa-f]{4}|{[\dA-Fa-f]+})|c[A-Za-z]|[\s\S])|[\s\S]/},P=/\$(?:{([\w$]+)}|(\d\d?|[\s\S]))/g,R=void 0===n.exec.call(/()??/,"")[1],Q=void 0!==/x/.flags,L={}.toString,M=c("u"),K=c("y"),N={g:!0,i:!0,m:!0,u:M,y:K};f.prototype=RegExp();f.version="3.2.0";f._clipDuplicates=B;f._hasNativeFlag=c;f._dec=l;f._hex=k;f._pad4=m;f.addToken=function(a,e,b){b=b||{};var c=b.optionalFlags,d;b.flag&&x(b.flag);if(c)for(c=n.split.call(c,""),d=0;d<c.length;++d)x(c[d]);
I.push({regex:z(a,{addG:!0,addY:K,isInternalOnly:!0}),handler:e,scope:b.scope||"default",flag:b.flag,reparse:b.reparse,leadChar:b.leadChar});f.cache.flush("patterns")};f.cache=function(a,b){G[a]||(G[a]={});return G[a][b]||(G[a][b]=f(a,b))};f.cache.flush=function(a){"patterns"===a?F={}:G={}};f.escape=function(a){return n.replace.call(q(a),/[-\[\]{}()*+?.,\\^$|#\s]/g,"\\$&")};f.exec=function(a,b,c,d){var e="g",f,u=!1;(f=K&&!!(d||b.sticky&&!1!==d))?e+="y":d&&(u=!0,e+="FakeY");b.xregexp=b.xregexp||{};
d=b.xregexp[e]||(b.xregexp[e]=z(b,{addG:!0,addY:f,source:u?b.source+"|()":void 0,removeY:!1===d,isInternalOnly:!0}));d.lastIndex=c||0;a=r.exec.call(d,a);u&&a&&""===a.pop()&&(a=null);b.global&&(b.lastIndex=a?d.lastIndex:0);return a};f.forEach=function(a,b,c){for(var e=0,d=-1;e=f.exec(a,b,e);)c(e,++d,a,b),e=e.index+(e[0].length||1)};f.globalize=function(a){return z(a,{addG:!0})};f.install=function(a){a=w(a);!D.astral&&a.astral&&(D.astral=!0);!D.natives&&a.natives&&v(!0)};f.isInstalled=function(a){return!!D[a]};
f.isRegExp=function(a){return"[object RegExp]"===L.call(a)};f.match=function(a,b,c){var e=b.global&&"one"!==c||"all"===c,d=(e?"g":"")+(b.sticky?"y":"")||"noGY";b.xregexp=b.xregexp||{};d=b.xregexp[d]||(b.xregexp[d]=z(b,{addG:!!e,removeG:"one"===c,isInternalOnly:!0}));a=n.match.call(q(a),d);b.global&&(b.lastIndex="one"===c&&a?a.index+a[0].length:0);return e?a||[]:a&&a[0]};f.matchChain=function(a,b){return function S(a,e){function c(a){if(d.backref){if(!(a.hasOwnProperty(d.backref)||+d.backref<a.length))throw new ReferenceError("Backreference to undefined group: "+
d.backref);g.push(a[d.backref]||"")}else g.push(a[0])}for(var d=b[e].regex?b[e]:{regex:b[e]},g=[],h=0;h<a.length;++h)f.forEach(a[h],d.regex,c);return e!==b.length-1&&g.length?S(g,e+1):g}([a],0)};f.replace=function(a,b,c,d){var e=f.isRegExp(b),g=b.global&&"one"!==d||"all"===d,h=(g?"g":"")+(b.sticky?"y":"")||"noGY",u=b;e?(b.xregexp=b.xregexp||{},u=b.xregexp[h]||(b.xregexp[h]=z(b,{addG:!!g,removeG:"one"===d,isInternalOnly:!0}))):g&&(u=new RegExp(f.escape(String(b)),"g"));a=r.replace.call(q(a),u,c);e&&
b.global&&(b.lastIndex=0);return a};f.replaceEach=function(a,b){var c;for(c=0;c<b.length;++c){var e=b[c];a=f.replace(a,e[0],e[1],e[2])}return a};f.split=function(a,b,c){return r.split.call(q(a),b,c)};f.test=function(a,b,c,d){return!!f.exec(a,b,c,d)};f.uninstall=function(a){a=w(a);D.astral&&a.astral&&(D.astral=!1);D.natives&&a.natives&&v(!1)};f.union=function(a,b,c){function d(a,b,c){var d=m[e-u];if(b){if(++e,d)return"(?<"+d+">"}else if(c)return"\\"+(+c+u);return a}c=c||{};c=c.conjunction||"or";var e=
0;if(!y(a,"Array")||!a.length)throw new TypeError("Must provide a nonempty array of patterns to merge");for(var g=/(\()(?!\?)|\\([1-9]\d*)|\\[\s\S]|\[(?:[^\\\]]|\\[\s\S])*\]/g,h=[],k,l=0;l<a.length;++l)if(k=a[l],f.isRegExp(k)){var u=e;var m=k.xregexp&&k.xregexp.captureNames||[];h.push(n.replace.call(f(k.source).source,g,d))}else h.push(f.escape(k));return f(h.join("none"===c?"":"|"),b)};r.exec=function(a){var b=this.lastIndex,c=n.exec.apply(this,arguments),d;if(c){if(!R&&1<c.length&&-1<C(c,"")){var f=
z(this,{removeG:!0,isInternalOnly:!0});n.replace.call(String(a).slice(c.index),f,function(){var a=arguments.length,b;for(b=1;b<a-2;++b)void 0===arguments[b]&&(c[b]=void 0)})}if(this.xregexp&&this.xregexp.captureNames)for(d=1;d<c.length;++d)(f=this.xregexp.captureNames[d-1])&&(c[f]=c[d]);this.global&&!c[0].length&&this.lastIndex>c.index&&(this.lastIndex=c.index)}this.global||(this.lastIndex=b);return c};r.test=function(a){return!!r.exec.call(this,a)};r.match=function(a){if(!f.isRegExp(a))a=new RegExp(a);
else if(a.global){var b=n.match.apply(this,arguments);a.lastIndex=0;return b}return r.exec.call(a,q(this))};r.replace=function(a,b){var c=f.isRegExp(a);if(c){if(a.xregexp)var d=a.xregexp.captureNames;var e=a.lastIndex}else a+="";var g=y(b,"Function")?n.replace.call(String(this),a,function(){var e=arguments,f;if(d)for(e[0]=new String(e[0]),f=0;f<d.length;++f)d[f]&&(e[0][d[f]]=e[f+1]);c&&a.global&&(a.lastIndex=e[e.length-2]+e[0].length);return b.apply(void 0,e)}):n.replace.call(null==this?this:String(this),
a,function(){var a=arguments;return n.replace.call(String(b),P,function(b,c,e){if(c){e=+c;if(e<=a.length-3)return a[e]||"";e=d?C(d,c):-1;if(0>e)throw new SyntaxError("Backreference to undefined group "+b);return a[e+1]||""}if("$"===e)return"$";if("&"===e||0===+e)return a[0];if("`"===e)return a[a.length-1].slice(0,a[a.length-2]);if("'"===e)return a[a.length-1].slice(a[a.length-2]+a[0].length);e=+e;if(!isNaN(e)){if(e>a.length-3)throw new SyntaxError("Backreference to undefined group "+b);return a[e]||
""}throw new SyntaxError("Invalid token "+b);})});c&&(a.lastIndex=a.global?0:e);return g};r.split=function(a,b){if(!f.isRegExp(a))return n.split.apply(this,arguments);var c=String(this),d=[],e=a.lastIndex,g=0,h;b=(void 0===b?-1:b)>>>0;f.forEach(c,a,function(a){a.index+a[0].length>g&&(d.push(c.slice(g,a.index)),1<a.length&&a.index<c.length&&Array.prototype.push.apply(d,a.slice(1)),h=a[0].length,g=a.index+h)});g===c.length?(!n.test.call(a,"")||h)&&d.push(""):d.push(c.slice(g));a.lastIndex=e;return d.length>
b?d.slice(0,b):d};f.addToken(/\\([ABCE-RTUVXYZaeg-mopqyz]|c(?![A-Za-z])|u(?![\dA-Fa-f]{4}|{[\dA-Fa-f]+})|x(?![\dA-Fa-f]{2}))/,function(a,b){if("B"===a[1]&&"default"===b)return a[0];throw new SyntaxError("Invalid escape "+a[0]);},{scope:"all",leadChar:"\\"});f.addToken(/\\u{([\dA-Fa-f]+)}/,function(a,b,c){b=l(a[1]);if(1114111<b)throw new SyntaxError("Invalid Unicode code point "+a[0]);if(65535>=b)return"\\u"+m(k(b));if(M&&-1<c.indexOf("u"))return a[0];throw new SyntaxError("Cannot use Unicode code point above \\u{FFFF} without flag u");
},{scope:"all",leadChar:"\\"});f.addToken(/\[(\^?)\]/,function(a){return a[1]?"[\\s\\S]":"\\b\\B"},{leadChar:"["});f.addToken(/\(\?#[^)]*\)/,b,{leadChar:"("});f.addToken(/\s+|#[^\n]*\n?/,b,{flag:"x"});f.addToken(/\./,function(){return"[\\s\\S]"},{flag:"s",leadChar:"."});f.addToken(/\\k<([\w$]+)>/,function(a){var b=isNaN(a[1])?C(this.captureNames,a[1])+1:+a[1],c=a.index+a[0].length;if(!b||b>this.captureNames.length)throw new SyntaxError("Backreference to undefined group "+a[0]);return"\\"+b+(c===a.input.length||
isNaN(a.input.charAt(c))?"":"(?:)")},{leadChar:"\\"});f.addToken(/\\(\d+)/,function(a,b){if(!("default"===b&&/^[1-9]/.test(a[1])&&+a[1]<=this.captureNames.length)&&"0"!==a[1])throw new SyntaxError("Cannot use octal escape or backreference to undefined group "+a[0]);return a[0]},{scope:"all",leadChar:"\\"});f.addToken(/\(\?P?<([\w$]+)>/,function(a){if(!isNaN(a[1]))throw new SyntaxError("Cannot use integer as capture name "+a[0]);if("length"===a[1]||"__proto__"===a[1])throw new SyntaxError("Cannot use reserved word as capture name "+
a[0]);if(-1<C(this.captureNames,a[1]))throw new SyntaxError("Cannot use same name for multiple groups "+a[0]);this.captureNames.push(a[1]);this.hasNamedCapture=!0;return"("},{leadChar:"("});f.addToken(/\((?!\?)/,function(a,b,c){if(-1<c.indexOf("n"))return"(?:";this.captureNames.push(null);return"("},{optionalFlags:"n",leadChar:"("});g.exports=f},{}]},{},[8])(8)});
|
PypiClean
|
/bots-open-source-edi-translator-3.1.9.tar.gz/bots-3.1.9/bots/cleanup.py
|
import os
import glob
import time
import datetime
import stat
import shutil
from django.utils.translation import ugettext as _
#bots modules
import botslib
import botsglobal
#~ from botsconfig import *
def cleanup(do_cleanup_parameter,userscript,scriptname):
''' public function, does all cleanup of the database and file system.
most cleanup functions are by default done only once a day.
'''
if botsglobal.ini.getboolean('acceptance','runacceptancetest',False): # no cleanup during acceptance testing
return
whencleanup = botsglobal.ini.get('settings','whencleanup','daily')
if do_cleanup_parameter: #if explicit indicated via commandline parameter
do_full_cleanup = True
elif whencleanup in ['always','daily']:
#perform full cleanup only first run of the day.
cur_day = int(time.strftime('%Y%m%d')) #get current date, convert to int
if cur_day != botslib.unique('bots_cleanup_day',updatewith=cur_day):
do_full_cleanup = True
else:
do_full_cleanup = False
else:
do_full_cleanup = False
try:
if do_full_cleanup:
botsglobal.logger.info(u'Cleanup files')
_cleandatafile()
_cleanarchive()
botsglobal.logger.info(u'Cleanup database')
_cleanupsession()
_cleanpersist()
_cleantransactions()
botsglobal.logger.info(u'Vacuum database')
_vacuum()
# postcleanup user exit in botsengine script
botslib.tryrunscript(userscript,scriptname,'postcleanup',whencleanup=whencleanup)
botsglobal.logger.info(u'Done full cleanup.')
_cleanrunsnothingreceived() #do this every run, but not logged
except:
botsglobal.logger.exception(u'Cleanup error.')
def _vacuum():
''' Do VACUUM on sqlite database.'''
if botsglobal.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
botsglobal.db.execute('''VACUUM''')
def _cleanupsession():
''' delete all expired sessions. Bots-engine starts up much more often than web-server.'''
vanaf = datetime.datetime.today()
botslib.changeq('''DELETE FROM django_session WHERE expire_date < %(vanaf)s''', {'vanaf':vanaf})
def _cleanarchive():
''' delete all archive directories older than maxdaysarchive days. Errors are ignored.'''
vanaf_default = (datetime.date.today()-datetime.timedelta(days=botsglobal.ini.getint('settings','maxdaysarchive',180))).strftime('%Y%m%d')
for row in botslib.query('''SELECT archivepath,rsrv3 FROM channel WHERE archivepath != '' '''):
if row['rsrv3']:
vanaf = (datetime.date.today()-datetime.timedelta(days=row['rsrv3'])).strftime('%Y%m%d')
else:
vanaf = vanaf_default
vanafdir = botslib.join(row['archivepath'],vanaf)
for entry in glob.iglob(botslib.join(row['archivepath'],'*')):
if entry < vanafdir:
if entry.endswith('.zip'):
try:
os.remove(entry)
except:
pass
else:
shutil.rmtree(entry,ignore_errors=True)
def _cleandatafile():
''' delete all data files older than xx days.'''
vanaf = time.time() - (botsglobal.ini.getint('settings','maxdays',30) * 3600 * 24)
frompath = botslib.join(botsglobal.ini.get('directories','data','botssys/data'),'*')
for filename in glob.iglob(frompath):
statinfo = os.stat(filename)
if not stat.S_ISDIR(statinfo.st_mode):
try:
os.remove(filename) #remove files - should be no files in root of data dir
except:
botsglobal.logger.exception(_(u'Cleanup could not remove file'))
elif statinfo.st_mtime > vanaf :
continue #directory is newer than maxdays, which is also true for the data files in it. Skip it.
else: #check files in dir and remove all older than maxdays
frompath2 = botslib.join(filename,'*')
            emptydir = True  #tracks whether the directory is empty after the loop (should the directory itself be deleted?)
for filename2 in glob.iglob(frompath2):
statinfo2 = os.stat(filename2)
if statinfo2.st_mtime > vanaf or stat.S_ISDIR(statinfo2.st_mode): #check files in dir and remove all older than maxdays
emptydir = False
else:
try:
os.remove(filename2)
except:
botsglobal.logger.exception(_(u'Cleanup could not remove file'))
if emptydir:
try:
os.rmdir(filename)
except:
botsglobal.logger.exception(_(u'Cleanup could not remove directory'))
def _cleanpersist():
'''delete all persist older than xx days.'''
vanaf = datetime.datetime.today() - datetime.timedelta(days=botsglobal.ini.getint('settings','maxdayspersist',30))
botslib.changeq('''DELETE FROM persist WHERE ts < %(vanaf)s''',{'vanaf':vanaf})
def _cleantransactions():
''' delete records from report, filereport and ta.
best indexes are on idta/reportidta; this should go fast.
'''
vanaf = datetime.datetime.today() - datetime.timedelta(days=botsglobal.ini.getint('settings','maxdays',30))
for row in botslib.query('''SELECT MAX(idta) as max_idta FROM report WHERE ts < %(vanaf)s''',{'vanaf':vanaf}):
maxidta = row['max_idta']
if maxidta is None: #if there is no maxidta to delete, do nothing
return
botslib.changeq('''DELETE FROM report WHERE idta < %(maxidta)s''',{'maxidta':maxidta})
botslib.changeq('''DELETE FROM filereport WHERE idta < %(maxidta)s''',{'maxidta':maxidta})
botslib.changeq('''DELETE FROM ta WHERE idta < %(maxidta)s''',{'maxidta':maxidta})
#the most recent run that is older than maxdays is kept (using < instead of <=).
#Reason: when deleting in ta this would leave the ta-records of the most recent run older than maxdays (except the first ta-record).
#this will not lead to problems.
def _cleanrunsnothingreceived():
    ''' delete all reports of new runs that received no files and had no process errors.
#20120830: if new run with nothing received and no process errors: ta's are already deleted in automaticmaintenance.
'''
vanaf = datetime.datetime.today() - datetime.timedelta(hours=botsglobal.ini.getint('settings','hoursrunwithoutresultiskept',1))
onlycheckrunsofoneday = datetime.datetime.today() - datetime.timedelta(hours=25)
botslib.changeq('''DELETE FROM report
WHERE ts < %(vanaf)s
AND ts >= %(onlycheckrunsofoneday)s
AND type = 'new'
AND lastreceived=0
AND processerrors=0 ''',
{'vanaf':vanaf,'onlycheckrunsofoneday':onlycheckrunsofoneday})
|
PypiClean
|
/mpesa_api-0.2.3-py3-none-any.whl/mpesa_api/util/mocks.py
|
SUCCESS_B2C_SEND_RESPONSE = {
"requestId": "4801-1149222-1",
"ConversationID": "AG_20171106_00004a65b655b9f47b4e",
"OriginatorConversationID": "Service is currently under maintenance. Please try again later",
"ResponseCode": "0",
"ResponseDescription": "The service request has been accepted successfully.",
}
SUCCESS_TOKEN_REQUEST = {
"access_token": "ugqniOdaIapbTs8AkGPZPGHmRzjm",
"expires_in": "3599",
}
FAILED_B2C_SEND_RESPONSE = {
"errorCode": "500.002.1001",
"errorMessage": "Service is currently under maintenance. Please try again later",
"requestId": "8953-1200747-1",
}
B2C_SUCCESSFUL_RESULT = {
"Result": {
"ResultType": 0,
"ResultCode": 0,
"ResultDesc": "The service request has been accepted successfully.",
"OriginatorConversationID": "19455-424535-1",
"ConversationID": "AG_20170717_00006be9c8b5cc46abb6",
"TransactionID": "LGH3197RIB",
"ResultParameters": {
"ResultParameter": [
{"Key": "TransactionReceipt", "Value": "LGH3197RIB"},
{"Key": "TransactionAmount", "Value": 8000},
{"Key": "B2CWorkingAccountAvailableFunds", "Value": 150000},
{"Key": "B2CUtilityAccountAvailableFunds", "Value": 133568},
{
"Key": "TransactionCompletedDateTime",
"Value": "17.07.2017 10:54:57",
},
{
"Key": "ReceiverPartyPublicName",
"Value": "254708374149 - John Doe",
},
{"Key": "B2CChargesPaidAccountAvailableFunds", "Value": 0},
{"Key": "B2CRecipientIsRegisteredCustomer", "Value": "Y"},
]
},
"ReferenceData": {
"ReferenceItem": {
"Key": "QueueTimeoutURL",
"Value": "https://internalsandbox.safaricom.co.ke/mpesa/b2cresults/v1/submit",
}
},
}
}
REGISTER_URL_SUCCESS = {
"ConversationID": "",
"OriginatorCoversationID": "",
"ResponseDescription": "success",
}
ONLINE_REQUEST_RESPONSE = {
"CheckoutRequestID": "ws_CO_12112017210342725",
"CustomerMessage": "Success. Request accepted for processing",
"MerchantRequestID": "4799-1246731-1",
"ResponseCode": "0",
"ResponseDescription": "Success. Request accepted for processing",
}
ONLINE_SUCCESS_RESPONSE = {
"Body": {
"stkCallback": {
"MerchantRequestID": "19465-780693-1",
"CheckoutRequestID": "ws_CO_27072017154747416",
"ResultCode": 0,
"ResultDesc": "The service request is processed successfully.",
"CallbackMetadata": {
"Item": [
{"Name": "Amount", "Value": 1},
{"Name": "MpesaReceiptNumber", "Value": "LGR7OWQX0R"},
{"Name": "Balance"},
{"Name": "TransactionDate", "Value": 20170727154800},
{"Name": "PhoneNumber", "Value": 254721566839},
]
},
}
}
}
PAYBILL_RESPONSE = {
"TransactionType": "Pay Bill",
"TransID": "LK631GQCSP",
"TransTime": "20171106225323",
"TransAmount": "100.00",
"BusinessShortCode": "600000",
"BillRefNumber": "Test",
"InvoiceNumber": "",
"OrgAccountBalance": "",
"ThirdPartyTransID": "",
"MSISDN": "254708374149",
"FirstName": "John",
"MiddleName": "J.",
"LastName": "Doe",
}
|
PypiClean
|
/formification-1.2.0-py3-none-any.whl/formulaic/static/admin/formulaic/ember-formulaic/node_modules/mout/doc/time.md
|
# time #
Utilities for time manipulation.
## convert(value, sourceUnit, [destinationUnit]):Number
Converts time between units.
Available units: `millisecond`, `second`, `minute`, `hour`, `day`, `week`.
Abbreviations: `ms`, `s`, `m`, `h`, `d`, `w`.
We do **not** support year and month as a time unit since their values are not
fixed.
The default `destinationUnit` is `ms`.
```js
convert(1, 'minute'); // 60000
convert(2.5, 's', 'ms'); // 2500
convert(2, 'm', 's'); // 120
convert(500, 'ms', 's'); // 0.5
```
## now():Number
Returns the number of milliseconds elapsed since 1 January 1970 00:00:00 UTC.
Uses `Date.now()` if available.
### Example
```js
now(); // 1335449614650
```
## parseMs(ms):Object
Parse timestamp (milliseconds) into an object `{milliseconds:number,
seconds:number, minutes:number, hours:number, days:number}`.
### Example
```js
// {days:27, hours:4, minutes:26, seconds:5, milliseconds:454}
parseMs(2348765454);
```
## toTimeString(ms):String
Convert timestamp (milliseconds) into a time string in the format "[H:]MM:SS".
### Example
```js
toTimeString(12513); // "00:12"
toTimeString(951233); // "15:51"
toTimeString(8765235); // "2:26:05"
```
|
PypiClean
|
/sample-factory-2.1.1.tar.gz/sample-factory-2.1.1/sample_factory/model/action_parameterization.py
|
import math
import torch
from torch import Tensor, nn
from sample_factory.algo.utils.action_distributions import (
calc_num_action_parameters,
get_action_distribution,
is_continuous_action_space,
)
class ActionsParameterization(nn.Module):
def __init__(self, cfg, action_space):
super().__init__()
self.cfg = cfg
self.action_space = action_space
class ActionParameterizationDefault(ActionsParameterization):
"""
A single fully-connected layer to output all parameters of the action distribution. Suitable for
categorical action distributions, as well as continuous actions with learned state-dependent stddev.
"""
def __init__(self, cfg, core_out_size, action_space):
super().__init__(cfg, action_space)
num_action_outputs = calc_num_action_parameters(action_space)
self.distribution_linear = nn.Linear(core_out_size, num_action_outputs)
def forward(self, actor_core_output):
"""Just forward the FC layer and generate the distribution object."""
action_distribution_params = self.distribution_linear(actor_core_output)
action_distribution = get_action_distribution(self.action_space, raw_logits=action_distribution_params)
return action_distribution_params, action_distribution
class ActionParameterizationContinuousNonAdaptiveStddev(ActionsParameterization):
"""Use a single learned parameter for action stddevs."""
def __init__(self, cfg, core_out_size, action_space):
super().__init__(cfg, action_space)
assert not cfg.adaptive_stddev
assert is_continuous_action_space(
self.action_space
), "Non-adaptive stddev makes sense only for continuous action spaces"
num_action_outputs = calc_num_action_parameters(action_space)
# calculate only action means using the policy neural network
self.distribution_linear = nn.Linear(core_out_size, num_action_outputs // 2)
self.tanh_scale: float = cfg.continuous_tanh_scale
# stddev is a single learned parameter
initial_stddev = torch.empty([num_action_outputs // 2])
initial_stddev.fill_(math.log(self.cfg.initial_stddev))
self.learned_stddev = nn.Parameter(initial_stddev, requires_grad=True)
def forward(self, actor_core_output: Tensor):
action_means = self.distribution_linear(actor_core_output)
if self.tanh_scale > 0:
# scale the action means to be in the range [-tanh_scale, tanh_scale]
# TODO: implement this for adaptive stddev case also?
action_means = torch.tanh(action_means / self.tanh_scale) * self.tanh_scale
batch_size = action_means.shape[0]
action_stddevs = self.learned_stddev.repeat(batch_size, 1)
action_distribution_params = torch.cat((action_means, action_stddevs), dim=1)
action_distribution = get_action_distribution(self.action_space, raw_logits=action_distribution_params)
return action_distribution_params, action_distribution
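# Usage sketch (the cfg fields, sizes and action space below are assumptions made
# purely for illustration, not part of this module):
#
#   parameterization = ActionParameterizationDefault(cfg, core_out_size=512,
#                                                    action_space=action_space)
#   params, dist = parameterization(core_output)  # core_output: [batch, 512]
#   actions = dist.sample()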
|
PypiClean
|
/django-post-request-task-0.5.tar.gz/django-post-request-task-0.5/post_request_task/task.py
|
import logging
import threading
from functools import partial
from django.core.signals import (got_request_exception, request_finished,
request_started)
from celery import shared_task as base_task
from celery import Task
log = logging.getLogger('post_request_task')
_locals = threading.local()
def _get_task_queue():
"""Returns the calling thread's task queue."""
return _locals.__dict__.setdefault('task_queue', [])
def _start_queuing_tasks(**kwargs):
"""Starts queuing tasks for this thread.
Not supposed to be called directly, instead it's connected to the
request_started signal.
If not called, tasks are delayed normally (so tasks still function without
having to call _send_tasks_and_stop_queuing() manually when we're outside
    the request-response cycle)."""
_locals.__dict__['task_queue_enabled'] = True
def _stop_queuing_tasks(**kwargs):
"""Stops queuing tasks for this thread.
Not supposed to be called directly, only useful for tests cleanup."""
_locals.__dict__['task_queue_enabled'] = False
def is_task_queuing_enabled_for_this_thread():
"""Returns whether post request task queuing is enabled for this thread."""
return _locals.__dict__.get('task_queue_enabled', False)
def _send_tasks_and_stop_queuing(**kwargs):
"""Sends all delayed Celery tasks and stop queuing new ones for now."""
log.info('Stopping queueing tasks and sending already queued ones.')
_stop_queuing_tasks()
task_queue = _get_task_queue()
while task_queue:
task, args, kwargs, extrakw = task_queue.pop(0)
task.original_apply_async(args=args, kwargs=kwargs, **extrakw)
def _discard_tasks(**kwargs):
"""Discards all delayed Celery tasks."""
task_queue = _get_task_queue()
log.info('Discarding %d queued tasks.', len(task_queue))
task_queue[:] = []
def _append_task(t):
"""Append a task to the queue.
Expected argument is a tuple of the following form:
(task class, args, kwargs, extra kwargs).
This doesn't append to queue if the argument is already in the queue.
"""
task_queue = _get_task_queue()
if t not in task_queue:
log.debug('Appended new task to the queue: %s.', t)
task_queue.append(t)
else:
log.debug('Did not append duplicate task to the queue: %s.', t)
return None
class PostRequestTask(Task):
"""A task whose execution is delayed until after the request finishes.
This simply wraps celery's `@app.task` and `@shared_task` decorators and
stores the task calls until after the request is finished, then fires them
off.
If no request was started in this thread, behaves exactly like the original
decorator, sending tasks to celery directly.
"""
abstract = True
def original_apply_async(self, args=None, kwargs=None, **extrakw):
return super(PostRequestTask, self).apply_async(
args=args, kwargs=kwargs, **extrakw)
def apply_async(self, args=None, kwargs=None, **extrakw):
if is_task_queuing_enabled_for_this_thread():
result = _append_task((self, args, kwargs, extrakw))
else:
result = self.original_apply_async(
args=args, kwargs=kwargs, **extrakw)
return result
# Replacement `@shared_task` decorator.
shared_task = partial(base_task, base=PostRequestTask)
# Hook the signal handlers up.
# Start queuing the tasks only if we're inside a request-response cycle thread.
request_started.connect(
_start_queuing_tasks, dispatch_uid='{}.request_started'.format(__name__))
# Send the tasks to celery and stop queuing when the request is finished.
request_finished.connect(
_send_tasks_and_stop_queuing,
dispatch_uid='{}.request_finished'.format(__name__))
# And make sure to discard the task queue when we have an exception in the
# request-response cycle.
got_request_exception.connect(
_discard_tasks, dispatch_uid='{}.got_request_exception'.format(__name__))
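# Usage sketch, assuming a celery app is configured elsewhere; the task name and
# arguments below are invented purely for illustration:
#
#   from post_request_task.task import shared_task
#
#   @shared_task(ignore_result=True)
#   def index_objects(ids):
#       ...  # expensive work executed by the worker
#
#   # Called inside a request (after request_started has fired), this only
#   # appends the task to the thread-local queue:
#   index_objects.delay([1, 2, 3])
#   # request_finished then triggers _send_tasks_and_stop_queuing(), which pops
#   # the queue and calls original_apply_async() for each stored task.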
|
PypiClean
|
/mw-adapter-transformers-3.2.1.tar.gz/mw-adapter-transformers-3.2.1/src/adapter_transformers/data/datasets/squad.py
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from torch.utils.data import Dataset
from filelock import FileLock
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
model_type: str = field(
default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
)
data_dir: str = field(
default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
)
max_seq_length: int = field(
default=128,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
doc_stride: int = field(
default=128,
metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
)
max_query_length: int = field(
default=64,
metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
},
)
max_answer_length: int = field(
default=30,
metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
version_2_with_negative: bool = field(
default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
)
null_score_diff_threshold: float = field(
default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
)
n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
)
lang_id: int = field(
default=0,
metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
},
)
threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
train = "train"
dev = "dev"
class SquadDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach soon.
"""
args: SquadDataTrainingArguments
features: List[SquadFeatures]
mode: Split
is_language_sensitive: bool
def __init__(
self,
args: SquadDataTrainingArguments,
tokenizer: PreTrainedTokenizer,
limit_length: Optional[int] = None,
mode: Union[str, Split] = Split.train,
is_language_sensitive: Optional[bool] = False,
cache_dir: Optional[str] = None,
dataset_format: Optional[str] = "pt",
):
self.args = args
self.is_language_sensitive = is_language_sensitive
self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name")
self.mode = mode
# Load data features from cache or dataset file
version_tag = "v2" if args.version_2_with_negative else "v1"
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,
f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not args.overwrite_cache:
start = time.time()
self.old_features = torch.load(cached_features_file)
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
self.features = self.old_features["features"]
self.dataset = self.old_features.get("dataset", None)
self.examples = self.old_features.get("examples", None)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run"
)
else:
if mode == Split.dev:
self.examples = self.processor.get_dev_examples(args.data_dir)
else:
self.examples = self.processor.get_train_examples(args.data_dir)
self.features, self.dataset = squad_convert_examples_to_features(
examples=self.examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=mode == Split.train,
threads=args.threads,
return_dataset=dataset_format,
)
start = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},
cached_features_file,
)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
feature = self.features[i]
input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask})
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible})
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
if self.mode == Split.train:
start_positions = torch.tensor(feature.start_position, dtype=torch.long)
end_positions = torch.tensor(feature.end_position, dtype=torch.long)
inputs.update({"start_positions": start_positions, "end_positions": end_positions})
return inputs
|
PypiClean
|
/django-codemirror2-0.2.tar.gz/django-codemirror2-0.2/codemirror2/static/codemirror2/mode/vb/vb.js
|
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("vb", function(conf, parserConf) {
var ERRORCLASS = 'error';
function wordRegexp(words) {
return new RegExp("^((" + words.join(")|(") + "))\\b", "i");
}
var singleOperators = new RegExp("^[\\+\\-\\*/%&\\\\|\\^~<>!]");
var singleDelimiters = new RegExp('^[\\(\\)\\[\\]\\{\\}@,:`=;\\.]');
var doubleOperators = new RegExp("^((==)|(<>)|(<=)|(>=)|(<>)|(<<)|(>>)|(//)|(\\*\\*))");
var doubleDelimiters = new RegExp("^((\\+=)|(\\-=)|(\\*=)|(%=)|(/=)|(&=)|(\\|=)|(\\^=))");
var tripleDelimiters = new RegExp("^((//=)|(>>=)|(<<=)|(\\*\\*=))");
var identifiers = new RegExp("^[_A-Za-z][_A-Za-z0-9]*");
var openingKeywords = ['class','module', 'sub','enum','select','while','if','function', 'get','set','property', 'try'];
var middleKeywords = ['else','elseif','case', 'catch'];
var endKeywords = ['next','loop'];
var operatorKeywords = ['and', 'or', 'not', 'xor', 'in'];
var wordOperators = wordRegexp(operatorKeywords);
var commonKeywords = ['as', 'dim', 'break', 'continue','optional', 'then', 'until',
'goto', 'byval','byref','new','handles','property', 'return',
'const','private', 'protected', 'friend', 'public', 'shared', 'static', 'true','false'];
var commontypes = ['integer','string','double','decimal','boolean','short','char', 'float','single'];
var keywords = wordRegexp(commonKeywords);
var types = wordRegexp(commontypes);
var stringPrefixes = '"';
var opening = wordRegexp(openingKeywords);
var middle = wordRegexp(middleKeywords);
var closing = wordRegexp(endKeywords);
var doubleClosing = wordRegexp(['end']);
var doOpening = wordRegexp(['do']);
var indentInfo = null;
CodeMirror.registerHelper("hintWords", "vb", openingKeywords.concat(middleKeywords).concat(endKeywords)
.concat(operatorKeywords).concat(commonKeywords).concat(commontypes));
function indent(_stream, state) {
state.currentIndent++;
}
function dedent(_stream, state) {
state.currentIndent--;
}
// tokenizers
function tokenBase(stream, state) {
if (stream.eatSpace()) {
return null;
}
var ch = stream.peek();
// Handle Comments
if (ch === "'") {
stream.skipToEnd();
return 'comment';
}
// Handle Number Literals
if (stream.match(/^((&H)|(&O))?[0-9\.a-f]/i, false)) {
var floatLiteral = false;
// Floats
if (stream.match(/^\d*\.\d+F?/i)) { floatLiteral = true; }
else if (stream.match(/^\d+\.\d*F?/)) { floatLiteral = true; }
else if (stream.match(/^\.\d+F?/)) { floatLiteral = true; }
if (floatLiteral) {
// Float literals may be "imaginary"
stream.eat(/J/i);
return 'number';
}
// Integers
var intLiteral = false;
// Hex
if (stream.match(/^&H[0-9a-f]+/i)) { intLiteral = true; }
// Octal
else if (stream.match(/^&O[0-7]+/i)) { intLiteral = true; }
// Decimal
else if (stream.match(/^[1-9]\d*F?/)) {
// Decimal literals may be "imaginary"
stream.eat(/J/i);
// TODO - Can you have imaginary longs?
intLiteral = true;
}
// Zero by itself with no other piece of number.
else if (stream.match(/^0(?![\dx])/i)) { intLiteral = true; }
if (intLiteral) {
// Integer literals may be "long"
stream.eat(/L/i);
return 'number';
}
}
// Handle Strings
if (stream.match(stringPrefixes)) {
state.tokenize = tokenStringFactory(stream.current());
return state.tokenize(stream, state);
}
// Handle operators and Delimiters
if (stream.match(tripleDelimiters) || stream.match(doubleDelimiters)) {
return null;
}
if (stream.match(doubleOperators)
|| stream.match(singleOperators)
|| stream.match(wordOperators)) {
return 'operator';
}
if (stream.match(singleDelimiters)) {
return null;
}
if (stream.match(doOpening)) {
indent(stream,state);
state.doInCurrentLine = true;
return 'keyword';
}
if (stream.match(opening)) {
if (! state.doInCurrentLine)
indent(stream,state);
else
state.doInCurrentLine = false;
return 'keyword';
}
if (stream.match(middle)) {
return 'keyword';
}
if (stream.match(doubleClosing)) {
dedent(stream,state);
dedent(stream,state);
return 'keyword';
}
if (stream.match(closing)) {
dedent(stream,state);
return 'keyword';
}
if (stream.match(types)) {
return 'keyword';
}
if (stream.match(keywords)) {
return 'keyword';
}
if (stream.match(identifiers)) {
return 'variable';
}
// Handle non-detected items
stream.next();
return ERRORCLASS;
}
function tokenStringFactory(delimiter) {
var singleline = delimiter.length == 1;
var OUTCLASS = 'string';
return function(stream, state) {
while (!stream.eol()) {
stream.eatWhile(/[^'"]/);
if (stream.match(delimiter)) {
state.tokenize = tokenBase;
return OUTCLASS;
} else {
stream.eat(/['"]/);
}
}
if (singleline) {
if (parserConf.singleLineStringErrors) {
return ERRORCLASS;
} else {
state.tokenize = tokenBase;
}
}
return OUTCLASS;
};
}
function tokenLexer(stream, state) {
var style = state.tokenize(stream, state);
var current = stream.current();
// Handle '.' connected identifiers
if (current === '.') {
style = state.tokenize(stream, state);
current = stream.current();
if (style === 'variable') {
return 'variable';
} else {
return ERRORCLASS;
}
}
var delimiter_index = '[({'.indexOf(current);
if (delimiter_index !== -1) {
indent(stream, state );
}
if (indentInfo === 'dedent') {
if (dedent(stream, state)) {
return ERRORCLASS;
}
}
delimiter_index = '])}'.indexOf(current);
if (delimiter_index !== -1) {
if (dedent(stream, state)) {
return ERRORCLASS;
}
}
return style;
}
var external = {
electricChars:"dDpPtTfFeE ",
startState: function() {
return {
tokenize: tokenBase,
lastToken: null,
currentIndent: 0,
nextLineIndent: 0,
doInCurrentLine: false
};
},
token: function(stream, state) {
if (stream.sol()) {
state.currentIndent += state.nextLineIndent;
state.nextLineIndent = 0;
state.doInCurrentLine = 0;
}
var style = tokenLexer(stream, state);
state.lastToken = {style:style, content: stream.current()};
return style;
},
indent: function(state, textAfter) {
var trueText = textAfter.replace(/^\s+|\s+$/g, '') ;
if (trueText.match(closing) || trueText.match(doubleClosing) || trueText.match(middle)) return conf.indentUnit*(state.currentIndent-1);
if(state.currentIndent < 0) return 0;
return state.currentIndent * conf.indentUnit;
},
lineComment: "'"
};
return external;
});
CodeMirror.defineMIME("text/x-vb", "vb");
});
|
PypiClean
|
/GenMotion-0.0.4-py3-none-any.whl/genmotion/algorithm/action_conditioned/utils/rotation_conversions.py
|
import functools
from typing import Optional
import torch
import torch.nn.functional as F
"""
The transformation matrices returned from the functions in this file assume
the points on which the transformation will be applied are column vectors.
i.e. the R matrix is structured as
R = [
[Rxx, Rxy, Rxz],
[Ryx, Ryy, Ryz],
[Rzx, Rzy, Rzz],
] # (3, 3)
This matrix can be applied to column vectors by post multiplication
by the points e.g.
points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point
transformed_points = R * points
To apply the same matrix to points which are row vectors, the R matrix
can be transposed and pre multiplied by the points:
e.g.
points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point
transformed_points = points * R.transpose(1, 0)
"""
def quaternion_to_matrix(quaternions):
"""
Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
def _copysign(a, b):
"""
    Return a tensor where each element has the absolute value taken from the
corresponding element of a, with sign taken from the corresponding
element of b. This is like the standard copysign floating-point operation,
but is not careful about negative 0 and NaN.
Args:
a: source tensor.
b: tensor whose signs will be used, of the same shape as a.
Returns:
Tensor of the same shape as a with the signs of b.
"""
signs_differ = (a < 0) != (b < 0)
return torch.where(signs_differ, -a, a)
def _sqrt_positive_part(x):
"""
Returns torch.sqrt(torch.max(0, x))
but with a zero subgradient where x is 0.
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
def matrix_to_quaternion(matrix):
"""
Convert rotations given as rotation matrices to quaternions.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.")
m00 = matrix[..., 0, 0]
m11 = matrix[..., 1, 1]
m22 = matrix[..., 2, 2]
o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2])
o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0])
o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1])
return torch.stack((o0, o1, o2, o3), -1)
def _axis_angle_rotation(axis: str, angle):
"""
    Return the rotation matrices for rotations about a single coordinate axis
    (one of the axes used by an Euler-angle convention), for each value of the angle given.
Args:
        axis: Axis label "X", "Y", or "Z".
angle: any shape tensor of Euler angles in radians
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
if axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
if axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
def euler_angles_to_matrix(euler_angles, convention: str):
"""
Convert rotations given as Euler angles in radians to rotation matrices.
Args:
euler_angles: Euler angles in radians as tensor of shape (..., 3).
convention: Convention string of three uppercase letters from
{"X", "Y", and "Z"}.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
raise ValueError("Invalid input euler angles.")
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1))
return functools.reduce(torch.matmul, matrices)
def _angle_from_tan(
axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
):
"""
Extract the first or third Euler angle from the two members of
the matrix which are positive constant times its sine and cosine.
Args:
        axis: Axis label "X", "Y", or "Z" for the angle we are finding.
        other_axis: Axis label "X", "Y", or "Z" for the middle axis in the
convention.
data: Rotation matrices as tensor of shape (..., 3, 3).
horizontal: Whether we are looking for the angle for the third axis,
which means the relevant entries are in the same row of the
rotation matrix. If not, they are in the same column.
tait_bryan: Whether the first and third axes in the convention differ.
Returns:
Euler Angles in radians for each matrix in data as a tensor
of shape (...).
"""
i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
if horizontal:
i2, i1 = i1, i2
even = (axis + other_axis) in ["XY", "YZ", "ZX"]
if horizontal == even:
return torch.atan2(data[..., i1], data[..., i2])
if tait_bryan:
return torch.atan2(-data[..., i2], data[..., i1])
return torch.atan2(data[..., i2], -data[..., i1])
def _index_from_letter(letter: str):
if letter == "X":
return 0
if letter == "Y":
return 1
if letter == "Z":
return 2
def matrix_to_euler_angles(matrix, convention: str):
"""
Convert rotations given as rotation matrices to Euler angles in radians.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
convention: Convention string of three uppercase letters.
Returns:
Euler angles in radians as tensor of shape (..., 3).
"""
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.")
i0 = _index_from_letter(convention[0])
i2 = _index_from_letter(convention[2])
tait_bryan = i0 != i2
if tait_bryan:
central_angle = torch.asin(
matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
)
else:
central_angle = torch.acos(matrix[..., i0, i0])
o = (
_angle_from_tan(
convention[0], convention[1], matrix[..., i2], False, tait_bryan
),
central_angle,
_angle_from_tan(
convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
),
)
return torch.stack(o, -1)
def random_quaternions(
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random quaternions representing rotations,
i.e. versors with nonnegative real part.
Args:
n: Number of quaternions in a batch to return.
dtype: Type to return.
device: Desired device of returned tensor. Default:
uses the current device for the default tensor type.
requires_grad: Whether the resulting tensor should have the gradient
flag set.
Returns:
Quaternions as tensor of shape (N, 4).
"""
o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad)
s = (o * o).sum(1)
o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None]
return o
def random_rotations(
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random rotations as 3x3 rotation matrices.
Args:
n: Number of rotation matrices in a batch to return.
dtype: Type to return.
device: Device of returned tensor. Default: if None,
uses the current device for the default tensor type.
requires_grad: Whether the resulting tensor should have the gradient
flag set.
Returns:
Rotation matrices as tensor of shape (n, 3, 3).
"""
quaternions = random_quaternions(
n, dtype=dtype, device=device, requires_grad=requires_grad
)
return quaternion_to_matrix(quaternions)
def random_rotation(
dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate a single random 3x3 rotation matrix.
Args:
dtype: Type to return
device: Device of returned tensor. Default: if None,
uses the current device for the default tensor type
requires_grad: Whether the resulting tensor should have the gradient
flag set
Returns:
Rotation matrix as tensor of shape (3, 3).
"""
return random_rotations(1, dtype, device, requires_grad)[0]
def standardize_quaternion(quaternions):
"""
Convert a unit quaternion to a standard form: one in which the real
part is non negative.
Args:
quaternions: Quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Standardized quaternions as tensor of shape (..., 4).
"""
return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions)
def quaternion_raw_multiply(a, b):
"""
Multiply two quaternions.
Usual torch rules for broadcasting apply.
Args:
a: Quaternions as tensor of shape (..., 4), real part first.
b: Quaternions as tensor of shape (..., 4), real part first.
Returns:
The product of a and b, a tensor of quaternions shape (..., 4).
"""
aw, ax, ay, az = torch.unbind(a, -1)
bw, bx, by, bz = torch.unbind(b, -1)
ow = aw * bw - ax * bx - ay * by - az * bz
ox = aw * bx + ax * bw + ay * bz - az * by
oy = aw * by - ax * bz + ay * bw + az * bx
oz = aw * bz + ax * by - ay * bx + az * bw
return torch.stack((ow, ox, oy, oz), -1)
def quaternion_multiply(a, b):
"""
Multiply two quaternions representing rotations, returning the quaternion
representing their composition, i.e. the versor with nonnegative real part.
Usual torch rules for broadcasting apply.
Args:
a: Quaternions as tensor of shape (..., 4), real part first.
b: Quaternions as tensor of shape (..., 4), real part first.
Returns:
The product of a and b, a tensor of quaternions of shape (..., 4).
"""
ab = quaternion_raw_multiply(a, b)
return standardize_quaternion(ab)
def quaternion_invert(quaternion):
"""
Given a quaternion representing rotation, get the quaternion representing
its inverse.
Args:
quaternion: Quaternions as tensor of shape (..., 4), with real part
first, which must be versors (unit quaternions).
Returns:
The inverse, a tensor of quaternions of shape (..., 4).
"""
return quaternion * quaternion.new_tensor([1, -1, -1, -1])
def quaternion_apply(quaternion, point):
"""
Apply the rotation given by a quaternion to a 3D point.
Usual torch rules for broadcasting apply.
Args:
quaternion: Tensor of quaternions, real part first, of shape (..., 4).
point: Tensor of 3D points of shape (..., 3).
Returns:
Tensor of rotated points of shape (..., 3).
"""
if point.size(-1) != 3:
raise ValueError(f"Points are not in 3D, f{point.shape}.")
real_parts = point.new_zeros(point.shape[:-1] + (1,))
point_as_quaternion = torch.cat((real_parts, point), -1)
out = quaternion_raw_multiply(
quaternion_raw_multiply(quaternion, point_as_quaternion),
quaternion_invert(quaternion),
)
return out[..., 1:]
def axis_angle_to_matrix(axis_angle):
"""
Convert rotations given as axis/angle to rotation matrices.
Args:
axis_angle: Rotations given as a vector in axis angle form,
as a tensor of shape (..., 3), where the magnitude is
the angle turned anticlockwise in radians around the
vector's direction.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle))
def matrix_to_axis_angle(matrix):
"""
Convert rotations given as rotation matrices to axis/angle.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
return quaternion_to_axis_angle(matrix_to_quaternion(matrix))
def axis_angle_to_quaternion(axis_angle):
"""
Convert rotations given as axis/angle to quaternions.
Args:
axis_angle: Rotations given as a vector in axis angle form,
as a tensor of shape (..., 3), where the magnitude is
the angle turned anticlockwise in radians around the
vector's direction.
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True)
half_angles = 0.5 * angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
quaternions = torch.cat(
[torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1
)
return quaternions
def quaternion_to_axis_angle(quaternions):
"""
Convert rotations given as quaternions to axis/angle.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
half_angles = torch.atan2(norms, quaternions[..., :1])
angles = 2 * half_angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
return quaternions[..., 1:] / sin_half_angles_over_angles
def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
"""
Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
using Gram--Schmidt orthogonalisation per Section B of [1].
Args:
d6: 6D rotation representation, of size (*, 6)
Returns:
batch of rotation matrices of size (*, 3, 3)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
a1, a2 = d6[..., :3], d6[..., 3:]
b1 = F.normalize(a1, dim=-1)
b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
b2 = F.normalize(b2, dim=-1)
b3 = torch.cross(b1, b2, dim=-1)
return torch.stack((b1, b2, b3), dim=-2)
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
"""
Converts rotation matrices to 6D rotation representation by Zhou et al. [1]
by dropping the last row. Note that 6D representation is not unique.
Args:
matrix: batch of rotation matrices of size (*, 3, 3)
Returns:
6D rotation representation, of size (*, 6)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6)
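if __name__ == "__main__":
    # Hedged self-check sketch (batch size and tolerance are arbitrary choices):
    # a proper rotation matrix survives the 6D round trip because its first two
    # rows already form an orthonormal pair and the third row is their cross
    # product.
    rots = random_rotations(4)
    recovered = rotation_6d_to_matrix(matrix_to_rotation_6d(rots))
    assert torch.allclose(rots, recovered, atol=1e-5)
    print("6D rotation round-trip OK")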
|
PypiClean
|
/odoo13_addon_ddmrp_packaging-13.0.1.7.0-py3-none-any.whl/odoo/addons/ddmrp_packaging/models/stock_buffer.py
|
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class StockBuffer(models.Model):
_inherit = "stock.buffer"
packaging_id = fields.Many2one(
string="Packaging",
comodel_name="product.packaging",
check_company=True,
domain="[('product_id', '=', product_id), "
"'|', ('company_id', '=', False), "
"('company_id', '=', company_id)]",
compute="_compute_packaging_id",
store=True,
readonly=False,
)
package_multiple = fields.Float()
# make qty_multiple a stored computed field:
qty_multiple = fields.Float(
compute="_compute_qty_multiple", store=True, readonly=False,
)
@api.constrains("product_id", "packaging_id")
def _check_product_packaging(self):
for rec in self:
if (
rec.packaging_id.product_id
and rec.packaging_id.product_id != rec.product_id
):
raise ValidationError(
_("Please, select a packaging of the buffered product.")
)
@api.depends("packaging_id", "packaging_id.qty", "package_multiple")
def _compute_qty_multiple(self):
for rec in self:
if rec.packaging_id.qty:
rec.qty_multiple = rec.packaging_id.qty * rec.package_multiple
else:
# Default value on parent definition
rec.qty_multiple = 1
@api.depends("product_id")
def _compute_packaging_id(self):
for rec in self:
if (
rec.product_id
and rec.packaging_id
and rec.packaging_id.product_id != rec.product_id
):
rec.packaging_id = False
@api.onchange("packaging_id", "procure_uom_id", "qty_multiple")
def _onchange_packaging_id(self):
res = self._check_package()
if not res:
# Check is Ok, we can change package multiple to keep alignment:
if self.packaging_id.qty:
self.package_multiple = self.qty_multiple / self.packaging_id.qty
return res
def _check_package(self):
pack = self.packaging_id
qty = self.qty_multiple
procure_uom = self.procure_uom_id or self.product_uom
q = self.product_uom._compute_quantity(pack.qty, procure_uom)
if qty and q and round(qty % q, 2):
newqty = qty - (qty % q) + q
return {
"warning": {
"title": _("Warning"),
"message": _(
"This product is packaged by %.2f %s. You should "
"set 'Qty Multiple' to %.2f %s."
)
% (pack.qty, self.product_uom.name, newqty, procure_uom.name),
},
}
return {}
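    # Illustrative check of the rounding above (values are assumed for the
    # example): with a packaging of 12 units and qty_multiple = 30,
    # 30 % 12 = 6, so the suggested quantity is 30 - 6 + 12 = 36, the next
    # multiple of the package size expressed in the procurement UoM.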
def _prepare_procurement_values(
self, product_qty, date=False, group=False,
):
values = super()._prepare_procurement_values(
product_qty=product_qty, date=date, group=group
)
if self.packaging_id:
values["product_packaging_id"] = self.packaging_id
return values
|
PypiClean
|
/tinc_graphs-0.3.11.tar.gz/tinc_graphs-0.3.11/tinc_graphs/Graph.py
|
from .BackwardsReader import BackwardsReader
import sys,json,os
from .Supernodes import check_all_the_super
from .Services import add_services
from .Availability import get_node_availability
import sys,json
from time import time
DUMP_FILE = os.environ.get("AVAILABILITY_FILE", "tinc-availability.json")
hostpath=os.environ.get("TINC_HOSTPATH", "/etc/tinc/retiolum/hosts")
# will be filled later
supernodes= []
def resolve_myself(nodes):
#resolve MYSELF to the real ip
for k,v in nodes.items():
if v["external-ip"] == "MYSELF":
for nodek,node in nodes.items():
for to in node['to']:
if to['name'] == k:
v["external-ip"] = to["addr"]
return nodes
def dump_graph(nodes):
from time import time
graph = {}
graph['nodes'] = nodes
graph['timestamp'] = time()
f = open(DUMP_FILE,'a')
json.dump(graph,f)
f.write('\n')
f.close()
def generate_availability_stats(nodes):
""" generates stats of from availability
"""
jlines = []
# try:
# f = BackwardsReader(DUMP_FILE)
# lines_to_use = 1000
# while True:
# if lines_to_use == 0: break
# line = f.readline()
# if not line: break
# jline = json.loads(line)
# if not jline['nodes']: continue
# jlines.append(jline)
# lines_to_use -=1
# except Exception as e: sys.stderr.write(str(e))
for k,v in nodes.items():
# TODO: get this information in a different way
v['availability'] = get_node_availability(k,[])
def generate_stats(nodes):
""" Generates some statistics of the network and nodes
"""
for k,v in nodes.items():
conns = v.get('to',[])
for c in conns: #sanitize weights
if float(c['weight']) > 9000: c['weight'] = str(9001)
elif float(c['weight']) < 0: c['weight'] = str(0)
v['num_conns'] = len(conns)
v['avg_weight'] = get_node_avg_weight(conns)
def get_node_avg_weight(conns):
""" calculates the average weight for the given connections """
if not conns:
    sys.stderr.write("get_node_avg_weight: connection parameter empty")
return 9001
else:
return sum([float(c['weight']) for c in conns])/len(conns)
def delete_unused_nodes(nodes):
""" Deletes all the nodes which are currently not connected to the network"""
new_nodes = {}
for k,v in nodes.items():
if v['external-ip'] == "(null)":
continue
if v.get('to',[]):
new_nodes[k] = v
  # drop entries whose connections all point to nodes outside the network
  unconnected = [k for k,v in new_nodes.items()
                 if not [ i for i in v['to'] if i['name'] in new_nodes]]
  for k in unconnected:
    del new_nodes[k]
  return new_nodes
def merge_edges(nodes):
""" merge back and forth edges into one
DESTRUCTS the current structure by deleting "connections" in the nodes
"""
for k,v in nodes.items():
for con in v.get('to',[]):
for i,secon in enumerate(nodes.get(con['name'],{}).get('to',[])):
if k == secon['name']:
del (nodes[con['name']]['to'][i])
con['bidirectional'] = True
def print_head():
print ('digraph retiolum {')
print (' graph [center=true packMode="clust"]')
print (' node[shape=box,style=filled,fillcolor=grey]')
print (' overlap=false')
def print_stat_node(nodes):
''' Write a `stats` node in the corner
  This node contains info about the current number of active nodes and connections inside the network
'''
from time import localtime,strftime
num_conns = 0
num_nodes = len(nodes)
  # leftover graphite reporting: g_path, s and begin are not defined in this
  # module, so the send always raises and is swallowed by the except below
  try:
    msg = '%s.num_nodes %d %d\r\n' %(g_path,num_nodes,begin)
    s.send(msg)
  except Exception as e: pass
for k,v in nodes.items():
num_conns+= len(v['to'])
node_text = " stats_node [label=\"Statistics\\l"
node_text += "Build Date : %s\\l" % strftime("%Y-%m-%d %H:%M:%S",localtime())
node_text += "Active Nodes: %s\\l" % num_nodes
node_text += "Connections : %s\\l" % num_conns
node_text += "\""
node_text += ",fillcolor=green"
node_text += "]"
print(node_text)
def print_node(k,v):
""" writes a single node and its edges
  edges are weighted with the information inside the nodes provided by
  tinc
"""
node = " "+k+"[label=<<TABLE border='0' title='%s' cellborder='1' >" %k
node += "<TR><TD colspan='2'><B>%s</B></TD></TR>"%k
if 'availability' in v:
node += "<TR><TD>availability:</TD><TD>%f</TD></TR>" % v['availability']
if 'num_conns' in v:
node += "<TR><TD>Num Connects:</TD><TD>%s</TD></TR>"%str(v['num_conns'])
node += "<TR><TD>external:</TD><TD>"+v['external-ip']+":"+v['external-port']+"</TD></TR>"
for addr in v.get('internal-ip',['dunno lol']):
node += "<TR><TD>internal:</TD><TD>%s</TD></TR>"%addr
if 'services' in v:
node +="<TR><TD colspan='2'><B>Services:</B></TD></TR>"
for service in v['services']:
try:uri,comment = service.split(" ",1)
except:
uri = service
comment =""
node +="<TR >"
uri_proto=uri.split(":")[0]
uri_rest = uri.split(":")[1]
if not uri_rest:
node +="<TD title='{0}' align='left' colspan='2' \
href='{0}'><font color='darkred'>{0}</font>".format(uri)
else:
node +="<TD title='{0}' align='left' colspan='2' \
href='{0}'><U>{0}</U>".format(uri)
if comment:
node += "<br align='left'/> <I>{0}</I>".format(comment)
node +="</TD></TR>"
# end label
node +="</TABLE>>"
if v['num_conns'] == 1:
node += ",fillcolor=red"
elif k in supernodes:
node += ",fillcolor=steelblue1"
node += "]"
print(node)
def print_anonymous_node(k,v):
""" writes a single node and its edges
  edges are weighted with the information inside the nodes provided by
tinc
"""
node = " "+k #+"[label=\""
print(node)
def print_edge(k,v):
for con in v.get('to',[]):
label = con['weight']
w = int(con['weight'])
weight = str(1000 - (((w - 150) * (1000 - 0)) / (1000 -150 )) + 0)
length = str(float(w)/1500)
if float(weight) < 0 :
weight= "1"
edge = " "+k+ " -> " +con['name'] + " [label="+label + " weight="+weight
if con.get('bidirectional',False):
edge += ",dir=both"
edge += "]"
print(edge)
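# Worked example for the weight mapping above (assumed tinc weights, shown for
# illustration only): print_edge linearly rescales a tinc edge weight in the
# range [150, 1000] to a graphviz weight in [1000, 0], e.g. w=150 -> 1000,
# w=575 -> 500, w=1000 -> 0; negative results are clamped to 1.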
def anonymize_nodes(nodes):
#anonymizes all nodes
i = "0"
newnodes = {}
for k,v in nodes.items():
for nodek,node in nodes.items():
for to in node['to']:
if to['name'] == k:
to['name'] = i
newnodes[i] = v
i = str(int(i)+1)
return newnodes
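# Input format expected by main() below (inferred from the accessors in this
# module, shown here only as an illustration): a JSON object on stdin mapping
# node names to dicts like
#   { "external-ip": "1.2.3.4", "external-port": "655",
#     "internal-ip": ["10.243.0.1"],
#     "to": [ {"name": "other", "addr": "5.6.7.8", "weight": "300"} ],
#     "services": ["http://10.243.0.1:80 some comment"] }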
def main():
if len(sys.argv) != 2 or sys.argv[1] not in ["anonymous","complete"]:
print("usage: %s (anonymous|complete)")
sys.exit(1)
nodes = json.load(sys.stdin)
nodes = delete_unused_nodes(nodes)
print_head()
generate_stats(nodes)
merge_edges(nodes)
if sys.argv[1] == "anonymous":
nodes = anonymize_nodes(nodes)
for k,v in nodes.items():
print_anonymous_node(k,v)
print_edge(k,v)
elif sys.argv[1] == "complete":
try:
for supernode,addr in check_all_the_super(hostpath):
supernodes.append(supernode)
except FileNotFoundError as e:
print("!! cannot load list of supernodes ({})".format(hostpath))
print("!! Use TINC_HOSTPATH env to override")
sys.exit(1)
generate_availability_stats(nodes)
add_services(nodes)
for k,v in nodes.items():
print_node(k,v)
print_edge(k,v)
#TODO: get availability somehow else
# try:
# dump_graph(nodes)
# except Exception as e:
# sys.stderr.write("Cannot dump graph: %s" % str(e))
else:
pass
print_stat_node(nodes)
print ('}')
if __name__ == "__main__":
main()
# vim: set sw=2:ts=2
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/DatadigitalFincloudFinsaasPutplanListBatchqueryModel.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
class DatadigitalFincloudFinsaasPutplanListBatchqueryModel(object):
def __init__(self):
self._channel_category = None
self._name = None
self._page = None
self._size = None
self._status = None
self._tenant_code = None
self._user_id = None
@property
def channel_category(self):
return self._channel_category
@channel_category.setter
def channel_category(self, value):
self._channel_category = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def page(self):
return self._page
@page.setter
def page(self, value):
self._page = value
@property
def size(self):
return self._size
@size.setter
def size(self, value):
self._size = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def tenant_code(self):
return self._tenant_code
@tenant_code.setter
def tenant_code(self, value):
self._tenant_code = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.channel_category:
if hasattr(self.channel_category, 'to_alipay_dict'):
params['channel_category'] = self.channel_category.to_alipay_dict()
else:
params['channel_category'] = self.channel_category
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.page:
if hasattr(self.page, 'to_alipay_dict'):
params['page'] = self.page.to_alipay_dict()
else:
params['page'] = self.page
if self.size:
if hasattr(self.size, 'to_alipay_dict'):
params['size'] = self.size.to_alipay_dict()
else:
params['size'] = self.size
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.tenant_code:
if hasattr(self.tenant_code, 'to_alipay_dict'):
params['tenant_code'] = self.tenant_code.to_alipay_dict()
else:
params['tenant_code'] = self.tenant_code
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DatadigitalFincloudFinsaasPutplanListBatchqueryModel()
if 'channel_category' in d:
o.channel_category = d['channel_category']
if 'name' in d:
o.name = d['name']
if 'page' in d:
o.page = d['page']
if 'size' in d:
o.size = d['size']
if 'status' in d:
o.status = d['status']
if 'tenant_code' in d:
o.tenant_code = d['tenant_code']
if 'user_id' in d:
o.user_id = d['user_id']
return o
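# Minimal usage sketch (illustrative only; the values below are made up):
#
#   model = DatadigitalFincloudFinsaasPutplanListBatchqueryModel()
#   model.tenant_code = "TENANT_001"
#   model.page = 1
#   model.size = 20
#   params = model.to_alipay_dict()   # plain dict ready to be sent with the request
#   clone = DatadigitalFincloudFinsaasPutplanListBatchqueryModel.from_alipay_dict(params)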
|
PypiClean
|
/vioneta-2023.7.3.tar.gz/vioneta-2023.7.3/homeassistant/util/unit_system.py
|
from __future__ import annotations
from numbers import Number
from typing import TYPE_CHECKING, Final
import voluptuous as vol
from homeassistant.const import (
ACCUMULATED_PRECIPITATION,
LENGTH,
MASS,
PRESSURE,
TEMPERATURE,
UNIT_NOT_RECOGNIZED_TEMPLATE,
VOLUME,
WIND_SPEED,
UnitOfLength,
UnitOfMass,
UnitOfPrecipitationDepth,
UnitOfPressure,
UnitOfSpeed,
UnitOfTemperature,
UnitOfVolume,
UnitOfVolumetricFlux,
)
from .unit_conversion import (
DistanceConverter,
PressureConverter,
SpeedConverter,
TemperatureConverter,
VolumeConverter,
)
if TYPE_CHECKING:
from homeassistant.components.sensor import SensorDeviceClass
_CONF_UNIT_SYSTEM_IMPERIAL: Final = "imperial"
_CONF_UNIT_SYSTEM_METRIC: Final = "metric"
_CONF_UNIT_SYSTEM_US_CUSTOMARY: Final = "us_customary"
LENGTH_UNITS = DistanceConverter.VALID_UNITS
MASS_UNITS: set[str] = {
UnitOfMass.POUNDS,
UnitOfMass.OUNCES,
UnitOfMass.KILOGRAMS,
UnitOfMass.GRAMS,
}
PRESSURE_UNITS = PressureConverter.VALID_UNITS
VOLUME_UNITS = VolumeConverter.VALID_UNITS
WIND_SPEED_UNITS = SpeedConverter.VALID_UNITS
TEMPERATURE_UNITS: set[str] = {UnitOfTemperature.FAHRENHEIT, UnitOfTemperature.CELSIUS}
def _is_valid_unit(unit: str, unit_type: str) -> bool:
"""Check if the unit is valid for it's type."""
if unit_type == LENGTH:
return unit in LENGTH_UNITS
if unit_type == ACCUMULATED_PRECIPITATION:
return unit in LENGTH_UNITS
if unit_type == WIND_SPEED:
return unit in WIND_SPEED_UNITS
if unit_type == TEMPERATURE:
return unit in TEMPERATURE_UNITS
if unit_type == MASS:
return unit in MASS_UNITS
if unit_type == VOLUME:
return unit in VOLUME_UNITS
if unit_type == PRESSURE:
return unit in PRESSURE_UNITS
return False
class UnitSystem:
"""A container for units of measure."""
def __init__(
self,
name: str,
*,
accumulated_precipitation: UnitOfPrecipitationDepth,
conversions: dict[tuple[SensorDeviceClass | str | None, str | None], str],
length: UnitOfLength,
mass: UnitOfMass,
pressure: UnitOfPressure,
temperature: UnitOfTemperature,
volume: UnitOfVolume,
wind_speed: UnitOfSpeed,
) -> None:
"""Initialize the unit system object."""
errors: str = ", ".join(
UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit, unit_type)
for unit, unit_type in (
(accumulated_precipitation, ACCUMULATED_PRECIPITATION),
(temperature, TEMPERATURE),
(length, LENGTH),
(wind_speed, WIND_SPEED),
(volume, VOLUME),
(mass, MASS),
(pressure, PRESSURE),
)
if not _is_valid_unit(unit, unit_type)
)
if errors:
raise ValueError(errors)
self._name = name
self.accumulated_precipitation_unit = accumulated_precipitation
self.temperature_unit = temperature
self.length_unit = length
self.mass_unit = mass
self.pressure_unit = pressure
self.volume_unit = volume
self.wind_speed_unit = wind_speed
self._conversions = conversions
def temperature(self, temperature: float, from_unit: str) -> float:
"""Convert the given temperature to this unit system."""
if not isinstance(temperature, Number):
raise TypeError(f"{temperature!s} is not a numeric value.")
return TemperatureConverter.convert(
temperature, from_unit, self.temperature_unit
)
def length(self, length: float | None, from_unit: str) -> float:
"""Convert the given length to this unit system."""
if not isinstance(length, Number):
raise TypeError(f"{length!s} is not a numeric value.")
# type ignore: https://github.com/python/mypy/issues/7207
return DistanceConverter.convert( # type: ignore[unreachable]
length, from_unit, self.length_unit
)
def accumulated_precipitation(self, precip: float | None, from_unit: str) -> float:
"""Convert the given length to this unit system."""
if not isinstance(precip, Number):
raise TypeError(f"{precip!s} is not a numeric value.")
# type ignore: https://github.com/python/mypy/issues/7207
return DistanceConverter.convert( # type: ignore[unreachable]
precip, from_unit, self.accumulated_precipitation_unit
)
def pressure(self, pressure: float | None, from_unit: str) -> float:
"""Convert the given pressure to this unit system."""
if not isinstance(pressure, Number):
raise TypeError(f"{pressure!s} is not a numeric value.")
# type ignore: https://github.com/python/mypy/issues/7207
return PressureConverter.convert( # type: ignore[unreachable]
pressure, from_unit, self.pressure_unit
)
def wind_speed(self, wind_speed: float | None, from_unit: str) -> float:
"""Convert the given wind_speed to this unit system."""
if not isinstance(wind_speed, Number):
raise TypeError(f"{wind_speed!s} is not a numeric value.")
# type ignore: https://github.com/python/mypy/issues/7207
return SpeedConverter.convert( # type: ignore[unreachable]
wind_speed, from_unit, self.wind_speed_unit
)
def volume(self, volume: float | None, from_unit: str) -> float:
"""Convert the given volume to this unit system."""
if not isinstance(volume, Number):
raise TypeError(f"{volume!s} is not a numeric value.")
# type ignore: https://github.com/python/mypy/issues/7207
return VolumeConverter.convert( # type: ignore[unreachable]
volume, from_unit, self.volume_unit
)
def as_dict(self) -> dict[str, str]:
"""Convert the unit system to a dictionary."""
return {
LENGTH: self.length_unit,
ACCUMULATED_PRECIPITATION: self.accumulated_precipitation_unit,
MASS: self.mass_unit,
PRESSURE: self.pressure_unit,
TEMPERATURE: self.temperature_unit,
VOLUME: self.volume_unit,
WIND_SPEED: self.wind_speed_unit,
}
def get_converted_unit(
self,
device_class: SensorDeviceClass | str | None,
original_unit: str | None,
) -> str | None:
"""Return converted unit given a device class or an original unit."""
return self._conversions.get((device_class, original_unit))
def get_unit_system(key: str) -> UnitSystem:
"""Get unit system based on key."""
if key == _CONF_UNIT_SYSTEM_US_CUSTOMARY:
return US_CUSTOMARY_SYSTEM
if key == _CONF_UNIT_SYSTEM_METRIC:
return METRIC_SYSTEM
raise ValueError(f"`{key}` is not a valid unit system key")
def _deprecated_unit_system(value: str) -> str:
"""Convert deprecated unit system."""
if value == _CONF_UNIT_SYSTEM_IMPERIAL:
# need to add warning in 2023.1
return _CONF_UNIT_SYSTEM_US_CUSTOMARY
return value
validate_unit_system = vol.All(
vol.Lower,
_deprecated_unit_system,
vol.Any(_CONF_UNIT_SYSTEM_METRIC, _CONF_UNIT_SYSTEM_US_CUSTOMARY),
)
METRIC_SYSTEM = UnitSystem(
_CONF_UNIT_SYSTEM_METRIC,
accumulated_precipitation=UnitOfPrecipitationDepth.MILLIMETERS,
conversions={
# Force atmospheric pressures to hPa
**{
("atmospheric_pressure", unit): UnitOfPressure.HPA
for unit in UnitOfPressure
if unit != UnitOfPressure.HPA
},
# Convert non-metric distances
("distance", UnitOfLength.FEET): UnitOfLength.METERS,
("distance", UnitOfLength.INCHES): UnitOfLength.MILLIMETERS,
("distance", UnitOfLength.MILES): UnitOfLength.KILOMETERS,
("distance", UnitOfLength.YARDS): UnitOfLength.METERS,
# Convert non-metric volumes of gas meters
("gas", UnitOfVolume.CENTUM_CUBIC_FEET): UnitOfVolume.CUBIC_METERS,
("gas", UnitOfVolume.CUBIC_FEET): UnitOfVolume.CUBIC_METERS,
# Convert non-metric precipitation
("precipitation", UnitOfLength.INCHES): UnitOfLength.MILLIMETERS,
# Convert non-metric precipitation intensity
(
"precipitation_intensity",
UnitOfVolumetricFlux.INCHES_PER_DAY,
): UnitOfVolumetricFlux.MILLIMETERS_PER_DAY,
(
"precipitation_intensity",
UnitOfVolumetricFlux.INCHES_PER_HOUR,
): UnitOfVolumetricFlux.MILLIMETERS_PER_HOUR,
# Convert non-metric pressure
("pressure", UnitOfPressure.PSI): UnitOfPressure.KPA,
("pressure", UnitOfPressure.INHG): UnitOfPressure.HPA,
# Convert non-metric speeds except knots to km/h
("speed", UnitOfSpeed.FEET_PER_SECOND): UnitOfSpeed.KILOMETERS_PER_HOUR,
("speed", UnitOfSpeed.MILES_PER_HOUR): UnitOfSpeed.KILOMETERS_PER_HOUR,
(
"speed",
UnitOfVolumetricFlux.INCHES_PER_DAY,
): UnitOfVolumetricFlux.MILLIMETERS_PER_DAY,
(
"speed",
UnitOfVolumetricFlux.INCHES_PER_HOUR,
): UnitOfVolumetricFlux.MILLIMETERS_PER_HOUR,
# Convert non-metric volumes
("volume", UnitOfVolume.CENTUM_CUBIC_FEET): UnitOfVolume.CUBIC_METERS,
("volume", UnitOfVolume.CUBIC_FEET): UnitOfVolume.CUBIC_METERS,
("volume", UnitOfVolume.FLUID_OUNCES): UnitOfVolume.MILLILITERS,
("volume", UnitOfVolume.GALLONS): UnitOfVolume.LITERS,
# Convert non-metric volumes of water meters
("water", UnitOfVolume.CENTUM_CUBIC_FEET): UnitOfVolume.CUBIC_METERS,
("water", UnitOfVolume.CUBIC_FEET): UnitOfVolume.CUBIC_METERS,
("water", UnitOfVolume.GALLONS): UnitOfVolume.LITERS,
# Convert wind speeds except knots to km/h
**{
("wind_speed", unit): UnitOfSpeed.KILOMETERS_PER_HOUR
for unit in UnitOfSpeed
if unit not in (UnitOfSpeed.KILOMETERS_PER_HOUR, UnitOfSpeed.KNOTS)
},
},
length=UnitOfLength.KILOMETERS,
mass=UnitOfMass.GRAMS,
pressure=UnitOfPressure.PA,
temperature=UnitOfTemperature.CELSIUS,
volume=UnitOfVolume.LITERS,
wind_speed=UnitOfSpeed.METERS_PER_SECOND,
)
US_CUSTOMARY_SYSTEM = UnitSystem(
_CONF_UNIT_SYSTEM_US_CUSTOMARY,
accumulated_precipitation=UnitOfPrecipitationDepth.INCHES,
conversions={
# Force atmospheric pressures to inHg
**{
("atmospheric_pressure", unit): UnitOfPressure.INHG
for unit in UnitOfPressure
if unit != UnitOfPressure.INHG
},
# Convert non-USCS distances
("distance", UnitOfLength.CENTIMETERS): UnitOfLength.INCHES,
("distance", UnitOfLength.KILOMETERS): UnitOfLength.MILES,
("distance", UnitOfLength.METERS): UnitOfLength.FEET,
("distance", UnitOfLength.MILLIMETERS): UnitOfLength.INCHES,
# Convert non-USCS volumes of gas meters
("gas", UnitOfVolume.CUBIC_METERS): UnitOfVolume.CUBIC_FEET,
# Convert non-USCS precipitation
("precipitation", UnitOfLength.CENTIMETERS): UnitOfLength.INCHES,
("precipitation", UnitOfLength.MILLIMETERS): UnitOfLength.INCHES,
# Convert non-USCS precipitation intensity
(
"precipitation_intensity",
UnitOfVolumetricFlux.MILLIMETERS_PER_DAY,
): UnitOfVolumetricFlux.INCHES_PER_DAY,
(
"precipitation_intensity",
UnitOfVolumetricFlux.MILLIMETERS_PER_HOUR,
): UnitOfVolumetricFlux.INCHES_PER_HOUR,
# Convert non-USCS pressure
("pressure", UnitOfPressure.MBAR): UnitOfPressure.PSI,
("pressure", UnitOfPressure.CBAR): UnitOfPressure.PSI,
("pressure", UnitOfPressure.BAR): UnitOfPressure.PSI,
("pressure", UnitOfPressure.PA): UnitOfPressure.PSI,
("pressure", UnitOfPressure.HPA): UnitOfPressure.PSI,
("pressure", UnitOfPressure.KPA): UnitOfPressure.PSI,
("pressure", UnitOfPressure.MMHG): UnitOfPressure.INHG,
# Convert non-USCS speeds, except knots, to mph
("speed", UnitOfSpeed.METERS_PER_SECOND): UnitOfSpeed.MILES_PER_HOUR,
("speed", UnitOfSpeed.KILOMETERS_PER_HOUR): UnitOfSpeed.MILES_PER_HOUR,
(
"speed",
UnitOfVolumetricFlux.MILLIMETERS_PER_DAY,
): UnitOfVolumetricFlux.INCHES_PER_DAY,
(
"speed",
UnitOfVolumetricFlux.MILLIMETERS_PER_HOUR,
): UnitOfVolumetricFlux.INCHES_PER_HOUR,
# Convert non-USCS volumes
("volume", UnitOfVolume.CUBIC_METERS): UnitOfVolume.CUBIC_FEET,
("volume", UnitOfVolume.LITERS): UnitOfVolume.GALLONS,
("volume", UnitOfVolume.MILLILITERS): UnitOfVolume.FLUID_OUNCES,
# Convert non-USCS volumes of water meters
("water", UnitOfVolume.CUBIC_METERS): UnitOfVolume.CUBIC_FEET,
("water", UnitOfVolume.LITERS): UnitOfVolume.GALLONS,
# Convert wind speeds except knots to mph
**{
("wind_speed", unit): UnitOfSpeed.MILES_PER_HOUR
for unit in UnitOfSpeed
if unit not in (UnitOfSpeed.KNOTS, UnitOfSpeed.MILES_PER_HOUR)
},
},
length=UnitOfLength.MILES,
mass=UnitOfMass.POUNDS,
pressure=UnitOfPressure.PSI,
temperature=UnitOfTemperature.FAHRENHEIT,
volume=UnitOfVolume.GALLONS,
wind_speed=UnitOfSpeed.MILES_PER_HOUR,
)
IMPERIAL_SYSTEM = US_CUSTOMARY_SYSTEM
"""IMPERIAL_SYSTEM is deprecated. Please use US_CUSTOMARY_SYSTEM instead."""
|
PypiClean
|
/ethereum-etl-optimized-2.1.2.tar.gz/ethereum-etl-optimized-2.1.2/ethereumetl/service/eth_token_service.py
|
import logging
from web3.exceptions import BadFunctionCallOutput, ContractLogicError
from ethereumetl.domain.token import EthToken
from ethereumetl.erc20_abi import ERC20_ABI, ERC20_ABI_ALTERNATIVE_1
logger = logging.getLogger('eth_token_service')
class EthTokenService(object):
def __init__(self, web3, function_call_result_transformer=None):
self._web3 = web3
self._function_call_result_transformer = function_call_result_transformer
def get_token(self, token_address):
checksum_address = self._web3.toChecksumAddress(token_address)
contract = self._web3.eth.contract(address=checksum_address, abi=ERC20_ABI)
contract_alternative_1 = self._web3.eth.contract(address=checksum_address, abi=ERC20_ABI_ALTERNATIVE_1)
symbol = self._get_first_result(
contract.functions.symbol(),
contract.functions.SYMBOL(),
contract_alternative_1.functions.symbol(),
contract_alternative_1.functions.SYMBOL(),
)
if isinstance(symbol, bytes):
symbol = self._bytes_to_string(symbol)
name = self._get_first_result(
contract.functions.name(),
contract.functions.NAME(),
contract_alternative_1.functions.name(),
contract_alternative_1.functions.NAME(),
)
if isinstance(name, bytes):
name = self._bytes_to_string(name)
decimals = self._get_first_result(contract.functions.decimals(), contract.functions.DECIMALS())
total_supply = self._get_first_result(contract.functions.totalSupply())
token = EthToken()
token.address = token_address
token.symbol = symbol
token.name = name
token.decimals = decimals
token.total_supply = total_supply
return token
def _get_first_result(self, *funcs):
for func in funcs:
result = self._call_contract_function(func)
if result is not None:
return result
return None
def _call_contract_function(self, func):
# BadFunctionCallOutput exception happens if the token doesn't implement a particular function
# or was self-destructed
# OverflowError exception happens if the return type of the function doesn't match the expected type
result = call_contract_function(
func=func,
ignore_errors=(BadFunctionCallOutput, ContractLogicError, OverflowError, ValueError),
default_value=None)
if self._function_call_result_transformer is not None:
return self._function_call_result_transformer(result)
else:
return result
def _bytes_to_string(self, b, ignore_errors=True):
if b is None:
return b
try:
b = b.decode('utf-8')
except UnicodeDecodeError as e:
if ignore_errors:
logger.debug('A UnicodeDecodeError exception occurred while trying to decode bytes to string', exc_info=True)
b = None
else:
raise e
if self._function_call_result_transformer is not None:
b = self._function_call_result_transformer(b)
return b
def call_contract_function(func, ignore_errors, default_value=None):
try:
result = func.call()
return result
except Exception as ex:
if type(ex) in ignore_errors:
logger.debug('An exception occurred in function {} of contract {}. '.format(func.fn_name, func.address)
+ 'This exception can be safely ignored.', exc_info=True)
return default_value
else:
raise ex
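# Usage sketch (illustrative; the provider URL and token address are
# placeholders, not part of this module):
#
#   from web3 import Web3
#   web3 = Web3(Web3.HTTPProvider("http://localhost:8545"))
#   service = EthTokenService(web3)
#   token = service.get_token("0x0000000000000000000000000000000000000000")
#   print(token.symbol, token.name, token.decimals, token.total_supply)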
|
PypiClean
|
/cnext_test-0.7.5-py3-none-any.whl/cnext_server/server/publish/_next/static/chunks/651.bfd69ea01d64d954.js
|
"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[651],{99651:function(e,t,n){var r=n(33227),a=n(88361),o=n(85971),i=n(52715),l=n(91193);function u(e){var t=function(){if("undefined"===typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"===typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var n,r=l(e);if(t){var a=l(this).constructor;n=Reflect.construct(r,arguments,a)}else n=r.apply(this,arguments);return i(this,n)}}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var d=n(92648).Z,c=d(n(67294)),f=d(n(72717)),s={400:"Bad Request",404:"This page could not be found",405:"Method Not Allowed",500:"Internal Server Error"};function p(e){var t=e.res,n=e.err;return{statusCode:t&&t.statusCode?t.statusCode:n?n.statusCode:404}}var h={error:{fontFamily:'-apple-system, BlinkMacSystemFont, Roboto, "Segoe UI", "Fira Sans", Avenir, "Helvetica Neue", "Lucida Grande", sans-serif',height:"100vh",textAlign:"center",display:"flex",flexDirection:"column",alignItems:"center",justifyContent:"center"},desc:{display:"inline-block",textAlign:"left",lineHeight:"49px",height:"49px",verticalAlign:"middle"},h1:{display:"inline-block",margin:0,marginRight:"20px",padding:"0 23px 0 0",fontSize:"24px",fontWeight:500,verticalAlign:"top",lineHeight:"49px"},h2:{fontSize:"14px",fontWeight:"normal",lineHeight:"49px",margin:0,padding:0}},m=function(e){o(n,e);var t=u(n);function n(){return r(this,n),t.apply(this,arguments)}return a(n,[{key:"render",value:function(){var e=this.props,t=e.statusCode,n=e.withDarkMode,r=void 0===n||n,a=this.props.title||s[t]||"An unexpected error has occurred";return c.default.createElement("div",{style:h.error},c.default.createElement(f.default,null,c.default.createElement("title",null,t?"".concat(t,": ").concat(a):"Application error: a client-side exception has occurred")),c.default.createElement("div",null,c.default.createElement("style",{dangerouslySetInnerHTML:{__html:"\n body { margin: 0; color: #000; background: #fff; }\n .next-error-h1 {\n border-right: 1px solid rgba(0, 0, 0, .3);\n }\n\n ".concat(r?"@media (prefers-color-scheme: dark) {\n body { color: #fff; background: #000; }\n .next-error-h1 {\n border-right: 1px solid rgba(255, 255, 255, .3);\n }\n }":"")}}),t?c.default.createElement("h1",{className:"next-error-h1",style:h.h1},t):null,c.default.createElement("div",{style:h.desc},c.default.createElement("h2",{style:h.h2},this.props.title||t?a:c.default.createElement(c.default.Fragment,null,"Application error: a client-side exception has occurred (see the browser console for more information)"),"."))))}}]),n}(c.default.Component);m.displayName="ErrorPage",m.getInitialProps=p,m.origGetInitialProps=p,t.default=m},78e3:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0}),t.AmpStateContext=void 0;var r=(0,n(92648).Z)(n(67294)).default.createContext({});t.AmpStateContext=r},9470:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.isInAmpMode=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.ampFirst,n=void 0!==t&&t,r=e.hybrid,a=void 0!==r&&r,o=e.hasQuery,i=void 0!==o&&o;return n||a&&i}},72717:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0}),t.defaultHead=c,t.default=void 0;var r=n(6495).Z,a=n(92648).Z,o=(0,n(91598).Z)(n(67294)),i=a(n(11585)),l=n(78e3),u=n(15850),d=n(9470);n(99475);function c(){var e=arguments.length>0&&void 
0!==arguments[0]&&arguments[0],t=[o.default.createElement("meta",{charSet:"utf-8"})];return e||t.push(o.default.createElement("meta",{name:"viewport",content:"width=device-width"})),t}function f(e,t){return"string"===typeof t||"number"===typeof t?e:t.type===o.default.Fragment?e.concat(o.default.Children.toArray(t.props.children).reduce((function(e,t){return"string"===typeof t||"number"===typeof t?e:e.concat(t)}),[])):e.concat(t)}var s=["name","httpEquiv","charSet","itemProp"];function p(e,t){return e.reduce(f,[]).reverse().concat(c(t.inAmpMode).reverse()).filter(function(){var e=new Set,t=new Set,n=new Set,r={};return function(a){var o=!0,i=!1;if(a.key&&"number"!==typeof a.key&&a.key.indexOf("$")>0){i=!0;var l=a.key.slice(a.key.indexOf("$")+1);e.has(l)?o=!1:e.add(l)}switch(a.type){case"title":case"base":t.has(a.type)?o=!1:t.add(a.type);break;case"meta":for(var u=0,d=s.length;u<d;u++){var c=s[u];if(a.props.hasOwnProperty(c))if("charSet"===c)n.has(c)?o=!1:n.add(c);else{var f=a.props[c],p=r[c]||new Set;"name"===c&&i||!p.has(f)?(p.add(f),r[c]=p):o=!1}}}return o}}()).reverse().map((function(e,n){var a=e.key||n;if(!t.inAmpMode&&"link"===e.type&&e.props.href&&["https://fonts.googleapis.com/css","https://use.typekit.net/"].some((function(t){return e.props.href.startsWith(t)}))){var i=r({},e.props||{});return i["data-href"]=i.href,i.href=void 0,i["data-optimized-fonts"]=!0,o.default.cloneElement(e,i)}return o.default.cloneElement(e,{key:a})}))}var h=function(e){var t=e.children,n=o.useContext(l.AmpStateContext),r=o.useContext(u.HeadManagerContext);return o.default.createElement(i.default,{reduceComponentsToState:p,headManager:r,inAmpMode:d.isInAmpMode(n)},t)};t.default=h,("function"===typeof t.default||"object"===typeof t.default&&null!==t.default)&&"undefined"===typeof t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},11585:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e){var t=e.headManager,n=e.reduceComponentsToState;function l(){if(t&&t.mountedInstances){var a=r.Children.toArray(Array.from(t.mountedInstances).filter(Boolean));t.updateHead(n(a,e))}}if(a){var u;null==t||null==(u=t.mountedInstances)||u.add(e.children),l()}return o((function(){var n;return null==t||null==(n=t.mountedInstances)||n.add(e.children),function(){var n;null==t||null==(n=t.mountedInstances)||n.delete(e.children)}})),o((function(){return t&&(t._pendingUpdate=l),function(){t&&(t._pendingUpdate=l)}})),i((function(){return t&&t._pendingUpdate&&(t._pendingUpdate(),t._pendingUpdate=null),function(){t&&t._pendingUpdate&&(t._pendingUpdate(),t._pendingUpdate=null)}})),null};var r=(0,n(91598).Z)(n(67294));var a=!1,o=a?function(){}:r.useLayoutEffect,i=a?function(){}:r.useEffect}}]);
|
PypiClean
|
/shapelets_solo-0.5.2-py3-none-any.whl/shapelets/shapelets.py
|
import base64
from enum import Enum
import dill
import numpy as np
import os
import pandas as pd
import time
import typing
import urllib.parse
from shapelets.dsl import (
DataApp,
NodeReturnType,
SupportedTypes
)
from shapelets.model import (
Collection,
CollectionType,
Dataframe,
FunctionDescription,
Model,
NDArray,
Permission,
Sequence,
SequenceMetadata
)
from shapelets.services import (
CollectionsService,
DataAppService,
DataframeService,
ExecutionService,
FunctionsService,
LoginService,
MetadataService,
ModelsService,
NDArraysService,
read_user_from_login_file,
ShapeletsLoginException,
SequencesService,
TestService,
UsersService
)
class Services(Enum):
COLLECTIONS = "collections_service"
DATA_APP = "data_app_service"
DATAFRAMES = "dataframe_service"
EXECUTION = "execution_service"
FUNCTIONS = "functions_service"
MODELS = "models_service"
METADATA = "metadata_service"
NDARRAYS = "ndarrays_service"
SEQUENCES = "sequences_service"
TEST = "test_service"
USERS = "users_service"
class Shapelets:
"""
This class acts as a client for the Shapelets platform, it holds the user session.
"""
def __init__(self, login_service: LoginService):
base_url = login_service.base_url
cookies = login_service.cookies
self.services = {
Services.COLLECTIONS: CollectionsService(base_url, cookies),
Services.DATA_APP: DataAppService(base_url, cookies),
Services.DATAFRAMES: DataframeService(base_url, cookies),
Services.EXECUTION: ExecutionService(base_url, cookies),
Services.FUNCTIONS: FunctionsService(base_url, cookies),
Services.METADATA: MetadataService(base_url, cookies),
Services.MODELS: ModelsService(base_url, cookies),
Services.NDARRAYS: NDArraysService(base_url, cookies),
Services.SEQUENCES: SequencesService(base_url, cookies),
Services.TEST: TestService(base_url, cookies),
Services.USERS: UsersService(base_url, cookies)
}
self.services[Services.FUNCTIONS].download_dsl()
# ########################### #
# CollectionsService methods: #
# ########################### #
def create_collection(self,
name: str = "",
description: str = "",
tags: typing.List[str] = None,
collection_type: CollectionType = CollectionType.GENERAL) -> Collection:
"""
This function creates a new collection in Shapelets.
:param name: A String with the name of the collection.
:param description: A String which describes the purpose of this collection.
:param tags: A list of String, that represent the features of this collection.
:param collection_type: A String to represent the type of this collection.
:return: A new Shapelets Collection.
"""
return self.services[Services.COLLECTIONS].create_collection(
name=name,
description=description,
tags=tags,
collection_type=collection_type)
def create_default_collections(self, collection_name: str = "ENERNOC") -> None:
"""
This function creates a default collection in the Shapelets instance.
It is a collection with some sequences extracted from the Dataset passed
as argument, by default ENERNOC.
:param collection_name: The collection name, ENERNOC as example.
:return: The default collection of interest.
"""
self.services[Services.COLLECTIONS].create_default_collections(collection_name)
def get_collections(self) -> typing.List[Collection]:
"""
This function returns a list containing all the user's collections.
:return: A list of Shapelets Collections.
"""
return self.services[Services.COLLECTIONS].get_collections()
def get_collection(self, collection_id):
"""
This functions returns the collection with the id passed as argument.
:param collection_id: The collection id.
:return: A Shapelets Collection.
"""
return self.services[Services.COLLECTIONS].get_collection(collection_id)
def update_collection(self,
collection,
name=None,
favorite=None,
description=None,
tags=None,
collection_type=None):
"""
This function updates a collection with the arguments passed to this function.
:param collection: The Shapelets Collection.
:param name: A String with the name of the Collection.
        :param favorite: Boolean to indicate whether it is a favorite or not.
        :param description: A String with the description of this collection.
        :param tags: A list of Strings containing the tags of the collection.
        :param collection_type: The collection type.
        :return: The updated collection.
"""
return self.services[Services.COLLECTIONS].update_collection(
collection,
name=name,
favorite=favorite,
description=description,
tags=tags,
collection_type=collection_type)
def delete_collection(self, collection):
"""
This function deletes a collection.
:param collection: The Shapelets Collection.
        :return: Returns True if the operation was successful, False otherwise.
"""
return self.services[Services.COLLECTIONS].delete_collection(collection)
def get_collection_sequences(self, collection: Collection) -> typing.List[Sequence]:
"""
This function gets all Shapelets Sequences from the given Collection.
:param collection: The Collection.
:return: List of Shapelets Sequences.
"""
return self.services[Services.COLLECTIONS].get_collection_sequences(collection)
def get_collection_types(self):
"""
This function returns a list with all types of Collections.
:return: A list of strings with all collection types.
"""
return self.services[Services.COLLECTIONS].get_collection_types()
def share_collection(self, collection: Collection,
subject: typing.Any,
grant: Permission):
"""
        This function shares a collection with the given sid, which can be a user or a group.
:param collection: Collection of sequences.
:param subject: Subject can be an user or group.
:param grant: The Permission of access.
"""
self.services[Services.COLLECTIONS].share_collection(collection, subject, grant)
def unshare_collection(self, collection, subject):
"""
        This function unshares a collection with the given sid, which can be either a User or a Group.
:param collection: Collection of sequences.
:param subject: Subject, can be an User or Group.
"""
self.services[Services.COLLECTIONS].unshare_collection(collection, subject)
def get_collection_sharing(self, collection):
"""
This function returns a List containing the users with access to the given collection.
:param collection: The Collection.
:return: List of Users with access Permission.
"""
        return self.services[Services.COLLECTIONS].get_collection_sharing(collection)
def get_collection_privileges(self, collection):
"""
This function returns the List of Permissions that the calling user has on the Collection.
:param collection: The Collection.
        :return: List of Permissions the calling user has on the Collection.
        """
        return self.services[Services.COLLECTIONS].get_collection_privileges(collection)
# ######################### #
# NDArraysService methods: #
# ######################### #
def create_nd_array(self,
array: np.ndarray,
name: str = None,
description: str = None) -> NDArray:
"""
This function registers a new NDArray into Shapelets.
:param array: The numpy ndarray to be stored.
        :param name: The name of the NDArray.
:param description: The description of the NDArray.
:return: The registered NDArray.
"""
return self.services[Services.NDARRAYS].create_nd_array(array, name, description)
def get_nd_array_data(self, ndarray: NDArray) -> np.ndarray:
"""
This function returns an existing NDArray in Shapelets.
:param ndarray: The ndarray to be returned.
:return: A numpy ndarray.
"""
return self.services[Services.NDARRAYS].get_nd_array_data(ndarray)
def update_nd_array(self, nd_array: NDArray, array: np.ndarray = None) -> NDArray:
"""
This function updates a NDArray. This function checks dimensionality to ensure integrity between
array's data and array's metadata.
:param nd_array: The NDArray to be updated.
:param array: This parameter is optional, if present the array's data is updated as well.
:return: The registered NDArray.
"""
return self.services[Services.NDARRAYS].update_nd_array(nd_array, array)
def delete_nd_array(self, nd_array: NDArray) -> bool:
"""
This function deletes the given NDArray.
:param nd_array: The NDArray to be deleted.
        :return: A bool indicating if the NDArray was deleted or not.
"""
return self.services[Services.NDARRAYS].delete_nd_array(nd_array)
# ######################### #
# ModelsService methods: #
# ######################### #
def create_model(self,
model,
name: str = None,
description: str = None,
metadata: typing.Dict[str, str] = None) -> Model:
"""
This function registers a new Model into Shapelets.
:param model: The data model to be stored.
:param name: The name of the Model.
:param description: The description of the Model.
:param metadata: The metadata of the Model.
:return: The registered Model.
"""
return self.services[Services.MODELS].create_model(model, name, description, metadata)
def get_model_data(self, model: Model) -> str:
"""
This function returns the Model data of an existing Model in Shapelets.
:param model: The Model to be returned.
:return: A string with the Model.
"""
date_encode = bytes(model.data, encoding='utf-8')
data_bytes = base64.b64decode(date_encode)
return dill.loads(data_bytes)
# return self.services[Services.MODELS].get_model_data(model)
def update_model(self, model: Model, new_model: str = None) -> Model:
"""
This function updates a Model.
:param model: The Model to be updated.
:param new_model: This parameter is optional, if present the new model is updated as well.
:return: The registered Model.
"""
return self.services[Services.MODELS].update_model(model, new_model)
def delete_model(self, model: Model) -> bool:
"""
This function deletes the given Model.
:param model: The Model to be deleted.
        :return: A bool indicating if the Model was deleted or not.
"""
return self.services[Services.MODELS].delete_model(model)
# ######################### #
# DataframesService methods: #
# ######################### #
def create_dataframe(self,
dataframe: pd.DataFrame,
name: str = None,
description: str = None) -> Dataframe:
"""
This function registers a new Dataframe into Shapelets.
:param dataframe: The dataframe from pandas to be stored.
:param name: The name of the Dataframe.
:param description: The description of the Dataframe.
:return: The registered Dataframe.
"""
return self.services[Services.DATAFRAMES].create_dataframe(dataframe, name, description)
def get_dataframe_data(self, dataframe: Dataframe) -> pd.DataFrame:
"""
This function returns an existing Dataframe in Shapelets.
:param dataframe: The Dataframe to be returned.
:return: A pandas dataframe.
"""
return self.services[Services.DATAFRAMES].get_dataframe_data(dataframe)
def update_dataframe(self, dataframe: Dataframe, new_data: pd.DataFrame = None) -> Dataframe:
"""
This function updates a Dataframe.
:param dataframe: The Dataframe to be updated.
:param new_data: This parameter is optional, if present the data of the given Dataframe is updated with the new
pandas dataframe.
:return: The registered Dataframe.
"""
return self.services[Services.DATAFRAMES].update_dataframe(dataframe, new_data)
def delete_dataframe(self, dataframe: Dataframe) -> bool:
"""
This function deletes the given Dataframe.
:param dataframe: The Dataframe to be deleted.
        :return: A bool indicating if the given Dataframe was deleted or not.
"""
return self.services[Services.DATAFRAMES].delete_dataframe(dataframe)
# ######################### #
# SequencesService methods: #
# ######################### #
def create_sequence(self,
dataframe: pd.DataFrame,
name: str = "",
starts: np.datetime64 = None,
every=None,
collection=None) -> Sequence:
"""
This function creates a sequence from a dataframe and stores it into Shapelets.
NOTE: Only regular (evenly spaced) series are allowed.
:param dataframe: A pandas dataframe. If it has a datetime64 index it will be used.
:param name: name of the sequence.
        :param every: Time in milliseconds for regular series. The parameter is mandatory
        if the dataframe does not have a datetime64 index.
        :param starts: Start is the timestamp of the beginning of the sequence. The
        parameter is mandatory if the dataframe does not have a datetime64 index.
        :param collection: The Collection that sets if the sequence should be added to
a Collection. None if it is not required.
:return: The Sequence.
"""
if collection is None:
collections = self.services[Services.COLLECTIONS].get_collections()
collection = next(
col for col in collections if col.name == "Default Collection")
return self.services[Services.SEQUENCES].create_sequence(
dataframe,
name,
starts,
every,
collection)
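    # Illustrative sketch for create_sequence (hypothetical data and a
    # hypothetical `client` returned by init_session, not part of this API):
    # a dataframe with a datetime64 index needs neither `starts` nor `every`.
    #
    #   idx = pd.date_range("2021-01-01", periods=100, freq="1min")
    #   df = pd.DataFrame({"value": np.arange(100)}, index=idx)
    #   sequence = client.create_sequence(df, name="my-sequence")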
def update_sequence(self, sequence, dataframe):
"""
This function updates a Sequence.
:param sequence: The sequence to be updated.
:param dataframe: A pandas dataframe containing the new information to be
stored in the sequence.
"""
self.services[Services.SEQUENCES].update_sequence(sequence, dataframe)
def get_sequence_data(self, sequence):
return self.services[Services.SEQUENCES].get_sequence_data(sequence)
# ######################## #
# MetadataService methods: #
# ######################## #
def get_metadata(self, collection: Collection) -> pd.DataFrame:
"""
This function returns all the metadata for the given Collection.
:param collection: The given Collection.
:return: A dataframe with all de metadata with sequence names as index and each column
name as the metadata field.
"""
return self.services[Services.METADATA].get_metadata(collection)
def add_metadata(self, collection: Collection, sequence: Sequence, metadata: SequenceMetadata):
"""
This function adds MetaData to a Sequence in a Collection.
:param collection: The Collection which the sequence belongs to.
:param sequence: The Sequence.
:param metadata: the metadata to be added.
"""
self.services[Services.METADATA].add_metadata(collection, sequence, metadata)
def add_metadata_from_pandas(self, collection: Collection, dataframe: pd.DataFrame):
"""
This function adds a pandas dataframe containing metadata to sequences in a Collection.
The dataframe has to be of the following shape:
- It must have an index with the name of the sequences.
- Each column name will be the metadata name and the value of each
row will be the value of this metadata for the sequence in the
index of the row.
The supported types are:
- float
- str
- datetime.datetime
- np.datetime64
- Shapelets.MetadataCoordinates
:param collection: The target Collection.
:param dataframe: The dataframe containing the metadata.
"""
self.services[Services.METADATA].add_metadata_from_pandas(
collection,
self.get_collection_sequences(collection),
dataframe)
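    # Illustrative metadata dataframe for add_metadata_from_pandas (made-up
    # sequence names and fields; `client` and `collection` are hypothetical):
    # the index holds sequence names and each column is one metadata field.
    #
    #   meta = pd.DataFrame(
    #       {"site": ["berlin", "madrid"], "peak_kw": [12.5, 9.1]},
    #       index=["sequence_a", "sequence_b"])
    #   client.add_metadata_from_pandas(collection, meta)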
# ##################### #
# UsersService methods: #
# ##################### #
def get_users(self):
"""
This function returns the list of Users in Shapelets.
:return: List of Users.
"""
return self.services[Services.USERS].get_users()
def get_groups(self):
"""
This function returns the list of Groups in Shapelets.
:return: List of Groups.
"""
return self.services[Services.USERS].get_groups()
def get_my_user_details(self):
"""
This function returns the calling User's details.
:return: UserDetails
"""
return self.services[Services.USERS].get_my_user_details()
def get_user_details(self, subject_id):
"""
Returns the user details for the given subject_id.
:param subject_id: The User.
:return: An instance of User.
"""
return self.services[Services.USERS].get_user_details(subject_id)
# ######################### #
# DataAppService methods: #
# ######################### #
def get_data_apps(self) -> typing.List[DataApp]:
"""
This function returns the list of DataApps that the calling User has read permissions.
:return: A list of DataApps.
"""
return self.services[Services.DATA_APP].get_data_apps()
def register_data_app(self, app: DataApp):
"""
This function registers the given DataApp into Shapelets.
"""
dataapp = self.services[Services.DATA_APP].register_data_app(app)
print(f"Registered data-app: {self.services[Services.DATA_APP].base_url}/app/data-apps/{urllib.parse.quote(dataapp.name)}")
return dataapp
def delete_data_app(self, data_app_id: str) -> bool:
"""
This function removes the given DataApp.
"""
return self.services[Services.DATA_APP].delete_data_app(data_app_id)
# ######################### #
# ExecutionService methods: #
# ######################### #
def run(self, output_nodes: NodeReturnType) -> SupportedTypes:
"""
This function executes and wait for the completion of the given computing graph.
:param output_nodes: The output nodes of the graph to be executed.
:return: The list of computed output_nodes.
"""
return self.services[Services.EXECUTION].run_and_wait_for_all(output_nodes)
def run_async(self, output_nodes: NodeReturnType) -> int:
"""
This function executes asynchronously the given computing graph.
:param output_nodes: The output nodes of the graph to be executed.
:return: The number of the computing job that represents the enqueued computation.
"""
return self.services[Services.EXECUTION].run_async(output_nodes)
def wait_for_result(self, job_id) -> SupportedTypes:
"""
This function waits until the computation of the given job_id is finished.
:param job_id: The number of a previously enqueued Job.
:return: The list of computed output_nodes.
"""
return self.services[Services.EXECUTION].wait_for_result(job_id)
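    # Illustrative async execution pattern (hypothetical `client` and graph
    # nodes, not part of this API): run_async enqueues the graph and
    # wait_for_result blocks until the job finishes.
    #
    #   job_id = client.run_async(output_nodes)
    #   ...                      # do other work
    #   results = client.wait_for_result(job_id)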
def get_all_analysis(self) -> typing.List[str]:
"""
This function returns a list of all Analysis registers in the system.
"""
return self.services[Services.EXECUTION].get_all_analysis()
# ######################### #
# FunctionsService methods: #
# ######################### #
def register_custom_function(self,
custom_function: typing.Callable,
description: FunctionDescription = None,
force: bool = True,
persist_results: bool = True):
"""
This function registers a new User function in the system.
:param custom_function: The function to be registered.
        :param description: The description of the function.
:param force: Force overwriting the function if there is one function with this name already registered.
:param persist_results: This parameter activates the result caching mechanism.
"""
self.services[Services.FUNCTIONS].register_custom_function(custom_function, description, force, persist_results)
def register_custom_splitter(self,
custom_function: typing.Callable,
description: FunctionDescription = None,
force: bool = True,
persist_results: bool = True):
"""
This function registers a new Splitter user function in the system.
:param custom_function: The function to be registered.
:param description: The description of the function.
:param force: Force overwriting the function if there is one function with this name already registered.
:param persist_results: This parameter activates the result caching mechanism.
"""
self.services[Services.FUNCTIONS].register_custom_splitter(custom_function, description, force, persist_results)
def register_custom_reducer(self,
custom_function: typing.Callable,
description: FunctionDescription = None,
force: bool = True,
persist_results: bool = True):
"""
This function registers a new Reducer function in the system.
:param custom_function: The function to be registered.
:param description: The description of the function.
:param force: Force overwriting the function if there is one function with this name already registered.
:param persist_results: This parameter activates the result caching mechanism.
"""
self.services[Services.FUNCTIONS].register_custom_reducer(custom_function, description, force, persist_results)
def register_flow(self,
name: str,
output_nodes: NodeReturnType,
output_names: typing.Optional[typing.List[str]] = None,
persist_results: bool = True,
documentation: str = "No documentation provided for this function."):
"""
This function registers a new User flow in the system.
:param name: The name of the flow.
:param output_nodes: The output nodes of the flow.
:param output_names: List of names for the outputs of the flow.
:param persist_results: This parameter activates the result caching mechanism.
:param documentation: The doc string of the function.
"""
self.services[Services.FUNCTIONS].register_flow(name, documentation, output_nodes, output_names,
persist_results)
def register_analysis(self,
name: str,
output_nodes: NodeReturnType,
output_names: typing.Optional[typing.List[str]] = None,
persist_results: bool = True,
documentation: str = "No documentation provided for this function."
):
"""
This function registers a new User analysis in the system.
:param name: The name of the flow.
:param output_nodes: List of output nodes.
:param output_names: List of names for the outputs of the flow.
:param persist_results: This parameter activates the result caching mechanism.
:param documentation: The doc string of the function.
"""
self.services[Services.FUNCTIONS].register_analysis(name, documentation, output_nodes, output_names,
persist_results)
def delete_analysis(self, name: str):
"""
This function deletes the analysis with the names passed as argument.
        :param name: The name of the analysis to be deleted.
"""
self.services[Services.FUNCTIONS].delete_analysis(name)
def delete_all_analysis(self):
"""
This function deletes all analysis registered in Shapelets.
"""
self.services[Services.FUNCTIONS].delete_all_analysis()
def get_function_parameters(self, name: str = None):
"""
This function return a FunctionParametersDescription or List[FunctionParametersDescription] depending on the
name parameter. If it is not given, this function will return a list of all functions within the system,
otherwise it will return the FunctionParametersDescription of the requested function.
:param name: The function name to be returned.
"""
return self.services[Services.FUNCTIONS].get_function_parameters(name)
# #################### #
# TestService methods: #
# #################### #
def ping(self):
"""
This function performs a ping action.
        :return: True if it receives the pong message.
"""
return self.services[Services.TEST].ping()
def test_get(self, api_path):
"""
        This function performs a get action against Shapelets.
:param api_path: The path of the API to be tested.
"""
return self.services[Services.TEST].test_get(api_path)
def test_get_raw(self, api_path):
"""
        This function performs a get action against Shapelets and returns the raw response.
:param api_path: The path of the API to be tested.
:return: The raw response of the get action.
"""
return self.services[Services.TEST].test_get_raw(api_path)
def test_delete(self, api_path):
"""
        This function performs a delete action against Shapelets.
:param api_path: The path of the API to be tested.
"""
return self.services[Services.TEST].test_delete(api_path)
def test_post(self, api_path, data):
"""
        This function performs a post action against Shapelets.
:param api_path: The path of the API to be tested.
"""
return self.services[Services.TEST].test_post(api_path, data)
def test_put(self, api_path, data):
"""
        This function performs a put action against Shapelets.
:param api_path: The path of the API to be tested.
"""
return self.services[Services.TEST].test_put(api_path, data)
def start_shapelet_processes():
"""
This function launches Shapelets processes in background.
"""
from shapelets.__main__ import start_all_command
start_all_command()
login_service = LoginService('https://localhost', 443)
while True:
try:
login_service.login_user("admin", "admin")
print(f"server is up...")
break
except:
print(f"server is starting, takes a few seconds...")
time.sleep(5)
def stop_shapelet_processes():
"""
This function stops Shapelets processes running in background.
"""
from shapelets.__main__ import stop_command
stop_command()
def close_session():
"""
This function closes the Shapelets processes running in background.
"""
stop_shapelet_processes()
def init_session(username: str = None,
password: str = None,
address: str = "https://localhost",
port: int = None,
start_shapelets_processes: bool = True) -> Shapelets:
"""
This function initializes the session in Shapelets with the given user, password and address.
:param username: The username of the user to be logged in.
:param password: The password of the user.
:param address: The address of the Shapelets instance to be hit, default to localhost.
:param port: The number of the port, default to None.
:param start_shapelets_processes: Launches Shapelets processes in background , default to True.
:return: The Shapelets object to access all the system API.
"""
if start_shapelets_processes:
start_shapelet_processes()
if username and password and address:
print(f"Login as {username} for address {address}{':' + str(port) if port else ''}")
login_service = LoginService(address, port)
login_service.login_user(username, password)
return Shapelets(login_service)
if username and address:
user_info = read_user_from_login_file(address, username)
if user_info:
print(f"Found {username} info in login file for address {address}")
return init_session(
user_info["user"],
user_info["password"],
user_info["server"],
port)
elif os.environ.get("SHAPELETS_PWD"):
print(f"Found {username} info in Env Variable")
return init_session(
username,
os.environ.get("SHAPELETS_PWD"),
address,
port)
else:
raise ShapeletsLoginException(f"{username} information not found for address {address}")
if address:
user_info = read_user_from_login_file(address)
if user_info:
print(f"Found default user info in login file for {address}")
return init_session(
user_info["user"],
user_info["password"],
user_info["server"],
port)
elif os.environ.get("SHAPELETS_USER"):
print("Found user name in Env Variable")
return init_session(
os.environ.get("SHAPELETS_USER"),
None,
address,
port)
else:
raise ShapeletsLoginException(f"Login information not found for address {address}")
else:
raise ShapeletsLoginException("Login information not found")
def update_password(user: str, password: str, new_password: str, address: str, port: int = None):
"""
    This function updates the password for a User.
:param user: The user for which the password is going to be updated.
:param password: The old password.
:param new_password: The new password.
:param address: The address of the Shapelets instance.
:param port: The port.
:return: True if the password was successfully updated.
"""
login_service = LoginService(address, port)
login_service.update_password(user, password, new_password)
return Shapelets(login_service)
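# Typical session bootstrap (illustrative; credentials and address are
# placeholders): init_session starts the local processes, logs in and returns
# the API client used in the sketches above.
#
#   client = init_session("admin", "admin", "https://localhost")
#   ...
#   close_session()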
|
PypiClean
|
/pygame_cards-0.1.3-py3-none-any.whl/pygame_cards/board.py
|
from __future__ import annotations
from functools import cached_property
import sys
import pygame
from pygame_cards.abstract import AbstractGraphic
from pygame_cards.set import CardsSet
from pygame_cards import constants
class GameBoardGraphic(AbstractGraphic):
"""Represent the graphics of the game board."""
game_board: GameBoard
size: tuple[int, int]
# Maps the cardsets to the position they should go
cardsets_rel_pos: dict[CardsSet, tuple[float, float]]
cardsets_rel_size: dict[CardsSet, tuple[float, float]]
card_sizes: dict[CardsSet, tuple[int, int]] | tuple[int, int]
def __init__(
self,
cardsets_rel_pos: dict[CardsSet, tuple[float, float]] = {},
cardsets_rel_size: dict[CardsSet, tuple[float, float]]
| tuple[int, int] = (0.2, 0.8),
size: tuple[int, int] = constants.BOARD_SIZE,
) -> None:
"""Initialize a game board.
        Different cardsets are placed at different positions on the board.
:arg cardsets_rel_pos: Relative positions of the cardsets.
:arg cardsets_rel_size: Relative size of the cardsets on the
board.
        :arg size: The size of the board in pixels.
"""
self._size = size
self.cardsets_rel_pos = cardsets_rel_pos
self.cardsets_rel_size = cardsets_rel_size
def clear_cache(self) -> None:
super().clear_cache()
self.__dict__.pop("background", None)
@cached_property
def background(self) -> pygame.Surface:
# Transparent surface as default
return pygame.Surface(self.size, pygame.SRCALPHA)
@cached_property
def surface(self) -> pygame.Surface:
"""Show the game board with the gards."""
surf = self.background
for cardset in self.game_board.cardsets:
rel_pos = self.cardsets_rel_pos[cardset]
pos = rel_pos[0] * self.size[0], rel_pos[1] * self.size[1]
rel_size = (
self.cardsets_rel_size[cardset]
if isinstance(self.cardsets_rel_size, dict)
else self.cardsets_rel_size
)
size = rel_size[0] * self.size[0], rel_size[1] * self.size[1]
cardset.graphics.size = size
self.logger.debug(f"{cardset}, {pos=}")
surf.blit(cardset.graphics.surface, pos)
return surf
class GameBoard:
"""Base class for game boards."""
graphics: GameBoardGraphic
cardsets: list[CardsSet]
def __init__(self, cardsets: list[CardsSet] = []) -> None:
self.cardsets = cardsets
class ColumnsBoardGraphic(GameBoardGraphic):
"""A game board organized in columns."""
def __init__(
self,
game_board: GameBoard,
size: tuple[int, int] = constants.BOARD_SIZE,
space_ratio: float = constants.COLUMN_SPACING,
horizontal_margins_ratio: float = constants.COLUMN_SPACING,
vertical_margins_ratio: float = constants.COLUMN_SPACING,
card_size_ratio: tuple[int | float, int | float] = constants.CARD_SIZE,
):
"""Create the graphics.
The number of columns is determined by the number of card sets
in the game_board.
The columns are spaced on the game board based on the requested
spacing and margin ratios.
Card sizes are also determined from this layout and the
card_size_ratio.
"""
self.game_board = game_board
self.size = size
self.space_ratio = space_ratio
self.horizontal_margins_ratio = horizontal_margins_ratio
self.vertical_margins_ratio = vertical_margins_ratio
self.card_size_ratio = card_size_ratio
def clear_cache(self) -> None:
"""Clear the cache."""
super().clear_cache()
for prop in ["margins", "card_size"]:
self.__dict__.pop(prop, None)
@cached_property
def margins(self) -> tuple[int, int]:
"""Margins for the sides of the board."""
card_size = self.card_size
return (
card_size[0] * self.horizontal_margins_ratio,
card_size[1] * self.vertical_margins_ratio,
)
@cached_property
def card_size(self) -> tuple[int, int]:
"""Define the card size based on the layout of the board."""
n_cols = len(self.game_board.cardsets)
# account for all the elements making up the board's width
ratio_sum = (
n_cols + 2 * self.horizontal_margins_ratio + (n_cols - 1) * self.space_ratio
)
# a card has ratio 1, so the board width is split by the total ratio
card_width = self.size[0] / ratio_sum
card_height = (card_width / self.card_size_ratio[0]) * self.card_size_ratio[1]
return card_width, card_height
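# Worked example (illustrative numbers, assuming the sizing above): with
# 4 columns, horizontal_margins_ratio = 0.1 and space_ratio = 0.1,
# ratio_sum = 4 + 2 * 0.1 + 3 * 0.1 = 4.5, so on a 900-px-wide board each
# card is 900 / 4.5 = 200 px wide; with card_size_ratio = (2, 3) its height
# is (200 / 2) * 3 = 300 px.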
if __name__ == "__main__":
import logging
from pygame_cards.classics import CardSets
from pygame_cards.hands import VerticalPileGraphic
logging.basicConfig()
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
set_1 = CardSets.n52[:10]
logger.debug(f"{type(set_1)}")
set_1.graphics = VerticalPileGraphic(set_1)
set_2 = CardSets.n52[-10:]
set_2.graphics = VerticalPileGraphic(set_2)
set_2.graphics.logger.setLevel(logging.DEBUG)
board = GameBoard([set_1, set_2])
board_graphics = GameBoardGraphic(
cardsets_rel_pos={set_1: (0, 0.1), set_2: (0.5, 0.1)},
cardsets_rel_size={set_1: (0.2, 0.5), set_2: (0.1, 0.8)},
)
# Show the graphics surface in the main pygame loop
pygame.init()
screen = pygame.display.set_mode((1000, 800))
pygame.display.set_caption("Game Board")
board.graphics = board_graphics
board.graphics.logger.setLevel(logging.DEBUG)
board_graphics.game_board = board
board_graphics.size = screen.get_size()
surf = board.graphics.surface
fps = 10
clock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
raise SystemExit
screen.blit(surf, (0, 0))
clock.tick(fps)
pygame.display.flip()
|
PypiClean
|
/mera_tvm_full-1.4.0-cp36-cp36m-manylinux_2_27_x86_64.whl/tvm/topi/nn/conv3d_transpose.py
|
"""Transposed 3D convolution operators (sometimes called Deconvolution)."""
import tvm
from tvm import te
from tvm import relay
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple3d
from ..utils import simplify
def conv3d_transpose_ncdhw(Input, Filter, strides, padding, out_dtype, output_padding):
"""Transposed 3D convolution ncdhw forward operator.
Parameters
----------
Input : tvm.te.Tensor
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
Filter : tvm.te.Tensor
5-D with shape [in_channel, num_filter, filter_depth, filter_height, filter_width]
strides : int or a list/tuple of three ints
The spatial stride along depth, height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
Returns
-------
Output : tvm.te.Tensor
5-D with shape [batch, out_channel, out_depth, out_height, out_width]
"""
return declaration_conv3d_transpose_impl(
Input, Filter, strides, padding, out_dtype, output_padding
)
def conv3d_transpose_ncdhw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
"""Preprocess data and kernel to make the compute pattern
of conv3d_transpose the same as conv3d"""
batch, in_c, in_d, in_h, in_w = data.shape
_, out_c, filter_d, filter_h, filter_w = kernel.shape
stride_d, stride_h, stride_w = strides
opad_d, opad_h, opad_w = output_padding
assert opad_d < stride_d and opad_h < stride_h and opad_w < stride_w
# dilate data
data_dilate = dilate(data, [1, 1, stride_d, stride_h, stride_w], name="data_dilate")
# pad data
fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d(
padding, (filter_d, filter_h, filter_w)
)
bpad_front = filter_d - 1 - fpad_front
bpad_back = filter_d - 1 - fpad_back + opad_d
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate,
[0, 0, bpad_front, bpad_top, bpad_left],
[0, 0, bpad_back, bpad_bottom, bpad_right],
name="data_pad",
)
# transform kernel layout from IODHW to OIDHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_c, filter_d, filter_h, filter_w),
lambda o, i, d, h, w: kernel[i][o][filter_d - 1 - d][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
return data_pad, kernel_transform
def declaration_conv3d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
"""Implementation of conv3d transpose"""
data_pad, kernel_transform = conv3d_transpose_ncdhw_preprocess(
data, kernel, strides, padding, out_dtype, output_padding
)
batch, in_c, in_d, in_h, in_w = data_pad.shape
out_c, _, filter_d, filter_h, filter_w = kernel_transform.shape
stride_d, stride_h, stride_w = strides
# convolution stage
out_c = simplify(out_c)
out_d = simplify(in_d - filter_d + 1)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_c), name="dc")
dd = te.reduce_axis((0, filter_d), name="dd")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
Output = te.compute(
(batch, out_c, out_d, out_h, out_w),
lambda b, c, d, h, w: te.sum(
data_pad[b, dc, d + dd, h + dh, w + dw].astype(out_dtype)
* kernel_transform[c, dc, dd, dh, dw].astype(out_dtype),
axis=[dc, dd, dh, dw],
),
tag="conv3d_transpose_ncdhw",
)
return Output
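# A minimal sketch of how the operator above might be used (shapes, strides
# and padding are illustrative assumptions, not part of the original module):
#
#     data = te.placeholder((1, 8, 4, 4, 4), name="data")       # NCDHW input
#     kernel = te.placeholder((8, 16, 3, 3, 3), name="kernel")  # IODHW weights
#     out = conv3d_transpose_ncdhw(
#         data, kernel, strides=(2, 2, 2), padding=(1, 1, 1),
#         out_dtype="float32", output_padding=(1, 1, 1),
#     )
#     # `out` is a 5-D te.Tensor in NCDHW layout that can be scheduled and
#     # built like any other TOPI compute.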
@tvm.target.generic_func
def conv3d_transpose_legalize(attrs, inputs, types):
"""Legalizes Transposed 3D convolution op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed 3D convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
if attrs["data_layout"] == "NDHWC":
data, kernel = inputs
kernel_layout = attrs["kernel_layout"]
# Convert Kernel layout to IODHW
# kernel_layout is different from input kernel layout - IO is swapped
if kernel_layout == "DHWIO":
# input kernel layout is swapped to DHWOI
# output kernel layout will be IODHW
kernel = relay.transpose(kernel, axes=(4, 3, 0, 1, 2))
elif kernel_layout == "DHWOI":
# input kernel layout is swapped to DHWIO
# output kernel layout will be IODHW
kernel = relay.transpose(kernel, axes=(3, 4, 0, 1, 2))
elif kernel_layout == "IODHW":
# input kernel layout is swapped to OIDHW
# output kernel layout will be IODHW
kernel = relay.transpose(kernel, axes=(1, 0, 2, 3, 4))
elif kernel_layout == "OIDHW":
# input kernel layout is swapped to IODHW
# output kernel layout will be IODHW
pass
else:
# Skip legalize. Let relay.nn.conv3d_transpose handle the case
return None
# Set new attrs for conv3d_transpose.
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_attrs["data_layout"] = "NCDHW"
# layout of kernel should be IODHW, but kernel_layout should be swapped - OIDHW
new_attrs["kernel_layout"] = "OIDHW"
# Convert data to NCDHW.
data = relay.transpose(data, axes=(0, 4, 1, 2, 3))
deconv = relay.nn.conv3d_transpose(data, kernel, **new_attrs)
# Convert back to original NDHWC layout.
out = relay.transpose(deconv, axes=(0, 2, 3, 4, 1))
return out
return None
|
PypiClean
|
/eventsourcer-0.1.1.tar.gz/eventsourcer-0.1.1/README.rst
|
============
eventsourcer
============
.. image:: https://img.shields.io/pypi/v/eventsourcer.svg
:target: https://pypi.python.org/pypi/eventsourcer
.. image:: https://img.shields.io/travis/ivangeorgiev/eventsourcer.svg
:target: https://travis-ci.com/ivangeorgiev/eventsourcer
.. image:: https://readthedocs.org/projects/eventsourcer/badge/?version=latest
:target: https://eventsourcer.readthedocs.io/en/latest/?version=latest
:alt: Documentation Status
Python Eventsourcing
* Free software: GNU General Public License v3
* Documentation: https://eventsourcer.readthedocs.io.
Features
--------
* TODO
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
|
PypiClean
|
/git-pr-0.2.0.tar.gz/git-pr-0.2.0/docs/usage.rst
|
=======
Usage
=======
``git-pr`` provides a simple command line interface with the following
synopsis:
.. code::
git pr [repository] pull_request [--branch test]
Fetch
=====
Usually you just need to run the following short command in order to fetch
the pull request with ID=42:
.. code:: bash
$ git pr 42
When it's done, you'll be automatically switched to the fetched commit(s).
.. note::
By default, the ``origin`` remote is assumed. So please make sure it
points to GitHub, otherwise the command will fail.
Fetch From Remote
=================
Sometimes the ``origin`` remote is configured to point to some internal or
private repo. In this case you must **explicitly** specify a remote that
points to a GitHub mirror:
.. code:: bash
$ git pr github 42
It behaves exactly like the command above, except that the ``github``
remote is used instead of ``origin``.
Fetch From URL
==============
If, for some reason, you don't have a remote pointing to GitHub, you can
specify the repo URL instead:
.. code:: bash
$ git pr https://github.com/ikalnitsky/git-pr.git 42
Fetch To New Branch
===================
By default, you're in a **detached** state after fetching. So if you switch
to another branch in order to do some work, you won't be able to switch back
easily. That's why ``git-pr`` supports fetching a pull request into a new
branch. In order to do so, pass either the ``-b`` or ``--branch``
argument with a branch name:
.. code:: bash
$ git pr 42 -b pr/42
When it's done, the local branch ``pr/42`` is created with the content of the
pull request with ID=42.
Fetch Merge Commit
==================
Each pull request produces two refs:
* one that points to the pull request as its author submitted it;
* one that points to a merge commit of the pull request and the branch it
was submitted to.
By default, ``git-pr`` fetches the first one. If you want to fetch the second
one, pass either the ``-m`` or ``--merge`` argument:
.. code:: bash
$ git pr 42 --merge
Fetch Without Checkout
======================
By default, when a pull request is fetched, ``git-pr`` automatically checks out
the fetched copy. That's not always what you want, so to turn it off just
pass either the ``-C`` or ``--no-checkout`` argument:
.. code:: bash
$ git pr 42 --no-checkout
|
PypiClean
|
/alipay-python-3.3.17.tar.gz/alipay-python-3.3.17/alipay/aop/api/request/KoubeiCateringPosDishSyncRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiCateringPosDishSyncModel import KoubeiCateringPosDishSyncModel
class KoubeiCateringPosDishSyncRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiCateringPosDishSyncModel):
self._biz_content = value
else:
self._biz_content = KoubeiCateringPosDishSyncModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.catering.pos.dish.sync'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
PypiClean
|
/jsonschema-codelane-3.1.2b0.tar.gz/jsonschema-codelane-3.1.2b0/docs/faq.rst
|
==========================
Frequently Asked Questions
==========================
Why doesn't my schema's default property set the default on my instance?
------------------------------------------------------------------------
The basic answer is that the specification does not require that
:validator:`default` actually do anything.
For an inkling as to *why* it doesn't actually do anything, consider
that none of the other validators modify the instance either. More
importantly, having :validator:`default` modify the instance can produce
quite peculiar things. It's perfectly valid (and perhaps even useful)
to have a default that is not valid under the schema it lives in! So an
instance modified by the default would pass validation the first time,
but fail the second!
Still, filling in defaults is useful. `jsonschema`
allows you to `define your own validator classes and callables
<creating>`, so you can easily create a `jsonschema.IValidator` that
does set defaults. Here's some code to get you started. (In
this code, we add the default properties to each object *before* the
properties are validated, so the default values themselves will need to
be valid under the schema.)
.. code-block:: python
from jsonschema import Draft7Validator, validators
def extend_with_default(validator_class):
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for property, subschema in properties.items():
if "default" in subschema:
instance.setdefault(property, subschema["default"])
for error in validate_properties(
validator, properties, instance, schema,
):
yield error
return validators.extend(
validator_class, {"properties" : set_defaults},
)
DefaultValidatingDraft7Validator = extend_with_default(Draft7Validator)
# Example usage:
obj = {}
schema = {'properties': {'foo': {'default': 'bar'}}}
# Note jsonschema.validate(obj, schema, cls=DefaultValidatingDraft7Validator)
# will not work because the metaschema contains `default` directives.
DefaultValidatingDraft7Validator(schema).validate(obj)
assert obj == {'foo': 'bar'}
See the above-linked document for more info on how this works, but
basically, it just extends the :validator:`properties` validator of
a `jsonschema.Draft7Validator` so that it also fills in all the
defaults.
.. note::
If you're interested in a more interesting solution to a larger
class of these types of transformations, keep an eye on `Seep
<https://github.com/Julian/Seep>`_, which is an experimental
data transformation and extraction library written on top of
`jsonschema`.
.. hint::
The above code can provide default values for an entire object and
all of its properties, but only if your schema provides a default
value for the object itself, like so:
.. code-block:: python
schema = {
"type": "object",
"properties": {
"outer-object": {
"type": "object",
"properties" : {
"inner-object": {
"type": "string",
"default": "INNER-DEFAULT"
}
},
"default": {} # <-- MUST PROVIDE DEFAULT OBJECT
}
}
}
obj = {}
DefaultValidatingDraft7Validator(schema).validate(obj)
assert obj == {'outer-object': {'inner-object': 'INNER-DEFAULT'}}
...but if you don't provide a default value for your object, then
it won't be instantiated at all, much less populated with default
properties.
.. code-block:: python
del schema["properties"]["outer-object"]["default"]
obj2 = {}
DefaultValidatingDraft7Validator(schema).validate(obj2)
assert obj2 == {} # whoops
How do jsonschema version numbers work?
---------------------------------------
``jsonschema`` tries to follow the `Semantic Versioning
<https://semver.org/>`_ specification.
This means broadly that no backwards-incompatible changes should be made
in minor releases (and certainly not in dot releases).
The full picture requires defining what constitutes a
backwards-incompatible change.
The following are simple examples of things considered public API,
and therefore should *not* be changed without bumping a major version
number:
* module names and contents, when not marked private by Python
convention (a single leading underscore)
* function and object signature (parameter order and name)
The following are *not* considered public API and may change without
notice:
* the exact wording and contents of error messages; typical
reasons to do this seem to involve unit tests. API users are
encouraged to use the extensive introspection provided in
`jsonschema.exceptions.ValidationError`\s instead to make meaningful
assertions about what failed.
* the order in which validation errors are returned or raised
* the contents of the ``jsonschema.tests`` package
* the contents of the ``jsonschema.benchmarks`` package
* the ``jsonschema.compat`` module, which is for internal
compatibility use
* anything marked private
With the exception of the last two of those, flippant changes are
avoided, but changes can and will be made if there is improvement to be
had. Feel free to open an issue ticket if there is a specific issue or
question worth raising.
|
PypiClean
|
/django-ug-4.2.41.tar.gz/django-ug-4.2.41/django/http/request.py
|
import codecs
import copy
from io import BytesIO
from itertools import chain
from urllib.parse import parse_qsl, quote, urlencode, urljoin, urlsplit
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost,
ImproperlyConfigured,
RequestDataTooBig,
TooManyFieldsSent,
)
from django.core.files import uploadhandler
from django.http.multipartparser import (
MultiPartParser,
MultiPartParserError,
TooManyFilesSent,
)
from django.utils.datastructures import (
CaseInsensitiveMapping,
ImmutableList,
MultiValueDict,
)
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.functional import cached_property
from django.utils.http import is_same_domain, parse_header_parameters
from django.utils.regex_helper import _lazy_re_compile
RAISE_ERROR = object()
host_validation_re = _lazy_re_compile(
r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:[0-9]+)?$"
)
class UnreadablePostError(OSError):
pass
class RawPostDataException(Exception):
"""
You cannot access raw_post_data from a request that has
multipart/* POST data if it has been accessed via POST,
FILES, etc..
"""
pass
class HttpRequest:
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ""
self.path_info = ""
self.method = None
self.resolver_match = None
self.content_type = None
self.content_params = None
def __repr__(self):
if self.method is None or not self.get_full_path():
return "<%s>" % self.__class__.__name__
return "<%s: %s %r>" % (
self.__class__.__name__,
self.method,
self.get_full_path(),
)
@cached_property
def headers(self):
return HttpHeaders(self.META)
@cached_property
def accepted_types(self):
"""Return a list of MediaType instances."""
return parse_accept_header(self.headers.get("Accept", "*/*"))
def accepts(self, media_type):
return any(
accepted_type.match(media_type) for accepted_type in self.accepted_types
)
def _set_content_type_params(self, meta):
"""Set content_type, content_params, and encoding."""
self.content_type, self.content_params = parse_header_parameters(
meta.get("CONTENT_TYPE", "")
)
if "charset" in self.content_params:
try:
codecs.lookup(self.content_params["charset"])
except LookupError:
pass
else:
self.encoding = self.content_params["charset"]
def _get_raw_host(self):
"""
Return the HTTP host using the environment or request headers. Skip
allowed hosts protection, so may return an insecure host.
"""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and ("HTTP_X_FORWARDED_HOST" in self.META):
host = self.META["HTTP_X_FORWARDED_HOST"]
elif "HTTP_HOST" in self.META:
host = self.META["HTTP_HOST"]
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META["SERVER_NAME"]
server_port = self.get_port()
if server_port != ("443" if self.is_secure() else "80"):
host = "%s:%s" % (host, server_port)
return host
def get_host(self):
"""Return the HTTP host using the environment or request headers."""
host = self._get_raw_host()
# Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
allowed_hosts = settings.ALLOWED_HOSTS
if settings.DEBUG and not allowed_hosts:
allowed_hosts = [".localhost", "127.0.0.1", "[::1]"]
domain, port = split_domain_port(host)
if domain and validate_host(domain, allowed_hosts):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += (
" The domain name provided is not valid according to RFC 1034/1035."
)
raise DisallowedHost(msg)
def get_port(self):
"""Return the port number for the request as a string."""
if settings.USE_X_FORWARDED_PORT and "HTTP_X_FORWARDED_PORT" in self.META:
port = self.META["HTTP_X_FORWARDED_PORT"]
else:
port = self.META["SERVER_PORT"]
return str(port)
def get_full_path(self, force_append_slash=False):
return self._get_full_path(self.path, force_append_slash)
def get_full_path_info(self, force_append_slash=False):
return self._get_full_path(self.path_info, force_append_slash)
def _get_full_path(self, path, force_append_slash):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return "%s%s%s" % (
escape_uri_path(path),
"/" if force_append_slash and not path.endswith("/") else "",
("?" + iri_to_uri(self.META.get("QUERY_STRING", "")))
if self.META.get("QUERY_STRING", "")
else "",
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt="", max_age=None):
"""
Attempt to return a signed cookie. If the signature fails or the
cookie has expired, raise an exception, unless the `default` argument
is provided, in which case return that value.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age
)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
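# Illustrative calls (cookie name and salt are hypothetical, not part of the
# original module):
#   request.get_signed_cookie("name", salt="name-salt")  # raises if missing or tampered
#   request.get_signed_cookie("name", default=False)     # returns False instead of raising
#   request.get_signed_cookie("name", max_age=3600)      # also rejects cookies older than an hour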
def build_absolute_uri(self, location=None):
"""
Build an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, build the absolute URI
using request.get_full_path(). If the location is absolute, convert it
to an RFC 3987 compliant URI and return it. If location is relative or
is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
URL constructed from the request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = "//%s" % self.get_full_path()
else:
# Coerce lazy locations.
location = str(location)
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
# Handle the simple, most common case. If the location is absolute
# and a scheme or host (netloc) isn't provided, skip an expensive
# urljoin() as long as no path segments are '.' or '..'.
if (
bits.path.startswith("/")
and not bits.scheme
and not bits.netloc
and "/./" not in bits.path
and "/../" not in bits.path
):
# If location starts with '//' but has no netloc, reuse the
# schema and netloc from the current request. Strip the double
# slashes and continue as if it wasn't specified.
if location.startswith("//"):
location = location[2:]
location = self._current_scheme_host + location
else:
# Join the constructed URL with the provided location, which
# allows the provided location to apply query strings to the
# base path.
location = urljoin(self._current_scheme_host + self.path, location)
return iri_to_uri(location)
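# Illustrative results for a request to https://example.com/music/bands/metallica/?print=true
# (hypothetical host and paths, not part of the original module):
#   request.build_absolute_uri()                    -> 'https://example.com/music/bands/metallica/?print=true'
#   request.build_absolute_uri('/bands/')           -> 'https://example.com/bands/'
#   request.build_absolute_uri('https://vimeo.com') -> 'https://vimeo.com'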
@cached_property
def _current_scheme_host(self):
return "{}://{}".format(self.scheme, self.get_host())
def _get_scheme(self):
"""
Hook for subclasses like WSGIRequest to implement. Return 'http' by
default.
"""
return "http"
@property
def scheme(self):
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, secure_value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
"The SECURE_PROXY_SSL_HEADER setting must be a tuple containing "
"two values."
)
header_value = self.META.get(header)
if header_value is not None:
header_value, *_ = header_value.split(",", 1)
return "https" if header_value.strip() == secure_value else "http"
return self._get_scheme()
def is_secure(self):
return self.scheme == "https"
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Set the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, remove and recreate it on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, "GET"):
del self.GET
if hasattr(self, "_post"):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [
uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS
]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, "_files"):
raise AttributeError(
"You cannot set the upload handlers after the upload has been "
"processed."
)
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning=(
"You cannot alter upload handlers after the upload has been "
"processed."
),
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, "_body"):
if self._read_started:
raise RawPostDataException(
"You cannot access body after reading from request's data stream"
)
# Limit the maximum request data size that will be handled in-memory.
if (
settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
and int(self.META.get("CONTENT_LENGTH") or 0)
> settings.DATA_UPLOAD_MAX_MEMORY_SIZE
):
raise RequestDataTooBig(
"Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE."
)
try:
self._body = self.read()
except OSError as e:
raise UnreadablePostError(*e.args) from e
finally:
self._stream.close()
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict()
self._files = MultiValueDict()
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != "POST":
self._post, self._files = (
QueryDict(encoding=self._encoding),
MultiValueDict(),
)
return
if self._read_started and not hasattr(self, "_body"):
self._mark_post_parse_error()
return
if self.content_type == "multipart/form-data":
if hasattr(self, "_body"):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except (MultiPartParserError, TooManyFilesSent):
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
self._mark_post_parse_error()
raise
elif self.content_type == "application/x-www-form-urlencoded":
self._post, self._files = (
QueryDict(self.body, encoding=self._encoding),
MultiValueDict(),
)
else:
self._post, self._files = (
QueryDict(encoding=self._encoding),
MultiValueDict(),
)
def close(self):
if hasattr(self, "_files"):
for f in chain.from_iterable(list_[1] for list_ in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def __iter__(self):
return iter(self.readline, b"")
def readlines(self):
return list(self)
class HttpHeaders(CaseInsensitiveMapping):
HTTP_PREFIX = "HTTP_"
# PEP 333 gives two headers which aren't prepended with HTTP_.
UNPREFIXED_HEADERS = {"CONTENT_TYPE", "CONTENT_LENGTH"}
def __init__(self, environ):
headers = {}
for header, value in environ.items():
name = self.parse_header_name(header)
if name:
headers[name] = value
super().__init__(headers)
def __getitem__(self, key):
"""Allow header lookup using underscores in place of hyphens."""
return super().__getitem__(key.replace("_", "-"))
@classmethod
def parse_header_name(cls, header):
if header.startswith(cls.HTTP_PREFIX):
header = header[len(cls.HTTP_PREFIX) :]
elif header not in cls.UNPREFIXED_HEADERS:
return None
return header.replace("_", "-").title()
@classmethod
def to_wsgi_name(cls, header):
header = header.replace("-", "_").upper()
if header in cls.UNPREFIXED_HEADERS:
return header
return f"{cls.HTTP_PREFIX}{header}"
@classmethod
def to_asgi_name(cls, header):
return header.replace("-", "_").upper()
@classmethod
def to_wsgi_names(cls, headers):
return {
cls.to_wsgi_name(header_name): value
for header_name, value in headers.items()
}
@classmethod
def to_asgi_names(cls, headers):
return {
cls.to_asgi_name(header_name): value
for header_name, value in headers.items()
}
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict which represents a query string.
A QueryDict can be used to represent GET or POST data. It subclasses
MultiValueDict since keys in such data can be repeated, for instance
in the data from a form with a <select multiple> field.
By default QueryDicts are immutable, though the copy() method
will always return a mutable copy.
Both keys and values set on this class are converted from the given encoding
(DEFAULT_CHARSET by default) to str.
"""
# These are both reset in __init__, but are specified here at the class
# level so that unpickling will have valid values.
_mutable = True
_encoding = None
def __init__(self, query_string=None, mutable=False, encoding=None):
super().__init__()
self.encoding = encoding or settings.DEFAULT_CHARSET
query_string = query_string or ""
parse_qsl_kwargs = {
"keep_blank_values": True,
"encoding": self.encoding,
"max_num_fields": settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
}
if isinstance(query_string, bytes):
# query_string normally contains URL-encoded data, a subset of ASCII.
try:
query_string = query_string.decode(self.encoding)
except UnicodeDecodeError:
# ... but some user agents are misbehaving :-(
query_string = query_string.decode("iso-8859-1")
try:
for key, value in parse_qsl(query_string, **parse_qsl_kwargs):
self.appendlist(key, value)
except ValueError as e:
# ValueError can also be raised if the strict_parsing argument to
# parse_qsl() is True. As that is not used by Django, assume that
# the exception was raised by exceeding the value of max_num_fields
# instead of fragile checks of exception message strings.
raise TooManyFieldsSent(
"The number of GET/POST parameters exceeded "
"settings.DATA_UPLOAD_MAX_NUMBER_FIELDS."
) from e
self._mutable = mutable
@classmethod
def fromkeys(cls, iterable, value="", mutable=False, encoding=None):
"""
Return a new QueryDict with keys (may be repeated) from an iterable and
values from value.
"""
q = cls("", mutable=True, encoding=encoding)
for key in iterable:
q.appendlist(key, value)
if not mutable:
q._mutable = False
return q
@property
def encoding(self):
if self._encoding is None:
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super().__setitem__(key, value)
def __delitem__(self, key):
self._assert_mutable()
super().__delitem__(key)
def __copy__(self):
result = self.__class__("", mutable=True, encoding=self.encoding)
for key, value in self.lists():
result.setlist(key, value)
return result
def __deepcopy__(self, memo):
result = self.__class__("", mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in self.lists():
result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
super().setlist(key, list_)
def setlistdefault(self, key, default_list=None):
self._assert_mutable()
return super().setlistdefault(key, default_list)
def appendlist(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super().appendlist(key, value)
def pop(self, key, *args):
self._assert_mutable()
return super().pop(key, *args)
def popitem(self):
self._assert_mutable()
return super().popitem()
def clear(self):
self._assert_mutable()
super().clear()
def setdefault(self, key, default=None):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
default = bytes_to_text(default, self.encoding)
return super().setdefault(key, default)
def copy(self):
"""Return a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Return an encoded string of all query string arguments.
`safe` specifies characters which don't require quoting, for example::
>>> q = QueryDict(mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
safe = safe.encode(self.encoding)
def encode(k, v):
return "%s=%s" % ((quote(k, safe), quote(v, safe)))
else:
def encode(k, v):
return urlencode({k: v})
for k, list_ in self.lists():
output.extend(
encode(k.encode(self.encoding), str(v).encode(self.encoding))
for v in list_
)
return "&".join(output)
class MediaType:
def __init__(self, media_type_raw_line):
full_type, self.params = parse_header_parameters(
media_type_raw_line if media_type_raw_line else ""
)
self.main_type, _, self.sub_type = full_type.partition("/")
def __str__(self):
params_str = "".join("; %s=%s" % (k, v) for k, v in self.params.items())
return "%s%s%s" % (
self.main_type,
("/%s" % self.sub_type) if self.sub_type else "",
params_str,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__qualname__, self)
@property
def is_all_types(self):
return self.main_type == "*" and self.sub_type == "*"
def match(self, other):
if self.is_all_types:
return True
other = MediaType(other)
if self.main_type == other.main_type and self.sub_type in {"*", other.sub_type}:
return True
return False
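# Illustrative matches (hypothetical media types, not part of the original
# module):
#   MediaType("*/*").match("application/json")  -> True
#   MediaType("text/*").match("text/html")      -> True
#   MediaType("text/html").match("text/plain")  -> False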
# It's neither necessary nor appropriate to use
# django.utils.encoding.force_str() for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
"""
Convert bytes objects to strings, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Return any non-bytes objects without change.
"""
if isinstance(s, bytes):
return str(s, encoding, "replace")
else:
return s
def split_domain_port(host):
"""
Return a (domain, port) tuple from a given host.
Returned domain is lowercased. If the host is invalid, the domain will be
empty.
"""
host = host.lower()
if not host_validation_re.match(host):
return "", ""
if host[-1] == "]":
# It's an IPv6 address without a port.
return host, ""
bits = host.rsplit(":", 1)
domain, port = bits if len(bits) == 2 else (bits[0], "")
# Remove a trailing dot (if present) from the domain.
domain = domain[:-1] if domain.endswith(".") else domain
return domain, port
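# Illustrative results (hypothetical hosts, not part of the original module):
#   split_domain_port("example.com:8000")  -> ("example.com", "8000")
#   split_domain_port("Example.COM.")      -> ("example.com", "")
#   split_domain_port("[::1]:8000")        -> ("[::1]", "8000")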
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lowercased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
return any(
pattern == "*" or is_same_domain(host, pattern) for pattern in allowed_hosts
)
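# Illustrative checks (hypothetical hosts and patterns, not part of the
# original module):
#   validate_host("www.example.com", [".example.com"])  -> True
#   validate_host("example.com", [".example.com"])      -> True
#   validate_host("example.org", ["*"])                 -> True
#   validate_host("example.org", ["example.com"])       -> False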
def parse_accept_header(header):
return [MediaType(token) for token in header.split(",") if token.strip()]
|
PypiClean
|
/myams_js-1.16.0.tar.gz/myams_js-1.16.0/pkg/js/ext/ace/worker-base.min.js
|
!function(t){if(!(void 0!==t.window&&t.document||t.require&&t.define)){t.console||(t.console=function(){var t=Array.prototype.slice.call(arguments,0);postMessage({type:"log",data:t})},t.console.error=t.console.warn=t.console.log=t.console.trace=t.console),t.window=t,t.ace=t,t.onerror=function(t,e,n,i,r){postMessage({type:"error",data:{message:t,data:r.data,file:e,line:n,col:i,stack:r.stack}})},t.normalizeModule=function(e,n){if(-1!==n.indexOf("!")){var i=n.split("!");return t.normalizeModule(e,i[0])+"!"+t.normalizeModule(e,i[1])}if("."==n.charAt(0)){var r=e.split("/").slice(0,-1).join("/");for(n=(r?r+"/":"")+n;-1!==n.indexOf(".")&&s!=n;){var s=n;n=n.replace(/^\.\//,"").replace(/\/\.\//,"/").replace(/[^\/]+\/\.\.\//,"")}}return n},t.require=function(e,n){if(n||(n=e,e=null),!n.charAt)throw new Error("worker.js require() accepts only (parentId, id) as arguments");n=t.normalizeModule(e,n);var i=t.require.modules[n];if(i)return i.initialized||(i.initialized=!0,i.exports=i.factory().exports),i.exports;if(!t.require.tlns)return console.log("unable to load "+n);var r=function(t,e){var n=t,i="";for(;n;){var r=e[n];if("string"==typeof r)return r+i;if(r)return r.location.replace(/\/*$/,"/")+(i||r.main||r.name);if(!1===r)return"";var s=n.lastIndexOf("/");if(-1===s)break;i=n.substr(s)+i,n=n.slice(0,s)}return t}(n,t.require.tlns);return".js"!=r.slice(-3)&&(r+=".js"),t.require.id=n,t.require.modules[n]={},importScripts(r),t.require(e,n)},t.require.modules={},t.require.tlns={},t.define=function(e,n,i){if(2==arguments.length?(i=n,"string"!=typeof e&&(n=e,e=t.require.id)):1==arguments.length&&(i=e,n=[],e=t.require.id),"function"==typeof i){n.length||(n=["require","exports","module"]);var r=function(n){return t.require(e,n)};t.require.modules[e]={exports:{},factory:function(){var t=this,e=i.apply(this,n.slice(0,i.length).map((function(e){switch(e){case"require":return r;case"exports":return t.exports;case"module":return t;default:return r(e)}})));return e&&(t.exports=e),t}}}else t.require.modules[e]={exports:i,initialized:!0}},t.define.amd={},require.tlns={},t.initBaseUrls=function(t){for(var e in t)require.tlns[e]=t[e]},t.initSender=function(){var e=t.require("ace/lib/event_emitter").EventEmitter,n=t.require("ace/lib/oop"),i=function(){};return function(){n.implement(this,e),this.callback=function(t,e){postMessage({type:"call",id:e,data:t})},this.emit=function(t,e){postMessage({type:"event",name:t,data:e})}}.call(i.prototype),new i};var e=t.main=null,n=t.sender=null;t.onmessage=function(i){var r=i.data;if(r.event&&n)n._signal(r.event,r.data);else if(r.command)if(e[r.command])e[r.command].apply(e,r.args);else{if(!t[r.command])throw new Error("Unknown command:"+r.command);t[r.command].apply(t,r.args)}else if(r.init){t.initBaseUrls(r.tlns),n=t.sender=t.initSender();var s=require(r.module)[r.classname];e=t.main=new s(n)}}}}(this),define("ace/range",[],(function(t,e,n){"use strict";var i=function(t,e,n,i){this.start={row:t,column:e},this.end={row:n,column:i}};(function(){this.isEqual=function(t){return this.start.row===t.start.row&&this.end.row===t.end.row&&this.start.column===t.start.column&&this.end.column===t.end.column},this.toString=function(){return"Range: ["+this.start.row+"/"+this.start.column+"] -> ["+this.end.row+"/"+this.end.column+"]"},this.contains=function(t,e){return 0==this.compare(t,e)},this.compareRange=function(t){var e,n=t.end,i=t.start;return 
1==(e=this.compare(n.row,n.column))?1==(e=this.compare(i.row,i.column))?2:0==e?1:0:-1==e?-2:-1==(e=this.compare(i.row,i.column))?-1:1==e?42:0},this.comparePoint=function(t){return this.compare(t.row,t.column)},this.containsRange=function(t){return 0==this.comparePoint(t.start)&&0==this.comparePoint(t.end)},this.intersects=function(t){var e=this.compareRange(t);return-1==e||0==e||1==e},this.isEnd=function(t,e){return this.end.row==t&&this.end.column==e},this.isStart=function(t,e){return this.start.row==t&&this.start.column==e},this.setStart=function(t,e){"object"==typeof t?(this.start.column=t.column,this.start.row=t.row):(this.start.row=t,this.start.column=e)},this.setEnd=function(t,e){"object"==typeof t?(this.end.column=t.column,this.end.row=t.row):(this.end.row=t,this.end.column=e)},this.inside=function(t,e){return 0==this.compare(t,e)&&(!this.isEnd(t,e)&&!this.isStart(t,e))},this.insideStart=function(t,e){return 0==this.compare(t,e)&&!this.isEnd(t,e)},this.insideEnd=function(t,e){return 0==this.compare(t,e)&&!this.isStart(t,e)},this.compare=function(t,e){return this.isMultiLine()||t!==this.start.row?t<this.start.row?-1:t>this.end.row?1:this.start.row===t?e>=this.start.column?0:-1:this.end.row===t?e<=this.end.column?0:1:0:e<this.start.column?-1:e>this.end.column?1:0},this.compareStart=function(t,e){return this.start.row==t&&this.start.column==e?-1:this.compare(t,e)},this.compareEnd=function(t,e){return this.end.row==t&&this.end.column==e?1:this.compare(t,e)},this.compareInside=function(t,e){return this.end.row==t&&this.end.column==e?1:this.start.row==t&&this.start.column==e?-1:this.compare(t,e)},this.clipRows=function(t,e){if(this.end.row>e)var n={row:e+1,column:0};else if(this.end.row<t)n={row:t,column:0};if(this.start.row>e)var r={row:e+1,column:0};else if(this.start.row<t)r={row:t,column:0};return i.fromPoints(r||this.start,n||this.end)},this.extend=function(t,e){var n=this.compare(t,e);if(0==n)return this;if(-1==n)var r={row:t,column:e};else var s={row:t,column:e};return i.fromPoints(r||this.start,s||this.end)},this.isEmpty=function(){return this.start.row===this.end.row&&this.start.column===this.end.column},this.isMultiLine=function(){return this.start.row!==this.end.row},this.clone=function(){return i.fromPoints(this.start,this.end)},this.collapseRows=function(){return 0==this.end.column?new i(this.start.row,0,Math.max(this.start.row,this.end.row-1),0):new i(this.start.row,0,this.end.row,0)},this.toScreenRange=function(t){var e=t.documentToScreenPosition(this.start),n=t.documentToScreenPosition(this.end);return new i(e.row,e.column,n.row,n.column)},this.moveBy=function(t,e){this.start.row+=t,this.start.column+=e,this.end.row+=t,this.end.column+=e}}).call(i.prototype),i.fromPoints=function(t,e){return new i(t.row,t.column,e.row,e.column)},i.comparePoints=function(t,e){return t.row-e.row||t.column-e.column},i.comparePoints=function(t,e){return t.row-e.row||t.column-e.column},e.Range=i})),define("ace/lib/oop",[],(function(t,e,n){"use strict";e.inherits=function(t,e){t.super_=e,t.prototype=Object.create(e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}})},e.mixin=function(t,e){for(var n in e)t[n]=e[n];return t},e.implement=function(t,n){e.mixin(t,n)}})),define("ace/apply_delta",[],(function(t,e,n){"use strict";e.applyDelta=function(t,e,n){var i=e.start.row,r=e.start.column,s=t[i]||"";switch(e.action){case"insert":if(1===e.lines.length)t[i]=s.substring(0,r)+e.lines[0]+s.substring(r);else{var 
o=[i,1].concat(e.lines);t.splice.apply(t,o),t[i]=s.substring(0,r)+t[i],t[i+e.lines.length-1]+=s.substring(r)}break;case"remove":var a=e.end.column,c=e.end.row;i===c?t[i]=s.substring(0,r)+s.substring(a):t.splice(i,c-i+1,s.substring(0,r)+t[c].substring(a))}}})),define("ace/lib/event_emitter",[],(function(t,e,n){"use strict";var i={},r=function(){this.propagationStopped=!0},s=function(){this.defaultPrevented=!0};i._emit=i._dispatchEvent=function(t,e){this._eventRegistry||(this._eventRegistry={}),this._defaultHandlers||(this._defaultHandlers={});var n=this._eventRegistry[t]||[],i=this._defaultHandlers[t];if(n.length||i){"object"==typeof e&&e||(e={}),e.type||(e.type=t),e.stopPropagation||(e.stopPropagation=r),e.preventDefault||(e.preventDefault=s),n=n.slice();for(var o=0;o<n.length&&(n[o](e,this),!e.propagationStopped);o++);return i&&!e.defaultPrevented?i(e,this):void 0}},i._signal=function(t,e){var n=(this._eventRegistry||{})[t];if(n){n=n.slice();for(var i=0;i<n.length;i++)n[i](e,this)}},i.once=function(t,e){var n=this;if(this.on(t,(function i(){n.off(t,i),e.apply(null,arguments)})),!e)return new Promise((function(t){e=t}))},i.setDefaultHandler=function(t,e){var n=this._defaultHandlers;if(n||(n=this._defaultHandlers={_disabled_:{}}),n[t]){var i=n[t],r=n._disabled_[t];r||(n._disabled_[t]=r=[]),r.push(i);var s=r.indexOf(e);-1!=s&&r.splice(s,1)}n[t]=e},i.removeDefaultHandler=function(t,e){var n=this._defaultHandlers;if(n){var i=n._disabled_[t];if(n[t]==e)i&&this.setDefaultHandler(t,i.pop());else if(i){var r=i.indexOf(e);-1!=r&&i.splice(r,1)}}},i.on=i.addEventListener=function(t,e,n){this._eventRegistry=this._eventRegistry||{};var i=this._eventRegistry[t];return i||(i=this._eventRegistry[t]=[]),-1==i.indexOf(e)&&i[n?"unshift":"push"](e),e},i.off=i.removeListener=i.removeEventListener=function(t,e){this._eventRegistry=this._eventRegistry||{};var n=this._eventRegistry[t];if(n){var i=n.indexOf(e);-1!==i&&n.splice(i,1)}},i.removeAllListeners=function(t){t||(this._eventRegistry=this._defaultHandlers=void 0),this._eventRegistry&&(this._eventRegistry[t]=void 0),this._defaultHandlers&&(this._defaultHandlers[t]=void 0)},e.EventEmitter=i})),define("ace/anchor",[],(function(t,e,n){"use strict";var i=t("./lib/oop"),r=t("./lib/event_emitter").EventEmitter,s=e.Anchor=function(t,e,n){this.$onChange=this.onChange.bind(this),this.attach(t),void 0===n?this.setPosition(e.row,e.column):this.setPosition(e,n)};(function(){function t(t,e,n){var i=n?t.column<=e.column:t.column<e.column;return t.row<e.row||t.row==e.row&&i}i.implement(this,r),this.getPosition=function(){return this.$clipPositionToDocument(this.row,this.column)},this.getDocument=function(){return this.document},this.$insertRight=!1,this.onChange=function(e){if(!(e.start.row==e.end.row&&e.start.row!=this.row||e.start.row>this.row)){var n=function(e,n,i){var r="insert"==e.action,s=(r?1:-1)*(e.end.row-e.start.row),o=(r?1:-1)*(e.end.column-e.start.column),a=e.start,c=r?a:e.end;if(t(n,a,i))return{row:n.row,column:n.column};if(t(c,n,!i))return{row:n.row+s,column:n.column+(n.row==c.row?o:0)};return{row:a.row,column:a.column}}(e,{row:this.row,column:this.column},this.$insertRight);this.setPosition(n.row,n.column,!0)}},this.setPosition=function(t,e,n){var i;if(i=n?{row:t,column:e}:this.$clipPositionToDocument(t,e),this.row!=i.row||this.column!=i.column){var 
r={row:this.row,column:this.column};this.row=i.row,this.column=i.column,this._signal("change",{old:r,value:i})}},this.detach=function(){this.document.off("change",this.$onChange)},this.attach=function(t){this.document=t||this.document,this.document.on("change",this.$onChange)},this.$clipPositionToDocument=function(t,e){var n={};return t>=this.document.getLength()?(n.row=Math.max(0,this.document.getLength()-1),n.column=this.document.getLine(n.row).length):t<0?(n.row=0,n.column=0):(n.row=t,n.column=Math.min(this.document.getLine(n.row).length,Math.max(0,e))),e<0&&(n.column=0),n}}).call(s.prototype)})),define("ace/document",[],(function(t,e,n){"use strict";var i=t("./lib/oop"),r=t("./apply_delta").applyDelta,s=t("./lib/event_emitter").EventEmitter,o=t("./range").Range,a=t("./anchor").Anchor,c=function(t){this.$lines=[""],0===t.length?this.$lines=[""]:Array.isArray(t)?this.insertMergedLines({row:0,column:0},t):this.insert({row:0,column:0},t)};(function(){i.implement(this,s),this.setValue=function(t){var e=this.getLength()-1;this.remove(new o(0,0,e,this.getLine(e).length)),this.insert({row:0,column:0},t)},this.getValue=function(){return this.getAllLines().join(this.getNewLineCharacter())},this.createAnchor=function(t,e){return new a(this,t,e)},0==="aaa".split(/a/).length?this.$split=function(t){return t.replace(/\r\n|\r/g,"\n").split("\n")}:this.$split=function(t){return t.split(/\r\n|\r|\n/)},this.$detectNewLine=function(t){var e=t.match(/^.*?(\r\n|\r|\n)/m);this.$autoNewLine=e?e[1]:"\n",this._signal("changeNewLineMode")},this.getNewLineCharacter=function(){switch(this.$newLineMode){case"windows":return"\r\n";case"unix":return"\n";default:return this.$autoNewLine||"\n"}},this.$autoNewLine="",this.$newLineMode="auto",this.setNewLineMode=function(t){this.$newLineMode!==t&&(this.$newLineMode=t,this._signal("changeNewLineMode"))},this.getNewLineMode=function(){return this.$newLineMode},this.isNewLine=function(t){return"\r\n"==t||"\r"==t||"\n"==t},this.getLine=function(t){return this.$lines[t]||""},this.getLines=function(t,e){return this.$lines.slice(t,e+1)},this.getAllLines=function(){return this.getLines(0,this.getLength())},this.getLength=function(){return this.$lines.length},this.getTextRange=function(t){return this.getLinesForRange(t).join(this.getNewLineCharacter())},this.getLinesForRange=function(t){var e;if(t.start.row===t.end.row)e=[this.getLine(t.start.row).substring(t.start.column,t.end.column)];else{(e=this.getLines(t.start.row,t.end.row))[0]=(e[0]||"").substring(t.start.column);var n=e.length-1;t.end.row-t.start.row==n&&(e[n]=e[n].substring(0,t.end.column))}return e},this.insertLines=function(t,e){return console.warn("Use of document.insertLines is deprecated. Use the insertFullLines method instead."),this.insertFullLines(t,e)},this.removeLines=function(t,e){return console.warn("Use of document.removeLines is deprecated. Use the removeFullLines method instead."),this.removeFullLines(t,e)},this.insertNewLine=function(t){return console.warn("Use of document.insertNewLine is deprecated. 
Use insertMergedLines(position, ['', '']) instead."),this.insertMergedLines(t,["",""])},this.insert=function(t,e){return this.getLength()<=1&&this.$detectNewLine(e),this.insertMergedLines(t,this.$split(e))},this.insertInLine=function(t,e){var n=this.clippedPos(t.row,t.column),i=this.pos(t.row,t.column+e.length);return this.applyDelta({start:n,end:i,action:"insert",lines:[e]},!0),this.clonePos(i)},this.clippedPos=function(t,e){var n=this.getLength();void 0===t?t=n:t<0?t=0:t>=n&&(t=n-1,e=void 0);var i=this.getLine(t);return null==e&&(e=i.length),{row:t,column:e=Math.min(Math.max(e,0),i.length)}},this.clonePos=function(t){return{row:t.row,column:t.column}},this.pos=function(t,e){return{row:t,column:e}},this.$clipPosition=function(t){var e=this.getLength();return t.row>=e?(t.row=Math.max(0,e-1),t.column=this.getLine(e-1).length):(t.row=Math.max(0,t.row),t.column=Math.min(Math.max(t.column,0),this.getLine(t.row).length)),t},this.insertFullLines=function(t,e){var n=0;(t=Math.min(Math.max(t,0),this.getLength()))<this.getLength()?(e=e.concat([""]),n=0):(e=[""].concat(e),t--,n=this.$lines[t].length),this.insertMergedLines({row:t,column:n},e)},this.insertMergedLines=function(t,e){var n=this.clippedPos(t.row,t.column),i={row:n.row+e.length-1,column:(1==e.length?n.column:0)+e[e.length-1].length};return this.applyDelta({start:n,end:i,action:"insert",lines:e}),this.clonePos(i)},this.remove=function(t){var e=this.clippedPos(t.start.row,t.start.column),n=this.clippedPos(t.end.row,t.end.column);return this.applyDelta({start:e,end:n,action:"remove",lines:this.getLinesForRange({start:e,end:n})}),this.clonePos(e)},this.removeInLine=function(t,e,n){var i=this.clippedPos(t,e),r=this.clippedPos(t,n);return this.applyDelta({start:i,end:r,action:"remove",lines:this.getLinesForRange({start:i,end:r})},!0),this.clonePos(i)},this.removeFullLines=function(t,e){t=Math.min(Math.max(0,t),this.getLength()-1);var n=(e=Math.min(Math.max(0,e),this.getLength()-1))==this.getLength()-1&&t>0,i=e<this.getLength()-1,r=n?t-1:t,s=n?this.getLine(r).length:0,a=i?e+1:e,c=i?0:this.getLine(a).length,u=new o(r,s,a,c),l=this.$lines.slice(t,e+1);return this.applyDelta({start:u.start,end:u.end,action:"remove",lines:this.getLinesForRange(u)}),l},this.removeNewLine=function(t){t<this.getLength()-1&&t>=0&&this.applyDelta({start:this.pos(t,this.getLine(t).length),end:this.pos(t+1,0),action:"remove",lines:["",""]})},this.replace=function(t,e){return t instanceof o||(t=o.fromPoints(t.start,t.end)),0===e.length&&t.isEmpty()?t.start:e==this.getTextRange(t)?t.end:(this.remove(t),e?this.insert(t.start,e):t.start)},this.applyDeltas=function(t){for(var e=0;e<t.length;e++)this.applyDelta(t[e])},this.revertDeltas=function(t){for(var e=t.length-1;e>=0;e--)this.revertDelta(t[e])},this.applyDelta=function(t,e){var n="insert"==t.action;(n?t.lines.length<=1&&!t.lines[0]:!o.comparePoints(t.start,t.end))||(n&&t.lines.length>2e4?this.$splitAndapplyLargeDelta(t,2e4):(r(this.$lines,t,e),this._signal("change",t)))},this.$safeApplyDelta=function(t){var e=this.$lines.length;("remove"==t.action&&t.start.row<e&&t.end.row<e||"insert"==t.action&&t.start.row<=e)&&this.applyDelta(t)},this.$splitAndapplyLargeDelta=function(t,e){for(var n=t.lines,i=n.length-e+1,r=t.start.row,s=t.start.column,o=0,a=0;o<i;o=a){a+=e-1;var 
c=n.slice(o,a);c.push(""),this.applyDelta({start:this.pos(r+o,s),end:this.pos(r+a,s=0),action:t.action,lines:c},!0)}t.lines=n.slice(o),t.start.row=r+o,t.start.column=s,this.applyDelta(t,!0)},this.revertDelta=function(t){this.$safeApplyDelta({start:this.clonePos(t.start),end:this.clonePos(t.end),action:"insert"==t.action?"remove":"insert",lines:t.lines.slice()})},this.indexToPosition=function(t,e){for(var n=this.$lines||this.getAllLines(),i=this.getNewLineCharacter().length,r=e||0,s=n.length;r<s;r++)if((t-=n[r].length+i)<0)return{row:r,column:t+n[r].length+i};return{row:s-1,column:t+n[s-1].length+i}},this.positionToIndex=function(t,e){for(var n=this.$lines||this.getAllLines(),i=this.getNewLineCharacter().length,r=0,s=Math.min(t.row,n.length),o=e||0;o<s;++o)r+=n[o].length+i;return r+t.column}}).call(c.prototype),e.Document=c})),define("ace/lib/lang",[],(function(t,e,n){"use strict";e.last=function(t){return t[t.length-1]},e.stringReverse=function(t){return t.split("").reverse().join("")},e.stringRepeat=function(t,e){for(var n="";e>0;)1&e&&(n+=t),(e>>=1)&&(t+=t);return n};var i=/^\s\s*/,r=/\s\s*$/;e.stringTrimLeft=function(t){return t.replace(i,"")},e.stringTrimRight=function(t){return t.replace(r,"")},e.copyObject=function(t){var e={};for(var n in t)e[n]=t[n];return e},e.copyArray=function(t){for(var e=[],n=0,i=t.length;n<i;n++)t[n]&&"object"==typeof t[n]?e[n]=this.copyObject(t[n]):e[n]=t[n];return e},e.deepCopy=function t(e){if("object"!=typeof e||!e)return e;var n;if(Array.isArray(e)){n=[];for(var i=0;i<e.length;i++)n[i]=t(e[i]);return n}if("[object Object]"!==Object.prototype.toString.call(e))return e;for(var i in n={},e)n[i]=t(e[i]);return n},e.arrayToMap=function(t){for(var e={},n=0;n<t.length;n++)e[t[n]]=1;return e},e.createMap=function(t){var e=Object.create(null);for(var n in t)e[n]=t[n];return e},e.arrayRemove=function(t,e){for(var n=0;n<=t.length;n++)e===t[n]&&t.splice(n,1)},e.escapeRegExp=function(t){return t.replace(/([.*+?^${}()|[\]\/\\])/g,"\\$1")},e.escapeHTML=function(t){return(""+t).replace(/&/g,"&").replace(/"/g,""").replace(/'/g,"'").replace(/</g,"<")},e.getMatchOffsets=function(t,e){var n=[];return t.replace(e,(function(t){n.push({offset:arguments[arguments.length-2],length:t.length})})),n},e.deferredCall=function(t){var e=null,n=function(){e=null,t()},i=function(t){return i.cancel(),e=setTimeout(n,t||0),i};return i.schedule=i,i.call=function(){return this.cancel(),t(),i},i.cancel=function(){return clearTimeout(e),e=null,i},i.isPending=function(){return e},i},e.delayedCall=function(t,e){var n=null,i=function(){n=null,t()},r=function(t){null==n&&(n=setTimeout(i,t||e))};return r.delay=function(t){n&&clearTimeout(n),n=setTimeout(i,t||e)},r.schedule=r,r.call=function(){this.cancel(),t()},r.cancel=function(){n&&clearTimeout(n),n=null},r.isPending=function(){return n},r}})),define("ace/worker/mirror",[],(function(t,e,n){"use strict";t("../range").Range;var i=t("../document").Document,r=t("../lib/lang"),s=e.Mirror=function(t){this.sender=t;var e=this.doc=new i(""),n=this.deferredUpdate=r.delayedCall(this.onUpdate.bind(this)),s=this;t.on("change",(function(t){var i=t.data;if(i[0].start)e.applyDeltas(i);else for(var r=0;r<i.length;r+=2){if(Array.isArray(i[r+1]))var o={action:"insert",start:i[r],lines:i[r+1]};else o={action:"remove",start:i[r],end:i[r+1]};e.applyDelta(o,!0)}if(s.$timeout)return 
n.schedule(s.$timeout);s.onUpdate()}))};(function(){this.$timeout=500,this.setTimeout=function(t){this.$timeout=t},this.setValue=function(t){this.doc.setValue(t),this.deferredUpdate.schedule(this.$timeout)},this.getValue=function(t){this.sender.callback(this.doc.getValue(),t)},this.onUpdate=function(){},this.isPending=function(){return this.deferredUpdate.isPending()}}).call(s.prototype)}));
|
PypiClean
|
/bob.spear-1.1.8.zip/bob.spear-1.1.8/spear/script/spkverif_ivector.py
|
import sys, os
import argparse
from . import ToolChainExecutor
from .. import toolchain
class ToolChainExecutorIVector (ToolChainExecutor.ToolChainExecutor):
"""Class that executes the I-Vector tool chain (locally or in the grid)"""
def __init__(self, args):
# call base class constructor
ToolChainExecutor.ToolChainExecutor.__init__(self, args)
# specify the file selector and tool chain objects to be used by this class (and its base class)
self.m_file_selector = toolchain.FileSelector(self.m_configuration, self.m_database_config)
self.m_tool_chain = toolchain.ToolChainIvector(self.m_file_selector)
def zt_norm_configuration(self):
"""Special configuration for ZT-Norm computation"""
if self.m_database_config.protocol is not None:
self.m_configuration.models_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.models_dirs[0], self.m_database_config.protocol)
self.m_configuration.tnorm_models_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.models_dirs[1], self.m_database_config.protocol)
self.m_configuration.zt_norm_A_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_database_config.protocol, self.m_args.zt_dirs[0])
self.m_configuration.zt_norm_B_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_database_config.protocol, self.m_args.zt_dirs[1])
self.m_configuration.zt_norm_C_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_database_config.protocol, self.m_args.zt_dirs[2])
self.m_configuration.zt_norm_D_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_database_config.protocol, self.m_args.zt_dirs[3])
self.m_configuration.zt_norm_D_sameValue_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_database_config.protocol, self.m_args.zt_dirs[4])
self.m_configuration.scores_nonorm_dir = os.path.join(self.m_configuration.base_output_USER_dir, self.m_args.score_sub_dir, self.m_database_config.protocol, self.m_args.score_dirs[0])
self.m_configuration.scores_ztnorm_dir = os.path.join(self.m_configuration.base_output_USER_dir, self.m_args.score_sub_dir, self.m_database_config.protocol, self.m_args.score_dirs[1])
else:
self.m_configuration.models_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.models_dirs[0])
self.m_configuration.tnorm_models_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.models_dirs[1])
self.m_configuration.zt_norm_A_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_args.zt_dirs[0])
self.m_configuration.zt_norm_B_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_args.zt_dirs[1])
self.m_configuration.zt_norm_C_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_args.zt_dirs[2])
self.m_configuration.zt_norm_D_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_args.zt_dirs[3])
self.m_configuration.zt_norm_D_sameValue_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.score_sub_dir, self.m_args.zt_dirs[4])
self.m_configuration.scores_nonorm_dir = os.path.join(self.m_configuration.base_output_USER_dir, self.m_args.score_sub_dir, self.m_args.score_dirs[0])
self.m_configuration.scores_ztnorm_dir = os.path.join(self.m_configuration.base_output_USER_dir, self.m_args.score_sub_dir, self.m_args.score_dirs[1])
self.m_configuration.default_extension = ".hdf5"
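# For illustration only (hypothetical base directories and protocol; the sub-directory names
# are the defaults declared in parse_args below): with base_output_TEMP_dir=/tmp/ivec,
# base_output_USER_dir=/results, score_sub_dir=scores and protocol='male', the paths above
# resolve to e.g.
#   models_dir        -> /tmp/ivec/models/male
#   tnorm_models_dir  -> /tmp/ivec/tmodels/male
#   zt_norm_A_dir     -> /tmp/ivec/scores/male/zt_norm_A
#   scores_nonorm_dir -> /results/scores/male/nonorm
#   scores_ztnorm_dir -> /results/scores/male/ztnorm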
def ivector_specific_configuration(self):
self.m_configuration.whitening_enroler_file = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.whitening_enroler_file)
self.m_configuration.lda_projector_file = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.lda_projector_file)
self.m_configuration.wccn_projector_file = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.wccn_projector_file)
self.m_configuration.plda_enroler_file = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.plda_enroler_file)
self.m_configuration.projected_ivector_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.projected_ivector_dir)
self.m_configuration.whitened_ivector_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.whitened_ivector_dir)
self.m_configuration.lnorm_ivector_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.lnorm_ivector_dir)
self.m_configuration.lda_projected_ivector_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.lda_projected_ivector_dir)
self.m_configuration.wccn_projected_ivector_dir = os.path.join(self.m_configuration.base_output_TEMP_dir, self.m_args.wccn_projected_ivector_dir)
def protocol_specific_configuration(self):
"""Special configuration specific for this toolchain"""
self.zt_norm_configuration()
self.ivector_specific_configuration()
def execute_tool_chain(self):
"""Executes the ZT tool chain on the local machine"""
# preprocessing
if not self.m_args.skip_preprocessing:
self.m_tool_chain.preprocess_audio_files(self.m_preprocessor, self.m_tool, force = self.m_args.force)
# feature extraction
#if not self.m_args.skip_feature_extraction_training and hasattr(self.m_feature_extractor, 'train'):
# self.m_tool_chain.train_extractor(self.m_feature_extractor, force = self.m_args.force)
if not self.m_args.skip_feature_extraction:
self.m_tool_chain.extract_features(self.m_feature_extractor, self.m_tool, force = self.m_args.force)
# feature projection
if not self.m_args.skip_projection_training and hasattr(self.m_tool, 'train_projector'):
self.m_tool_chain.train_projector(self.m_tool, force = self.m_args.force)
if not self.m_args.skip_projection_ubm and hasattr(self.m_tool, 'project_gmm'):
self.m_tool_chain.project_gmm_features(self.m_tool, force = self.m_args.force, extractor = self.m_feature_extractor)
# train enroler
if not self.m_args.skip_enroler_training and hasattr(self.m_tool, 'train_enroler'):
self.m_tool_chain.train_enroler(self.m_tool, force = self.m_args.force)
# IVector projection
if not self.m_args.skip_projection_ivector and hasattr(self.m_tool, 'project_ivector'):
self.m_tool_chain.project_ivector_features(self.m_tool, force = self.m_args.force, extractor = self.m_feature_extractor)
# train whitening enroler
if not self.m_args.skip_whitening_enroler_training and hasattr(self.m_tool, 'train_whitening_enroler'):
self.m_tool_chain.train_whitening_enroler(self.m_tool, dir_type='projected_ivector', force = self.m_args.force)
# whitening i-vectors
if not self.m_args.skip_whitening_ivector and hasattr(self.m_tool, 'whitening_ivector'):
self.m_tool_chain.whitening_ivector(self.m_tool, dir_type='projected_ivector', force = self.m_args.force)
# lnorm i-vectors
if not self.m_args.skip_lnorm_ivector and hasattr(self.m_tool, 'lnorm_ivector'):
self.m_tool_chain.lnorm_ivector(self.m_tool, dir_type='whitened_ivector', force = self.m_args.force)
# train LDA projector
if not self.m_args.skip_lda_train_projector and hasattr(self.m_tool, 'lda_train_projector'):
self.m_tool_chain.lda_train_projector(self.m_tool, dir_type='lnorm_ivector', force = self.m_args.force)
# project i-vectors using LDA
if not self.m_args.skip_lda_projection and hasattr(self.m_tool, 'lda_project_ivector'):
self.m_tool_chain.lda_project_ivector(self.m_tool, dir_type='lnorm_ivector', force = self.m_args.force)
# train WCCN projector
if not self.m_args.skip_wccn_train_projector and hasattr(self.m_tool, 'wccn_train_projector'):
self.m_tool_chain.wccn_train_projector(self.m_tool, dir_type='lda_projected_ivector', force = self.m_args.force)
# project i-vectors using WCCN
if not self.m_args.skip_wccn_projection and hasattr(self.m_tool, 'wccn_project_ivector'):
self.m_tool_chain.wccn_project_ivector(self.m_tool, dir_type='lda_projected_ivector', force = self.m_args.force)
cur_type = 'wccn_projected_ivector'
# train plda enroler
if not self.m_args.skip_train_plda_enroler and hasattr(self.m_tool, 'train_plda_enroler'):
self.m_tool_chain.train_plda_enroler(self.m_tool, dir_type=cur_type, force = self.m_args.force)
# PLDA enrollment of the models
if not self.m_args.skip_model_enrolment:
self.m_tool_chain.enrol_models(self.m_tool, self.m_feature_extractor, not self.m_args.no_zt_norm, dir_type=cur_type, groups = self.m_args.groups, force = self.m_args.force)
# score computation
if not self.m_args.skip_score_computation:
self.m_tool_chain.compute_scores(self.m_tool, not self.m_args.no_zt_norm, dir_type=cur_type, groups = self.m_args.groups, preload_probes = self.m_args.preload_probes, force = self.m_args.force)
if not self.m_args.no_zt_norm:
self.m_tool_chain.zt_norm(self.m_tool, groups = self.m_args.groups)
# concatenation of scores
if not self.m_args.skip_concatenation:
self.m_tool_chain.concatenate(not self.m_args.no_zt_norm, groups = self.m_args.groups)
def add_jobs_to_grid(self, external_dependencies):
"""Adds all (desired) jobs of the tool chain to the grid"""
# collect the job ids
job_ids = {}
# if there are any external dependencies, we need to respect them
deps = external_dependencies[:]
# VAD preprocessing; never has any dependencies.
if not self.m_args.skip_preprocessing:
job_ids['preprocessing'] = self.submit_grid_job(
'--preprocess',
list_to_split = self.m_file_selector.original_wav_list('IVector'),
number_of_files_per_job = self.m_grid_config.number_of_audio_files_per_job,
dependencies = [],
**self.m_grid_config.preprocessing_queue)
deps.append(job_ids['preprocessing'])
# feature extraction
if not self.m_args.skip_feature_extraction:
job_ids['feature_extraction'] = self.submit_grid_job(
'--feature-extraction',
list_to_split = self.m_file_selector.feature_list('IVector'),
number_of_files_per_job = self.m_grid_config.number_of_audio_files_per_job,
dependencies = deps,
**self.m_grid_config.extraction_queue)
deps.append(job_ids['feature_extraction'])
# feature projection training
if not self.m_args.skip_projection_training and hasattr(self.m_tool, 'train_projector'):
job_ids['projector_training'] = self.submit_grid_job(
'--train-projector',
name="p-training",
dependencies = deps,
**self.m_grid_config.training_queue)
deps.append(job_ids['projector_training'])
# feature UBM projection
if not self.m_args.skip_projection_ubm and hasattr(self.m_tool, 'project_gmm'):
job_ids['feature_projection_ubm'] = self.submit_grid_job(
'--feature-projection-ubm',
list_to_split = self.m_file_selector.feature_list('IVector'),
number_of_files_per_job = self.m_grid_config.number_of_projections_per_job,
dependencies = deps,
**self.m_grid_config.projection_queue)
deps.append(job_ids['feature_projection_ubm'])
# TV training
if not self.m_args.skip_enroler_training and hasattr(self.m_tool, 'train_enroler'):
job_ids['enrolment_training'] = self.submit_grid_job(
'--train-enroler',
name = "e-training",
dependencies = deps,
**self.m_grid_config.training_queue)
deps.append(job_ids['enrolment_training'])
# i-vectors extraction
if not self.m_args.skip_projection_ivector and hasattr(self.m_tool, 'project_ivector'):
job_ids['feature_projection_ivector'] = self.submit_grid_job(
'--feature-projection-ivector',
list_to_split = self.m_file_selector.feature_list('IVector'),
number_of_files_per_job = self.m_grid_config.number_of_projections_per_job,
dependencies = deps,
**self.m_grid_config.projection_queue)
deps.append(job_ids['feature_projection_ivector'])
# train whitening
if not self.m_args.skip_whitening_enroler_training and hasattr(self.m_tool, 'train_whitening_enroler'):
job_ids['whitening_enrolment_training'] = self.submit_grid_job(
'--train-whitening-enroler',
name = "w-e-training",
dependencies = deps,
**self.m_grid_config.training_queue)
deps.append(job_ids['whitening_enrolment_training'])
# whitening i-vectors
if not self.m_args.skip_whitening_ivector and hasattr(self.m_tool, 'whitening_ivector'):
job_ids['whitening_ivector'] = self.submit_grid_job(
'--whitening-ivector',
list_to_split = self.m_file_selector.feature_list('IVector'),
number_of_files_per_job = self.m_grid_config.number_of_projections_per_job,
dependencies = deps,
**self.m_grid_config.projection_queue)
deps.append(job_ids['whitening_ivector'])
# lnorm i-vectors
if not self.m_args.skip_lnorm_ivector and hasattr(self.m_tool, 'lnorm_ivector'):
job_ids['lnorm_ivector'] = self.submit_grid_job(
'--lnorm-ivector',
list_to_split = self.m_file_selector.feature_list('IVector'),
number_of_files_per_job = self.m_grid_config.number_of_projections_per_job,
dependencies = deps,
**self.m_grid_config.projection_queue)
deps.append(job_ids['lnorm_ivector'])
# train LDA projector
if not self.m_args.skip_lda_train_projector and hasattr(self.m_tool, 'lda_train_projector'):
job_ids['lda_train_projector'] = self.submit_grid_job(
'--lda-train-projector',
name = "lda-proj-training",
dependencies = deps,
**self.m_grid_config.training_queue)
deps.append(job_ids['lda_train_projector'])
# LDA projection
if not self.m_args.skip_lda_projection and hasattr(self.m_tool, 'lda_project_ivector'):
job_ids['lda_project_ivector'] = self.submit_grid_job(
'--lda-project-ivector',
list_to_split = self.m_file_selector.feature_list('IVector'),
number_of_files_per_job = self.m_grid_config.number_of_projections_per_job,
dependencies = deps,
**self.m_grid_config.projection_queue)
deps.append(job_ids['lda_project_ivector'])
# train WCCN projector
if not self.m_args.skip_wccn_train_projector and hasattr(self.m_tool, 'wccn_train_projector'):
job_ids['wccn_train_projector'] = self.submit_grid_job(
'--wccn-train-projector',
name = "wccn-proj-training",
dependencies = deps,
**self.m_grid_config.training_queue)
deps.append(job_ids['wccn_train_projector'])
# WCCN projection
if not self.m_args.skip_wccn_projection and hasattr(self.m_tool, 'wccn_project_ivector'):
job_ids['wccn_project_ivector'] = self.submit_grid_job(
'--wccn-project-ivector',
list_to_split = self.m_file_selector.feature_list('IVector'),
number_of_files_per_job = self.m_grid_config.number_of_projections_per_job,
dependencies = deps,
**self.m_grid_config.projection_queue)
deps.append(job_ids['wccn_project_ivector'])
# train PLDA
if not self.m_args.skip_train_plda_enroler and hasattr(self.m_tool, 'train_plda_enroler'):
job_ids['train_plda_enroler'] = self.submit_grid_job(
'--train-plda-enroler',
name = "plda-e-training",
dependencies = deps,
**self.m_grid_config.training_queue)
deps.append(job_ids['train_plda_enroler'])
# enrol models
enrol_deps_n = {}
enrol_deps_t = {}
score_deps = {}
concat_deps = {}
for group in self.m_args.groups:
enrol_deps_n[group] = deps[:]
enrol_deps_t[group] = deps[:]
list_to_split = self.m_file_selector.model_ids(group)
if not self.m_args.skip_model_enrolment:
job_ids['enrol_%s_N'%group] = self.submit_grid_job(
'--enrol-models --group=%s --model-type=N'%group,
name = "enrol-N-%s"%group,
list_to_split = self.m_file_selector.model_ids(group),
number_of_files_per_job = self.m_grid_config.number_of_models_per_enrol_job,
dependencies = deps,
**self.m_grid_config.enrol_queue)
enrol_deps_n[group].append(job_ids['enrol_%s_N'%group])
if not self.m_args.no_zt_norm:
job_ids['enrol_%s_T'%group] = self.submit_grid_job(
'--enrol-models --group=%s --model-type=T'%group,
name = "enrol-T-%s"%group,
list_to_split = self.m_file_selector.tmodel_ids(group),
number_of_files_per_job = self.m_grid_config.number_of_models_per_enrol_job,
dependencies = deps,
**self.m_grid_config.enrol_queue)
enrol_deps_t[group].append(job_ids['enrol_%s_T'%group])
# compute A,B,C, and D scores
if not self.m_args.skip_score_computation:
job_ids['score_%s_A'%group] = self.submit_grid_job(
'--compute-scores --group=%s --score-type=A'%group,
name = "score-A-%s"%group,
list_to_split = self.m_file_selector.model_ids(group),
number_of_files_per_job = self.m_grid_config.number_of_models_per_score_job,
dependencies = enrol_deps_n[group],
**self.m_grid_config.score_queue)
concat_deps[group] = [job_ids['score_%s_A'%group]]
if not self.m_args.no_zt_norm:
job_ids['score_%s_B'%group] = self.submit_grid_job(
'--compute-scores --group=%s --score-type=B'%group,
name = "score-B-%s"%group,
list_to_split = self.m_file_selector.model_ids(group),
number_of_files_per_job = self.m_grid_config.number_of_models_per_score_job,
dependencies = enrol_deps_n[group],
**self.m_grid_config.score_queue)
job_ids['score_%s_C'%group] = self.submit_grid_job(
'--compute-scores --group=%s --score-type=C'%group,
name = "score-C-%s"%group,
list_to_split = self.m_file_selector.tmodel_ids(group),
number_of_files_per_job = self.m_grid_config.number_of_models_per_score_job,
dependencies = enrol_deps_t[group],
**self.m_grid_config.score_queue)
job_ids['score_%s_D'%group] = self.submit_grid_job(
'--compute-scores --group=%s --score-type=D'%group,
name = "score-D-%s"%group,
list_to_split = self.m_file_selector.tmodel_ids(group),
number_of_files_per_job = self.m_grid_config.number_of_models_per_score_job,
dependencies = enrol_deps_t[group],
**self.m_grid_config.score_queue)
# compute zt-norm
score_deps[group] = [job_ids['score_%s_A'%group], job_ids['score_%s_B'%group], job_ids['score_%s_C'%group], job_ids['score_%s_D'%group]]
job_ids['score_%s_Z'%group] = self.submit_grid_job(
'--compute-scores --group=%s --score-type=Z'%group,
name = "score-Z-%s"%group,
dependencies = score_deps[group], **self.m_grid_config.score_queue)
concat_deps[group].extend([job_ids['score_%s_B'%group], job_ids['score_%s_C'%group], job_ids['score_%s_D'%group], job_ids['score_%s_Z'%group]])
else:
concat_deps[group] = []
# concatenate results
if not self.m_args.skip_concatenation:
job_ids['concat_%s'%group] = self.submit_grid_job(
'--concatenate --group=%s'%group,
name = "concat-%s"%group,
dependencies = concat_deps[group])
# return the job ids, in case anyone wants to know them
return job_ids
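# For illustration (a sketch derived from the submissions above, assuming groups=['dev'] and
# no --skip-... flags): the returned mapping contains grid job ids keyed like
#   {'preprocessing': ..., 'feature_extraction': ..., 'projector_training': ...,
#    'feature_projection_ubm': ..., 'enrolment_training': ..., 'feature_projection_ivector': ...,
#    'enrol_dev_N': ..., 'enrol_dev_T': ..., 'score_dev_A': ..., 'score_dev_Z': ..., 'concat_dev': ...}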
def execute_grid_job(self):
"""Run the desired job of the ZT tool chain that is specified on command line"""
# preprocess
if self.m_args.preprocess:
self.m_tool_chain.preprocess_audio_files(
self.m_preprocessor,
self.m_tool,
indices = self.indices(self.m_file_selector.original_wav_list('IVector'), self.m_grid_config.number_of_audio_files_per_job),
force = self.m_args.force)
# feature extraction
if self.m_args.feature_extraction:
self.m_tool_chain.extract_features(
self.m_feature_extractor,
self.m_tool,
indices = self.indices(self.m_file_selector.feature_list('IVector'), self.m_grid_config.number_of_audio_files_per_job),
force = self.m_args.force)
# train the feature projector
if self.m_args.train_projector:
self.m_tool_chain.train_projector(
self.m_tool,
force = self.m_args.force)
# project the features ubm
if self.m_args.projection_ubm:
self.m_tool_chain.project_gmm_features(
self.m_tool,
indices = self.indices(self.m_file_selector.feature_list('IVector'), self.m_grid_config.number_of_projections_per_job),
force = self.m_args.force,
extractor = self.m_feature_extractor)
# train model enroler
if self.m_args.train_enroler:
self.m_tool_chain.train_enroler(
self.m_tool,
force = self.m_args.force)
# project the features ivector
if self.m_args.projection_ivector:
self.m_tool_chain.project_ivector_features(
self.m_tool,
indices = self.indices(self.m_file_selector.feature_list('IVector'), self.m_grid_config.number_of_projections_per_job),
force = self.m_args.force,
extractor = self.m_feature_extractor)
# train model whitening enroler
if self.m_args.train_whitening_enroler:
self.m_tool_chain.train_whitening_enroler(
self.m_tool,
dir_type='projected_ivector',
force = self.m_args.force)
# whiten the i-vectors
if self.m_args.whitening_ivector:
self.m_tool_chain.whitening_ivector(
self.m_tool,
dir_type='projected_ivector',
indices = self.indices(self.m_file_selector.feature_list('IVector'), self.m_grid_config.number_of_projections_per_job),
force = self.m_args.force)
# length-normalize the i-vectors
if self.m_args.lnorm_ivector:
self.m_tool_chain.lnorm_ivector(
self.m_tool,
dir_type='whitened_ivector',
indices = self.indices(self.m_file_selector.feature_list('IVector'), self.m_grid_config.number_of_projections_per_job),
force = self.m_args.force)
# train LDA projector
if self.m_args.lda_train_projector:
self.m_tool_chain.lda_train_projector(
self.m_tool,
dir_type='lnorm_ivector',
force = self.m_args.force)
# project the i-vectors using LDA
if self.m_args.lda_project_ivector:
self.m_tool_chain.lda_project_ivector(
self.m_tool,
dir_type='lnorm_ivector',
indices = self.indices(self.m_file_selector.feature_list('IVector'), self.m_grid_config.number_of_projections_per_job),
force = self.m_args.force)
# train WCCN projector
if self.m_args.wccn_train_projector:
self.m_tool_chain.wccn_train_projector(
self.m_tool,
dir_type='lda_projected_ivector',
force = self.m_args.force)
# project the i-vectors using WCCN
if self.m_args.wccn_project_ivector:
self.m_tool_chain.wccn_project_ivector(
self.m_tool,
dir_type='lda_projected_ivector',
indices = self.indices(self.m_file_selector.feature_list('IVector'), self.m_grid_config.number_of_projections_per_job),
force = self.m_args.force)
cur_type = 'wccn_projected_ivector'
# train plda enroler
if self.m_args.train_plda_enroler:
self.m_tool_chain.train_plda_enroler(
self.m_tool,
dir_type=cur_type,
force = self.m_args.force)
# enrol models
if self.m_args.enrol_models:
if self.m_args.model_type == 'N':
self.m_tool_chain.enrol_models(
self.m_tool,
self.m_feature_extractor,
not self.m_args.no_zt_norm,
dir_type = cur_type,
indices = self.indices(self.m_file_selector.model_ids(self.m_args.group), self.m_grid_config.number_of_models_per_enrol_job),
groups = [self.m_args.group],
types = ['N'],
force = self.m_args.force)
else:
self.m_tool_chain.enrol_models(
self.m_tool,
self.m_feature_extractor,
not self.m_args.no_zt_norm,
dir_type = cur_type,
indices = self.indices(self.m_file_selector.tmodel_ids(self.m_args.group), self.m_grid_config.number_of_models_per_enrol_job),
groups = [self.m_args.group],
types = ['T'],
force = self.m_args.force)
# compute scores
if self.m_args.compute_scores:
if self.m_args.score_type in ['A', 'B']:
self.m_tool_chain.compute_scores(
self.m_tool,
not self.m_args.no_zt_norm,
dir_type = cur_type,
indices = self.indices(self.m_file_selector.model_ids(self.m_args.group), self.m_grid_config.number_of_models_per_score_job),
groups = [self.m_args.group],
types = [self.m_args.score_type],
preload_probes = self.m_args.preload_probes,
force = self.m_args.force)
elif self.m_args.score_type in ['C', 'D']:
self.m_tool_chain.compute_scores(
self.m_tool,
not self.m_args.no_zt_norm,
dir_type = cur_type,
indices = self.indices(self.m_file_selector.tmodel_ids(self.m_args.group), self.m_grid_config.number_of_models_per_score_job),
groups = [self.m_args.group],
types = [self.m_args.score_type],
preload_probes = self.m_args.preload_probes,
force = self.m_args.force)
else:
self.m_tool_chain.zt_norm(self.m_tool, groups = [self.m_args.group])
# concatenate
if self.m_args.concatenate:
self.m_tool_chain.concatenate(
not self.m_args.no_zt_norm,
groups = [self.m_args.group])
def parse_args(command_line_arguments = sys.argv[1:]):
"""This function parses the given options (which by default are the command line options)"""
# sorry for that.
global parameters
parameters = command_line_arguments
# set up command line parser
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# add the arguments required for all tool chains
config_group, dir_group, file_group, sub_dir_group, other_group, skip_group = ToolChainExecutorIVector.required_command_line_options(parser)
file_group.add_argument('--whitening-enroler-file' , type = str, metavar = 'FILE', default = 'WhiteEnroler.hdf5',
help = 'Name of the file to write the model of whitening enroler into')
file_group.add_argument('--lda-projector-file' , type = str, metavar = 'FILE', default = 'LDAProjector.hdf5',
help = 'Name of the file to write the model of LDA projector into')
file_group.add_argument('--wccn-projector-file' , type = str, metavar = 'FILE', default = 'WCCNProjector.hdf5',
help = 'Name of the file to write the model of WCCN projector into')
file_group.add_argument('--plda-enroler-file' , type = str, metavar = 'FILE', default = 'PLDAEnroler.hdf5',
help = 'Name of the file to write the model of PLDA enroler into')
sub_dir_group.add_argument('--projected-ivector-directory', type = str, metavar = 'DIR', default = 'projected_ivector', dest = 'projected_ivector_dir',
help = 'Name of the directory where the projected data should be stored')
sub_dir_group.add_argument('--whitened-ivector-directory', type = str, metavar = 'DIR', default = 'whitened_ivector', dest = 'whitened_ivector_dir',
help = 'Name of the directory where the whitened i-vectors should be stored')
sub_dir_group.add_argument('--lnorm-ivector-directory', type = str, metavar = 'DIR', default = 'lnorm_ivector', dest = 'lnorm_ivector_dir',
help = 'Name of the directory where the length-normalized i-vectors should be stored')
sub_dir_group.add_argument('--lda-projected-ivector-directory', type = str, metavar = 'DIR', default = 'lda_projected_ivector', dest = 'lda_projected_ivector_dir',
help = 'Name of the directory where the LDA-projected i-vectors should be stored')
sub_dir_group.add_argument('--wccn-projected-ivector-directory', type = str, metavar = 'DIR', default = 'wccn_projected_ivector', dest = 'wccn_projected_ivector_dir',
help = 'Name of the directory where the WCCN-projected i-vectors should be stored')
sub_dir_group.add_argument('--models-directories', type = str, metavar = 'DIR', nargs = 2, dest='models_dirs',
default = ['models', 'tmodels'],
help = 'Subdirectories (of temp directory) where the models should be stored')
sub_dir_group.add_argument('--zt-norm-directories', type = str, metavar = 'DIR', nargs = 5, dest='zt_dirs',
default = ['zt_norm_A', 'zt_norm_B', 'zt_norm_C', 'zt_norm_D', 'zt_norm_D_sameValue'],
help = 'Subdirectories (of --temp-dir) where to write the zt_norm values')
sub_dir_group.add_argument('--score-dirs', type = str, metavar = 'DIR', nargs = 2, dest='score_dirs',
default = ['nonorm', 'ztnorm'],
help = 'Subdirectories (of --user-dir) where to write the results to')
skip_group.add_argument('--skip-projection-ivector', '--noproivec', action='store_true', dest='skip_projection_ivector',
help = 'Skip the feature IVector projection')
skip_group.add_argument('--skip-whitening-enroler-training', '--nowenrt', action='store_true', dest='skip_whitening_enroler_training',
help = 'Skip the training of the model whitening enrolment')
skip_group.add_argument('--skip-whitening-ivector', '--nowivec', action='store_true', dest='skip_whitening_ivector',
help = 'Skip whitening i-vectors')
skip_group.add_argument('--skip-lnorm-ivector', '--nolnivec', action='store_true', dest='skip_lnorm_ivector',
help = 'Skip lnorm i-vectors')
skip_group.add_argument('--skip-lda-train-projector', '--noldaprojt', action='store_true', dest='skip_lda_train_projector',
help = 'Skip the training of the LDA projector')
skip_group.add_argument('--skip-lda-projection', '--noldaproj', action='store_true', dest='skip_lda_projection',
help = 'Skip projecting i-vectors on LDA')
skip_group.add_argument('--skip-wccn-train-projector', '--nowccnprojt', action='store_true', dest='skip_wccn_train_projector',
help = 'Skip the training of the WCCN projector')
skip_group.add_argument('--skip-wccn-projection', '--nowccnproj', action='store_true', dest='skip_wccn_projection',
help = 'Skip projecting i-vectors on WCCN')
skip_group.add_argument('--skip-train-plda-enroler', '--nopldaenrt', action='store_true', dest='skip_train_plda_enroler',
help = 'Skip the training of the plda model enrolment')
#######################################################################################
############################ other options ############################################
other_group.add_argument('-z', '--no-zt-norm', action='store_true', dest = 'no_zt_norm',
help = 'DISABLE the computation of ZT norms')
other_group.add_argument('-F', '--force', action='store_true',
help = 'Force recomputation, erasing former data if it already exists')
other_group.add_argument('-w', '--preload-probes', action='store_true', dest='preload_probes',
help = 'Preload probe files during score computation (needs more memory, but is faster and requires fewer file accesses). WARNING! Use this flag with care!')
other_group.add_argument('--groups', type = str, metavar = 'GROUP', nargs = '+', default = ['dev', 'eval'],
help = "The group (i.e., 'dev' or 'eval') for which the models and scores should be generated")
#######################################################################################
#################### sub-tasks being executed by this script ##########################
parser.add_argument('--execute-sub-task', action='store_true', dest = 'execute_sub_task',
help = argparse.SUPPRESS) #'Executes a subtask (FOR INTERNAL USE ONLY!!!)'
parser.add_argument('--preprocess', action='store_true',
help = argparse.SUPPRESS) #'Perform VAD on the given range of audio files'
parser.add_argument('--feature-extraction-training', action='store_true', dest = 'feature_extraction_training',
help = argparse.SUPPRESS) #'Perform feature extraction training'
parser.add_argument('--feature-extraction', action='store_true', dest = 'feature_extraction',
help = argparse.SUPPRESS) #'Perform feature extraction for the given range of preprocessed audio files'
parser.add_argument('--train-projector', action='store_true', dest = 'train_projector',
help = argparse.SUPPRESS) #'Perform projector training'
parser.add_argument('--feature-projection-ubm', action='store_true', dest = 'projection_ubm',
help = argparse.SUPPRESS) #'Perform feature projection ubm'
parser.add_argument('--train-enroler', action='store_true', dest = 'train_enroler',
help = argparse.SUPPRESS) #'Perform enrolment training'
parser.add_argument('--train-whitening-enroler', action='store_true', dest = 'train_whitening_enroler',
help = argparse.SUPPRESS) #'Perform whitening enroler training'
parser.add_argument('--feature-projection-ivector', action='store_true', dest = 'projection_ivector',
help = argparse.SUPPRESS) #'Perform feature projection ivector'
parser.add_argument('--whitening-ivector', action='store_true', dest = 'whitening_ivector',
help = argparse.SUPPRESS) #'Perform ivector whitening'
parser.add_argument('--lnorm-ivector', action='store_true', dest = 'lnorm_ivector',
help = argparse.SUPPRESS) #'Perform ivector length normalization'
parser.add_argument('--lda-train-projector', action='store_true', dest = 'lda_train_projector',
help = argparse.SUPPRESS) #'Perform LDA projector training'
parser.add_argument('--lda-project-ivector', action='store_true', dest = 'lda_project_ivector',
help = argparse.SUPPRESS) #'Perform LDA projection'
parser.add_argument('--wccn-train-projector', action='store_true', dest = 'wccn_train_projector',
help = argparse.SUPPRESS) #'Perform WCCN projector training'
parser.add_argument('--wccn-project-ivector', action='store_true', dest = 'wccn_project_ivector',
help = argparse.SUPPRESS) #'Perform WCCN projection'
parser.add_argument('--train-plda-enroler', action='store_true', dest = 'train_plda_enroler',
help = argparse.SUPPRESS) #'Perform PLDA enroler training'
parser.add_argument('--enrol-models', action='store_true', dest = 'enrol_models',
help = argparse.SUPPRESS) #'Generate the given range of models from the features'
parser.add_argument('--model-type', type = str, choices = ['N', 'T'], metavar = 'TYPE',
help = argparse.SUPPRESS) #'Which type of models to generate (Normal or TModels)'
parser.add_argument('--compute-scores', action='store_true', dest = 'compute_scores',
help = argparse.SUPPRESS) #'Compute scores for the given range of models'
parser.add_argument('--score-type', type = str, choices=['A', 'B', 'C', 'D', 'Z'], metavar = 'SCORE',
help = argparse.SUPPRESS) #'The type of scores that should be computed'
parser.add_argument('--group', type = str, metavar = 'GROUP',
help = argparse.SUPPRESS) #'The group for which the current action should be performed'
parser.add_argument('--concatenate', action='store_true',
help = argparse.SUPPRESS) #'Concatenates the results of all scores of the given group'
return parser.parse_args(command_line_arguments)
def speaker_verify(args, external_dependencies = [], external_fake_job_id = 0):
"""This is the main entry point for computing speaker verification experiments.
You just have to specify configuration scripts for each of the steps of the tool chain, which are:
-- the database
-- preprocessing (VAD)
-- feature extraction
-- the score computation tool
-- and the grid configuration (in case the tool chain should be executed in the grid).
Additionally, you can skip parts of the tool chain by passing the appropriate --skip-... parameters.
If your probe files are not too big, you can also specify the --preload-probes switch to speed up the score computation.
If files should be re-generated, please specify the --force option (might be combined with the --skip-... options)"""
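# Sketch of a typical invocation (hypothetical file names; the exact switches for the
# database, preprocessor and tool configurations are defined by the shared
# ToolChainExecutor.required_command_line_options and are only assumed here):
#   ./bin/spkverif_ivector.py --database config/database.py --preprocessor config/vad.py \
#       --tool config/ivector.py --groups dev eval --force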
# generate tool chain executor
executor = ToolChainExecutorIVector(args)
# as the main entry point, check whether the grid option was given
if not args.grid:
# not in a grid, use default tool chain sequentially
executor.execute_tool_chain()
return []
elif args.execute_sub_task:
# execute the desired sub-task
executor.execute_grid_job()
return []
else:
# no other parameter given, so deploy new jobs
# get the name of this file
this_file = __file__
if this_file[-1] == 'c':
this_file = this_file[0:-1]
# initialize the executor to submit the jobs to the grid
global parameters
executor.set_common_parameters(calling_file = this_file, parameters = parameters, fake_job_id = external_fake_job_id )
# add the jobs
return executor.add_jobs_to_grid(external_dependencies)
def main(command_line_parameters = sys.argv):
"""Executes the main function"""
# do the command line parsing
args = parse_args(command_line_parameters[1:])
# verify that the input files exist
for f in (args.database, args.preprocessor, args.tool):
if not os.path.exists(str(f)):
raise ValueError("The given file '%s' does not exist."%f)
# perform speaker verification test
speaker_verify(args)
if __name__ == "__main__":
main()
|
PypiClean
|
/pulumi_gcp_native-0.0.2a1617829075.tar.gz/pulumi_gcp_native-0.0.2a1617829075/pulumi_gcp_native/firebasehosting/v1beta1/outputs.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ActingUserResponse',
'CertDnsChallengeResponse',
'CertHttpChallengeResponse',
'CloudRunRewriteResponse',
'DomainProvisioningResponse',
'DomainRedirectResponse',
'HeaderResponse',
'I18nConfigResponse',
'PreviewConfigResponse',
'RedirectResponse',
'ReleaseResponse',
'RewriteResponse',
'ServingConfigResponse',
'VersionResponse',
]
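# Illustrative only (assumed usage; not part of the generated module): these @pulumi.output_type
# classes are read-only views over provider outputs and are not constructed directly by user
# code. A result exposing a ReleaseResponse would be consumed like
#   release.release_user.email   # str, via ActingUserResponse
#   release.name                 # e.g. 'sites/SITE_ID/releases/RELEASE_ID'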
@pulumi.output_type
class ActingUserResponse(dict):
"""
Contains metadata about the user who performed an action, such as creating a release or finalizing a version.
"""
def __init__(__self__, *,
email: str,
image_url: str):
"""
Contains metadata about the user who performed an action, such as creating a release or finalizing a version.
:param str email: The email address of the user when the user performed the action.
:param str image_url: A profile image URL for the user. May not be present if the user has changed their email address or deleted their account.
"""
pulumi.set(__self__, "email", email)
pulumi.set(__self__, "image_url", image_url)
@property
@pulumi.getter
def email(self) -> str:
"""
The email address of the user when the user performed the action.
"""
return pulumi.get(self, "email")
@property
@pulumi.getter(name="imageUrl")
def image_url(self) -> str:
"""
A profile image URL for the user. May not be present if the user has changed their email address or deleted their account.
"""
return pulumi.get(self, "image_url")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CertDnsChallengeResponse(dict):
"""
Represents a DNS certificate challenge.
"""
def __init__(__self__, *,
domain_name: str,
token: str):
"""
Represents a DNS certificate challenge.
:param str domain_name: The domain name upon which the DNS challenge must be satisfied.
:param str token: The value that must be present as a TXT record on the domain name to satisfy the challenge.
"""
pulumi.set(__self__, "domain_name", domain_name)
pulumi.set(__self__, "token", token)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> str:
"""
The domain name upon which the DNS challenge must be satisfied.
"""
return pulumi.get(self, "domain_name")
@property
@pulumi.getter
def token(self) -> str:
"""
The value that must be present as a TXT record on the domain name to satisfy the challenge.
"""
return pulumi.get(self, "token")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CertHttpChallengeResponse(dict):
"""
Represents an HTTP certificate challenge.
"""
def __init__(__self__, *,
path: str,
token: str):
"""
Represents an HTTP certificate challenge.
:param str path: The URL path on which to serve the specified token to satisfy the certificate challenge.
:param str token: The token to serve at the specified URL path to satisfy the certificate challenge.
"""
pulumi.set(__self__, "path", path)
pulumi.set(__self__, "token", token)
@property
@pulumi.getter
def path(self) -> str:
"""
The URL path on which to serve the specified token to satisfy the certificate challenge.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def token(self) -> str:
"""
The token to serve at the specified URL path to satisfy the certificate challenge.
"""
return pulumi.get(self, "token")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CloudRunRewriteResponse(dict):
"""
A configured rewrite that directs requests to a Cloud Run service. If the Cloud Run service does not exist when setting or updating your Firebase Hosting configuration, then the request fails. Any errors from the Cloud Run service are passed to the end user (for example, if you delete a service, any requests directed to that service receive a `404` error).
"""
def __init__(__self__, *,
region: str,
service_id: str):
"""
A configured rewrite that directs requests to a Cloud Run service. If the Cloud Run service does not exist when setting or updating your Firebase Hosting configuration, then the request fails. Any errors from the Cloud Run service are passed to the end user (for example, if you delete a service, any requests directed to that service receive a `404` error).
:param str region: Optional. User-provided region where the Cloud Run service is hosted. Defaults to `us-central1` if not supplied.
:param str service_id: Required. User-defined ID of the Cloud Run service.
"""
pulumi.set(__self__, "region", region)
pulumi.set(__self__, "service_id", service_id)
@property
@pulumi.getter
def region(self) -> str:
"""
Optional. User-provided region where the Cloud Run service is hosted. Defaults to `us-central1` if not supplied.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="serviceId")
def service_id(self) -> str:
"""
Required. User-defined ID of the Cloud Run service.
"""
return pulumi.get(self, "service_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DomainProvisioningResponse(dict):
"""
The current certificate provisioning status information for a domain.
"""
def __init__(__self__, *,
cert_challenge_discovered_txt: Sequence[str],
cert_challenge_dns: 'outputs.CertDnsChallengeResponse',
cert_challenge_http: 'outputs.CertHttpChallengeResponse',
cert_status: str,
discovered_ips: Sequence[str],
dns_fetch_time: str,
dns_status: str,
expected_ips: Sequence[str]):
"""
The current certificate provisioning status information for a domain.
:param Sequence[str] cert_challenge_discovered_txt: The TXT records (for the certificate challenge) that were found at the last DNS fetch.
:param 'CertDnsChallengeResponseArgs' cert_challenge_dns: The DNS challenge for generating a certificate.
:param 'CertHttpChallengeResponseArgs' cert_challenge_http: The HTTP challenge for generating a certificate.
:param str cert_status: The certificate provisioning status; updated when Firebase Hosting provisions an SSL certificate for the domain.
:param Sequence[str] discovered_ips: The IPs found at the last DNS fetch.
:param str dns_fetch_time: The time at which the last DNS fetch occurred.
:param str dns_status: The DNS record match status as of the last DNS fetch.
:param Sequence[str] expected_ips: The list of IPs to which the domain is expected to resolve.
"""
pulumi.set(__self__, "cert_challenge_discovered_txt", cert_challenge_discovered_txt)
pulumi.set(__self__, "cert_challenge_dns", cert_challenge_dns)
pulumi.set(__self__, "cert_challenge_http", cert_challenge_http)
pulumi.set(__self__, "cert_status", cert_status)
pulumi.set(__self__, "discovered_ips", discovered_ips)
pulumi.set(__self__, "dns_fetch_time", dns_fetch_time)
pulumi.set(__self__, "dns_status", dns_status)
pulumi.set(__self__, "expected_ips", expected_ips)
@property
@pulumi.getter(name="certChallengeDiscoveredTxt")
def cert_challenge_discovered_txt(self) -> Sequence[str]:
"""
The TXT records (for the certificate challenge) that were found at the last DNS fetch.
"""
return pulumi.get(self, "cert_challenge_discovered_txt")
@property
@pulumi.getter(name="certChallengeDns")
def cert_challenge_dns(self) -> 'outputs.CertDnsChallengeResponse':
"""
The DNS challenge for generating a certificate.
"""
return pulumi.get(self, "cert_challenge_dns")
@property
@pulumi.getter(name="certChallengeHttp")
def cert_challenge_http(self) -> 'outputs.CertHttpChallengeResponse':
"""
The HTTP challenge for generating a certificate.
"""
return pulumi.get(self, "cert_challenge_http")
@property
@pulumi.getter(name="certStatus")
def cert_status(self) -> str:
"""
The certificate provisioning status; updated when Firebase Hosting provisions an SSL certificate for the domain.
"""
return pulumi.get(self, "cert_status")
@property
@pulumi.getter(name="discoveredIps")
def discovered_ips(self) -> Sequence[str]:
"""
The IPs found at the last DNS fetch.
"""
return pulumi.get(self, "discovered_ips")
@property
@pulumi.getter(name="dnsFetchTime")
def dns_fetch_time(self) -> str:
"""
The time at which the last DNS fetch occurred.
"""
return pulumi.get(self, "dns_fetch_time")
@property
@pulumi.getter(name="dnsStatus")
def dns_status(self) -> str:
"""
The DNS record match status as of the last DNS fetch.
"""
return pulumi.get(self, "dns_status")
@property
@pulumi.getter(name="expectedIps")
def expected_ips(self) -> Sequence[str]:
"""
The list of IPs to which the domain is expected to resolve.
"""
return pulumi.get(self, "expected_ips")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DomainRedirectResponse(dict):
"""
Defines the behavior of a domain-level redirect. Domain redirects preserve the path of the redirect but replace the requested domain with the one specified in the redirect configuration.
"""
def __init__(__self__, *,
domain_name: str,
type: str):
"""
Defines the behavior of a domain-level redirect. Domain redirects preserve the path of the redirect but replace the requested domain with the one specified in the redirect configuration.
:param str domain_name: Required. The domain name to redirect to.
:param str type: Required. The redirect status code.
"""
pulumi.set(__self__, "domain_name", domain_name)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> str:
"""
Required. The domain name to redirect to.
"""
return pulumi.get(self, "domain_name")
@property
@pulumi.getter
def type(self) -> str:
"""
Required. The redirect status code.
"""
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class HeaderResponse(dict):
"""
A [`Header`](https://firebase.google.com/docs/hosting/full-config#headers) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to apply the specified custom response headers.
"""
def __init__(__self__, *,
glob: str,
headers: Mapping[str, str],
regex: str):
"""
A [`Header`](https://firebase.google.com/docs/hosting/full-config#headers) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to apply the specified custom response headers.
:param str glob: The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
:param Mapping[str, str] headers: Required. The additional headers to add to the response.
:param str regex: The user-supplied RE2 regular expression to match against the request URL path.
"""
pulumi.set(__self__, "glob", glob)
pulumi.set(__self__, "headers", headers)
pulumi.set(__self__, "regex", regex)
@property
@pulumi.getter
def glob(self) -> str:
"""
The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
"""
return pulumi.get(self, "glob")
@property
@pulumi.getter
def headers(self) -> Mapping[str, str]:
"""
Required. The additional headers to add to the response.
"""
return pulumi.get(self, "headers")
@property
@pulumi.getter
def regex(self) -> str:
"""
The user-supplied RE2 regular expression to match against the request URL path.
"""
return pulumi.get(self, "regex")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
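# Illustrative shape of a single header rule built from the HeaderResponse fields above
# (glob pattern and header values are hypothetical):
#   {"glob": "**/*.js", "headers": {"Cache-Control": "max-age=3600"}}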
@pulumi.output_type
class I18nConfigResponse(dict):
"""
If provided, i18n rewrites are enabled.
"""
def __init__(__self__, *,
root: str):
"""
If provided, i18n rewrites are enabled.
:param str root: Required. The user-supplied path where country and language specific content will be looked for within the public directory.
"""
pulumi.set(__self__, "root", root)
@property
@pulumi.getter
def root(self) -> str:
"""
Required. The user-supplied path where country and language specific content will be looked for within the public directory.
"""
return pulumi.get(self, "root")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class PreviewConfigResponse(dict):
"""
Deprecated in favor of [site channels](sites.channels).
"""
def __init__(__self__, *,
active: bool,
expire_time: str):
"""
Deprecated in favor of [site channels](sites.channels).
:param bool active: If true, preview URLs are enabled for this version.
:param str expire_time: Indicates the expiration time for previewing this version; preview URL requests received after this time will 404.
"""
pulumi.set(__self__, "active", active)
pulumi.set(__self__, "expire_time", expire_time)
@property
@pulumi.getter
def active(self) -> bool:
"""
If true, preview URLs are enabled for this version.
"""
return pulumi.get(self, "active")
@property
@pulumi.getter(name="expireTime")
def expire_time(self) -> str:
"""
Indicates the expiration time for previewing this version; preview URL requests received after this time will 404.
"""
return pulumi.get(self, "expire_time")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RedirectResponse(dict):
"""
A [`Redirect`](https://firebase.google.com/docs/hosting/full-config#redirects) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond with a redirect to the specified destination path.
"""
def __init__(__self__, *,
glob: str,
location: str,
regex: str,
status_code: int):
"""
A [`Redirect`](https://firebase.google.com/docs/hosting/full-config#redirects) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond with a redirect to the specified destination path.
:param str glob: The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
:param str location: Required. The value to put in the HTTP location header of the response. The location can contain capture group values from the pattern using a `:` prefix to identify the segment and an optional `*` to capture the rest of the URL. For example: "glob": "/:capture*", "statusCode": 301, "location": "https://example.com/foo/:capture"
:param str regex: The user-supplied RE2 regular expression to match against the request URL path.
:param int status_code: Required. The status HTTP code to return in the response. It must be a valid 3xx status code.
"""
pulumi.set(__self__, "glob", glob)
pulumi.set(__self__, "location", location)
pulumi.set(__self__, "regex", regex)
pulumi.set(__self__, "status_code", status_code)
@property
@pulumi.getter
def glob(self) -> str:
"""
The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
"""
return pulumi.get(self, "glob")
@property
@pulumi.getter
def location(self) -> str:
"""
Required. The value to put in the HTTP location header of the response. The location can contain capture group values from the pattern using a `:` prefix to identify the segment and an optional `*` to capture the rest of the URL. For example: "glob": "/:capture*", "statusCode": 301, "location": "https://example.com/foo/:capture"
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def regex(self) -> str:
"""
The user-supplied RE2 regular expression to match against the request URL path.
"""
return pulumi.get(self, "regex")
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> int:
"""
Required. The status HTTP code to return in the response. It must be a valid 3xx status code.
"""
return pulumi.get(self, "status_code")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
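# Illustrative shape of a redirect entry, reusing the capture-group example given in the
# `location` documentation above:
#   {"glob": "/:capture*", "statusCode": 301, "location": "https://example.com/foo/:capture"}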
@pulumi.output_type
class ReleaseResponse(dict):
"""
A `Release` is a particular [collection of configurations and files](sites.versions) that is set to be public at a particular time.
"""
def __init__(__self__, *,
message: str,
name: str,
release_time: str,
release_user: 'outputs.ActingUserResponse',
type: str,
version: 'outputs.VersionResponse'):
"""
A `Release` is a particular [collection of configurations and files](sites.versions) that is set to be public at a particular time.
:param str message: The deploy description when the release was created. The value can be up to 512 characters.
:param str name: The unique identifier for the release, in either of the following formats: - sites/SITE_ID/releases/RELEASE_ID - sites/SITE_ID/channels/CHANNEL_ID/releases/RELEASE_ID This name is provided in the response body when you call [`releases.create`](sites.releases/create) or [`channels.releases.create`](sites.channels.releases/create).
:param str release_time: The time at which the version is set to be public.
:param 'ActingUserResponseArgs' release_user: Identifies the user who created the release.
:param str type: Explains the reason for the release. Specify a value for this field only when creating a `SITE_DISABLE` type release.
:param 'VersionResponseArgs' version: The configuration and content that was released.
"""
pulumi.set(__self__, "message", message)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "release_time", release_time)
pulumi.set(__self__, "release_user", release_user)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def message(self) -> str:
"""
The deploy description when the release was created. The value can be up to 512 characters.
"""
return pulumi.get(self, "message")
@property
@pulumi.getter
def name(self) -> str:
"""
The unique identifier for the release, in either of the following formats: - sites/SITE_ID/releases/RELEASE_ID - sites/SITE_ID/channels/CHANNEL_ID/releases/RELEASE_ID This name is provided in the response body when you call [`releases.create`](sites.releases/create) or [`channels.releases.create`](sites.channels.releases/create).
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="releaseTime")
def release_time(self) -> str:
"""
The time at which the version is set to be public.
"""
return pulumi.get(self, "release_time")
@property
@pulumi.getter(name="releaseUser")
def release_user(self) -> 'outputs.ActingUserResponse':
"""
Identifies the user who created the release.
"""
return pulumi.get(self, "release_user")
@property
@pulumi.getter
def type(self) -> str:
"""
Explains the reason for the release. Specify a value for this field only when creating a `SITE_DISABLE` type release.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def version(self) -> 'outputs.VersionResponse':
"""
The configuration and content that was released.
"""
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class RewriteResponse(dict):
"""
A [`Rewrite`](https://firebase.google.com/docs/hosting/full-config#rewrites) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond as if the service were given the specified destination URL.
"""
def __init__(__self__, *,
dynamic_links: bool,
function: str,
glob: str,
path: str,
regex: str,
run: 'outputs.CloudRunRewriteResponse'):
"""
A [`Rewrite`](https://firebase.google.com/docs/hosting/full-config#rewrites) specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond as if the service were given the specified destination URL.
:param bool dynamic_links: The request will be forwarded to Firebase Dynamic Links.
:param str function: The function to proxy requests to. Must match the exported function name exactly.
:param str glob: The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
:param str path: The URL path to rewrite the request to.
:param str regex: The user-supplied RE2 regular expression to match against the request URL path.
:param 'CloudRunRewriteResponseArgs' run: The request will be forwarded to Cloud Run.
"""
pulumi.set(__self__, "dynamic_links", dynamic_links)
pulumi.set(__self__, "function", function)
pulumi.set(__self__, "glob", glob)
pulumi.set(__self__, "path", path)
pulumi.set(__self__, "regex", regex)
pulumi.set(__self__, "run", run)
@property
@pulumi.getter(name="dynamicLinks")
def dynamic_links(self) -> bool:
"""
The request will be forwarded to Firebase Dynamic Links.
"""
return pulumi.get(self, "dynamic_links")
@property
@pulumi.getter
def function(self) -> str:
"""
The function to proxy requests to. Must match the exported function name exactly.
"""
return pulumi.get(self, "function")
@property
@pulumi.getter
def glob(self) -> str:
"""
The user-supplied [glob](https://firebase.google.com/docs/hosting/full-config#glob_pattern_matching) to match against the request URL path.
"""
return pulumi.get(self, "glob")
@property
@pulumi.getter
def path(self) -> str:
"""
The URL path to rewrite the request to.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def regex(self) -> str:
"""
The user-supplied RE2 regular expression to match against the request URL path.
"""
return pulumi.get(self, "regex")
@property
@pulumi.getter
def run(self) -> 'outputs.CloudRunRewriteResponse':
"""
The request will be forwarded to Cloud Run.
"""
return pulumi.get(self, "run")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServingConfigResponse(dict):
"""
The configuration for how incoming requests to a site should be routed and processed before serving content. The URL request paths are matched against the specified URL patterns in the configuration, then Hosting applies the applicable configuration according to a specific [priority order](https://firebase.google.com/docs/hosting/full-config#hosting_priority_order).
"""
def __init__(__self__, *,
app_association: str,
clean_urls: bool,
headers: Sequence['outputs.HeaderResponse'],
i18n: 'outputs.I18nConfigResponse',
redirects: Sequence['outputs.RedirectResponse'],
rewrites: Sequence['outputs.RewriteResponse'],
trailing_slash_behavior: str):
"""
The configuration for how incoming requests to a site should be routed and processed before serving content. The URL request paths are matched against the specified URL patterns in the configuration, then Hosting applies the applicable configuration according to a specific [priority order](https://firebase.google.com/docs/hosting/full-config#hosting_priority_order).
:param str app_association: How to handle well known App Association files.
:param bool clean_urls: Defines whether to drop the file extension from uploaded files.
:param Sequence['HeaderResponseArgs'] headers: An array of objects, where each object specifies a URL pattern that, if matched to the request URL path, triggers Hosting to apply the specified custom response headers.
:param 'I18nConfigResponseArgs' i18n: Optional. Defines i18n rewrite behavior.
:param Sequence['RedirectResponseArgs'] redirects: An array of objects (called redirect rules), where each rule specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond with a redirect to the specified destination path.
:param Sequence['RewriteResponseArgs'] rewrites: An array of objects (called rewrite rules), where each rule specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond as if the service were given the specified destination URL.
:param str trailing_slash_behavior: Defines how to handle a trailing slash in the URL path.
"""
pulumi.set(__self__, "app_association", app_association)
pulumi.set(__self__, "clean_urls", clean_urls)
pulumi.set(__self__, "headers", headers)
pulumi.set(__self__, "i18n", i18n)
pulumi.set(__self__, "redirects", redirects)
pulumi.set(__self__, "rewrites", rewrites)
pulumi.set(__self__, "trailing_slash_behavior", trailing_slash_behavior)
@property
@pulumi.getter(name="appAssociation")
def app_association(self) -> str:
"""
How to handle well known App Association files.
"""
return pulumi.get(self, "app_association")
@property
@pulumi.getter(name="cleanUrls")
def clean_urls(self) -> bool:
"""
Defines whether to drop the file extension from uploaded files.
"""
return pulumi.get(self, "clean_urls")
@property
@pulumi.getter
def headers(self) -> Sequence['outputs.HeaderResponse']:
"""
An array of objects, where each object specifies a URL pattern that, if matched to the request URL path, triggers Hosting to apply the specified custom response headers.
"""
return pulumi.get(self, "headers")
@property
@pulumi.getter
def i18n(self) -> 'outputs.I18nConfigResponse':
"""
Optional. Defines i18n rewrite behavior.
"""
return pulumi.get(self, "i18n")
@property
@pulumi.getter
def redirects(self) -> Sequence['outputs.RedirectResponse']:
"""
An array of objects (called redirect rules), where each rule specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond with a redirect to the specified destination path.
"""
return pulumi.get(self, "redirects")
@property
@pulumi.getter
def rewrites(self) -> Sequence['outputs.RewriteResponse']:
"""
An array of objects (called rewrite rules), where each rule specifies a URL pattern that, if matched to the request URL path, triggers Hosting to respond as if the service were given the specified destination URL.
"""
return pulumi.get(self, "rewrites")
@property
@pulumi.getter(name="trailingSlashBehavior")
def trailing_slash_behavior(self) -> str:
"""
Defines how to handle a trailing slash in the URL path.
"""
return pulumi.get(self, "trailing_slash_behavior")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VersionResponse(dict):
"""
A `Version` is a configuration and a collection of static files which determine how a site is displayed.
"""
def __init__(__self__, *,
config: 'outputs.ServingConfigResponse',
create_time: str,
create_user: 'outputs.ActingUserResponse',
delete_time: str,
delete_user: 'outputs.ActingUserResponse',
file_count: str,
finalize_time: str,
finalize_user: 'outputs.ActingUserResponse',
labels: Mapping[str, str],
name: str,
preview: 'outputs.PreviewConfigResponse',
status: str,
version_bytes: str):
"""
A `Version` is a configuration and a collection of static files which determine how a site is displayed.
:param 'ServingConfigResponseArgs' config: The configuration for the behavior of the site. This configuration exists in the [`firebase.json`](https://firebase.google.com/docs/cli/#the_firebasejson_file) file.
:param str create_time: The time at which the version was created.
:param 'ActingUserResponseArgs' create_user: Identifies the user who created the version.
:param str delete_time: The time at which the version was `DELETED`.
:param 'ActingUserResponseArgs' delete_user: Identifies the user who `DELETED` the version.
:param str file_count: The total number of files associated with the version. This value is calculated after a version is `FINALIZED`.
:param str finalize_time: The time at which the version was `FINALIZED`.
:param 'ActingUserResponseArgs' finalize_user: Identifies the user who `FINALIZED` the version.
:param Mapping[str, str] labels: The labels used for extra metadata and/or filtering.
:param str name: The fully-qualified resource name for the version, in the format: sites/ SITE_ID/versions/VERSION_ID This name is provided in the response body when you call [`CreateVersion`](sites.versions/create).
:param 'PreviewConfigResponseArgs' preview: Deprecated in favor of [site channels](sites.channels).
:param str status: The deploy status of the version. For a successful deploy, call [`CreateVersion`](sites.versions/create) to make a new version (`CREATED` status), [upload all desired files](sites.versions/populateFiles) to the version, then [update](sites.versions/patch) the version to the `FINALIZED` status. Note that if you leave the version in the `CREATED` state for more than 12 hours, the system will automatically mark the version as `ABANDONED`. You can also change the status of a version to `DELETED` by calling [`DeleteVersion`](sites.versions/delete).
:param str version_bytes: The total stored bytesize of the version. This value is calculated after a version is `FINALIZED`.
"""
pulumi.set(__self__, "config", config)
pulumi.set(__self__, "create_time", create_time)
pulumi.set(__self__, "create_user", create_user)
pulumi.set(__self__, "delete_time", delete_time)
pulumi.set(__self__, "delete_user", delete_user)
pulumi.set(__self__, "file_count", file_count)
pulumi.set(__self__, "finalize_time", finalize_time)
pulumi.set(__self__, "finalize_user", finalize_user)
pulumi.set(__self__, "labels", labels)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "preview", preview)
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "version_bytes", version_bytes)
@property
@pulumi.getter
def config(self) -> 'outputs.ServingConfigResponse':
"""
The configuration for the behavior of the site. This configuration exists in the [`firebase.json`](https://firebase.google.com/docs/cli/#the_firebasejson_file) file.
"""
return pulumi.get(self, "config")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time at which the version was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="createUser")
def create_user(self) -> 'outputs.ActingUserResponse':
"""
Identifies the user who created the version.
"""
return pulumi.get(self, "create_user")
@property
@pulumi.getter(name="deleteTime")
def delete_time(self) -> str:
"""
The time at which the version was `DELETED`.
"""
return pulumi.get(self, "delete_time")
@property
@pulumi.getter(name="deleteUser")
def delete_user(self) -> 'outputs.ActingUserResponse':
"""
Identifies the user who `DELETED` the version.
"""
return pulumi.get(self, "delete_user")
@property
@pulumi.getter(name="fileCount")
def file_count(self) -> str:
"""
The total number of files associated with the version. This value is calculated after a version is `FINALIZED`.
"""
return pulumi.get(self, "file_count")
@property
@pulumi.getter(name="finalizeTime")
def finalize_time(self) -> str:
"""
The time at which the version was `FINALIZED`.
"""
return pulumi.get(self, "finalize_time")
@property
@pulumi.getter(name="finalizeUser")
def finalize_user(self) -> 'outputs.ActingUserResponse':
"""
Identifies the user who `FINALIZED` the version.
"""
return pulumi.get(self, "finalize_user")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
The labels used for extra metadata and/or filtering.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
The fully-qualified resource name for the version, in the format: sites/ SITE_ID/versions/VERSION_ID This name is provided in the response body when you call [`CreateVersion`](sites.versions/create).
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def preview(self) -> 'outputs.PreviewConfigResponse':
"""
Deprecated in favor of [site channels](sites.channels).
"""
return pulumi.get(self, "preview")
@property
@pulumi.getter
def status(self) -> str:
"""
The deploy status of the version. For a successful deploy, call [`CreateVersion`](sites.versions/create) to make a new version (`CREATED` status), [upload all desired files](sites.versions/populateFiles) to the version, then [update](sites.versions/patch) the version to the `FINALIZED` status. Note that if you leave the version in the `CREATED` state for more than 12 hours, the system will automatically mark the version as `ABANDONED`. You can also change the status of a version to `DELETED` by calling [`DeleteVersion`](sites.versions/delete).
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="versionBytes")
def version_bytes(self) -> str:
"""
The total stored bytesize of the version. This value is calculated after a version is `FINALIZED`.
"""
return pulumi.get(self, "version_bytes")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
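# ----------------------------------------------------------------------
# A minimal consumption sketch, not part of the generated SDK. It assumes some
# hosting resource (hypothetical here) exposes a pulumi Output wrapping the
# VersionResponse type above; only properties defined in this module are read.
def _export_version_summary(version_output):
    """version_output: pulumi.Output[VersionResponse] from a hosting resource."""
    import pulumi

    # Output.apply defers attribute access until the value resolves at deploy time.
    pulumi.export("versionStatus", version_output.apply(lambda v: v.status))
    pulumi.export("appAssociation", version_output.apply(lambda v: v.config.app_association))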
|
PypiClean
|
/kiwitcms-12.4.tar.gz/kiwitcms-12.4/tcms/node_modules/patternfly/dist/js/patternfly-functions-list.js
|
(function ($) {
'use strict';
$.fn.pfList = function () {
function init (list) {
// Ensure the state of the expansion elements is consistent
list.find('[data-list=expansion], .list-pf-item, .list-pf-expansion').each(function (index, element) {
var $expansion = $(element),
$collapse = $expansion.find('.collapse').first(),
expanded = $collapse.hasClass('in');
updateChevron($expansion, expanded);
if ($expansion.hasClass('list-pf-item')) {
updateActive($expansion, expanded);
}
});
list.find('.list-pf-container').each(function (index, element) {
var $element = $(element);
// The toggle element is the element with the data-list=toggle attribute
// or the entire .list-pf-container as a fallback
var $toggles = $element.find('[data-list=toggle]');
$toggles.length || ($toggles = $element);
$toggles.on('keydown', function (event) {
if (event.keyCode === 13 || event.keyCode === 32) {
toggleCollapse(this);
event.stopPropagation();
event.preventDefault();
}
});
$toggles.on('click', function (event) {
toggleCollapse(this);
event.stopPropagation();
event.preventDefault();
});
});
}
function toggleCollapse (toggle) {
var $toggle, $expansion, $collapse, expanded, $listItem;
$toggle = $(toggle);
// Find the parent expansion of the toggle
$expansion = $toggle.parentsUntil('.list-pf', '[data-list=expansion]').first();
$expansion.length || ($expansion = $toggle.closest('.list-pf-item, .list-pf-expansion'));
// toggle the "in" class of its first .collapse child
$collapse = $expansion.find('.collapse').first();
$collapse.toggleClass('in');
// update the state of the expansion element
updateChevron($expansion, $collapse.hasClass('in'));
$listItem = $expansion.closest('.list-pf-item');
updateActive($listItem, $listItem.find('.collapse').first().hasClass('in'));
}
function updateActive ($listItem, expanded) {
// Find the closest .list-pf-item of the expansion, and set its "active" class
if (expanded) {
$listItem.addClass('active');
} else {
$listItem.removeClass('active');
}
}
function updateChevron ($expansion, expanded) {
var $chevron = $expansion.find('.list-pf-chevron .fa').first();
if (expanded) {
$chevron.removeClass('fa-angle-right');
$chevron.addClass('fa-angle-down');
} else {
$chevron.addClass('fa-angle-right');
$chevron.removeClass('fa-angle-down');
}
}
init(this);
return this;
};
}(jQuery));
|
PypiClean
|
/py_base_layer-1.0.0-py3-none-any.whl/tencentcloud/ams/v20201229/errorcodes.py
|
# DryRun operation, meaning the request would have succeeded, but the DryRun parameter was passed as well.
DRYRUNOPERATION = 'DryRunOperation'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Internal error.
INTERNALERROR = 'InternalError'
# InternalError.InternalError
INTERNALERROR_INTERNALERROR = 'InternalError.InternalError'
# Invalid parameter.
INVALIDPARAMETER = 'InvalidParameter'
# InvalidParameter.ImageSizeTooSmall
INVALIDPARAMETER_IMAGESIZETOOSMALL = 'InvalidParameter.ImageSizeTooSmall'
# InvalidParameter.InvalidImageContent
INVALIDPARAMETER_INVALIDIMAGECONTENT = 'InvalidParameter.InvalidImageContent'
# InvalidParameter.ParameterError
INVALIDPARAMETER_PARAMETERERROR = 'InvalidParameter.ParameterError'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# InvalidParameterValue.EmptyImageContent
INVALIDPARAMETERVALUE_EMPTYIMAGECONTENT = 'InvalidParameterValue.EmptyImageContent'
# InvalidParameterValue.ImageSizeTooSmall
INVALIDPARAMETERVALUE_IMAGESIZETOOSMALL = 'InvalidParameterValue.ImageSizeTooSmall'
# InvalidParameterValue.InvalidContent
INVALIDPARAMETERVALUE_INVALIDCONTENT = 'InvalidParameterValue.InvalidContent'
# InvalidParameterValue.InvalidDataId
INVALIDPARAMETERVALUE_INVALIDDATAID = 'InvalidParameterValue.InvalidDataId'
# InvalidParameterValue.InvalidFileContentSize
INVALIDPARAMETERVALUE_INVALIDFILECONTENTSIZE = 'InvalidParameterValue.InvalidFileContentSize'
# InvalidParameterValue.InvalidImageContent
INVALIDPARAMETERVALUE_INVALIDIMAGECONTENT = 'InvalidParameterValue.InvalidImageContent'
# InvalidParameterValue.InvalidParameter
INVALIDPARAMETERVALUE_INVALIDPARAMETER = 'InvalidParameterValue.InvalidParameter'
# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# Operation denied.
OPERATIONDENIED = 'OperationDenied'
# The number of requests exceeds the rate limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'
# Resource is in use.
RESOURCEINUSE = 'ResourceInUse'
# Insufficient resources.
RESOURCEINSUFFICIENT = 'ResourceInsufficient'
# Resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# Resource unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'
# ResourceUnavailable.InvalidImageContent
RESOURCEUNAVAILABLE_INVALIDIMAGECONTENT = 'ResourceUnavailable.InvalidImageContent'
# Resources sold out.
RESOURCESSOLDOUT = 'ResourcesSoldOut'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# Permission not enabled / no valid package / account in arrears.
UNAUTHORIZEDOPERATION_UNAUTHORIZED = 'UnauthorizedOperation.Unauthorized'
# Unknown parameter.
UNKNOWNPARAMETER = 'UnknownParameter'
# Operation not supported.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
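# ----------------------------------------------------------------------
# A minimal usage sketch, not part of the generated SDK module. It shows one
# way the constants above can be matched against the error code carried by an
# SDK exception. The get_code()/get_message() accessors on the exception object
# are assumptions here; adjust them to the SDK version you actually use.
def _describe_error(exc):
    """Return a human-readable hint for a Tencent Cloud SDK exception."""
    code = exc.get_code()  # assumed accessor on the SDK exception class
    if code == UNAUTHORIZEDOPERATION_UNAUTHORIZED:
        return 'Service not activated, no valid package, or account in arrears.'
    if code in (REQUESTLIMITEXCEEDED, LIMITEXCEEDED):
        return 'Throttled or over quota; retry later with backoff.'
    return 'Unhandled error code: {} ({})'.format(code, exc.get_message())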
|
PypiClean
|
/llm_toys-0.1.1-py3-none-any.whl/llm_toys/hf/transformers/models/perceiver/configuration_perceiver.py
|
""" Perceiver model configuration"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate an
Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Perceiver
[deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_latents (`int`, *optional*, defaults to 256):
The number of latents.
d_latents (`int`, *optional*, defaults to 1280):
Dimension of the latent embeddings.
d_model (`int`, *optional*, defaults to 768):
Dimension of the inputs. Should only be provided in case [*PerceiverTextPreprocessor*] is used or no
preprocessor is provided.
num_blocks (`int`, *optional*, defaults to 1):
Number of blocks in the Transformer encoder.
num_self_attends_per_block (`int`, *optional*, defaults to 26):
The number of self-attention layers per block.
num_self_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each self-attention layer in the Transformer encoder.
num_cross_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each cross-attention layer in the Transformer encoder.
qk_channels (`int`, *optional*):
Dimension to project the queries + keys before applying attention in the cross-attention and self-attention
layers of the encoder. Will default to preserving the dimension of the queries if not specified.
v_channels (`int`, *optional*):
Dimension to project the values before applying attention in the cross-attention and self-attention layers
of the encoder. Will default to preserving the dimension of the queries if not specified.
cross_attention_shape_for_attention (`str`, *optional*, defaults to `'kv'`):
Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
self_attention_widening_factor (`int`, *optional*, defaults to 1):
Dimension of the feed-forward layer in the self-attention layers of the Transformer encoder.
cross_attention_widening_factor (`int`, *optional*, defaults to 1):
Dimension of the feed-forward layer in the cross-attention layer of the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
use_query_residual (`float`, *optional*, defaults to `True`):
Whether to add a query residual in the cross-attention layer of the encoder.
vocab_size (`int`, *optional*, defaults to 262):
Vocabulary size for the masked language modeling model.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that the masked language modeling model might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
image_size (`int`, *optional*, defaults to 56):
Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
train_size (`List[int]`, *optional*, defaults to [368, 496]):
Training size of the images for the optical flow model.
num_frames (`int`, *optional*, defaults to 16):
Number of video frames used for the multimodal autoencoding model.
audio_samples_per_frame (`int`, *optional*, defaults to 1920):
Number of audio samples per frame for the multimodal autoencoding model.
samples_per_patch (`int`, *optional*, defaults to 16):
Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
output_shape (`List[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal
autoencoding model. This excludes the channel dimension.
Example:
```python
>>> from transformers import PerceiverModel, PerceiverConfig
>>> # Initializing a Perceiver deepmind/language-perceiver style configuration
>>> configuration = PerceiverConfig()
>>> # Initializing a model from the deepmind/language-perceiver style configuration
>>> model = PerceiverModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "perceiver"
def __init__(
self,
num_latents=256,
d_latents=1280,
d_model=768,
num_blocks=1,
num_self_attends_per_block=26,
num_self_attention_heads=8,
num_cross_attention_heads=8,
qk_channels=None,
v_channels=None,
cross_attention_shape_for_attention="kv",
self_attention_widening_factor=1,
cross_attention_widening_factor=1,
hidden_act="gelu",
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
layer_norm_eps=1e-12,
use_query_residual=True,
vocab_size=262,
max_position_embeddings=2048,
image_size=56,
train_size=[368, 496],
num_frames=16,
audio_samples_per_frame=1920,
samples_per_patch=16,
output_shape=[1, 16, 224, 224],
**kwargs,
):
super().__init__(**kwargs)
self.num_latents = num_latents
self.d_latents = d_latents
self.d_model = d_model
self.num_blocks = num_blocks
self.num_self_attends_per_block = num_self_attends_per_block
self.num_self_attention_heads = num_self_attention_heads
self.num_cross_attention_heads = num_cross_attention_heads
self.qk_channels = qk_channels
self.v_channels = v_channels
self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
self.self_attention_widening_factor = self_attention_widening_factor
self.cross_attention_widening_factor = cross_attention_widening_factor
self.hidden_act = hidden_act
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_query_residual = use_query_residual
# masked language modeling attributes
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
# image classification attributes
self.image_size = image_size
# flow attributes
self.train_size = train_size
# multimodal autoencoding attributes
self.num_frames = num_frames
self.audio_samples_per_frame = audio_samples_per_frame
self.samples_per_patch = samples_per_patch
self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
]
)
@property
def atol_for_validation(self) -> float:
return 1e-4
def generate_dummy_inputs(
self,
preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
batch_size: int = -1,
seq_length: int = -1,
num_choices: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
num_channels: int = 3,
image_width: int = 40,
image_height: int = 40,
) -> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(preprocessor, PreTrainedTokenizerBase):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(
batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(
seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
)
# Generate dummy inputs according to compute batch and sequence
dummy_input = [" ".join(["a"]) * seq_length] * batch_size
inputs = dict(preprocessor(dummy_input, return_tensors=framework))
inputs["inputs"] = inputs.pop("input_ids")
return inputs
elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
inputs["inputs"] = inputs.pop("pixel_values")
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
)
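# ----------------------------------------------------------------------
# A minimal sketch, not part of the library module, showing how the ONNX config
# above can be exercised with a tokenizer. The PerceiverTokenizer import and the
# "deepmind/language-perceiver" checkpoint name are assumptions taken from the
# docstring above; network access is needed to download the tokenizer.
def _example_dummy_inputs():
    from transformers import PerceiverTokenizer  # assumed top-level export

    config = PerceiverConfig()  # defaults mirror deepmind/language-perceiver
    onnx_config = PerceiverOnnxConfig(config)
    tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=16)
    # The tokenizer branch renames "input_ids" to "inputs", matching the
    # dynamic axes declared in PerceiverOnnxConfig.inputs.
    return sorted(dummy.keys())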
|
PypiClean
|
/onshape_client-1.6.3-py3-none-any.whl/onshape_client/oas/models/btm_sketch_curve4.py
|
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_curve_geometry114
except ImportError:
bt_curve_geometry114 = sys.modules["onshape_client.oas.models.bt_curve_geometry114"]
try:
from onshape_client.oas.models import btm_parameter1
except ImportError:
btm_parameter1 = sys.modules["onshape_client.oas.models.btm_parameter1"]
try:
from onshape_client.oas.models import btm_sketch_curve4_all_of
except ImportError:
btm_sketch_curve4_all_of = sys.modules[
"onshape_client.oas.models.btm_sketch_curve4_all_of"
]
try:
from onshape_client.oas.models import btm_sketch_curve_segment155
except ImportError:
btm_sketch_curve_segment155 = sys.modules[
"onshape_client.oas.models.btm_sketch_curve_segment155"
]
try:
from onshape_client.oas.models import btm_sketch_geom_entity5
except ImportError:
btm_sketch_geom_entity5 = sys.modules[
"onshape_client.oas.models.btm_sketch_geom_entity5"
]
class BTMSketchCurve4(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
(for a plain var_name this is (var_name,)). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
(for a plain var_name this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"center_id": (str,), # noqa: E501
"geometry": (bt_curve_geometry114.BTCurveGeometry114,), # noqa: E501
"internal_ids": ([str],), # noqa: E501
"control_box_ids": ([str],), # noqa: E501
"entity_id": (str,), # noqa: E501
"entity_id_and_replace_in_dependent_fields": (str,), # noqa: E501
"import_microversion": (str,), # noqa: E501
"is_construction": (bool,), # noqa: E501
"namespace": (str,), # noqa: E501
"node_id": (str,), # noqa: E501
"parameters": ([btm_parameter1.BTMParameter1],), # noqa: E501
}
@staticmethod
def discriminator():
return {
"bt_type": {
"BTMSketchCurveSegment-155": btm_sketch_curve_segment155.BTMSketchCurveSegment155,
},
}
attribute_map = {
"bt_type": "btType", # noqa: E501
"center_id": "centerId", # noqa: E501
"geometry": "geometry", # noqa: E501
"internal_ids": "internalIds", # noqa: E501
"control_box_ids": "controlBoxIds", # noqa: E501
"entity_id": "entityId", # noqa: E501
"entity_id_and_replace_in_dependent_fields": "entityIdAndReplaceInDependentFields", # noqa: E501
"import_microversion": "importMicroversion", # noqa: E501
"is_construction": "isConstruction", # noqa: E501
"namespace": "namespace", # noqa: E501
"node_id": "nodeId", # noqa: E501
"parameters": "parameters", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btm_sketch_curve4.BTMSketchCurve4 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
center_id (str): [optional] # noqa: E501
geometry (bt_curve_geometry114.BTCurveGeometry114): [optional] # noqa: E501
internal_ids ([str]): [optional] # noqa: E501
control_box_ids ([str]): [optional] # noqa: E501
entity_id (str): [optional] # noqa: E501
entity_id_and_replace_in_dependent_fields (str): [optional] # noqa: E501
import_microversion (str): [optional] # noqa: E501
is_construction (bool): [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
parameters ([btm_parameter1.BTMParameter1]): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
btm_sketch_curve4_all_of.BTMSketchCurve4AllOf,
btm_sketch_geom_entity5.BTMSketchGeomEntity5,
],
"oneOf": [],
}
@classmethod
def get_discriminator_class(cls, from_server, data):
"""Returns the child class specified by the discriminator"""
discriminator = cls.discriminator()
discr_propertyname_py = list(discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if from_server:
class_name = data[discr_propertyname_js]
else:
class_name = data[discr_propertyname_py]
class_name_to_discr_class = discriminator[discr_propertyname_py]
return class_name_to_discr_class.get(class_name)
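# ----------------------------------------------------------------------
# A minimal construction sketch, not part of the generated client. Only keyword
# arguments documented in __init__ above are used; the "BTMSketchCurve-4" type
# string and the entity id are illustrative values, and the geometry payload is
# omitted for brevity.
def _example_sketch_curve():
    """Build a bare construction-curve entity with a few scalar fields set."""
    return BTMSketchCurve4(
        bt_type="BTMSketchCurve-4",
        entity_id="curve-1",
        is_construction=True,
        internal_ids=[],
        parameters=[],
    )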
|
PypiClean
|
/un-treaties-0.1.2.tar.gz/un-treaties-0.1.2/README.md
|
# UN Treaties
_Push countries to ratify UN treaties!_
## Join The Project
**Meet the team**
| | Name | @github | Role | Working on | Season |
|-------|------------------|----------------------------|--------------|----------------|-------------|
| - | Anastasiia Filchenko | - | Developer | Visualization | Autumn 2018 |
| - | John Melin | - | Developer | UX/Front-end | Autumn 2018 |
|  | Kleng Bråtveit | [@klengbratveit](https://github.com/klengbratveit) | Idea provider, Project Lead | UX | Spring 2018 |
|  | Dirk Hesse | [@dhesse](https://github.com/dhesse) | Developer | Crawler | Spring 2018 |
|  | Emma Scala | [@makingwaves27](https://github.com/makingwaves27) | Developer | Crawler | Spring 2018 |
|  | Geir Arne Hjelle | [@gahjelle](https://github.com/gahjelle) | Project Lead, Developer | Crawler, Visualization | Spring/Autumn 2018 |
|  | Harjeet Harpal | [@tanyuu](https://github.com/tanyuu) | Developer | Crawler | Spring 2018 |
|  | Patrick Merlot | [@patechoc](https://github.com/patechoc) | Developer | Crawler, Visualization | Spring/Autumn 2018 |
Join us on [Slack](http://dataforgood.no/contact-us/), then join our [channel](https://dataforgood-norway.slack.com/messages/CA183A89G/).
### We Need Your Help
See [all tasks](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen) or click in the table below to find a list of specific tasks.
| | [Easy Task](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Easy+Task%22+) | [Medium Task](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Medium+Task%22+) | [Ninja Task](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Ninja+Task%22+) |
|-----|:---:|:---:|:---:|
| [**Crawler**](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Crawler%22+) | [Easy Crawler](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Easy+Task%22+label%3A%22Topic%3A+Crawler%22+) | [Medium Crawler](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Medium+Task%22+label%3A%22Topic%3A+Crawler%22) | [Ninja Crawler](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Ninja+Task%22+label%3A%22Topic%3A+Crawler%22+) |
| [**Data Preparation**](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Data+Preparation%22+) | [Easy Data Prep](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Data+Preparation%22+label%3A%22Easy+Task%22) | [Medium Data Prep](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Data+Preparation%22+label%3A%22Medium+Task%22) | [Ninja Data Prep](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Data+Preparation%22+label%3A%22Ninja+Task%22) |
| [**Visualization**](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Visualization%22+) | [Easy Viz](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Visualization%22+label%3A%22Easy+Task%22) | [Medium Viz](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Visualization%22+label%3A%22Medium+Task%22) | [Ninja Viz](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Visualization%22+label%3A%22Ninja+Task%22) |
| [**Impact**](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Impact%22+) | [Easy Impact](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Impact%22+label%3A%22Easy+Task%22+) | [Medium Impact](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Impact%22+label%3A%22Medium+Task%22+) | [Ninja Impact](https://github.com/DataForGood-Norway/ResoCrawl/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22Topic%3A+Impact%22+label%3A%22Ninja+Task%22+) |
## What Are We Doing?
This project is mainly split into four parts:
+ **Crawler**: Scrape data from the [UN Treaties web site](https://treaties.un.org/Pages/ParticipationStatus.aspx?clang=_en)
+ **Data Preparation**: Organize the data into a nice-to-work-with CSV or similar
+ **Visualization**: Create visualizations that illustrate the data
+ **Impact**: Use the data and visualizations to incentivize more countries to ratify treaties
See [the original project idea](https://github.com/DataForGood-Norway/project-ideas/issues/6) for the initial discussion.
## Goal
Our goals are:
* to make information about UN Treaties more accessible to people,
* to incentivize people in power to sign/follow up on treaties.
## Existing solutions/articles discussing the ratification of treaties
* http://indicators.ohchr.org
* [List of treaties by number of parties (Wikipedia)](https://en.wikipedia.org/wiki/List_of_treaties_by_number_of_parties)
* [United States Ratification of International Human Rights Treaties (Article by Human Rights Watch)](https://www.hrw.org/news/2009/07/24/united-states-ratification-international-human-rights-treaties)
* [Human Rights Library (University of Minnesota)](http://hrlibrary.umn.edu/index.html)
* [an article advocating against the ratification of a treaty by the USA](https://www.heritage.org/global-politics/commentary/7-reasons-us-should-not-ratify-un-convention-the-law-the-sea): interesting to see what arguments are used to justify such a position.
* [Where does the US stand on UN human rights conventions? (Jan.2018)](https://eu.cincinnati.com/story/opinion/contributors/2018/01/03/where-does-us-stand-un-human-rights-conventions/972726001/): something that should easily stand out from any visualization of these (non-) ratifications.
## More Information
+ [More background, information and definitions](DEFINITIONS.md)
+ [How to set up and run the code](HOWTO.md)
|
PypiClean
|
/isbnlib_worldcat2-0.1.2-py3-none-any.whl/isbnlib_worldcat/_worldcat.py
|
'''Query the WorldCat service for metadata. '''
# This code is partially based on the (now defunct) isbnlib-dnb module by arangb.
# https://pypi.org/project/isbnlib-dnb/ https://github.com/arangb/isbnlib-dnb
import logging
import re
import bs4
import pycountry
from isbnlib.dev import stdmeta
from isbnlib.dev._bouth23 import u
from isbnlib.dev.webquery import query as wquery
UA = 'isbnlib (gzip)'
SERVICE_URL = 'https://www.worldcat.org/search?q=bn%3A{isbn}&lang={lang}'
QUERY_LANG = 'en'
LOGGER = logging.getLogger(__name__)
_re1 = re.compile(r'(?<=[A-Z])(\s)(?![A-z]{2,})')
_re2 = re.compile(r'(?<=[A-Z])(\s)(?=[A-z]{2,})')
def parser_worldcat(data):
'''Parse the response from the WorldCat service. The input data is the result webpage in html from the search.'''
records = {}
soup = bs4.BeautifulSoup(data, features='html.parser')
try:
result = soup.find('td', class_='result details') # We should only need to look at the first entry
# Extract title
raw = result.find('div', class_='name').find('a').find('strong').contents[0]
records['Title'] = str(raw.replace(' :', ':')) # Fix a error in title markup
# Extract list of author(s)
raw = result.find('div', class_='author').contents[0].replace('by ', '')
names = [atr.replace(';', '') for atr in raw.split('; ')] # Split and fix another common error
def fix_punctuation(name): # Fix missing punctuation in author initials (WorldCat issues...)
step = re.sub(_re1, r'.', name)
return re.sub(_re2, r'. ', step)
records['Authors'] = [fix_punctuation(name) for name in names]
# Extract language
langs = [i.contents[0] for i in result.find('div', class_='language').find_all('span', class_='itemLanguage')]
lang_codes = []
for lang in langs:
code = pycountry.languages.lookup(str(lang))
if code:
lang_codes.append(code.alpha_2.lower())
if len(lang_codes) == 1:
records['Language'] = str(lang_codes[0])
elif len(lang_codes) > 1:
records['Language'] = str(', '.join(lang_codes))
else:
records['Language'] = ''
# Extract publisher and year
raw = result.find('span', class_='itemPublisher').contents[0]
raw = raw.split(': ')[-1] # Filter out publisher's seat
raw = raw.split(', ')
publisher = raw[0]
year = raw[-1].replace('[', '').replace(']', '').replace('.', '')
records['Publisher'] = str(publisher)
records['Year'] = str(year)
except (AttributeError, KeyError) as ex:
LOGGER.debug('Error parsing WorldCat html. Did the layout change?')
records = {}
return records
def _mapper(isbn, records):
'''Make records canonical.
canonical: ISBN-13, Title, Authors, Publisher, Year, Language
'''
# handle special case
if not records: # pragma: no cover
return {}
# add ISBN-13
records['ISBN-13'] = u(isbn)
# call stdmeta for extra cleaning and validation
return stdmeta(records)
def query(isbn):
'''Query the WorldCat service for metadata. '''
data = wquery(
SERVICE_URL.format(isbn=isbn, lang=QUERY_LANG), user_agent=UA, parser=parser_worldcat)
if not data: # pragma: no cover
LOGGER.debug('No data from WorldCat for isbn %s', isbn)
return {}
return _mapper(isbn, data)
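# ----------------------------------------------------------------------
# A minimal usage sketch, not part of the plugin itself: query() can be called
# directly with an ISBN string. The ISBN below is only an illustrative value,
# and the call needs network access to the live WorldCat search page.
def _example_query():
    """Fetch canonical metadata for a single ISBN."""
    meta = query('9780140328721')
    # Expected keys after _mapper(): ISBN-13, Title, Authors, Publisher, Year, Language
    return meta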
|
PypiClean
|
/pyemir-0.19.tar.gz/pyemir-0.19/src/emirdrp/util/sextractor.py
|
# ======================================================================
from __future__ import print_function
from six.moves import builtins as __builtin__
import os
import subprocess
import re
import copy
from .sexcatalog import *
# ======================================================================
__version__ = "1.15.0 (2005-07-06)"
# ======================================================================
class SExtractorException(Exception):
pass
# ======================================================================
nnw_config = \
"""NNW
# Neural Network Weights for the SExtractor star/galaxy classifier (V1.3)
# inputs: 9 for profile parameters + 1 for seeing.
# outputs: ``Stellarity index'' (0.0 to 1.0)
# Seeing FWHM range: from 0.025 to 5.5''
# (images must have 1.5 < FWHM < 5 pixels)
# Optimized for Moffat profiles with 2<= beta <= 4.
3 10 10 1
-1.56604e+00 -2.48265e+00 -1.44564e+00 -1.24675e+00 -9.44913e-01 -5.22453e-01 4.61342e-02 8.31957e-01 2.15505e+00 2.64769e-01
3.03477e+00 2.69561e+00 3.16188e+00 3.34497e+00 3.51885e+00 3.65570e+00 3.74856e+00 3.84541e+00 4.22811e+00 3.27734e+00
-3.22480e-01 -2.12804e+00 6.50750e-01 -1.11242e+00 -1.40683e+00 -1.55944e+00 -1.84558e+00 -1.18946e-01 5.52395e-01 -4.36564e-01 -5.30052e+00
4.62594e-01 -3.29127e+00 1.10950e+00 -6.01857e-01 1.29492e-01 1.42290e+00 2.90741e+00 2.44058e+00 -9.19118e-01 8.42851e-01 -4.69824e+00
-2.57424e+00 8.96469e-01 8.34775e-01 2.18845e+00 2.46526e+00 8.60878e-02 -6.88080e-01 -1.33623e-02 9.30403e-02 1.64942e+00 -1.01231e+00
4.81041e+00 1.53747e+00 -1.12216e+00 -3.16008e+00 -1.67404e+00 -1.75767e+00 -1.29310e+00 5.59549e-01 8.08468e-01 -1.01592e-02 -7.54052e+00
1.01933e+01 -2.09484e+01 -1.07426e+00 9.87912e-01 6.05210e-01 -6.04535e-02 -5.87826e-01 -7.94117e-01 -4.89190e-01 -8.12710e-02 -2.07067e+01
-5.31793e+00 7.94240e+00 -4.64165e+00 -4.37436e+00 -1.55417e+00 7.54368e-01 1.09608e+00 1.45967e+00 1.62946e+00 -1.01301e+00 1.13514e-01
2.20336e-01 1.70056e+00 -5.20105e-01 -4.28330e-01 1.57258e-03 -3.36502e-01 -8.18568e-02 -7.16163e+00 8.23195e+00 -1.71561e-02 -1.13749e+01
3.75075e+00 7.25399e+00 -1.75325e+00 -2.68814e+00 -3.71128e+00 -4.62933e+00 -2.13747e+00 -1.89186e-01 1.29122e+00 -7.49380e-01 6.71712e-01
-8.41923e-01 4.64997e+00 5.65808e-01 -3.08277e-01 -1.01687e+00 1.73127e-01 -8.92130e-01 1.89044e+00 -2.75543e-01 -7.72828e-01 5.36745e-01
-3.65598e+00 7.56997e+00 -3.76373e+00 -1.74542e+00 -1.37540e-01 -5.55400e-01 -1.59195e-01 1.27910e-01 1.91906e+00 1.42119e+00 -4.35502e+00
-1.70059e+00 -3.65695e+00 1.22367e+00 -5.74367e-01 -3.29571e+00 2.46316e+00 5.22353e+00 2.42038e+00 1.22919e+00 -9.22250e-01 -2.32028e+00
0.00000e+00
1.00000e+00
"""
# ======================================================================
class SExtractor:
"""
A wrapper class to transparently use SExtractor.
"""
_SE_config = {
"CATALOG_NAME":
{"comment": "name of the output catalog",
"value": "py-sextractor.cat"},
"CATALOG_TYPE":
{"comment":
'"NONE","ASCII_HEAD","ASCII","FITS_1.0" or "FITS_LDAC"',
"value": "ASCII_HEAD"},
"PARAMETERS_NAME":
{"comment": "name of the file containing catalog contents",
"value": "py-sextractor.param"},
"DETECT_TYPE":
{"comment": '"CCD" or "PHOTO"',
"value": "CCD"},
"FLAG_IMAGE":
{"comment": "filename for an input FLAG-image",
"value": "flag.fits"},
"DETECT_MINAREA":
{"comment": "minimum number of pixels above threshold",
"value": 5},
"DETECT_THRESH":
{"comment": "<sigmas> or <threshold>,<ZP> in mag.arcsec-2",
"value": 1.5},
"ANALYSIS_THRESH":
{"comment": "<sigmas> or <threshold>,<ZP> in mag.arcsec-2",
"value": 1.5},
"FILTER":
{"comment": 'apply filter for detection ("Y" or "N")',
"value": 'Y'},
"FILTER_NAME":
{"comment": "name of the file containing the filter",
"value": "py-sextractor.conv"},
"DEBLEND_NTHRESH":
{"comment": "Number of deblending sub-thresholds",
"value": 32},
"DEBLEND_MINCONT":
{"comment": "Minimum contrast parameter for deblending",
"value": 0.005},
"CLEAN":
{"comment": "Clean spurious detections (Y or N)",
"value": 'Y'},
"CLEAN_PARAM":
{"comment": "Cleaning efficiency",
"value": 1.0},
"MASK_TYPE":
{"comment": 'type of detection MASKing: can be one of "NONE",'
' "BLANK" or "CORRECT"',
"value": "CORRECT"},
"PHOT_APERTURES":
{"comment": "MAG_APER aperture diameter(s) in pixels",
"value": 5},
"PHOT_AUTOPARAMS":
{"comment": 'MAG_AUTO parameters: <Kron_fact>,<min_radius>',
"value": [2.5, 3.5]},
"SATUR_LEVEL":
{"comment": "level (in ADUs) at which arises saturation",
"value": 50000.0},
"MAG_ZEROPOINT":
{"comment": "magnitude zero-point",
"value": 0.0},
"MAG_GAMMA":
{"comment": "gamma of emulsion (for photographic scans)",
"value": 4.0},
"GAIN":
{"comment": "detector gain in e-/ADU",
"value": 0.0},
"PIXEL_SCALE":
{"comment": "size of pixel in arcsec (0=use FITS WCS info)",
"value": 1.0},
"SEEING_FWHM":
{"comment": "stellar FWHM in arcsec",
"value": 1.2},
"STARNNW_NAME":
{"comment": "Neural-Network_Weight table filename",
"value": "py-sextractor.nnw"},
"BACK_SIZE":
{"comment": "Background mesh: <size> or <width>,<height>",
"value": 64},
"BACK_TYPE":
{"comment": "Type of background to subtract: MANUAL or AUTO generated",
"value": 'AUTO'},
"BACK_VALUE":
{"comment": "User-supplied constant value to be subtracted as sky",
"value": "0.0,0.0"},
"BACK_FILTERSIZE":
{"comment": "Background filter: <size> or <width>,<height>",
"value": 3},
"BACKPHOTO_TYPE":
{"comment": 'can be "GLOBAL" or "LOCAL"',
"value": "GLOBAL"},
"BACKPHOTO_THICK":
{"comment": "Thickness in pixels of the background local annulus",
"value": 24},
"CHECKIMAGE_TYPE":
{"comment": 'can be one of "NONE", "BACKGROUND", "MINIBACKGROUND",'
' "-BACKGROUND", "OBJECTS", "-OBJECTS", "SEGMENTATION",'
' "APERTURES", or "FILTERED"',
"value": "NONE"},
"CHECKIMAGE_NAME":
{"comment": "Filename for the check-image",
"value": "check.fits"},
"MEMORY_OBJSTACK":
{"comment": "number of objects in stack",
"value": 3000},
"MEMORY_PIXSTACK":
{"comment": "number of pixels in stack",
"value": 300000},
"MEMORY_BUFSIZE":
{"comment": "number of lines in buffer",
"value": 1024},
"VERBOSE_TYPE":
{"comment": 'can be "QUIET", "NORMAL" or "FULL"',
"value": "QUIET"},
"WEIGHT_TYPE":
{"comment": 'type of WEIGHTing: NONE, BACKGROUND, '
'MAP_RMS, MAP_VAR or MAP_WEIGHT',
"value": "NONE"},
"WEIGHT_IMAGE":
{"comment": '# weight-map filename',
"value": "NONE"},
"WEIGHT_THRESH":
{"comment": 'weight threshold[s] for bad pixels',
"value": 0},
# -- Extra-keys (will not be saved in the main configuration file
"PARAMETERS_LIST":
{"comment": '[Extra key] catalog contents (to put in PARAMETERS_NAME)',
"value": ["NUMBER", "FLUX_BEST", "FLUXERR_BEST",
"X_IMAGE", "Y_IMAGE", "FLAGS", "FWHM_IMAGE"]},
"CONFIG_FILE":
{"comment": '[Extra key] name of the main configuration file',
"value": "py-sextractor.sex"},
"FILTER_MASK":
{"comment": 'Array to put in the FILTER_MASK file',
"value": [[1, 2, 1],
[2, 4, 2],
[1, 2, 1]]}
}
# -- Special config. keys that should not go into the config. file.
_SE_config_special_keys = ["PARAMETERS_LIST", "CONFIG_FILE", "FILTER_MASK"]
# -- Dictionary of all possible parameters (from sexcatalog.py module)
_SE_parameters = SExtractorfile._SE_keys
def __init__(self):
"""
SExtractor class constructor.
"""
self.config = (
dict([(k, copy.deepcopy(SExtractor._SE_config[k]["value"]))
for k in SExtractor._SE_config]))
# print self.config
self.program = None
self.version = None
def setup(self, path=None):
"""
Look for SExtractor program ('sextractor', or 'sex').
If a full path is provided, only this path is checked.
Raise a SExtractorException if it failed.
Return program and version if it succeed.
"""
# -- Finding sextractor program and its version
# first look for 'sextractor', then 'sex'
candidates = ['sextractor', 'sex']
if (path):
candidates = [path]
selected = None
for candidate in candidates:
try:
p = subprocess.Popen(candidate, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True)
(_out_err, _in) = (p.stdout, p.stdin)
versionline = _out_err.read()
if isinstance(versionline, bytes):
    versionline = versionline.decode('utf-8', errors='replace')
if (versionline.find("SExtractor") != -1):
selected = candidate
break
except IOError:
continue
if not(selected):
raise SExtractorException(
"""
Cannot find SExtractor program. Check your PATH,
or provide the SExtractor program path in the constructor.
"""
)
_program = selected
# print versionline
_version_match = re.search(r"[Vv]ersion ([0-9.]+)", versionline)
if not _version_match:
raise SExtractorException(
"Cannot determine SExtractor version."
)
_version = _version_match.group()[8:]
if not _version:
raise SExtractorException(
"Cannot determine SExtractor version."
)
# print "Use " + self.program + " [" + self.version + "]"
return _program, _version
def update_config(self):
"""
Update the configuration files according to the current
in-memory SExtractor configuration.
"""
# -- Write filter configuration file
# First check the filter itself
filter = self.config['FILTER_MASK']
rows = len(filter)
cols = len(filter[0]) # May raise ValueError, OK
filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w')
filter_f.write("CONV NORM\n")
filter_f.write("# %dx%d Generated from sextractor.py module.\n" %
(rows, cols))
for row in filter:
filter_f.write(" ".join(map(repr, row)))
filter_f.write("\n")
filter_f.close()
# -- Write parameter list file
parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w')
for parameter in self.config['PARAMETERS_LIST']:
print(parameter, file=parameters_f)
parameters_f.close()
# -- Write NNW configuration file
nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w')
nnw_f.write(nnw_config)
nnw_f.close()
# -- Write main configuration file
main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w')
for key in self.config.keys():
if (key in SExtractor._SE_config_special_keys):
continue
if (key == "PHOT_AUTOPARAMS"): # tuple instead of a single value
value = " ".join(map(str, self.config[key]))
else:
value = str(self.config[key])
print(("%-16s %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f)
main_f.close()
def run(self, file, updateconfig=True, clean=False, path=None):
"""
Run SExtractor.
If updateconfig is True (default), the configuration
files will be updated before running SExtractor.
If clean is True (default: False), configuration files
(if any) will be deleted after SExtractor terminates.
"""
if updateconfig:
self.update_config()
# Try to find SExtractor program
# This will raise an exception if it failed
self.program, self.version = self.setup(path)
commandline = (
self.program + " -c " + self.config['CONFIG_FILE'] + " " + file)
# print commandline
rcode = os.system(commandline)
if (rcode):
raise SExtractorException(
"SExtractor command [%s] failed." % commandline
)
if clean:
self.clean()
def catalog(self):
"""
Read the output catalog produced by the last SExtractor run.
Output is a list of dictionaries, with a dictionary for
each star: {'param1': value, 'param2': value, ...}.
"""
output_f = SExtractorfile(self.config['CATALOG_NAME'], 'r')
c = output_f.read()
output_f.close()
return c
def clean(self, config=True, catalog=False, check=False):
"""
Remove the generated SExtractor files (if any).
If config is True, remove generated configuration files.
If catalog is True, remove the output catalog.
If check is True, remove output check image.
"""
try:
if (config):
os.unlink(self.config['FILTER_NAME'])
os.unlink(self.config['PARAMETERS_NAME'])
os.unlink(self.config['STARNNW_NAME'])
os.unlink(self.config['CONFIG_FILE'])
if (catalog):
os.unlink(self.config['CATALOG_NAME'])
if (check):
os.unlink(self.config['CHECKIMAGE_NAME'])
except OSError:
pass
# ======================================================================
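# ----------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module).
# It assumes the SExtractor wrapper class above, the `config` dict keys used
# by update_config()/catalog(), and a hypothetical FITS image "image.fits".
#
# sex = SExtractor()
# sex.config['PARAMETERS_LIST'] = ['NUMBER', 'X_IMAGE', 'Y_IMAGE', 'FLUX_BEST']
# sex.config['CATALOG_NAME'] = 'image.cat'
# sex.run('image.fits', updateconfig=True, clean=False)
# for detection in sex.catalog():
#     print(detection['NUMBER'], detection['X_IMAGE'], detection['Y_IMAGE'])
# sex.clean(config=True, catalog=True)
# ----------------------------------------------------------------------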
|
PypiClean
|
/monk_keras_cuda101_test-0.0.1-py3-none-any.whl/monk/pytorch/finetune/level_13_updates_main.py
|
from monk.pytorch.finetune.imports import *
from monk.system.imports import *
from monk.pytorch.finetune.level_12_losses_main import prototype_losses
class prototype_updates(prototype_losses):
'''
Main class for all parametric update functions
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
@accepts("self", verbose=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
##########################################################################################################################################################
@warning_checks(None, ["gte", 32, "lte", 1024], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_input_size(self, input_size):
'''
Update input size.
Args:
input_size (int): New input size
Returns:
None
'''
self.system_dict = set_input_size(input_size, self.system_dict);
self.custom_print("Update: Input size - {}".format(self.system_dict["dataset"]["params"]["input_size"]));
self.custom_print("");
@warning_checks(None, ["lte", 128], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_batch_size(self, batch_size):
'''
Update batch size.
Args:
batch_size (int): New batch size
Returns:
None
'''
self.system_dict = set_batch_size(batch_size, self.system_dict);
self.custom_print("Update: Batch size - {}".format(self.system_dict["dataset"]["params"]["batch_size"]));
self.custom_print("");
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_shuffle_data(self, shuffle):
'''
Update to shuffle data or not.
Args:
shuffle (bool): If True, will shuffle data
Returns:
None
'''
self.system_dict = set_data_shuffle(shuffle, self.system_dict);
self.custom_print("Update: Data shuffle - {}".format(self.system_dict["dataset"]["params"]["train_shuffle"]));
self.custom_print("");
@warning_checks(None, ["lte", psutil.cpu_count()], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_num_processors(self, num_processors):
'''
Update num processors for data loader.
Args:
num_processors (int): Max CPUs for data sampling
Returns:
None
'''
self.system_dict = set_num_processors(num_processors, self.system_dict);
self.custom_print("Update: Num processors - {}".format(self.system_dict["dataset"]["params"]["num_workers"]));
self.custom_print("");
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_weighted_sampling(self, sample):
'''
Function inactive
'''
self.system_dict = set_weighted_sampling(sample, self.system_dict);
self.custom_print("Update: Weighted Sampling - {}".format(self.system_dict["dataset"]["params"]["weighted_sample"]));
self.custom_print("");
@warning_checks(None, ["gt", 0.5, "lt", 1], post_trace=False)
@error_checks(None, ["gt", 0, "lt", 1], post_trace=False)
@accepts("self", float, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_trainval_split(self, value):
'''
Update training-validation split
Args:
value (float): Indicating train validation split
Division happens as follows:
train - (value * 100)% of the total dataset
val - ((1 - value) * 100)% of the total dataset
Returns:
None
'''
if(self.system_dict["dataset"]["dataset_type"] == "train"):
dataset_path = self.system_dict["dataset"]["train_path"];
path_to_csv=False;
elif(self.system_dict["dataset"]["dataset_type"] == "train-val"):
dataset_path = [self.system_dict["dataset"]["train_path"], self.system_dict["dataset"]["val_path"]];
path_to_csv=False;
elif(self.system_dict["dataset"]["dataset_type"] == "csv_train"):
dataset_path = self.system_dict["dataset"]["train_path"];
path_to_csv = self.system_dict["dataset"]["csv_train"];
elif(self.system_dict["dataset"]["dataset_type"] == "csv_train-val"):
dataset_path = [self.system_dict["dataset"]["train_path"], self.system_dict["dataset"]["val_path"]];
path_to_csv = [self.system_dict["dataset"]["csv_train"], self.system_dict["dataset"]["csv_val"]];
else:
msg = "Dataset Type invalid.\n";
msg += "Cannot update split"
ConstraintsWarning(msg)
self.system_dict = set_dataset_train_path(self.system_dict, dataset_path, value, path_to_csv, self.system_dict["dataset"]["params"]["delimiter"]);
@warning_checks(None, dataset_path=None, split=["gt", 0.5, "lt", 1], path_to_csv=None, delimiter=None, post_trace=False)
@error_checks(None, dataset_path=["folder", 'r'], split=["gt", 0, "lt", 1], path_to_csv=["file", 'r'], delimiter=["in", [",", ";", "-", " "]], post_trace=False)
@accepts("self", dataset_path=[str, list], split=float, path_to_csv=[str, list, bool], delimiter=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_dataset(self, dataset_path=False, split=0.9, path_to_csv=False, delimiter=","):
'''
Update dataset path
Args:
dataset_path (str, list): Path to Dataset folder
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
path_to_csv (str, list): Path to csv file pointing towards images
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
split (float): Indicating train validation split
Division happens as follows:
train - (split * 100)% of the total dataset
val - ((1 - split) * 100)% of the total dataset
delimiter (str): Delimiter for csv file
Returns:
None
'''
self.system_dict = set_dataset_train_path(self.system_dict, dataset_path, split, path_to_csv, delimiter);
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", str, force=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_model_name(self, model_name, force=False):
'''
Update model name
Args:
model_name (str): Select from available models. Check via List_Models() function
force (bool): Dummy function
Returns:
None
'''
if(not force):
if(self.system_dict["training"]["status"]):
ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]));
ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n");
inp = input("Do you wish to continue further (y/n):");
if(inp == "y"):
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
else:
self.custom_print("Model not updated.");
self.custom_print("");
else:
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
else:
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", [str, list], force=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_model_path(self, model_path, force=False):
'''
Update model path for inferencing
Args:
model_path (str): Path to model weights.
force (bool): Dummy function
Returns:
None
'''
if(not force):
if(self.system_dict["training"]["status"]):
ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]));
ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n");
inp = input("Do you wish to continue further (y/n):");
if(inp == "y"):
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
else:
self.custom_print("Model not updated.");
self.custom_print("");
else:
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
else:
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_use_gpu(self, gpu):
'''
Update to use gpu or cpu
Args:
gpu (bool): If True, then use GPU
Returns:
None
'''
self.system_dict = set_device(gpu, self.system_dict);
self.custom_print("Update: Use Gpu - {}".format(self.system_dict["model"]["params"]["use_gpu"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_use_pretrained(self, pretrained):
'''
Update to use pretrained wights or randomly initialized weights
Args:
pretrained (bool): If True, use pretrained weights
else, use randomly initialized weights
Returns:
None
'''
self.system_dict = set_pretrained(pretrained, self.system_dict);
self.custom_print("Update: Use pretrained - {}".format(self.system_dict["model"]["params"]["use_pretrained"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_freeze_base_network(self, freeze):
'''
Update whether freeze base network or not
Args:
freeze (bool): If True, then base network is non-trainable, works as a feature extractor
Returns:
None
'''
self.system_dict = set_freeze_base_network(freeze, self.system_dict);
self.custom_print("Update: Freeze Base Network - {}".format(self.system_dict["model"]["params"]["freeze_base_network"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@error_checks(None, ["gte", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_freeze_layers(self, num_freeze):
'''
Update to freeze certain layers in the network
Args:
num_freeze (int): Number of layers to freeze in network starting from top
Returns:
None
'''
self.system_dict["model"]["params"]["num_freeze"] = num_freeze;
self.custom_print("Update: Freeze layers - {}".format(self.system_dict["model"]["params"]["num_freeze"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@warning_checks(None, ["lt", 100], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_num_epochs(self, num_epochs):
'''
Update number of epochs to train the network
Args:
num_epochs (int): New number of epochs
Returns:
None
'''
self.system_dict = set_num_epochs(num_epochs, self.system_dict);
self.custom_print("Update: Num Epochs - {}".format(self.system_dict["hyper-parameters"]["num_epochs"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@warning_checks(None, ["lt", 1], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", [int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_learning_rate(self, learning_rate):
'''
Update base learning rate for training
Args:
learning_rate (float): New base learning rate
Returns:
None
'''
self.system_dict["hyper-parameters"]["learning_rate"] = learning_rate;
self.system_dict["hyper-parameters"]["optimizer"]["params"]["lr"] = learning_rate;
self.custom_print("Update: Learning Rate - {}".format(self.system_dict["hyper-parameters"]["learning_rate"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_display_progress_realtime(self, value):
'''
Update display progress param
Args:
value (bool): If True, then real time progress is displayed
Returns:
None
'''
self.system_dict = set_display_progress_realtime(value, self.system_dict);
self.custom_print("Update: Display progress realtime - {}".format(self.system_dict["training"]["settings"]["display_progress_realtime"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_display_progress(self, value):
'''
Update display progress param
Args:
value (bool): If True, then per epoch progress is displayed
Returns:
None
'''
self.system_dict = set_display_progress(value, self.system_dict);
self.custom_print("Update: Display progress - {}".format(self.system_dict["training"]["settings"]["display_progress"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@error_checks(None, None, prefix=["name", ["A-Z", "a-z", "0-9", "-", "_"]], post_trace=False)
@accepts("self", bool, prefix=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_save_intermediate_models(self, value, prefix="intermediate_model_"):
'''
Update whether to save intermediate models or not
Args:
value (bool): If True, saves model weight post every epoch
prefix (str): Appends a prefix to intermediate weights
Returns:
None
'''
if(value):
if(not os.access(self.system_dict["model_dir"], os.W_OK)):
msg = "Folder \"{}\" has no read access".format(self.system_dict["model_dir"])
msg += "Cannot save Intermediate models";
raise ConstraintError(msg);
self.system_dict = set_save_intermediate_models(value, self.system_dict);
self.system_dict = set_intermediate_model_prefix(prefix, self.system_dict);
self.custom_print("Update: Save Intermediate models - {}".format(self.system_dict["training"]["settings"]["save_intermediate_models"]));
if(self.system_dict["training"]["settings"]["save_intermediate_models"]):
self.custom_print("Update: Intermediate model prefix - {}".format(self.system_dict["training"]["settings"]["intermediate_model_prefix"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_save_training_logs(self, value):
'''
Update whether to save training logs or not
Args:
value (bool): If True, saves all training and validation metrics. Required for comparison.
Returns:
None
'''
self.system_dict = set_save_training_logs(value, self.system_dict);
self.custom_print("Update: Save Training logs - {}".format(self.system_dict["training"]["settings"]["save_training_logs"]));
self.custom_print("");
##########################################################################################################################################################
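##########################################################################################################################################################
# Hedged usage sketch (illustration only, not part of the original module).
# Assumes an experiment object `gtf` created through the usual monk prototype
# workflow; the values below are hypothetical.
#
# gtf.update_input_size(224);
# gtf.update_batch_size(16);
# gtf.update_shuffle_data(True);
# gtf.update_trainval_split(0.8);    # 80% train, 20% validation
# gtf.update_num_epochs(10);
# gtf.update_learning_rate(0.001);
##########################################################################################################################################################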
|
PypiClean
|
/pyhaystack-3.0.0.tar.gz/pyhaystack-3.0.0/docs/source/tags.rst
|
Tags
====
A list of tags can be found here : http://project-haystack.org/tag
For a detailed explanation of tag model, please read this:
http://project-haystack.org/doc/TagModel
Pyhaystack lets you find what you are looking for via the "find_entity" functions.
Let's see how...
Finding sensors
---------------
Let's say I want to find every sensor on a site that is a temperature sensor used in a zone.
::
op = session.find_entity(filter_expr='sensor and zone and temp')
op.wait()
This will find what you are looking for in the form of a "FindEntityOperation" object.
To use the values of this object, you will need to retrieve the results using ::
znt = op.result
Exploring points and tags
--------------------------
This will return a :py:class:`dict` object that contains all of the Project Haystack
entities that matched the given filter string. The entities are keyed by
identifier. When exploring interactively, you can get a list of all the
matching entities' identifiers by calling:
::
list(znt.keys())
To retrieve a specific entity, you give its identifier as the key:
::
my_office = znt['S.SERVISYS.Salle-Conf~e9rence.ZN~2dT']
Having done this, it is possible to interrogate the tags attached to this
entity. These are accessed by the ``tags`` property, which also returns a
:py:class:`pyhaystack.client.entity.tags.MutableEntityTags` if your server
supports making changes via the Project Haystack API (currently only WideSky),
or :py:class:`pyhaystack.client.entity.tags.ReadOnlyEntityTags` otherwise.
Both classes function like a :py:class:`dict`.
::
my_office.tags
{air, axAnnotated,
axSlotPath='slot:/Drivers/BacnetNetwork/MSTP1/PCV$2d2$2d008/points/ZN$2dT',
axStatus='ok', axType='control:NumericPoint', cur, curStatus='ok',
curVal=BasicQuantity(23.4428, '°C'),
dis='SERVISYS Salle Conférence Salle Conférence ZN-T',
equipRef=Ref('S.SERVISYS.Salle-Conf~e9rence', None, False),
his, kind='Number', navName='ZN~2dT', point,
precision=1.0, sensor, siteRef=Ref('S.SERVISYS', None, False),
temp, tz='Montreal', unit='°C', zone}
You can access specific tags, again, by giving the tag's name as the key ::
val = my_office.tags['curVal']
# That will return BasicQuantity(23.4428, '°C')
# from which you can retrieve
val.value
val.unit
What are the ``~nn`` codes I keep seeing?
''''''''''''''''''''''''''''''''''''''''
This is a feature specific to nHaystack. Project Haystack entity identifiers
have a restricted character set, and only support a small subset of possible
ASCII characters. nHaystack derives the entity's identifier from the name
supplied by the user in the back-end configuration.
To encode other forms of characters (from the ISO8859-1 character set), the
character is replaced by the sequence, ``~nn`` where ``nn`` is the hexadecimal
character code for that character. In this case, you'll see ``~2d`` in place
of ``-``, and ``~e9`` in place of ``é``.
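As a rough illustration (not part of pyhaystack itself), such an identifier can
be decoded with a small helper; the regular expression below is an assumption
based on the ``~nn`` format described above:
::
    import re
    def decode_nhaystack_id(ident):
        # Replace each ~nn sequence with the ISO8859-1 character it encodes
        return re.sub(r'~([0-9a-fA-F]{2})',
                      lambda m: chr(int(m.group(1), 16)), ident)
    decode_nhaystack_id('S.SERVISYS.Salle-Conf~e9rence.ZN~2dT')
    # -> 'S.SERVISYS.Salle-Conférence.ZN-T'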
Adding, Changing and Deleting tags
----------------------------------
From this interface, it is also possible to update the values of these tags.
This requires a back-end server that supports "CRUD" operations (Create, Read,
Update & Delete). If your server supports these operations (and pyhaystack
supports using them), the ``tags`` property will be of type
:py:class:`pyhaystack.client.entity.tags.MutableEntityTags`.
Again, this object functions like a :py:class:`dict`:
::
# Change the display text
my_office.tags['dis'] = 'A new display text string'
# Delete the 'his' tag
del my_office.tags['his']
# Add a new tag
my_office.tags['space'] = hszinc.Quantity(4, 'm²')
The changes are held in memory until such time as you either commit them, or
revert them. When changes are stored locally, the ``is_dirty`` property will
return ``True``.
To forget these changes and roll it back to what's live on the server, call
``revert``. This can take an optional list (or other iterable sequence) of
tag names that you specifically wish to revert.
Alternatively, to push these changes, call ``commit``, which takes an optional
callback function. The return value of ``commit`` is a state machine that
returns an instance of the updated entity on success (or raises an exception
with the error):
::
assert my_office.is_dirty # assert will pass because of changes
op = my_office.commit()
op.wait()
assert op.result is my_office # ← this assert will pass
assert not my_office.is_dirty # assert will pass because we've "committed"
# our changes back to the server.
|
PypiClean
|
/project_generator-0.12.0.tar.gz/project_generator-0.12.0/project_generator/util.py
|
import os
import yaml
import locale
import shutil
import string
import operator
from functools import reduce
FILES_EXTENSIONS = {
'include_files': ['h', 'hpp', 'inc'],
'source_files_s': ['s'],
'source_files_c': ['c'],
'source_files_cpp': ['cpp', 'cc'],
'source_files_lib': ['lib', 'ar', 'a'],
'source_files_obj': ['o', 'obj'],
'linker_file': ['sct', 'ld', 'lin', 'icf'],
}
OUTPUT_TYPES = {
'executable': 'exe',
'exe': 'exe',
'library': 'lib',
'lib': 'lib',
}
FILE_MAP = {v:k for k,values in FILES_EXTENSIONS.items() for v in values}
SOURCE_KEYS = ['source_files_c', 'source_files_s', 'source_files_cpp', 'source_files_lib', 'source_files_obj']
VALID_EXTENSIONS = reduce(lambda x,y:x+y,[FILES_EXTENSIONS[key] for key in SOURCE_KEYS])
def rmtree_if_exists(directory):
if os.path.exists(directory):
shutil.rmtree(directory)
def uniqify(_list):
# see: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order/29898968#29898968
return reduce(lambda r, v: v in r[1] and r or (r[0].append(v) or r[1].add(v)) or r, _list, ([], set()))[0]
def merge_recursive(*args):
if all(isinstance(x, dict) for x in args):
output = {}
keys = reduce(operator.or_, [set(x) for x in args])
for key in keys:
# merge all of the ones that have them
output[key] = merge_recursive(*[x[key] for x in args if key in x])
return output
else:
return reduce(operator.add, args)
def flatten(S):
if S == []:
return S
if isinstance(S[0], list):
return flatten(S[0]) + flatten(S[1:])
return S[:1] + flatten(S[1:])
def load_yaml_records(yaml_files):
dictionaries = []
for yaml_file in yaml_files:
try:
f = open(yaml_file, 'rt')
dictionaries.append(yaml.load(f, Loader=yaml.FullLoader))
except IOError:
raise IOError("The file %s referenced in main yaml doesn't exist." % yaml_file)
return dictionaries
class PartialFormatter(string.Formatter):
def get_field(self, field_name, args, kwargs):
try:
val = super(PartialFormatter, self).get_field(field_name, args, kwargs)
except (IndexError, KeyError, AttributeError):
first, _ = field_name._formatter_field_name_split()
val = '{' + field_name + '}', first
return val
def fix_paths(project_data, rel_path, extensions):
""" Fix paths for extension list """
norm_func = lambda path : os.path.normpath(os.path.join(rel_path, path))
for key in extensions:
if type(project_data[key]) is dict:
for k,v in project_data[key].items():
project_data[key][k] = [norm_func(i) for i in v]
elif type(project_data[key]) is list:
project_data[key] = [norm_func(i) for i in project_data[key]]
else:
project_data[key] = norm_func(project_data[key])
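# Hedged usage sketch (illustration only, not part of the original module).
# The dictionaries below are hypothetical project records.
#
# a = {'source_files_c': ['main.c'], 'macros': ['DEBUG']}
# b = {'source_files_c': ['util.c'], 'include_paths': ['inc']}
# merge_recursive(a, b)
# # -> {'source_files_c': ['main.c', 'util.c'], 'macros': ['DEBUG'],
# #     'include_paths': ['inc']}  (key order may vary)
# uniqify(['gcc', 'make', 'gcc'])              # -> ['gcc', 'make']
# flatten([['main.c', ['util.c']], 'app.c'])   # -> ['main.c', 'util.c', 'app.c']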
|
PypiClean
|
/h2o_pysparkling_2.2-3.36.1.5-1.tar.gz/h2o_pysparkling_2.2-3.36.1.5-1/h2o/utils/metaclass.py
|
# Note: no unicode_literals feature, since type.__getattribute__ cannot take unicode strings as parameter...
from __future__ import division, print_function, absolute_import
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import _str_type
from functools import wraps
import inspect
import warnings
from h2o.exceptions import H2ODeprecationWarning
def h2o_meta(*args):
return with_metaclass(H2OMeta, *args)
def fullname(fn):
"""for compatibility with Py 2.7"""
return fn.__qualname__ if hasattr(fn, '__qualname__') else fn.__name__
def extend_and_replace(cls, **attrs):
new_attrs = dict(cls.__dict__)
new_attrs.update(attrs)
new_cls = type(cls.__name__, (cls,), new_attrs)
# new_cls.__module__ = cls.__module__
# setattr(sys.modules[cls.__module__], cls.__name__, new_cls)
return new_cls
def decoration_info(fn):
return getattr(fn, '__decoration__', None)
def _set_decoration_info(wrapper, wrapped, decoration_type):
wrapper.__decoration__ = dict(
wrapped=wrapped,
type=decoration_type
)
def deprecated_params(deprecations):
old = deprecations.keys()
def decorator(fn):
fn_name = fullname(fn)
@wraps(fn)
def wrapper(*args, **kwargs):
new_kwargs = {}
keys = set(kwargs.keys())
messages = []
for k, v in kwargs.items():
if k in old:
new = deprecations[k]
new_tup = (((lambda ov: None), None) if new in [None, ()]
else ((lambda ov: {new: ov}), None) if isinstance(new, _str_type)
else (new, None) if callable(new)
else ((lambda ov: None), new[1]) if isinstance(new, tuple) and new[0] is None
else ((lambda ov: {new[0]: ov}), new[1]) if isinstance(new, tuple) and isinstance(new[0], _str_type)
else new)
assert isinstance(new_tup, tuple), (
"`deprecations` values must be one of: "
"None (deprecated param removed), a string (deprecated property renamed), "
"a tuple(new_name: Optional[str], message: str) to customize the deprecation message, "
"a callable lambda old_value: dict(param1=value1, param2=value2) for advanced deprecations "
"(one param replaced with one or more params with transformation of the deprecated value), "
"or a tuple(lambda old_value: dict(param1=value1, param2=value2), message: str).")
transform_fn, msg = new_tup
new_params = transform_fn(v)
if new_params in [None, {}]:
messages.append(msg or "``{}`` param of ``{}`` is deprecated and will be ignored."
.format(k, fn_name))
else:
assert isinstance(new_params, dict)
messages.append(msg or "``{}`` param of ``{}`` is deprecated, please use ``{}`` instead."
.format(k, fn_name, ', '.join(new_params.keys())))
intersect = set(new_params.keys()) & keys
if any(intersect):
messages.append("Using both deprecated param ``{}`` and new param(s) ``{}`` in call to ``{}``, "
"the deprecated param will be ignored."
.format(k, ', '.join(intersect), fn_name))
else:
new_kwargs.update(new_params)
else:
new_kwargs[k] = v
for msg in messages:
warnings.warn(msg, H2ODeprecationWarning, 2)
return fn(*args, **new_kwargs)
_set_decoration_info(wrapper, fn, 'deprecation')
return wrapper
return decorator
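# Hedged usage sketch (illustration only, not part of the original module).
# The class and parameter names below are hypothetical.
#
# class Model(object):
#     @deprecated_params({'ntrees': 'n_estimators', 'seed2': None})
#     def __init__(self, n_estimators=50, seed=None, **kwargs):
#         self.n_estimators = n_estimators
#
# Model(ntrees=100)   # warns and forwards the value to ``n_estimators``
# Model(seed2=42)     # warns that the parameter is deprecated and ignored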
def deprecated_property(name, replaced_by=None, message=None):
"""
Creates a deprecated property that forwards logic to `replaced_by` property.
:param name: name of the deprecated property.
:param replaced_by: the new property object. If None, then the deprecated property will be a no-op property.
:param message: the custom deprecation message. If None, a default message will be used.
:return: the deprecated property.
"""
if replaced_by:
new_name = replaced_by.fget.__name__
doc = message or "[Deprecated] Use ``{}`` instead".format(new_name)
# doc += "\n\n{}".format(replaced_by.__doc__)
msg = message or "``{}`` is deprecated, please use ``{}`` instead.".format(name, new_name)
def wrap(accessor):
if accessor is None: return
def wrapper(*args):
warnings.warn(msg, H2ODeprecationWarning, 2)
return accessor(*args)
return wrapper
return property(wrap(replaced_by.fget), wrap(replaced_by.fset), wrap(replaced_by.fdel), doc)
else:
doc = message or "[Deprecated] The property was removed and will be ignored."
msg = message or "``{}`` is deprecated and will be ignored.".format(name)
def _fget(self):
warnings.warn(msg, H2ODeprecationWarning, 2)
return None
def _fset(self, _):
warnings.warn(msg, H2ODeprecationWarning, 2)
return property(_fget, _fset, None, doc)
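# Hedged usage sketch (illustration only, not part of the original module);
# the class below is hypothetical.
#
# class Foo(object):
#     @property
#     def n_estimators(self):
#         return 50
#     ntrees = deprecated_property('ntrees', replaced_by=n_estimators)
#
# Foo().ntrees   # emits an H2ODeprecationWarning and returns 50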
class _DeprecatedFunction(object):
"""
Decorator for deprecated functions or methods.
:example::
class Foo:
def new_method(self, param=None):
...
do_sth(param)
@deprecated_function(replaced_by=new_method)
def old_method(self, param=None):
pass
"""
def __init__(self, msg=None, replaced_by=None):
"""
:param msg: the deprecation message to print as a ``DeprecationWarning`` when the function is called.
:param replaced_by: the optional function replacing the deprecated one.
If provided, then the code from the legacy method can be deleted and limited to `pass`,
as the call, with its arguments, will be automatically forwarded to this replacement function.
"""
self._msg = msg
self._replaced_by = replaced_by
def __call__(self, fn):
msg = (self._msg if self._msg is not None
else "``{}`` is deprecated, please use ``{}`` instead."
.format(fullname(fn), fullname(self._replaced_by))
if self._replaced_by is not None
else "``{}`` is deprecated.".format(fullname(fn)))
fn.__doc__ = "{msg}\n\n{doc}".format(msg=msg, doc=fn.__doc__) if fn.__doc__ is not None else msg
call_fn = self._replaced_by or fn
@wraps(fn)
def wrapper(*args, **kwargs):
warnings.warn(msg, H2ODeprecationWarning, 2)
return call_fn(*args, **kwargs)
return wrapper
deprecated_fn = _DeprecatedFunction
def deprecated_params_order(old_sig, is_called_with_old_sig):
"""
Creates a deprecated property order and provide a correct function call
:param old_sig: list of strings in old property order
:param is_called_with_old_sig: Function that return true if the function is called with different order
:return: function call with correct parameter order and deprecation warning or the same function call
:example::
def _is_called_with_old_sig(*args, **kwargs): return len(args) > 0 and isinstance(args[0], bool)
class Foo:
@deprecated_params_order(old_sig=["param2", "param1"], is_called_with_old_sig=_is_called_with_old_sig)
def method(self, param1, param2):
pass
"""
def handle_deprecated_params_order(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
if is_called_with_old_sig and is_called_with_old_sig(*args, **kwargs):
warnings.warn("please check and use the new signature of method "+fullname(fn), H2ODeprecationWarning, 2)
for i, arg in enumerate(args):
kw = old_sig[i]
kwargs[kw] = arg
return fn(self, **kwargs)
else:
return fn(self, *args, **kwargs)
return wrapper
return handle_deprecated_params_order
class MetaFeature(object):
"""To be implemented by meta features exposed through the ``H2OMeta` metaclass"""
NOT_FOUND = object()
@classmethod
def before_class(cls, bases, dct):
"""Allows to dynamically change how the class will be constructed"""
return bases, dct
@classmethod
def after_class(cls, clz):
"""Allows to modify the class after construction.
Note that decorators applied at class level are still not accessible at that time
as they're applied only once the class is FULLY constructed."""
return clz
@classmethod
def get_class_attr(cls, clz, name):
"""Allows to override how the class attributes are accessed on this class."""
return MetaFeature.NOT_FOUND
@classmethod
def set_class_attr(cls, clz, name, value):
"""Allows to override how the class attributes are set on this class."""
return False
@staticmethod
def type_attr(clz, name):
try:
return type.__getattribute__(clz, name)
except AttributeError:
return None
class _Alias(MetaFeature):
"""
Decorator to alias the current method without having to implement any duplicating or forwarding code.
:example::
class Foo(metaclass=H2OMeta):
@alias('ein', 'uno')
def one(self, param):
...
do_sth()
"""
@classmethod
def before_class(cls, bases, dct):
attr_names = set(dct)
ddct = dict(dct)
for name, impl in dct.items():
if hasattr(impl, '_aliases'):
for alias in impl._aliases - attr_names:
ddct[str(alias)] = impl
delattr(impl, '_aliases')
return bases, ddct
def __init__(self, *aliases):
"""
:param aliases: alternative names for the method on which the decorator is applied.
"""
self._aliases = set(aliases)
def __call__(self, fn):
fn._aliases = self._aliases
return fn
alias = _Alias
class _BackwardsCompatible(MetaFeature):
"""
Decorator to keep backward compatibility support for old methods without exposing them (non-discoverable, non-documented).
:example:
@backwards_compatibility(
class_attrs=dict(
counter=1
),
instance_attrs=dict(
getincr=local_function_with_legacy_logic
)
)
class Foo(metaclass=H2OMeta):
global_counter = 0
def __init__(self):
self._counter = 0
def incr_and_get(self):
Foo.counter += 1
self._counter += 1
return self._counter
"""
def __init__(self, class_attrs=None, instance_attrs=None):
self._class_attrs = class_attrs or {}
self._instance_attrs = instance_attrs or {}
def __call__(self, clz):
clz._bc = self
new_clz = None
@wraps(clz.__init__)
def __init__(self, *args, **kwargs):
super(new_clz, self).__init__(*args, **kwargs)
self._bci = {name: val.__get__(self, new_clz) if callable(val) else val for name, val in clz._bc._instance_attrs.items()}
def __getattr__(self, name):
try:
attr = super(new_clz, self).__getattr__(name)
return attr
except AttributeError:
pass
if name in self._bci:
return self._bci[name]
return getattr(new_clz, name)
new_clz = extend_and_replace(clz, __init__=__init__, __getattr__=__getattr__)
return new_clz
@classmethod
def get_class_attr(cls, clz, name):
bc = cls.type_attr(clz, '_bc')
if bc is not None and name in bc._class_attrs:
return bc._class_attrs[name]
return super(_BackwardsCompatible, cls).get_class_attr(clz, name)
@classmethod
def set_class_attr(cls, clz, name, value):
bc = cls.type_attr(clz, '_bc')
if bc is not None and name in bc._class_attrs:
bc._class_attrs[name] = value
return True
return super(_BackwardsCompatible, cls).set_class_attr(clz, name, value)
backwards_compatibility = _BackwardsCompatible
class H2OMeta(type):
"""
The H2O metaclass to be used by classes wanting to benefit from most of the decorators implemented in this file.
Features requiring usage of this metaclass are listed and injected through the `_FEATURES` static field.
"""
_FEATURES = [_Alias, _BackwardsCompatible]
def __new__(mcs, name, bases, dct):
for m in H2OMeta._FEATURES:
bases, dct = m.before_class(bases, dct)
clz = super(H2OMeta, mcs).__new__(mcs, name, bases, dct)
for m in H2OMeta._FEATURES:
clz = m.after_class(clz)
return clz
def __getattribute__(cls, name):
for m in H2OMeta._FEATURES:
attr = m.get_class_attr(cls, name)
if attr is not MetaFeature.NOT_FOUND:
return attr
return type.__getattribute__(cls, name)
def __setattr__(cls, name, value):
for m in H2OMeta._FEATURES:
if m.set_class_attr(cls, name, value):
return
type.__setattr__(cls, name, value)
# noinspection PyAbstractClass
class CallableString(str):
def __call__(self):
return self
|
PypiClean
|
/geonode_worldmap-0.3.tar.gz/geonode_worldmap-0.3/geonode_worldmap/gazetteer/models.py
|
from django.utils.translation import ugettext as _
from django.contrib.gis.db import models
from django.db.models import signals
from geonode.layers.models import Layer, Attribute
# Querying postgis database for features then saving as django model object is
# significantly slower than doing everything via SQL on postgis database only.
# from django.modelsinspector import add_introspection_rules
# add_introspection_rules([], ["^django\.contrib\.gis\.db\.models\.fields\.GeometryField"])
class GazetteerEntry(models.Model):
layer_name = models.CharField(_('Layer Name'), max_length=255, blank=False, null=False)
layer_attribute = models.CharField(_('Layer Attribute'), max_length=255, blank=False, null=False)
feature_type = models.CharField(_('Feature Type'), max_length=255, blank=False, null=False)
feature_fid = models.BigIntegerField(_('Feature FID'), blank=False, null=False)
latitude = models.FloatField(_('Latitude'))
longitude = models.FloatField(_('Longitude'))
place_name = models.TextField(_('Place name'))
start_date = models.TextField(_('Start Date'), blank=True, null=True)
end_date = models.TextField(_('End Date'), blank=True, null=True)
julian_start = models.IntegerField(_('Julian Date Start'), blank=True, null=True)
julian_end = models.IntegerField(_('Julian Date End'), blank=True, null=True)
project = models.CharField(_('Project'), max_length=255, blank=True, null=True)
feature = models.GeometryField(_('Geometry'), null=True, blank=True)
username = models.CharField(_('User Name'), max_length=30, blank=True, null=True)
objects = models.GeoManager()
class Meta:
unique_together = (("layer_name", "layer_attribute", "feature_fid"))
class GazetteerAttribute(models.Model):
attribute = models.OneToOneField(
Attribute,
blank=False,
null=False)
in_gazetteer = models.BooleanField(default=False)
is_start_date = models.BooleanField(default=False)
is_end_date = models.BooleanField(default=False)
date_format = models.TextField(blank=True, null=True)
def layer_name(self):
return self.attribute.layer.name
def gazetteer_delete_layer(instance, sender, **kwargs):
GazetteerEntry.objects.filter(layer_name=instance.name).delete()
print('Removing gazetteer entries for the layer')
signals.pre_delete.connect(gazetteer_delete_layer, sender=Layer)
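# Hedged usage sketch (illustration only, not part of the original module).
# The layer and attribute names below are hypothetical.
#
# entry, created = GazetteerEntry.objects.get_or_create(
#     layer_name='boston_buildings',
#     layer_attribute='name',
#     feature_fid=42,
#     defaults={'feature_type': 'Building', 'latitude': 42.36,
#               'longitude': -71.06, 'place_name': 'Old State House'})
# GazetteerEntry.objects.filter(place_name__icontains='state').count()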
|
PypiClean
|
/ipydrawio-widgets-1.3.0.tar.gz/ipydrawio-widgets-1.3.0/README.md
|
# IPyDrawio Widgets
[![docs][docs-badge]][docs] [![binder-badge][]][binder]
[![install from pypi][pypi-badge]][pypi] [![install from conda-forge][conda-badge]][conda]
[![build][workflow-badge]][workflow] [![coverage][cov-badge]][cov]
> The kernel-side classes for [ipydrawio](https://github.com/deathbeds/ipydrawio).
This package is useful in situations where your JupyterLab client is configured in another
environment than the kernel that might create widgets.
See the [main project repo](https://github.com/deathbeds/ipydrawio) for more
information.
## Installation
> _**Note:** Usually, you'll want the entire `ipydrawio` suite, replacing `ipydrawio-widgets`
> with `ipydrawio`!_
To install just the kernel-side widgets (without any of the front end assets):
```bash
pip install ipydrawio-widgets # or...
mamba install -c conda-forge ipydrawio-widgets # or...
conda install -c conda-forge ipydrawio-widgets
```
## Usage
Display a basic diagram:
```python
from ipydrawio_widgets import Diagram
diagram = Diagram()
diagram
```
Update the XML source:
```python
from pathlib import Path
diagram.source.value = Path("a-drawio.dio").read_text()
```
The `.source.value` will always contain the up-to-date XML.
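For example (a minimal sketch, assuming the diagram above has already been displayed and edited):
```python
# Read the current XML back out of the widget
xml = diagram.source.value
print(xml[:50])
```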
For more, see the documentation.
## Open Source
This work is licensed under the [Apache-2.0] License.
```
Copyright 2023 ipydrawio contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
[apache-2.0]:
https://github.com/deathbeds/ipydrawio/blob/main/py_packages/ipydrawio-widgets/LICENSE.txt
[binder]:
http://mybinder.org/v2/gh/deathbeds/ipydrawio/main?urlpath=lab/tree/docs/Poster.dio.svg
[binder-badge]: https://mybinder.org/badge_logo.svg
[pypi-badge]: https://img.shields.io/pypi/v/ipydrawio-widgets
[pypi]: https://pypi.org/project/ipydrawio-widgets
[conda-badge]: https://img.shields.io/conda/vn/conda-forge/ipydrawio-widgets
[conda]: https://anaconda.org/conda-forge/ipydrawio-widgets
[workflow-badge]:
https://github.com/deathbeds/ipydrawio/workflows/.github/workflows/ci.yml/badge.svg
[workflow]:
https://github.com/deathbeds/ipydrawio/actions?query=branch%3Amain+workflow%3A.github%2Fworkflows%2Fci.yml
[cov-badge]:
https://codecov.io/gh/deathbeds/ipydrawio/branch/main/graph/badge.svg?token=9B74VKHQDK
[cov]: https://codecov.io/gh/deathbeds/ipydrawio
[docs-badge]: https://readthedocs.org/projects/ipydrawio/badge/?version=latest
[docs]: https://ipydrawio.rtfd.io
|
PypiClean
|
/esper-video-0.1.0.tar.gz/esper-video-0.1.0/esper/scanner.py
|
import scannertools as st
from scannerpy import Database
import os
import multiprocessing as mp
import socket
import subprocess as sp
import json
from scannerpy.stdlib import writers
def join_path(src_cls, dest_cls):
from collections import defaultdict
from django.apps import apps
models = apps.get_models()
def key(m):
return m._meta.db_table
def join_graph():
edges = defaultdict(list)
edge_fields = defaultdict(dict)
for model in models:
for field in model._meta.get_fields():
if field.is_relation and hasattr(field,
'column') and not field.null:
edges[key(model)].append(key(field.related_model))
edge_fields[key(model)][key(field.related_model)] = field
return edges, edge_fields
def bfs(join_graph):
frontier = set([key(src_cls)])
visited = set()
paths = {key(src_cls): []}
while len(frontier) > 0:
new_frontier = set()
for node in frontier:
adjacent_unvisited = set(join_graph[node]) - visited - frontier
for other in adjacent_unvisited:
paths[other] = paths[node] + [node]
new_frontier |= adjacent_unvisited
visited |= frontier
frontier = new_frontier
return {k: v + [k] for k, v in paths.items()}
keymap = {key(m): m for m in models}
graph, fields = join_graph()
paths = bfs(graph)
path = paths[key(dest_cls)]
return [fields[path[i]][path[i + 1]] for i in range(len(path) - 1)]
class ScannerWrapper:
def __init__(self, db, cluster=None):
self.db = db
self.cluster = cluster
@classmethod
def create(cls, cluster=None, multiworker=False, **kwargs):
if cluster is not None:
db = cluster.database(**kwargs)
else:
workers = [
'localhost:{}'.format(5002 + i)
for i in range(mp.cpu_count() // 8)
] if multiworker else None
# import scannerpy.libscanner as bindings
# import scanner.metadata_pb2 as metadata_types
# params = metadata_types.MachineParameters()
# params.ParseFromString(bindings.default_machine_params())
# params.num_load_workers = 2
# params.num_save_workers = 2
db = Database(
#machine_params=params.SerializeToString(),
workers=workers,
**kwargs)
return cls(db, cluster)
def sql_config(self):
return self.db.protobufs.SQLConfig(
adapter='postgres',
hostaddr=socket.gethostbyname('db')
if self.db._start_cluster else '127.0.0.1',
port=5432,
dbname='esper',
user=os.environ['DJANGO_DB_USER'],
password=os.environ['DJANGO_DB_PASSWORD'])
def sql_source(self, cls):
from query.models import Frame, Video
table = cls._meta.db_table
def joins(dst):
return [
'INNER JOIN {dst} ON {src}.{srcfield} = {dst}.{dstfield}'.
format(src=field.model._meta.db_table,
srcfield=field.column,
dst=field.related_model._meta.db_table,
dstfield='id') for field in join_path(cls, dst)
]
return self.db.sources.SQL(
config=self.sql_config(),
query=self.db.protobufs.SQLQuery(
fields=','.join([
'{}.{} as {}'.format(table, field.name, field.name)
for field in cls._meta.get_fields()
if not field.is_relation
]),
id='{}.id'.format(table),
group='{}.number'.format(Frame._meta.db_table),
table='{} {}'.format(table, ' '.join(joins(Video)))))
def sql_source_args(self, video, num_elements=None, filter=None):
from query.models import Video
return {
'filter':
'{}.id = {} {}'.format(Video._meta.db_table, video.id,
('AND ' +
filter) if filter is not None else ''),
'num_elements':
num_elements if num_elements is not None else 0
}
def sql_sink(self,
cls,
input,
videos,
suffix,
insert=True,
ignore_conflicts=True):
from query.models import ScannerJob
sink = self.db.sinks.SQL(config=self.sql_config(),
input=input,
table=cls._meta.db_table,
job_table=ScannerJob._meta.db_table,
insert=insert,
ignore_conflicts=ignore_conflicts)
args = [{'job_name': '{}_{}'.format(v.path, suffix)} for v in videos]
return st.BoundOp(op=sink, args=args)
# Remove videos that don't have a table or have already been processed by the pipeline
def filter_videos(self, videos, pipeline):
from query.models import ScannerJob
suffix = pipeline.job_suffix
assert suffix is not None
processed = set([
t['name'] for t in ScannerJob.objects.filter(
name__contains=suffix).values('name')
])
return [
v for v in videos if self.db.has_table(v.path)
and not '{}_{}'.format(v.path, suffix) in processed
]
class ScannerSQLTable(st.DataSource):
def __init__(self, cls, video, num_elements=None, filter=None):
self._cls = cls
self._video = video
self._num_elements = num_elements
self._filter = filter
def scanner_source(self, db):
return ScannerWrapper(db).sql_source(self._cls)
def scanner_args(self, db):
return ScannerWrapper(db).sql_source_args(
self._video, num_elements=self._num_elements, filter=self._filter)
class ScannerSQLPipeline:
json_kernel = None
db_class = None
_job_cache = None
def build_sink(self, db_videos):
self._json_kernel_instance = getattr(
self._db.ops, self.json_kernel)(**self._output_ops)
return ScannerWrapper(self._db).sql_sink(
cls=self.db_class,
input=self._json_kernel_instance,
videos=db_videos,
suffix=self.job_suffix,
insert=True)
def committed(self, output):
from query.models import ScannerJob
if self._job_cache is None:
self._job_cache = set([
r['name'] for r in ScannerJob.objects.filter(
name__contains=self.job_suffix).values('name')
])
return output['job_name'] in self._job_cache
def parse_output(self):
pass
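# Hedged usage sketch (illustration only, not part of the original module).
# Assumes a Django `Video` queryset and a concrete pipeline object exposing
# `job_suffix`; both names below are hypothetical.
#
# wrapper = ScannerWrapper.create(multiworker=False)
# videos = list(Video.objects.all())
# todo = wrapper.filter_videos(videos, my_pipeline)
# print('%d of %d videos still need processing' % (len(todo), len(videos)))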
|
PypiClean
|
/ls.joyous-1.4.0rc1.tar.gz/ls.joyous-1.4.0rc1/docs/releases/1.4.0.rst
|
==========================
Joyous 1.4.0 release notes
==========================
.. contents::
:local:
:depth: 3
What's new
==========
Translations
~~~~~~~~~~~~
Thanks to Tomas Walch for the new Swedish translation.
Compatibility with Wagtail 2.11
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Now that 2.11 is the new LTS for Wagtail, request.site has been replaced with
Site.find_for_request(request), removing the need for
wagtail.core.middleware.SiteMiddleware. Thanks to Tomas Walch for this.
Compatibility with Wagtail 2.12
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Deprecated
~~~~~~~~~~
The ``JOYOUS_DEFEND_FORMS`` setting is deprecated. It still works, but
is no longer documented.
Bug fixes
~~~~~~~~~
* FormDefender should cope with `base_form_class = None`
* Do not assimilate subclasses
* Fix for _gregorian_to_ssweek where date is in last week of prev year
* Tests should cope with year missing from title of exceptions
* Stop using deprecated is_ajax function
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_24/api/support_api.py
|
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class SupportApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api224_support_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
continuation_token=None, # type: str
filter=None, # type: str
limit=None, # type: int
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.SupportGetResponse
"""List connection paths
Displays connection paths between the current array and each connected array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api224_support_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: SupportGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api224_support_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api224_support_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.24/support', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SupportGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
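# Hedged usage sketch (illustration only, not generated client code).
# Assumes a configured `api_client`; the token value is hypothetical.
#
# support_api = SupportApi(api_client)
# resp = support_api.api224_support_get_with_http_info(
#     authorization='Bearer <JWT>', limit=10, sort=['name'])
# print(resp)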
def api224_support_patch_with_http_info(
self,
support=None, # type: models.SupportPatch
authorization=None, # type: str
x_request_id=None, # type: str
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.SupportResponse
"""Create connection path
Creates a connection path from the array to another array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api224_support_patch_with_http_info(support, async_req=True)
>>> result = thread.get()
:param SupportPatch support: (required)
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: SupportResponse
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'support' is set
if support is None:
raise TypeError("Missing the required parameter `support` when calling `api224_support_patch`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'support' in params:
body_params = params['support']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.24/support', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SupportResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
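# Hedged usage sketch (illustrative, not part of the generated client):
# calling the PATCH endpoint above. `SupportPatch` comes from the client's
# `models` package referenced in the type comments; the attributes it accepts
# are not shown here, so populate them per that model's definition. The
# `support_api` instance name is an assumption.
#
#   body = models.SupportPatch()  # set the desired support fields on `body`
#   resp = support_api.api224_support_patch_with_http_info(
#       support=body,
#       x_request_id="example-request-id",  # optional correlation id
#   )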
def api224_support_test_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
filter=None, # type: str
limit=None, # type: int
offset=None, # type: int
sort=None, # type: List[str]
test_type=None, # type: str
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.TestResultGetResponse
"""List Pure Storage Support connection data
Displays information about whether the array can connect to Pure Storage Support by establishing a secure shell or secure HTTP connection and verifies that messages can be exchanged.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api224_support_test_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param str test_type: Specifies the type of test. Valid values are `all`, `phonehome`, and `remote-assist`. If not specified, defaults to `all`.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: TestResultGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api224_support_test_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api224_support_test_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'test_type' in params:
query_params.append(('test_type', params['test_type']))
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.24/support/test', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TestResultGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
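# Hedged usage sketch (illustrative): querying the support-test endpoint
# defined above. Valid `test_type` values per the docstring are `all`,
# `phonehome`, and `remote-assist`; the `support_api` instance name is an
# assumption.
#
#   resp = support_api.api224_support_test_get_with_http_info(
#       test_type="phonehome",
#       limit=10,
#       total_item_count=True,
#   )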
|
PypiClean
|
/monk_pytorch_cuda101-0.0.1.tar.gz/monk_pytorch_cuda101-0.0.1/monk/tf_keras_1/finetune/level_14_master_main.py
|
from monk.tf_keras_1.finetune.imports import *
from monk.system.imports import *
from monk.tf_keras_1.finetune.level_13_updates_main import prototype_updates
class prototype_master(prototype_updates):
'''
Main class for all functions in expert mode
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
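    # Hedged usage sketch (illustrative, not part of the library): a typical
    # expert-mode flow using the master functions defined below, assuming the
    # prototype object `gtf` was already configured through the update
    # functions inherited from prototype_updates.
    #
    #   gtf.Dataset();                                   # transforms + dataloaders
    #   gtf.Model();                                     # build the configured model
    #   gtf.Train();                                     # run training
    #   accuracy, class_based_accuracy = gtf.Evaluate();
    #   predictions = gtf.Infer(img_name="sample.jpg");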
@accepts("self", verbose=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Dataset(self):
'''
Load transforms and set dataloader
Args:
None
Returns:
None
'''
self.set_dataset_final(test=self.system_dict["states"]["eval_infer"]);
save(self.system_dict);
if(self.system_dict["states"]["eval_infer"]):
self.custom_print("Pre-Composed Test Transforms");
self.custom_print(self.system_dict["dataset"]["transforms"]["test"]);
self.custom_print("");
self.custom_print("Dataset Numbers");
self.custom_print(" Num test images: {}".format(self.system_dict["dataset"]["params"]["num_test_images"]));
self.custom_print(" Num classes: {}".format(self.system_dict["dataset"]["params"]["num_classes"]))
self.custom_print("");
else:
self.custom_print("Pre-Composed Train Transforms");
self.custom_print(self.system_dict["dataset"]["transforms"]["train"]);
self.custom_print("");
self.custom_print("Pre-Composed Val Transforms");
self.custom_print(self.system_dict["dataset"]["transforms"]["val"]);
self.custom_print("");
self.custom_print("Dataset Numbers");
self.custom_print(" Num train images: {}".format(self.system_dict["dataset"]["params"]["num_train_images"]));
self.custom_print(" Num val images: {}".format(self.system_dict["dataset"]["params"]["num_val_images"]));
self.custom_print(" Num classes: {}".format(self.system_dict["dataset"]["params"]["num_classes"]))
self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", [int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Dataset_Percent(self, percent):
'''
Select a portion of dataset
Args:
percent (int or float): percentage of the dataset to sample
Returns:
None
'''
sampled_dataset = None;
image_datasets = {};
dataset_type = self.system_dict["dataset"]["dataset_type"];
dataset_train_path = self.system_dict["dataset"]["train_path"];
dataset_val_path = self.system_dict["dataset"]["val_path"];
csv_train = self.system_dict["dataset"]["csv_train"];
csv_val = self.system_dict["dataset"]["csv_val"];
train_val_split = self.system_dict["dataset"]["params"]["train_val_split"];
delimiter = self.system_dict["dataset"]["params"]["delimiter"];
batch_size = self.system_dict["dataset"]["params"]["batch_size"];
shuffle = self.system_dict["dataset"]["params"]["train_shuffle"];
num_workers = self.system_dict["dataset"]["params"]["num_workers"];
if(dataset_type == "train"):
label_list = [];
image_list = [];
classes = os.listdir(dataset_train_path);
for i in range(len(classes)):
tmp_image_list = os.listdir(dataset_train_path + "/" + classes[i]);
subset_image_list = tmp_image_list[:int(len(tmp_image_list)*percent/100.0)];
result = list(map(lambda x: classes[i] + "/" + x, subset_image_list))
tmp_label_list = [classes[i]]*len(subset_image_list);
label_list += tmp_label_list;
image_list += result;
image_label_dict = {'ID': image_list, 'Label': label_list}
df = pd.DataFrame(image_label_dict);
df.to_csv("sampled_dataset_train.csv", index=False);
elif(dataset_type == "train-val"):
label_list = [];
image_list = [];
classes = os.listdir(dataset_train_path);
for i in range(len(classes)):
tmp_image_list = os.listdir(dataset_train_path + "/" + classes[i]);
subset_image_list = tmp_image_list[:int(len(tmp_image_list)*percent/100.0)];
result = list(map(lambda x: classes[i] + "/" + x, subset_image_list))
tmp_label_list = [classes[i]]*len(subset_image_list);
label_list += tmp_label_list;
image_list += result;
image_label_dict = {'ID': image_list, 'Label': label_list}
df = pd.DataFrame(image_label_dict);
df.to_csv("sampled_dataset_train.csv", index=False);
label_list = [];
image_list = [];
classes = os.listdir(dataset_val_path);
for i in range(len(classes)):
tmp_image_list = os.listdir(dataset_val_path + "/" + classes[i]);
subset_image_list = tmp_image_list[:int(len(tmp_image_list)*percent/100.0)];
result = list(map(lambda x: classes[i] + "/" + x, subset_image_list))
tmp_label_list = [classes[i]]*len(subset_image_list);
label_list += tmp_label_list;
image_list += result;
image_label_dict = {'ID': image_list, 'Label': label_list}
df = pd.DataFrame(image_label_dict);
df.to_csv("sampled_dataset_val.csv", index=False);
elif(dataset_type == "csv_train"):
df = pd.read_csv(csv_train);
df = df.iloc[np.random.permutation(len(df))]
df_sampled = df.iloc[:int(len(df)*percent/100.0)];
df_sampled.to_csv("sampled_dataset_train.csv", index=False);
elif(dataset_type == "csv_train-val"):
df = pd.read_csv(csv_train);
df = df.iloc[np.random.permutation(len(df))]
df_sampled = df.iloc[:int(len(df)*percent/100.0)];
df_sampled.to_csv("sampled_dataset_train.csv", index=False);
df = pd.read_csv(csv_val);
df = df.iloc[np.random.permutation(len(df))]
df_sampled = df.iloc[:int(len(df)*percent/100.0)];
df_sampled.to_csv("sampled_dataset_val.csv", index=False);
###############################################################################################################################################
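    # Hedged usage sketch: sampling a sub-dataset before a quick experiment.
    # The call below writes "sampled_dataset_train.csv" (and, for "-val"
    # dataset types, "sampled_dataset_val.csv") with "ID" and "Label" columns,
    # exactly as built in Dataset_Percent above.
    #
    #   gtf.Dataset_Percent(10);   # keep roughly 10% of the images per class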
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Model(self):
'''
Load model as per the parameters set
Args:
None
Returns:
None
'''
if(self.system_dict["states"]["copy_from"]):
msg = "Cannot set model in Copy-From mode.\n";
raise ConstraintError(msg)
self.set_model_final();
save(self.system_dict)
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Train(self):
'''
Master function for training
Args:
None
Returns:
None
'''
self.set_training_final();
save(self.system_dict);
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Evaluate(self):
'''
Master function for external validation
Args:
None
Returns:
None
'''
accuracy, class_based_accuracy = self.set_evaluation_final();
save(self.system_dict);
return accuracy, class_based_accuracy;
###############################################################################################################################################
###############################################################################################################################################
@error_checks(None, img_name=["file", "r"], img_dir=["folder", "r"], return_raw=False, img_thresh=["gte", 0.0, "lte", 1.0], post_trace=False)
@accepts("self", img_name=[str, bool], img_dir=[str, bool], return_raw=bool, img_thresh=float, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Infer(self, img_name=False, img_dir=False, return_raw=False, img_thresh=0.5):
'''
Master function for inference
Args:
img_name (str): path to image
img_dir (str): path to folders containing images.
(Optional)
return_raw (bool): If True, then output dictionary contains image probability for every class in the set.
Else, only the most probable class score is returned back.
img_thresh (float): Thresholding for multi label image classification.
Returns:
dict: Dictionary containing details on predictions.
'''
if(self.system_dict["dataset"]["label_type"] == "single" or self.system_dict["dataset"]["label_type"] == False):
if(not img_dir):
predictions = self.set_prediction_final(img_name=img_name, return_raw=return_raw);
else:
predictions = self.set_prediction_final(img_dir=img_dir, return_raw=return_raw);
return predictions;
else:
if(not img_dir):
predictions = self.set_prediction_final_multiple(img_name=img_name, return_raw=return_raw, img_thresh=img_thresh);
else:
predictions = self.set_prediction_final_multiple(img_dir=img_dir, return_raw=return_raw, img_thresh=img_thresh);
return predictions;
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", network=list, data_shape=tuple, use_gpu=bool, network_initializer=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Compile_Network(self, network, data_shape=(3, 224, 224), use_gpu=True, network_initializer="xavier_normal"):
'''
Master function for compiling custom network and initializing it
Args:
network: Network stacked as list of lists
data_shape (tuple): Input shape of data in format C, H, W
use_gpu (bool): If True, model loaded on gpu
network_initializer (str): Initialize network with random weights. Select the random generator type function.
Returns:
None
'''
self.system_dict["custom_model"]["network_stack"] = network;
self.system_dict["custom_model"]["network_initializer"] = network_initializer;
self.system_dict["model"]["type"] = "custom";
self.system_dict["dataset"]["params"]["data_shape"] = data_shape;
self.system_dict = set_device(use_gpu, self.system_dict);
save(self.system_dict);
self.set_model_final();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", data_shape=tuple, port=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Visualize_With_Netron(self, data_shape=None, port=None):
'''
Visualize network with netron library
Args:
data_shape (tuple): Input shape of data in format C, H, W
port (int): Local host free port.
Returns:
None
'''
self.custom_print("Using Netron To Visualize");
self.custom_print("Not compatible on kaggle");
self.custom_print("Compatible only for Jupyter Notebooks");
if not data_shape:
self.custom_print("Provide data_shape argument");
pass;
else:
c, h, w = data_shape;
batch_size=1;
if(tf.__version__.split(".")[0] == "2"):
x = tf.keras.backend.placeholder(dtype=tf.float32, shape=(batch_size, h, w, c))
else:
x = tf.placeholder(tf.float32, shape=(batch_size, h, w, c))
y = self.system_dict["local"]["model"](x)
self.system_dict["local"]["model"].save("final.h5");
import netron
if(not port):
netron.start('final.h5')
else:
netron.start('final.h5', port=port)
###############################################################################################################################################
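    # Hedged usage sketch: exporting the current Keras model to "final.h5"
    # and viewing it with netron from a Jupyter notebook, as implemented
    # above. The port value is illustrative.
    #
    #   gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8082);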
|
PypiClean
|
/tb-rest-client-3.5.tar.gz/tb-rest-client-3.5/tb_rest_client/models/models_ce/slack_notification_target_config.py
|
# Copyright 2023. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import re # noqa: F401
import six
from tb_rest_client.models.models_ce.notification_target_config import NotificationTargetConfig # noqa: F401,E501
class SlackNotificationTargetConfig(NotificationTargetConfig):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'conversation': 'SlackConversation',
'conversation_type': 'str',
'description': 'str'
}
if hasattr(NotificationTargetConfig, "swagger_types"):
swagger_types.update(NotificationTargetConfig.swagger_types)
attribute_map = {
'conversation': 'conversation',
'conversation_type': 'conversationType',
'description': 'description'
}
if hasattr(NotificationTargetConfig, "attribute_map"):
attribute_map.update(NotificationTargetConfig.attribute_map)
def __init__(self, conversation=None, conversation_type=None, description=None, *args, **kwargs): # noqa: E501
"""SlackNotificationTargetConfig - a model defined in Swagger""" # noqa: E501
self._conversation = None
self._conversation_type = None
self._description = None
self.discriminator = None
self.conversation = conversation
if conversation_type is not None:
self.conversation_type = conversation_type
if description is not None:
self.description = description
NotificationTargetConfig.__init__(self, *args, **kwargs)
@property
def conversation(self):
"""Gets the conversation of this SlackNotificationTargetConfig. # noqa: E501
:return: The conversation of this SlackNotificationTargetConfig. # noqa: E501
:rtype: SlackConversation
"""
return self._conversation
@conversation.setter
def conversation(self, conversation):
"""Sets the conversation of this SlackNotificationTargetConfig.
:param conversation: The conversation of this SlackNotificationTargetConfig. # noqa: E501
:type: SlackConversation
"""
if conversation is None:
raise ValueError("Invalid value for `conversation`, must not be `None`") # noqa: E501
self._conversation = conversation
@property
def conversation_type(self):
"""Gets the conversation_type of this SlackNotificationTargetConfig. # noqa: E501
:return: The conversation_type of this SlackNotificationTargetConfig. # noqa: E501
:rtype: str
"""
return self._conversation_type
@conversation_type.setter
def conversation_type(self, conversation_type):
"""Sets the conversation_type of this SlackNotificationTargetConfig.
:param conversation_type: The conversation_type of this SlackNotificationTargetConfig. # noqa: E501
:type: str
"""
allowed_values = ["DIRECT", "PRIVATE_CHANNEL", "PUBLIC_CHANNEL"] # noqa: E501
if conversation_type not in allowed_values:
raise ValueError(
"Invalid value for `conversation_type` ({0}), must be one of {1}" # noqa: E501
.format(conversation_type, allowed_values)
)
self._conversation_type = conversation_type
@property
def description(self):
"""Gets the description of this SlackNotificationTargetConfig. # noqa: E501
:return: The description of this SlackNotificationTargetConfig. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this SlackNotificationTargetConfig.
:param description: The description of this SlackNotificationTargetConfig. # noqa: E501
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SlackNotificationTargetConfig, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SlackNotificationTargetConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
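# Hedged usage sketch (illustrative): constructing a Slack notification
# target config. `conversation` must be a SlackConversation instance from the
# same models package; its constructor arguments are not shown in this file,
# so the `slack_conversation` object below is assumed to be built elsewhere.
#
#   config = SlackNotificationTargetConfig(
#       conversation=slack_conversation,        # a SlackConversation object
#       conversation_type="PUBLIC_CHANNEL",     # or DIRECT / PRIVATE_CHANNEL
#       description="Target for alert notifications",
#   )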
|
PypiClean
|
/google-cloud-automl-2.11.2.tar.gz/google-cloud-automl-2.11.2/google/cloud/automl_v1/types/io.py
|
from __future__ import annotations
from typing import MutableMapping, MutableSequence
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.automl.v1",
manifest={
"InputConfig",
"BatchPredictInputConfig",
"DocumentInputConfig",
"OutputConfig",
"BatchPredictOutputConfig",
"ModelExportOutputConfig",
"GcsSource",
"GcsDestination",
},
)
class InputConfig(proto.Message):
r"""Input configuration for
[AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]
action.
The format of input depends on dataset_metadata the Dataset into
which the import is happening has. As input source the
[gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is
expected, unless specified otherwise. Additionally any input .CSV
file by itself must be 100MB or smaller, unless specified otherwise.
If an "example" file (that is, image, video etc.) with identical
content (even if it had different ``GCS_FILE_PATH``) is mentioned
multiple times, then its label, bounding boxes etc. are appended.
The same file should be always provided with the same ``ML_USE`` and
``GCS_FILE_PATH``, if it is not, then these values are
nondeterministically selected from the given ones.
The formats are represented in EBNF with commas being literal and
with non-terminal symbols defined near the end of this comment. The
formats are:
.. raw:: html
<h4>AutoML Vision</h4>
.. raw:: html
<div class="ds-selector-tabs"><section><h5>Classification</h5>
See `Preparing your training
data <https://cloud.google.com/vision/automl/docs/prepare>`__ for
more information.
CSV file(s) with each line in format:
::
ML_USE,GCS_FILE_PATH,LABEL,LABEL,...
- ``ML_USE`` - Identifies the data set that the current row (file)
applies to. This value can be one of the following:
- ``TRAIN`` - Rows in this file are used to train the model.
- ``TEST`` - Rows in this file are used to test the model during
training.
- ``UNASSIGNED`` - Rows in this file are not categorized. They
are Automatically divided into train and test data. 80% for
training and 20% for testing.
- ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image
of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG,
.WEBP, .BMP, .TIFF, .ICO.
- ``LABEL`` - A label that identifies the object in the image.
For the ``MULTICLASS`` classification type, at most one ``LABEL`` is
allowed per image. If an image has not yet been labeled, then it
should be mentioned just once with no ``LABEL``.
Some sample rows:
::
TRAIN,gs://folder/image1.jpg,daisy
TEST,gs://folder/image2.jpg,dandelion,tulip,rose
UNASSIGNED,gs://folder/image3.jpg,daisy
UNASSIGNED,gs://folder/image4.jpg
.. raw:: html
</section><section><h5>Object Detection</h5>
See [Preparing your training
data](https://cloud.google.com/vision/automl/object-detection/docs/prepare)
for more information.
A CSV file(s) with each line in format:
::
ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,)
- ``ML_USE`` - Identifies the data set that the current row (file)
applies to. This value can be one of the following:
- ``TRAIN`` - Rows in this file are used to train the model.
- ``TEST`` - Rows in this file are used to test the model during
training.
- ``UNASSIGNED`` - Rows in this file are not categorized. They
are Automatically divided into train and test data. 80% for
training and 20% for testing.
- ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image
of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG.
Each image is assumed to be exhaustively labeled.
- ``LABEL`` - A label that identifies the object in the image
specified by the ``BOUNDING_BOX``.
- ``BOUNDING BOX`` - The vertices of an object in the example
image. The minimum allowed ``BOUNDING_BOX`` edge length is 0.01,
and no more than 500 ``BOUNDING_BOX`` instances per image are
allowed (one ``BOUNDING_BOX`` per line). If an image has no
looked for objects then it should be mentioned just once with no
LABEL and the ",,,,,,," in place of the ``BOUNDING_BOX``.
**Four sample rows:**
::
TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
TEST,gs://folder/im3.png,,,,,,,,,
.. raw:: html
</section>
</div>
.. raw:: html
<h4>AutoML Video Intelligence</h4>
.. raw:: html
<div class="ds-selector-tabs"><section><h5>Classification</h5>
See `Preparing your training
data <https://cloud.google.com/video-intelligence/automl/docs/prepare>`__
for more information.
CSV file(s) with each line in format:
::
ML_USE,GCS_FILE_PATH
For ``ML_USE``, do not use ``VALIDATE``.
``GCS_FILE_PATH`` is the path to another .csv file that describes
training example for a given ``ML_USE``, using the following row
format:
::
GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,)
Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up
to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the
length of the video, and the end time must be after the start time.
Any segment of a video which has one or more labels on it, is
considered a hard negative for all other labels. Any segment with no
labels on it is considered to be unknown. If a whole video is
unknown, then it should be mentioned just once with ",," in place of
``LABEL, TIME_SEGMENT_START,TIME_SEGMENT_END``.
Sample top level CSV file:
::
TRAIN,gs://folder/train_videos.csv
TEST,gs://folder/test_videos.csv
UNASSIGNED,gs://folder/other_videos.csv
Sample rows of a CSV file for a particular ML_USE:
::
gs://folder/video1.avi,car,120,180.000021
gs://folder/video1.avi,bike,150,180.000021
gs://folder/vid2.avi,car,0,60.5
gs://folder/vid3.avi,,,
.. raw:: html
</section><section><h5>Object Tracking</h5>
See `Preparing your training
data </video-intelligence/automl/object-tracking/docs/prepare>`__
for more information.
CSV file(s) with each line in format:
::
ML_USE,GCS_FILE_PATH
For ``ML_USE``, do not use ``VALIDATE``.
``GCS_FILE_PATH`` is the path to another .csv file that describes
training example for a given ``ML_USE``, using the following row
format:
::
GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX
or
::
GCS_FILE_PATH,,,,,,,,,,
Here ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up
to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
Providing ``INSTANCE_ID``\ s can help to obtain a better model. When
a specific labeled entity leaves the video frame, and shows up
afterwards it is not required, albeit preferable, that the same
``INSTANCE_ID`` is given to it.
``TIMESTAMP`` must be within the length of the video, the
``BOUNDING_BOX`` is assumed to be drawn on the closest video's frame
    to the ``TIMESTAMP``. Any frame mentioned by a ``TIMESTAMP`` is
expected to be exhaustively labeled and no more than 500
``BOUNDING_BOX``-es per frame are allowed. If a whole video is
unknown, then it should be mentioned just once with ",,,,,,,,,," in
place of ``LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``.
Sample top level CSV file:
::
TRAIN,gs://folder/train_videos.csv
TEST,gs://folder/test_videos.csv
UNASSIGNED,gs://folder/other_videos.csv
Seven sample rows of a CSV file for a particular ML_USE:
::
gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
gs://folder/video2.avi,,,,,,,,,,,
.. raw:: html
</section>
</div>
.. raw:: html
<h4>AutoML Natural Language</h4>
.. raw:: html
<div class="ds-selector-tabs"><section><h5>Entity Extraction</h5>
See `Preparing your training
data </natural-language/automl/entity-analysis/docs/prepare>`__ for
more information.
One or more CSV file(s) with each line in the following format:
::
ML_USE,GCS_FILE_PATH
- ``ML_USE`` - Identifies the data set that the current row (file)
applies to. This value can be one of the following:
- ``TRAIN`` - Rows in this file are used to train the model.
- ``TEST`` - Rows in this file are used to test the model during
training.
- ``UNASSIGNED`` - Rows in this file are not categorized. They
are Automatically divided into train and test data. 80% for
         training and 20% for testing.
    -  ``GCS_FILE_PATH`` - Identifies a JSON Lines (.JSONL) file stored
       in Google Cloud Storage that contains in-line text as
       documents for model training.
After the training data set has been determined from the ``TRAIN``
and ``UNASSIGNED`` CSV files, the training data is divided into
train and validation data sets. 70% for training and 30% for
validation.
For example:
::
TRAIN,gs://folder/file1.jsonl
VALIDATE,gs://folder/file2.jsonl
TEST,gs://folder/file3.jsonl
**In-line JSONL files**
In-line .JSONL files contain, per line, a JSON document that wraps a
[``text_snippet``][google.cloud.automl.v1.TextSnippet] field
followed by one or more
[``annotations``][google.cloud.automl.v1.AnnotationPayload] fields,
which have ``display_name`` and ``text_extraction`` fields to
describe the entity from the text snippet. Multiple JSON documents
can be separated using line breaks (\n).
The supplied text must be annotated exhaustively. For example, if
you include the text "horse", but do not label it as "animal", then
"horse" is assumed to not be an "animal".
Any given text snippet content must have 30,000 characters or less,
and also be UTF-8 NFC encoded. ASCII is accepted as it is UTF-8 NFC
encoded.
For example:
::
{
"text_snippet": {
"content": "dog car cat"
},
"annotations": [
{
"display_name": "animal",
"text_extraction": {
"text_segment": {"start_offset": 0, "end_offset": 2}
}
},
{
"display_name": "vehicle",
"text_extraction": {
"text_segment": {"start_offset": 4, "end_offset": 6}
}
},
{
"display_name": "animal",
"text_extraction": {
"text_segment": {"start_offset": 8, "end_offset": 10}
}
}
]
}\n
{
"text_snippet": {
"content": "This dog is good."
},
"annotations": [
{
"display_name": "animal",
"text_extraction": {
"text_segment": {"start_offset": 5, "end_offset": 7}
}
}
]
}
**JSONL files that reference documents**
.JSONL files contain, per line, a JSON document that wraps a
``input_config`` that contains the path to a source document.
Multiple JSON documents can be separated using line breaks (\n).
Supported document extensions: .PDF, .TIF, .TIFF
For example:
::
{
"document": {
"input_config": {
"gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
}
}
}
}\n
{
"document": {
"input_config": {
"gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
}
}
}
}
**In-line JSONL files with document layout information**
**Note:** You can only annotate documents using the UI. The format
described below applies to annotated documents exported using the UI
or ``exportData``.
In-line .JSONL files for documents contain, per line, a JSON
document that wraps a ``document`` field that provides the textual
content of the document and the layout information.
For example:
::
{
"document": {
"document_text": {
"content": "dog car cat"
}
"layout": [
{
"text_segment": {
"start_offset": 0,
"end_offset": 11,
},
"page_number": 1,
"bounding_poly": {
"normalized_vertices": [
{"x": 0.1, "y": 0.1},
{"x": 0.1, "y": 0.3},
{"x": 0.3, "y": 0.3},
{"x": 0.3, "y": 0.1},
],
},
"text_segment_type": TOKEN,
}
],
"document_dimensions": {
"width": 8.27,
"height": 11.69,
"unit": INCH,
}
"page_count": 3,
},
"annotations": [
{
"display_name": "animal",
"text_extraction": {
"text_segment": {"start_offset": 0, "end_offset": 3}
}
},
{
"display_name": "vehicle",
"text_extraction": {
"text_segment": {"start_offset": 4, "end_offset": 7}
}
},
{
"display_name": "animal",
"text_extraction": {
"text_segment": {"start_offset": 8, "end_offset": 11}
}
},
],
.. raw:: html
</section><section><h5>Classification</h5>
See `Preparing your training
data <https://cloud.google.com/natural-language/automl/docs/prepare>`__
for more information.
One or more CSV file(s) with each line in the following format:
::
ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,...
- ``ML_USE`` - Identifies the data set that the current row (file)
applies to. This value can be one of the following:
- ``TRAIN`` - Rows in this file are used to train the model.
- ``TEST`` - Rows in this file are used to test the model during
training.
- ``UNASSIGNED`` - Rows in this file are not categorized. They
are Automatically divided into train and test data. 80% for
training and 20% for testing.
- ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a
pattern. If the column content is a valid Google Cloud Storage
file path, that is, prefixed by "gs://", it is treated as a
``GCS_FILE_PATH``. Otherwise, if the content is enclosed in
double quotes (""), it is treated as a ``TEXT_SNIPPET``. For
``GCS_FILE_PATH``, the path must lead to a file with supported
extension and UTF-8 encoding, for example,
"gs://folder/content.txt" AutoML imports the file content as a
text snippet. For ``TEXT_SNIPPET``, AutoML imports the column
content excluding quotes. In both cases, size of the content must
be 10MB or less in size. For zip files, the size of each file
inside the zip must be 10MB or less in size.
For the ``MULTICLASS`` classification type, at most one ``LABEL``
is allowed.
The ``ML_USE`` and ``LABEL`` columns are optional. Supported file
extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
A maximum of 100 unique labels are allowed per CSV row.
Sample rows:
::
TRAIN,"They have bad food and very rude",RudeService,BadFood
gs://folder/content.txt,SlowService
TEST,gs://folder/document.pdf
VALIDATE,gs://folder/text_files.zip,BadFood
.. raw:: html
</section><section><h5>Sentiment Analysis</h5>
See `Preparing your training
data <https://cloud.google.com/natural-language/automl/docs/prepare>`__
for more information.
CSV file(s) with each line in format:
::
ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT
- ``ML_USE`` - Identifies the data set that the current row (file)
applies to. This value can be one of the following:
- ``TRAIN`` - Rows in this file are used to train the model.
- ``TEST`` - Rows in this file are used to test the model during
training.
- ``UNASSIGNED`` - Rows in this file are not categorized. They
are Automatically divided into train and test data. 80% for
training and 20% for testing.
- ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a
pattern. If the column content is a valid Google Cloud Storage
file path, that is, prefixed by "gs://", it is treated as a
``GCS_FILE_PATH``. Otherwise, if the content is enclosed in
double quotes (""), it is treated as a ``TEXT_SNIPPET``. For
``GCS_FILE_PATH``, the path must lead to a file with supported
extension and UTF-8 encoding, for example,
"gs://folder/content.txt" AutoML imports the file content as a
text snippet. For ``TEXT_SNIPPET``, AutoML imports the column
content excluding quotes. In both cases, size of the content must
be 128kB or less in size. For zip files, the size of each file
inside the zip must be 128kB or less in size.
The ``ML_USE`` and ``SENTIMENT`` columns are optional. Supported
file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP
- ``SENTIMENT`` - An integer between 0 and
Dataset.text_sentiment_dataset_metadata.sentiment_max
(inclusive). Describes the ordinal of the sentiment - higher
value means a more positive sentiment. All the values are
completely relative, i.e. neither 0 needs to mean a negative or
neutral sentiment nor sentiment_max needs to mean a positive one
- it is just required that 0 is the least positive sentiment in
the data, and sentiment_max is the most positive one. The
SENTIMENT shouldn't be confused with "score" or "magnitude" from
the previous Natural Language Sentiment Analysis API. All
SENTIMENT values between 0 and sentiment_max must be represented
in the imported data. On prediction the same 0 to sentiment_max
range will be used. The difference between neighboring sentiment
       values need not be uniform, e.g. 1 and 2 may be similar
whereas the difference between 2 and 3 may be large.
Sample rows:
::
TRAIN,"@freewrytin this is way too good for your product",2
gs://folder/content.txt,3
TEST,gs://folder/document.pdf
VALIDATE,gs://folder/text_files.zip,2
.. raw:: html
</section>
</div>
.. raw:: html
<h4>AutoML Tables</h4><div class="ui-datasection-main"><section
class="selected">
See `Preparing your training
data <https://cloud.google.com/automl-tables/docs/prepare>`__ for
more information.
You can use either
[gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or
[bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source].
All input is concatenated into a single
[primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id]
**For gcs_source:**
CSV file(s), where the first row of the first file is the header,
containing unique column names. If the first row of a subsequent
file is the same as the header, then it is also treated as a header.
All other rows contain values for the corresponding columns.
Each .CSV file by itself must be 10GB or smaller, and their total
size must be 100GB or smaller.
First three sample rows of a CSV file:
.. raw:: html
<pre>
"Id","First Name","Last Name","Dob","Addresses"
"1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
"2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
</pre>
**For bigquery_source:**
An URI of a BigQuery table. The user data size of the BigQuery table
must be 100GB or smaller.
An imported table must have between 2 and 1,000 columns, inclusive,
    and between 1000 and 100,000,000 rows, inclusive. At most 5 import
    data operations can run in parallel.
.. raw:: html
</section>
</div>
**Input field definitions:**
``ML_USE`` : ("TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED")
Describes how the given example (file) should be used for model
training. "UNASSIGNED" can be used when user has no preference.
``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For
example, "gs://folder/image1.png".
``LABEL`` : A display name of an object on an image, video etc.,
e.g. "dog". Must be up to 32 characters long and can consist only of
ASCII Latin letters A-Z and a-z, underscores(_), and ASCII digits
0-9. For each label an AnnotationSpec is created which display_name
becomes the label; AnnotationSpecs are given back in predictions.
``INSTANCE_ID`` : A positive integer that identifies a specific
instance of a labeled entity on an example. Used e.g. to track two
cars on a video while being able to tell apart which one is which.
``BOUNDING_BOX`` : (``VERTEX,VERTEX,VERTEX,VERTEX`` \|
``VERTEX,,,VERTEX,,``) A rectangle parallel to the frame of the
example (image, video). If 4 vertices are given they are connected
by edges in the order provided, if 2 are given they are recognized
as diagonally opposite vertices of the rectangle.
``VERTEX`` : (``COORDINATE,COORDINATE``) First coordinate is
horizontal (x), the second is vertical (y).
``COORDINATE`` : A float in 0 to 1 range, relative to total length
of image or video in given dimension. For fractions the leading
non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in top
left.
``TIME_SEGMENT_START`` : (``TIME_OFFSET``) Expresses a beginning,
inclusive, of a time segment within an example that has a time
dimension (e.g. video).
``TIME_SEGMENT_END`` : (``TIME_OFFSET``) Expresses an end,
    exclusive, of a time segment within an example that has a time
dimension (e.g. video).
``TIME_OFFSET`` : A number of seconds as measured from the start of
an example (e.g. video). Fractions are allowed, up to a microsecond
precision. "inf" is allowed, and it means the end of the example.
``TEXT_SNIPPET`` : The content of a text snippet, UTF-8 encoded,
enclosed within double quotes ("").
``DOCUMENT`` : A field that provides the textual content with
document and the layout information.
**Errors:**
If any of the provided CSV files can't be parsed or if more than
certain percent of CSV rows cannot be processed then the operation
fails and nothing is imported. Regardless of overall success or
failure the per-row failures, up to a certain count cap, is listed
in Operation.metadata.partial_failures.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_source (google.cloud.automl_v1.types.GcsSource):
The Google Cloud Storage location for the input content. For
[AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData],
``gcs_source`` points to a CSV file with a structure
described in
[InputConfig][google.cloud.automl.v1.InputConfig].
This field is a member of `oneof`_ ``source``.
params (MutableMapping[str, str]):
Additional domain-specific parameters describing the
semantic of the imported data, any string must be up to
25000 characters long.
.. raw:: html
<h4>AutoML Tables</h4>
``schema_inference_version`` : (integer) This value must be
supplied. The version of the algorithm to use for the
initial inference of the column data types of the imported
table. Allowed values: "1".
"""
gcs_source: "GcsSource" = proto.Field(
proto.MESSAGE,
number=1,
oneof="source",
message="GcsSource",
)
params: MutableMapping[str, str] = proto.MapField(
proto.STRING,
proto.STRING,
number=2,
)
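# Hedged usage sketch (illustrative): an InputConfig for AutoMl.ImportData
# pointing at a CSV in Cloud Storage, built from the GcsSource message
# declared in this module. The bucket path is a placeholder.
#
#   from google.cloud import automl_v1
#
#   input_config = automl_v1.InputConfig(
#       gcs_source=automl_v1.GcsSource(
#           input_uris=["gs://folder/train_data.csv"],
#       ),
#   )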
class BatchPredictInputConfig(proto.Message):
r"""Input configuration for BatchPredict Action.
The format of input depends on the ML problem of the model used for
prediction. As input source the
[gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] is
expected, unless specified otherwise.
The formats are represented in EBNF with commas being literal and
with non-terminal symbols defined near the end of this comment. The
formats are:
.. raw:: html
<h4>AutoML Vision</h4>
<div class="ds-selector-tabs"><section><h5>Classification</h5>
One or more CSV files where each line is a single column:
::
GCS_FILE_PATH
The Google Cloud Storage location of an image of up to 30MB in size.
Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the
ID in the batch predict output.
Sample rows:
::
gs://folder/image1.jpeg
gs://folder/image2.gif
gs://folder/image3.png
.. raw:: html
</section><section><h5>Object Detection</h5>
One or more CSV files where each line is a single column:
::
GCS_FILE_PATH
The Google Cloud Storage location of an image of up to 30MB in size.
Supported extensions: .JPEG, .GIF, .PNG. This path is treated as the
ID in the batch predict output.
Sample rows:
::
gs://folder/image1.jpeg
gs://folder/image2.gif
gs://folder/image3.png
.. raw:: html
</section>
</div>
.. raw:: html
<h4>AutoML Video Intelligence</h4>
<div class="ds-selector-tabs"><section><h5>Classification</h5>
One or more CSV files where each line is a single column:
::
GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
``GCS_FILE_PATH`` is the Google Cloud Storage location of video up
    to 50GB in size and up to 3h in duration. Supported
extensions: .MOV, .MPEG4, .MP4, .AVI.
``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the
length of the video, and the end time must be after the start time.
Sample rows:
::
gs://folder/video1.mp4,10,40
gs://folder/video1.mp4,20,60
gs://folder/vid2.mov,0,inf
.. raw:: html
</section><section><h5>Object Tracking</h5>
One or more CSV files where each line is a single column:
::
GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
``GCS_FILE_PATH`` is the Google Cloud Storage location of video up
    to 50GB in size and up to 3h in duration. Supported
extensions: .MOV, .MPEG4, .MP4, .AVI.
``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the
length of the video, and the end time must be after the start time.
Sample rows:
::
gs://folder/video1.mp4,10,40
gs://folder/video1.mp4,20,60
gs://folder/vid2.mov,0,inf
.. raw:: html
</section>
</div>
.. raw:: html
<h4>AutoML Natural Language</h4>
<div class="ds-selector-tabs"><section><h5>Classification</h5>
One or more CSV files where each line is a single column:
::
GCS_FILE_PATH
``GCS_FILE_PATH`` is the Google Cloud Storage location of a text
file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF
Text files can be no larger than 10MB in size.
Sample rows:
::
gs://folder/text1.txt
gs://folder/text2.pdf
gs://folder/text3.tif
.. raw:: html
</section><section><h5>Sentiment Analysis</h5>
One or more CSV files where each line is a single column:
::
GCS_FILE_PATH
``GCS_FILE_PATH`` is the Google Cloud Storage location of a text
file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF
Text files can be no larger than 128kB in size.
Sample rows:
::
gs://folder/text1.txt
gs://folder/text2.pdf
gs://folder/text3.tif
.. raw:: html
</section><section><h5>Entity Extraction</h5>
One or more JSONL (JSON Lines) files that either provide inline text
or documents. You can only use one format, either inline text or
documents, for a single call to [AutoMl.BatchPredict].
    Each JSONL file contains, per line, a proto that wraps a temporary
user-assigned TextSnippet ID (string up to 2000 characters long)
called "id", a TextSnippet proto (in JSON representation) and zero
or more TextFeature protos. Any given text snippet content must have
30,000 characters or less, and also be UTF-8 NFC encoded (ASCII
already is). The IDs provided should be unique.
Each document JSONL file contains, per line, a proto that wraps a
Document proto with ``input_config`` set. Each document cannot
exceed 2MB in size.
Supported document extensions: .PDF, .TIF, .TIFF
Each JSONL file must not exceed 100MB in size, and no more than 20
JSONL files may be passed.
Sample inline JSONL file (Shown with artificial line breaks. Actual
line breaks are denoted by "\n".):
::
{
"id": "my_first_id",
"text_snippet": { "content": "dog car cat"},
"text_features": [
{
"text_segment": {"start_offset": 4, "end_offset": 6},
"structural_type": PARAGRAPH,
"bounding_poly": {
"normalized_vertices": [
{"x": 0.1, "y": 0.1},
{"x": 0.1, "y": 0.3},
{"x": 0.3, "y": 0.3},
{"x": 0.3, "y": 0.1},
]
},
}
],
}\n
{
"id": "2",
"text_snippet": {
"content": "Extended sample content",
"mime_type": "text/plain"
}
}
Sample document JSONL file (Shown with artificial line breaks.
Actual line breaks are denoted by "\n".):
::
{
"document": {
"input_config": {
"gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ]
}
}
}
}\n
{
"document": {
"input_config": {
"gcs_source": { "input_uris": [ "gs://folder/document2.tif" ]
}
}
}
}
.. raw:: html
</section>
</div>
.. raw:: html
<h4>AutoML Tables</h4><div class="ui-datasection-main"><section
class="selected">
See `Preparing your training
data <https://cloud.google.com/automl-tables/docs/predict-batch>`__
for more information.
You can use either
[gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source]
or [bigquery_source][BatchPredictInputConfig.bigquery_source].
**For gcs_source:**
CSV file(s), each by itself 10GB or smaller and total size must be
100GB or smaller, where first file must have a header containing
column names. If the first row of a subsequent file is the same as
the header, then it is also treated as a header. All other rows
contain values for the corresponding columns.
The column names must contain the model's
[input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
[display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
(order doesn't matter). The columns corresponding to the model's
input feature column specs must contain values compatible with the
column spec's data types. Prediction on all the rows, i.e. the CSV
lines, will be attempted.
Sample rows from a CSV file:
.. raw:: html
<pre>
"First Name","Last Name","Dob","Addresses"
"John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
"Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
</pre>
**For bigquery_source:**
The URI of a BigQuery table. The user data size of the BigQuery
table must be 100GB or smaller.
The column names must contain the model's
[input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs]
[display_name-s][google.cloud.automl.v1.ColumnSpec.display_name]
(order doesn't matter). The columns corresponding to the model's
input feature column specs must contain values compatible with the
column spec's data types. Prediction on all the rows of the table
will be attempted.
.. raw:: html
</section>
</div>
**Input field definitions:**
``GCS_FILE_PATH`` : The path to a file on Google Cloud Storage. For
example, "gs://folder/video.avi".
``TIME_SEGMENT_START`` : (``TIME_OFFSET``) Expresses a beginning,
inclusive, of a time segment within an example that has a time
dimension (e.g. video).
``TIME_SEGMENT_END`` : (``TIME_OFFSET``) Expresses an end,
    exclusive, of a time segment within an example that has a time
dimension (e.g. video).
``TIME_OFFSET`` : A number of seconds as measured from the start of
an example (e.g. video). Fractions are allowed, up to a microsecond
precision. "inf" is allowed, and it means the end of the example.
**Errors:**
If any of the provided CSV files can't be parsed or if more than
certain percent of CSV rows cannot be processed then the operation
fails and prediction does not happen. Regardless of overall success
or failure the per-row failures, up to a certain count cap, will be
listed in Operation.metadata.partial_failures.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_source (google.cloud.automl_v1.types.GcsSource):
Required. The Google Cloud Storage location
for the input content.
This field is a member of `oneof`_ ``source``.
"""
gcs_source: "GcsSource" = proto.Field(
proto.MESSAGE,
number=1,
oneof="source",
message="GcsSource",
)
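# Hedged usage sketch (illustrative): a BatchPredictInputConfig mirroring the
# InputConfig example above, pointing at batch-prediction inputs in Cloud
# Storage. Paths are placeholders.
#
#   batch_input = automl_v1.BatchPredictInputConfig(
#       gcs_source=automl_v1.GcsSource(
#           input_uris=["gs://folder/image1.jpeg", "gs://folder/image2.png"],
#       ),
#   )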
class DocumentInputConfig(proto.Message):
r"""Input configuration of a
[Document][google.cloud.automl.v1.Document].
Attributes:
gcs_source (google.cloud.automl_v1.types.GcsSource):
The Google Cloud Storage location of the
document file. Only a single path should be
given.
Max supported size: 512MB.
Supported extensions: .PDF.
"""
gcs_source: "GcsSource" = proto.Field(
proto.MESSAGE,
number=1,
message="GcsSource",
)
class OutputConfig(proto.Message):
r"""- For Translation: CSV file ``translation.csv``, with each line in
format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file
which describes examples that have given ML_USE, using the
following row format per line: TEXT_SNIPPET (in source language)
\\t TEXT_SNIPPET (in target language)
- For Tables: Output depends on whether the dataset was imported
from Google Cloud Storage or BigQuery. Google Cloud Storage
case:
[gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination]
must be set. Exported are CSV file(s) ``tables_1.csv``,
``tables_2.csv``,...,\ ``tables_N.csv`` with each having as
header line the table's column names, and all other lines
contain values for the header columns. BigQuery case:
[bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
pointing to a BigQuery project must be set. In the given
project a new dataset will be created with name
``export_data_<automl-dataset-display-name>_<timestamp-of-export-call>``
       where the dataset display name is made BigQuery-dataset-name compatible (e.g. most
special characters will become underscores), and timestamp
will be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601"
format. In that dataset a new table called ``primary_table``
will be created, and filled with precisely the same data as
       was obtained on import.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_destination (google.cloud.automl_v1.types.GcsDestination):
Required. The Google Cloud Storage location where the output
is to be written to. For Image Object Detection, Text
Extraction, Video Classification and Tables, in the given
directory a new directory will be created with name:
export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ
ISO-8601 format. All export output will be written into that
directory.
This field is a member of `oneof`_ ``destination``.
"""
gcs_destination: "GcsDestination" = proto.Field(
proto.MESSAGE,
number=1,
oneof="destination",
message="GcsDestination",
)
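# Hedged usage sketch (illustrative): an OutputConfig exporting to a Cloud
# Storage directory via the GcsDestination message declared in this module.
# The `output_uri_prefix` field name is an assumption about GcsDestination's
# shape and should be checked against its definition below.
#
#   output_config = automl_v1.OutputConfig(
#       gcs_destination=automl_v1.GcsDestination(
#           output_uri_prefix="gs://folder/export/",
#       ),
#   )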
class BatchPredictOutputConfig(proto.Message):
r"""Output configuration for BatchPredict Action.
As destination the
[gcs_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_destination]
must be set unless specified otherwise for a domain. If
gcs_destination is set then in the given directory a new directory
is created. Its name will be "prediction--", where timestamp is in
YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends
on the ML problem the predictions are made for.
- For Image Classification: In the created directory files
``image_classification_1.jsonl``,
``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl``
will be created, where N may be 1, and depends on the total
number of the successfully predicted images and annotations. A
single image will be listed only once with all its annotations,
and its annotations will never be split across files. Each .JSONL
file will contain, per line, a JSON representation of a proto
that wraps image's "ID" : "<id_value>" followed by a list of zero
or more AnnotationPayload protos (called annotations), which have
classification detail populated. If prediction for any image
failed (partially or completely), then an additional
``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl``
files will be created (N depends on total number of failed
predictions). These files will have a JSON representation of a
proto that wraps the same "ID" : "<id_value>" but here followed
by exactly one
```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__
containing only ``code`` and ``message``\ fields.
- For Image Object Detection: In the created directory files
``image_object_detection_1.jsonl``,
``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl``
will be created, where N may be 1, and depends on the total
number of the successfully predicted images and annotations. Each
.JSONL file will contain, per line, a JSON representation of a
proto that wraps image's "ID" : "<id_value>" followed by a list
of zero or more AnnotationPayload protos (called annotations),
which have image_object_detection detail populated. A single
image will be listed only once with all its annotations, and its
annotations will never be split across files. If prediction for
any image failed (partially or completely), then additional
``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl``
files will be created (N depends on total number of failed
predictions). These files will have a JSON representation of a
proto that wraps the same "ID" : "<id_value>" but here followed
by exactly one
```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__
containing only ``code`` and ``message``\ fields.
- For Video Classification: In the created directory a
video_classification.csv file, and a .JSON file per each video
classification requested in the input (i.e. each line in given
CSV(s)), will be created.
::
The format of video_classification.csv is:
GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
where:
GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
the prediction input lines (i.e. video_classification.csv has
precisely the same number of lines as the prediction input had.)
JSON_FILE_NAME = Name of .JSON file in the output directory, which
contains prediction responses for the video time segment.
STATUS = "OK" if prediction completed successfully, or an error code
with message otherwise. If STATUS is not "OK" then the .JSON file
for that line may not exist or be empty.
Each .JSON file, assuming STATUS is "OK", will contain a list of
AnnotationPayload protos in JSON format, which are the predictions
for the video time segment the file is assigned to in the
video_classification.csv. All AnnotationPayload protos will have
video_classification field set, and will be sorted by
video_classification.type field (note that the returned types are
governed by `classifaction_types` parameter in
[PredictService.BatchPredictRequest.params][]).
- For Video Object Tracking: In the created directory a
video_object_tracking.csv file will be created, and multiple
files video_object_trackinng_1.json,
video_object_trackinng_2.json,..., video_object_trackinng_N.json,
where N is the number of requests in the input (i.e. the number
of lines in given CSV(s)).
::
The format of video_object_tracking.csv is:
GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
where:
GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
the prediction input lines (i.e. video_object_tracking.csv has
precisely the same number of lines as the prediction input had.)
JSON_FILE_NAME = Name of .JSON file in the output directory, which
contains prediction responses for the video time segment.
STATUS = "OK" if prediction completed successfully, or an error
code with message otherwise. If STATUS is not "OK" then the .JSON
file for that line may not exist or be empty.
Each .JSON file, assuming STATUS is "OK", will contain a list of
AnnotationPayload protos in JSON format, which are the predictions
for each frame of the video time segment the file is assigned to in
video_object_tracking.csv. All AnnotationPayload protos will have
video_object_tracking field set.
- For Text Classification: In the created directory files
``text_classification_1.jsonl``,
``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl``
will be created, where N may be 1, and depends on the total
number of inputs and annotations found.
::
Each .JSONL file will contain, per line, a JSON representation of a
proto that wraps input text file (or document) in
the text snippet (or document) proto and a list of
zero or more AnnotationPayload protos (called annotations), which
have classification detail populated. A single text file (or
document) will be listed only once with all its annotations, and its
annotations will never be split across files.
If prediction for any input file (or document) failed (partially or
completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
`errors_N.jsonl` files will be created (N depends on total number of
failed predictions). These files will have a JSON representation of a
proto that wraps input file followed by exactly one
[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
containing only `code` and `message`.
- For Text Sentiment: In the created directory files
``text_sentiment_1.jsonl``,
``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will
be created, where N may be 1, and depends on the total number of
inputs and annotations found.
::
Each .JSONL file will contain, per line, a JSON representation of a
proto that wraps input text file (or document) in
the text snippet (or document) proto and a list of
zero or more AnnotationPayload protos (called annotations), which
have text_sentiment detail populated. A single text file (or
document) will be listed only once with all its annotations, and its
annotations will never be split across files.
If prediction for any input file (or document) failed (partially or
completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
`errors_N.jsonl` files will be created (N depends on total number of
failed predictions). These files will have a JSON representation of a
proto that wraps input file followed by exactly one
[`google.rpc.Status`](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
containing only `code` and `message`.
- For Text Extraction: In the created directory files
``text_extraction_1.jsonl``,
``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl``
will be created, where N may be 1, and depends on the total
number of inputs and annotations found. The contents of these
.JSONL file(s) depend on whether the input used inline text, or
documents. If input was inline, then each .JSONL file will
contain, per line, a JSON representation of a proto that wraps
given in request text snippet's "id" (if specified), followed by
input text snippet, and a list of zero or more AnnotationPayload
protos (called annotations), which have text_extraction detail
populated. A single text snippet will be listed only once with
all its annotations, and its annotations will never be split
across files. If input used documents, then each .JSONL file will
contain, per line, a JSON representation of a proto that wraps
given in request document proto, followed by its OCR-ed
representation in the form of a text snippet, finally followed by
a list of zero or more AnnotationPayload protos (called
annotations), which have text_extraction detail populated and
refer, via their indices, to the OCR-ed text snippet. A single
document (and its text snippet) will be listed only once with all
its annotations, and its annotations will never be split across
files. If prediction for any text snippet failed (partially or
completely), then additional ``errors_1.jsonl``,
``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created
(N depends on total number of failed predictions). These files
will have a JSON representation of a proto that wraps either the
"id" : "<id_value>" (in case of inline) or the document proto (in
case of document) but here followed by exactly one
```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__
containing only ``code`` and ``message``.
- For Tables: Output depends on whether
[gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination]
or
[bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination]
is set (either is allowed). Google Cloud Storage case: In the
created directory files ``tables_1.csv``, ``tables_2.csv``,...,
``tables_N.csv`` will be created, where N may be 1, and depends
on the total number of the successfully predicted rows. For all
CLASSIFICATION
[prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
Each .csv file will contain a header, listing all columns'
[display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
given on input followed by M target column names in the format of
"<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
[display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>*\ score"
where M is the number of distinct target values, i.e. number of
distinct values in the target column of the table used to train
the model. Subsequent lines will contain the respective values of
successfully predicted rows, with the last, i.e. the target,
columns having the corresponding prediction
[scores][google.cloud.automl.v1p1beta.TablesAnnotation.score].
For REGRESSION and FORECASTING
[prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]:
Each .csv file will contain a header, listing all columns'
[display_name-s][google.cloud.automl.v1p1beta.display_name] given
on input followed by the predicted target column with name in the
format of
"predicted\ <[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
[display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
Subsequent lines will contain the respective values of
successfully predicted rows, with the last, i.e. the target,
column having the predicted target value. If prediction for any
rows failed, then an additional ``errors_1.csv``,
``errors_2.csv``,..., ``errors_N.csv`` will be created (N depends
on total number of failed rows). These files will have analogous
format as ``tables_*.csv``, but always with a single target
column
having*\ ```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__\ *represented
as a JSON string, and containing only ``code`` and ``message``.
BigQuery case:
[bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination]
pointing to a BigQuery project must be set. In the given project
a new dataset will be created with name
``prediction_<model-display-name>_<timestamp-of-prediction-call>``
where will be made BigQuery-dataset-name compatible (e.g. most
special characters will become underscores), and timestamp will
be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the
dataset two tables will be created, ``predictions``, and
``errors``. The ``predictions`` table's column names will be the
input columns'
[display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name]
followed by the target column with name in the format of
"predicted*\ <[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
[display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>"
The input feature columns will contain the respective values of
successfully predicted rows, with the target column having an
ARRAY of
[AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload],
represented as STRUCT-s, containing
[TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation].
The ``errors`` table contains rows for which the prediction has
failed, it has analogous input columns while the target column
name is in the format of
"errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec]
[display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>",
and as a value has
```google.rpc.Status`` <https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto>`__
represented as a STRUCT, and containing only ``code`` and
``message``.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_destination (google.cloud.automl_v1.types.GcsDestination):
Required. The Google Cloud Storage location
of the directory where the output is to be
written to.
This field is a member of `oneof`_ ``destination``.
"""
gcs_destination: "GcsDestination" = proto.Field(
proto.MESSAGE,
number=1,
oneof="destination",
message="GcsDestination",
)
class ModelExportOutputConfig(proto.Message):
r"""Output configuration for ModelExport Action.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_destination (google.cloud.automl_v1.types.GcsDestination):
Required. The Google Cloud Storage location where the model
is to be written to. This location may only be set for the
following model formats: "tflite", "edgetpu_tflite",
"tf_saved_model", "tf_js", "core_ml".
Under the directory given as the destination a new one with
name "model-export--", where timestamp is in
YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created.
Inside the model and any of its supporting files will be
written.
This field is a member of `oneof`_ ``destination``.
model_format (str):
The format in which the model must be exported. The
available, and default, formats depend on the problem and
model type (if given problem and type combination doesn't
have a format listed, it means its models are not
exportable):
- For Image Classification mobile-low-latency-1,
mobile-versatile-1, mobile-high-accuracy-1: "tflite"
(default), "edgetpu_tflite", "tf_saved_model", "tf_js",
"docker".
- For Image Classification mobile-core-ml-low-latency-1,
mobile-core-ml-versatile-1,
mobile-core-ml-high-accuracy-1: "core_ml" (default).
- For Image Object Detection mobile-low-latency-1,
mobile-versatile-1, mobile-high-accuracy-1: "tflite",
"tf_saved_model", "tf_js". Formats description:
- tflite - Used for Android mobile devices.
- edgetpu_tflite - Used for `Edge
TPU <https://cloud.google.com/edge-tpu/>`__ devices.
- tf_saved_model - A tensorflow model in SavedModel format.
- tf_js - A
`TensorFlow.js <https://www.tensorflow.org/js>`__ model
that can be used in the browser and in Node.js using
JavaScript.
- docker - Used for Docker containers. Use the params field
to customize the container. The container is verified to
work correctly on ubuntu 16.04 operating system. See more
at `containers
quickstart <https://cloud.google.com/vision/automl/docs/containers-gcs-quickstart>`__
- core_ml - Used for iOS mobile devices.
params (MutableMapping[str, str]):
Additional model-type and format specific parameters
describing the requirements for the to be exported model
files, any string must be up to 25000 characters long.
- For ``docker`` format: ``cpu_architecture`` - (string)
"x86_64" (default). ``gpu_architecture`` - (string)
"none" (default), "nvidia".
"""
gcs_destination: "GcsDestination" = proto.Field(
proto.MESSAGE,
number=1,
oneof="destination",
message="GcsDestination",
)
model_format: str = proto.Field(
proto.STRING,
number=4,
)
params: MutableMapping[str, str] = proto.MapField(
proto.STRING,
proto.STRING,
number=2,
)
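# Hedged usage sketch (illustrative, not part of the generated API surface):
# building a ModelExportOutputConfig for the "docker" format with the params
# documented above. The bucket path is a placeholder.
def _example_model_export_output_config():
    return ModelExportOutputConfig(
        gcs_destination=GcsDestination(
            output_uri_prefix="gs://my-bucket/model-exports/"  # hypothetical bucket
        ),
        model_format="docker",
        params={"cpu_architecture": "x86_64", "gpu_architecture": "none"},
    )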
class GcsSource(proto.Message):
r"""The Google Cloud Storage location for the input content.
Attributes:
input_uris (MutableSequence[str]):
Required. Google Cloud Storage URIs to input files, up to
2000 characters long. Accepted forms:
- Full object path, e.g. gs://bucket/directory/object.csv
"""
input_uris: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
class GcsDestination(proto.Message):
r"""The Google Cloud Storage location where the output is to be
written to.
Attributes:
output_uri_prefix (str):
Required. Google Cloud Storage URI to output directory, up
to 2000 characters long. Accepted forms:
- Prefix path: gs://bucket/directory The requesting user
must have write permission to the bucket. The directory
is created if it doesn't exist.
"""
output_uri_prefix: str = proto.Field(
proto.STRING,
number=1,
)
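# Hedged usage sketch: pairing a GcsSource (batch inputs) with a
# BatchPredictOutputConfig (batch outputs) as described in the docstrings
# above. The gs:// paths are placeholders, not real resources.
def _example_batch_predict_io():
    source = GcsSource(input_uris=["gs://my-bucket/batch/inputs.csv"])
    destination = BatchPredictOutputConfig(
        gcs_destination=GcsDestination(output_uri_prefix="gs://my-bucket/batch/results/")
    )
    return source, destination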
__all__ = tuple(sorted(__protobuf__.manifest))
|
PypiClean
|
/unified_planning-1.0.0-py3-none-any.whl/unified_planning/engines/meta_engine.py
|
"""This module defines the meta engine interface."""
from abc import abstractmethod
from unified_planning.exceptions import UPUsageError
from unified_planning.engines.engine import Engine, EngineMeta
from unified_planning.model import ProblemKind
from functools import partial
from typing import Type
class MetaEngineMeta(EngineMeta):
def __getitem__(self, engine_class: Type[Engine]):
assert issubclass(self, MetaEngine)
if not self.is_compatible_engine(engine_class): # type: ignore
raise UPUsageError(f"{engine_class.name} is not compatible with the meta engine {self.name}") # type: ignore
class MetaEngineImpl(self): # type: ignore
_engine_class = engine_class
@staticmethod
def supported_kind() -> ProblemKind:
return self._supported_kind(engine_class) # type: ignore
@staticmethod
def supports(problem_kind: ProblemKind) -> bool:
return self._supports(problem_kind, engine_class) # type: ignore
return MetaEngineImpl
class MetaEngine(Engine, metaclass=MetaEngineMeta):
"""
This class represents a meta engine.
A meta engine is an `Engine` that can be instantiated over a generic `Engine`.
e.g. `OversubscriptionPlanner[Tamer]` is an `Engine` class that uses the `MetaEngine`
`OversubscriptionPlanner` over the `Tamer Engine`.
"""
def __init__(self, *args, **kwargs):
Engine.__init__(self)
self._engine = self._engine_class(*args, **kwargs) # type: ignore
@property
def engine(self) -> Engine:
"""Returns the engine used by this `MetaEngine` class"""
return self._engine
@property
def skip_checks(self) -> bool:
"""Same as :func:`skip_checks <unified_planning.engines.Engine.skip_checks>`"""
return self._skip_checks
@skip_checks.setter
def skip_checks(self, new_value: bool):
"""Same as :func:`skip_checks <unified_planning.engines.Engine.skip_checks>`"""
self._skip_checks = new_value
self._engine.skip_checks = new_value
@property
def error_on_failed_checks(self) -> bool:
"""Same as :func:`error_on_failed_checks <unified_planning.engines.Engine.error_on_failed_checks>`"""
return self._error_on_failed_checks
@error_on_failed_checks.setter
def error_on_failed_checks(self, new_value: bool):
"""Same as :func:`error_on_failed_checks <unified_planning.engines.Engine.error_on_failed_checks>`"""
self._error_on_failed_checks = new_value
self.engine.error_on_failed_checks = new_value
@staticmethod
@abstractmethod
def is_compatible_engine(engine: Type[Engine]) -> bool:
"""
Returns `True` iff the given `engine` is compatible with this `MetaEngine`.
:param engine: The `Engine` Class tested for compatibility.
:return: `True` iff the given `engine` is compatible with this `MetaEngine`
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def _supported_kind(engine: Type[Engine]) -> ProblemKind:
"""Returns the supported kind of this meta engine with the given engine"""
raise NotImplementedError
@staticmethod
@abstractmethod
def _supports(problem_kind: ProblemKind, engine: Type[Engine]) -> bool:
"""Returns true iff the given problem kind is supported by this meta
engine with the given engine"""
raise NotImplementedError
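# Hedged sketch of a concrete MetaEngine subclass (illustrative only, not part
# of unified_planning). It assumes the wrapped engine class exposes the usual
# Engine helpers `is_oneshot_planner()`, `supported_kind()` and `supports()`;
# the delegation pattern is the point, not the exact checks.
class _ExampleMetaEngine(MetaEngine):
    @property
    def name(self) -> str:
        # `self.engine` is the wrapped engine instance created in __init__.
        return f"ExampleMetaEngine[{self.engine.name}]"
    @staticmethod
    def is_compatible_engine(engine: Type[Engine]) -> bool:
        # Accept any engine that can act as a one-shot planner (assumption).
        return engine.is_oneshot_planner()
    @staticmethod
    def _supported_kind(engine: Type[Engine]) -> ProblemKind:
        # Delegate to the wrapped engine's declared problem kind.
        return engine.supported_kind()
    @staticmethod
    def _supports(problem_kind: ProblemKind, engine: Type[Engine]) -> bool:
        return engine.supports(problem_kind)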
|
PypiClean
|
/tf_encrypt-0.0.1-py3-none-any.whl/tf_encrypted/layers/convolution.py
|
import numpy as np
from typing import List
from . import core
class Conv2D(core.Layer):
"""
2-dimensional convolutional layer; expects NCHW data format.
:param List[int] input_shape: The shape of the data flowing into the convolution.
:param List[int] filter_shape: The shape of the convolutional filter. Expected to be rank 4.
:param int strides: The size of the stride.
:param str padding: The type of padding ("SAME" or "VALID").
:param lambda filter_init: Lambda function that takes a shape parameter and returns initial filter values.
`Example`
.. code-block:: python
Conv2D(input_shape=[32, 1, 28, 28], filter_shape=[4, 4, 1, 20], strides=2,
filter_init=lambda shp: np.random.normal(scale=0.01, size=shp))
"""
def __init__(self,
input_shape: List[int], filter_shape: List[int],
strides: int = 1, padding: str = "SAME",
filter_init=lambda shp: np.random.normal(scale=0.1, size=shp),
l2reg_lambda: float = 0.0, channels_first: bool = True) -> None:
self.fshape = filter_shape
self.strides = strides
self.padding = padding
self.filter_init = filter_init
self.l2reg_lambda = l2reg_lambda
self.cache = None
self.cached_x_col = None
self.cached_input_shape = None
self.initializer = None
self.weights = None
self.bias = None
self.model = None
self.channels_first = channels_first
super(Conv2D, self).__init__(input_shape)
def get_output_shape(self) -> List[int]:
h_filter, w_filter, d_filters, n_filters = self.fshape
if self.channels_first:
n_x, d_x, h_x, w_x = self.input_shape
else:
n_x, h_x, w_x, d_x = self.input_shape
if self.padding == "SAME":
h_out = int(np.ceil(float(h_x) / float(self.strides)))
w_out = int(np.ceil(float(w_x) / float(self.strides)))
if self.padding == "VALID":
h_out = int(np.ceil(float(h_x - h_filter + 1) / float(self.strides)))
w_out = int(np.ceil(float(w_x - w_filter + 1) / float(self.strides)))
return [n_x, n_filters, h_out, w_out]
def initialize(self, initial_weights=None) -> None:
if initial_weights is None:
initial_weights = self.filter_init(self.fshape)
self.weights = self.prot.define_private_variable(initial_weights)
self.bias = self.prot.define_private_variable(np.zeros(self.output_shape[1:]))
def forward(self, x):
self.cached_input_shape = x.shape
self.cache = x
if not self.channels_first:
x = self.prot.transpose(x, perm=[0, 3, 1, 2])
out = self.prot.conv2d(x, self.weights, self.strides, self.padding)
out = out + self.bias
if not self.channels_first:
out = self.prot.transpose(out, perm=[0, 2, 3, 1])
return out
def backward(self, d_y, learning_rate):
if not self.channels_first:
raise TypeError("channels must be first on the backward pass")
x = self.cache
h_filter, w_filter, d_filter, n_filter = map(int, self.weights.shape)
dx = None  # no input gradient is propagated for the first layer
if self.model.layers.index(self) != 0:
W_reshaped = self.weights.reshape(n_filter, -1).transpose()
dout_reshaped = d_y.transpose(1, 2, 3, 0).reshape(n_filter, -1)
dx = W_reshaped.matmul(dout_reshaped).col2im(
imshape=self.cached_input_shape,
field_height=h_filter,
field_width=w_filter,
padding=self.padding,
stride=self.strides
)
d_w = self.prot.conv2d_bw(x, d_y, self.weights.shape, self.strides, self.padding)
d_bias = d_y.reduce_sum(axis=0)
self.weights.assign((d_w * learning_rate).neg() + self.weights)
self.bias.assign((d_bias * learning_rate).neg() + self.bias)
return dx
def set_protocol(new_prot):
core.Layer.prot = new_prot
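# Hedged illustration of the SAME-padding arithmetic used by
# Conv2D.get_output_shape above; pure numpy, no protocol or weights required.
def _example_same_padding_output_shape():
    n_x, _, h_x, w_x = 32, 1, 28, 28     # NCHW input, e.g. a batch of 28x28 images
    n_filters, strides = 20, 2           # matches the docstring's (4, 4, 1, 20) filter
    h_out = int(np.ceil(float(h_x) / float(strides)))
    w_out = int(np.ceil(float(w_x) / float(strides)))
    return [n_x, n_filters, h_out, w_out]  # -> [32, 20, 14, 14]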
|
PypiClean
|
/marqeta_client-3.0.0.tar.gz/marqeta_client-3.0.0/marqeta_client/models/internal_crypto_key_request_model.py
|
from __future__ import annotations
from inspect import getfullargspec
import pprint
import re # noqa: F401
import json
from typing import List
from pydantic import BaseModel, Field, conlist
from marqeta_client.models.internal_crypto_key_request import InternalCryptoKeyRequest
class InternalCryptoKeyRequestModel(BaseModel):
"""
InternalCryptoKeyRequestModel
"""
crypto_keys: conlist(InternalCryptoKeyRequest, max_items=2147483647, min_items=1) = Field(..., description="A list of 'internal crypto key' items")
__properties = ["crypto_keys"]
class Config:
allow_population_by_field_name = True
validate_assignment = True
def to_str(self) -> str:
"""Returns the string representation of the model using alias"""
return pprint.pformat(self.dict(by_alias=True))
def to_json(self) -> str:
"""Returns the JSON representation of the model using alias"""
return json.dumps(self.to_dict())
@classmethod
def from_json(cls, json_str: str) -> InternalCryptoKeyRequestModel:
"""Create an instance of InternalCryptoKeyRequestModel from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self):
"""Returns the dictionary representation of the model using alias"""
_dict = self.dict(by_alias=True,
exclude={
},
exclude_none=True)
# override the default output from pydantic by calling `to_dict()` of each item in crypto_keys (list)
_items = []
if self.crypto_keys:
for _item in self.crypto_keys:
if _item:
_items.append(_item.to_dict())
_dict['crypto_keys'] = _items
return _dict
@classmethod
def from_dict(cls, obj: dict) -> InternalCryptoKeyRequestModel:
"""Create an instance of InternalCryptoKeyRequestModel from a dict"""
if obj is None:
return None
if type(obj) is not dict:
return InternalCryptoKeyRequestModel.parse_obj(obj)
_obj = InternalCryptoKeyRequestModel.parse_obj({
"crypto_keys": [InternalCryptoKeyRequest.from_dict(_item) for _item in obj.get("crypto_keys")] if obj.get("crypto_keys") is not None else None
})
return _obj
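# Hedged usage sketch (comments only, since the exact shape of an
# InternalCryptoKeyRequest item is defined in another module):
#
#   payload = '{"crypto_keys": [ ... ]}'                       # one to two key items
#   model = InternalCryptoKeyRequestModel.from_json(payload)
#   print(model.to_str())                                      # pretty-printed, by alias
#   json_again = model.to_json()                               # round-trips via to_dict()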
|
PypiClean
|
/balsam_flow-0.7.0a10-py3-none-any.whl/balsam/_api/query.py
|
from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Optional, TypeVar, Union, overload
from .model import BalsamModel
T = TypeVar("T", bound=BalsamModel)
if TYPE_CHECKING:
from .manager import Manager
U = TypeVar("U", bound="Query") # type: ignore
REPR_OUTPUT_SIZE = 20
class Query(Iterable[T]):
def __init__(self, manager: "Manager[T]") -> None:
self._manager: "Manager[T]" = manager
self._result_cache: Optional[List[T]] = None
self._filters: Dict[str, Any] = {}
self._order_field: Optional[str] = None
self._count: Optional[int] = None
self._limit: Optional[int] = None
self._offset: Optional[int] = None
self._empty: bool = False
def __repr__(self) -> str:
data = list(self[: REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..." # type: ignore
return "<%s %r>" % (self.__class__.__name__, data)
def __len__(self) -> int:
self._fetch_cache()
assert isinstance(self._result_cache, list)
return len(self._result_cache)
def __bool__(self) -> bool:
self._fetch_cache()
return bool(self._result_cache)
def __setitem__(self, k: int, v: T) -> None:
if not isinstance(k, int):
raise TypeError("Item assignment only support for int index")
if self._result_cache is None:
self._fetch_cache()
assert isinstance(self._result_cache, list)
self._result_cache[k] = v
@overload
def __getitem__(self, k: int) -> T:
...
@overload
def __getitem__(self, k: slice) -> Union[List[T], "Query[T]"]: # noqa: F811
...
def __getitem__(self, k: Union[int, slice]) -> Union[List[T], T, "Query[T]"]: # noqa: F811
"""
Retrieve an item or slice from the set of results.
"""
if not isinstance(k, (int, slice)):
raise TypeError("Query indices must be integers or slices, not %s." % type(k).__name__)
assert (not isinstance(k, slice) and (k >= 0)) or (
isinstance(k, slice) and (k.start is None or k.start >= 0) and (k.stop is None or k.stop >= 0)
), "Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
start: Optional[int]
stop: Optional[int]
clone = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
clone._set_limits(start, stop)
return list(clone)[start : stop : k.step] if k.step else clone
else:
clone = self._clone()
clone._set_limits(k, k + 1)
clone._fetch_cache()
assert clone._result_cache is not None
return clone._result_cache[0]
@property
def _is_sliced(self) -> bool:
return not (self._limit is None and self._offset is None)
def _clone(self: "U") -> "U":
clone = type(self)(manager=self._manager)
clone._filters = self._filters.copy()
clone._order_field = self._order_field
clone._limit = self._limit
clone._offset = self._offset
clone._empty = self._empty
return clone
def _set_limits(self, start: Optional[int], stop: Optional[int]) -> None:
if start is None:
start = 0
self._offset = start
if stop is not None:
self._limit = stop - start
def __iter__(self) -> Iterator[T]:
self._fetch_cache()
assert isinstance(self._result_cache, list)
return iter(self._result_cache)
def _fetch_cache(self) -> None:
if self._result_cache is not None:
return
if self._empty:
self._result_cache = []
return
instances, count = self._manager._get_list(
filters=self._filters,
ordering=self._order_field,
limit=self._limit,
offset=self._offset,
)
self._count = count
self._result_cache = instances
def _filter(self: "U", **kwargs: Any) -> "U":
if self._is_sliced:
raise AttributeError("Cannot filter a sliced Query")
clone = self._clone()
for key, val in kwargs.items():
if isinstance(val, dict):
kwargs[key] = [f"{k}:{v}" for k, v in val.items()]
if isinstance(val, (list, tuple)) and not val:
clone._empty = True
clone._filters.update(kwargs)
return clone
def _order_by(self: "U", field: Optional[str]) -> "U":
if self._is_sliced:
raise AttributeError("Cannot re-order a sliced Query")
clone = self._clone()
clone._order_field = field
return clone
# Methods that do not return a Query
# **********************************
def _get(self, **kwargs: Any) -> T:
clone: Query[T] = self._filter(**kwargs)
clone._fetch_cache()
assert clone._result_cache is not None
results: List[T] = list(clone)
nobj = len(results)
if nobj == 1:
return results[0]
elif nobj == 0:
raise self._manager._model_class.DoesNotExist(clone._filters)
else:
raise self._manager._model_class.MultipleObjectsReturned(nobj)
def first(self) -> T:
return self[0]
def count(self) -> Optional[int]:
if self._empty:
return 0
if self._count is None:
_, _count = self._manager._get_list(filters=self._filters, limit=0, offset=0, ordering=None)
self._count = _count
return self._count
def _update(self, **kwargs: Any) -> Union[int, List[T]]:
if self._empty:
return []
return self._manager._do_bulk_update_query(patch=kwargs, filters=self._filters)
def delete(self) -> Union[int, None]:
if self._empty:
return None
return self._manager._do_bulk_delete(filters=self._filters)
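# Hedged usage sketch (comments only; `job_manager` is a hypothetical Manager[T],
# and the filter field names depend on the concrete model). Queries are lazy:
# filters and slices only build state, and the REST call happens on first
# iteration, indexing, or len().
#
#   q = Query(manager=job_manager)._filter(state="RUNNING")._order_by("id")
#   first_page = q[:20]          # sliced clone, still no request
#   items = list(first_page)     # single _get_list call with limit/offset
#   total = q.count()            # count-only request, results not fetched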
|
PypiClean
|
/lava-cactus-1.1.1.tar.gz/lava-cactus-1.1.1/cactus/site.py
|
import os
import sys
import shutil
import logging
import traceback
import django.conf
from django.utils import translation
from cactus import ui as ui_module
from cactus.config.router import ConfigRouter
from cactus.deployment import get_deployment_engine_class
from cactus.i18n.commands import MessageMaker, MessageCompiler
from cactus.plugin.builtin.cache import CacheDurationPlugin
from cactus.plugin.builtin.context import ContextPlugin
from cactus.plugin.builtin.ignore import IgnorePatternsPlugin
from cactus.plugin.loader import CustomPluginsLoader, ObjectsPluginLoader
from cactus.plugin.manager import PluginManager
from cactus.static.external.manager import ExternalManager
from cactus.compat.paths import SiteCompatibilityLayer
from cactus.compat.page import PageContextCompatibilityPlugin
from cactus.utils.file import fileSize
from cactus.utils.filesystem import chdir, fileList
from cactus.utils.helpers import memoize, map_apply
from cactus.utils.network import internetWorking
from cactus.utils.parallel import multiMap, PARALLEL_DISABLED, PARALLEL_CONSERVATIVE, PARALLEL_AGGRESSIVE
from cactus.utils.url import is_external
from cactus.page import Page
from cactus.static import Static
from cactus.listener import Listener
from cactus.server import WebServer
from cactus.utils import ipc
logger = logging.getLogger(__name__)
DEFAULT_PROVIDER = "aws"
class Site(SiteCompatibilityLayer):
_path = None
_parallel = PARALLEL_CONSERVATIVE #TODO: Test me
_static = None
VERB_UNKNOWN = 0
VERB_SERVE = 1
VERB_BUILD = 2
def __init__(self, path, config_paths=None, ui=None,
PluginManagerClass=None, ExternalManagerClass=None, DeploymentEngineClass=None,
verb=VERB_UNKNOWN):
# Load the config engine
if config_paths is None:
config_paths = []
self.config = ConfigRouter(config_paths)
self.verb = verb
# Load site-specific config values
self.prettify_urls = self.config.get('prettify', False)
self.compress_extensions = self.config.get('compress', ['html', 'css', 'js', 'txt', 'xml'])
self.fingerprint_extensions = self.config.get('fingerprint', [])
self.use_translate = self.config.get('use_translate', False)
self.locale = []
self.default_language = self.config.get('default_language', 'en')
if self.use_translate:
self.locale.append(self.default_language)
self.other_languages = self.config.get('other_languages', [])
self.locale += self.other_languages
self.path = path
self.verify_path()
# Load Managers
if ui is None:
ui = ui_module
self.ui = ui
if PluginManagerClass is None:
PluginManagerClass = PluginManager
self.plugin_manager = PluginManagerClass(self,
[
CustomPluginsLoader(self.plugin_path), # User plugins
ObjectsPluginLoader([ # Builtin plugins
ContextPlugin(), CacheDurationPlugin(),
IgnorePatternsPlugin(), PageContextCompatibilityPlugin(),
])
]
)
if ExternalManagerClass is None:
ExternalManagerClass = ExternalManager
self.external_manager = ExternalManagerClass(self)
if DeploymentEngineClass is None:
hosting_provider = self.config.get("provider", DEFAULT_PROVIDER)
DeploymentEngineClass = get_deployment_engine_class(hosting_provider)
assert DeploymentEngineClass is not None, \
"Could not load Deployment for Provider: {0}".format(hosting_provider)
self.deployment_engine = DeploymentEngineClass(self)
# Load Django settings
self.setup()
@property
def url(self):
return self.config.get('site-url')
@url.setter
def url(self, value):
self.config.set('site-url', value)
self.config.write()
def verify_url(self):
"""
We need the site url to generate the sitemap.
"""
#TODO: Make a "required" option in the config.
#TODO: Use URL tags in the sitemap
# if self.url is None:
# self.url = self.ui.prompt_url("Enter your site URL (e.g. http://example.com/)")
@property
def path(self):
return self._path
@path.setter
def path(self, path):
self._path = path
self.build_path = os.path.join(path, '.build')
self.deploy_path = os.path.join(path, '.deploy')
self.template_path = os.path.join(path, 'templates')
self.page_path = os.path.join(path, 'pages')
self.plugin_path = os.path.join(path, 'plugins')
self.static_path = os.path.join(path, 'static')
self.script_path = os.path.join(os.getcwd(), __file__)
self.locale_path = os.path.join(path, "locale")
def setup(self):
"""
Configure django to use both our template and pages folder as locations
to look for included templates.
"""
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [self.template_path, self.page_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'myproject.utils.context_processors.settings_context',
],
'builtins': [
'cactus.template_tags',
],
},
}]
MIDDLEWARE = [
'django.middleware.locale.LocaleMiddleware',
]
settings = {
"TEMPLATES": TEMPLATES,
"MIDDLEWARE": MIDDLEWARE,
}
if self.use_translate:
settings.update({
"USE_I18N": True,
"USE_L10N": False,
"LOCALE_PATHS": [self.locale_path],
})
django.conf.settings.configure(**settings)
django.setup()
def verify_path(self):
"""
Check if this path looks like a Cactus website
"""
required_subfolders = ['pages', 'static', 'templates', 'plugins']
if self.use_translate:
required_subfolders.append('locale')
for p in required_subfolders:
if not os.path.isdir(os.path.join(self.path, p)):
logger.error('This does not look like a (complete) cactus project (missing "%s" subfolder)', p)
sys.exit(1)
@memoize
def context(self):
"""
Base context for the site: all the html pages.
"""
ctx = {
'CACTUS': {
'pages': [p for p in self.pages() if p.is_html()],
'static': [p for p in self.static()]
},
'__CACTUS_SITE__': self,
}
# Also make lowercase work
ctx['cactus'] = ctx['CACTUS']
return ctx
def make_messages(self):
"""
Generate the .po files for the site.
"""
if not self.use_translate:
logger.error("You should set use_translate=true in configurations file. "
"You also can set default_language and other_languages to translate")
return
message_maker = MessageMaker(self)
message_maker.execute()
def compile_messages(self):
"""
Remove pre-existing compiled language files, and re-compile.
"""
message_compiler = MessageCompiler(self)
message_compiler.execute()
def clean(self):
"""
Remove all build files.
"""
logger.debug("*** CLEAN %s", self.path)
if os.path.isdir(self.build_path):
shutil.rmtree(self.build_path)
def build_with_translation(self, locale_item=None):
logger.debug("*** BUILD %s", self.path)
language = self.default_language
if locale_item is not None:
locale_build_path = os.path.join(self.build_path, locale_item)
self.build_path = locale_build_path
language = locale_item
django.conf.settings.LANGUAGE_CODE = language
translation.activate(language)
self.verify_url()
# Reset the static content
self._static = None
self._static_resources_dict = None
# TODO: Facility to reset the site, and reload config.
# TODO: Currently, we can't build a site instance multiple times
self.plugin_manager.reload() # Reload in case we're running on the server # We're still loading twice!
self.plugin_manager.preBuild(self)
logger.debug('Plugins: %s', ', '.join([p.plugin_name for p in self.plugin_manager.plugins]))
logger.debug('Processors: %s', ', '.join([p.__name__ for p in self.external_manager.processors]))
logger.debug('Optimizers: %s', ', '.join([p.__name__ for p in self.external_manager.optimizers]))
# Make sure the build path exists
if not os.path.exists(self.build_path):
os.mkdir(self.build_path)
# Copy the static files
self.buildStatic()
# Always clean out the pages
build_static_path = os.path.join(self.build_path, "static")
for path in os.listdir(self.build_path):
path = os.path.join(self.build_path, path)
if path != build_static_path:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
# Render the pages to their output files
mapper = multiMap if self._parallel >= PARALLEL_AGGRESSIVE else map_apply
# mapper = map_apply
mapper(lambda p: p.build(), self.pages())
self.plugin_manager.postBuild(self)
for static in self.static():
if os.path.isdir(static.pre_dir):
shutil.rmtree(static.pre_dir)
def build(self):
"""
Generate fresh site from templates.
"""
self.build_with_translation()
# Prepare translations
if self.use_translate:
build_path_tmp = self.build_path
self.compile_messages()
for locale_item in self.locale:
self.build_with_translation(locale_item)
self.build_path = build_path_tmp
def static(self):
"""
Retrieve a list of static files for the site
"""
if self._static is None:
self._static = []
for path in fileList(self.static_path, relative=True):
full_path = os.path.join(self.static_path, path)
if os.path.islink(full_path):
if not os.path.exists(os.path.realpath(full_path)):
logger.warning("Skipping symlink that points to unexisting file:\n%s", full_path)
continue
self._static.append(Static(self, path))
return self._static
def static_resources_dict(self):
"""
Retrieve a dictionary mapping URL's to static files
"""
if self._static_resources_dict is None:
self._static_resources_dict = dict((resource.link_url, resource) for resource in self.static())
return self._static_resources_dict
def _get_resource(self, src_url, resources):
if is_external(src_url):
return src_url
for split_char in ["#", "?"]:
if split_char in src_url:
src_url = src_url.split(split_char)[0]
if src_url in resources:
return resources[src_url].final_url
return None
def _get_url(self, src_url, resources):
return self._get_resource(src_url, resources)
def get_url_for_static(self, src_path):
return self._get_url(src_path, self.static_resources_dict())
def get_url_for_page(self, src_path):
return self._get_url(src_path, dict((resource.link_url, resource) for resource in self.pages()))
def buildStatic(self):
"""
Build static files (pre-process, copy to static folder)
"""
mapper = multiMap if self._parallel > PARALLEL_DISABLED else map_apply
mapper(lambda s: s.build(), self.static())
def pages(self):
"""
List of pages.
"""
if not hasattr(self, "_page_cache"):
self._page_cache = {}
pages = []
for path in fileList(self.page_path, relative=True):
if path.endswith("~"):
continue
if path not in self._page_cache:
logger.debug("Found page: %s", path)
self._page_cache[path] = Page(self, path)
pages.append(self._page_cache[path])
return pages
def _rebuild_should_ignore(self, file_path):
file_relative_path = os.path.relpath(file_path, self.path)
# Ignore anything in a hidden folder like .git
for path_part in file_relative_path.split(os.path.sep):
if path_part.startswith("."):
return True
if file_path.startswith(self.page_path):
return False
if file_path.startswith(self.template_path):
return False
if file_path.startswith(self.static_path):
return False
if file_path.startswith(self.plugin_path):
return False
return True
def _rebuild(self, changes):
logger.debug("*** REBUILD %s", self.path)
logger.info('*** Rebuilding (%s changed)' % self.path)
# We will pause the listener while building so scripts that alter the output
# like coffeescript and less don't trigger the listener again immediately.
self.listener.pause()
try:
#TODO: Fix this.
#TODO: The static files should handle collection of their static folder on their own
#TODO: The static files should not run everything on __init__
#TODO: Only rebuild static files that changed
# We need to "clear out" the list of static first. Otherwise, processors will not run again
# They run on __init__ to run before fingerprinting, and the "built" static files themselves,
# which are in a temporary folder, have been deleted already!
# self._static = None
self.build()
except Exception as e:
logger.info('*** Error while building\n%s', e)
traceback.print_exc(file=sys.stdout)
changed_file_extension = set(map(lambda x: os.path.splitext(x)[1], changes["changed"]))
reload_css_file_extensions = set([".css", ".sass", ".scss", ".styl"])
# When we have changes, we want to refresh the browser tabs with the updates.
# Mostly we just refresh the browser except when there are just css changes,
# then we reload the css in place.
local_hosts = [
"http://127.0.0.1:%s" % self._port,
"http://localhost:%s" % self._port,
"http://0.0.0.0:%s" % self._port
]
if len(changes["added"]) == 0 and len(changes["deleted"]) == 0 and changed_file_extension.issubset(reload_css_file_extenstions):
# browserReloadCSS(local_hosts)
self.server.reloadCSS()
else:
# browserReload(local_hosts)
self.server.reloadPage()
self.listener.resume()
def serve(self, browser=True, port=8000):
"""
Start a http server and rebuild on changes.
"""
self._parallel = PARALLEL_DISABLED
self._port = port
self.verb = self.VERB_SERVE
self.clean()
self.build()
logger.info('Running webserver at http://127.0.0.1:%s for %s' % (port, self.build_path))
ipc.signal("server.didstart")
logger.info('Type control-c to exit')
with chdir(self.build_path):
self.listener = Listener(self.path, self._rebuild, ignore=self._rebuild_should_ignore)
self.listener.run()
self.server = WebServer(self.build_path, port=port)
try:
self.server.start()
# if browser is True:
# webbrowser.open('http://127.0.0.1:%s' % port)
except (KeyboardInterrupt, SystemExit):
self.server.stop()
logger.info("Bye")
def upload(self):
# Make sure we have internet
if not internetWorking():
logger.info('There does not seem to be internet here, check your connection')
return
logger.debug('Start upload')
self.build_path = self.deploy_path
self.clean()
self.build()
self.plugin_manager.preDeploy(self)
totalFiles = self.deployment_engine.deploy()
changedFiles = [r for r in totalFiles if r['changed']]
self.plugin_manager.postDeploy(self)
# Display done message and some statistics
logger.info('\nDone\n')
logger.info('%s total files with a size of %s' %
(len(totalFiles), fileSize(sum([r['size'] for r in totalFiles]))))
logger.info('%s changed files with a size of %s' %
(len(changedFiles), fileSize(sum([r['size'] for r in changedFiles]))))
logger.info('\nhttp://%s\n' % self.config.get('aws-bucket-website')) #TODO: Fix
def domain_setup(self):
# Make sure we have internet
if not internetWorking():
logger.info('There does not seem to be internet here, check your connection')
return
self.deployment_engine.domain_setup()
self.domain_list()
def domain_list(self):
self.deployment_engine.domain_list()
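# Hedged usage sketch (comments only; the project path is hypothetical and must
# contain the pages/static/templates/plugins subfolders checked by verify_path):
#
#   site = Site(path="/home/user/my-site", config_paths=["config.json"])
#   site.clean()
#   site.build()                 # renders pages and static files into .build/
#   # site.serve(port=8000)      # dev server with rebuild-on-change
#   # site.upload()              # builds into .deploy/ and pushes via the provider engine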
|
PypiClean
|
/indic-doctr-0.7.1a0.tar.gz/indic-doctr-0.7.1a0/doctr/transforms/modules/pytorch.py
|
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch
from PIL.Image import Image
from torch.nn.functional import pad
from torchvision.transforms import functional as F
from torchvision.transforms import transforms as T
from ..functional.pytorch import random_shadow
__all__ = ["Resize", "GaussianNoise", "ChannelShuffle", "RandomHorizontalFlip", "RandomShadow"]
class Resize(T.Resize):
def __init__(
self,
size: Union[int, Tuple[int, int]],
interpolation=F.InterpolationMode.BILINEAR,
preserve_aspect_ratio: bool = False,
symmetric_pad: bool = False,
) -> None:
super().__init__(size, interpolation)
self.preserve_aspect_ratio = preserve_aspect_ratio
self.symmetric_pad = symmetric_pad
if not isinstance(self.size, (int, tuple, list)):
raise AssertionError("size should be either a tuple, a list or an int")
def forward(
self,
img: torch.Tensor,
target: Optional[np.ndarray] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, np.ndarray]]:
if isinstance(self.size, int):
target_ratio = img.shape[-2] / img.shape[-1]
else:
target_ratio = self.size[0] / self.size[1]
actual_ratio = img.shape[-2] / img.shape[-1]
if not self.preserve_aspect_ratio or (target_ratio == actual_ratio and (isinstance(self.size, (tuple, list)))):
# If we don't preserve the aspect ratio, or the wanted aspect ratio is the same as the original one,
# we can use the regular resize
if target is not None:
return super().forward(img), target
return super().forward(img)
else:
# Resize
if isinstance(self.size, (tuple, list)):
if actual_ratio > target_ratio:
tmp_size = (self.size[0], max(int(self.size[0] / actual_ratio), 1))
else:
tmp_size = (max(int(self.size[1] * actual_ratio), 1), self.size[1])
elif isinstance(self.size, int): # self.size is the longest side, infer the other
if img.shape[-2] <= img.shape[-1]:
tmp_size = (max(int(self.size * actual_ratio), 1), self.size)
else:
tmp_size = (self.size, max(int(self.size / actual_ratio), 1))
# Scale image
img = F.resize(img, tmp_size, self.interpolation)
raw_shape = img.shape[-2:]
if isinstance(self.size, (tuple, list)):
# Pad (inverted in pytorch)
_pad = (0, self.size[1] - img.shape[-1], 0, self.size[0] - img.shape[-2])
if self.symmetric_pad:
half_pad = (math.ceil(_pad[1] / 2), math.ceil(_pad[3] / 2))
_pad = (half_pad[0], _pad[1] - half_pad[0], half_pad[1], _pad[3] - half_pad[1])
img = pad(img, _pad)
# In case boxes are provided, resize boxes if needed (for detection task if preserve aspect ratio)
if target is not None:
if self.preserve_aspect_ratio:
# Get absolute coords
if target.shape[1:] == (4,):
if isinstance(self.size, (tuple, list)) and self.symmetric_pad:
if np.max(target) <= 1:
offset = half_pad[0] / img.shape[-1], half_pad[1] / img.shape[-2]
target[:, [0, 2]] = offset[0] + target[:, [0, 2]] * raw_shape[-1] / img.shape[-1]
target[:, [1, 3]] = offset[1] + target[:, [1, 3]] * raw_shape[-2] / img.shape[-2]
else:
target[:, [0, 2]] *= raw_shape[-1] / img.shape[-1]
target[:, [1, 3]] *= raw_shape[-2] / img.shape[-2]
elif target.shape[1:] == (4, 2):
if isinstance(self.size, (tuple, list)) and self.symmetric_pad:
if np.max(target) <= 1:
offset = half_pad[0] / img.shape[-1], half_pad[1] / img.shape[-2]
target[..., 0] = offset[0] + target[..., 0] * raw_shape[-1] / img.shape[-1]
target[..., 1] = offset[1] + target[..., 1] * raw_shape[-2] / img.shape[-2]
else:
target[..., 0] *= raw_shape[-1] / img.shape[-1]
target[..., 1] *= raw_shape[-2] / img.shape[-2]
else:
raise AssertionError
return img, target
return img
def __repr__(self) -> str:
interpolate_str = self.interpolation.value
_repr = f"output_size={self.size}, interpolation='{interpolate_str}'"
if self.preserve_aspect_ratio:
_repr += f", preserve_aspect_ratio={self.preserve_aspect_ratio}, symmetric_pad={self.symmetric_pad}"
return f"{self.__class__.__name__}({_repr})"
class GaussianNoise(torch.nn.Module):
"""Adds Gaussian Noise to the input tensor
>>> import torch
>>> from doctr.transforms import GaussianNoise
>>> transfo = GaussianNoise(0., 1.)
>>> out = transfo(torch.rand((3, 224, 224)))
Args:
mean : mean of the gaussian distribution
std : std of the gaussian distribution
"""
def __init__(self, mean: float = 0.0, std: float = 1.0) -> None:
super().__init__()
self.std = std
self.mean = mean
def forward(self, x: torch.Tensor) -> torch.Tensor:
# Draw noise uniformly in [mean - std, mean + std)
noise = self.mean + 2 * self.std * torch.rand(x.shape, device=x.device) - self.std
if x.dtype == torch.uint8:
return (x + 255 * noise).round().clamp(0, 255).to(dtype=torch.uint8)
else:
return (x + noise.to(dtype=x.dtype)).clamp(0, 1)
def extra_repr(self) -> str:
return f"mean={self.mean}, std={self.std}"
class ChannelShuffle(torch.nn.Module):
"""Randomly shuffle channel order of a given image"""
def __init__(self):
super().__init__()
def forward(self, img: torch.Tensor) -> torch.Tensor:
# Get a random order
chan_order = torch.rand(img.shape[0]).argsort()
return img[chan_order]
class RandomHorizontalFlip(T.RandomHorizontalFlip):
def forward(
self, img: Union[torch.Tensor, Image], target: Dict[str, Any]
) -> Tuple[Union[torch.Tensor, Image], Dict[str, Any]]:
"""
Args:
img: Image to be flipped.
target: Dictionary with boxes (in relative coordinates of shape (N, 4)) and labels as keys
Returns:
Tuple of PIL Image or Tensor and target
"""
if torch.rand(1) < self.p:
_img = F.hflip(img)
_target = target.copy()
# Changing the relative bbox coordinates
_target["boxes"][:, ::2] = 1 - target["boxes"][:, [2, 0]]
return _img, _target
return img, target
class RandomShadow(torch.nn.Module):
"""Adds random shade to the input image
>>> import torch
>>> from doctr.transforms import RandomShadow
>>> transfo = RandomShadow((0., 1.))
>>> out = transfo(torch.rand((3, 64, 64)))
Args:
opacity_range : minimum and maximum opacity of the shade
"""
def __init__(self, opacity_range: Optional[Tuple[float, float]] = None) -> None:
super().__init__()
self.opacity_range = opacity_range if isinstance(opacity_range, tuple) else (0.2, 0.8)
def __call__(self, x: torch.Tensor) -> torch.Tensor:
# Add a random shadow, preserving the input dtype
try:
if x.dtype == torch.uint8:
return (
(
255
* random_shadow(
x.to(dtype=torch.float32) / 255,
self.opacity_range,
)
)
.round()
.clip(0, 255)
.to(dtype=torch.uint8)
)
else:
return random_shadow(x, self.opacity_range).clip(0, 1)
except ValueError:
return x
def extra_repr(self) -> str:
return f"opacity_range={self.opacity_range}"
|
PypiClean
|
/celo_etl-0.0.2-py3-none-any.whl/celoetl/cli/__init__.py
|
import click
from celoetl.cli.export_all import export_all
from celoetl.cli.export_blocks_and_transactions import export_blocks_and_transactions
from celoetl.cli.export_contracts import export_contracts
from celoetl.cli.export_geth_traces import export_geth_traces
from celoetl.cli.export_receipts_and_logs import export_receipts_and_logs
from celoetl.cli.export_token_transfers import export_token_transfers
from celoetl.cli.export_tokens import export_tokens
from celoetl.cli.export_traces import export_traces
from celoetl.cli.extract_contracts import extract_contracts
from celoetl.cli.extract_csv_column import extract_csv_column
from celoetl.cli.extract_field import extract_field
from celoetl.cli.extract_geth_traces import extract_geth_traces
from celoetl.cli.extract_token_transfers import extract_token_transfers
from celoetl.cli.extract_tokens import extract_tokens
from celoetl.cli.filter_items import filter_items
from celoetl.cli.get_block_range_for_date import get_block_range_for_date
from celoetl.cli.get_block_range_for_timestamps import get_block_range_for_timestamps
from celoetl.cli.get_keccak_hash import get_keccak_hash
from celoetl.cli.stream import stream
@click.group()
@click.version_option(version='0.0.1')
@click.pass_context
def cli(ctx):
pass
# export
cli.add_command(export_all, "export_all")
cli.add_command(export_blocks_and_transactions, "export_blocks_and_transactions")
cli.add_command(export_receipts_and_logs, "export_receipts_and_logs")
cli.add_command(export_token_transfers, "export_token_transfers")
cli.add_command(extract_token_transfers, "extract_token_transfers")
cli.add_command(export_contracts, "export_contracts")
cli.add_command(export_tokens, "export_tokens")
cli.add_command(export_traces, "export_traces")
cli.add_command(export_geth_traces, "export_geth_traces")
cli.add_command(extract_geth_traces, "extract_geth_traces")
cli.add_command(extract_contracts, "extract_contracts")
cli.add_command(extract_tokens, "extract_tokens")
# streaming
cli.add_command(stream, "stream")
# utils
cli.add_command(get_block_range_for_date, "get_block_range_for_date")
cli.add_command(get_block_range_for_timestamps, "get_block_range_for_timestamps")
cli.add_command(get_keccak_hash, "get_keccak_hash")
cli.add_command(extract_csv_column, "extract_csv_column")
cli.add_command(filter_items, "filter_items")
cli.add_command(extract_field, "extract_field")
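# Hedged usage sketch: exercising the group programmatically with click's test
# runner. The command names come from the add_command registrations above; each
# sub-command's flags live in the imported modules and are not shown here.
def _example_list_commands():
    from click.testing import CliRunner
    result = CliRunner().invoke(cli, ["--help"])
    return result.output  # lists export_all, stream, get_keccak_hash, ...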
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/compute/v20180930/get_snapshot.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetSnapshotResult',
'AwaitableGetSnapshotResult',
'get_snapshot',
]
@pulumi.output_type
class GetSnapshotResult:
"""
Snapshot resource.
"""
def __init__(__self__, creation_data=None, disk_size_gb=None, encryption_settings_collection=None, hyper_v_generation=None, id=None, location=None, managed_by=None, name=None, os_type=None, provisioning_state=None, sku=None, tags=None, time_created=None, type=None):
if creation_data and not isinstance(creation_data, dict):
raise TypeError("Expected argument 'creation_data' to be a dict")
pulumi.set(__self__, "creation_data", creation_data)
if disk_size_gb and not isinstance(disk_size_gb, int):
raise TypeError("Expected argument 'disk_size_gb' to be a int")
pulumi.set(__self__, "disk_size_gb", disk_size_gb)
if encryption_settings_collection and not isinstance(encryption_settings_collection, dict):
raise TypeError("Expected argument 'encryption_settings_collection' to be a dict")
pulumi.set(__self__, "encryption_settings_collection", encryption_settings_collection)
if hyper_v_generation and not isinstance(hyper_v_generation, str):
raise TypeError("Expected argument 'hyper_v_generation' to be a str")
pulumi.set(__self__, "hyper_v_generation", hyper_v_generation)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_by and not isinstance(managed_by, str):
raise TypeError("Expected argument 'managed_by' to be a str")
pulumi.set(__self__, "managed_by", managed_by)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if os_type and not isinstance(os_type, str):
raise TypeError("Expected argument 'os_type' to be a str")
pulumi.set(__self__, "os_type", os_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> 'outputs.CreationDataResponse':
"""
Disk source information. CreationData information cannot be changed after the disk has been created.
"""
return pulumi.get(self, "creation_data")
@property
@pulumi.getter(name="diskSizeGB")
def disk_size_gb(self) -> Optional[int]:
"""
If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
"""
return pulumi.get(self, "disk_size_gb")
@property
@pulumi.getter(name="encryptionSettingsCollection")
def encryption_settings_collection(self) -> Optional['outputs.EncryptionSettingsCollectionResponse']:
"""
Encryption settings collection used by Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
"""
return pulumi.get(self, "encryption_settings_collection")
@property
@pulumi.getter(name="hyperVGeneration")
def hyper_v_generation(self) -> Optional[str]:
"""
The hypervisor generation of the Virtual Machine. Applicable to OS disks only.
"""
return pulumi.get(self, "hyper_v_generation")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> str:
"""
Unused. Always Null.
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[str]:
"""
The Operating System type.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The disk provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SnapshotSkuResponse']:
"""
The snapshot's SKU name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The time when the disk was created.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetSnapshotResult(GetSnapshotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSnapshotResult(
creation_data=self.creation_data,
disk_size_gb=self.disk_size_gb,
encryption_settings_collection=self.encryption_settings_collection,
hyper_v_generation=self.hyper_v_generation,
id=self.id,
location=self.location,
managed_by=self.managed_by,
name=self.name,
os_type=self.os_type,
provisioning_state=self.provisioning_state,
sku=self.sku,
tags=self.tags,
time_created=self.time_created,
type=self.type)
def get_snapshot(resource_group_name: Optional[str] = None,
snapshot_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSnapshotResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str snapshot_name: The name of the snapshot that is being created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The max name length is 80 characters.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['snapshotName'] = snapshot_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:compute/v20180930:getSnapshot', __args__, opts=opts, typ=GetSnapshotResult).value
return AwaitableGetSnapshotResult(
creation_data=__ret__.creation_data,
disk_size_gb=__ret__.disk_size_gb,
encryption_settings_collection=__ret__.encryption_settings_collection,
hyper_v_generation=__ret__.hyper_v_generation,
id=__ret__.id,
location=__ret__.location,
managed_by=__ret__.managed_by,
name=__ret__.name,
os_type=__ret__.os_type,
provisioning_state=__ret__.provisioning_state,
sku=__ret__.sku,
tags=__ret__.tags,
time_created=__ret__.time_created,
type=__ret__.type)
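# Illustrative usage sketch (not part of the generated SDK): inside a Pulumi
# program this invoke looks up an existing snapshot and exports one attribute.
# The resource-group and snapshot names below are placeholders.
def _example_snapshot_lookup():
    snap = get_snapshot(resource_group_name="example-rg",
                        snapshot_name="example-snapshot")
    pulumi.export("snapshot_disk_size_gb", snap.disk_size_gb)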
|
PypiClean
|
/fake_bpy_module_2.90-20230117-py3-none-any.whl/bl_ui/space_sequencer.py
|
import sys
import typing
import bpy_types
import bl_ui.space_toolsystem_common
import bl_ui.properties_grease_pencil_common
import rna_prop_ui
GenericType = typing.TypeVar("GenericType")
class SEQUENCER_HT_header(bpy_types.Header, bpy_types._GenericUI):
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
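# Illustrative usage sketch (assumes a live Blender session where this class is
# registered under bpy.types, not this offline stub): headers such as the one
# above are extended by appending a draw callback via the `append` hook listed
# in the stub. `draw_custom_label` is a hypothetical callback name.
def _example_extend_header():
    import bpy

    def draw_custom_label(self, context):
        # layout.label adds static text to the header row
        self.layout.label(text="Custom sequencer header text")

    bpy.types.SEQUENCER_HT_header.append(draw_custom_label)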
class SEQUENCER_HT_tool_header(bpy_types.Header, bpy_types._GenericUI):
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_tool_settings(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_add(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
bl_translation_context = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_add_effect(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_add_empty(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_add_transitions(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_change(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_context_menu(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_editor_menus(bpy_types.Menu, bpy_types._GenericUI):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_marker(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_navigation(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_preview_zoom(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_proxy(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_range(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_select(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_select_channel(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_select_handle(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_select_linked(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_effect(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_input(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_lock_mute(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_movie(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_strip_transform(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_view(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_MT_view_cache(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_active_tool(
bl_ui.space_toolsystem_common.ToolActivePanelHelper, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SequencerButtonsPanel:
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def has_sequencer(self, context):
'''
'''
pass
def poll(self, context):
'''
'''
pass
class SequencerButtonsPanel_Output:
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def has_preview(self, context):
'''
'''
pass
def poll(self, context):
'''
'''
pass
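# Illustrative usage sketch (assumes a live Blender session): the two mixins
# above provide the poll logic that the generated sequencer panels below
# combine with bpy.types.Panel. A hypothetical add-on panel follows the same
# pattern; the class name, category, and label are placeholders.
def _example_register_panel():
    import bpy

    class SEQUENCER_PT_example(bpy.types.Panel):
        bl_space_type = 'SEQUENCE_EDITOR'
        bl_region_type = 'UI'
        bl_category = "Strip"
        bl_label = "Example"

        def draw(self, context):
            self.layout.label(text="Example panel body")

    bpy.utils.register_class(SEQUENCER_PT_example)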
class SEQUENCER_PT_adjust(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_color(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_comp(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_sound(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_transform(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_transform_crop(
SequencerButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_transform_offset(
SequencerButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_adjust_video(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_cache_settings(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_custom_props(SequencerButtonsPanel,
rna_prop_ui.PropertyPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_order = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_effect(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_effect_text_layout(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_effect_text_style(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_mask(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_modifiers(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_proxy_settings(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_scene(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_sound(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_source(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_strip(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_strip_cache(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_strip_proxy(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_time(SequencerButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header_preset(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_sequencer(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_annotation(
SequencerButtonsPanel_Output,
bl_ui.properties_grease_pencil_common.AnnotationDataPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def draw_layers(self, context, layout, gpd):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_annotation_onion(
SequencerButtonsPanel_Output,
bl_ui.properties_grease_pencil_common.AnnotationOnionSkin,
bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_frame_overlay(SequencerButtonsPanel_Output, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_preview(SequencerButtonsPanel_Output, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_view(SequencerButtonsPanel_Output, bpy_types.Panel,
bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_view_safe_areas(SequencerButtonsPanel_Output,
bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SEQUENCER_PT_view_safe_areas_center_cut(
SequencerButtonsPanel_Output, bpy_types.Panel, bpy_types._GenericUI):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def has_preview(self, context):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
def act_strip(context):
'''
'''
pass
def draw_color_balance(layout, color_balance):
'''
'''
pass
def selected_sequences_len(context):
'''
'''
pass
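# The SEQUENCER_PT_* stubs above all mix in bpy_types._GenericUI, which is what
# gives registered panels their append/prepend/remove hooks for extra draw
# callbacks. A minimal sketch of that extension pattern, assuming it runs inside
# Blender where bpy is importable and that the panel is exposed as
# bpy.types.SEQUENCER_PT_view (the function names and label text are illustrative):

import bpy


def draw_extra(self, context):
    # Runs after the panel's own draw(); self.layout is the panel's UILayout.
    self.layout.label(text="Custom sequencer tools")


def register():
    # append() attaches the extra draw callback to the registered panel class.
    bpy.types.SEQUENCER_PT_view.append(draw_extra)


def unregister():
    # remove() detaches it again, e.g. when the add-on is disabled.
    bpy.types.SEQUENCER_PT_view.remove(draw_extra)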
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/sites/item/onenote/notebooks/item/copy_notebook/copy_notebook_post_request_body.py
from __future__ import annotations
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
class CopyNotebookPostRequestBody(AdditionalDataHolder, Parsable):
def __init__(self,) -> None:
"""
Instantiates a new copyNotebookPostRequestBody and sets the default values.
"""
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
self._additional_data: Dict[str, Any] = {}
# The groupId property
self._group_id: Optional[str] = None
# The notebookFolder property
self._notebook_folder: Optional[str] = None
# The renameAs property
self._rename_as: Optional[str] = None
# The siteCollectionId property
self._site_collection_id: Optional[str] = None
# The siteId property
self._site_id: Optional[str] = None
@property
def additional_data(self,) -> Dict[str, Any]:
"""
Gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Returns: Dict[str, Any]
"""
return self._additional_data
@additional_data.setter
def additional_data(self,value: Dict[str, Any]) -> None:
"""
Sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Args:
value: Value to set for the AdditionalData property.
"""
self._additional_data = value
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> CopyNotebookPostRequestBody:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: CopyNotebookPostRequestBody
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return CopyNotebookPostRequestBody()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
fields: Dict[str, Callable[[Any], None]] = {
"groupId": lambda n : setattr(self, 'group_id', n.get_str_value()),
"notebookFolder": lambda n : setattr(self, 'notebook_folder', n.get_str_value()),
"renameAs": lambda n : setattr(self, 'rename_as', n.get_str_value()),
"siteCollectionId": lambda n : setattr(self, 'site_collection_id', n.get_str_value()),
"siteId": lambda n : setattr(self, 'site_id', n.get_str_value()),
}
return fields
@property
def group_id(self,) -> Optional[str]:
"""
Gets the groupId property value. The groupId property
Returns: Optional[str]
"""
return self._group_id
@group_id.setter
def group_id(self,value: Optional[str] = None) -> None:
"""
Sets the groupId property value. The groupId property
Args:
value: Value to set for the group_id property.
"""
self._group_id = value
@property
def notebook_folder(self,) -> Optional[str]:
"""
Gets the notebookFolder property value. The notebookFolder property
Returns: Optional[str]
"""
return self._notebook_folder
@notebook_folder.setter
def notebook_folder(self,value: Optional[str] = None) -> None:
"""
Sets the notebookFolder property value. The notebookFolder property
Args:
value: Value to set for the notebook_folder property.
"""
self._notebook_folder = value
@property
def rename_as(self,) -> Optional[str]:
"""
Gets the renameAs property value. The renameAs property
Returns: Optional[str]
"""
return self._rename_as
@rename_as.setter
def rename_as(self,value: Optional[str] = None) -> None:
"""
Sets the renameAs property value. The renameAs property
Args:
value: Value to set for the rename_as property.
"""
self._rename_as = value
def serialize(self,writer: SerializationWriter) -> None:
"""
        Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
writer.write_str_value("groupId", self.group_id)
writer.write_str_value("notebookFolder", self.notebook_folder)
writer.write_str_value("renameAs", self.rename_as)
writer.write_str_value("siteCollectionId", self.site_collection_id)
writer.write_str_value("siteId", self.site_id)
writer.write_additional_data_value(self.additional_data)
@property
def site_collection_id(self,) -> Optional[str]:
"""
Gets the siteCollectionId property value. The siteCollectionId property
Returns: Optional[str]
"""
return self._site_collection_id
@site_collection_id.setter
def site_collection_id(self,value: Optional[str] = None) -> None:
"""
Sets the siteCollectionId property value. The siteCollectionId property
Args:
value: Value to set for the site_collection_id property.
"""
self._site_collection_id = value
@property
def site_id(self,) -> Optional[str]:
"""
Gets the siteId property value. The siteId property
Returns: Optional[str]
"""
return self._site_id
@site_id.setter
def site_id(self,value: Optional[str] = None) -> None:
"""
Sets the siteId property value. The siteId property
Args:
value: Value to set for the site_id property.
"""
self._site_id = value
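# A minimal usage sketch for the model above: construct it, set the copy
# parameters you need, and pass it to the copyNotebook request builder's post()
# elsewhere in the SDK (that builder is not part of this file; the identifier
# values below are placeholders, not real IDs).
body = CopyNotebookPostRequestBody()
body.group_id = "00000000-0000-0000-0000-000000000000"  # placeholder group GUID
body.rename_as = "Quarterly Notes (copy)"
body.site_collection_id = None  # optional destination parameters; set only what the copy target needs
body.site_id = None
# Fields not modelled above can ride along via additional_data and are emitted
# by serialize() through write_additional_data_value().
body.additional_data = {"customFlag": True}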
/rally_openstack-2.3.0-py3-none-any.whl/rally_openstack/task/scenarios/designate/basic.py
import random
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.designate import utils
"""Basic scenarios for Designate."""
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["designate"]},
name="DesignateBasic.create_and_list_zones",
platform="openstack")
class CreateAndListZones(utils.DesignateScenario):
def run(self):
"""Create a zone and list all zones.
Measure the "openstack zone list" command performance.
If you have only 1 user in your context, you will
add 1 zone on every iteration. So you will have more
and more zone and will be able to measure the
performance of the "openstack zone list" command depending on
the number of zones owned by users.
"""
zone = self._create_zone()
self.assertTrue(zone)
list_zones = self._list_zones()
self.assertIn(zone, list_zones)
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="DesignateBasic.list_zones", platform="openstack")
class ListZones(utils.DesignateScenario):
def run(self):
"""List Designate zones.
This simple scenario tests the openstack zone list command by listing
all the zones.
"""
self._list_zones()
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["designate"]},
name="DesignateBasic.create_and_delete_zone",
platform="openstack")
class CreateAndDeleteZone(utils.DesignateScenario):
def run(self):
"""Create and then delete a zone.
Measure the performance of creating and deleting zones
with different level of load.
"""
zone = self._create_zone()
self._delete_zone(zone["id"])
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="DesignateBasic.list_recordsets",
platform="openstack")
class ListRecordsets(utils.DesignateScenario):
def run(self, zone_id):
"""List Designate recordsets.
This simple scenario tests the openstack recordset list command by
listing all the recordsets in a zone.
:param zone_id: Zone ID
"""
self._list_recordsets(zone_id)
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("zones"))
@scenario.configure(context={"cleanup@openstack": ["designate"]},
name="DesignateBasic.create_and_delete_recordsets",
platform="openstack")
class CreateAndDeleteRecordsets(utils.DesignateScenario):
def run(self, recordsets_per_zone=5):
"""Create and then delete recordsets.
Measure the performance of creating and deleting recordsets
with different level of load.
        :param recordsets_per_zone: recordsets to create per zone.
"""
zone = random.choice(self.context["tenant"]["zones"])
recordsets = []
for i in range(recordsets_per_zone):
recordset = self._create_recordset(zone)
recordsets.append(recordset)
for recordset in recordsets:
self._delete_recordset(
zone["id"], recordset["id"])
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("zones"))
@scenario.configure(context={"cleanup@openstack": ["designate"]},
name="DesignateBasic.create_and_list_recordsets",
platform="openstack")
class CreateAndListRecordsets(utils.DesignateScenario):
def run(self, recordsets_per_zone=5):
"""Create and then list recordsets.
If you have only 1 user in your context, you will
add 1 recordset on every iteration. So you will have more
and more recordsets and will be able to measure the
performance of the "openstack recordset list" command depending on
the number of zones/recordsets owned by users.
        :param recordsets_per_zone: recordsets to create per zone.
"""
zone = random.choice(self.context["tenant"]["zones"])
for i in range(recordsets_per_zone):
self._create_recordset(zone)
self._list_recordsets(zone["id"])
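# Each scenario class above is addressed from a Rally task file by its
# configure() name. A rough sketch of a task entry driving
# CreateAndListRecordsets, written as the equivalent Python dict (the runner
# and context settings are illustrative, not values taken from this module).
example_task = {
    "DesignateBasic.create_and_list_recordsets": [
        {
            "args": {"recordsets_per_zone": 5},
            "runner": {"type": "constant", "times": 10, "concurrency": 2},
            "context": {
                "users": {"tenants": 2, "users_per_tenant": 2},
                # The required_contexts validator expects the "zones" context
                # so that self.context["tenant"]["zones"] is populated.
                "zones": {"zones_per_tenant": 1},
            },
        }
    ]
}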
/alchemite_apiclient-0.61.0-py3-none-any.whl/alchemite_apiclient/api/default_api.py
import re # noqa: F401
import sys # noqa: F401
from alchemite_apiclient.api_client import ApiClient, Endpoint as _Endpoint
from alchemite_apiclient.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from alchemite_apiclient.model.error import Error
from alchemite_apiclient.model.job_patch import JobPatch
from alchemite_apiclient.model.query_request import QueryRequest
from alchemite_apiclient.model.query_response import QueryResponse
from alchemite_apiclient.model.suggest_initial_request import SuggestInitialRequest
from alchemite_apiclient.model.suggest_initial_response import SuggestInitialResponse
from alchemite_apiclient.model.version_response import VersionResponse
class DefaultApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.query_v1_put_endpoint = _Endpoint(
settings={
'response_type': (QueryResponse,),
'auth': [
'oauth'
],
'endpoint_path': '/query/v1',
'operation_id': 'query_v1_put',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'query_request',
'offset',
'limit',
],
'required': [
'query_request',
],
'nullable': [
],
'enum': [
],
'validation': [
'offset',
'limit',
]
},
root_map={
'validations': {
('offset',): {
'inclusive_minimum': 0,
},
('limit',): {
'inclusive_maximum': 100,
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'query_request':
(QueryRequest,),
'offset':
(int,),
'limit':
(int,),
},
'attribute_map': {
'offset': 'offset',
'limit': 'limit',
},
'location_map': {
'query_request': 'body',
'offset': 'query',
'limit': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.suggest_initial_get_endpoint = _Endpoint(
settings={
'response_type': ([SuggestInitialResponse],),
'auth': [
'oauth'
],
'endpoint_path': '/suggest-initial',
'operation_id': 'suggest_initial_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.suggest_initial_job_id_delete_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'oauth'
],
'endpoint_path': '/suggest-initial/{job_id}',
'operation_id': 'suggest_initial_job_id_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'job_id',
],
'required': [
'job_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'job_id':
(str,),
},
'attribute_map': {
'job_id': 'job_id',
},
'location_map': {
'job_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.suggest_initial_job_id_get_endpoint = _Endpoint(
settings={
'response_type': (SuggestInitialResponse,),
'auth': [
'oauth'
],
'endpoint_path': '/suggest-initial/{job_id}',
'operation_id': 'suggest_initial_job_id_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'job_id',
],
'required': [
'job_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'job_id':
(str,),
},
'attribute_map': {
'job_id': 'job_id',
},
'location_map': {
'job_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.suggest_initial_job_id_patch_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'oauth'
],
'endpoint_path': '/suggest-initial/{job_id}',
'operation_id': 'suggest_initial_job_id_patch',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'job_id',
'job_patch',
],
'required': [
'job_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'job_id':
(str,),
'job_patch':
(JobPatch,),
},
'attribute_map': {
'job_id': 'job_id',
},
'location_map': {
'job_id': 'path',
'job_patch': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.suggest_initial_post_endpoint = _Endpoint(
settings={
'response_type': (str,),
'auth': [
'oauth'
],
'endpoint_path': '/suggest-initial',
'operation_id': 'suggest_initial_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'suggest_initial_request',
],
'required': [
'suggest_initial_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'suggest_initial_request':
(SuggestInitialRequest,),
},
'attribute_map': {
},
'location_map': {
'suggest_initial_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain',
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.version_get_endpoint = _Endpoint(
settings={
'response_type': (VersionResponse,),
'auth': [
'oauth'
],
'endpoint_path': '/version',
'operation_id': 'version_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def query_v1_put(
self,
query_request,
**kwargs
):
"""Query the datastore # noqa: E501
Returns all rows matching the query passed. Will only return results on datasets that are in the 'uploaded' state. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_v1_put(query_request, async_req=True)
>>> result = thread.get()
Args:
query_request (QueryRequest):
Keyword Args:
offset (int): The number of items to skip before starting to collect the result set.. [optional] if omitted the server will use the default value of 0
limit (int): The number of items to return.. [optional] if omitted the server will use the default value of 20
            _return_http_data_only (bool): response data without the status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
QueryResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['query_request'] = \
query_request
return self.query_v1_put_endpoint.call_with_http_info(**kwargs)
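    # A minimal, illustrative call sequence for the endpoint above (the client
    # configuration and the QueryRequest contents are assumptions, not defined
    # in this module):
    #
    #   from alchemite_apiclient.model.query_request import QueryRequest
    #
    #   api = DefaultApi()                       # falls back to a default ApiClient
    #   page = api.query_v1_put(QueryRequest(), offset=0, limit=20)
    #   # page is a QueryResponse holding the rows that match the query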
def suggest_initial_get(
self,
**kwargs
):
"""Get all suggest-initial jobs # noqa: E501
Get all suggest-initial jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.suggest_initial_get(async_req=True)
>>> result = thread.get()
Keyword Args:
            _return_http_data_only (bool): response data without the status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[SuggestInitialResponse]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.suggest_initial_get_endpoint.call_with_http_info(**kwargs)
def suggest_initial_job_id_delete(
self,
job_id,
**kwargs
):
"""Delete suggest-initial job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.suggest_initial_job_id_delete(job_id, async_req=True)
>>> result = thread.get()
Args:
job_id (str): Unique ID of the job
Keyword Args:
            _return_http_data_only (bool): response data without the status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['job_id'] = \
job_id
return self.suggest_initial_job_id_delete_endpoint.call_with_http_info(**kwargs)
def suggest_initial_job_id_get(
self,
job_id,
**kwargs
):
"""Get suggest-initial job data # noqa: E501
Get suggest-initial job data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.suggest_initial_job_id_get(job_id, async_req=True)
>>> result = thread.get()
Args:
job_id (str): Unique ID of the job
Keyword Args:
            _return_http_data_only (bool): response data without the status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
SuggestInitialResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['job_id'] = \
job_id
return self.suggest_initial_job_id_get_endpoint.call_with_http_info(**kwargs)
def suggest_initial_job_id_patch(
self,
job_id,
**kwargs
):
"""Update a suggest initial jobs's metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.suggest_initial_job_id_patch(job_id, async_req=True)
>>> result = thread.get()
Args:
job_id (str): Unique ID of the job
Keyword Args:
job_patch (JobPatch): [optional]
            _return_http_data_only (bool): response data without the status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['job_id'] = \
job_id
return self.suggest_initial_job_id_patch_endpoint.call_with_http_info(**kwargs)
def suggest_initial_post(
self,
suggest_initial_request,
**kwargs
):
"""Suggest initial DoE experiments without a trained model # noqa: E501
        Suggest initial DoE experiments without a trained model. Performing the suggested experiments could serve as the basis for an initial dataset to train a model with. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.suggest_initial_post(suggest_initial_request, async_req=True)
>>> result = thread.get()
Args:
suggest_initial_request (SuggestInitialRequest):
Keyword Args:
            _return_http_data_only (bool): response data without the status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['suggest_initial_request'] = \
suggest_initial_request
return self.suggest_initial_post_endpoint.call_with_http_info(**kwargs)
def version_get(
self,
**kwargs
):
"""Get API and application versions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.version_get(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
VersionResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.version_get_endpoint.call_with_http_info(**kwargs)
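# A minimal, hypothetical usage sketch for the generated endpoints above. The package
# name ("experiment_client"), the API class ("DefaultApi"), and the module paths are
# assumptions made for illustration only and are not defined in this file; substitute
# the names of the actual generated package. The async_req pattern mirrors the
# docstrings: passing async_req=True returns a thread-like object whose .get() yields
# the result.
#
#   import experiment_client
#   from experiment_client.api import default_api
#   from experiment_client.model.suggest_initial_request import SuggestInitialRequest
#
#   configuration = experiment_client.Configuration(host="https://api.example.com")
#   with experiment_client.ApiClient(configuration) as api_client:
#       api = default_api.DefaultApi(api_client)
#
#       # Synchronous call: returns a VersionResponse
#       version = api.version_get()
#
#       # Start an initial-DoE suggestion job; the endpoint returns a job id (str)
#       request = SuggestInitialRequest()  # fields omitted; see the model definition
#       job_id = api.suggest_initial_post(request)
#
#       # Asynchronous variant
#       thread = api.version_get(async_req=True)
#       version = thread.get()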
/nni-3.0rc1-py3-none-macosx_10_9_x86_64.whl/nni_node/node_modules/moment/README.md
# [Moment.js](http://momentjs.com/)
[![NPM version][npm-version-image]][npm-url]
[![NPM downloads][npm-downloads-image]][npm-downloads-url]
[![MIT License][license-image]][license-url]
[![Build Status][travis-image]][travis-url]
[![Coverage Status][coveralls-image]][coveralls-url]
[![FOSSA Status][fossa-badge-image]][fossa-badge-url]
[![SemVer compatibility][semver-image]][semver-url]
A JavaScript date library for parsing, validating, manipulating, and formatting dates.
## Project Status
Moment.js is a legacy project, now in maintenance mode. In most cases, you should choose a different library.
For more details and recommendations, please see [Project Status](https://momentjs.com/docs/#/-project-status/) in the docs.
*Thank you.*
## Resources
- [Documentation](https://momentjs.com/docs/)
- [Changelog](CHANGELOG.md)
- [Stack Overflow](https://stackoverflow.com/questions/tagged/momentjs)
## License
Moment.js is freely distributable under the terms of the [MIT license][license-url].
[![FOSSA Status][fossa-large-image]][fossa-large-url]
[license-image]: https://img.shields.io/badge/license-MIT-blue.svg?style=flat
[license-url]: LICENSE
[npm-url]: https://npmjs.org/package/moment
[npm-version-image]: https://img.shields.io/npm/v/moment.svg?style=flat
[npm-downloads-image]: https://img.shields.io/npm/dm/moment.svg?style=flat
[npm-downloads-url]: https://npmcharts.com/compare/moment?minimal=true
[travis-url]: https://travis-ci.org/moment/moment
[travis-image]: https://img.shields.io/travis/moment/moment/develop.svg?style=flat
[coveralls-image]: https://coveralls.io/repos/moment/moment/badge.svg?branch=develop
[coveralls-url]: https://coveralls.io/r/moment/moment?branch=develop
[fossa-badge-image]: https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fmoment%2Fmoment.svg?type=shield
[fossa-badge-url]: https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fmoment%2Fmoment?ref=badge_shield
[fossa-large-image]: https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fmoment%2Fmoment.svg?type=large
[fossa-large-url]: https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fmoment%2Fmoment?ref=badge_large
[semver-image]: https://api.dependabot.com/badges/compatibility_score?dependency-name=moment&package-manager=npm_and_yarn&version-scheme=semver
[semver-url]: https://dependabot.com/compatibility-score.html?dependency-name=moment&package-manager=npm_and_yarn&version-scheme=semver
/pipelinewise-tap-snowflake-3.0.0.tar.gz/pipelinewise-tap-snowflake-3.0.0/tap_snowflake/sync_strategies/full_table.py
import singer
import tap_snowflake.sync_strategies.common as common
LOGGER = singer.get_logger('tap_snowflake')
BOOKMARK_KEYS = {'last_pk_fetched', 'max_pk_values', 'version', 'initial_full_table_complete'}
def get_max_pk_values(cursor, catalog_entry):
"""Get actual max primary key values from database"""
database_name = common.get_database_name(catalog_entry)
escaped_db = common.escape(database_name)
escaped_table = common.escape(catalog_entry.table)
key_properties = common.get_key_properties(catalog_entry)
escaped_columns = [common.escape(c) for c in key_properties]
sql = """SELECT {}
FROM {}.{}
ORDER BY {}
LIMIT 1
"""
select_column_clause = ', '.join(escaped_columns)
order_column_clause = ', '.join([pk + ' DESC' for pk in escaped_columns])
cursor.execute(sql.format(select_column_clause,
escaped_db,
escaped_table,
order_column_clause))
result = cursor.fetchone()
if result:
max_pk_values = dict(zip(key_properties, result))
else:
max_pk_values = {}
return max_pk_values
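# Illustrative only (not part of the original module): for a catalog entry that maps to
# PUBLIC.ORDERS with key_properties ['ID'], the query above renders roughly as
#   SELECT ID FROM PUBLIC.ORDERS ORDER BY ID DESC LIMIT 1
# i.e. it fetches the current maximum primary-key value, which generate_pk_clause()
# below expects to find in state under the 'max_pk_values' bookmark.
# (Exact identifier quoting depends on common.escape.)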
def generate_pk_clause(catalog_entry, state):
"""Generate primary key where clause to SQL select"""
key_properties = common.get_key_properties(catalog_entry)
escaped_columns = [common.escape(c) for c in key_properties]
max_pk_values = singer.get_bookmark(state,
catalog_entry.tap_stream_id,
'max_pk_values')
last_pk_fetched = singer.get_bookmark(state,
catalog_entry.tap_stream_id,
'last_pk_fetched')
if last_pk_fetched:
pk_comparisons = ['({} > {} AND {} <= {})'.format(common.escape(pk),
last_pk_fetched[pk],
common.escape(pk),
max_pk_values[pk])
for pk in key_properties]
else:
pk_comparisons = [f'{common.escape(pk)} <= {max_pk_values[pk]}' for pk in key_properties]
sql = ' WHERE {} ORDER BY {} ASC'.format(' AND '.join(pk_comparisons),
', '.join(escaped_columns))
return sql
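# Illustrative only (not part of the original module): with key_properties
# ['ID', 'UPDATED_AT'], last_pk_fetched = {'ID': 100, 'UPDATED_AT': 50} and
# max_pk_values = {'ID': 900, 'UPDATED_AT': 75}, the function returns roughly
#   " WHERE (ID > 100 AND ID <= 900) AND (UPDATED_AT > 50 AND UPDATED_AT <= 75)
#     ORDER BY ID, UPDATED_AT ASC"
# so an interrupted full-table sync can resume after the last primary key it emitted.
# (Exact identifier quoting again depends on common.escape.)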
def sync_table(snowflake_conn, catalog_entry, state, columns, stream_version):
"""Sync table with FULL_TABLE"""
common.whitelist_bookmark_keys(BOOKMARK_KEYS, catalog_entry.tap_stream_id, state)
bookmark = state.get('bookmarks', {}).get(catalog_entry.tap_stream_id, {})
version_exists = 'version' in bookmark
initial_full_table_complete = singer.get_bookmark(state,
catalog_entry.tap_stream_id,
'initial_full_table_complete')
state_version = singer.get_bookmark(state,
catalog_entry.tap_stream_id,
'version')
activate_version_message = singer.ActivateVersionMessage(
stream=catalog_entry.stream,
version=stream_version
)
# For the initial replication, emit an ACTIVATE_VERSION message
# at the beginning so the records show up right away.
if not initial_full_table_complete and not (version_exists and state_version is None):
singer.write_message(activate_version_message)
with snowflake_conn.connect_with_backoff() as open_conn:
with open_conn.cursor() as cur:
select_sql = common.generate_select_sql(catalog_entry, columns)
params = {}
common.sync_query(cur,
catalog_entry,
state,
select_sql,
columns,
stream_version,
params)
# clear max pk value and last pk fetched upon successful sync
singer.clear_bookmark(state, catalog_entry.tap_stream_id, 'max_pk_values')
singer.clear_bookmark(state, catalog_entry.tap_stream_id, 'last_pk_fetched')
singer.write_message(activate_version_message)
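# A minimal, hypothetical sketch of how the helpers above can be combined for a
# resumable full-table sync; `cur`, `catalog_entry`, `state` and `columns` are assumed
# to come from the tap's connection and discovery layers, and singer.write_bookmark is
# the standard singer-python helper.
#
#   max_pk_values = get_max_pk_values(cur, catalog_entry)
#   state = singer.write_bookmark(state, catalog_entry.tap_stream_id,
#                                 'max_pk_values', max_pk_values)
#   select_sql = common.generate_select_sql(catalog_entry, columns)
#   select_sql += generate_pk_clause(catalog_entry, state)
#   # ...execute select_sql and emit records, periodically bookmarking
#   # 'last_pk_fetched' so an interrupted run can resume where it stopped.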