| id (stringlengths 1–8) | text (stringlengths 6–1.05M) | dataset_id (stringclasses 1 value) |
---|---|---|
/bio-embeddings-allennlp-0.9.2.tar.gz/bio-embeddings-allennlp-0.9.2/allennlp/data/fields/array_field.py
|
from typing import Dict
import numpy
import torch
from overrides import overrides
from allennlp.data.fields.field import Field
class ArrayField(Field[numpy.ndarray]):
"""
A class representing an array, which could have arbitrary dimensions.
A batch of these arrays is padded to the max dimension length in the batch
for each dimension.
"""
def __init__(self,
array: numpy.ndarray,
padding_value: int = 0,
dtype: numpy.dtype = numpy.float32) -> None:
self.array = array
self.padding_value = padding_value
self.dtype = dtype
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {"dimension_" + str(i): shape
for i, shape in enumerate(self.array.shape)}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> torch.Tensor:
max_shape = [padding_lengths["dimension_{}".format(i)]
for i in range(len(padding_lengths))]
# Convert explicitly to an ndarray just in case it's a scalar
# (it'd end up not being an ndarray otherwise).
# Also, the explicit dtype declaration for `asarray` is necessary for scalars.
return_array = numpy.asarray(numpy.ones(max_shape, dtype=self.dtype) * self.padding_value,
dtype=self.dtype)
# If the tensor has a different shape from the largest tensor, pad dimensions with zeros to
# form the right shaped list of slices for insertion into the final tensor.
slicing_shape = list(self.array.shape)
if len(self.array.shape) < len(max_shape):
slicing_shape = slicing_shape + [0 for _ in range(len(max_shape) - len(self.array.shape))]
slices = tuple([slice(0, x) for x in slicing_shape])
return_array[slices] = self.array
tensor = torch.from_numpy(return_array)
return tensor
@overrides
def empty_field(self): # pylint: disable=no-self-use
# Pass the padding_value, so that any outer field, e.g., `ListField[ArrayField]` uses the
# same padding_value in the padded ArrayFields
return ArrayField(numpy.array([], dtype=self.dtype),
padding_value=self.padding_value,
dtype=self.dtype)
def __str__(self) -> str:
return f"ArrayField with shape: {self.array.shape} and dtype: {self.dtype}."
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/users/item/calendar/calendar_permissions/count/count_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from ......models.o_data_errors import o_data_error
class CountRequestBuilder():
"""
Provides operations to count the resources in the collection.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new CountRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/users/{user%2Did}/calendar/calendarPermissions/$count{?%24filter}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
async def get(self,request_configuration: Optional[CountRequestBuilderGetRequestConfiguration] = None) -> Optional[int]:
"""
Get the number of the resource
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[int]
"""
request_info = self.to_get_request_information(
request_configuration
)
from ......models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_primitive_async(request_info, "int", error_mapping)
def to_get_request_information(self,request_configuration: Optional[CountRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
Get the number of the resource
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = ["text/plain"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
@dataclass
class CountRequestBuilderGetQueryParameters():
"""
Get the number of the resource
"""
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "filter":
return "%24filter"
return original_name
# Filter items by property values
filter: Optional[str] = None
@dataclass
class CountRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[CountRequestBuilder.CountRequestBuilderGetQueryParameters] = None
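The query-parameter dataclass above maps the friendly attribute name `filter` to the percent-encoded OData name `%24filter` that the URI template `{?%24filter}` expects. A small standalone sketch of that mapping, using only the standard library (the class name and the sample filter expression are illustrative):

```python
from dataclasses import dataclass
from typing import Optional
from urllib.parse import unquote

@dataclass
class GetQueryParameters:
    # Friendly name exposed to callers; the URI template wants "%24filter".
    filter: Optional[str] = None

    def get_query_parameter(self, original_name: str) -> str:
        if original_name == "filter":
            return "%24filter"
        return original_name

params = GetQueryParameters(filter="startswith(emailAddress/name,'a')")
key = params.get_query_parameter("filter")
print(key)           # %24filter
print(unquote(key))  # $filter -> the request URL ends up with ...?$filter=...
```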
|
PypiClean
|
/pyonwater-0.2.5.tar.gz/pyonwater-0.2.5/README.md
|
# pyonwater
[EyeOnWater](https://eyeonwater.com) client library
Usage example:
```python
import asyncio
import aiohttp
from pyonwater import Account, Client
async def main():
account = Account(
eow_hostname="eyeonwater.com",
username="your EOW login",
password="your EOW password",
metric_measurement_system=False,
)
websession = aiohttp.ClientSession()
client = Client(websession=websession, account=account)
await client.authenticate()
meters = await account.fetch_meters(client=client)
print(f"{len(meters)} meters found")
for meter in meters:
await meter.read_meter(client=client)
print(f"meter {meter.meter_uuid} shows {meter.reading}")
print(f"meter {meter.meter_uuid} info {meter.meter_info}")
for d in meter.last_historical_data:
print(str(d["dt"]), d["reading"])
await websession.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
|
PypiClean
|
/co-awscli-login-0.1.0a6.tar.gz/co-awscli-login-0.1.0a6/README.rst
|
The awscli-login plugin allows retrieving temporary Amazon credentials by
authenticating against a SAML Identity Provider (IdP).
This application is supported under Linux, MacOS, and the `Windows Subsystem for Linux
<https://docs.microsoft.com/en-us/windows/wsl/about>`_.
Currently, Windows PowerShell, Command Prompt, and Git Shell
for Windows are not supported.
.. |--| unicode:: U+2013 .. en dash
Installation
------------
The simplest way to install the awscli-login plugin is to use pip::
$ pip install awscli-login
After awscli-login has been installed, run the following command
to enable the plugin::
$ aws configure set plugins.login awscli_login
Getting Started
-------------------
Before using awscli-login to retrieve temporary credentials, configure
one or more profiles for use with the plugin. To configure this
plugin, you must know the URL of the ECP Endpoint for your IdP. If
you do not have this information, contact your IdP administrator.
Here is an example configuring the default profile for use with the University
of Illinois at Urbana-Champaign's IdP::
$ aws login configure
ECP Endpoint URL [None]: https://shibboleth.illinois.edu/idp/profile/SAML2/SOAP/ECP
Username [None]:
Enable Keyring [False]:
Duo Factor [None]:
Role ARN [None]:
To log in, type the following command::
$ aws login
Username [username]: netid
Password: ********
Factor: passcode
Code: 123456789
The ``username`` and ``password`` are the values needed to authenticate
against the IdP configured for the selected profile. The ``factor``
is only required if your IdP requires Duo for authentication. If
it does not, leave ``factor`` blank. If your IdP does require Duo
then ``Factor`` may be one of ``auto``, ``push``, ``passcode``,
``sms``, or ``phone``. If ``factor`` is left blank, ``auto`` is
the default. The ``code`` is a Duo one-time code, such as one generated by
a YubiKey or received via SMS.
If you have access to more than one role, you will be prompted to choose
one. For example::
$ aws login
Username [username]: netid
Password: ********
Factor:
Please choose the role you would like to assume:
Account: 978517677611
[ 0 ]: Admin
Account: 520135271718
[ 1 ]: ReadOnlyUser
[ 2 ]: S3Admin
Selection: 2
To switch roles, first log out, then log in again selecting a different
role. Note that if you log in to the same IdP using the same username,
you will not be prompted for your password or Duo factor until
the IdP session expires::
$ aws logout
$ aws login
Username [netid]:
Please choose the role you would like to assume:
Account: 520135271718
[ 0 ]: TestUser
[ 1 ]: IAMUser
Selection: 0
Advanced Example
-------------------
It is possible to be logged in to more than one role at the same
time using multiple profiles. For example, consider the following
configuration involving two profiles |--| one called ``prod``, and the other
``test``::
$ aws --profile prod login configure
ECP Endpoint URL [None]: https://shibboleth.illinois.edu/idp/profile/SAML2/SOAP/ECP
Username [None]: netid
Enable Keyring [False]: True
Duo Factor [None]: auto
Role ARN [None]: arn:aws:iam::999999999999:role/Admin
$ aws --profile test login configure
ECP Endpoint URL [None]: https://shibboleth.illinois.edu/idp/profile/SAML2/SOAP/ECP
Username [None]: netid
Enable Keyring [False]: True
Duo Factor [None]: passcode
Role ARN [None]: arn:aws:iam::111111111111:role/Admin
This example involves several advanced features. First, we are
setting the username, factor, and role. This means we will not be
prompted for this information when logging in to these two profiles.
In addition, we are using a keyring. On the first login using one
of the profiles, the user will be prompted for his password. On
subsequent logins the user will not be prompted for his password
because it has been stored in a secure keyring.
For example, when we initially log in to test::
$ export AWS_PROFILE=test
$ aws login
Password: ********
Code: 123456789
We are only prompted for the password and code. We're prompted for
the password because this is the initial login, and the code because
this profile is configured for use with a passcode device such as
a YubiKey. We are now no longer prompted when we log in to prod::
$ aws --profile prod login
Even if the IdP session has expired in this case, we will not be
prompted for a password because it is stored in the keyring. The
user will receive either a phone call or a push to the default
Duo device.
Known Issues
------------
**Unable to authenticate after changing password**
After the user changes his IdP password, subsequent logins fail.
To remedy the situation, change the data stored in the keyring as follows::
$ keyring set awscli_login username@hostname_of_your_IdP
You may be prompted for your user login password by your operating
system, depending on how your key store is configured.
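The same entry can also be rewritten from Python with the ``keyring`` package;
this is a sketch equivalent to the CLI command above, where the service name
``awscli_login`` and the ``username@hostname_of_your_IdP`` key come from that
command::

    import getpass
    import keyring

    # Overwrite the password cached for this IdP so the next `aws login` succeeds.
    keyring.set_password("awscli_login",
                         "username@hostname_of_your_IdP",
                         getpass.getpass("New IdP password: "))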
**Windows issues**
Windows PowerShell, Command Prompt, and Git Shell for Windows are not
currently supported because of problems with auto-renewal of AWS credentials,
and other known issues.
|
PypiClean
|
/glmnet_py-0.1.0b2.tar.gz/glmnet_py-0.1.0b2/glmnet_py/mrelnet.py
|
# import packages/methods
import scipy
import ctypes
from wtmean import wtmean
from loadGlmLib import loadGlmLib
def mrelnet(x, is_sparse, irs, pcs, y, weights, offset, parm,
nobs, nvars, jd, vp, cl, ne, nx, nlam, flmin, ulam,
thresh, isd, jsd, intr, maxit, family):
# load shared fortran library
glmlib = loadGlmLib()
#
nr = y.shape[1]
wym = wtmean(y, weights)
wym = scipy.reshape(wym, (1, wym.size))
yt2 = (y - scipy.tile(wym, (y.shape[0], 1)))**2
nulldev = scipy.sum(wtmean(yt2,weights)*scipy.sum(weights))
if len(offset) == 0:
offset = y*0
is_offset = False
else:
if offset.shape != y.shape:
raise ValueError('Offset must match dimension of y')
is_offset = True
#
y = y - offset
# now convert types and allocate memory before calling
# glmnet fortran library
######################################
# --------- PROCESS INPUTS -----------
######################################
# force inputs into fortran order and scipy float64
copyFlag = False
x = x.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
irs = irs.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)
pcs = pcs.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)
y = y.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
weights = weights.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
jd = jd.astype(dtype = scipy.int32, order = 'F', copy = copyFlag)
vp = vp.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
cl = cl.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
ulam = ulam.astype(dtype = scipy.float64, order = 'F', copy = copyFlag)
######################################
# --------- ALLOCATE OUTPUTS ---------
######################################
# lmu
lmu = -1
lmu_r = ctypes.c_int(lmu)
# a0
a0 = scipy.zeros([nr, nlam], dtype = scipy.float64)
a0 = a0.astype(dtype = scipy.float64, order = 'F', copy = False)
a0_r = a0.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# ca
ca = scipy.zeros([nx, nr, nlam], dtype = scipy.float64)
ca = ca.astype(dtype = scipy.float64, order = 'F', copy = False)
ca_r = ca.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# ia
ia = -1*scipy.ones([nx], dtype = scipy.int32)
ia = ia.astype(dtype = scipy.int32, order = 'F', copy = False)
ia_r = ia.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
# nin
nin = -1*scipy.ones([nlam], dtype = scipy.int32)
nin = nin.astype(dtype = scipy.int32, order = 'F', copy = False)
nin_r = nin.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
# rsq
rsq = -1*scipy.ones([nlam], dtype = scipy.float64)
rsq = rsq.astype(dtype = scipy.float64, order = 'F', copy = False)
rsq_r = rsq.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# alm
alm = -1*scipy.ones([nlam], dtype = scipy.float64)
alm = alm.astype(dtype = scipy.float64, order = 'F', copy = False)
alm_r = alm.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# nlp
nlp = -1
nlp_r = ctypes.c_int(nlp)
# jerr
jerr = -1
jerr_r = ctypes.c_int(jerr)
# ###################################
# main glmnet fortran caller
# ###################################
if is_sparse:
# sparse multnet
glmlib.multspelnet_(
ctypes.byref(ctypes.c_double(parm)),
ctypes.byref(ctypes.c_int(nobs)),
ctypes.byref(ctypes.c_int(nvars)),
ctypes.byref(ctypes.c_int(nr)),
x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
pcs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
irs.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
weights.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.byref(ctypes.c_int(ne)),
ctypes.byref(ctypes.c_int(nx)),
ctypes.byref(ctypes.c_int(nlam)),
ctypes.byref(ctypes.c_double(flmin)),
ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.byref(ctypes.c_double(thresh)),
ctypes.byref(ctypes.c_int(isd)),
ctypes.byref(ctypes.c_int(jsd)),
ctypes.byref(ctypes.c_int(intr)),
ctypes.byref(ctypes.c_int(maxit)),
ctypes.byref(lmu_r),
a0_r,
ca_r,
ia_r,
nin_r,
rsq_r,
alm_r,
ctypes.byref(nlp_r),
ctypes.byref(jerr_r)
)
else:
# call fortran multnet routine
glmlib.multelnet_(
ctypes.byref(ctypes.c_double(parm)),
ctypes.byref(ctypes.c_int(nobs)),
ctypes.byref(ctypes.c_int(nvars)),
ctypes.byref(ctypes.c_int(nr)),
x.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
y.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
weights.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
jd.ctypes.data_as(ctypes.POINTER(ctypes.c_int)),
vp.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
cl.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.byref(ctypes.c_int(ne)),
ctypes.byref(ctypes.c_int(nx)),
ctypes.byref(ctypes.c_int(nlam)),
ctypes.byref(ctypes.c_double(flmin)),
ulam.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
ctypes.byref(ctypes.c_double(thresh)),
ctypes.byref(ctypes.c_int(isd)),
ctypes.byref(ctypes.c_int(jsd)),
ctypes.byref(ctypes.c_int(intr)),
ctypes.byref(ctypes.c_int(maxit)),
ctypes.byref(lmu_r),
a0_r,
ca_r,
ia_r,
nin_r,
rsq_r,
alm_r,
ctypes.byref(nlp_r),
ctypes.byref(jerr_r)
)
# ###################################
# post process results
# ###################################
# check for error
if (jerr_r.value > 0):
raise ValueError("Fatal glmnet error in library call : error code = ", jerr_r.value)
elif (jerr_r.value < 0):
print("Warning: Non-fatal error in glmnet library call: error code = ", jerr_r.value)
print("Check results for accuracy. Partial or no results returned.")
# clip output to correct sizes
lmu = lmu_r.value
a0 = a0[0:nr, 0:lmu]
ca = ca[0:nx, 0:nr, 0:lmu]
ia = ia[0:nx]
nin = nin[0:lmu]
rsq = rsq[0:lmu]
alm = alm[0:lmu]
# ninmax
ninmax = max(nin)
# fix first value of alm (from inf to correct value)
if ulam[0] == 0.0:
t1 = scipy.log(alm[1])
t2 = scipy.log(alm[2])
alm[0] = scipy.exp(2*t1 - t2)
# create return fit dictionary
if nr > 1:
dfmat = a0.copy()
dd = scipy.array([nvars, lmu], dtype = scipy.integer)
beta_list = list()
if ninmax > 0:
# TODO: is the reshape here done right?
ca = scipy.reshape(ca, (nx, nr, lmu))
ca = ca[0:ninmax, :, :]
ja = ia[0:ninmax] - 1 # ia is 1-indexed in fortran
oja = scipy.argsort(ja)
ja1 = ja[oja]
df = scipy.any(scipy.absolute(ca) > 0, axis=1)
df = scipy.sum(df, axis = 0)
df = scipy.reshape(df, (1, df.size))
for k in range(0, nr):
ca1 = scipy.reshape(ca[:,k,:], (ninmax, lmu))
cak = ca1[oja,:]
dfmat[k, :] = scipy.sum(scipy.absolute(cak) > 0, axis = 0)
beta = scipy.zeros([nvars, lmu], dtype = scipy.float64)
beta[ja1, :] = cak
beta_list.append(beta)
else:
for k in range(0, nr):
dfmat[k, :] = scipy.zeros([1, lmu], dtype = scipy.float64)
beta_list.append(scipy.zeros([nvars, lmu], dtype = scipy.float64))
#
df = scipy.zeros([1, lmu], dtype = scipy.float64)
#
fit = dict()
fit['beta'] = beta_list
fit['dfmat']= dfmat
else:
dd = scipy.array([nvars, lmu], dtype = scipy.integer)
if ninmax > 0:
ca = ca[0:ninmax,:];
df = scipy.sum(scipy.absolute(ca) > 0, axis = 0);
ja = ia[0:ninmax] - 1; # ia is 1-indexed in fortran
oja = scipy.argsort(ja)
ja1 = ja[oja]
beta = scipy.zeros([nvars, lmu], dtype = scipy.float64);
beta[ja1, :] = ca[oja, :];
else:
beta = scipy.zeros([nvars,lmu], dtype = scipy.float64);
df = scipy.zeros([1,lmu], dtype = scipy.float64);
fit['beta'] = beta
fit['a0'] = a0
fit['dev'] = rsq
fit['nulldev'] = nulldev
fit['df'] = df
fit['lambdau'] = alm
fit['npasses'] = nlp_r.value
fit['jerr'] = jerr_r.value
fit['dim'] = dd
fit['offset'] = is_offset
fit['class'] = 'mrelnet'
# ###################################
# return to caller
# ###################################
return fit
#-----------------------------------------
# end of method mrelnet
#-----------------------------------------
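The output-allocation pattern used above (a numpy buffer created in Fortran order, handed to the shared library as a raw ctypes pointer, then clipped to the sizes the solver reports) can be seen in isolation with a toy sketch. There is no Fortran call here; the pointer is written directly to show that the numpy array observes the change (the names and values are illustrative):

```python
import ctypes
import numpy

nlam = 5
# Output buffer exactly as mrelnet allocates it: sentinel values, Fortran order.
alm = -1 * numpy.ones([nlam], dtype=numpy.float64)
alm = alm.astype(dtype=numpy.float64, order='F', copy=False)
alm_r = alm.ctypes.data_as(ctypes.POINTER(ctypes.c_double))

# Stand-in for the Fortran solver: fill the first three entries via the pointer.
for i, value in enumerate([0.9, 0.5, 0.1]):
    alm_r[i] = value
lmu = 3  # number of lambda values the "solver" actually produced

# Post-processing step: clip the buffer to the reported size.
alm = alm[0:lmu]
print(alm)  # [0.9 0.5 0.1]
```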
|
PypiClean
|
/pyqt-top-left-right-file-list-widget-0.0.1.tar.gz/pyqt-top-left-right-file-list-widget-0.0.1/pyqt_top_left_right_file_list_widget/topLeftRightFileListWidget.py
|
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QWidget, QFileDialog, QCheckBox
from pyqt_file_list_widget.fileListWidget import FileListWidget
from pyqt_svg_icon_pushbutton import SvgIconPushButton
from simplePyQt5.topLabelBottomWidget import TopLabelBottomWidget
class TopLeftRightFileListWidget(QWidget):
fileAdded = pyqtSignal(list)
fileRemoved = pyqtSignal(list)
def __init__(self, parent=None):
super().__init__(parent)
self.__extensions = ''
self.__duplicate_flag = False
self.__initUi()
def __initUi(self):
self.__addBtn = SvgIconPushButton()
self.__delBtn = SvgIconPushButton()
self.__clearBtn = SvgIconPushButton()
self.__addBtn.setIcon('ico/add.svg')
self.__delBtn.setIcon('ico/delete.svg')
self.__clearBtn.setIcon('ico/clear.svg')
self.__addBtn.clicked.connect(self.__add)
self.__delBtn.clicked.connect(self.__delete)
self.__clearBtn.clicked.connect(self.__clear)
btns = [self.__addBtn, self.__delBtn, self.__clearBtn]
self.__addBtn.setToolTip('Add')
self.__delBtn.setToolTip('Delete')
self.__clearBtn.setToolTip('Clear')
self.__fileListWidget = FileListWidget()
self.__fileListWidget.currentItemChanged.connect(self.__currentItemChanged)
self.__onlyFileNameChkBox = QCheckBox('Show file name only')
self.__onlyFileNameChkBox.stateChanged.connect(self.__fileListWidget.setFilenameOnly)
self.__mainWidget = TopLabelBottomWidget()
self.__mainWidget.setLabel('List of files')
self.__mainWidget.setLeftWidgets([self.__onlyFileNameChkBox])
self.__mainWidget.setRightWidgets(btns)
self.__mainWidget.addBottomWidget(self.__fileListWidget)
lay = self.__mainWidget.layout()
lay.setContentsMargins(5, 5, 5, 5)
self.setLayout(lay)
self.__btnToggled()
def __currentItemChanged(self, i1, i2):
self.__btnToggled()
def __btnToggled(self):
f1 = self.__fileListWidget.count() > 0
f2 = self.__fileListWidget.currentItem() is not None
self.__delBtn.setEnabled(f1 and f2)
self.__clearBtn.setEnabled(f1)
def setLabel(self, text: str):
self.__mainWidget.setLabel(text)
def setExtensions(self, extensions: list):
self.__extensions = extensions
def isDuplicateEnabled(self) -> bool:
return self.__duplicate_flag
def setDuplicateEnabled(self, f: bool):
self.__duplicate_flag = f
self.__fileListWidget.setDuplicateEnabled(f)
def __add(self):
ext_lst = self.__extensions if self.__extensions else 'All Files (*.*)'
filenames = QFileDialog.getOpenFileNames(self, 'Open Files', '', ext_lst)
if filenames[0]:
filenames = filenames[0]
self.__fileListWidget.addFilenames(filenames)
self.fileAdded.emit(filenames)
self.__btnToggled()
def __delete(self):
filenames = self.__fileListWidget.getSelectedFilenames()
self.__fileListWidget.removeSelectedRows()
self.__btnToggled()
self.fileRemoved.emit(filenames)
def __clear(self):
self.__fileListWidget.clear()
self.__btnToggled()
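A minimal usage sketch for the widget above, assuming the package and its dependencies (`pyqt_file_list_widget`, `pyqt_svg_icon_pushbutton`, `simplePyQt5`) are installed and that the class is importable from the module path shown; the label, filter string, and connected slots are illustrative:

```python
import sys
from PyQt5.QtWidgets import QApplication
from pyqt_top_left_right_file_list_widget.topLeftRightFileListWidget import TopLeftRightFileListWidget

app = QApplication(sys.argv)

widget = TopLeftRightFileListWidget()
widget.setLabel('Files to process')
# setExtensions feeds QFileDialog.getOpenFileNames, so a Qt filter string works here.
widget.setExtensions('Images (*.png *.jpg);;All Files (*.*)')
widget.fileAdded.connect(lambda filenames: print('added:', filenames))
widget.fileRemoved.connect(lambda filenames: print('removed:', filenames))
widget.show()

sys.exit(app.exec_())
```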
|
PypiClean
|
/monk_keras_cuda92_test-0.0.1.tar.gz/monk_keras_cuda92_test-0.0.1/monk/keras_prototype.py
|
from monk.tf_keras_1.finetune.imports import *
from monk.system.imports import *
from monk.tf_keras_1.finetune.level_14_master_main import prototype_master
class prototype(prototype_master):
'''
Main class for Keras Backend
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
@accepts("self", verbose=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
self.system_dict["library"] = "Keras";
self.custom_print("Keras Version: {}".format(keras.__version__));
self.custom_print("Tensorflow Version: {}".format(tf.__version__));
self.custom_print("");
###############################################################################################################################################
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]],
eval_infer=None, resume_train=None, copy_from=None, pseudo_copy_from=None, summary=None, post_trace=False)
@accepts("self", str, str, eval_infer=bool, resume_train=bool, copy_from=[list, bool], pseudo_copy_from=[list, bool], summary=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Prototype(self, project_name, experiment_name, eval_infer=False, resume_train=False, copy_from=False, pseudo_copy_from=False, summary=False):
'''
Create project and experiment for instantiation and running the experiments
Args:
project_name (str): Project Name
experiment_name (str): Experiment Name
eval_infer (bool): If set as True, model is loaded in evaluation mode
resume_train (bool): If set as True, model is loaded from last checkpoint
copy_from (list): [project, experiment] to copy from
pseudo_copy_from (list): For creating sub-experiments while in hyper-parametric analysis state
summary (bool): Dummy variable
Returns:
None
'''
self.set_system_project(project_name);
self.set_system_experiment(experiment_name, eval_infer=eval_infer, resume_train=resume_train, copy_from=copy_from,
pseudo_copy_from=pseudo_copy_from, summary=summary);
self.custom_print("Experiment Details");
self.custom_print(" Project: {}".format(self.system_dict["project_name"]));
self.custom_print(" Experiment: {}".format(self.system_dict["experiment_name"]));
self.custom_print(" Dir: {}".format(self.system_dict["experiment_dir"]));
self.custom_print("");
################################################################################################################################################
###############################################################################################################################################
@warning_checks(None, dataset_path=None, path_to_csv=None, delimiter=None,
model_name=None, freeze_base_network=None, num_epochs=["lt", 100], post_trace=False)
@error_checks(None, dataset_path=["folder", "r"], path_to_csv=["file", "r"], delimiter=["in", [",", ";", "-", " "]],
model_name=None, freeze_base_network=None, num_epochs=["gte", 1], post_trace=False)
@accepts("self", dataset_path=[str, list, bool], path_to_csv=[str, list, bool], delimiter=str,
model_name=str, freeze_base_network=bool, num_epochs=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Default(self, dataset_path=False, path_to_csv=False, delimiter=",", model_name="resnet18_v1", freeze_base_network=True, num_epochs=10):
'''
Use monk in default (quick prototyping) mode
Args:
dataset_path (str, list): Path to Dataset folder
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
path_to_csv (str, list): Path to csv file pointing towards images
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
delimiter (str): Delimiter for csv file
model_name (str): Base model name
freeze_base_network (bool): If True, the base network is frozen
num_epochs (int): Number of epochs to train the data
Returns:
None
'''
if(self.system_dict["states"]["eval_infer"]):
self.Dataset_Params(dataset_path=dataset_path, path_to_csv=path_to_csv, delimiter=delimiter);
self.Dataset();
else:
input_size=224;
self.Dataset_Params(dataset_path=dataset_path, path_to_csv=path_to_csv, delimiter=delimiter,
split=0.7, input_size=input_size, batch_size=4, shuffle_data=True, num_processors=psutil.cpu_count());
self.apply_random_horizontal_flip(probability=0.8, train=True, val=True);
self.apply_mean_subtraction(mean=[0.485, 0.456, 0.406], train=True, val=True, test=True);
self.Dataset();
self.Model_Params(model_name=model_name, freeze_base_network=freeze_base_network, use_gpu=True, gpu_memory_fraction=0.6, use_pretrained=True);
self.Model();
model_name = self.system_dict["model"]["params"]["model_name"];
if("resnet" in model_name or "vgg" in model_name or "dense" in model_name or "xception" in model_name):
self.optimizer_sgd(0.0001, momentum=0.9);
self.lr_plateau_decrease(factor=0.1, patience=max(min(10, num_epochs//3), 1), verbose=True);
self.loss_crossentropy();
elif("nas" in model_name):
self.optimizer_rmsprop(0.0001, weight_decay=0.00004, momentum=0.9);
self.lr_step_decrease(2, gamma=0.97);
self.loss_crossentropy();
elif("mobile" in model_name):
self.optimizer_sgd(0.0001, weight_decay=0.00004, momentum=0.9);
self.lr_step_decrease(1, gamma=0.97);
self.loss_crossentropy();
elif("inception" in model_name):
self.optimizer_sgd(0.0001, weight_decay=0.0001, momentum=0.9);
self.lr_step_decrease(1, gamma=0.9);
self.loss_crossentropy();
self.Training_Params(num_epochs=num_epochs, display_progress=True, display_progress_realtime=True,
save_intermediate_models=True, intermediate_model_prefix="intermediate_model_", save_training_logs=True);
self.system_dict["hyper-parameters"]["status"] = True;
save(self.system_dict);
###############################################################################################################################################
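# Illustrative quick-start for the Default() workflow documented above. This is
# a sketch, not part of the library: the project/experiment names, dataset path,
# and model name are hypothetical placeholders.
#
#   from monk.keras_prototype import prototype
#
#   ktf = prototype(verbose=1)
#   ktf.Prototype("sample-project", "exp-1")
#   ktf.Default(dataset_path="./datasets/cats-vs-dogs/train",
#               model_name="resnet50",
#               freeze_base_network=True,
#               num_epochs=5)
#   ktf.Train()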
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Summary(self):
'''
Print summary of entire project
Args:
None
Returns:
None
'''
print_summary(self.system_dict["fname_relative"]);
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Models(self):
'''
List all base models supported.
Args:
None
Returns:
None
'''
self.print_list_models();
###############################################################################################################################################
## Will be deprecated in v2.0
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Layers(self):
'''
List all layers available for appending the base model.
Args:
None
Returns:
None
'''
self.print_list_layers_transfer_learning();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Layers_Transfer_Learning(self):
'''
List all layers available for appending the base model.
Args:
None
Returns:
None
'''
self.print_list_layers_transfer_learning();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Layers_Custom_Model(self):
'''
List all layers available for building a custom model.
Args:
None
Returns:
None
'''
self.print_list_layers_custom_model();
###############################################################################################################################################
## Will be deprecated in v2.0
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Activations(self):
'''
List all activations available for appending the base model.
Args:
None
Returns:
None
'''
self.print_list_activations_transfer_learning();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Activations_Transfer_Learning(self):
'''
List all activations available for appending the base model.
Args:
None
Returns:
None
'''
self.print_list_activations_transfer_learning();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Activations_Custom_Model(self):
'''
List all activations available for building a custom model.
Args:
None
Returns:
None
'''
self.print_list_activations_custom_model();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Losses(self):
'''
List all loss functions available.
Args:
None
Returns:
None
'''
self.print_list_losses();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Optimizers(self):
'''
List all optimizer functions available.
Args:
None
Returns:
None
'''
self.print_list_optimizers();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Schedulers(self):
'''
List all learning rate scheduler functions available.
Args:
None
Returns:
None
'''
self.print_list_schedulers();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Transforms(self):
'''
List all data transformation functions available.
Args:
None
Returns:
None
'''
self.print_list_transforms();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def List_Blocks(self):
'''
List all blocks available for building a custom model.
Args:
None
Returns:
None
'''
self.print_list_blocks();
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Learning_Rates(self, analysis_name, lr_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse learning rate
Takes in a list of learning rates and trains on a part of dataset
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
lr_list (list): List of learning rates.
percent_data (int): Percentage of complete dataset to run experiments on.
num_epochs (int): Number of epochs for each sub-experiment
state ("str"): If set as "keep_all", keeps every file in the sub-experiment
If set as "keep_none", keeps only comparison files for each experiment
Returns:
dict: Tabular data on training_accuracy, validation_accuracy, training_loss, validation_loss and training_time for each experiment.
'''
from monk.keras_prototype import prototype
project = analysis_name;
self.custom_print("");
self.custom_print("Running Learning rate analysis"); #Change 1
self.custom_print("Analysis Name : {}".format(project));
self.custom_print("");
for i in range(len(lr_list)): #Change 2
ktf_ = prototype(verbose=0);
self.custom_print("Running experiment : {}/{}".format(i+1, len(lr_list))); #Change 3
experiment = "Learning_Rate_" + str(lr_list[i]); #Change 4
self.custom_print("Experiment name : {}".format(experiment))
ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
ktf_.Dataset_Percent(percent_data);
dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
csv_train = ktf_.system_dict["dataset"]["csv_train"];
csv_val = ktf_.system_dict["dataset"]["csv_val"];
if(dataset_type=="train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
elif(dataset_type=="csv_train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="csv_train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
ktf_.update_learning_rate(lr_list[i]) #Change 5
ktf_.Reload(); #Change 6
ktf_.update_num_epochs(num_epochs);
ktf_.update_display_progress_realtime(False)
ktf_.update_save_intermediate_models(False);
ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
ktf_.set_dataset_dataloader(estimate=True);
total_time_per_epoch = ktf_.get_training_estimate();
total_time = total_time_per_epoch*num_epochs;
if(int(total_time//60) == 0):
self.custom_print("Estimated time : {} sec".format(total_time));
else:
self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
ktf_.Train();
self.custom_print("Experiment Complete");
self.custom_print("\n");
self.custom_print("Comparing Experiments");
from monk.compare_prototype import compare
ctf_ = compare(verbose=0);
ctf_.Comparison("Comparison_" + analysis_name);
self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
training_accuracies = [];
validation_accuracies = [];
training_losses = [];
validation_losses = [];
tabular_data = [];
for i in range(len(lr_list)): #Change 7
project = analysis_name;
experiment = "Learning_Rate_" + str(lr_list[i]); #Change 8
ctf_.Add_Experiment(project, experiment)
tmp = [];
tmp.append(experiment);
training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
tabular_data.append(tmp)
ctf_.Generate_Statistics();
self.custom_print("Generated statistics post all epochs");
self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
self.custom_print("");
return_dict = {};
for i in range(len(tabular_data)):
return_dict[tabular_data[i][0]] = {};
return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
system_dict = read_json(fname);
return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
if(state=="keep_none"):
shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
return return_dict
###############################################################################################################################################
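# Illustrative call for the learning-rate analyser above (a sketch continuing the
# quick-start example after Default(); the analysis name, learning rates, and
# data percentage are placeholders):
#
#   results = ktf.Analyse_Learning_Rates("lr-sweep", [0.1, 0.01, 0.001], 5,
#                                        num_epochs=2, state="keep_none")
#   # `results` maps each sub-experiment name to its final train/val accuracy,
#   # loss, and training time.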
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Input_Sizes(self, analysis_name, inp_size_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse input sizes
Takes in a list of input sizes and trains on a part of dataset
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
inp_size_list (list): List of input_sizes.
percent_data (int): Percentage of complete dataset to run experiments on.
num_epochs (int): Number of epochs for each sub-experiment
state ("str"): If set as "keep_all", keeps every file in the sub-experiment
If set as "keep_none", keeps only comparison files for each experiment
Returns:
dict: Tabular data on training_accuracy, validation_accuracy, training_loss, validation_loss and training_time for each experiment.
'''
from monk.keras_prototype import prototype
project = analysis_name;
self.custom_print("");
self.custom_print("Running Input Size analysis"); #Change 1
self.custom_print("Analysis Name : {}".format(project));
self.custom_print("");
for i in range(len(inp_size_list)): #Change 2
ktf_ = prototype(verbose=0);
self.custom_print("Running experiment : {}/{}".format(i+1, len(inp_size_list))); #Change 3
experiment = "Input_Size_" + str(inp_size_list[i]); #Change 4
self.custom_print("Experiment name : {}".format(experiment))
ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
ktf_.Dataset_Percent(percent_data);
dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
csv_train = ktf_.system_dict["dataset"]["csv_train"];
csv_val = ktf_.system_dict["dataset"]["csv_val"];
if(dataset_type=="train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
elif(dataset_type=="csv_train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="csv_train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
ktf_.update_input_size(inp_size_list[i]) #Change 5
ktf_.Reload(); #Change 6
ktf_.update_num_epochs(num_epochs);
ktf_.update_display_progress_realtime(False)
ktf_.update_save_intermediate_models(False);
ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
ktf_.set_dataset_dataloader(estimate=True);
total_time_per_epoch = ktf_.get_training_estimate();
total_time = total_time_per_epoch*num_epochs;
if(int(total_time//60) == 0):
self.custom_print("Estimated time : {} sec".format(total_time));
else:
self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
ktf_.Train();
self.custom_print("Experiment Complete");
self.custom_print("\n");
self.custom_print("Comparing Experiments");
from monk.compare_prototype import compare
ctf_ = compare(verbose=0);
ctf_.Comparison("Comparison_" + analysis_name);
self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
training_accuracies = [];
validation_accuracies = [];
training_losses = [];
validation_losses = [];
tabular_data = [];
for i in range(len(inp_size_list)): #Change 7
project = analysis_name;
experiment = "Input_Size_" + str(inp_size_list[i]); #Change 8
ctf_.Add_Experiment(project, experiment)
tmp = [];
tmp.append(experiment);
training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
tabular_data.append(tmp)
ctf_.Generate_Statistics();
self.custom_print("Generated statistics post all epochs");
self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
self.custom_print("");
return_dict = {};
for i in range(len(tabular_data)):
return_dict[tabular_data[i][0]] = {};
return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
system_dict = read_json(fname);
return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
if(state=="keep_none"):
shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
return return_dict
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Batch_Sizes(self, analysis_name, batch_size_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse batch sizes
Takes in a list of batch sizes and trains on a part of dataset
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
batch_size_list (list): List of batch sizes.
percent_data (int): Percentage of complete dataset to run experiments on.
num_epochs (int): Number of epochs for each sub-experiment
state ("str"): If set as "keep_all", keeps every file in the sub-experiment
If set as "keep_none", keeps only comparison files for each experiment
Returns:
dict: Tabular data on training_accuracy, validation_accuracy, training_loss, validation_loss and training_time for each experiment.
'''
from monk.keras_prototype import prototype
project = analysis_name;
self.custom_print("");
self.custom_print("Running Batch Size analysis"); #Change 1
self.custom_print("Analysis Name : {}".format(project));
self.custom_print("");
for i in range(len(batch_size_list)): #Change 2
ktf_ = prototype(verbose=0);
self.custom_print("Running experiment : {}/{}".format(i+1, len(batch_size_list))); #Change 3
experiment = "Batch_Size_" + str(batch_size_list[i]); #Change 4, 5
self.custom_print("Experiment name : {}".format(experiment))
ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
ktf_.Dataset_Percent(percent_data);
dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
csv_train = ktf_.system_dict["dataset"]["csv_train"];
csv_val = ktf_.system_dict["dataset"]["csv_val"];
if(dataset_type=="train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
elif(dataset_type=="csv_train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="csv_train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
ktf_.update_batch_size(batch_size_list[i]) #Change 6
ktf_.Reload(); #Change 7
ktf_.update_num_epochs(num_epochs);
ktf_.update_display_progress_realtime(False);
ktf_.update_save_intermediate_models(False);
ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
ktf_.set_dataset_dataloader(estimate=True);
total_time_per_epoch = ktf_.get_training_estimate();
total_time = total_time_per_epoch*num_epochs;
if(int(total_time//60) == 0):
self.custom_print("Estimated time : {} sec".format(total_time));
else:
self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
ktf_.Train();
self.custom_print("Experiment Complete");
self.custom_print("\n");
self.custom_print("Comparing Experiments");
from monk.compare_prototype import compare
ctf_ = compare(verbose=0);
ctf_.Comparison("Comparison_" + analysis_name);
self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
training_accuracies = [];
validation_accuracies = [];
training_losses = [];
validation_losses = [];
tabular_data = [];
for i in range(len(batch_size_list)): #Change 8
project = analysis_name;
experiment = "Batch_Size_" + str(batch_size_list[i]); #Change 9, 10
ctf_.Add_Experiment(project, experiment)
tmp = [];
tmp.append(experiment);
training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
tabular_data.append(tmp)
ctf_.Generate_Statistics();
self.custom_print("Generated statistics post all epochs");
self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
self.custom_print("");
return_dict = {};
for i in range(len(tabular_data)):
return_dict[tabular_data[i][0]] = {};
return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
system_dict = read_json(fname);
return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
if(state=="keep_none"):
shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
return return_dict
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Models(self, analysis_name, model_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse base models
Takes in a list of base models and trains on a part of dataset
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
model_list (list of list): List of base models.
The format is [model_name_string, freeze_base_model_bool, use_pretrained_model_bool]
1) First arg - Model name in string
2) Second arg - Whether to freeze base model or not
3) Third arg - Whether to use a pretrained model or randomly initialized weights
percent_data (int): Percentage of complete dataset to run experiments on.
num_epochs (int): Number of epochs for each sub-experiment
state ("str"): If set as "keep_all", keeps every file in the sub-experiment
If set as "keep_none", keeps only comparison files for each experiment
Returns:
dict: Tabular data on training_accuracy, validation_accuracy, training_loss, validation_loss and training_time for each experiment.
'''
from monk.keras_prototype import prototype
project = analysis_name;
self.custom_print("");
self.custom_print("Running Model analysis"); #Change 1
self.custom_print("Analysis Name : {}".format(project));
self.custom_print("");
for i in range(len(model_list)): #Change 2
ktf_ = prototype(verbose=0);
self.custom_print("Running experiment : {}/{}".format(i+1, len(model_list))); #Change 3
if(model_list[i][1]):
experiment = "Model_" + str(model_list[i][0]) + "_freeze_base"; #Change 4, 5
else:
experiment = "Model_" + str(model_list[i][0]) + "_unfreeze_base";
if(model_list[i][2]):
experiment += "_pretrained";
else:
experiment += "_uninitialized";
self.custom_print("Experiment name : {}".format(experiment))
ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
ktf_.Dataset_Percent(percent_data);
dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
csv_train = ktf_.system_dict["dataset"]["csv_train"];
csv_val = ktf_.system_dict["dataset"]["csv_val"];
if(dataset_type=="train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
elif(dataset_type=="csv_train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="csv_train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
ktf_.update_model_name(model_list[i][0]) #Change 6
ktf_.update_freeze_base_network(model_list[i][1])
ktf_.update_use_pretrained(model_list[i][2])
ktf_.Reload(); #Change 7
ktf_.update_num_epochs(num_epochs);
ktf_.update_display_progress_realtime(False)
ktf_.update_save_intermediate_models(False);
ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
ktf_.set_dataset_dataloader(estimate=True);
total_time_per_epoch = ktf_.get_training_estimate();
total_time = total_time_per_epoch*num_epochs;
if(int(total_time//60) == 0):
self.custom_print("Estimated time : {} sec".format(total_time));
else:
self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
ktf_.Train();
self.custom_print("Experiment Complete");
self.custom_print("\n");
self.custom_print("Comparing Experiments");
from monk.compare_prototype import compare
ctf_ = compare(verbose=0);
ctf_.Comparison("Comparison_" + analysis_name);
self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
training_accuracies = [];
validation_accuracies = [];
training_losses = [];
validation_losses = [];
tabular_data = [];
for i in range(len(model_list)): #Change 8
project = analysis_name;
if(model_list[i][1]):
experiment = "Model_" + str(model_list[i][0]) + "_freeze_base"; #Change 9, 10
else:
experiment = "Model_" + str(model_list[i][0]) + "_unfreeze_base";
if(model_list[i][2]):
experiment += "_pretrained";
else:
experiment += "_uninitialized";
ctf_.Add_Experiment(project, experiment)
tmp = [];
tmp.append(experiment);
training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
tabular_data.append(tmp)
ctf_.Generate_Statistics();
self.custom_print("Generated statistics post all epochs");
self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
self.custom_print("");
return_dict = {};
for i in range(len(tabular_data)):
return_dict[tabular_data[i][0]] = {};
return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
system_dict = read_json(fname);
return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
if(state=="keep_none"):
shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
return return_dict
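# A hypothetical usage sketch (not from the library docs): `ptf_` stands for a
# prototype object whose dataset and training parameters were already set up
# elsewhere, and the backbone names below are placeholders for whatever models
# the active backend supports.
#
#     model_list = [["resnet50", True, True],
#                   ["densenet121", False, True],
#                   ["mobilenet", True, False]]
#     analysis = ptf_.Analyse_Models("model_search", model_list, 10,
#                                    num_epochs=2, state="keep_none")
#     # Result keys follow the experiment naming used in this method, e.g.
#     # analysis["Model_resnet50_freeze_base_pretrained"]["validation_accuracy"]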
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Optimizers(self, analysis_name, optimizer_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse optimizers
Takes in a list of optimizers and trains each on a part of the dataset
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
optimizer_list (list): List of optimizers.
percent_data (int): Percentage of complete dataset to run experiments on.
num_epochs (int): Number of epochs for each sub-experiment
state (str): If set as "keep_all", keeps every file in the sub-experiment
If set as "keep_none", keeps only comparison files for each experiment
Returns:
dict: Tabular data on training_accuracy, validation_accuracy, training_loss, validation_loss and training_time for each experiment.
'''
from monk.keras_prototype import prototype
project = analysis_name;
self.custom_print("");
self.custom_print("Running Optimizer analysis"); #Change 1
self.custom_print("Analysis Name : {}".format(project));
self.custom_print("");
for i in range(len(optimizer_list)): #Change 2
ktf_ = prototype(verbose=0);
self.custom_print("Running experiment : {}/{}".format(i+1, len(optimizer_list))); #Change 3
experiment = "Optimizer_" + str(optimizer_list[i]); #Change 4, 5
self.custom_print("Experiment name : {}".format(experiment))
ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
ktf_.Dataset_Percent(percent_data);
dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
csv_train = ktf_.system_dict["dataset"]["csv_train"];
csv_val = ktf_.system_dict["dataset"]["csv_val"];
if(dataset_type=="train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
elif(dataset_type=="csv_train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="csv_train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
lr = ktf_.system_dict["hyper-parameters"]["learning_rate"]
if(optimizer_list[i] == "adagrad"): #Change 6
ktf_.optimizer_adagrad(lr);
elif(optimizer_list[i] == "adadelta"):
ktf_.optimizer_adadelta(lr);
elif(optimizer_list[i] == "adam"):
ktf_.optimizer_adam(lr);
elif(optimizer_list[i] == "adamax"):
ktf_.optimizer_adamax(lr);
elif(optimizer_list[i] == "rmsprop"):
ktf_.optimizer_rmsprop(lr);
elif(optimizer_list[i] == "nesterov_adam"):
ktf_.optimizer_nesterov_adam(lr);
elif(optimizer_list[i] == "sgd"):
ktf_.optimizer_sgd(lr);
elif(optimizer_list[i] == "nesterov_sgd"):
ktf_.optimizer_nesterov_sgd(lr);
ktf_.Reload(); #Change 7
ktf_.update_num_epochs(num_epochs);
ktf_.update_display_progress_realtime(False)
ktf_.update_save_intermediate_models(False);
ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
ktf_.set_dataset_dataloader(estimate=True);
total_time_per_epoch = ktf_.get_training_estimate();
total_time = total_time_per_epoch*num_epochs;
if(int(total_time//60) == 0):
self.custom_print("Estimated time : {} sec".format(total_time));
else:
self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
ktf_.Train();
self.custom_print("Experiment Complete");
self.custom_print("\n");
self.custom_print("Comparing Experiments");
from monk.compare_prototype import compare
ctf_ = compare(verbose=0);
ctf_.Comparison("Comparison_" + analysis_name);
self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
training_accuracies = [];
validation_accuracies = [];
training_losses = [];
validation_losses = [];
tabular_data = [];
for i in range(len(optimizer_list)): #Change 8
project = analysis_name;
experiment = "Optimizer_" + str(optimizer_list[i]); #Change 9, 10
ctf_.Add_Experiment(project, experiment)
tmp = [];
tmp.append(experiment);
training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
tabular_data.append(tmp)
ctf_.Generate_Statistics();
self.custom_print("Generated statistics post all epochs");
self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
self.custom_print("");
return_dict = {};
for i in range(len(tabular_data)):
return_dict[tabular_data[i][0]] = {};
return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
system_dict = read_json(fname);
return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
if(state=="keep_none"):
shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
return return_dict
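# A hypothetical usage sketch (assuming `ptf_` is an already configured
# prototype): the strings are the optimizer names this method branches on.
#
#     analysis = ptf_.Analyse_Optimizers("optimizer_search",
#                                        ["sgd", "adam", "rmsprop"], 10,
#                                        num_epochs=2, state="keep_none")
#     # Result keys look like "Optimizer_sgd", "Optimizer_adam", ...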
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Trainval_Splits(self, analysis_name, split_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse train-val splits
Takes in a list of training-validation split values and trains each on a part of the dataset
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
split_list (list): List of trainval splits.
percent_data (int): Percentage of complete dataset to run experiments on.
num_epochs (int): Number of epochs for each sub-experiment
state (str): If set as "keep_all", keeps every file in the sub-experiment
If set as "keep_none", keeps only comparison files for each experiment
Returns:
dict: Tabular data on training_accuracy, validation_accuracy, training_loss, validation_loss and training_time for each experiment.
'''
from monk.keras_prototype import prototype
project = analysis_name;
self.custom_print("");
self.custom_print("Running Trainval split value analysis"); #Change 1
self.custom_print("Analysis Name : {}".format(project));
self.custom_print("");
for i in range(len(split_list)): #Change 2
ktf_ = prototype(verbose=0);
self.custom_print("Running experiment : {}/{}".format(i+1, len(split_list))); #Change 3
experiment = "Trainval_split" + str(split_list[i]); #Change 4
self.custom_print("Experiment name : {}".format(experiment))
ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
ktf_.Dataset_Percent(percent_data);
dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
csv_train = ktf_.system_dict["dataset"]["csv_train"];
csv_val = ktf_.system_dict["dataset"]["csv_val"];
if(dataset_type=="train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
elif(dataset_type=="csv_train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="csv_train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
ktf_.update_trainval_split(split_list[i]); #Change 5
ktf_.Reload(); #Change 6
ktf_.update_num_epochs(num_epochs);
ktf_.update_display_progress_realtime(False)
ktf_.update_save_intermediate_models(False);
ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
ktf_.set_dataset_dataloader(estimate=True);
total_time_per_epoch = ktf_.get_training_estimate();
total_time = total_time_per_epoch*num_epochs;
if(int(total_time//60) == 0):
self.custom_print("Estimated time : {} sec".format(total_time));
else:
self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
ktf_.Train();
self.custom_print("Experiment Complete");
self.custom_print("\n");
self.custom_print("Comparing Experiments");
from monk.compare_prototype import compare
ctf_ = compare(verbose=0);
ctf_.Comparison("Comparison_" + analysis_name);
self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
training_accuracies = [];
validation_accuracies = [];
training_losses = [];
validation_losses = [];
tabular_data = [];
for i in range(len(split_list)): #Change 7
project = analysis_name;
experiment = "Trainval_split" + str(split_list[i]); #Change 8
ctf_.Add_Experiment(project, experiment)
tmp = [];
tmp.append(experiment);
training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
tabular_data.append(tmp)
ctf_.Generate_Statistics();
self.custom_print("Generated statistics post all epochs");
self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
self.custom_print("");
return_dict = {};
for i in range(len(tabular_data)):
return_dict[tabular_data[i][0]] = {};
return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
system_dict = read_json(fname);
return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
if(state=="keep_none"):
shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
return return_dict
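# A hypothetical usage sketch (assuming `ptf_` is an already configured
# prototype); each value is forwarded to update_trainval_split().
#
#     analysis = ptf_.Analyse_Trainval_Splits("split_search", [0.6, 0.7, 0.8],
#                                             10, num_epochs=2)
#     # Result keys look like "Trainval_split0.6", "Trainval_split0.7", ...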
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Freeze_Layers(self, analysis_name, num_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse frozen layers
Takes in a list of numbers of layers to freeze in the network and runs an experiment for each element in the list
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
num_list (list): List of numbers of layers to freeze.
percent_data (int): Percentage of complete dataset to run experiments on.
num_epochs (int): Number of epochs for each sub-experiment
state (str): If set as "keep_all", keeps every file in the sub-experiment
If set as "keep_none", keeps only comparison files for each experiment
Returns:
dict: Tabular data on training_accuracy, validation_accuracy, training_loss, validation_loss and training_time for each experiment.
'''
from monk.keras_prototype import prototype
project = analysis_name;
self.custom_print("");
self.custom_print("Running Freezing layers analysis"); #Change 1
self.custom_print("Analysis Name : {}".format(project));
self.custom_print("");
for i in range(len(num_list)): #Change 2
ktf_ = prototype(verbose=0);
self.custom_print("Running experiment : {}/{}".format(i+1, len(num_list))); #Change 3
experiment = "Freeze_Layers_" + str(num_list[i]); #Change 4
self.custom_print("Experiment name : {}".format(experiment))
ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
ktf_.Dataset_Percent(percent_data);
dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
csv_train = ktf_.system_dict["dataset"]["csv_train"];
csv_val = ktf_.system_dict["dataset"]["csv_val"];
if(dataset_type=="train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
elif(dataset_type=="csv_train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="csv_train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
ktf_.update_freeze_layers(num_list[i]); #Change 5
ktf_.Reload(); #Change 6
ktf_.update_num_epochs(num_epochs);
ktf_.update_display_progress_realtime(False)
ktf_.update_save_intermediate_models(False);
ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
ktf_.set_dataset_dataloader(estimate=True);
total_time_per_epoch = ktf_.get_training_estimate();
total_time = total_time_per_epoch*num_epochs;
if(int(total_time//60) == 0):
self.custom_print("Estimated time : {} sec".format(total_time));
else:
self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
ktf_.Train();
self.custom_print("Experiment Complete");
self.custom_print("\n");
self.custom_print("Comparing Experiments");
from monk.compare_prototype import compare
ctf_ = compare(verbose=0);
ctf_.Comparison("Comparison_" + analysis_name);
self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
training_accuracies = [];
validation_accuracies = [];
training_losses = [];
validation_losses = [];
tabular_data = [];
for i in range(len(num_list)): #Change 7
project = analysis_name;
experiment = "Freeze_Layers_" + str(num_list[i]); #Change 8
ctf_.Add_Experiment(project, experiment)
tmp = [];
tmp.append(experiment);
training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
tabular_data.append(tmp)
ctf_.Generate_Statistics();
self.custom_print("Generated statistics post all epochs");
self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
self.custom_print("");
return_dict = {};
for i in range(len(tabular_data)):
return_dict[tabular_data[i][0]] = {};
return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
system_dict = read_json(fname);
return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
if(state=="keep_none"):
shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
return return_dict
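# A hypothetical usage sketch (assuming `ptf_` is an already configured
# prototype); each value is forwarded to update_freeze_layers().
#
#     analysis = ptf_.Analyse_Freeze_Layers("freeze_search", [10, 50, 100],
#                                           10, num_epochs=2)
#     # Result keys look like "Freeze_Layers_10", "Freeze_Layers_50", ...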
###############################################################################################################################################
###############################################################################################################################################
@warning_checks(None, None, None, ["lt", 50], num_epochs=["lte", 10], state=None, post_trace=False)
@error_checks(None, ["name", ["A-Z", "a-z", "0-9", "-", "_", "."]], None, ["gt", 0, "lte", 100], num_epochs=["gt", 0],
state=["in", ["keep_all", "keep_none"]], post_trace=False)
@accepts("self", str, list, [int, float], num_epochs=int, state=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Analyse_Optimizers_With_LR(self, analysis_name, optimizer_list, percent_data, num_epochs=2, state="keep_all"):
'''
Hyperparameter Tuner - Analyse optimizers with initial learning rates
Takes in a list of optimizer and initial learning rate pairs and trains each on a part of the dataset
Provides summaries and graphs on every sub-experiment created
Args:
analysis_name (str): A suitable name for analysis
optimizer_list (list): List of optimizers along with initial learning rates.
percent_data (int): Percentage of complete dataset to run experiments on.
num_epochs (int): Number of epochs for each sub-experiment
state ("str"): If set as "keep_all", keeps every file in the sub-experiment
If set as "keep_none", keeps only comparison files for each experiment
Returns:
dict: Tabular data on training_accuracy, validation_accuracy, training_loss, validation_loss and training_time for each experiment.
'''
from monk.keras_prototype import prototype
project = analysis_name;
self.custom_print("");
self.custom_print("Running Optimizer analysis"); #Change 1
self.custom_print("Analysis Name : {}".format(project));
self.custom_print("");
for i in range(len(optimizer_list)): #Change 2
ktf_ = prototype(verbose=0);
self.custom_print("Running experiment : {}/{}".format(i+1, len(optimizer_list))); #Change 3
experiment = "Optimizer_" + str(optimizer_list[i][0]) + "_LR_" + str(optimizer_list[i][1]); #Change 4, 5
self.custom_print("Experiment name : {}".format(experiment))
ktf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);
ktf_.Dataset_Percent(percent_data);
dataset_type = ktf_.system_dict["dataset"]["dataset_type"];
dataset_train_path = ktf_.system_dict["dataset"]["train_path"];
dataset_val_path = ktf_.system_dict["dataset"]["val_path"];
csv_train = ktf_.system_dict["dataset"]["csv_train"];
csv_val = ktf_.system_dict["dataset"]["csv_val"];
if(dataset_type=="train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
elif(dataset_type=="csv_train"):
ktf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
elif(dataset_type=="csv_train-val"):
ktf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path],
path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
lr = optimizer_list[i][1]
if(optimizer_list[i][0] == "adagrad"): #Change 6
ktf_.optimizer_adagrad(lr);
elif(optimizer_list[i][0] == "adadelta"):
ktf_.optimizer_adadelta(lr);
elif(optimizer_list[i][0] == "adam"):
ktf_.optimizer_adam(lr);
elif(optimizer_list[i][0] == "adamax"):
ktf_.optimizer_adamax(lr);
elif(optimizer_list[i][0] == "rmsprop"):
ktf_.optimizer_rmsprop(lr);
elif(optimizer_list[i][0] == "nesterov_adam"):
ktf_.optimizer_nesterov_adam(lr);
elif(optimizer_list[i][0] == "sgd"):
ktf_.optimizer_sgd(lr);
elif(optimizer_list[i][0] == "nesterov_sgd"):
ktf_.optimizer_nesterov_sgd(lr);
ktf_.Reload(); #Change 7
ktf_.update_num_epochs(num_epochs);
ktf_.update_display_progress_realtime(False)
ktf_.update_save_intermediate_models(False);
ktf_.system_dict = set_transform_estimate(ktf_.system_dict);
ktf_.set_dataset_dataloader(estimate=True);
total_time_per_epoch = ktf_.get_training_estimate();
total_time = total_time_per_epoch*num_epochs;
if(int(total_time//60) == 0):
self.custom_print("Estimated time : {} sec".format(total_time));
else:
self.custom_print("Estimated time : {} min".format(int(total_time//60)+1));
ktf_.Train();
self.custom_print("Experiment Complete");
self.custom_print("\n");
self.custom_print("Comparing Experiments");
from monk.compare_prototype import compare
ctf_ = compare(verbose=0);
ctf_.Comparison("Comparison_" + analysis_name);
self.custom_print("Comparison ID: {}".format("Comparison_" + analysis_name));
training_accuracies = [];
validation_accuracies = [];
training_losses = [];
validation_losses = [];
tabular_data = [];
for i in range(len(optimizer_list)): #Change 8
project = analysis_name;
experiment = "Optimizer_" + str(optimizer_list[i][0]) + "_LR_" + str(optimizer_list[i][1]); #Change 9, 10
ctf_.Add_Experiment(project, experiment)
tmp = [];
tmp.append(experiment);
training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
tabular_data.append(tmp)
ctf_.Generate_Statistics();
self.custom_print("Generated statistics post all epochs");
self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
self.custom_print("");
return_dict = {};
for i in range(len(tabular_data)):
return_dict[tabular_data[i][0]] = {};
return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];
fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
system_dict = read_json(fname);
return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];
if(state=="keep_none"):
shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);
return return_dict
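# A hypothetical usage sketch (assuming `ptf_` is an already configured
# prototype): each entry pairs one of the optimizer names handled above with
# an initial learning rate.
#
#     optimizer_list = [["sgd", 0.01], ["adam", 0.001], ["rmsprop", 0.0005]]
#     analysis = ptf_.Analyse_Optimizers_With_LR("opt_lr_search", optimizer_list,
#                                                10, num_epochs=2, state="keep_none")
#     # Result keys look like "Optimizer_sgd_LR_0.01", "Optimizer_adam_LR_0.001", ...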
###############################################################################################################################################
|
PypiClean
|
/jittor-1.3.8.5.tar.gz/jittor-1.3.8.5/python/jittor_utils/ring_buffer.py
|
import multiprocessing as mp
import numpy as np
import ctypes
import random
import pickle
recv_raw_call = 0.0
class RingBufferAllocator:
def __init__(self, size):
self.size = size
self.l = mp.Value(ctypes.c_longlong, 0, lock=False)
self.r = mp.Value(ctypes.c_longlong, 0, lock=False)
self.is_full = mp.Value(ctypes.c_bool, False, lock=False)
self.lock = mp.Lock()
self.cv = mp.Condition(self.lock)
def __repr__(self):
l = self.l.value
r = self.r.value
is_full = self.is_full.value
if is_full:
cap = 0
else:
cap = (r - l) / self.size
if cap<=0: cap += 1
return f"Buffer(free={cap*100:.3f}% l={l} r={r} size={self.size})"
def alloc_with_lock(self, size):
with self.lock:
while True:
location = self.alloc(size)
if location is not None: break
self.cv.wait()
return location
def free_with_lock(self, size):
with self.lock:
location = self.free(size)
self.cv.notify()
return location
def clear(self):
with self.lock:
self.l.value = 0
self.r.value = 0
self.is_full.value = False
def alloc(self, size):
if size > self.size:
raise RuntimeError(f"Buffer size too small {self.size}<{size}")
l = self.l.value
r = self.r.value
is_full = self.is_full.value
if is_full: return None
if l == r and l > 0:
self.l.value = self.r.value = l = r = 0
# [l, r)
if r > l:
freed = r - l
if freed < size:
# |----l......r---|
# |----#########--|
return None
# |----l......r---|
# |----#####------|
location = l
self.l.value = l = l + size
else:
freed = self.size - l
if freed < size:
# |.....r------l...|
# |------------#######
if size > r:
# |.....r------l...|
# |#######-----------
return None
# |.....r------l...|
# |#####-----------
if size == r:
self.is_full.value = is_full= True
location = 0
self.l.value = l = size
else:
# |.....r------l...|
# |------------##--|
location = l
if freed == size:
self.l.value = l = 0
else:
self.l.value = l = l + size
if l == r:
self.is_full.value = is_full = True
return location
def free(self, size):
l = self.l.value
r = self.r.value
is_full = self.is_full.value
if size==0: return r
if is_full:
self.is_full.value = is_full = False
elif l == r:
return None
location = r
self.r.value = r = r + size
if r > self.size:
location = 0
self.r.value = r = size
elif r == self.size:
self.r.value = r = 0
return location
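# Usage note (a minimal single-process sketch; sizes are arbitrary): alloc()
# returns the offset where the producer may write, free() returns the offset
# where the consumer may read back the same number of bytes.
#
#     allocator = RingBufferAllocator(16)
#     w = allocator.alloc(8)    # -> 0, producer writes 8 bytes at offset 0
#     r = allocator.free(8)     # -> 0, consumer reads those 8 bytes back
#     allocator.clear()
#
# alloc_with_lock()/free_with_lock() are the blocking variants meant for use
# when producer and consumer run in different processes.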
def str_to_char_array(s, array_len):
if len(s) > array_len: s = s[:array_len]
a = np.array(s, dtype='c')
if len(s) < array_len:
a = np.pad(a, (0,array_len-len(s)), constant_values=' ')
return a
def char_array_to_str(a):
return str(a.tobytes(), 'ascii').strip()
class RingBuffer:
def __init__(self, buffer):
self.allocator = RingBufferAllocator(len(buffer))
self.buffer = buffer
def clear(self): self.allocator.clear()
def send_int(self, data):
# int: int64[1]
# data
self.send_raw(np.array([data], dtype='int64'))
def recv_int(self):
return int(self.recv_raw(8, (1,), 'int64')[0])
def send_float(self, data):
# float: float64[1]
# data
self.send_raw(np.array([data], dtype='float64'))
def recv_float(self):
return float(self.recv_raw(8, (1,), 'float64')[0])
def send_str(self, data):
# str: int64[1] char[len]
# len data
data = np.array(data, dtype='c')
self.send_int(data.nbytes)
self.send_raw(data)
def recv_str(self):
nbytes = self.recv_int()
data = self.recv_raw(nbytes, nbytes, 'c')
return str(data.tobytes(), 'ascii')
def send_ndarray(self, data):
# str: int64[1] char[8] int64[1] int64[slen] char[nbytes]
# slen dtype nbytes shape data
shape = data.shape
slen = len(shape)
self.send_int(slen)
self.send_fix_len_str(str(data.dtype))
self.send_int(data.nbytes)
self.send_raw(np.array(shape, dtype='int64'))
self.send_raw(data)
def recv_ndarray(self):
slen = self.recv_int()
dtype = self.recv_fix_len_str()
nbytes = self.recv_int()
shape = self.recv_raw(slen*8, slen, 'int64')
data = self.recv_raw(nbytes, shape, dtype)
return data
def send_tuple(self, data):
# tuple: int64[1] ....
# len
length = len(data)
self.send_int(length)
for a in data:
self.send(a)
def recv_tuple(self):
length = self.recv_int()
return tuple(self.recv() for i in range(length))
def send_list(self, data):
# list: int64[1] ....
# len
length = len(data)
self.send_int(length)
for a in data:
self.send(a)
def recv_list(self):
length = self.recv_int()
return [self.recv() for i in range(length)]
def send_pickle(self, data):
# pickle: int64[1] char[len]
# len data
data = pickle.dumps(data)
data = np.frombuffer(data, dtype='c')
self.send_int(data.nbytes)
self.send_raw(data)
def recv_pickle(self):
nbytes = self.recv_int()
data = self.recv_raw(nbytes, nbytes, 'c')
return pickle.loads(data.tobytes())
def __repr__(self):
return f"{self.allocator}@0x{hex(ctypes.addressof(self.buffer))}"
def send_raw(self, data):
assert isinstance(data, np.ndarray) # and data.flags.c_contiguous
with self.allocator.lock:
location = self.allocator.alloc(data.nbytes)
while location is None:
self.allocator.cv.wait()
location = self.allocator.alloc(data.nbytes)
window = np.ndarray(shape=data.shape, dtype=data.dtype,
buffer=self.buffer, offset=location)
window[:] = data
self.allocator.cv.notify()
assert window.nbytes == data.nbytes
def recv_raw(self, nbytes, shape, dtype):
global recv_raw_call
recv_raw_call += 1
with self.allocator.lock:
location = self.allocator.free(nbytes)
while location is None:
self.allocator.cv.wait()
location = self.allocator.free(nbytes)
data = np.ndarray(shape=shape, dtype=dtype,
buffer=self.buffer, offset=location).copy()
self.allocator.cv.notify()
assert data.nbytes == nbytes
return data
def send_fix_len_str(self, s, array_len=8):
data = str_to_char_array(s, array_len)
self.send_raw(data)
def recv_fix_len_str(self, array_len=8):
data = self.recv_raw(array_len, array_len, 'c')
return char_array_to_str(data)
def send(self, data):
ts = type(data).__name__
send = getattr(self, "send_"+ts, self.send_pickle)
self.send_fix_len_str(ts)
send(data)
def recv(self):
ts = self.recv_fix_len_str()
recv = getattr(self, "recv_"+ts, self.recv_pickle)
return recv()
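if __name__ == "__main__":
    # Minimal single-process smoke test, a sketch rather than part of the
    # library: the 1 MB buffer size and the sample payloads are arbitrary.
    # In real use the same RingBuffer is shared between a producer process
    # and a consumer process.
    shared = mp.RawArray(ctypes.c_char, 1 << 20)
    ring = RingBuffer(shared)
    ring.send(42)
    ring.send("hello")
    ring.send(np.arange(6, dtype='float32').reshape(2, 3))
    assert ring.recv() == 42
    assert ring.recv() == "hello"
    print(ring.recv())   # the 2x3 float32 array sent above
    print(ring)          # allocator statistics via __repr__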
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/EcomRefundDisputeDTO.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
class EcomRefundDisputeDTO(object):
def __init__(self):
self._buyer_id = None
self._buyer_logistic_company_code = None
self._buyer_logistic_company_name = None
self._buyer_logistic_no = None
self._case_time = None
self._end_time = None
self._goods_needs = None
self._goods_status = None
self._order_id = None
self._reason_code = None
self._reason_text = None
self._refund_desc = None
self._refund_dispute_id = None
self._refund_fee = None
self._refund_status = None
self._refund_type = None
self._seller_id = None
self._start_time = None
@property
def buyer_id(self):
return self._buyer_id
@buyer_id.setter
def buyer_id(self, value):
self._buyer_id = value
@property
def buyer_logistic_company_code(self):
return self._buyer_logistic_company_code
@buyer_logistic_company_code.setter
def buyer_logistic_company_code(self, value):
self._buyer_logistic_company_code = value
@property
def buyer_logistic_company_name(self):
return self._buyer_logistic_company_name
@buyer_logistic_company_name.setter
def buyer_logistic_company_name(self, value):
self._buyer_logistic_company_name = value
@property
def buyer_logistic_no(self):
return self._buyer_logistic_no
@buyer_logistic_no.setter
def buyer_logistic_no(self, value):
self._buyer_logistic_no = value
@property
def case_time(self):
return self._case_time
@case_time.setter
def case_time(self, value):
self._case_time = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def goods_needs(self):
return self._goods_needs
@goods_needs.setter
def goods_needs(self, value):
self._goods_needs = value
@property
def goods_status(self):
return self._goods_status
@goods_status.setter
def goods_status(self, value):
self._goods_status = value
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def reason_code(self):
return self._reason_code
@reason_code.setter
def reason_code(self, value):
self._reason_code = value
@property
def reason_text(self):
return self._reason_text
@reason_text.setter
def reason_text(self, value):
self._reason_text = value
@property
def refund_desc(self):
return self._refund_desc
@refund_desc.setter
def refund_desc(self, value):
self._refund_desc = value
@property
def refund_dispute_id(self):
return self._refund_dispute_id
@refund_dispute_id.setter
def refund_dispute_id(self, value):
self._refund_dispute_id = value
@property
def refund_fee(self):
return self._refund_fee
@refund_fee.setter
def refund_fee(self, value):
self._refund_fee = value
@property
def refund_status(self):
return self._refund_status
@refund_status.setter
def refund_status(self, value):
self._refund_status = value
@property
def refund_type(self):
return self._refund_type
@refund_type.setter
def refund_type(self, value):
self._refund_type = value
@property
def seller_id(self):
return self._seller_id
@seller_id.setter
def seller_id(self, value):
self._seller_id = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
def to_alipay_dict(self):
params = dict()
if self.buyer_id:
if hasattr(self.buyer_id, 'to_alipay_dict'):
params['buyer_id'] = self.buyer_id.to_alipay_dict()
else:
params['buyer_id'] = self.buyer_id
if self.buyer_logistic_company_code:
if hasattr(self.buyer_logistic_company_code, 'to_alipay_dict'):
params['buyer_logistic_company_code'] = self.buyer_logistic_company_code.to_alipay_dict()
else:
params['buyer_logistic_company_code'] = self.buyer_logistic_company_code
if self.buyer_logistic_company_name:
if hasattr(self.buyer_logistic_company_name, 'to_alipay_dict'):
params['buyer_logistic_company_name'] = self.buyer_logistic_company_name.to_alipay_dict()
else:
params['buyer_logistic_company_name'] = self.buyer_logistic_company_name
if self.buyer_logistic_no:
if hasattr(self.buyer_logistic_no, 'to_alipay_dict'):
params['buyer_logistic_no'] = self.buyer_logistic_no.to_alipay_dict()
else:
params['buyer_logistic_no'] = self.buyer_logistic_no
if self.case_time:
if hasattr(self.case_time, 'to_alipay_dict'):
params['case_time'] = self.case_time.to_alipay_dict()
else:
params['case_time'] = self.case_time
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.goods_needs:
if hasattr(self.goods_needs, 'to_alipay_dict'):
params['goods_needs'] = self.goods_needs.to_alipay_dict()
else:
params['goods_needs'] = self.goods_needs
if self.goods_status:
if hasattr(self.goods_status, 'to_alipay_dict'):
params['goods_status'] = self.goods_status.to_alipay_dict()
else:
params['goods_status'] = self.goods_status
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.reason_code:
if hasattr(self.reason_code, 'to_alipay_dict'):
params['reason_code'] = self.reason_code.to_alipay_dict()
else:
params['reason_code'] = self.reason_code
if self.reason_text:
if hasattr(self.reason_text, 'to_alipay_dict'):
params['reason_text'] = self.reason_text.to_alipay_dict()
else:
params['reason_text'] = self.reason_text
if self.refund_desc:
if hasattr(self.refund_desc, 'to_alipay_dict'):
params['refund_desc'] = self.refund_desc.to_alipay_dict()
else:
params['refund_desc'] = self.refund_desc
if self.refund_dispute_id:
if hasattr(self.refund_dispute_id, 'to_alipay_dict'):
params['refund_dispute_id'] = self.refund_dispute_id.to_alipay_dict()
else:
params['refund_dispute_id'] = self.refund_dispute_id
if self.refund_fee:
if hasattr(self.refund_fee, 'to_alipay_dict'):
params['refund_fee'] = self.refund_fee.to_alipay_dict()
else:
params['refund_fee'] = self.refund_fee
if self.refund_status:
if hasattr(self.refund_status, 'to_alipay_dict'):
params['refund_status'] = self.refund_status.to_alipay_dict()
else:
params['refund_status'] = self.refund_status
if self.refund_type:
if hasattr(self.refund_type, 'to_alipay_dict'):
params['refund_type'] = self.refund_type.to_alipay_dict()
else:
params['refund_type'] = self.refund_type
if self.seller_id:
if hasattr(self.seller_id, 'to_alipay_dict'):
params['seller_id'] = self.seller_id.to_alipay_dict()
else:
params['seller_id'] = self.seller_id
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = EcomRefundDisputeDTO()
if 'buyer_id' in d:
o.buyer_id = d['buyer_id']
if 'buyer_logistic_company_code' in d:
o.buyer_logistic_company_code = d['buyer_logistic_company_code']
if 'buyer_logistic_company_name' in d:
o.buyer_logistic_company_name = d['buyer_logistic_company_name']
if 'buyer_logistic_no' in d:
o.buyer_logistic_no = d['buyer_logistic_no']
if 'case_time' in d:
o.case_time = d['case_time']
if 'end_time' in d:
o.end_time = d['end_time']
if 'goods_needs' in d:
o.goods_needs = d['goods_needs']
if 'goods_status' in d:
o.goods_status = d['goods_status']
if 'order_id' in d:
o.order_id = d['order_id']
if 'reason_code' in d:
o.reason_code = d['reason_code']
if 'reason_text' in d:
o.reason_text = d['reason_text']
if 'refund_desc' in d:
o.refund_desc = d['refund_desc']
if 'refund_dispute_id' in d:
o.refund_dispute_id = d['refund_dispute_id']
if 'refund_fee' in d:
o.refund_fee = d['refund_fee']
if 'refund_status' in d:
o.refund_status = d['refund_status']
if 'refund_type' in d:
o.refund_type = d['refund_type']
if 'seller_id' in d:
o.seller_id = d['seller_id']
if 'start_time' in d:
o.start_time = d['start_time']
return o
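if __name__ == "__main__":
    # Round-trip sketch with placeholder values (not real order data):
    # populate a few fields, serialise with to_alipay_dict() and rebuild
    # the object with from_alipay_dict().
    dto = EcomRefundDisputeDTO()
    dto.order_id = "demo_order_id"
    dto.refund_fee = "10.00"
    dto.reason_text = "item damaged"
    params = dto.to_alipay_dict()
    print(json.dumps(params, ensure_ascii=False))
    restored = EcomRefundDisputeDTO.from_alipay_dict(params)
    print(restored.order_id, restored.refund_fee, restored.reason_text)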
|
PypiClean
|
/trixie-0.1.2.tar.gz/trixie-0.1.2/homeassistant/components/remember_the_milk/__init__.py
|
import json
import logging
import os
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY, CONF_ID, CONF_NAME, CONF_TOKEN, STATE_OK)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
# httplib2 is a transitive dependency from RtmAPI. If this dependency is not
# set explicitly, the library does not work.
REQUIREMENTS = ['RtmAPI==0.7.0', 'httplib2==0.10.3']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'remember_the_milk'
DEFAULT_NAME = DOMAIN
GROUP_NAME_RTM = 'remember the milk accounts'
CONF_SHARED_SECRET = 'shared_secret'
CONF_ID_MAP = 'id_map'
CONF_LIST_ID = 'list_id'
CONF_TIMESERIES_ID = 'timeseries_id'
CONF_TASK_ID = 'task_id'
RTM_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_SHARED_SECRET): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(cv.ensure_list, [RTM_SCHEMA])
}, extra=vol.ALLOW_EXTRA)
CONFIG_FILE_NAME = '.remember_the_milk.conf'
SERVICE_CREATE_TASK = 'create_task'
SERVICE_COMPLETE_TASK = 'complete_task'
SERVICE_SCHEMA_CREATE_TASK = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_ID): cv.string,
})
SERVICE_SCHEMA_COMPLETE_TASK = vol.Schema({
vol.Required(CONF_ID): cv.string,
})
def setup(hass, config):
"""Set up the Remember the milk component."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, group_name=GROUP_NAME_RTM)
stored_rtm_config = RememberTheMilkConfiguration(hass)
for rtm_config in config[DOMAIN]:
account_name = rtm_config[CONF_NAME]
_LOGGER.info("Adding Remember the milk account %s", account_name)
api_key = rtm_config[CONF_API_KEY]
shared_secret = rtm_config[CONF_SHARED_SECRET]
token = stored_rtm_config.get_token(account_name)
if token:
_LOGGER.debug("found token for account %s", account_name)
_create_instance(
hass, account_name, api_key, shared_secret, token,
stored_rtm_config, component)
else:
_register_new_account(
hass, account_name, api_key, shared_secret,
stored_rtm_config, component)
_LOGGER.debug("Finished adding all Remember the milk accounts")
return True
def _create_instance(hass, account_name, api_key, shared_secret,
token, stored_rtm_config, component):
entity = RememberTheMilk(account_name, api_key, shared_secret,
token, stored_rtm_config)
component.add_entities([entity])
hass.services.register(
DOMAIN, '{}_create_task'.format(account_name), entity.create_task,
schema=SERVICE_SCHEMA_CREATE_TASK)
hass.services.register(
DOMAIN, '{}_complete_task'.format(account_name), entity.complete_task,
schema=SERVICE_SCHEMA_COMPLETE_TASK)
def _register_new_account(hass, account_name, api_key, shared_secret,
stored_rtm_config, component):
from rtmapi import Rtm
request_id = None
configurator = hass.components.configurator
api = Rtm(api_key, shared_secret, "write", None)
url, frob = api.authenticate_desktop()
_LOGGER.debug("Sent authentication request to server")
def register_account_callback(_):
"""Call for register the configurator."""
api.retrieve_token(frob)
token = api.token
if api.token is None:
_LOGGER.error("Failed to register, please try again")
configurator.notify_errors(
request_id,
'Failed to register, please try again.')
return
stored_rtm_config.set_token(account_name, token)
_LOGGER.debug("Retrieved new token from server")
_create_instance(
hass, account_name, api_key, shared_secret, token,
stored_rtm_config, component)
configurator.request_done(request_id)
request_id = configurator.async_request_config(
'{} - {}'.format(DOMAIN, account_name),
callback=register_account_callback,
description='You need to log in to Remember The Milk to ' +
'connect your account. \n\n' +
'Step 1: Click on the link "Remember The Milk login"\n\n' +
'Step 2: Click on "login completed"',
link_name='Remember The Milk login',
link_url=url,
submit_caption="login completed",
)
class RememberTheMilkConfiguration(object):
"""Internal configuration data for RememberTheMilk class.
This class stores the authentication token it gets from the backend.
"""
def __init__(self, hass):
"""Create new instance of configuration."""
self._config_file_path = hass.config.path(CONFIG_FILE_NAME)
if not os.path.isfile(self._config_file_path):
self._config = dict()
return
try:
_LOGGER.debug("Loading configuration from file: %s",
self._config_file_path)
with open(self._config_file_path, 'r') as config_file:
self._config = json.load(config_file)
except ValueError:
_LOGGER.error("Failed to load configuration file, creating a "
"new one: %s", self._config_file_path)
self._config = dict()
def save_config(self):
"""Write the configuration to a file."""
with open(self._config_file_path, 'w') as config_file:
json.dump(self._config, config_file)
def get_token(self, profile_name):
"""Get the server token for a profile."""
if profile_name in self._config:
return self._config[profile_name][CONF_TOKEN]
return None
def set_token(self, profile_name, token):
"""Store a new server token for a profile."""
self._initialize_profile(profile_name)
self._config[profile_name][CONF_TOKEN] = token
self.save_config()
def delete_token(self, profile_name):
"""Delete a token for a profile.
Usually called when the token has expired.
"""
self._config.pop(profile_name, None)
self.save_config()
def _initialize_profile(self, profile_name):
"""Initialize the data structures for a profile."""
if profile_name not in self._config:
self._config[profile_name] = dict()
if CONF_ID_MAP not in self._config[profile_name]:
self._config[profile_name][CONF_ID_MAP] = dict()
def get_rtm_id(self, profile_name, hass_id):
"""Get the RTM ids for a Home Assistant task ID.
The ID of an RTM task consists of the tuple:
list id, timeseries id and task id.
"""
self._initialize_profile(profile_name)
ids = self._config[profile_name][CONF_ID_MAP].get(hass_id)
if ids is None:
return None
return ids[CONF_LIST_ID], ids[CONF_TIMESERIES_ID], ids[CONF_TASK_ID]
def set_rtm_id(self, profile_name, hass_id, list_id, time_series_id,
rtm_task_id):
"""Add/Update the RTM task ID for a Home Assistant task IS."""
self._initialize_profile(profile_name)
id_tuple = {
CONF_LIST_ID: list_id,
CONF_TIMESERIES_ID: time_series_id,
CONF_TASK_ID: rtm_task_id,
}
self._config[profile_name][CONF_ID_MAP][hass_id] = id_tuple
self.save_config()
def delete_rtm_id(self, profile_name, hass_id):
"""Delete a key mapping."""
self._initialize_profile(profile_name)
if hass_id in self._config[profile_name][CONF_ID_MAP]:
del self._config[profile_name][CONF_ID_MAP][hass_id]
self.save_config()
class RememberTheMilk(Entity):
"""Representation of an interface to Remember The Milk."""
def __init__(self, name, api_key, shared_secret, token, rtm_config):
"""Create new instance of Remember The Milk component."""
import rtmapi
self._name = name
self._api_key = api_key
self._shared_secret = shared_secret
self._token = token
self._rtm_config = rtm_config
self._rtm_api = rtmapi.Rtm(api_key, shared_secret, "delete", token)
self._token_valid = None
self._check_token()
_LOGGER.debug("Instance created for account %s", self._name)
def _check_token(self):
"""Check if the API token is still valid.
If it is not valid any more, delete it from the configuration. This
will trigger a new authentication process.
"""
valid = self._rtm_api.token_valid()
if not valid:
_LOGGER.error("Token for account %s is invalid. You need to "
"register again!", self.name)
self._rtm_config.delete_token(self._name)
self._token_valid = False
else:
self._token_valid = True
return self._token_valid
def create_task(self, call):
"""Create a new task on Remember The Milk.
You can use the smart syntax to define the attributes of a new task,
e.g. "my task #some_tag ^today" will add tag "some_tag" and set the
due date to today.
"""
import rtmapi
try:
task_name = call.data.get(CONF_NAME)
hass_id = call.data.get(CONF_ID)
rtm_id = None
if hass_id is not None:
rtm_id = self._rtm_config.get_rtm_id(self._name, hass_id)
result = self._rtm_api.rtm.timelines.create()
timeline = result.timeline.value
if hass_id is None or rtm_id is None:
result = self._rtm_api.rtm.tasks.add(
timeline=timeline, name=task_name, parse='1')
_LOGGER.debug("Created new task '%s' in account %s",
task_name, self.name)
self._rtm_config.set_rtm_id(
self._name, hass_id, result.list.id,
result.list.taskseries.id, result.list.taskseries.task.id)
else:
self._rtm_api.rtm.tasks.setName(
name=task_name, list_id=rtm_id[0], taskseries_id=rtm_id[1],
task_id=rtm_id[2], timeline=timeline)
_LOGGER.debug("Updated task with id '%s' in account "
"%s to name %s", hass_id, self.name, task_name)
except rtmapi.RtmRequestFailedException as rtm_exception:
_LOGGER.error("Error creating new Remember The Milk task for "
"account %s: %s", self._name, rtm_exception)
return False
return True
def complete_task(self, call):
"""Complete a task that was previously created by this component."""
import rtmapi
hass_id = call.data.get(CONF_ID)
rtm_id = self._rtm_config.get_rtm_id(self._name, hass_id)
if rtm_id is None:
_LOGGER.error("Could not find task with ID %s in account %s. "
"So task could not be closed", hass_id, self._name)
return False
try:
result = self._rtm_api.rtm.timelines.create()
timeline = result.timeline.value
self._rtm_api.rtm.tasks.complete(
list_id=rtm_id[0], taskseries_id=rtm_id[1], task_id=rtm_id[2],
timeline=timeline)
self._rtm_config.delete_rtm_id(self._name, hass_id)
_LOGGER.debug("Completed task with id %s in account %s",
hass_id, self._name)
except rtmapi.RtmRequestFailedException as rtm_exception:
_LOGGER.error("Error creating new Remember The Milk task for "
"account %s: %s", self._name, rtm_exception)
return True
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if not self._token_valid:
return "API token invalid"
return STATE_OK
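# Configuration sketch (placeholder values): in Home Assistant's
# configuration.yaml this component is set up as
#
#     remember_the_milk:
#       - name: myprofile
#         api_key: YOUR_API_KEY
#         shared_secret: YOUR_SHARED_SECRET
#
# which, per CONFIG_SCHEMA and _create_instance() above, registers the
# services remember_the_milk.myprofile_create_task (data: name, optional id)
# and remember_the_milk.myprofile_complete_task (data: id).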
|
PypiClean
|
/django-trackable-0.3.8.tar.gz/django-trackable-0.3.8/trackable/settings.py
|
import os.path
import logging
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = DEBUG
THUMBNAIL_DEBUG = DEBUG
ADMINS = (
('Thom Linton', '[email protected]'),
)
MANAGERS = ()
INTERNAL_IPS = (
'127.0.0.1',
)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
USE_ETAGS = False
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.load_template_source',
)
# default timeout is 300s
CACHE_BACKEND = 'memcached://127.0.0.1:11211/'
# CACHE_BACKEND = 'newcache://127.0.0.1:11211/?binary=true'
# CACHE_MIDDLEWARE_SECONDS = 300
# CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.middleware.transaction.TransactionMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.core.context_processors.debug",
]
AUTHENTICATION_BACKENDS = []
INSTALLED_APPS = [
'django.contrib.contenttypes',
'trackable',
'trackable.tests',
'south',
'djcelery',
]
ROOT_URLCONF = 'tests.test_urls'
# Base path info
BASE_PATH = os.path.dirname(__file__)
# Testing settings
# TEST_RUNNER = "djcelery.contrib.test_runner.run_tests"
# Celery settings
# CELERY_RESULT_BACKEND = "cache"
CELERY_QUEUES = {
"trackable": {
"exchange": "direct",
"binding_key": "trackable",
},
}
CELERY_DEFAULT_QUEUE = "trackable"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "trackable"
# CELERY_SEND_TASK_ERROR_EMAILS = False
# CELERY_SEND_EVENTS = True
# CELERYD_CONCURRENCY = 2
CELERYD_LOG_LEVEL = 'INFO'
# Carrot settings
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_VHOST = "/"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
TRACKABLE_USER_AGENT_FILTERING = False
TRACKABLE_REMOVE_MALFORMED_MESSAGES = True
TRACKABLE_CAPTURE_CONNECTION_ERRORS = True
TRACKABLE_DISABLE_COLLECTION_TASK = True
# TRACKABLE_LOGLEVEL = logging.DEBUG
TRACKABLE_ENGINE = 'trackable.message.backends.kombu'
FIXTURE_DIRS = (
    os.path.join( BASE_PATH, 'fixtures' ),
)
TEMPLATE_DIRS = (
    os.path.join( BASE_PATH, 'templates'),
)
try:
from local_settings import *
import djcelery
djcelery.setup_loader()
except ImportError:
pass
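# The try/except above lets a sibling local_settings.py override any of the
# values in this module. A minimal sketch of such a file follows; the hostname
# and credentials are placeholders, not values shipped with this package.
#
# local_settings.py:
#     DEBUG = False
#     BROKER_HOST = "rabbitmq.example.com"   # placeholder hostname
#     BROKER_USER = "trackable"              # placeholder credential
#     BROKER_PASSWORD = "change-me"          # placeholder credential
#     CELERYD_LOG_LEVEL = "WARNING"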
|
PypiClean
|
/wandb-ng-0.0.44.tar.gz/wandb-ng-0.0.44/wandb/vendor/pygments/lexers/_stata_builtins.py
|
builtins_base = (
"if", "else", "in", "foreach", "for", "forv", "forva",
"forval", "forvalu", "forvalue", "forvalues", "by", "bys",
"bysort", "quietly", "qui", "about", "ac",
"ac_7", "acprplot", "acprplot_7", "adjust", "ado", "adopath",
"adoupdate", "alpha", "ameans", "an", "ano", "anov", "anova",
"anova_estat", "anova_terms", "anovadef", "aorder", "ap", "app",
"appe", "appen", "append", "arch", "arch_dr", "arch_estat",
"arch_p", "archlm", "areg", "areg_p", "args", "arima",
"arima_dr", "arima_estat", "arima_p", "as", "asmprobit",
"asmprobit_estat", "asmprobit_lf", "asmprobit_mfx__dlg",
"asmprobit_p", "ass", "asse", "asser", "assert", "avplot",
"avplot_7", "avplots", "avplots_7", "bcskew0", "bgodfrey",
"binreg", "bip0_lf", "biplot", "bipp_lf", "bipr_lf",
"bipr_p", "biprobit", "bitest", "bitesti", "bitowt", "blogit",
"bmemsize", "boot", "bootsamp", "bootstrap", "bootstrap_8",
"boxco_l", "boxco_p", "boxcox", "boxcox_6", "boxcox_p",
"bprobit", "br", "break", "brier", "bro", "brow", "brows",
"browse", "brr", "brrstat", "bs", "bs_7", "bsampl_w",
"bsample", "bsample_7", "bsqreg", "bstat", "bstat_7", "bstat_8",
"bstrap", "bstrap_7", "ca", "ca_estat", "ca_p", "cabiplot",
"camat", "canon", "canon_8", "canon_8_p", "canon_estat",
"canon_p", "cap", "caprojection", "capt", "captu", "captur",
"capture", "cat", "cc", "cchart", "cchart_7", "cci",
"cd", "censobs_table", "centile", "cf", "char", "chdir",
"checkdlgfiles", "checkestimationsample", "checkhlpfiles",
"checksum", "chelp", "ci", "cii", "cl", "class", "classutil",
"clear", "cli", "clis", "clist", "clo", "clog", "clog_lf",
"clog_p", "clogi", "clogi_sw", "clogit", "clogit_lf",
"clogit_p", "clogitp", "clogl_sw", "cloglog", "clonevar",
"clslistarray", "cluster", "cluster_measures", "cluster_stop",
"cluster_tree", "cluster_tree_8", "clustermat", "cmdlog",
"cnr", "cnre", "cnreg", "cnreg_p", "cnreg_sw", "cnsreg",
"codebook", "collaps4", "collapse", "colormult_nb",
"colormult_nw", "compare", "compress", "conf", "confi",
"confir", "confirm", "conren", "cons", "const", "constr",
"constra", "constrai", "constrain", "constraint", "continue",
"contract", "copy", "copyright", "copysource", "cor", "corc",
"corr", "corr2data", "corr_anti", "corr_kmo", "corr_smc",
"corre", "correl", "correla", "correlat", "correlate",
"corrgram", "cou", "coun", "count", "cox", "cox_p", "cox_sw",
"coxbase", "coxhaz", "coxvar", "cprplot", "cprplot_7",
"crc", "cret", "cretu", "cretur", "creturn", "cross", "cs",
"cscript", "cscript_log", "csi", "ct", "ct_is", "ctset",
"ctst_5", "ctst_st", "cttost", "cumsp", "cumsp_7", "cumul",
"cusum", "cusum_7", "cutil", "d", "datasig", "datasign",
"datasigna", "datasignat", "datasignatu", "datasignatur",
"datasignature", "datetof", "db", "dbeta", "de", "dec",
"deco", "decod", "decode", "deff", "des", "desc", "descr",
"descri", "describ", "describe", "destring", "dfbeta",
"dfgls", "dfuller", "di", "di_g", "dir", "dirstats", "dis",
"discard", "disp", "disp_res", "disp_s", "displ", "displa",
"display", "distinct", "do", "doe", "doed", "doedi",
"doedit", "dotplot", "dotplot_7", "dprobit", "drawnorm",
"drop", "ds", "ds_util", "dstdize", "duplicates", "durbina",
"dwstat", "dydx", "e", "ed", "edi", "edit", "egen",
"eivreg", "emdef", "en", "enc", "enco", "encod", "encode",
"eq", "erase", "ereg", "ereg_lf", "ereg_p", "ereg_sw",
"ereghet", "ereghet_glf", "ereghet_glf_sh", "ereghet_gp",
"ereghet_ilf", "ereghet_ilf_sh", "ereghet_ip", "eret",
"eretu", "eretur", "ereturn", "err", "erro", "error", "est",
"est_cfexist", "est_cfname", "est_clickable", "est_expand",
"est_hold", "est_table", "est_unhold", "est_unholdok",
"estat", "estat_default", "estat_summ", "estat_vce_only",
"esti", "estimates", "etodow", "etof", "etomdy", "ex",
"exi", "exit", "expand", "expandcl", "fac", "fact", "facto",
"factor", "factor_estat", "factor_p", "factor_pca_rotated",
"factor_rotate", "factormat", "fcast", "fcast_compute",
"fcast_graph", "fdades", "fdadesc", "fdadescr", "fdadescri",
"fdadescrib", "fdadescribe", "fdasav", "fdasave", "fdause",
"fh_st", "open", "read", "close",
"file", "filefilter", "fillin", "find_hlp_file", "findfile",
"findit", "findit_7", "fit", "fl", "fli", "flis", "flist",
"for5_0", "form", "forma", "format", "fpredict", "frac_154",
"frac_adj", "frac_chk", "frac_cox", "frac_ddp", "frac_dis",
"frac_dv", "frac_in", "frac_mun", "frac_pp", "frac_pq",
"frac_pv", "frac_wgt", "frac_xo", "fracgen", "fracplot",
"fracplot_7", "fracpoly", "fracpred", "fron_ex", "fron_hn",
"fron_p", "fron_tn", "fron_tn2", "frontier", "ftodate", "ftoe",
"ftomdy", "ftowdate", "g", "gamhet_glf", "gamhet_gp",
"gamhet_ilf", "gamhet_ip", "gamma", "gamma_d2", "gamma_p",
"gamma_sw", "gammahet", "gdi_hexagon", "gdi_spokes", "ge",
"gen", "gene", "gener", "genera", "generat", "generate",
"genrank", "genstd", "genvmean", "gettoken", "gl", "gladder",
"gladder_7", "glim_l01", "glim_l02", "glim_l03", "glim_l04",
"glim_l05", "glim_l06", "glim_l07", "glim_l08", "glim_l09",
"glim_l10", "glim_l11", "glim_l12", "glim_lf", "glim_mu",
"glim_nw1", "glim_nw2", "glim_nw3", "glim_p", "glim_v1",
"glim_v2", "glim_v3", "glim_v4", "glim_v5", "glim_v6",
"glim_v7", "glm", "glm_6", "glm_p", "glm_sw", "glmpred", "glo",
"glob", "globa", "global", "glogit", "glogit_8", "glogit_p",
"gmeans", "gnbre_lf", "gnbreg", "gnbreg_5", "gnbreg_p",
"gomp_lf", "gompe_sw", "gomper_p", "gompertz", "gompertzhet",
"gomphet_glf", "gomphet_glf_sh", "gomphet_gp", "gomphet_ilf",
"gomphet_ilf_sh", "gomphet_ip", "gphdot", "gphpen",
"gphprint", "gprefs", "gprobi_p", "gprobit", "gprobit_8", "gr",
"gr7", "gr_copy", "gr_current", "gr_db", "gr_describe",
"gr_dir", "gr_draw", "gr_draw_replay", "gr_drop", "gr_edit",
"gr_editviewopts", "gr_example", "gr_example2", "gr_export",
"gr_print", "gr_qscheme", "gr_query", "gr_read", "gr_rename",
"gr_replay", "gr_save", "gr_set", "gr_setscheme", "gr_table",
"gr_undo", "gr_use", "graph", "graph7", "grebar", "greigen",
"greigen_7", "greigen_8", "grmeanby", "grmeanby_7",
"gs_fileinfo", "gs_filetype", "gs_graphinfo", "gs_stat",
"gsort", "gwood", "h", "hadimvo", "hareg", "hausman",
"haver", "he", "heck_d2", "heckma_p", "heckman", "heckp_lf",
"heckpr_p", "heckprob", "hel", "help", "hereg", "hetpr_lf",
"hetpr_p", "hetprob", "hettest", "hexdump", "hilite",
"hist", "hist_7", "histogram", "hlogit", "hlu", "hmeans",
"hotel", "hotelling", "hprobit", "hreg", "hsearch", "icd9",
"icd9_ff", "icd9p", "iis", "impute", "imtest", "inbase",
"include", "inf", "infi", "infil", "infile", "infix", "inp",
"inpu", "input", "ins", "insheet", "insp", "inspe",
"inspec", "inspect", "integ", "inten", "intreg", "intreg_7",
"intreg_p", "intrg2_ll", "intrg_ll", "intrg_ll2", "ipolate",
"iqreg", "ir", "irf", "irf_create", "irfm", "iri", "is_svy",
"is_svysum", "isid", "istdize", "ivprob_1_lf", "ivprob_lf",
"ivprobit", "ivprobit_p", "ivreg", "ivreg_footnote",
"ivtob_1_lf", "ivtob_lf", "ivtobit", "ivtobit_p", "jackknife",
"jacknife", "jknife", "jknife_6", "jknife_8", "jkstat",
"joinby", "kalarma1", "kap", "kap_3", "kapmeier", "kappa",
"kapwgt", "kdensity", "kdensity_7", "keep", "ksm", "ksmirnov",
"ktau", "kwallis", "l", "la", "lab", "labe", "label",
"labelbook", "ladder", "levels", "levelsof", "leverage",
"lfit", "lfit_p", "li", "lincom", "line", "linktest",
"lis", "list", "lloghet_glf", "lloghet_glf_sh", "lloghet_gp",
"lloghet_ilf", "lloghet_ilf_sh", "lloghet_ip", "llogi_sw",
"llogis_p", "llogist", "llogistic", "llogistichet",
"lnorm_lf", "lnorm_sw", "lnorma_p", "lnormal", "lnormalhet",
"lnormhet_glf", "lnormhet_glf_sh", "lnormhet_gp",
"lnormhet_ilf", "lnormhet_ilf_sh", "lnormhet_ip", "lnskew0",
"loadingplot", "loc", "loca", "local", "log", "logi",
"logis_lf", "logistic", "logistic_p", "logit", "logit_estat",
"logit_p", "loglogs", "logrank", "loneway", "lookfor",
"lookup", "lowess", "lowess_7", "lpredict", "lrecomp", "lroc",
"lroc_7", "lrtest", "ls", "lsens", "lsens_7", "lsens_x",
"lstat", "ltable", "ltable_7", "ltriang", "lv", "lvr2plot",
"lvr2plot_7", "m", "ma", "mac", "macr", "macro", "makecns",
"man", "manova", "manova_estat", "manova_p", "manovatest",
"mantel", "mark", "markin", "markout", "marksample", "mat",
"mat_capp", "mat_order", "mat_put_rr", "mat_rapp", "mata",
"mata_clear", "mata_describe", "mata_drop", "mata_matdescribe",
"mata_matsave", "mata_matuse", "mata_memory", "mata_mlib",
"mata_mosave", "mata_rename", "mata_which", "matalabel",
"matcproc", "matlist", "matname", "matr", "matri",
"matrix", "matrix_input__dlg", "matstrik", "mcc", "mcci",
"md0_", "md1_", "md1debug_", "md2_", "md2debug_", "mds",
"mds_estat", "mds_p", "mdsconfig", "mdslong", "mdsmat",
"mdsshepard", "mdytoe", "mdytof", "me_derd", "mean",
"means", "median", "memory", "memsize", "meqparse", "mer",
"merg", "merge", "mfp", "mfx", "mhelp", "mhodds", "minbound",
"mixed_ll", "mixed_ll_reparm", "mkassert", "mkdir",
"mkmat", "mkspline", "ml", "ml_5", "ml_adjs", "ml_bhhhs",
"ml_c_d", "ml_check", "ml_clear", "ml_cnt", "ml_debug",
"ml_defd", "ml_e0", "ml_e0_bfgs", "ml_e0_cycle", "ml_e0_dfp",
"ml_e0i", "ml_e1", "ml_e1_bfgs", "ml_e1_bhhh", "ml_e1_cycle",
"ml_e1_dfp", "ml_e2", "ml_e2_cycle", "ml_ebfg0", "ml_ebfr0",
"ml_ebfr1", "ml_ebh0q", "ml_ebhh0", "ml_ebhr0", "ml_ebr0i",
"ml_ecr0i", "ml_edfp0", "ml_edfr0", "ml_edfr1", "ml_edr0i",
"ml_eds", "ml_eer0i", "ml_egr0i", "ml_elf", "ml_elf_bfgs",
"ml_elf_bhhh", "ml_elf_cycle", "ml_elf_dfp", "ml_elfi",
"ml_elfs", "ml_enr0i", "ml_enrr0", "ml_erdu0", "ml_erdu0_bfgs",
"ml_erdu0_bhhh", "ml_erdu0_bhhhq", "ml_erdu0_cycle",
"ml_erdu0_dfp", "ml_erdu0_nrbfgs", "ml_exde", "ml_footnote",
"ml_geqnr", "ml_grad0", "ml_graph", "ml_hbhhh", "ml_hd0",
"ml_hold", "ml_init", "ml_inv", "ml_log", "ml_max",
"ml_mlout", "ml_mlout_8", "ml_model", "ml_nb0", "ml_opt",
"ml_p", "ml_plot", "ml_query", "ml_rdgrd", "ml_repor",
"ml_s_e", "ml_score", "ml_searc", "ml_technique", "ml_unhold",
"mleval", "mlf_", "mlmatbysum", "mlmatsum", "mlog", "mlogi",
"mlogit", "mlogit_footnote", "mlogit_p", "mlopts", "mlsum",
"mlvecsum", "mnl0_", "mor", "more", "mov", "move", "mprobit",
"mprobit_lf", "mprobit_p", "mrdu0_", "mrdu1_", "mvdecode",
"mvencode", "mvreg", "mvreg_estat", "n", "nbreg",
"nbreg_al", "nbreg_lf", "nbreg_p", "nbreg_sw", "nestreg", "net",
"newey", "newey_7", "newey_p", "news", "nl", "nl_7", "nl_9",
"nl_9_p", "nl_p", "nl_p_7", "nlcom", "nlcom_p", "nlexp2",
"nlexp2_7", "nlexp2a", "nlexp2a_7", "nlexp3", "nlexp3_7",
"nlgom3", "nlgom3_7", "nlgom4", "nlgom4_7", "nlinit", "nllog3",
"nllog3_7", "nllog4", "nllog4_7", "nlog_rd", "nlogit",
"nlogit_p", "nlogitgen", "nlogittree", "nlpred", "no",
"nobreak", "noi", "nois", "noisi", "noisil", "noisily", "note",
"notes", "notes_dlg", "nptrend", "numlabel", "numlist", "odbc",
"old_ver", "olo", "olog", "ologi", "ologi_sw", "ologit",
"ologit_p", "ologitp", "on", "one", "onew", "onewa", "oneway",
"op_colnm", "op_comp", "op_diff", "op_inv", "op_str", "opr",
"opro", "oprob", "oprob_sw", "oprobi", "oprobi_p", "oprobit",
"oprobitp", "opts_exclusive", "order", "orthog", "orthpoly",
"ou", "out", "outf", "outfi", "outfil", "outfile", "outs",
"outsh", "outshe", "outshee", "outsheet", "ovtest", "pac",
"pac_7", "palette", "parse", "parse_dissim", "pause", "pca",
"pca_8", "pca_display", "pca_estat", "pca_p", "pca_rotate",
"pcamat", "pchart", "pchart_7", "pchi", "pchi_7", "pcorr",
"pctile", "pentium", "pergram", "pergram_7", "permute",
"permute_8", "personal", "peto_st", "pkcollapse", "pkcross",
"pkequiv", "pkexamine", "pkexamine_7", "pkshape", "pksumm",
"pksumm_7", "pl", "plo", "plot", "plugin", "pnorm",
"pnorm_7", "poisgof", "poiss_lf", "poiss_sw", "poisso_p",
"poisson", "poisson_estat", "post", "postclose", "postfile",
"postutil", "pperron", "pr", "prais", "prais_e", "prais_e2",
"prais_p", "predict", "predictnl", "preserve", "print",
"pro", "prob", "probi", "probit", "probit_estat", "probit_p",
"proc_time", "procoverlay", "procrustes", "procrustes_estat",
"procrustes_p", "profiler", "prog", "progr", "progra",
"program", "prop", "proportion", "prtest", "prtesti", "pwcorr",
"pwd", "q", "s", "qby", "qbys", "qchi", "qchi_7", "qladder",
"qladder_7", "qnorm", "qnorm_7", "qqplot", "qqplot_7", "qreg",
"qreg_c", "qreg_p", "qreg_sw", "qu", "quadchk", "quantile",
"quantile_7", "que", "quer", "query", "range", "ranksum",
"ratio", "rchart", "rchart_7", "rcof", "recast", "reclink",
"recode", "reg", "reg3", "reg3_p", "regdw", "regr", "regre",
"regre_p2", "regres", "regres_p", "regress", "regress_estat",
"regriv_p", "remap", "ren", "rena", "renam", "rename",
"renpfix", "repeat", "replace", "report", "reshape",
"restore", "ret", "retu", "retur", "return", "rm", "rmdir",
"robvar", "roccomp", "roccomp_7", "roccomp_8", "rocf_lf",
"rocfit", "rocfit_8", "rocgold", "rocplot", "rocplot_7",
"roctab", "roctab_7", "rolling", "rologit", "rologit_p",
"rot", "rota", "rotat", "rotate", "rotatemat", "rreg",
"rreg_p", "ru", "run", "runtest", "rvfplot", "rvfplot_7",
"rvpplot", "rvpplot_7", "sa", "safesum", "sample",
"sampsi", "sav", "save", "savedresults", "saveold", "sc",
"sca", "scal", "scala", "scalar", "scatter", "scm_mine",
"sco", "scob_lf", "scob_p", "scobi_sw", "scobit", "scor",
"score", "scoreplot", "scoreplot_help", "scree", "screeplot",
"screeplot_help", "sdtest", "sdtesti", "se", "search",
"separate", "seperate", "serrbar", "serrbar_7", "serset", "set",
"set_defaults", "sfrancia", "sh", "she", "shel", "shell",
"shewhart", "shewhart_7", "signestimationsample", "signrank",
"signtest", "simul", "simul_7", "simulate", "simulate_8",
"sktest", "sleep", "slogit", "slogit_d2", "slogit_p", "smooth",
"snapspan", "so", "sor", "sort", "spearman", "spikeplot",
"spikeplot_7", "spikeplt", "spline_x", "split", "sqreg",
"sqreg_p", "sret", "sretu", "sretur", "sreturn", "ssc", "st",
"st_ct", "st_hc", "st_hcd", "st_hcd_sh", "st_is", "st_issys",
"st_note", "st_promo", "st_set", "st_show", "st_smpl",
"st_subid", "stack", "statsby", "statsby_8", "stbase", "stci",
"stci_7", "stcox", "stcox_estat", "stcox_fr", "stcox_fr_ll",
"stcox_p", "stcox_sw", "stcoxkm", "stcoxkm_7", "stcstat",
"stcurv", "stcurve", "stcurve_7", "stdes", "stem", "stepwise",
"stereg", "stfill", "stgen", "stir", "stjoin", "stmc", "stmh",
"stphplot", "stphplot_7", "stphtest", "stphtest_7",
"stptime", "strate", "strate_7", "streg", "streg_sw", "streset",
"sts", "sts_7", "stset", "stsplit", "stsum", "sttocc",
"sttoct", "stvary", "stweib", "su", "suest", "suest_8",
"sum", "summ", "summa", "summar", "summari", "summariz",
"summarize", "sunflower", "sureg", "survcurv", "survsum",
"svar", "svar_p", "svmat", "svy", "svy_disp", "svy_dreg",
"svy_est", "svy_est_7", "svy_estat", "svy_get", "svy_gnbreg_p",
"svy_head", "svy_header", "svy_heckman_p", "svy_heckprob_p",
"svy_intreg_p", "svy_ivreg_p", "svy_logistic_p", "svy_logit_p",
"svy_mlogit_p", "svy_nbreg_p", "svy_ologit_p", "svy_oprobit_p",
"svy_poisson_p", "svy_probit_p", "svy_regress_p", "svy_sub",
"svy_sub_7", "svy_x", "svy_x_7", "svy_x_p", "svydes",
"svydes_8", "svygen", "svygnbreg", "svyheckman", "svyheckprob",
"svyintreg", "svyintreg_7", "svyintrg", "svyivreg", "svylc",
"svylog_p", "svylogit", "svymarkout", "svymarkout_8",
"svymean", "svymlog", "svymlogit", "svynbreg", "svyolog",
"svyologit", "svyoprob", "svyoprobit", "svyopts",
"svypois", "svypois_7", "svypoisson", "svyprobit", "svyprobt",
"svyprop", "svyprop_7", "svyratio", "svyreg", "svyreg_p",
"svyregress", "svyset", "svyset_7", "svyset_8", "svytab",
"svytab_7", "svytest", "svytotal", "sw", "sw_8", "swcnreg",
"swcox", "swereg", "swilk", "swlogis", "swlogit",
"swologit", "swoprbt", "swpois", "swprobit", "swqreg",
"swtobit", "swweib", "symmetry", "symmi", "symplot",
"symplot_7", "syntax", "sysdescribe", "sysdir", "sysuse",
"szroeter", "ta", "tab", "tab1", "tab2", "tab_or", "tabd",
"tabdi", "tabdis", "tabdisp", "tabi", "table", "tabodds",
"tabodds_7", "tabstat", "tabu", "tabul", "tabula", "tabulat",
"tabulate", "te", "tempfile", "tempname", "tempvar", "tes",
"test", "testnl", "testparm", "teststd", "tetrachoric",
"time_it", "timer", "tis", "tob", "tobi", "tobit", "tobit_p",
"tobit_sw", "token", "tokeni", "tokeniz", "tokenize",
"tostring", "total", "translate", "translator", "transmap",
"treat_ll", "treatr_p", "treatreg", "trim", "trnb_cons",
"trnb_mean", "trpoiss_d2", "trunc_ll", "truncr_p", "truncreg",
"tsappend", "tset", "tsfill", "tsline", "tsline_ex",
"tsreport", "tsrevar", "tsrline", "tsset", "tssmooth",
"tsunab", "ttest", "ttesti", "tut_chk", "tut_wait", "tutorial",
"tw", "tware_st", "two", "twoway", "twoway__fpfit_serset",
"twoway__function_gen", "twoway__histogram_gen",
"twoway__ipoint_serset", "twoway__ipoints_serset",
"twoway__kdensity_gen", "twoway__lfit_serset",
"twoway__normgen_gen", "twoway__pci_serset",
"twoway__qfit_serset", "twoway__scatteri_serset",
"twoway__sunflower_gen", "twoway_ksm_serset", "ty", "typ",
"type", "typeof", "u", "unab", "unabbrev", "unabcmd",
"update", "us", "use", "uselabel", "var", "var_mkcompanion",
"var_p", "varbasic", "varfcast", "vargranger", "varirf",
"varirf_add", "varirf_cgraph", "varirf_create", "varirf_ctable",
"varirf_describe", "varirf_dir", "varirf_drop", "varirf_erase",
"varirf_graph", "varirf_ograph", "varirf_rename", "varirf_set",
"varirf_table", "varlist", "varlmar", "varnorm", "varsoc",
"varstable", "varstable_w", "varstable_w2", "varwle",
"vce", "vec", "vec_fevd", "vec_mkphi", "vec_p", "vec_p_w",
"vecirf_create", "veclmar", "veclmar_w", "vecnorm",
"vecnorm_w", "vecrank", "vecstable", "verinst", "vers",
"versi", "versio", "version", "view", "viewsource", "vif",
"vwls", "wdatetof", "webdescribe", "webseek", "webuse",
"weib1_lf", "weib2_lf", "weib_lf", "weib_lf0", "weibhet_glf",
"weibhet_glf_sh", "weibhet_glfa", "weibhet_glfa_sh",
"weibhet_gp", "weibhet_ilf", "weibhet_ilf_sh", "weibhet_ilfa",
"weibhet_ilfa_sh", "weibhet_ip", "weibu_sw", "weibul_p",
"weibull", "weibull_c", "weibull_s", "weibullhet",
"wh", "whelp", "whi", "which", "whil", "while", "wilc_st",
"wilcoxon", "win", "wind", "windo", "window", "winexec",
"wntestb", "wntestb_7", "wntestq", "xchart", "xchart_7",
"xcorr", "xcorr_7", "xi", "xi_6", "xmlsav", "xmlsave",
"xmluse", "xpose", "xsh", "xshe", "xshel", "xshell",
"xt_iis", "xt_tis", "xtab_p", "xtabond", "xtbin_p",
"xtclog", "xtcloglog", "xtcloglog_8", "xtcloglog_d2",
"xtcloglog_pa_p", "xtcloglog_re_p", "xtcnt_p", "xtcorr",
"xtdata", "xtdes", "xtfront_p", "xtfrontier", "xtgee",
"xtgee_elink", "xtgee_estat", "xtgee_makeivar", "xtgee_p",
"xtgee_plink", "xtgls", "xtgls_p", "xthaus", "xthausman",
"xtht_p", "xthtaylor", "xtile", "xtint_p", "xtintreg",
"xtintreg_8", "xtintreg_d2", "xtintreg_p", "xtivp_1",
"xtivp_2", "xtivreg", "xtline", "xtline_ex", "xtlogit",
"xtlogit_8", "xtlogit_d2", "xtlogit_fe_p", "xtlogit_pa_p",
"xtlogit_re_p", "xtmixed", "xtmixed_estat", "xtmixed_p",
"xtnb_fe", "xtnb_lf", "xtnbreg", "xtnbreg_pa_p",
"xtnbreg_refe_p", "xtpcse", "xtpcse_p", "xtpois", "xtpoisson",
"xtpoisson_d2", "xtpoisson_pa_p", "xtpoisson_refe_p", "xtpred",
"xtprobit", "xtprobit_8", "xtprobit_d2", "xtprobit_re_p",
"xtps_fe", "xtps_lf", "xtps_ren", "xtps_ren_8", "xtrar_p",
"xtrc", "xtrc_p", "xtrchh", "xtrefe_p", "xtreg", "xtreg_be",
"xtreg_fe", "xtreg_ml", "xtreg_pa_p", "xtreg_re",
"xtregar", "xtrere_p", "xtset", "xtsf_ll", "xtsf_llti",
"xtsum", "xttab", "xttest0", "xttobit", "xttobit_8",
"xttobit_p", "xttrans", "yx", "yxview__barlike_draw",
"yxview_area_draw", "yxview_bar_draw", "yxview_dot_draw",
"yxview_dropline_draw", "yxview_function_draw",
"yxview_iarrow_draw", "yxview_ilabels_draw",
"yxview_normal_draw", "yxview_pcarrow_draw",
"yxview_pcbarrow_draw", "yxview_pccapsym_draw",
"yxview_pcscatter_draw", "yxview_pcspike_draw",
"yxview_rarea_draw", "yxview_rbar_draw", "yxview_rbarm_draw",
"yxview_rcap_draw", "yxview_rcapsym_draw",
"yxview_rconnected_draw", "yxview_rline_draw",
"yxview_rscatter_draw", "yxview_rspike_draw",
"yxview_spike_draw", "yxview_sunflower_draw", "zap_s", "zinb",
"zinb_llf", "zinb_plf", "zip", "zip_llf", "zip_p", "zip_plf",
"zt_ct_5", "zt_hc_5", "zt_hcd_5", "zt_is_5", "zt_iss_5",
"zt_sho_5", "zt_smp_5", "ztbase_5", "ztcox_5", "ztdes_5",
"ztereg_5", "ztfill_5", "ztgen_5", "ztir_5", "ztjoin_5", "ztnb",
"ztnb_p", "ztp", "ztp_p", "zts_5", "ztset_5", "ztspli_5",
"ztsum_5", "zttoct_5", "ztvary_5", "ztweib_5"
)
builtins_functions = (
"Cdhms", "Chms", "Clock", "Cmdyhms", "Cofc", "Cofd", "F",
"Fden", "Ftail", "I", "J", "_caller", "abbrev", "abs", "acos",
"acosh", "asin", "asinh", "atan", "atan2", "atanh",
"autocode", "betaden", "binomial", "binomialp", "binomialtail",
"binormal", "bofd", "byteorder", "c", "ceil", "char",
"chi2", "chi2den", "chi2tail", "cholesky", "chop", "clip",
"clock", "cloglog", "cofC", "cofd", "colnumb", "colsof", "comb",
"cond", "corr", "cos", "cosh", "d", "daily", "date", "day",
"det", "dgammapda", "dgammapdada", "dgammapdadx", "dgammapdx",
"dgammapdxdx", "dhms", "diag", "diag0cnt", "digamma",
"dofC", "dofb", "dofc", "dofh", "dofm", "dofq", "dofw",
"dofy", "dow", "doy", "dunnettprob", "e", "el", "epsdouble",
"epsfloat", "exp", "fileexists", "fileread", "filereaderror",
"filewrite", "float", "floor", "fmtwidth", "gammaden",
"gammap", "gammaptail", "get", "group", "h", "hadamard",
"halfyear", "halfyearly", "has_eprop", "hh", "hhC", "hms",
"hofd", "hours", "hypergeometric", "hypergeometricp", "ibeta",
"ibetatail", "index", "indexnot", "inlist", "inrange", "int",
"inv", "invF", "invFtail", "invbinomial", "invbinomialtail",
"invchi2", "invchi2tail", "invcloglog", "invdunnettprob",
"invgammap", "invgammaptail", "invibeta", "invibetatail",
"invlogit", "invnFtail", "invnbinomial", "invnbinomialtail",
"invnchi2", "invnchi2tail", "invnibeta", "invnorm", "invnormal",
"invnttail", "invpoisson", "invpoissontail", "invsym", "invt",
"invttail", "invtukeyprob", "irecode", "issym", "issymmetric",
"itrim", "length", "ln", "lnfact", "lnfactorial", "lngamma",
"lnnormal", "lnnormalden", "log", "log10", "logit", "lower",
"ltrim", "m", "match", "matmissing", "matrix", "matuniform",
"max", "maxbyte", "maxdouble", "maxfloat", "maxint", "maxlong",
"mdy", "mdyhms", "mi", "min", "minbyte", "mindouble",
"minfloat", "minint", "minlong", "minutes", "missing", "mm",
"mmC", "mod", "mofd", "month", "monthly", "mreldif",
"msofhours", "msofminutes", "msofseconds", "nF", "nFden",
"nFtail", "nbetaden", "nbinomial", "nbinomialp", "nbinomialtail",
"nchi2", "nchi2den", "nchi2tail", "nibeta", "norm", "normal",
"normalden", "normd", "npnF", "npnchi2", "npnt", "nt", "ntden",
"nttail", "nullmat", "plural", "poisson", "poissonp",
"poissontail", "proper", "q", "qofd", "quarter", "quarterly",
"r", "rbeta", "rbinomial", "rchi2", "real", "recode", "regexm",
"regexr", "regexs", "reldif", "replay", "return", "reverse",
"rgamma", "rhypergeometric", "rnbinomial", "rnormal", "round",
"rownumb", "rowsof", "rpoisson", "rt", "rtrim", "runiform", "s",
"scalar", "seconds", "sign", "sin", "sinh", "smallestdouble",
"soundex", "soundex_nara", "sqrt", "ss", "ssC", "strcat",
"strdup", "string", "strlen", "strlower", "strltrim", "strmatch",
"strofreal", "strpos", "strproper", "strreverse", "strrtrim",
"strtoname", "strtrim", "strupper", "subinstr", "subinword",
"substr", "sum", "sweep", "syminv", "t", "tC", "tan", "tanh",
"tc", "td", "tden", "th", "tin", "tm", "tq", "trace",
"trigamma", "trim", "trunc", "ttail", "tukeyprob", "tw",
"twithin", "uniform", "upper", "vec", "vecdiag", "w", "week",
"weekly", "wofd", "word", "wordcount", "year", "yearly",
"yh", "ym", "yofd", "yq", "yw"
)
|
PypiClean
|
/emencia.django.newsletter-0.2.tar.gz/emencia.django.newsletter-0.2/emencia/django/newsletter/utils/excel.py
|
# Based on http://www.djangosnippets.org/snippets/1151/
import datetime
from django.http import HttpResponse
from django.db.models.query import QuerySet
from django.db.models.query import ValuesQuerySet
class ExcelResponse(HttpResponse):
"""ExcelResponse feeded by queryset"""
def __init__(self, data, output_name='excel_data', headers=None,
force_csv=False, encoding='utf8'):
valid_data = False
if isinstance(data, ValuesQuerySet):
data = list(data)
elif isinstance(data, QuerySet):
data = list(data.values())
if hasattr(data, '__getitem__'):
if isinstance(data[0], dict):
if headers is None:
headers = data[0].keys()
data = [[row[col] for col in headers] for row in data]
data.insert(0, headers)
if hasattr(data[0], '__getitem__'):
valid_data = True
assert valid_data is True, "ExcelResponse requires a sequence of sequences"
import StringIO
output = StringIO.StringIO()
# Excel has a limit on number of rows; if we have more than that, make a csv
use_xls = False
if len(data) <= 65536 and force_csv is not True:
try:
import xlwt
except ImportError:
pass
else:
use_xls = True
if use_xls:
book = xlwt.Workbook(encoding=encoding)
sheet = book.add_sheet('Sheet 1')
styles = {'datetime': xlwt.easyxf(num_format_str='yyyy-mm-dd hh:mm:ss'),
'date': xlwt.easyxf(num_format_str='yyyy-mm-dd'),
'time': xlwt.easyxf(num_format_str='hh:mm:ss'),
'default': xlwt.Style.default_style}
for rowx, row in enumerate(data):
for colx, value in enumerate(row):
if isinstance(value, datetime.datetime):
cell_style = styles['datetime']
elif isinstance(value, datetime.date):
cell_style = styles['date']
elif isinstance(value, datetime.time):
cell_style = styles['time']
else:
cell_style = styles['default']
sheet.write(rowx, colx, value, style=cell_style)
book.save(output)
mimetype = 'application/vnd.ms-excel'
file_ext = 'xls'
else:
for row in data:
out_row = []
for value in row:
if not isinstance(value, basestring):
value = unicode(value)
value = value.encode(encoding)
out_row.append(value.replace('"', '""'))
output.write('"%s"\n' %
'","'.join(out_row))
mimetype = 'text/csv'
file_ext = 'csv'
output.seek(0)
super(ExcelResponse, self).__init__(content=output.getvalue(),
mimetype=mimetype)
self['Content-Disposition'] = 'attachment;filename="%s.%s"' % \
            (output_name.replace('"', '\\"'), file_ext)
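# A minimal usage sketch: a Django view that streams a queryset as a
# spreadsheet (or CSV when xlwt is unavailable). "myapp" and its "Subscriber"
# model are hypothetical and used only for illustration.
def example_export_view(request):
    from myapp.models import Subscriber   # hypothetical app/model
    queryset = Subscriber.objects.values('email', 'subscription_date')
    return ExcelResponse(queryset, output_name='subscribers')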
|
PypiClean
|
/tgzr.declare-0.0.1rc1.tar.gz/tgzr.declare-0.0.1rc1/src/tgzr/declare/renderer.py
|
from .render_context import RenderContext
from .state_store import StateStore
class Renderer(object):
schema = None
_renderers = {}
#
# TOOLS
#
@classmethod
def ui_from_json(cls, json_string):
return cls.schema.from_json(json_string)
@classmethod
def ui_from_dict(cls, d):
return cls.schema.from_dict(d)
#
# RENDERERS
#
@classmethod
def register(cls, func):
"""
Decorator to set the renderer to use with the
model in schema having the same name.
The decorated func must accept two arguments:
renderer(context: RenderContext, params: <the_renderer_model>)
"""
renderer = func
name = func.__name__
try:
model = getattr(cls.schema, name)
except AttributeError:
raise AttributeError(
f"Error registering renderer {renderer}: the schema {cls.schema} does not have a model named {name!r}."
)
cls._renderers[model] = renderer
return renderer
@classmethod
def get_renderer(cls, component_type):
model = getattr(cls.schema, component_type)
return cls._renderers[model]
@classmethod
def assert_schema_complete(cls):
# TODO: maybe assert renderer signature too ?
missing = set()
for name, params in cls.schema.params_models():
try:
cls._renderers[params]
except KeyError:
missing.add(name)
if missing:
raise Exception(
f"The renderer {cls} is missing support for {missing} components !"
)
#
# INIT
#
def __init__(self, host, check_schema=True):
super().__init__()
if check_schema:
self.assert_schema_complete()
self._root_context_items = dict()
self.update_root_context(renderer=self)
self.set_host(host)
self._state_store = StateStore()
self._handlers = {}
#
# RENDERER
#
def set_host(self, host):
self.update_root_context(
widget=host,
root_widget=host,
layout_parent=host,
)
def update_root_context(self, **name_values):
self._root_context_items.update(**name_values)
def create_root_context(self):
context = RenderContext.create_root_context(**self._root_context_items)
return context
def render(self, ui, parent_context=None):
if parent_context is None:
# We are rendering the root UI here, let's clean up previous render data:
parent_context = self.create_root_context()
self._state_store.clear_bindings()
params = ui
TYPE = params.TYPE
ID = params.ID
# print("--> rendering", TYPE)
try:
model = getattr(self.schema, TYPE)
except Exception:
# print("=============")
# pprint.pprint(ui.dict(), indent=True)
# print("=============")
raise
renderer = self._renderers[model]
with parent_context(TYPE=TYPE, ID=ID) as sub_context:
renderer(context=sub_context, params=params)
return sub_context
#
# ACTIONS
#
def set_handler(self, handler, key=None, action=None):
self._handlers[(key, action)] = handler
def perform_action(self, key, action, context, *args, **kwargs):
# TODO: store a list instead of a single handler and trigger until one of them
        # doesn't return True?
# => No, action namespace set by the Group component should be enough.
try:
handler = self._handlers[(key, action)]
except KeyError:
try:
handler = self._handlers[(key, None)]
except KeyError:
try:
handler = self._handlers[(None, action)]
except KeyError:
try:
handler = self._handlers[(None, None)]
except KeyError:
handler = self._default_handler
handler(self, key, action, context, *args, **kwargs)
def _default_handler(self, renderer, action_key, action_type, *args, **kwargs):
print(
f"!! Warning !! Action not handled: type={action_type!r}, key={action_key!r}"
)
#
# STATES
#
def bind_state(self, state_key, update_callback, *default):
"""
        Returns an updater and a setter for the state at `state_key`.
The updater is callable without argument. It will fetch the state
value and call `update_callback(state_value)` with it.
The setter is a callable with one argument. It will store
the argument as the new state, which will trigger all updaters
for this value.
"""
        # TODO: see why we still have *default; is it useful since nobody seems to use it?
updater, setter = self._state_store.bind(state_key, update_callback, *default)
return updater, setter
def get_state(self, key, *default):
return self._state_store.get(key, *default)
def get_states(self, prefix=None, strip_prefix=True):
return self._state_store.get_namespace(prefix=prefix, strip_prefix=strip_prefix)
def update_states(self, values):
return self._state_store.update(values)
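# A minimal sketch of how a concrete renderer is assembled with the register()
# decorator; "my_schema", its "Label" model and the "text" field are
# assumptions for illustration. The decorated function name must match a model
# exposed by the schema, and it is invoked with context= and params= keywords:
#
#     class ExampleTextRenderer(Renderer):
#         schema = my_schema     # hypothetical schema object
#         _renderers = {}        # give this subclass its own registry
#
#     @ExampleTextRenderer.register
#     def Label(context, params):
#         print(params.text)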
|
PypiClean
|
/sysidentpy-0.3.1.tar.gz/sysidentpy-0.3.1/docs/landing-page/get-help.md
|
---
template: overrides/main.html
title: Get Help
---
# Get Help
Before asking others for help, it’s generally a good idea for you to try to help yourself. SysIdentPy includes several examples in the documentation with tips and notes about the package that might help you. However, if you have any issues and you can't find the answer, reach out using any method described below.
## Connect with the author
You can:
* <a href="https://github.com/wilsonrljr" class="external-link" target="_blank">Follow me on **GitHub**</a>.
* <a href="https://twitter.com/wilsonrljr" class="external-link" target="_blank">Follow me on
* <a href="https://www.linkedin.com/in/wilsonrljr/" class="external-link" target="_blank">Connect with me on **Linkedin**</a>.
* I'll start to use Twitter more often 🤷♂ (probably).
* Read what I write (or follow me) on <a href="https://medium.com/@wilsonrocha_97367" class="external-link" target="_blank">**Medium**</a>.
## Create issues
You can <a href="https://github.com/wilsonrljr/SysIdentPy/issues/new/choose" class="external-link" target="_blank">create a new issue</a> in the GitHub repository, for example to:
* Ask a **question** or ask about a **problem**.
* Suggest a new **feature**.
## Join the chat
Join the 👥 <a href="https://discord.gg/cu8vNgkU" class="external-link" target="_blank">Discord chat server</a> 👥 and hang out with others in the SysIdentPy community.
!!! note "You can use the chat for anything"
    Keep in mind that you can use the chat to talk about anything related to SysIdentPy. Conversations about system identification, dynamical systems, new papers, issues, and new features are all welcome, but if a question could help other users, I'll kindly ask you to open a discussion or an issue on GitHub as well.
    That way I can make sure I always answer everything, even if it takes some time.
|
PypiClean
|
/python-telegram-payment-bot-11.1.2.tar.gz/python-telegram-payment-bot-11.1.2/telegram/files/video.py
|
"""This module contains an object that represents a Telegram Video."""
from telegram import PhotoSize, TelegramObject
class Video(TelegramObject):
"""This object represents a video file.
Attributes:
file_id (:obj:`str`): Unique identifier for this file.
width (:obj:`int`): Video width as defined by sender.
height (:obj:`int`): Video height as defined by sender.
duration (:obj:`int`): Duration of the video in seconds as defined by sender.
thumb (:class:`telegram.PhotoSize`): Optional. Video thumbnail.
mime_type (:obj:`str`): Optional. Mime type of a file as defined by sender.
file_size (:obj:`int`): Optional. File size.
bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.
Args:
file_id (:obj:`str`): Unique identifier for this file.
width (:obj:`int`): Video width as defined by sender.
height (:obj:`int`): Video height as defined by sender.
duration (:obj:`int`): Duration of the video in seconds as defined by sender.
thumb (:class:`telegram.PhotoSize`, optional): Video thumbnail.
mime_type (:obj:`str`, optional): Mime type of a file as defined by sender.
file_size (:obj:`int`, optional): File size.
bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self,
file_id,
width,
height,
duration,
thumb=None,
mime_type=None,
file_size=None,
bot=None,
**kwargs):
# Required
self.file_id = str(file_id)
self.width = int(width)
self.height = int(height)
self.duration = int(duration)
# Optionals
self.thumb = thumb
self.mime_type = mime_type
self.file_size = file_size
self.bot = bot
self._id_attrs = (self.file_id,)
@classmethod
def de_json(cls, data, bot):
if not data:
return None
data = super(Video, cls).de_json(data, bot)
data['thumb'] = PhotoSize.de_json(data.get('thumb'), bot)
return cls(bot=bot, **data)
def get_file(self, timeout=None, **kwargs):
"""Convenience wrapper over :attr:`telegram.Bot.get_file`
Args:
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.File`
Raises:
:class:`telegram.TelegramError`
"""
return self.bot.get_file(self.file_id, timeout=timeout, **kwargs)
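# A brief usage sketch, assuming the (bot, update) handler signature of this
# library version and that telegram.File exposes download(); the callback
# wiring and the target path are illustrative only.
def example_video_handler(bot, update):
    video = update.message.video          # a Video instance as described above
    telegram_file = video.get_file()      # wraps bot.get_file(file_id)
    telegram_file.download('incoming_video.mp4')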
|
PypiClean
|
/django-inteliger-0.1.71.tar.gz/django-inteliger-0.1.71/core/cliente/migrations/0020_auto_20200729_1352.py
|
import compositefk.fields
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20200728_1746'),
('cliente', '0019_auto_20200728_1752'),
]
operations = [
migrations.CreateModel(
name='CartaoBandeira',
fields=[
('dat_insercao', models.DateTimeField(auto_now_add=True, null=True)),
('dat_edicao', models.DateTimeField(auto_now=True, null=True)),
('dat_delete', models.DateTimeField(null=True)),
('usr_insercao', models.IntegerField(null=True)),
('usr_edicao', models.IntegerField(null=True)),
('usr_delete', models.IntegerField(null=True)),
('origem_insercao_codigo', models.CharField(max_length=200, null=True)),
('origem_insercao_tipo', models.CharField(default='USR.PROFILE', max_length=200, null=True)),
('origem_edicao_codigo', models.CharField(max_length=200, null=True)),
('origem_edicao_tipo', models.CharField(default='USR.PROFILE', max_length=200, null=True)),
('origem_delete_codigo', models.CharField(max_length=200, null=True)),
('origem_delete_tipo', models.CharField(default='USR.PROFILE', max_length=200, null=True)),
('status', models.BooleanField(default=True, null=True)),
('nome', models.CharField(max_length=50, primary_key=True, serialize=False)),
('nm_descritivo', models.CharField(max_length=200, null=True)),
('imagem', models.FileField(default='bandeiras/sem-imagem.jpg', null=True, upload_to='bandeiras')),
('origem_delete', compositefk.fields.CompositeForeignKey(null=True, null_if_equal=[], on_delete=django.db.models.deletion.DO_NOTHING, related_name='cliente_cartaobandeira_origem_delete', to='core.Tipo', to_fields={'codigo': compositefk.fields.LocalFieldValue('origem_delete_codigo'), 'tipo': compositefk.fields.LocalFieldValue('origem_delete_tipo')})),
('origem_edicao', compositefk.fields.CompositeForeignKey(null=True, null_if_equal=[], on_delete=django.db.models.deletion.DO_NOTHING, related_name='cliente_cartaobandeira_origem_edicao', to='core.Tipo', to_fields={'codigo': compositefk.fields.LocalFieldValue('origem_edicao_codigo'), 'tipo': compositefk.fields.LocalFieldValue('origem_edicao_tipo')})),
('origem_insercao', compositefk.fields.CompositeForeignKey(null=True, null_if_equal=[], on_delete=django.db.models.deletion.DO_NOTHING, related_name='cliente_cartaobandeira_origem_insercao', to='core.Tipo', to_fields={'codigo': compositefk.fields.LocalFieldValue('origem_insercao_codigo'), 'tipo': compositefk.fields.LocalFieldValue('origem_insercao_tipo')})),
],
options={
'db_table': 'cliente_cartaobandeira',
'abstract': False,
'managed': True,
},
managers=[
('normal_objects', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='CartaoBandeiraParcela',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dat_insercao', models.DateTimeField(auto_now_add=True, null=True)),
('dat_edicao', models.DateTimeField(auto_now=True, null=True)),
('dat_delete', models.DateTimeField(null=True)),
('usr_insercao', models.IntegerField(null=True)),
('usr_edicao', models.IntegerField(null=True)),
('usr_delete', models.IntegerField(null=True)),
('origem_insercao_codigo', models.CharField(max_length=200, null=True)),
('origem_insercao_tipo', models.CharField(default='USR.PROFILE', max_length=200, null=True)),
('origem_edicao_codigo', models.CharField(max_length=200, null=True)),
('origem_edicao_tipo', models.CharField(default='USR.PROFILE', max_length=200, null=True)),
('origem_delete_codigo', models.CharField(max_length=200, null=True)),
('origem_delete_tipo', models.CharField(default='USR.PROFILE', max_length=200, null=True)),
('status', models.BooleanField(default=True, null=True)),
('quantidade', models.IntegerField(default=1, null=True)),
('is_juros', models.BooleanField(default=False, null=True)),
('bandeira', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='cliente.CartaoBandeira')),
('origem_delete', compositefk.fields.CompositeForeignKey(null=True, null_if_equal=[], on_delete=django.db.models.deletion.DO_NOTHING, related_name='cliente_cartaobandeiraparcela_origem_delete', to='core.Tipo', to_fields={'codigo': compositefk.fields.LocalFieldValue('origem_delete_codigo'), 'tipo': compositefk.fields.LocalFieldValue('origem_delete_tipo')})),
('origem_edicao', compositefk.fields.CompositeForeignKey(null=True, null_if_equal=[], on_delete=django.db.models.deletion.DO_NOTHING, related_name='cliente_cartaobandeiraparcela_origem_edicao', to='core.Tipo', to_fields={'codigo': compositefk.fields.LocalFieldValue('origem_edicao_codigo'), 'tipo': compositefk.fields.LocalFieldValue('origem_edicao_tipo')})),
('origem_insercao', compositefk.fields.CompositeForeignKey(null=True, null_if_equal=[], on_delete=django.db.models.deletion.DO_NOTHING, related_name='cliente_cartaobandeiraparcela_origem_insercao', to='core.Tipo', to_fields={'codigo': compositefk.fields.LocalFieldValue('origem_insercao_codigo'), 'tipo': compositefk.fields.LocalFieldValue('origem_insercao_tipo')})),
],
options={
'db_table': 'cliente_cartaobandeiraparcela',
'abstract': False,
'managed': True,
},
managers=[
('normal_objects', django.db.models.manager.Manager()),
],
),
migrations.AddField(
model_name='cartao',
name='adquirente',
field=compositefk.fields.CompositeForeignKey(null=True, null_if_equal=[], on_delete=django.db.models.deletion.DO_NOTHING, related_name='cliente_cartao_adquirente', to='core.Tipo', to_fields={'codigo': compositefk.fields.LocalFieldValue('tipo_codigo'), 'tipo': compositefk.fields.LocalFieldValue('tipo_tipo')}),
),
migrations.AddField(
model_name='cartao',
name='adquirente_codigo',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='cartao',
name='adquirente_tipo',
field=models.CharField(default='CLIENTE.CARTAO.ADQUIRENTE', max_length=200, null=True),
),
migrations.AddField(
model_name='cartao',
name='cartao_token',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='cartao',
name='cliente_token',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='cartao',
name='dat_val',
field=models.CharField(max_length=5, null=True),
),
migrations.AddField(
model_name='cartao',
name='nm_impresso',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='cartao',
name='tipo',
field=compositefk.fields.CompositeForeignKey(null=True, null_if_equal=[], on_delete=django.db.models.deletion.DO_NOTHING, related_name='cliente_cartao', to='core.Tipo', to_fields={'codigo': compositefk.fields.LocalFieldValue('tipo_codigo'), 'tipo': compositefk.fields.LocalFieldValue('tipo_tipo')}),
),
migrations.AddField(
model_name='cartao',
name='tipo_codigo',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='cartao',
name='tipo_tipo',
field=models.CharField(default='CLIENTE.CARTAO', max_length=200, null=True),
),
migrations.AddField(
model_name='cartao',
name='ultimos_4',
field=models.CharField(max_length=4, null=True),
),
migrations.AddField(
model_name='cartao',
name='bandeira',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='cliente.CartaoBandeira'),
),
]
|
PypiClean
|
/ais_dom-2023.7.2-py3-none-any.whl/homeassistant/components/switcher_kis/sensor.py
|
from __future__ import annotations
from dataclasses import dataclass
from aioswitcher.device import DeviceCategory
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import UnitOfElectricCurrent, UnitOfPower
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import SwitcherDataUpdateCoordinator
from .const import SIGNAL_DEVICE_ADD
@dataclass
class AttributeDescription:
"""Class to describe a sensor."""
name: str
icon: str | None = None
unit: str | None = None
device_class: SensorDeviceClass | None = None
state_class: SensorStateClass | None = None
default_enabled: bool = True
POWER_SENSORS = {
"power_consumption": AttributeDescription(
name="Power Consumption",
unit=UnitOfPower.WATT,
device_class=SensorDeviceClass.POWER,
state_class=SensorStateClass.MEASUREMENT,
),
"electric_current": AttributeDescription(
name="Electric Current",
unit=UnitOfElectricCurrent.AMPERE,
device_class=SensorDeviceClass.CURRENT,
state_class=SensorStateClass.MEASUREMENT,
),
}
TIME_SENSORS = {
"remaining_time": AttributeDescription(
name="Remaining Time",
icon="mdi:av-timer",
),
"auto_off_set": AttributeDescription(
name="Auto Shutdown",
icon="mdi:progress-clock",
default_enabled=False,
),
}
POWER_PLUG_SENSORS = POWER_SENSORS
WATER_HEATER_SENSORS = {**POWER_SENSORS, **TIME_SENSORS}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Switcher sensor from config entry."""
@callback
def async_add_sensors(coordinator: SwitcherDataUpdateCoordinator) -> None:
"""Add sensors from Switcher device."""
if coordinator.data.device_type.category == DeviceCategory.POWER_PLUG:
async_add_entities(
SwitcherSensorEntity(coordinator, attribute, info)
for attribute, info in POWER_PLUG_SENSORS.items()
)
elif coordinator.data.device_type.category == DeviceCategory.WATER_HEATER:
async_add_entities(
SwitcherSensorEntity(coordinator, attribute, info)
for attribute, info in WATER_HEATER_SENSORS.items()
)
config_entry.async_on_unload(
async_dispatcher_connect(hass, SIGNAL_DEVICE_ADD, async_add_sensors)
)
class SwitcherSensorEntity(
CoordinatorEntity[SwitcherDataUpdateCoordinator], SensorEntity
):
"""Representation of a Switcher sensor entity."""
def __init__(
self,
coordinator: SwitcherDataUpdateCoordinator,
attribute: str,
description: AttributeDescription,
) -> None:
"""Initialize the entity."""
super().__init__(coordinator)
self.attribute = attribute
# Entity class attributes
self._attr_name = f"{coordinator.name} {description.name}"
self._attr_icon = description.icon
self._attr_native_unit_of_measurement = description.unit
self._attr_device_class = description.device_class
self._attr_entity_registry_enabled_default = description.default_enabled
self._attr_unique_id = (
f"{coordinator.device_id}-{coordinator.mac_address}-{attribute}"
)
self._attr_device_info = {
"connections": {(dr.CONNECTION_NETWORK_MAC, coordinator.mac_address)}
}
@property
def native_value(self) -> StateType:
"""Return value of sensor."""
return getattr(self.coordinator.data, self.attribute) # type: ignore[no-any-return]
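# For orientation, a sketch of what one more entry in the description maps
# above would look like. The "power_factor" attribute is hypothetical and is
# not provided by the aioswitcher device data; it only illustrates the shape
# of an AttributeDescription entry.
#
#     "power_factor": AttributeDescription(
#         name="Power Factor",
#         icon="mdi:flash",
#         state_class=SensorStateClass.MEASUREMENT,
#         default_enabled=False,
#     ),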
|
PypiClean
|
/displaylang_sympy-0.10.4-py3-none-any.whl/sympy/physics/quantum/qubit.py
|
import math
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.numbers import Integer
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.functions.elementary.complexes import conjugate
from sympy.functions.elementary.exponential import log
from sympy.core.basic import _sympify
from sympy.external.gmpy import SYMPY_INTS
from sympy.matrices import Matrix, zeros
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.hilbert import ComplexSpace
from sympy.physics.quantum.state import Ket, Bra, State
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.matrixutils import (
numpy_ndarray, scipy_sparse_matrix
)
from mpmath.libmp.libintmath import bitcount
__all__ = [
'Qubit',
'QubitBra',
'IntQubit',
'IntQubitBra',
'qubit_to_matrix',
'matrix_to_qubit',
'matrix_to_density',
'measure_all',
'measure_partial',
'measure_partial_oneshot',
'measure_all_oneshot'
]
#-----------------------------------------------------------------------------
# Qubit Classes
#-----------------------------------------------------------------------------
class QubitState(State):
"""Base class for Qubit and QubitBra."""
#-------------------------------------------------------------------------
# Initialization/creation
#-------------------------------------------------------------------------
@classmethod
def _eval_args(cls, args):
# If we are passed a QubitState or subclass, we just take its qubit
# values directly.
if len(args) == 1 and isinstance(args[0], QubitState):
return args[0].qubit_values
# Turn strings into tuple of strings
if len(args) == 1 and isinstance(args[0], str):
args = tuple( S.Zero if qb == "0" else S.One for qb in args[0])
else:
args = tuple( S.Zero if qb == "0" else S.One if qb == "1" else qb for qb in args)
args = tuple(_sympify(arg) for arg in args)
# Validate input (must have 0 or 1 input)
for element in args:
if element not in (S.Zero, S.One):
raise ValueError(
"Qubit values must be 0 or 1, got: %r" % element)
return args
@classmethod
def _eval_hilbert_space(cls, args):
return ComplexSpace(2)**len(args)
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def dimension(self):
"""The number of Qubits in the state."""
return len(self.qubit_values)
@property
def nqubits(self):
return self.dimension
@property
def qubit_values(self):
"""Returns the values of the qubits as a tuple."""
return self.label
#-------------------------------------------------------------------------
# Special methods
#-------------------------------------------------------------------------
def __len__(self):
return self.dimension
def __getitem__(self, bit):
return self.qubit_values[int(self.dimension - bit - 1)]
#-------------------------------------------------------------------------
# Utility methods
#-------------------------------------------------------------------------
def flip(self, *bits):
"""Flip the bit(s) given."""
newargs = list(self.qubit_values)
for i in bits:
bit = int(self.dimension - i - 1)
if newargs[bit] == 1:
newargs[bit] = 0
else:
newargs[bit] = 1
return self.__class__(*tuple(newargs))
class Qubit(QubitState, Ket):
"""A multi-qubit ket in the computational (z) basis.
We use the normal convention that the least significant qubit is on the
right, so ``|00001>`` has a 1 in the least significant qubit.
Parameters
==========
values : list, str
The qubit values as a list of ints ([0,0,0,1,1,]) or a string ('011').
Examples
========
Create a qubit in a couple of different ways and look at their attributes:
>>> from sympy.physics.quantum.qubit import Qubit
>>> Qubit(0,0,0)
|000>
>>> q = Qubit('0101')
>>> q
|0101>
>>> q.nqubits
4
>>> len(q)
4
>>> q.dimension
4
>>> q.qubit_values
(0, 1, 0, 1)
We can flip the value of an individual qubit:
>>> q.flip(1)
|0111>
We can take the dagger of a Qubit to get a bra:
>>> from sympy.physics.quantum.dagger import Dagger
>>> Dagger(q)
<0101|
>>> type(Dagger(q))
<class 'sympy.physics.quantum.qubit.QubitBra'>
Inner products work as expected:
>>> ip = Dagger(q)*q
>>> ip
<0101|0101>
>>> ip.doit()
1
"""
@classmethod
def dual_class(self):
return QubitBra
def _eval_innerproduct_QubitBra(self, bra, **hints):
if self.label == bra.label:
return S.One
else:
return S.Zero
def _represent_default_basis(self, **options):
return self._represent_ZGate(None, **options)
def _represent_ZGate(self, basis, **options):
"""Represent this qubits in the computational basis (ZGate).
"""
_format = options.get('format', 'sympy')
n = 1
definite_state = 0
for it in reversed(self.qubit_values):
definite_state += n*it
n = n*2
result = [0]*(2**self.dimension)
result[int(definite_state)] = 1
if _format == 'sympy':
return Matrix(result)
elif _format == 'numpy':
import numpy as np
return np.array(result, dtype='complex').transpose()
elif _format == 'scipy.sparse':
from scipy import sparse
return sparse.csr_matrix(result, dtype='complex').transpose()
def _eval_trace(self, bra, **kwargs):
indices = kwargs.get('indices', [])
#sort index list to begin trace from most-significant
#qubit
sorted_idx = list(indices)
if len(sorted_idx) == 0:
sorted_idx = list(range(0, self.nqubits))
sorted_idx.sort()
#trace out for each of index
new_mat = self*bra
for i in range(len(sorted_idx) - 1, -1, -1):
# start from tracing out from leftmost qubit
new_mat = self._reduced_density(new_mat, int(sorted_idx[i]))
if (len(sorted_idx) == self.nqubits):
#in case full trace was requested
return new_mat[0]
else:
return matrix_to_density(new_mat)
def _reduced_density(self, matrix, qubit, **options):
"""Compute the reduced density matrix by tracing out one qubit.
The qubit argument should be of type Python int, since it is used
        in bit operations.
"""
def find_index_that_is_projected(j, k, qubit):
bit_mask = 2**qubit - 1
return ((j >> qubit) << (1 + qubit)) + (j & bit_mask) + (k << qubit)
old_matrix = represent(matrix, **options)
old_size = old_matrix.cols
#we expect the old_size to be even
new_size = old_size//2
new_matrix = Matrix().zeros(new_size)
for i in range(new_size):
for j in range(new_size):
for k in range(2):
col = find_index_that_is_projected(j, k, qubit)
row = find_index_that_is_projected(i, k, qubit)
new_matrix[i, j] += old_matrix[row, col]
return new_matrix
class QubitBra(QubitState, Bra):
"""A multi-qubit bra in the computational (z) basis.
We use the normal convention that the least significant qubit is on the
right, so ``|00001>`` has a 1 in the least significant qubit.
Parameters
==========
values : list, str
The qubit values as a list of ints ([0,0,0,1,1,]) or a string ('011').
See also
========
Qubit: Examples using qubits
"""
@classmethod
def dual_class(self):
return Qubit
class IntQubitState(QubitState):
"""A base class for qubits that work with binary representations."""
@classmethod
def _eval_args(cls, args, nqubits=None):
# The case of a QubitState instance
if len(args) == 1 and isinstance(args[0], QubitState):
return QubitState._eval_args(args)
# otherwise, args should be integer
elif not all(isinstance(a, (int, Integer)) for a in args):
raise ValueError('values must be integers, got (%s)' % (tuple(type(a) for a in args),))
# use nqubits if specified
if nqubits is not None:
if not isinstance(nqubits, (int, Integer)):
raise ValueError('nqubits must be an integer, got (%s)' % type(nqubits))
if len(args) != 1:
raise ValueError(
'too many positional arguments (%s). should be (number, nqubits=n)' % (args,))
return cls._eval_args_with_nqubits(args[0], nqubits)
# For a single argument, we construct the binary representation of
# that integer with the minimal number of bits.
if len(args) == 1 and args[0] > 1:
#rvalues is the minimum number of bits needed to express the number
rvalues = reversed(range(bitcount(abs(args[0]))))
qubit_values = [(args[0] >> i) & 1 for i in rvalues]
return QubitState._eval_args(qubit_values)
# For two numbers, the second number is the number of bits
# on which it is expressed, so IntQubit(0,5) == |00000>.
elif len(args) == 2 and args[1] > 1:
return cls._eval_args_with_nqubits(args[0], args[1])
else:
return QubitState._eval_args(args)
@classmethod
def _eval_args_with_nqubits(cls, number, nqubits):
need = bitcount(abs(number))
if nqubits < need:
raise ValueError(
'cannot represent %s with %s bits' % (number, nqubits))
qubit_values = [(number >> i) & 1 for i in reversed(range(nqubits))]
return QubitState._eval_args(qubit_values)
def as_int(self):
"""Return the numerical value of the qubit."""
number = 0
n = 1
for i in reversed(self.qubit_values):
number += n*i
n = n << 1
return number
def _print_label(self, printer, *args):
return str(self.as_int())
def _print_label_pretty(self, printer, *args):
label = self._print_label(printer, *args)
return prettyForm(label)
_print_label_repr = _print_label
_print_label_latex = _print_label
class IntQubit(IntQubitState, Qubit):
"""A qubit ket that store integers as binary numbers in qubit values.
The differences between this class and ``Qubit`` are:
* The form of the constructor.
* The qubit values are printed as their corresponding integer, rather
      than the raw qubit values. The internal storage format of the qubit
      values is the same as ``Qubit``.
Parameters
==========
values : int, tuple
If a single argument, the integer we want to represent in the qubit
values. This integer will be represented using the fewest possible
number of qubits.
If a pair of integers and the second value is more than one, the first
integer gives the integer to represent in binary form and the second
integer gives the number of qubits to use.
List of zeros and ones is also accepted to generate qubit by bit pattern.
nqubits : int
The integer that represents the number of qubits.
This number should be passed with keyword ``nqubits=N``.
You can use this in order to avoid ambiguity of Qubit-style tuple of bits.
Please see the example below for more details.
Examples
========
Create a qubit for the integer 5:
>>> from sympy.physics.quantum.qubit import IntQubit
>>> from sympy.physics.quantum.qubit import Qubit
>>> q = IntQubit(5)
>>> q
|5>
We can also create an ``IntQubit`` by passing a ``Qubit`` instance.
>>> q = IntQubit(Qubit('101'))
>>> q
|5>
>>> q.as_int()
5
>>> q.nqubits
3
>>> q.qubit_values
(1, 0, 1)
We can go back to the regular qubit form.
>>> Qubit(q)
|101>
Please note that ``IntQubit`` also accepts a ``Qubit``-style list of bits.
    So, the code below yields the qubit 3, not a single bit ``1``.
>>> IntQubit(1, 1)
|3>
To avoid ambiguity, use ``nqubits`` parameter.
Use of this keyword is recommended especially when you provide the values by variables.
>>> IntQubit(1, nqubits=1)
|1>
>>> a = 1
>>> IntQubit(a, nqubits=1)
|1>
"""
@classmethod
def dual_class(self):
return IntQubitBra
def _eval_innerproduct_IntQubitBra(self, bra, **hints):
return Qubit._eval_innerproduct_QubitBra(self, bra)
class IntQubitBra(IntQubitState, QubitBra):
"""A qubit bra that store integers as binary numbers in qubit values."""
@classmethod
def dual_class(self):
return IntQubit
#-----------------------------------------------------------------------------
# Qubit <---> Matrix conversion functions
#-----------------------------------------------------------------------------
def matrix_to_qubit(matrix):
"""Convert from the matrix repr. to a sum of Qubit objects.
Parameters
----------
matrix : Matrix, numpy.matrix, scipy.sparse
The matrix to build the Qubit representation of. This works with
SymPy matrices, numpy matrices and scipy.sparse sparse matrices.
Examples
========
Represent a state and then go back to its qubit form:
>>> from sympy.physics.quantum.qubit import matrix_to_qubit, Qubit
>>> from sympy.physics.quantum.represent import represent
>>> q = Qubit('01')
>>> matrix_to_qubit(represent(q))
|01>
"""
# Determine the format based on the type of the input matrix
format = 'sympy'
if isinstance(matrix, numpy_ndarray):
format = 'numpy'
if isinstance(matrix, scipy_sparse_matrix):
format = 'scipy.sparse'
# Make sure it is of correct dimensions for a Qubit-matrix representation.
# This logic should work with sympy, numpy or scipy.sparse matrices.
if matrix.shape[0] == 1:
mlistlen = matrix.shape[1]
nqubits = log(mlistlen, 2)
ket = False
cls = QubitBra
elif matrix.shape[1] == 1:
mlistlen = matrix.shape[0]
nqubits = log(mlistlen, 2)
ket = True
cls = Qubit
else:
raise QuantumError(
'Matrix must be a row/column vector, got %r' % matrix
)
if not isinstance(nqubits, Integer):
raise QuantumError('Matrix must be a row/column vector of size '
'2**nqubits, got: %r' % matrix)
# Go through each item in matrix, if element is non-zero, make it into a
# Qubit item times the element.
result = 0
for i in range(mlistlen):
if ket:
element = matrix[i, 0]
else:
element = matrix[0, i]
if format in ('numpy', 'scipy.sparse'):
element = complex(element)
if element != 0.0:
# Form Qubit array; 0 in bit-locations where i is 0, 1 in
# bit-locations where i is 1
qubit_array = [int(i & (1 << x) != 0) for x in range(nqubits)]
qubit_array.reverse()
result = result + element*cls(*qubit_array)
# If SymPy simplified by pulling out a constant coefficient, undo that.
if isinstance(result, (Mul, Add, Pow)):
result = result.expand()
return result
def matrix_to_density(mat):
"""
Works by finding the eigenvectors and eigenvalues of the matrix.
We know we can decompose rho by doing:
sum(EigenVal*|Eigenvect><Eigenvect|)
"""
from sympy.physics.quantum.density import Density
eigen = mat.eigenvects()
args = [[matrix_to_qubit(Matrix(
[vector, ])), x[0]] for x in eigen for vector in x[2] if x[0] != 0]
if (len(args) == 0):
return S.Zero
else:
return Density(*args)
def qubit_to_matrix(qubit, format='sympy'):
"""Converts an Add/Mul of Qubit objects into it's matrix representation
This function is the inverse of ``matrix_to_qubit`` and is a shorthand
for ``represent(qubit)``.
"""
return represent(qubit, format=format)
#-----------------------------------------------------------------------------
# Measurement
#-----------------------------------------------------------------------------
def measure_all(qubit, format='sympy', normalize=True):
"""Perform an ensemble measurement of all qubits.
Parameters
==========
qubit : Qubit, Add
The qubit to measure. This can be any Qubit or a linear combination
of them.
format : str
The format of the intermediate matrices to use. Possible values are
('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
implemented.
Returns
=======
result : list
A list that consists of primitive states and their probabilities.
Examples
========
>>> from sympy.physics.quantum.qubit import Qubit, measure_all
>>> from sympy.physics.quantum.gate import H
>>> from sympy.physics.quantum.qapply import qapply
>>> c = H(0)*H(1)*Qubit('00')
>>> c
H(0)*H(1)*|00>
>>> q = qapply(c)
>>> measure_all(q)
[(|00>, 1/4), (|01>, 1/4), (|10>, 1/4), (|11>, 1/4)]
"""
m = qubit_to_matrix(qubit, format)
if format == 'sympy':
results = []
if normalize:
m = m.normalized()
size = max(m.shape) # Max of shape to account for bra or ket
nqubits = int(math.log(size)/math.log(2))
for i in range(size):
if m[i] != 0.0:
results.append(
(Qubit(IntQubit(i, nqubits=nqubits)), m[i]*conjugate(m[i]))
)
return results
else:
raise NotImplementedError(
"This function cannot handle non-SymPy matrix formats yet"
)
def measure_partial(qubit, bits, format='sympy', normalize=True):
"""Perform a partial ensemble measure on the specified qubits.
Parameters
==========
qubit : Qubit
The qubit to measure. This can be any Qubit or a linear combination
of them.
bits : tuple
The qubits to measure.
format : str
The format of the intermediate matrices to use. Possible values are
('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
implemented.
Returns
=======
result : list
A list that consists of primitive states and their probabilities.
Examples
========
>>> from sympy.physics.quantum.qubit import Qubit, measure_partial
>>> from sympy.physics.quantum.gate import H
>>> from sympy.physics.quantum.qapply import qapply
>>> c = H(0)*H(1)*Qubit('00')
>>> c
H(0)*H(1)*|00>
>>> q = qapply(c)
>>> measure_partial(q, (0,))
[(sqrt(2)*|00>/2 + sqrt(2)*|10>/2, 1/2), (sqrt(2)*|01>/2 + sqrt(2)*|11>/2, 1/2)]
"""
m = qubit_to_matrix(qubit, format)
if isinstance(bits, (SYMPY_INTS, Integer)):
bits = (int(bits),)
if format == 'sympy':
if normalize:
m = m.normalized()
possible_outcomes = _get_possible_outcomes(m, bits)
# Form output from function.
output = []
for outcome in possible_outcomes:
# Calculate probability of finding the specified bits with
# given values.
prob_of_outcome = 0
prob_of_outcome += (outcome.H*outcome)[0]
# If the output has a chance, append it to output with found
# probability.
if prob_of_outcome != 0:
if normalize:
next_matrix = matrix_to_qubit(outcome.normalized())
else:
next_matrix = matrix_to_qubit(outcome)
output.append((
next_matrix,
prob_of_outcome
))
return output
else:
raise NotImplementedError(
"This function cannot handle non-SymPy matrix formats yet"
)
def measure_partial_oneshot(qubit, bits, format='sympy'):
"""Perform a partial oneshot measurement on the specified qubits.
A oneshot measurement is equivalent to performing a measurement on a
quantum system. This type of measurement does not return the probabilities
like an ensemble measurement does, but rather returns *one* of the
possible resulting states. The exact state that is returned is determined
by picking a state randomly according to the ensemble probabilities.
Parameters
----------
qubit : Qubit
The qubit to measure. This can be any Qubit or a linear combination
of them.
bits : tuple
The qubits to measure.
format : str
The format of the intermediate matrices to use. Possible values are
('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
implemented.
Returns
-------
result : Qubit
The qubit that the system collapsed to upon measurement.
"""
import random
m = qubit_to_matrix(qubit, format)
if format == 'sympy':
m = m.normalized()
possible_outcomes = _get_possible_outcomes(m, bits)
# Form output from function
random_number = random.random()
total_prob = 0
for outcome in possible_outcomes:
# Calculate probability of finding the specified bits
# with given values
total_prob += (outcome.H*outcome)[0]
if total_prob >= random_number:
return matrix_to_qubit(outcome.normalized())
else:
raise NotImplementedError(
"This function cannot handle non-SymPy matrix formats yet"
)
def _get_possible_outcomes(m, bits):
"""Get the possible states that can be produced in a measurement.
Parameters
----------
m : Matrix
The matrix representing the state of the system.
bits : tuple, list
Which bits will be measured.
Returns
-------
result : list
The list of possible states which can occur given this measurement.
These are un-normalized so we can derive the probability of finding
this state by taking the inner product with itself
"""
# This is filled with loads of dirty binary tricks...You have been warned
size = max(m.shape) # Max of shape to account for bra or ket
nqubits = int(math.log(size, 2) + .1) # Number of qubits possible
# Make the output states and put in output_matrices, nothing in them now.
# Each state will represent a possible outcome of the measurement
# Thus, output_matrices[0] is the matrix which we get when all measured
# bits return 0. and output_matrices[1] is the matrix for only the 0th
# bit being true
output_matrices = []
for i in range(1 << len(bits)):
output_matrices.append(zeros(2**nqubits, 1))
# Bitmasks will help sort how to determine possible outcomes.
# When the bit mask is and-ed with a matrix-index,
# it will determine which state that index belongs to
bit_masks = []
for bit in bits:
bit_masks.append(1 << bit)
# Make possible outcome states
for i in range(2**nqubits):
trueness = 0 # This tells us to which output_matrix this value belongs
# Find trueness
for j in range(len(bit_masks)):
if i & bit_masks[j]:
trueness += j + 1
# Put the value in the correct output matrix
output_matrices[trueness][i] = m[i]
return output_matrices
def measure_all_oneshot(qubit, format='sympy'):
"""Perform a oneshot ensemble measurement on all qubits.
A oneshot measurement is equivalent to performing a measurement on a
quantum system. This type of measurement does not return the probabilities
like an ensemble measurement does, but rather returns *one* of the
possible resulting states. The exact state that is returned is determined
by picking a state randomly according to the ensemble probabilities.
Parameters
----------
qubit : Qubit
The qubit to measure. This can be any Qubit or a linear combination
of them.
format : str
The format of the intermediate matrices to use. Possible values are
('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
implemented.
Returns
-------
result : Qubit
The qubit that the system collapsed to upon measurement.
"""
import random
m = qubit_to_matrix(qubit)
if format == 'sympy':
m = m.normalized()
random_number = random.random()
total = 0
result = 0
for i in m:
total += i*i.conjugate()
if total > random_number:
break
result += 1
return Qubit(IntQubit(result, int(math.log(max(m.shape), 2) + .1)))
else:
raise NotImplementedError(
"This function cannot handle non-SymPy matrix formats yet"
)
|
PypiClean
|
/ebbs-2.2.28.tar.gz/ebbs-2.2.28/README.md
|
# Eons Basic Build System
EBBS (or ebbs) is a framework for designing modular build pipelines for any language and system. Builders are python scripts that are downloaded and run on the fly with configuration provided by json config files, environment variables, and command line arguments!
Here, at eons, we have found building and distributing code to be too hard and far too disparate between languages. Thus, we designed ebbs to make packaging and distributing code consistent. No matter what language you're working with and no matter what you want to do with your code.
Want to compile C++ in a containerized environment then publish that code as an image back to Docker? How about publish your python code to PyPI or even just build a simple Wordpress plugin? No matter how intricate or big your project becomes, you'll be able to rely on ebbs to automate every step of your build process. It's just python. That's literally it.
With ebbs, there will be no more:
* having to learn a new way to package code for every language.
* having to change your code to fit your build system.
* having to specify unnecessary configuration for every project.
Instead, you write your code the way you want and your ebbs build system will put the pieces together for you.
Ebbs has been written in adherence to the [eons naming conventions](https://eons.llc/convention/naming/) and [eons directory conventions](https://eons.llc/convention/uri-names/). However, we do try to make overriding these conventions as easy as possible so that you don't have to change your existing code to use our systems.
For example, if you use "include" instead of the eons-preferred "inc", you can tell ebbs:
```json
"copy" : [
{"../include" : "inc"}
]
```
In the same fashion, you can bypass the eons directory scheme ("my_project.exe", "my_project.lib", "my_project.img", etc.) by specifying `"name" : "my_project"` and `"type" : "exe"` or whatever you'd like.
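For instance, a minimal sketch of a build.json that overrides the directory-derived name and type (treat the exact key placement as an assumption and check your Builder's docs) could look like:
```json
{
    "name" : "my_project",
    "type" : "exe"
}
```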
If you find ebbs to be getting in the way or overly challenging, let us know! Seriously, building code should be easy and we're always happy to know how we can improve. Feel free to open issues or just email us at [email protected].
## Installation
`pip install ebbs`
## Usage
Ebbs must be invoked from the directory you wish to build from.
For example, a well-designed project should allow you compile it locally by:
```shell
cd build
ebbs
```
Per [eons](https://github.com/eons-dev/eons.lib), ebbs supports:
* `-v` or `--verbose` (count, i.e `-vv` = 2) or `--verbosity #`, where # is some number, or the `verbosity` environment or config value: will show more information and increase the logging level, e.g. print debug messages (3 for debug; 2 for info).
* `--config` or `-c` (string): the path to a json config file from which other values may be retrieved.
* `--no-repo` or the `no_repo` environment or config value (bool, i.e. 'True', 'true', etc.): whether or not to enable reaching out to online servers for code (see Dynamic Functionality, below).
* `--log-file` or the `log_file` environment or config value (string; supports formatting, e.g. '/var/log/eons/{this.name}.log'): optional value for logging to a file in addition to stderr.
As always, use `ebbs --help` for help ;)
### Configuration
Ebbs is intended to keep your build process separate from your code. With that said, it can be useful to specify some project-wide settings and build configurations.
Note that there isn't any real reason you can't move the build.json or even write an ebbs script to generate build.json and then call ebbs with it ;)
When running ebbs, the builder you select will pull its configuration values from the following external sources:
1. the command line (e.g. in case you want to override anything)
2. a "build.json" in the provided build folder (which can be specified via `--config`)
3. a json file provided to `ebbs`.
4. the system environment (e.g. for keeping passwords out of repo-files and commands)
Any existing member variables will override these external values.
You can manually specify the builder you'd like in one of 2 ways:
1. the `-b` argument to ebbs.
2. `"build" : "SOMETHING"` in the build.json
Lastly, you can specify a build folder (i.e. a folder to create within your project for all build output) with:
1. `-i` on the cli; the default is "build" (e.g. "/path/to/my/project/build")
2. `"build_in" : "BUILD_FOLDER"` in the build.json
You can also specify any number of other arguments in any of the command line, build.json, and system environments.
For example, `export pypi_username="__token__"` would make `this.Fetch('pypi_username')` in the "py" Builder return `__token__`, assuming you don't set `"pypi_username" : "something else"` in the build.json nor specify `--pypi-username "something else"` on the command line.
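As a rough sketch (assuming the `Fetch` behaviour described above and the `Builder` base class shown later in this README; `my_publish` is a hypothetical name), a Builder could read such a value like this:
```python
import logging

from ebbs import Builder

class my_publish(Builder):
    def Build(this):
        # Resolved from the command line, build.json, or the environment,
        # following the precedence order listed above.
        username = this.Fetch('pypi_username')
        logging.info(f"Publishing as {username}")
```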
**IMPORTANT NOTE: Most ebbs Builders will DELETE the build folder you pass to them.**
This is done so that previous builds cannot create stale data which influence future builds. However, if you mess up and call, say, `ebbs -b cpp` from `./src` instead of from `./build`, you could lose your "src" folder. Please use this tool responsibly and read up on what each Builder does.
To make things easy, you can search for `clearBuildPath` in the python file and `clear_build_path` in the config files. If you see `this.clearBuildPath = False` it should be okay to use that Builder with any directory (such is the case for the Publish Builder, which zips the contents of any directory and uploads them to an online repository). Conversely, take note of where `"clear_build_path": true` is set.
### Where Are These "Builders"?
All Builders are searched for in the local file system from where ebbs was called within the following folders:
```python
"./eons" #per the eons.Executor.defaultRepoDirectory
```
If the build you specified is not found within one of those directories, ebbs will try to download it from the remote repository with a name of `{builder}.build`. The downloaded build script will be saved to whatever directory you set in `--repo-store` (default "./eons/").
Unfortunately, python class names cannot have dashes ("-") in them. Instead, a series of underscores (`_`) is often used instead. While this deviates from the eons naming schema, it should still be intelligible for short names. You are, of course, welcome to use whatever naming scheme you would like instead!
### The Build Path
As mentioned, ebbs depends on the directory it is invoked from. The `rootPath` provided to each Builder will be this directory. The `buildPath` is specified relative to the `rootPath`. If you would like to use a single folder for all Builders, please set the `repo_store` environment variable with an absolute path.
For example, if you have a "git" and a "workspace" folder in your home directory and you want to use your custom Builder, "my_build" on all the projects in the git folder, instead of copying my_build to every project's workspace, you could simply `export repo_store="~/workspace"` and call ebbs from the appropriate build directory for each project.
Something like: `me@mine:~/git/my_cpp_project.exe/build$ ebbs -b my_build`. NOTE: if the build.json file `~/git/my_cpp_project.exe/build/build.json` exists, it will affect the behavior of `my_build` and potentially even affect other Builders. To ensure no side-effects are generated from project build configurations, you should create an empty folder to invoke your custom build process from (e.g. `local/`).
Your home folder would then look something like:
```
home/
├─ git/
│ ├─ my_cpp_project.exe/
├─ workspace/
│ ├─ my_build.py
```
### Repository
Online repository settings can be specified with:
```
--repo-store (default = ./eons/)
--repo-url (default = https://api.infrastructure.tech/v1/package)
--repo-username
--repo-password
```
NOTE: you do not need to supply any repo credentials or other settings in order to download packages from the public repository.
For more info on the repo integration, see [the eons library](https://github.com/eons-dev/lib_eons#online-repository)
It is also worth noting that the online repository system is handled upstream (and downstream, for Publish) of ebbs.
By default, ebbs will use the [infrastructure.tech](https://infrastructure.tech) package repository. See the [Infrastructure web server](https://github.com/infrastructure-tech/infrastructure.srv) for more info.
**IMPORTANT CAVEAT FOR ONLINE PACKAGES:** the package name must be suffixed with the "build" extension to be found by ebbs.
For example, if you want to use `-b my_build` from the repository, ebbs will attempt to download "my_build.build". The package zip (my_build.build.zip) is then downloaded, extracted, registered, and instantiated.
All packages are .zip files.
### Example Build Scripts:
* [Python](https://github.com/eons-dev/py.build)
* [C++](https://github.com/eons-dev/cpp.build)
* [Docker](https://github.com/eons-dev/docker.build)
* [Publish](https://github.com/eons-dev/publish.build) <- this one makes other Builders available online.
* [In Container](https://github.com/eons-dev/in_container.build) <- this one moves the remaining build process into a docker container.
* [Arbitrary](https://github.com/eons-dev/arbitrary.build) <- this one just runs commands.
* [Proxy](https://github.com/eons-dev/proxy.build) <- this one loads another json config file.
* [Test](https://github.com/eons-dev/test.build) <- this one runs commands and validates the outputs.
### Cascading Builds
As with any good build system, you aren't limited to just one step or even one file. With ebbs, you can specify "next" in your build.json (see below), which will execute a series of Builders after the initial.
Here's an example build.json that builds a C++ project then pushes it to Dockerhub (taken from the [Infrastructure web server](https://github.com/infrastructure-tech/infrastructure.srv)):
```json
{
"clear_build_path" : true,
"next": [
{
"build" : "in_container",
"config" : {
"image" : "eons/dev-webserver.img",
"copy_env" : [
"docker_username",
"docker_password"
],
"next" : [
{
"build" : "cpp",
"build_in" : "build",
"copy" : [
{"../../inc/" : "inc/"},
{"../../src/" : "src/"}
],
"config" : {
"file_name" : "entrypoint",
"cpp_version" : 17,
"libs_shared": [
"restbed",
"cpr"
],
"next" : [
{
"build": "docker",
"path" : "infrastructure.srv",
"copy" : [
{"out/" : "src/"}
],
"config" : {
"base_image" : "eons/webserver.img",
"image_name" : "eons/infrastructure.srv",
"image_os" : "debian",
"entrypoint" : "/usr/local/bin/entrypoint",
"also" : [
"EXPOSE 80"
]
}
}
]
}
}
]
}
}
]
}
```
This script can be invoked with just `ebbs` (assuming the appropriate docker credentials are stored in your environment, docker is installed, etc.).
For other examples, check out the `build` folder of this repo and any other mentioned above!
## Design
### I Want One!
Before diving too deep into EBBS, please also give a quick look at the parent library: [eons](https://github.com/eons-dev/eons.lib).
The [UserFunctor Utilities](https://github.com/eons-dev/eons.lib#user-functor) will be of particular use in your Builders.
Ebbs builds packages or whatever with `ebbs.Builders`, which extend the self-registering `eons.UserFunctor`. This means you can write your own build scripts and publish them, distribute them with your code, or store them locally in the `repo_store` (see above). A major driving force behind ebbs is to encourage you to share your automation tools with colleagues, friends, and enemies! For example, you could create "my_build.py", containing something like:
```python
import logging
from ebbs import Builder
class my_build(Builder):
def __init__(this, name="My Build"):
super().__init__(name)
# delete whatever dir was provided to this, so we can start fresh.
this.clearBuildPath = True
this.supportedProjectTypes = [] #all
#or
# this.supportedProjectTypes.append("lib")
# this.supportedProjectTypes.append("exe")
# this.supportedProjectTypes.append("test")
#this.requiredKWArgs will cause an error to be thrown prior to execution (i.e. .*Build methods) iff they are not found in the system environment, build.json, nor command line.
this.requiredKWArgs.append("my_required_arg")
#this.my_optional_arg will be "some default value" unless the user overrides it from the command line or build.json file.
this.optionalKWArgs["my_optional_arg"] = "some default value"
#Check if the output of all your this.RunCommand() and whatever other calls did what you expected.
#The "next" step will only be executed if this step succeeded.
def DidBuildSucceed(this):
return True; #yeah, why not?
def PreBuild(this):
logging.info(f"Got {this.my_required_arg} and {this.my_optional_arg}")
#Required Builder method. See that class for details.
def Build(this):
#DO STUFF!
```
That file can then go in a "./ebbs/" or "./eons/" directory, perhaps within your project repository or on [infrastructure.tech](https://infrastructure.tech)!
ebbs can then be invoked with something like: `ebbs -b my_build --my-required-arg my-value`, which will run your Builder in the current path!
Also note the "--" preceding "--my-required-arg", which evaluates to "my_required_arg" (without the "--" and with "_" in place of "-") once in the Builder. This is done for convenience of both command line syntax and python code.
You could also do something like:
```shell
cat << EOF > ./build.json
{
"my_required_arg" : "my-value",
"my_optional_arg" : [
"some",
"other",
"value",
"that",
"you",
"don't",
"want",
"to",
"type"
]
}
EOF
ebbs -b my_build
```
Here, the build.json file will be automatically read in, removing the need to specify the args for your build.
If you'd like to take this a step further, you can remove the need for `-b my_build` by specifying it under an empty builder in the build.json, like so:
```shell
cat << EOF > ./build.json
{
"next": [
{
"build" : "my_build",
"build_in" : "build",
"copy" : [
{"../src/" : "src/"},
{"../inc/" : "inc/"},
{"../test/" : "test/"}
],
"config" : {
"my_required_arg" : "my-value",
"my_optional_arg" : [
"some",
"other",
"value",
"that",
"you",
"don't",
"want",
"to",
"type"
]
}
}
]
}
EOF
ebbs #no args needed!
```
Regarding `this.clearBuildPath`, as mentioned above, it is important to not call ebbs on the wrong directory. If your Builder does not need a fresh build path, set `this.clearBuildPath = False`.
With that said, most compilation, packaging, etc. can be broken by stale data from past builds, so make sure to set `this.clearBuildPath = True` if you need to.
You may also have noticed the combination of camelCase and snake_case. This is used to distinguish builtInValues from user_provided_values. This convention may change with a future release (let us know what you think!).
For `supportedProjectTypes`, the `Builder` class will split the folder containing the buildPath (i.e. the `rootPath`) on underscores (`_`), storing the first value as `this.projectType` and the second as `this.projectName`. The `projectType` is checked against the used build's `supportedProjectTypes`. If no match is found, the build is aborted prior to executing the build. If you would like your Builder to work with all project types (and thus ignore that whole naming nonsense), set `this.supportedProjectTypes = []`, where none (i.e. `[]`, not actually `None`) means "all".
You'll also get the following paths variables populated by default:
```python
this.srcPath = f"{this.rootPath}/src"
this.incPath = f"{this.rootPath}/inc"
this.depPath = f"{this.rootPath}/dep"
this.libPath = f"{this.rootPath}/lib"
this.exePath = f"{this.rootPath}/exe"
this.testPath = f"{this.rootPath}/test"
```
When a `Builder` is executed, the following are called in order:
(kwargs is the same for all)
```python
this.ValidateArgs() # <- not recommended to override.
this.BeforeFunction() # <- virtual (ok to override)
#Builder sets the above mentioned variables here
this.PreBuild() # <- virtual (ok to override)
#Supported project types are checked here
this.Build() # <- abstract method for you (MUST override)
this.PostBuild() # <- virtual (ok to override)
if (this.DidBuildSucceed()):
this.BuildNext()
this.AfterFunction() # <- virtual (ok to override)
```
|
PypiClean
|
/uniqgift_custom-1.0.7-py3-none-any.whl/django/views/generic/edit.py
|
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.forms import Form
from django.forms import models as model_forms
from django.http import HttpResponseRedirect
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
from django.views.generic.detail import (
BaseDetailView,
SingleObjectMixin,
SingleObjectTemplateResponseMixin,
)
class FormMixin(ContextMixin):
"""Provide a way to show and handle a form in a request."""
initial = {}
form_class = None
success_url = None
prefix = None
def get_initial(self):
"""Return the initial data to use for forms on this view."""
return self.initial.copy()
def get_prefix(self):
"""Return the prefix to use for forms."""
return self.prefix
def get_form_class(self):
"""Return the form class to use."""
return self.form_class
def get_form(self, form_class=None):
"""Return an instance of the form to be used in this view."""
if form_class is None:
form_class = self.get_form_class()
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""Return the keyword arguments for instantiating the form."""
kwargs = {
"initial": self.get_initial(),
"prefix": self.get_prefix(),
}
if self.request.method in ("POST", "PUT"):
kwargs.update(
{
"data": self.request.POST,
"files": self.request.FILES,
}
)
return kwargs
def get_success_url(self):
"""Return the URL to redirect to after processing a valid form."""
if not self.success_url:
raise ImproperlyConfigured("No URL to redirect to. Provide a success_url.")
return str(self.success_url) # success_url may be lazy
def form_valid(self, form):
"""If the form is valid, redirect to the supplied URL."""
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
"""If the form is invalid, render the invalid form."""
return self.render_to_response(self.get_context_data(form=form))
def get_context_data(self, **kwargs):
"""Insert the form into the context dict."""
if "form" not in kwargs:
kwargs["form"] = self.get_form()
return super().get_context_data(**kwargs)
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""Provide a way to show and handle a ModelForm in a request."""
fields = None
def get_form_class(self):
"""Return the form class to use in this view."""
if self.fields is not None and self.form_class:
raise ImproperlyConfigured(
"Specifying both 'fields' and 'form_class' is not permitted."
)
if self.form_class:
return self.form_class
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif getattr(self, "object", None) is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
if self.fields is None:
raise ImproperlyConfigured(
"Using ModelFormMixin (base class of %s) without "
"the 'fields' attribute is prohibited." % self.__class__.__name__
)
return model_forms.modelform_factory(model, fields=self.fields)
def get_form_kwargs(self):
"""Return the keyword arguments for instantiating the form."""
kwargs = super().get_form_kwargs()
if hasattr(self, "object"):
kwargs.update({"instance": self.object})
return kwargs
def get_success_url(self):
"""Return the URL to redirect to after processing a valid form."""
if self.success_url:
url = self.success_url.format(**self.object.__dict__)
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model."
)
return url
def form_valid(self, form):
"""If the form is valid, save the associated model."""
self.object = form.save()
return super().form_valid(form)
class ProcessFormView(View):
"""Render a form on GET and processes it on POST."""
def get(self, request, *args, **kwargs):
"""Handle GET requests: instantiate a blank version of the form."""
return self.render_to_response(self.get_context_data())
def post(self, request, *args, **kwargs):
"""
Handle POST requests: instantiate a form instance with the passed
POST variables and then check if it's valid.
"""
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
"""A base view for displaying a form."""
class FormView(TemplateResponseMixin, BaseFormView):
"""A view for displaying a form and rendering a template response."""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
Base view for creating a new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super().post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating a new object, with a response rendered by a template.
"""
template_name_suffix = "_form"
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super().post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""View for updating an object, with a response rendered by a template."""
template_name_suffix = "_form"
class DeletionMixin:
"""Provide the ability to delete objects."""
success_url = None
def delete(self, request, *args, **kwargs):
"""
Call the delete() method on the fetched object and then redirect to the
success URL.
"""
self.object = self.get_object()
success_url = self.get_success_url()
self.object.delete()
return HttpResponseRedirect(success_url)
# Add support for browsers which only accept GET and POST for now.
def post(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url.format(**self.object.__dict__)
else:
raise ImproperlyConfigured("No URL to redirect to. Provide a success_url.")
# RemovedInDjango50Warning.
class DeleteViewCustomDeleteWarning(Warning):
pass
class BaseDeleteView(DeletionMixin, FormMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
form_class = Form
def __init__(self, *args, **kwargs):
# RemovedInDjango50Warning.
if self.__class__.delete is not DeletionMixin.delete:
warnings.warn(
f"DeleteView uses FormMixin to handle POST requests. As a "
f"consequence, any custom deletion logic in "
f"{self.__class__.__name__}.delete() handler should be moved "
f"to form_valid().",
DeleteViewCustomDeleteWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def post(self, request, *args, **kwargs):
# Set self.object before the usual form processing flow.
# Inlined because having DeletionMixin as the first base, for
# get_success_url(), makes leveraging super() with ProcessFormView
# overly complex.
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
success_url = self.get_success_url()
self.object.delete()
return HttpResponseRedirect(success_url)
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with self.get_object(), with a
response rendered by a template.
"""
template_name_suffix = "_confirm_delete"
|
PypiClean
|
/netoprmgr-1.3.5.tar.gz/netoprmgr-1.3.5/cffi/ffiplatform.py
|
import sys, os
from .error import VerificationError
LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs',
'extra_objects', 'depends']
def get_extension(srcfilename, modname, sources=(), **kwds):
_hack_at_distutils()
from distutils.core import Extension
allsources = [srcfilename]
for src in sources:
allsources.append(os.path.normpath(src))
return Extension(name=modname, sources=allsources, **kwds)
def compile(tmpdir, ext, compiler_verbose=0, debug=None):
"""Compile a C extension module using distutils."""
_hack_at_distutils()
saved_environ = os.environ.copy()
try:
outputfilename = _build(tmpdir, ext, compiler_verbose, debug)
outputfilename = os.path.abspath(outputfilename)
finally:
# workaround for a distutils bug where some env vars can
# become longer and longer every time it is used
for key, value in saved_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
return outputfilename
def _build(tmpdir, ext, compiler_verbose=0, debug=None):
# XXX compact but horrible :-(
from distutils.core import Distribution
import distutils.errors, distutils.log
#
dist = Distribution({'ext_modules': [ext]})
dist.parse_config_files()
options = dist.get_option_dict('build_ext')
if debug is None:
debug = sys.flags.debug
options['debug'] = ('ffiplatform', debug)
options['force'] = ('ffiplatform', True)
options['build_lib'] = ('ffiplatform', tmpdir)
options['build_temp'] = ('ffiplatform', tmpdir)
#
try:
old_level = distutils.log.set_threshold(0) or 0
try:
distutils.log.set_verbosity(compiler_verbose)
dist.run_command('build_ext')
cmd_obj = dist.get_command_obj('build_ext')
[soname] = cmd_obj.get_outputs()
finally:
distutils.log.set_threshold(old_level)
except (distutils.errors.CompileError,
distutils.errors.LinkError) as e:
raise VerificationError('%s: %s' % (e.__class__.__name__, e))
#
return soname
try:
from os.path import samefile
except ImportError:
def samefile(f1, f2):
return os.path.abspath(f1) == os.path.abspath(f2)
def maybe_relative_path(path):
if not os.path.isabs(path):
return path # already relative
dir = path
names = []
while True:
prevdir = dir
dir, name = os.path.split(prevdir)
if dir == prevdir or not dir:
return path # failed to make it relative
names.append(name)
try:
if samefile(dir, os.curdir):
names.reverse()
return os.path.join(*names)
except OSError:
pass
# ____________________________________________________________
try:
int_or_long = (int, long)
import cStringIO
except NameError:
int_or_long = int # Python 3
import io as cStringIO
def _flatten(x, f):
if isinstance(x, str):
f.write('%ds%s' % (len(x), x))
elif isinstance(x, dict):
keys = sorted(x.keys())
f.write('%dd' % len(keys))
for key in keys:
_flatten(key, f)
_flatten(x[key], f)
elif isinstance(x, (list, tuple)):
f.write('%dl' % len(x))
for value in x:
_flatten(value, f)
elif isinstance(x, int_or_long):
f.write('%di' % (x,))
else:
raise TypeError(
"the keywords to verify() contains unsupported object %r" % (x,))
def flatten(x):
f = cStringIO.StringIO()
_flatten(x, f)
return f.getvalue()
def _hack_at_distutils():
# Windows-only workaround for some configurations: see
# https://bugs.python.org/issue23246 (Python 2.7 with
# a specific MS compiler suite download)
if sys.platform == "win32":
try:
import setuptools # for side-effects, patches distutils
except ImportError:
pass
|
PypiClean
|
/testoob-1.15.tar.bz2/testoob-1.15/README
|
----========================----
-== Testoob - Testing Out Of (The) Box ==-
----========================----
===========================================================
= Description:
===========================================================
Testoob is an advanced Python unit testing framework that
integrates effortlessly with Python's standard 'unittest'
module.
It's designed and built to be easy to use and extend.
===========================================================
= Links:
===========================================================
Home: http://code.google.com/p/testoob
Discussion: http://groups.google.com/group/testoob
Old docs: http://testoob.sourceforge.net
Source code: http://code.google.com/p/testoob/wiki/SourceControl
===========================================================
= Features:
===========================================================
Start using immediately with the 'testoob' executable, or
replace unittest.main() with testoob.main()
Some advanced options:
- Filtering (see testoob/extractors.py)
- Reporting (see testoob/reporting.py)
- Testing facilities (see testoob/testing.py)
- Running (e.g. threads) (see testoob/running.py)
- And more!
See the full running options by running 'testoob -h', and
check the documentation.
===========================================================
= Installation:
===========================================================
See docs/INSTALL
===========================================================
= Testing
===========================================================
Use GNU make (usually 'make' or 'gmake') from the project root:
make test # test with the default python version
make testall # test with different python versions
===========================================================
= License:
===========================================================
Testoob is distributed under the terms of the Apache
License, Version 2.0. See docs/COPYING for more
information.
===========================================================
= Credits:
===========================================================
Testoob has been created with the help of:
- Ori Peleg (orispammerpel at users.sourceforge.net)
- Barak Schiller (bspamalotschiller at gmail.com)
- Misha Seltzer (mishadontspamme at seltzer dot com)
- Wolfram Kriesing (ilikespamwolfram at kriesing dot de)
- Ronnie van 't Westeinde <Ronnie.gimmespam.van.t.Westeinde at gmail dot com>
To mail us, remove spam-related words from the user names.
Have fun testing!
The Testoob team.
|
PypiClean
|
/tsdat-0.6.1.tar.gz/tsdat-0.6.1/docs/source/config/quality_control.rst
|
.. _quality_control:
Quality Management
==================
Two types of classes can be defined in your pipeline to ensure standardized
data meets quality requirements:
:QualityChecker:
Each Quality Checker performs a specific quality control (QC) test on one or more variables
in your dataset. Quality checkers test a single data variable at a time and return a logical mask, where flagged values are marked as 'True'.
:QualityHandler:
Each Quality Handler can be specified to run if a particular QC test
fails. Quality handlers take the QC Checker's logical mask and use it to apply any QC or custom method to the data variable in question. For instance, it can be used to remove flagged data altogether or correct flagged values, such as interpolating to fill gaps in data.
Custom QC Checkers and QC Handlers are stored (typically) in ``pipelines/<pipeline_module>/qc.py``.
Once written, they must be specified in the ``config/quality.yaml`` file like shown:
.. code-block:: yaml
managers:
- name: Require Valid Coordinate Variables
checker:
classname: tsdat.qc.checkers.CheckMissing
handlers:
- classname: tsdat.qc.handlers.FailPipeline
apply_to: [COORDS]
- name: The name of this quality check
checker:
classname: pipelines.example_pipeline.qc.CustomQualityChecker
parameters: {}
handlers:
- classname: pipelines.example_pipeline.qc.CustomQualityHandler
parameters: {}
apply_to: [COORDS, DATA_VARS]
Quality Checkers
----------------
Quality Checkers are classes that are used to perform a QC test on a specific
variable. Each Quality Checker should extend the ``QualityChecker`` base
class, and implement the abstract ``run`` method as shown below. Each QualityChecker
defined in the pipeline config file will be automatically initialized by the pipeline
and invoked on the specified variables.
.. code-block:: python
@abstractmethod
def run(self, dataset: xr.Dataset, variable_name: str) -> NDArray[np.bool_]:
"""-----------------------------------------------------------------------------
Checks the quality of a specific variable in the dataset and returns the results
of the check as a boolean array where True values represent quality problems and
False values represent data that passes the quality check.
QualityCheckers should not modify dataset variables; changes to the dataset
should be made by QualityHandler(s), which receive the results of a
QualityChecker as input.
Args:
dataset (xr.Dataset): The dataset containing the variable to check.
variable_name (str): The name of the variable to check.
Returns:
NDArray[np.bool_]: The results of the quality check, where True values
indicate a quality problem.
-----------------------------------------------------------------------------"""
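As a sketch, a custom checker (illustrative only, not part of tsdat; the class and
variable names below are assumptions) that flags negative values could implement
``run`` like this:

.. code-block:: python

    import numpy as np
    import xarray as xr
    from numpy.typing import NDArray

    from tsdat.qc.checkers import QualityChecker


    class CheckNegativeValues(QualityChecker):
        """Illustrative checker: marks values below zero as quality problems."""

        def run(self, dataset: xr.Dataset, variable_name: str) -> NDArray[np.bool_]:
            # True marks a quality problem; the dataset itself is not modified here.
            return dataset[variable_name].data < 0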
Tsdat built-in quality checkers:
.. autosummary::
:nosignatures:
~tsdat.qc.checkers.QualityChecker
~tsdat.qc.checkers.CheckMissing
~tsdat.qc.checkers.CheckMonotonic
~tsdat.qc.checkers.CheckValidDelta
~tsdat.qc.checkers.CheckValidMin
~tsdat.qc.checkers.CheckValidMax
~tsdat.qc.checkers.CheckFailMin
~tsdat.qc.checkers.CheckFailMax
~tsdat.qc.checkers.CheckWarnMin
~tsdat.qc.checkers.CheckWarnMax
Quality Handlers
----------------
Quality Handlers are classes that are used to correct variable data when a specific
quality test fails. An example is interpolating missing values to fill gaps.
Each Quality Handler should extend the ``QualityHandler`` base
class, and implement the abstract `run` method that performs the correction, as shown below.
Each QualityHandler defined in the pipeline config file will be automatically initialized
by the pipeline and invoked on the specified variables.
.. code-block:: python
@abstractmethod
def run(
self, dataset: xr.Dataset, variable_name: str, failures: NDArray[np.bool_]
) -> xr.Dataset:
"""-----------------------------------------------------------------------------
Handles the quality of a variable in the dataset and returns the dataset after
any corrections have been applied.
Args:
dataset (xr.Dataset): The dataset containing the variable to handle.
variable_name (str): The name of the variable whose quality should be
handled.
failures (NDArray[np.bool_]): The results of the QualityChecker for the
provided variable, where True values indicate a quality problem.
Returns:
xr.Dataset: The dataset after the QualityHandler has been run.
-----------------------------------------------------------------------------"""
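A matching handler sketch (again illustrative; the class name and fill strategy are
assumptions) that overwrites flagged values with NaN could look like:

.. code-block:: python

    import numpy as np
    import xarray as xr
    from numpy.typing import NDArray

    from tsdat.qc.handlers import QualityHandler


    class ReplaceWithNan(QualityHandler):
        """Illustrative handler: overwrites flagged values with NaN."""

        def run(
            self, dataset: xr.Dataset, variable_name: str, failures: NDArray[np.bool_]
        ) -> xr.Dataset:
            if failures.any():
                # Keep values where the check passed; replace flagged values with NaN.
                dataset[variable_name] = dataset[variable_name].where(~failures, np.nan)
            return dataset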
Tsdat built-in quality handlers:
.. autosummary::
:nosignatures:
~tsdat.qc.handlers.QualityHandler
~tsdat.qc.handlers.RecordQualityResults
~tsdat.qc.handlers.RemoveFailedValues
~tsdat.qc.handlers.SortDatasetByCoordinate
~tsdat.qc.handlers.FailPipeline
.. automodule:: tsdat.qc.checkers
:members:
:undoc-members:
:show-inheritance:
:noindex:
.. automodule:: tsdat.qc.handlers
:members:
:undoc-members:
:show-inheritance:
:noindex:
|
PypiClean
|
/fds.sdk.AxiomaEquityOptimizer-0.21.7-py3-none-any.whl/fds/sdk/AxiomaEquityOptimizer/models/__init__.py
|
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from fds.sdk.AxiomaEquityOptimizer.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from fds.sdk.AxiomaEquityOptimizer.model.account_directories import AccountDirectories
from fds.sdk.AxiomaEquityOptimizer.model.account_directories_root import AccountDirectoriesRoot
from fds.sdk.AxiomaEquityOptimizer.model.axioma_equity_optimization_parameters import AxiomaEquityOptimizationParameters
from fds.sdk.AxiomaEquityOptimizer.model.axioma_equity_optimization_parameters_root import AxiomaEquityOptimizationParametersRoot
from fds.sdk.AxiomaEquityOptimizer.model.calculation_info import CalculationInfo
from fds.sdk.AxiomaEquityOptimizer.model.calculation_info_root import CalculationInfoRoot
from fds.sdk.AxiomaEquityOptimizer.model.client_error_response import ClientErrorResponse
from fds.sdk.AxiomaEquityOptimizer.model.currency import Currency
from fds.sdk.AxiomaEquityOptimizer.model.currency_root import CurrencyRoot
from fds.sdk.AxiomaEquityOptimizer.model.document_directories import DocumentDirectories
from fds.sdk.AxiomaEquityOptimizer.model.document_directories_root import DocumentDirectoriesRoot
from fds.sdk.AxiomaEquityOptimizer.model.error import Error
from fds.sdk.AxiomaEquityOptimizer.model.error_source import ErrorSource
from fds.sdk.AxiomaEquityOptimizer.model.object_root import ObjectRoot
from fds.sdk.AxiomaEquityOptimizer.model.optimal_portfolio import OptimalPortfolio
from fds.sdk.AxiomaEquityOptimizer.model.optimization import Optimization
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_account import OptimizerAccount
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_account_overrides import OptimizerAccountOverrides
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_optimal_holdings import OptimizerOptimalHoldings
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_output_types import OptimizerOutputTypes
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_strategy import OptimizerStrategy
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_strategy_overrides import OptimizerStrategyOverrides
from fds.sdk.AxiomaEquityOptimizer.model.optimizer_trades_list import OptimizerTradesList
|
PypiClean
|
/arger-1.4.8.tar.gz/arger-1.4.8/README.md
|
# Overview
A wrapper around argparser to help build CLIs from functions. Uses type-hints extensively :snake:.
[](https://pypi.python.org/pypi/arger)
[](https://pypi.org/project/arger/)



[](https://pypi.org/project/arger)
# Setup
## :gear: Installation
Install it directly into an activated virtual environment:
``` text
$ pip install arger
```
# :books: Usage
* create a python file called test.py
``` python
from arger import Arger
def main(param1: int, param2: str, kw1=None, kw2=False):
"""Example function with types documented in the docstring.
Args:
param1: The first parameter.
param2: The second parameter.
kw1: this is optional parameter.
kw2: this is boolean. setting flag sets True.
"""
print(locals())
arger = Arger(
main,
prog="pytest", # for testing purpose. otherwise not required
)
if __name__ == "__main__":
arger.run()
```
* Here Arger is just a subclass of `ArgumentParser`. It will not prevent you from using other `argparse` functionality.
* run this normally with
```sh
$ python test.py -h
usage: pytest [-h] [-k KW1] [-w] param1 param2
Example function with types documented in the docstring.
positional arguments:
param1 The first parameter.
param2 The second parameter.
optional arguments:
-h, --help show this help message and exit
-k KW1, --kw1 KW1 this is optional parameter. (default: None)
-w, --kw2 this is boolean. setting flag sets True. (default: False)
```
``` sh
$ python test.py 100 param2
{'param1': 100, 'param2': 'param2', 'kw1': None, 'kw2': False}
```
* Checkout [examples](docs/examples) folder and documentation to see more of `arger` in action. It supports any level of sub-commands.
# Features
- Uses docstring to parse help comment for arguments. Supports
+ [google](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
+ [numpy](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html#example-numpy)
+ [rst](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html)
- Flags will be generated from parameter-name.
1. e.g. `def main(param: ...)` -> `-p, --param`
2. If needed you could declare it inside docstring like `:param arg1: -a --arg this is the document` (see the sketch after the notes below).
- one can use `Argument` class to pass any values to the
[parser.add_argument](https://docs.python.org/3/library/argparse.html#the-add-argument-method) function
- The decorated functions can be composed to form nested sub-commands of any level.
- Most of the Standard types [supported](./tests/test_args_opts/test_arguments.py).
Please see [examples](./docs/examples/4-supported-types/src.py) for more supported types with examples.
> **_NOTE_**
> - `*args` supported but no `**kwargs` support yet.
> - all optional arguments that start with underscore is not passed to `Parser`.
> They are considered private to the function implementation.
> Some parameter names with special meaning
> - `_namespace_` -> to get the output from the `ArgumentParser.parse_args()`
> - `_arger_` -> to get the parser instance
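As a small sketch of the docstring-based flag override mentioned in the feature list above (it only uses the `Arger` API shown earlier; the function itself is hypothetical):

``` python
from arger import Arger

def main(arg1: str = "default"):
    """Example with an explicit flag declared in the docstring.

    :param arg1: -a --arg this is the document
    """
    print(arg1)

if __name__ == "__main__":
    Arger(main).run()
```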
# Argparser enhancements
* web-ui : https://github.com/nirizr/argparseweb
* extra actions : https://github.com/kadimisetty/action-hero
* automatic shell completions using [argcomplete](https://github.com/kislyuk/argcomplete)
|
PypiClean
|
/lsv2test-core-2.0.0.tar.gz/lsv2test-core-2.0.0/localstack/aws/protocol/parser.py
|
import abc
import base64
import datetime
import functools
import re
from abc import ABC
from email.utils import parsedate_to_datetime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from typing.io import IO
from xml.etree import ElementTree as ETree
import cbor2
import dateutil.parser
from botocore.model import (
ListShape,
MapShape,
OperationModel,
OperationNotFoundError,
ServiceModel,
Shape,
StructureShape,
)
from werkzeug.exceptions import BadRequest, NotFound
from localstack.aws.api import HttpRequest
from localstack.aws.protocol.op_router import RestServiceOperationRouter
from localstack.config import LEGACY_S3_PROVIDER
def _text_content(func):
"""
This decorator hides the difference between an XML node with text and a plain string.
It's used to ensure that scalar processing operates only on text strings, which
allows the same scalar handlers to be used for XML nodes from the body, HTTP headers,
and across different protocols.
:param func: function which should be wrapped
:return: wrapper function which can be called with a node or a string, where the
wrapped function is always called with a string
"""
def _get_text_content(
self,
request: HttpRequest,
shape: Shape,
node_or_string: Union[ETree.Element, str],
uri_params: Mapping[str, Any] = None,
):
if hasattr(node_or_string, "text"):
text = node_or_string.text
if text is None:
# If an XML node is empty <foo></foo>, we want to parse that as an empty string,
# not as a null/None value.
text = ""
else:
text = node_or_string
return func(self, request, shape, text, uri_params)
return _get_text_content
class RequestParserError(Exception):
"""
Error which is thrown if the request parsing fails.
Super class of all exceptions raised by the parser.
"""
pass
class UnknownParserError(RequestParserError):
"""
Error which indicates that the raised exception of the parser could be caused by invalid data or by any other
(unknown) issue. Errors like this should be reported and indicate an issue in the parser itself.
"""
pass
class ProtocolParserError(RequestParserError):
"""
Error which indicates that the given data is not compliant with the service's specification and cannot be parsed.
This usually results in a response with an HTTP 4xx status code (client error).
"""
pass
class OperationNotFoundParserError(ProtocolParserError):
"""
Error which indicates that the given data cannot be matched to a specific operation.
The request is likely _not_ meant to be handled by the ASF service provider itself.
"""
pass
def _handle_exceptions(func):
"""
Decorator which handles the exceptions raised by the parser. It ensures that all exceptions raised by the public
methods of the parser are instances of RequestParserError.
:param func: to wrap in order to add the exception handling
:return: wrapped function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except RequestParserError:
raise
except Exception as e:
raise UnknownParserError(
"An unknown error occurred when trying to parse the request."
) from e
return wrapper
class RequestParser(abc.ABC):
"""
The request parser is responsible for parsing an incoming HTTP request.
It determines which operation the request was aiming for and parses the incoming request such that the resulting
dictionary can be used to invoke the service's function implementation.
It is the base class for all parsers and therefore contains the basic logic which is used among all of them.
"""
service: ServiceModel
DEFAULT_ENCODING = "utf-8"
# The default timestamp format is ISO8601, but this can be overwritten by subclasses.
TIMESTAMP_FORMAT = "iso8601"
# The default timestamp format for header fields
HEADER_TIMESTAMP_FORMAT = "rfc822"
# The default timestamp format for query fields
QUERY_TIMESTAMP_FORMAT = "iso8601"
def __init__(self, service: ServiceModel) -> None:
super().__init__()
self.service = service
@_handle_exceptions
def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]:
"""
Determines which operation the request was aiming for and parses the incoming request such that the resulting
dictionary can be used to invoke the service's function implementation.
:param request: to parse
:return: a tuple with the operation model (defining the action / operation which the request aims for),
and the parsed service parameters
:raises: RequestParserError (either a ProtocolParserError or an UnknownParserError)
"""
raise NotImplementedError
def _parse_shape(
self, request: HttpRequest, shape: Shape, node: Any, uri_params: Mapping[str, Any] = None
) -> Any:
"""
Main parsing method which dynamically calls the parsing function for the specific shape.
:param request: the complete HttpRequest
:param shape: of the node
:param node: the single part of the HTTP request to parse
:param uri_params: the extracted URI path params
:return: result of the parsing operation, the type depends on the shape
"""
if shape is None:
return None
location = shape.serialization.get("location")
if location is not None:
if location == "header":
header_name = shape.serialization.get("name")
payload = request.headers.get(header_name)
if shape.type_name == "list":
# headers may contain a comma separated list of values (e.g., the ObjectAttributes member in
# s3.GetObjectAttributes), so we prepare it here for the handler, which will be `_parse_list`.
payload = payload.split(",")
elif location == "headers":
payload = self._parse_header_map(shape, request.headers)
# shapes with the location trait "headers" only contain strings and are not further processed
return payload
elif location == "querystring":
query_name = shape.serialization.get("name")
parsed_query = request.args
if shape.type_name == "list":
payload = parsed_query.getlist(query_name)
else:
payload = parsed_query.get(query_name)
elif location == "uri":
uri_param_name = shape.serialization.get("name")
if uri_param_name in uri_params:
payload = uri_params[uri_param_name]
else:
raise UnknownParserError("Unknown shape location '%s'." % location)
else:
# If we don't have to use a specific location, we use the node
payload = node
fn_name = "_parse_%s" % shape.type_name
handler = getattr(self, fn_name, self._noop_parser)
try:
return handler(request, shape, payload, uri_params) if payload is not None else None
except (TypeError, ValueError, AttributeError) as e:
raise ProtocolParserError(
f"Invalid type when parsing {shape.name}: '{payload}' cannot be parsed to {shape.type_name}."
) from e
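# For illustration: a member shape with serialization {"location": "header", "name": "Content-MD5"} would be
# extracted from the request headers above, while a shape of type "integer" without a location trait is
# dispatched to _parse_integer via the "_parse_<type_name>" naming convention.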
# The parsing functions for primitive types, lists, and timestamps are shared among subclasses.
def _parse_list(
self,
request: HttpRequest,
shape: ListShape,
node: list,
uri_params: Mapping[str, Any] = None,
):
parsed = []
member_shape = shape.member
for item in node:
parsed.append(self._parse_shape(request, member_shape, item, uri_params))
return parsed
@_text_content
def _parse_integer(self, _, __, node: str, ___) -> int:
return int(node)
@_text_content
def _parse_float(self, _, __, node: str, ___) -> float:
return float(node)
@_text_content
def _parse_blob(self, _, __, node: str, ___) -> bytes:
return base64.b64decode(node)
@_text_content
def _parse_timestamp(self, _, shape: Shape, node: str, ___) -> datetime.datetime:
timestamp_format = shape.serialization.get("timestampFormat")
if not timestamp_format and shape.serialization.get("location") == "header":
timestamp_format = self.HEADER_TIMESTAMP_FORMAT
elif not timestamp_format and shape.serialization.get("location") == "querystring":
timestamp_format = self.QUERY_TIMESTAMP_FORMAT
return self._convert_str_to_timestamp(node, timestamp_format)
@_text_content
def _parse_boolean(self, _, __, node: str, ___) -> bool:
value = node.lower()
if value == "true":
return True
if value == "false":
return False
raise ValueError("cannot parse boolean value %s" % node)
@_text_content
def _noop_parser(self, _, __, node: Any, ___):
return node
_parse_character = _parse_string = _noop_parser
_parse_double = _parse_float
_parse_long = _parse_integer
def _convert_str_to_timestamp(self, value: str, timestamp_format=None):
if timestamp_format is None:
timestamp_format = self.TIMESTAMP_FORMAT
timestamp_format = timestamp_format.lower()
converter = getattr(self, "_timestamp_%s" % timestamp_format)
final_value = converter(value)
return final_value
@staticmethod
def _timestamp_iso8601(date_string: str) -> datetime.datetime:
return dateutil.parser.isoparse(date_string)
@staticmethod
def _timestamp_unixtimestamp(timestamp_string: str) -> datetime.datetime:
return datetime.datetime.utcfromtimestamp(int(timestamp_string))
@staticmethod
def _timestamp_rfc822(datetime_string: str) -> datetime.datetime:
return parsedate_to_datetime(datetime_string)
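# For illustration, all three converters above yield the same point in time for these inputs:
#   _timestamp_iso8601("2015-01-25T08:00:00Z")
#   _timestamp_unixtimestamp("1422172800")
#   _timestamp_rfc822("Sun, 25 Jan 2015 08:00:00 GMT")
# i.e. 2015-01-25 08:00:00 UTC (only the ISO8601 and RFC822 variants return timezone-aware datetimes).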
@staticmethod
def _parse_header_map(shape: Shape, headers: dict) -> dict:
# Note that headers are case insensitive, so we .lower() all header names and header prefixes.
parsed = {}
prefix = shape.serialization.get("name", "").lower()
for header_name, header_value in headers.items():
if header_name.lower().startswith(prefix):
# The key name inserted into the parsed hash strips off the prefix.
name = header_name[len(prefix) :]
parsed[name] = header_value
return parsed
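# For illustration: with the header prefix "x-amz-meta-", headers such as
# {"x-amz-meta-tag1": "a", "x-amz-meta-tag2": "b", "Content-Type": "text/plain"} are parsed into
# {"tag1": "a", "tag2": "b"} (the prefix is stripped and unrelated headers are ignored).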
class QueryRequestParser(RequestParser):
"""
The ``QueryRequestParser`` is responsible for parsing incoming requests for services which use the ``query``
protocol. The requests for these services encode the majority of their parameters in the URL query string.
"""
@_handle_exceptions
def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]:
instance = request.values
if "Action" not in instance:
raise ProtocolParserError(
f"Operation detection failed. "
f"Missing Action in request for query-protocol service {self.service}."
)
action = instance["Action"]
try:
operation: OperationModel = self.service.operation_model(action)
except OperationNotFoundError as e:
raise OperationNotFoundParserError(
f"Operation detection failed."
f"Operation {action} could not be found for service {self.service}."
) from e
# There are no uri params in the query protocol (all ops are POST on "/")
uri_params = {}
input_shape: StructureShape = operation.input_shape
parsed = self._parse_shape(request, input_shape, instance, uri_params)
if parsed is None:
return operation, {}
return operation, parsed
def _process_member(
self,
request: HttpRequest,
member_name: str,
member_shape: Shape,
node: dict,
uri_params: Mapping[str, Any] = None,
):
if isinstance(member_shape, (MapShape, ListShape, StructureShape)):
# If we have a complex type, we filter the node and change its keys to craft a new "context" for the
# new hierarchy level
sub_node = self._filter_node(member_name, node)
else:
# If it is a primitive type we just get the value from the dict
sub_node = node.get(member_name)
# The filtered node is processed and returned (or None if the sub_node is None)
return (
self._parse_shape(request, member_shape, sub_node, uri_params)
if sub_node is not None
else None
)
def _parse_structure(
self,
request: HttpRequest,
shape: StructureShape,
node: dict,
uri_params: Mapping[str, Any] = None,
) -> dict:
result = {}
for member, member_shape in shape.members.items():
# The key in the node is either the serialization config "name" of the shape, or the name of the member
member_name = self._get_serialized_name(member_shape, member, node)
# BUT, if it's flattened and a list, the name is defined by the list's member's name
if member_shape.serialization.get("flattened"):
if isinstance(member_shape, ListShape):
member_name = self._get_serialized_name(member_shape.member, member, node)
value = self._process_member(request, member_name, member_shape, node, uri_params)
if value is not None or member in shape.required_members:
# If the member is required, but not existing, we explicitly set None
result[member] = value
return result if len(result) > 0 else None
def _parse_map(
self, request: HttpRequest, shape: MapShape, node: dict, uri_params: Mapping[str, Any]
) -> dict:
"""
This is what the node looks like for a flattened map:
::
{
"Attribute.1.Name": "MyKey",
"Attribute.1.Value": "MyValue",
"Attribute.2.Name": ...,
...
}
::
This function expects an already filtered / pre-processed node. The node dict would therefore look like:
::
{
"1.Name": "MyKey",
"1.Value": "MyValue",
"2.Name": ...
}
::
"""
key_prefix = ""
# Non-flattened maps have an additional hierarchy level named "entry"
# https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html#xmlflattened-trait
if not shape.serialization.get("flattened"):
key_prefix += "entry."
result = {}
i = 0
while True:
i += 1
# The key and value can be renamed (with their serialization config's "name").
# By default they are called "key" and "value".
key_name = f"{key_prefix}{i}.{self._get_serialized_name(shape.key, 'key', node)}"
value_name = f"{key_prefix}{i}.{self._get_serialized_name(shape.value, 'value', node)}"
# We process the key and value individually
k = self._process_member(request, key_name, shape.key, node)
v = self._process_member(request, value_name, shape.value, node)
if k is None or v is None:
# technically, if one exists but not the other, then that would be an invalid request
break
result[k] = v
return result if len(result) > 0 else None
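# For illustration: the pre-processed node from the docstring above is parsed into a plain dict such as
# {"MyKey": "MyValue", ...}, with one entry per index until the first missing index is encountered.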
def _parse_list(
self,
request: HttpRequest,
shape: ListShape,
node: dict,
uri_params: Mapping[str, Any] = None,
) -> list:
"""
Some actions take lists of parameters. These lists are specified using the param.[member.]n notation.
The "member" is used if the list is not flattened.
Values of n are integers starting from 1.
For example, a list with two elements looks like this:
- Flattened: &AttributeName.1=first&AttributeName.2=second
- Non-flattened: &AttributeName.member.1=first&AttributeName.member.2=second
This function expects an already filtered / processed node. The node dict would therefore look like:
::
{
"1": "first",
"2": "second",
"3": ...
}
::
"""
# The keys might be prefixed (e.g., for non-flattened lists)
key_prefix = self._get_list_key_prefix(shape, node)
# We collect the list value as well as the integer indicating the list position so we can
# later sort the list by the position, in case the attribute values are unordered
result: List[Tuple[int, Any]] = []
i = 0
while True:
i += 1
key_name = f"{key_prefix}{i}"
value = self._process_member(request, key_name, shape.member, node)
if value is None:
break
result.append((i, value))
return [r[1] for r in sorted(result)] if len(result) > 0 else None
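# For illustration: the node {"1": "first", "2": "second"} from the docstring above is parsed into
# ["first", "second"]; the first missing index in the sequence stops the iteration.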
@staticmethod
def _filter_node(name: str, node: dict) -> dict:
"""Filters the node dict for entries where the key starts with the given name."""
filtered = {k[len(name) + 1 :]: v for k, v in node.items() if k.startswith(name)}
return filtered if len(filtered) > 0 else None
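# For illustration: _filter_node("Attribute", {"Attribute.1.Name": "MyKey", "Attribute.1.Value": "MyValue"})
# returns {"1.Name": "MyKey", "1.Value": "MyValue"}, i.e. the prefix and the separating dot are stripped.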
def _get_serialized_name(self, shape: Shape, default_name: str, node: dict) -> str:
"""
Returns the serialized name for the shape if it exists.
Otherwise, it will return the given default_name.
"""
return shape.serialization.get("name", default_name)
def _get_list_key_prefix(self, shape: ListShape, node: dict):
key_prefix = ""
# Non-flattened lists have an additional hierarchy level:
# https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html#xmlflattened-trait
# The hierarchy level's name is the serialization name of its member or (by default) "member".
if not shape.serialization.get("flattened"):
key_prefix += f"{self._get_serialized_name(shape.member, 'member', node)}."
return key_prefix
class BaseRestRequestParser(RequestParser):
"""
The ``BaseRestRequestParser`` is the base class for all "resty" AWS service protocols.
The operation which should be invoked is determined based on the HTTP method and the path suffix.
The body encoding is done in the respective subclasses.
"""
def __init__(self, service: ServiceModel) -> None:
super().__init__(service)
self.ignore_get_body_errors = False
self._operation_router = RestServiceOperationRouter(service)
@_handle_exceptions
def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]:
try:
operation, uri_params = self._operation_router.match(request)
except NotFound as e:
raise OperationNotFoundParserError(
f"Unable to find operation for request to service "
f"{self.service.service_name}: {request.method} {request.path}"
) from e
shape: StructureShape = operation.input_shape
final_parsed = {}
if shape is not None:
self._parse_payload(request, shape, shape.members, uri_params, final_parsed)
return operation, final_parsed
def _parse_payload(
self,
request: HttpRequest,
shape: Shape,
member_shapes: Dict[str, Shape],
uri_params: Mapping[str, Any],
final_parsed: dict,
) -> None:
"""Parses all attributes which are located in the payload / body of the incoming request."""
payload_parsed = {}
non_payload_parsed = {}
if "payload" in shape.serialization:
# If a payload is specified in the output shape, then only that shape is used for the body payload.
payload_member_name = shape.serialization["payload"]
body_shape = member_shapes[payload_member_name]
if body_shape.serialization.get("eventstream"):
body = self._create_event_stream(request, body_shape)
payload_parsed[payload_member_name] = body
elif body_shape.type_name == "string":
# Only set the value if it's not empty (the request's data is an empty binary by default)
if request.data:
body = request.data
if isinstance(body, bytes):
body = body.decode(self.DEFAULT_ENCODING)
payload_parsed[payload_member_name] = body
elif body_shape.type_name == "blob":
# This control path is equivalent to operation.has_streaming_input (shape has a payload which is a blob)
# in which case we assume essentially an IO[bytes] to be passed. Since the payload can be optional, we
# only skip setting the parameter if content_length == 0, which indicates an empty request. If the content
# length is not set, it could be a streaming request, so the input stream is still created.
if request.content_length != 0:
payload_parsed[payload_member_name] = self.create_input_stream(request)
else:
original_parsed = self._initial_body_parse(request)
payload_parsed[payload_member_name] = self._parse_shape(
request, body_shape, original_parsed, uri_params
)
else:
# No dedicated payload member is defined, so the whole body represents the input structure.
try:
non_payload_parsed = self._initial_body_parse(request)
except ProtocolParserError:
# GET requests should ignore the body, so we just let them pass
if not (request.method in ["GET", "HEAD"] and self.ignore_get_body_errors):
raise
# even if the payload has been parsed, the rest of the shape needs to be processed as well
# (for members which are located outside of the body, like uri or header)
non_payload_parsed = self._parse_shape(request, shape, non_payload_parsed, uri_params)
# update the final result with the parsed body and the parsed payload (where the payload has precedence)
final_parsed.update(non_payload_parsed)
final_parsed.update(payload_parsed)
def _initial_body_parse(self, request: HttpRequest) -> Any:
"""
This method executes the initial parsing of the body (XML, JSON, or CBOR).
The parsed body will afterwards still be walked through and the nodes will be converted to the appropriate
types, but this method does the first round of parsing.
:param request: of which the body should be parsed
:return: depending on the actual implementation
"""
raise NotImplementedError("_initial_body_parse")
def _create_event_stream(self, request: HttpRequest, shape: Shape) -> Any:
# TODO handle event streams
raise NotImplementedError("_create_event_stream")
def create_input_stream(self, request: HttpRequest) -> IO[bytes]:
"""
Returns an IO object that makes the payload of the HttpRequest available for streaming.
:param request: the http request
:return: the input stream that allows services to consume the request payload
"""
# For now, _get_stream_for_parsing seems to be a good compromise: it can be used even after `request.data` was
# previously called. However, the reverse doesn't work: once the stream has been consumed, `request.data` will
# return b''.
return request._get_stream_for_parsing()
class RestXMLRequestParser(BaseRestRequestParser):
"""
The ``RestXMLRequestParser`` is responsible for parsing incoming requests for services which use the ``rest-xml``
protocol. The requests for these services encode the majority of their parameters as XML in the request body.
"""
def __init__(self, service_model: ServiceModel):
super(RestXMLRequestParser, self).__init__(service_model)
self.ignore_get_body_errors = True
self._namespace_re = re.compile("{.*}")
def _initial_body_parse(self, request: HttpRequest) -> ETree.Element:
body = request.data
if not body:
return ETree.Element("")
return self._parse_xml_string_to_dom(body)
def _parse_structure(
self,
request: HttpRequest,
shape: StructureShape,
node: ETree.Element,
uri_params: Mapping[str, Any] = None,
) -> dict:
parsed = {}
xml_dict = self._build_name_to_xml_node(node)
for member_name, member_shape in shape.members.items():
xml_name = self._member_key_name(member_shape, member_name)
member_node = xml_dict.get(xml_name)
# If a shape defines a location trait, the node might be None (since these are extracted from the request's
# metadata like headers or the URI)
if (
member_node is not None
or "location" in member_shape.serialization
or member_shape.serialization.get("eventheader")
):
parsed[member_name] = self._parse_shape(
request, member_shape, member_node, uri_params
)
elif member_shape.serialization.get("xmlAttribute"):
attributes = {}
location_name = member_shape.serialization["name"]
for key, value in node.attrib.items():
new_key = self._namespace_re.sub(location_name.split(":")[0] + ":", key)
attributes[new_key] = value
if location_name in attributes:
parsed[member_name] = attributes[location_name]
elif member_name in shape.required_members:
# If the member is required, but not existing, we explicitly set None
parsed[member_name] = None
return parsed
def _parse_map(
self,
request: HttpRequest,
shape: MapShape,
node: dict,
uri_params: Mapping[str, Any] = None,
) -> dict:
parsed = {}
key_shape = shape.key
value_shape = shape.value
key_location_name = key_shape.serialization.get("name", "key")
value_location_name = value_shape.serialization.get("name", "value")
if shape.serialization.get("flattened") and not isinstance(node, list):
node = [node]
for keyval_node in node:
key_name = val_name = None
for single_pair in keyval_node:
# Within each <entry> there's a <key> and a <value>
tag_name = self._node_tag(single_pair)
if tag_name == key_location_name:
key_name = self._parse_shape(request, key_shape, single_pair, uri_params)
elif tag_name == value_location_name:
val_name = self._parse_shape(request, value_shape, single_pair, uri_params)
else:
raise ProtocolParserError("Unknown tag: %s" % tag_name)
parsed[key_name] = val_name
return parsed
def _parse_list(
self,
request: HttpRequest,
shape: ListShape,
node: dict,
uri_params: Mapping[str, Any] = None,
) -> list:
# When we use _build_name_to_xml_node, repeated elements are aggregated
# into a list. However, we can't tell the difference between a scalar
# value and a single element flattened list. So before calling the
# real _parse_list, we know that "node" should actually be a list if
# it's flattened, and if it's not, then we make it a one element list.
if shape.serialization.get("flattened") and not isinstance(node, list):
node = [node]
return super(RestXMLRequestParser, self)._parse_list(request, shape, node, uri_params)
def _node_tag(self, node: ETree.Element) -> str:
return self._namespace_re.sub("", node.tag)
@staticmethod
def _member_key_name(shape: Shape, member_name: str) -> str:
# This method is needed because we have to special case flattened list
# with a serialization name. If this is the case we use the
# locationName from the list's member shape as the key name for the
# surrounding structure.
if isinstance(shape, ListShape) and shape.serialization.get("flattened"):
list_member_serialized_name = shape.member.serialization.get("name")
if list_member_serialized_name is not None:
return list_member_serialized_name
serialized_name = shape.serialization.get("name")
if serialized_name is not None:
return serialized_name
return member_name
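# For illustration (hypothetical shape names): for a flattened list member "Items" whose member shape
# declares the serialized name "Item", the surrounding structure uses the key "Item" instead of "Items".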
@staticmethod
def _parse_xml_string_to_dom(xml_string: str) -> ETree.Element:
try:
parser = ETree.XMLParser(target=ETree.TreeBuilder())
parser.feed(xml_string)
root = parser.close()
except ETree.ParseError as e:
raise ProtocolParserError(
"Unable to parse request (%s), invalid XML received:\n%s" % (e, xml_string)
) from e
return root
def _build_name_to_xml_node(self, parent_node: Union[list, ETree.Element]) -> dict:
# If the parent node is actually a list, we should not be trying
# to serialize it to a dictionary. Instead, return the first element
# in the list.
if isinstance(parent_node, list):
return self._build_name_to_xml_node(parent_node[0])
xml_dict = {}
for item in parent_node:
key = self._node_tag(item)
if key in xml_dict:
# If the key already exists, the most natural
# way to handle this is to aggregate repeated
# keys into a single list.
# <foo>1</foo><foo>2</foo> -> {'foo': [Node(1), Node(2)]}
if isinstance(xml_dict[key], list):
xml_dict[key].append(item)
else:
# Convert from a scalar to a list.
xml_dict[key] = [xml_dict[key], item]
else:
xml_dict[key] = item
return xml_dict
def _create_event_stream(self, request: HttpRequest, shape: Shape) -> Any:
# TODO handle event streams
raise NotImplementedError("_create_event_stream")
class BaseJSONRequestParser(RequestParser, ABC):
"""
The ``BaseJSONRequestParser`` is the base class for all JSON-based AWS service protocols.
This base-class handles parsing the payload / body as JSON.
"""
TIMESTAMP_FORMAT = "unixtimestamp"
def _parse_structure(
self,
request: HttpRequest,
shape: StructureShape,
value: Optional[dict],
uri_params: Mapping[str, Any] = None,
) -> Optional[dict]:
if shape.is_document_type:
final_parsed = value
else:
if value is None:
# If the value comes across the wire as "null" (None in Python),
# we should be returning this unchanged, instead of as an
# empty dict.
return None
final_parsed = {}
for member_name, member_shape in shape.members.items():
json_name = member_shape.serialization.get("name", member_name)
raw_value = value.get(json_name)
parsed = self._parse_shape(request, member_shape, raw_value, uri_params)
if parsed is not None or member_name in shape.required_members:
# If the member is required, but not existing, we set it to None anyway
final_parsed[member_name] = parsed
return final_parsed
def _parse_map(
self,
request: HttpRequest,
shape: MapShape,
value: Optional[dict],
uri_params: Mapping[str, Any] = None,
) -> Optional[dict]:
if value is None:
return None
parsed = {}
key_shape = shape.key
value_shape = shape.value
for key, value in value.items():
actual_key = self._parse_shape(request, key_shape, key, uri_params)
actual_value = self._parse_shape(request, value_shape, value, uri_params)
parsed[actual_key] = actual_value
return parsed
def _parse_body_as_json(self, request: HttpRequest) -> dict:
body_contents = request.data
if not body_contents:
return {}
if request.mimetype.startswith("application/x-amz-cbor"):
try:
return cbor2.loads(body_contents)
except ValueError as e:
raise ProtocolParserError("HTTP body could not be parsed as CBOR.") from e
else:
try:
return request.get_json(force=True)
except BadRequest as e:
raise ProtocolParserError("HTTP body could not be parsed as JSON.") from e
def _parse_boolean(
self, request: HttpRequest, shape: Shape, node: bool, uri_params: Mapping[str, Any] = None
) -> bool:
return super()._noop_parser(request, shape, node, uri_params)
def _parse_blob(
self, request: HttpRequest, shape: Shape, node: bool, uri_params: Mapping[str, Any] = None
) -> bytes:
if isinstance(node, bytes) and request.mimetype.startswith("application/x-amz-cbor"):
# CBOR does not base64 encode binary data
return bytes(node)
else:
return super()._parse_blob(request, shape, node, uri_params)
class JSONRequestParser(BaseJSONRequestParser):
"""
The ``JSONRequestParser`` is responsible for parsing incoming requests for services which use the ``json``
protocol.
The requests for these services encode the majority of their parameters as JSON in the request body.
The operation is defined in an HTTP header field.
"""
@_handle_exceptions
def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]:
target = request.headers["X-Amz-Target"]
# assuming that the last part of the target string (e.g., "x.y.z.MyAction") contains the operation name
operation_name = target.rpartition(".")[2]
operation = self.service.operation_model(operation_name)
shape = operation.input_shape
# There are no uri params in the json protocol
uri_params = {}
final_parsed = self._do_parse(request, shape, uri_params)
return operation, final_parsed
def _do_parse(
self, request: HttpRequest, shape: Shape, uri_params: Mapping[str, Any] = None
) -> dict:
parsed = {}
if shape is not None:
event_name = shape.event_stream_name
if event_name:
parsed = self._handle_event_stream(request, shape, event_name)
else:
parsed = self._handle_json_body(request, shape, uri_params)
return parsed
def _handle_event_stream(self, request: HttpRequest, shape: Shape, event_name: str):
# TODO handle event streams
raise NotImplementedError
def _handle_json_body(
self, request: HttpRequest, shape: Shape, uri_params: Mapping[str, Any] = None
) -> Any:
# The json.loads() gives us the primitive JSON types, but we need to traverse the parsed JSON data to convert
# to richer types (blobs, timestamps, etc.)
parsed_json = self._parse_body_as_json(request)
return self._parse_shape(request, shape, parsed_json, uri_params)
class RestJSONRequestParser(BaseRestRequestParser, BaseJSONRequestParser):
"""
The ``RestJSONRequestParser`` is responsible for parsing incoming requests for services which use the ``rest-json``
protocol.
The requests for these services encode the majority of their parameters as JSON in the request body.
The operation is defined by the HTTP method and the path suffix.
"""
def _initial_body_parse(self, request: HttpRequest) -> dict:
return self._parse_body_as_json(request)
def _create_event_stream(self, request: HttpRequest, shape: Shape) -> Any:
raise NotImplementedError
class EC2RequestParser(QueryRequestParser):
"""
The ``EC2RequestParser`` is responsible for parsing incoming requests for services which use the ``ec2``
protocol (which is only used by EC2). The protocol is quite similar to the ``query`` protocol with some small differences.
"""
def _get_serialized_name(self, shape: Shape, default_name: str, node: dict) -> str:
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
if "queryName" in shape.serialization:
return shape.serialization["queryName"]
elif "name" in shape.serialization:
# A locationName is always capitalized on input for the ec2 protocol.
name = shape.serialization["name"]
return name[0].upper() + name[1:]
else:
return default_name
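# For illustration: a member with serialization {"name": "instanceId"} is looked up as "InstanceId"
# (first letter capitalized), whereas a "queryName" entry would be used verbatim.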
def _get_list_key_prefix(self, shape: ListShape, node: dict):
# The EC2 protocol does not use a prefix notation for flattened lists
return ""
class S3RequestParser(RestXMLRequestParser):
class VirtualHostRewriter:
"""
Context Manager which rewrites the request object parameters such that - within the context - it looks like a
normal S3 request.
"""
def __init__(self, request: HttpRequest):
self.request = request
self.old_host = None
self.old_path = None
def __enter__(self):
# only modify the request if it uses the virtual host addressing
if self._is_vhost_address(self.request):
# save the original path and host for restoring on context exit
self.old_path = self.request.path
self.old_host = self.request.host
self.old_raw_uri = self.request.environ.get("RAW_URI")
# extract the bucket name from the host part of the request
bucket_name, new_host = self.old_host.split(".", maxsplit=1)
# split the url and put the bucket name at the front
path_parts = self.old_path.split("/")
path_parts = [bucket_name] + path_parts
path_parts = [part for part in path_parts if part]
new_path = "/" + "/".join(path_parts) or "/"
# create a new RAW_URI for the WSGI environment, this is necessary because of our `get_raw_path` utility
if self.old_raw_uri:
path_parts = self.old_raw_uri.split("/")
path_parts = [bucket_name] + path_parts
path_parts = [part for part in path_parts if part]
new_raw_uri = "/" + "/".join(path_parts) or "/"
if qs := self.request.query_string:
new_raw_uri += "?" + qs.decode("utf-8")
else:
new_raw_uri = None
# set the new path and host
self._set_request_props(self.request, new_path, new_host, new_raw_uri)
return self.request
def __exit__(self, exc_type, exc_value, exc_traceback):
# reset the original request properties on exit of the context
if self.old_host or self.old_path:
self._set_request_props(
self.request, self.old_path, self.old_host, self.old_raw_uri
)
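# For illustration (hypothetical values): a virtual-host-style request with Host
# "my-bucket.s3.localhost.localstack.cloud" and path "/my-key" is rewritten within the context to Host
# "s3.localhost.localstack.cloud" and path "/my-bucket/my-key", so that path-style parsing can be reused.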
@staticmethod
def _set_request_props(
request: HttpRequest, path: str, host: str, raw_uri: Optional[str] = None
):
"""Sets the HTTP request's path and host and clears the cache in the request object."""
request.path = path
request.headers["Host"] = host
if raw_uri:
request.environ["RAW_URI"] = raw_uri
try:
# delete the werkzeug request property cache that depends on path, but make sure all of them are
# initialized first, otherwise `del` will raise a key error
request.host = None # noqa
request.url = None # noqa
request.base_url = None # noqa
request.full_path = None # noqa
request.host_url = None # noqa
request.root_url = None # noqa
del request.host # noqa
del request.url # noqa
del request.base_url # noqa
del request.full_path # noqa
del request.host_url # noqa
del request.root_url # noqa
except AttributeError:
pass
@staticmethod
def _is_vhost_address(request: HttpRequest) -> bool:
from localstack.services.s3.s3_utils import uses_host_addressing
return uses_host_addressing(request.headers)
@_handle_exceptions
def parse(self, request: HttpRequest) -> Tuple[OperationModel, Any]:
if LEGACY_S3_PROVIDER:
"""Handle virtual-host-addressing for S3."""
with self.VirtualHostRewriter(request):
return super().parse(request)
else:
return super().parse(request)
def _parse_shape(
self, request: HttpRequest, shape: Shape, node: Any, uri_params: Mapping[str, Any] = None
) -> Any:
"""
Special handling when parsing the shape for S3 object names (= the key):
trailing '/' characters are valid and need to be preserved. However, the URL matcher strips them from the
key, so we check request.base_url to restore the trailing slash if necessary.
"""
if (
shape is not None
and uri_params is not None
and shape.serialization.get("location") == "uri"
and shape.serialization.get("name") == "Key"
and request.base_url.endswith(f"{uri_params['Key']}/")
):
uri_params = dict(uri_params)
uri_params["Key"] = uri_params["Key"] + "/"
return super()._parse_shape(request, shape, node, uri_params)
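# For illustration: for a request to ".../my-bucket/my-folder/", the URL matcher yields the Key "my-folder",
# but since request.base_url ends with "my-folder/", the trailing slash is restored and the parsed Key
# becomes "my-folder/".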
class SQSRequestParser(QueryRequestParser):
def _get_serialized_name(self, shape: Shape, default_name: str, node: dict) -> str:
"""
SQS allows using both the proper serialized name of a map and the member name as the name for maps.
For example, both work for the TagQueue operation:
- Using the proper serialized name "Tag": Tag.1.Key=key&Tag.1.Value=value
- Using the member name "Tags" of the parent structure: Tags.1.Key=key&Tags.1.Value=value
The Java SDK implements the second variant: https://github.com/aws/aws-sdk-java-v2/issues/2524
This has been confirmed to be a bug and against the spec, but since the client has a lot of users, and AWS SQS
supports both, we need to handle it here.
"""
# ask the super implementation for the proper serialized name
primary_name = super()._get_serialized_name(shape, default_name, node)
# determine a potential suffix for the name of the member in the node
suffix = ""
if shape.type_name == "map":
if not shape.serialization.get("flattened"):
suffix = ".entry.1.Key"
else:
suffix = ".1.Key"
if shape.type_name == "list":
if not shape.serialization.get("flattened"):
suffix = ".member.1"
else:
suffix = ".1"
# if the primary name is _not_ available in the node, but the default name is, we use the default name
if f"{primary_name}{suffix}" not in node and f"{default_name}{suffix}" in node:
return default_name
# otherwise we use the primary name
return primary_name
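# For illustration, using the TagQueue example from the docstring: the map member has the (flattened)
# serialized name "Tag", so the suffix is ".1.Key". If "Tag.1.Key" is not present in the node but
# "Tags.1.Key" is, the member name "Tags" is returned instead of the serialized name.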
def create_parser(service: ServiceModel) -> RequestParser:
"""
Creates the right parser for the given service model.
:param service: to create the parser for
:return: RequestParser which can handle the protocol of the service
"""
# Unfortunately, some services show subtle differences in their parsing or operation detection behavior, even though
# their specification states they implement the same protocol.
# In order to avoid bundling the whole complexity in the protocol-specific parsers, or even having service
# distinctions within those parser implementations, the service-specific parser implementations (basically the
# implicit / informally more specific protocol implementations) have precedence over the more general protocol-specific parsers.
service_specific_parsers = {
"s3": S3RequestParser,
"sqs": SQSRequestParser,
}
protocol_specific_parsers = {
"query": QueryRequestParser,
"json": JSONRequestParser,
"rest-json": RestJSONRequestParser,
"rest-xml": RestXMLRequestParser,
"ec2": EC2RequestParser,
}
# Try to select a service-specific parser implementation
if service.service_name in service_specific_parsers:
return service_specific_parsers[service.service_name](service)
else:
# Otherwise, pick the protocol-specific parser for the protocol of the service
return protocol_specific_parsers[service.protocol](service)
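# Minimal usage sketch (hypothetical variable names, not part of this module):
#   parser = create_parser(sqs_service_model)  # a ServiceModel describing the target service
#   operation, parameters = parser.parse(incoming_http_request)
# The returned OperationModel identifies the targeted action, and the parsed parameters can be handed
# to the service implementation.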
|
PypiClean
|
/Parsley-1.3.tar.gz/Parsley-1.3/ometa/_generated/parsley_termactions.py
|
def createParserClass(GrammarBase, ruleGlobals):
if ruleGlobals is None:
ruleGlobals = {}
class parsley_termactions(GrammarBase):
def rule_ruleValue(self):
_locals = {'self': self}
self.locals['ruleValue'] = _locals
self._trace(' ws', (11, 14), self.input.position)
_G_apply_1, lastError = self._apply(self.rule_ws, "ws", [])
self.considerError(lastError, 'ruleValue')
self._trace(" '->'", (14, 19), self.input.position)
_G_exactly_2, lastError = self.exactly('->')
self.considerError(lastError, 'ruleValue')
self._trace(' term', (19, 24), self.input.position)
_G_apply_3, lastError = self._apply(self.rule_term, "term", [])
self.considerError(lastError, 'ruleValue')
_locals['tt'] = _G_apply_3
_G_python_4, lastError = eval('t.Action(tt)', self.globals, _locals), None
self.considerError(lastError, 'ruleValue')
return (_G_python_4, self.currentError)
def rule_semanticPredicate(self):
_locals = {'self': self}
self.locals['semanticPredicate'] = _locals
self._trace(' ws', (64, 67), self.input.position)
_G_apply_5, lastError = self._apply(self.rule_ws, "ws", [])
self.considerError(lastError, 'semanticPredicate')
self._trace(" '?('", (67, 72), self.input.position)
_G_exactly_6, lastError = self.exactly('?(')
self.considerError(lastError, 'semanticPredicate')
self._trace(' term', (72, 77), self.input.position)
_G_apply_7, lastError = self._apply(self.rule_term, "term", [])
self.considerError(lastError, 'semanticPredicate')
_locals['tt'] = _G_apply_7
self._trace(' ws', (80, 83), self.input.position)
_G_apply_8, lastError = self._apply(self.rule_ws, "ws", [])
self.considerError(lastError, 'semanticPredicate')
self._trace(" ')'", (83, 87), self.input.position)
_G_exactly_9, lastError = self.exactly(')')
self.considerError(lastError, 'semanticPredicate')
_G_python_10, lastError = eval('t.Predicate(tt)', self.globals, _locals), None
self.considerError(lastError, 'semanticPredicate')
return (_G_python_10, self.currentError)
def rule_semanticAction(self):
_locals = {'self': self}
self.locals['semanticAction'] = _locals
self._trace(' ws', (124, 127), self.input.position)
_G_apply_11, lastError = self._apply(self.rule_ws, "ws", [])
self.considerError(lastError, 'semanticAction')
self._trace(" '!('", (127, 132), self.input.position)
_G_exactly_12, lastError = self.exactly('!(')
self.considerError(lastError, 'semanticAction')
self._trace(' term', (132, 137), self.input.position)
_G_apply_13, lastError = self._apply(self.rule_term, "term", [])
self.considerError(lastError, 'semanticAction')
_locals['tt'] = _G_apply_13
self._trace(' ws', (140, 143), self.input.position)
_G_apply_14, lastError = self._apply(self.rule_ws, "ws", [])
self.considerError(lastError, 'semanticAction')
self._trace(" ')'", (143, 147), self.input.position)
_G_exactly_15, lastError = self.exactly(')')
self.considerError(lastError, 'semanticAction')
_G_python_16, lastError = eval('t.Action(tt)', self.globals, _locals), None
self.considerError(lastError, 'semanticAction')
return (_G_python_16, self.currentError)
def rule_application(self):
_locals = {'self': self}
self.locals['application'] = _locals
def _G_optional_17():
self._trace(' indentation', (178, 190), self.input.position)
_G_apply_18, lastError = self._apply(self.rule_indentation, "indentation", [])
self.considerError(lastError, None)
return (_G_apply_18, self.currentError)
def _G_optional_19():
return (None, self.input.nullError())
_G_or_20, lastError = self._or([_G_optional_17, _G_optional_19])
self.considerError(lastError, 'application')
self._trace(' name', (191, 196), self.input.position)
_G_apply_21, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError, 'application')
_locals['name'] = _G_apply_21
def _G_or_22():
self._trace("'('", (221, 224), self.input.position)
_G_exactly_23, lastError = self.exactly('(')
self.considerError(lastError, None)
self._trace(' term_arglist', (224, 237), self.input.position)
_G_apply_24, lastError = self._apply(self.rule_term_arglist, "term_arglist", [])
self.considerError(lastError, None)
_locals['args'] = _G_apply_24
self._trace(" ')'", (242, 246), self.input.position)
_G_exactly_25, lastError = self.exactly(')')
self.considerError(lastError, None)
_G_python_26, lastError = eval('t.Apply(name, self.rulename, args)', self.globals, _locals), None
self.considerError(lastError, None)
return (_G_python_26, self.currentError)
def _G_or_27():
_G_python_28, lastError = eval('t.Apply(name, self.rulename, [])', self.globals, _locals), None
self.considerError(lastError, None)
return (_G_python_28, self.currentError)
_G_or_29, lastError = self._or([_G_or_22, _G_or_27])
self.considerError(lastError, 'application')
return (_G_or_29, self.currentError)
if parsley_termactions.globals is not None:
parsley_termactions.globals = parsley_termactions.globals.copy()
parsley_termactions.globals.update(ruleGlobals)
else:
parsley_termactions.globals = ruleGlobals
return parsley_termactions
|
PypiClean
|
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_is.js
|
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
function getWT(v, f) {
if (f === 0) {
return {w: 0, t: 0};
}
while ((f % 10) === 0) {
f /= 10;
v--;
}
return {w: v, t: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"f.h.",
"e.h."
],
"DAY": [
"sunnudagur",
"m\u00e1nudagur",
"\u00feri\u00f0judagur",
"mi\u00f0vikudagur",
"fimmtudagur",
"f\u00f6studagur",
"laugardagur"
],
"MONTH": [
"jan\u00faar",
"febr\u00faar",
"mars",
"apr\u00edl",
"ma\u00ed",
"j\u00fan\u00ed",
"j\u00fal\u00ed",
"\u00e1g\u00fast",
"september",
"okt\u00f3ber",
"n\u00f3vember",
"desember"
],
"SHORTDAY": [
"sun.",
"m\u00e1n.",
"\u00feri.",
"mi\u00f0.",
"fim.",
"f\u00f6s.",
"lau."
],
"SHORTMONTH": [
"jan.",
"feb.",
"mar.",
"apr.",
"ma\u00ed",
"j\u00fan.",
"j\u00fal.",
"\u00e1g\u00fa.",
"sep.",
"okt.",
"n\u00f3v.",
"des."
],
"fullDate": "EEEE, d. MMMM y",
"longDate": "d. MMMM y",
"medium": "d. MMM y HH:mm:ss",
"mediumDate": "d. MMM y",
"mediumTime": "HH:mm:ss",
"short": "d.M.y HH:mm",
"shortDate": "d.M.y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "kr",
"DECIMAL_SEP": ",",
"GROUP_SEP": ".",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "is",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); var wt = getWT(vf.v, vf.f); if (wt.t == 0 && i % 10 == 1 && i % 100 != 11 || wt.t != 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
|
PypiClean
|
/ant_net_monitor-1.0.0-py3-none-any.whl/ant_net_monitor/dist/static/js/app.add16b60.js
|
(function(e){function t(t){for(var n,o,i=t[0],l=t[1],u=t[2],s=0,f=[];s<i.length;s++)o=i[s],Object.prototype.hasOwnProperty.call(a,o)&&a[o]&&f.push(a[o][0]),a[o]=0;for(n in l)Object.prototype.hasOwnProperty.call(l,n)&&(e[n]=l[n]);d&&d(t);while(f.length)f.shift()();return c.push.apply(c,u||[]),r()}function r(){for(var e,t=0;t<c.length;t++){for(var r=c[t],n=!0,o=1;o<r.length;o++){var i=r[o];0!==a[i]&&(n=!1)}n&&(c.splice(t--,1),e=l(l.s=r[0]))}return e}var n={},o={app:0},a={app:0},c=[];function i(e){return l.p+"static/js/"+({}[e]||e)+"."+{"chunk-2e1fafcb":"4043284c","chunk-3f6343b4":"f94e33b3","chunk-6d982b58":"ae7d8436"}[e]+".js"}function l(t){if(n[t])return n[t].exports;var r=n[t]={i:t,l:!1,exports:{}};return e[t].call(r.exports,r,r.exports,l),r.l=!0,r.exports}l.e=function(e){var t=[],r={"chunk-6d982b58":1};o[e]?t.push(o[e]):0!==o[e]&&r[e]&&t.push(o[e]=new Promise((function(t,r){for(var n="static/css/"+({}[e]||e)+"."+{"chunk-2e1fafcb":"31d6cfe0","chunk-3f6343b4":"31d6cfe0","chunk-6d982b58":"34997099"}[e]+".css",a=l.p+n,c=document.getElementsByTagName("link"),i=0;i<c.length;i++){var u=c[i],s=u.getAttribute("data-href")||u.getAttribute("href");if("stylesheet"===u.rel&&(s===n||s===a))return t()}var f=document.getElementsByTagName("style");for(i=0;i<f.length;i++){u=f[i],s=u.getAttribute("data-href");if(s===n||s===a)return t()}var d=document.createElement("link");d.rel="stylesheet",d.type="text/css",d.onload=t,d.onerror=function(t){var n=t&&t.target&&t.target.src||a,c=new Error("Loading CSS chunk "+e+" failed.\n("+n+")");c.code="CSS_CHUNK_LOAD_FAILED",c.request=n,delete o[e],d.parentNode.removeChild(d),r(c)},d.href=a;var b=document.getElementsByTagName("head")[0];b.appendChild(d)})).then((function(){o[e]=0})));var n=a[e];if(0!==n)if(n)t.push(n[2]);else{var c=new Promise((function(t,r){n=a[e]=[t,r]}));t.push(n[2]=c);var u,s=document.createElement("script");s.charset="utf-8",s.timeout=120,l.nc&&s.setAttribute("nonce",l.nc),s.src=i(e);var f=new Error;u=function(t){s.onerror=s.onload=null,clearTimeout(d);var r=a[e];if(0!==r){if(r){var n=t&&("load"===t.type?"missing":t.type),o=t&&t.target&&t.target.src;f.message="Loading chunk "+e+" failed.\n("+n+": "+o+")",f.name="ChunkLoadError",f.type=n,f.request=o,r[1](f)}a[e]=void 0}};var d=setTimeout((function(){u({type:"timeout",target:s})}),12e4);s.onerror=s.onload=u,document.head.appendChild(s)}return Promise.all(t)},l.m=e,l.c=n,l.d=function(e,t,r){l.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},l.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},l.t=function(e,t){if(1&t&&(e=l(e)),8&t)return e;if(4&t&&"object"===typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(l.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var n in e)l.d(r,n,function(t){return e[t]}.bind(null,n));return r},l.n=function(e){var t=e&&e.__esModule?function(){return e["default"]}:function(){return e};return l.d(t,"a",t),t},l.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},l.p="/",l.oe=function(e){throw console.error(e),e};var u=window["webpackJsonp"]=window["webpackJsonp"]||[],s=u.push.bind(u);u.push=t,u=u.slice();for(var f=0;f<u.length;f++)t(u[f]);var d=s;c.push([0,"chunk-vendors"]),r()})({0:function(e,t,r){e.exports=r("56d7")},"29aa":function(e,t,r){},3716:function(e,t,r){},"56d7":function(e,t,r){"use strict";r.r(t);r("e260"),r("e6cf"),r("cca6"),r("a79d");var 
n=r("7a23"),o=Object(n["m"])(" 设置点什么玩意儿 "),a=Object(n["m"])("保存"),c=Object(n["m"])("取消");function i(e,t,r,i,l,u){var s=Object(n["K"])("head-bar"),f=Object(n["K"])("n-layout-header"),d=Object(n["K"])("side-bar"),b=Object(n["K"])("n-layout-sider"),h=Object(n["K"])("dash-board"),m=Object(n["K"])("n-notification-provider"),p=Object(n["K"])("router-view"),O=Object(n["K"])("n-space"),j=Object(n["K"])("n-layout-content"),g=Object(n["K"])("n-layout"),y=Object(n["K"])("foot-bar"),v=Object(n["K"])("n-layout-footer"),S=Object(n["K"])("n-button"),w=Object(n["K"])("n-card"),C=Object(n["K"])("n-modal"),T=Object(n["K"])("n-config-provider");return Object(n["C"])(),Object(n["j"])("div",null,[Object(n["n"])(T,{theme:i.naiveTheme},{default:Object(n["T"])((function(){return[Object(n["n"])(g,{id:"container"},{default:Object(n["T"])((function(){return[Object(n["n"])(f,{id:"head-bar",bordered:""},{default:Object(n["T"])((function(){return[Object(n["n"])(s,{onChangeTheme:u.changeTheme,onAlarm:u.switchAlarm},null,8,["onChangeTheme","onAlarm"])]})),_:1}),Object(n["n"])(g,{"has-sider":""},{default:Object(n["T"])((function(){return[Object(n["n"])(b,{id:"side-bar"},{default:Object(n["T"])((function(){return[Object(n["n"])(d)]})),_:1}),Object(n["n"])(j,{id:"main-content"},{default:Object(n["T"])((function(){return[Object(n["n"])(O,{vertical:""},{default:Object(n["T"])((function(){return[Object(n["n"])(m,null,{default:Object(n["T"])((function(){return[Object(n["n"])(h)]})),_:1}),Object(n["n"])(p)]})),_:1})]})),_:1})]})),_:1}),Object(n["n"])(v,{id:"foot-bar",bordered:""},{default:Object(n["T"])((function(){return[Object(n["n"])(y)]})),_:1})]})),_:1}),Object(n["n"])(C,{show:i.alarmSettingVisible,"onUpdate:show":t[0]||(t[0]=function(e){return i.alarmSettingVisible=e})},{default:Object(n["T"])((function(){return[Object(n["n"])(w,{style:{width:"600px"},title:"警报设置",bordered:!1,size:"huge",role:"dialog","aria-modal":"true","footer-style":"display:flex;justify-content:flex-end;",closable:"",onClose:u.switchAlarm},{footer:Object(n["T"])((function(){return[Object(n["n"])(S,{type:"primary"},{default:Object(n["T"])((function(){return[a]})),_:1}),Object(n["n"])(S,{type:"default"},{default:Object(n["T"])((function(){return[c]})),_:1})]})),default:Object(n["T"])((function(){return[o]})),_:1},8,["onClose"])]})),_:1},8,["show"])]})),_:1},8,["theme"])])}var l=r("ade3"),u=r("5927"),s=r("48da"),f=r("0bcf"),d=r("8c9c"),b=r("0b196"),h=r("ced7"),m=r("c872"),p=r("6e31"),O=r("4951"),j=r("3519"),g=r("73c1"),y=r("8f5d"),v=r("5c7f"),S=r("1be7"),w=r("a004"),C={id:"header"},T=Object(n["m"])("Ant Net Monitor"),_=Object(n["m"])(" 切换主题 "),k=Object(n["m"])(" 异常警报 ");function x(e,t,r,o,a,c){var i=Object(n["K"])("n-h1"),l=Object(n["K"])("switch-horizontal"),u=Object(n["K"])("n-icon"),s=Object(n["K"])("n-button"),f=Object(n["K"])("thermometer-half"),d=Object(n["K"])("n-space");return 
Object(n["C"])(),Object(n["j"])("div",C,[Object(n["n"])(i,null,{default:Object(n["T"])((function(){return[T]})),_:1}),Object(n["n"])(d,{id:"header-end"},{default:Object(n["T"])((function(){return[Object(n["n"])(s,{onClick:c.changeTheme,id:"change-theme",quaternary:""},{icon:Object(n["T"])((function(){return[Object(n["n"])(u,null,{default:Object(n["T"])((function(){return[Object(n["n"])(l)]})),_:1})]})),default:Object(n["T"])((function(){return[_]})),_:1},8,["onClick"]),Object(n["n"])(s,{onClick:c.switchAlarm,quaternary:""},{icon:Object(n["T"])((function(){return[Object(n["n"])(u,null,{default:Object(n["T"])((function(){return[Object(n["n"])(f)]})),_:1})]})),default:Object(n["T"])((function(){return[k]})),_:1},8,["onClick"])]})),_:1})])}var A=r("fe8e"),K=r("c678"),N=r("1af6"),P=r("32f6"),L={components:{NH1:A["a"],NButton:j["a"],NIcon:K["a"],NSpace:m["a"],SwitchHorizontal:N["a"],ThermometerHalf:P["a"]},methods:{changeTheme:function(){this.$emit("changeTheme")},switchAlarm:function(){this.$emit("alarm")}}},M=(r("eb46"),r("6b0d")),U=r.n(M);const W=U()(L,[["render",x],["__scopeId","data-v-fe179ed8"]]);var I=W;function B(e,t,r,o,a,c){var i=Object(n["K"])("n-menu");return Object(n["C"])(),Object(n["i"])(i,{options:a.menuOptions},null,8,["options"])}var R=r("6c4e"),E=r("6c02"),q=r("9fcf"),z=r("5af6"),D=[{label:function(){return Object(n["q"])(E["a"],{to:{name:"CPU-Info"}},{default:function(){return"CPU"}})},key:"CPU",icon:F(q["a"])},{label:function(){return Object(n["q"])(E["a"],{to:{name:"RAM-Info"}},{default:function(){return"RAM"}})},key:"RAM",icon:F(z["a"])}];function F(e){return function(){return Object(n["q"])(K["a"],null,{default:function(){return Object(n["q"])(e)}})}}var H={components:{NMenu:R["a"]},data:function(){return{menuOptions:D}}};const V=U()(H,[["render",B]]);var $=V,J=Object(n["m"])("Ant Net Monitor · Made by Ant");function G(e,t,r,o,a,c){var i=Object(n["K"])("n-text");return Object(n["C"])(),Object(n["i"])(i,{depth:"3"},{default:Object(n["T"])((function(){return[J]})),_:1})}var Z=r("48f1"),Q={components:{NText:Z["a"]}};const X=U()(Q,[["render",G]]);var Y=X,ee={id:"dashboard-container"};function te(e,t,r,o,a,c){var i=Object(n["K"])("gauge-chart"),l=Object(n["K"])("n-card");return Object(n["C"])(),Object(n["i"])(l,{id:"dashboard",hoverable:""},{default:Object(n["T"])((function(){return[Object(n["k"])("div",ee,[Object(n["n"])(i,{id:"swap-status",argv:a.SwapStatus},null,8,["argv"]),Object(n["n"])(i,{id:"cpu-status",argv:a.CPUStatus},null,8,["argv"]),Object(n["n"])(i,{id:"ram-status",argv:a.RAMStatus},null,8,["argv"])])]})),_:1})}r("b680");function re(e,t,r,o,a,c){var i=Object(n["K"])("v-chart");return Object(n["C"])(),Object(n["i"])(i,{class:"chart",option:e.option,autoresize:""},null,8,["option"])}r("b0c0");var ne=r("22b4"),oe=r("9be8"),ae=r("f95e");Object(ne["a"])([oe["a"],ae["a"]]);var ce={name:"GaugeChart",components:{VChart:v["c"]},props:["argv"],data:function(){return{option:{tooltip:{formatter:"{a} <br/>{b} : {c}%"},series:[{name:this.argv.name,type:"gauge",progress:{show:!0},detail:{valueAnimation:!0,formatter:"{value} %"},data:[{value:this.argv.value,name:this.argv.name}]}]},height:this.argv.height}},watch:{
//! I hate syntactic sugar
argv:{handler:function(e){this.option.series[0].data[0].value=e.value},deep:!0,immediate:!0}}},ie=function(){Object(n["P"])((function(e){return{"2c55a360":e.height}}))},le=ce.setup;ce.setup=le?function(e,t){return ie(),le(e,t)}:ie;var ue=ce;r("6b65");const se=U()(ue,[["render",re],["__scopeId","data-v-575fcd32"]]);var fe=se,de=r("b775"),be=r("9892"),he={components:{NCard:O["c"],GaugeChart:fe},setup:function(){var e=Object(be["a"])();return{notify:function(t,r){e[t](r)}}},data:function(){return{status:{},CPUStatus:{name:"CPU",value:0,height:"45vh"},RAMStatus:{name:"Used RAM",value:0,height:"35vh"},SwapStatus:{name:"Used Swap",value:0,height:"35vh"},alarmFlag:{}}},methods:{},beforeMount:function(){var e=this;setInterval((function(){Object(de["b"])("production").then((function(t){e.status=t.data,e.CPUStatus.value=e.status.cpu_percent,e.RAMStatus.value=e.status.ram_percent,e.SwapStatus.value=e.status.swap_percent}))}),1e3),setInterval((function(){Object(de["a"])().then((function(t){e.alarmFlag=t.data}))}),5e3)},watch:{alarmFlag:{handler:function(e){e.cpu_usage&&(console.log("cpu_usage"),this.notify("warning",{content:"CPU usage is too high",meta:Date.now()})),e.cpu_steal&&(console.log("cpu_steal"),this.notify("warning",{content:"CPU steal time is too high",meta:Date.now()})),e.cpu_iowait&&(console.log("cpu_iowait"),this.notify("warning",{content:"CPU iowait time is too high",meta:Date.now()}))},deep:!0,immediate:!0}}};r("7fb0");const me=U()(he,[["render",te]]);var pe=me;Object(S["l"])("dark-mode",w);var Oe={name:"App",components:{NConfigProvider:u["a"],NLayout:s["b"],NLayoutHeader:f["a"],NLayoutSider:d["a"],NLayoutContent:b["a"],NLayoutFooter:h["a"],NSpace:m["a"],NModal:p["a"],NCard:O["c"],NButton:j["a"],NNotificationProvider:g["a"],HeadBar:I,SideBar:$,FootBar:Y,DashBoard:pe},setup:function(){return{darkTheme:y["a"],naiveTheme:Object(n["I"])(null),chartTheme:Object(n["I"])("white"),alarmSettingVisible:Object(n["I"])(!1)}},provide:function(){var e=this;return Object(l["a"])({},v["a"],Object(n["g"])((function(){return e.chartTheme})))},methods:{
//! The Echarts theme and the naive-ui theme do not switch at the same speed, so the Echarts background is set to transparent
changeTheme:function(){this.$store.state.darkMode?(this.naiveTheme=null,this.chartTheme="white"):(this.naiveTheme=y["a"],this.chartTheme="dark-mode"),this.$store.commit("changeTheme")},switchAlarm:function(){this.alarmSettingVisible=!this.alarmSettingVisible}}};r("b903");const je=U()(Oe,[["render",i]]);var ge=je,ye=(r("d3b7"),r("3ca3"),r("ddb0"),[{path:"/status",name:"status",component:function(){return r.e("chunk-6d982b58").then(r.bind(null,"8f33"))},children:[{path:"cpu_status",name:"CPU-Info",meta:{title:"CPU-Info",apiUrl:"/api/status/cpu_status"},component:function(){return r.e("chunk-2e1fafcb").then(r.bind(null,"f89f"))}},{path:"ram_status",name:"RAM-Info",meta:{title:"RAM-Info",apiUrl:"/api/status/ram_status"},component:function(){return r.e("chunk-3f6343b4").then(r.bind(null,"b9ba"))}}]}]),ve=Object(E["b"])({history:Object(E["c"])(),routes:ye}),Se=ve,we=r("5502"),Ce=Object(we["a"])({state:function(){return{darkMode:!1}},mutations:{changeTheme:function(e){e.darkMode=0==e.darkMode}}}),Te=Ce;r("aadd"),r("a12d");Object(n["h"])(ge).use(Se).use(Te).mount("#app")},"6b65":function(e,t,r){"use strict";r("f490")},"7fb0":function(e,t,r){"use strict";r("29aa")},"811e":function(e,t,r){},a004:function(e){e.exports=JSON.parse('{"color":["#fc97af","#87f7cf","#f7f494","#72ccff","#f7c5a0","#d4a4eb","#d2f5a6","#76f2f2"],"backgroundColor":"rgba(255,255,255,0)","textStyle":{},"title":{"textStyle":{"color":"#ffffff"},"subtextStyle":{"color":"#dddddd"}},"line":{"itemStyle":{"borderWidth":"4"},"lineStyle":{"width":"3"},"symbolSize":"0","symbol":"circle","smooth":true},"radar":{"itemStyle":{"borderWidth":"4"},"lineStyle":{"width":"3"},"symbolSize":"0","symbol":"circle","smooth":true},"bar":{"itemStyle":{"barBorderWidth":0,"barBorderColor":"#ccc"}},"pie":{"itemStyle":{"borderWidth":0,"borderColor":"#ccc"}},"scatter":{"itemStyle":{"borderWidth":0,"borderColor":"#ccc"}},"boxplot":{"itemStyle":{"borderWidth":0,"borderColor":"#ccc"}},"parallel":{"itemStyle":{"borderWidth":0,"borderColor":"#ccc"}},"sankey":{"itemStyle":{"borderWidth":0,"borderColor":"#ccc"}},"funnel":{"itemStyle":{"borderWidth":0,"borderColor":"#ccc"}},"gauge":{"itemStyle":{"borderWidth":0,"borderColor":"rgba(255, 255, 255, 0.9)"},"axisLabel":{"color":"rgba(255, 255, 255, 0.9)"},"axisLine":{"color":"rgba(255, 255, 255, 0.9)"},"title":{"color":"rgba(255, 255, 255, 0.9)"},"detail":{"color":"rgba(255, 255, 255, 
0.9)"}},"candlestick":{"itemStyle":{"color":"#fc97af","color0":"transparent","borderColor":"#fc97af","borderColor0":"#87f7cf","borderWidth":"2"}},"graph":{"itemStyle":{"borderWidth":0,"borderColor":"#ccc"},"lineStyle":{"width":"1","color":"#ffffff"},"symbolSize":"0","symbol":"circle","smooth":true,"color":["#fc97af","#87f7cf","#f7f494","#72ccff","#f7c5a0","#d4a4eb","#d2f5a6","#76f2f2"],"label":{"color":"#293441"}},"map":{"itemStyle":{"areaColor":"#f3f3f3","borderColor":"#999999","borderWidth":0.5},"label":{"color":"#893448"},"emphasis":{"itemStyle":{"areaColor":"rgba(255,178,72,1)","borderColor":"#eb8146","borderWidth":1},"label":{"color":"rgb(137,52,72)"}}},"geo":{"itemStyle":{"areaColor":"#f3f3f3","borderColor":"#999999","borderWidth":0.5},"label":{"color":"#893448"},"emphasis":{"itemStyle":{"areaColor":"rgba(255,178,72,1)","borderColor":"#eb8146","borderWidth":1},"label":{"color":"rgb(137,52,72)"}}},"categoryAxis":{"axisLine":{"show":true,"lineStyle":{"color":"#666666"}},"axisTick":{"show":false,"lineStyle":{"color":"#333"}},"axisLabel":{"show":true,"color":"#aaaaaa"},"splitLine":{"show":false,"lineStyle":{"color":["#e6e6e6"]}},"splitArea":{"show":false,"areaStyle":{"color":["rgba(250,250,250,0.05)","rgba(200,200,200,0.02)"]}}},"valueAxis":{"axisLine":{"show":true,"lineStyle":{"color":"#666666"}},"axisTick":{"show":false,"lineStyle":{"color":"#333"}},"axisLabel":{"show":true,"color":"#aaaaaa"},"splitLine":{"show":false,"lineStyle":{"color":["#e6e6e6"]}},"splitArea":{"show":false,"areaStyle":{"color":["rgba(250,250,250,0.05)","rgba(200,200,200,0.02)"]}}},"logAxis":{"axisLine":{"show":true,"lineStyle":{"color":"#666666"}},"axisTick":{"show":false,"lineStyle":{"color":"#333"}},"axisLabel":{"show":true,"color":"#aaaaaa"},"splitLine":{"show":false,"lineStyle":{"color":["#e6e6e6"]}},"splitArea":{"show":false,"areaStyle":{"color":["rgba(250,250,250,0.05)","rgba(200,200,200,0.02)"]}}},"timeAxis":{"axisLine":{"show":true,"lineStyle":{"color":"#666666"}},"axisTick":{"show":false,"lineStyle":{"color":"#333"}},"axisLabel":{"show":true,"color":"#aaaaaa"},"splitLine":{"show":false,"lineStyle":{"color":["#e6e6e6"]}},"splitArea":{"show":false,"areaStyle":{"color":["rgba(250,250,250,0.05)","rgba(200,200,200,0.02)"]}}},"toolbox":{"iconStyle":{"borderColor":"#999999"},"emphasis":{"iconStyle":{"borderColor":"#666666"}}},"legend":{"textStyle":{"color":"#999999"}},"tooltip":{"axisPointer":{"lineStyle":{"color":"#cccccc","width":1},"crossStyle":{"color":"#cccccc","width":1}}},"timeline":{"lineStyle":{"color":"#87f7cf","width":1},"itemStyle":{"color":"#87f7cf","borderWidth":1},"controlStyle":{"color":"#87f7cf","borderColor":"#87f7cf","borderWidth":0.5},"checkpointStyle":{"color":"#fc97af","borderColor":"#fc97af"},"label":{"color":"#87f7cf"},"emphasis":{"itemStyle":{"color":"#f7f494"},"controlStyle":{"color":"#87f7cf","borderColor":"#87f7cf","borderWidth":0.5},"label":{"color":"#87f7cf"}}},"visualMap":{"color":["#fc97af","#87f7cf"]},"dataZoom":{"backgroundColor":"rgba(255,255,255,0)","dataBackgroundColor":"rgba(114,204,255,1)","fillerColor":"rgba(114,204,255,0.2)","handleColor":"#72ccff","handleSize":"100%","textStyle":{"color":"#333333"}},"markPoint":{"label":{"color":"#293441"},"emphasis":{"label":{"color":"#293441"}}}}')},b775:function(e,t,r){"use strict";r.d(t,"b",(function(){return i})),r.d(t,"d",(function(){return l})),r.d(t,"e",(function(){return u})),r.d(t,"c",(function(){return s})),r.d(t,"a",(function(){return f}));var n=r("bc3a"),o=r.n(n);o.a.defaults.retry=4;var 
a=o.a.create({timeout:1e4}),c=(o.a.create({baseURL:"http://localhost:5000/",timeout:3e3}),a);function i(){return c({method:"get",url:"/api/status/basic_status"})}function l(e){return c({method:"get",url:e,params:{type:"init"}})}function u(e){return c({method:"get",url:e,params:{type:"update"}})}function s(e){return c({method:"get",url:e,params:{type:"day"}})}function f(){return c({method:"get",url:"/api/alarm/alarm_item"})}},b903:function(e,t,r){"use strict";r("3716")},eb46:function(e,t,r){"use strict";r("811e")},f490:function(e,t,r){}});
//# sourceMappingURL=app.add16b60.js.map
|
PypiClean
|
/dfir-iris-client-2.0.1.tar.gz/dfir-iris-client-2.0.1/dfir_iris_client/users.py
|
import warnings
from typing import Union
from deprecated.classic import deprecated
from dfir_iris_client.helper.utils import ApiResponse
class User(object):
"""Handles the users type methods"""
def __init__(self, session):
self._s = session
@deprecated('Use the new user_exists method', version="2.0.0", action="error")
def user_id_exists(self, user_id: int) -> bool:
return self.user_exists(user=user_id)
@deprecated('Use the new user_exists method', version="2.0.0", action="error")
def username_exists(self, username: str) -> bool:
return self.user_exists(user=username)
def user_exists(self, user: Union[str, int]) -> bool:
"""
Returns True if the user (login) exists, else False. A user ID can also be looked up.
Args:
user: Login or user ID to lookup
Returns:
True if the user exists, else False
"""
if isinstance(user, int):
req = self.get_user(user=user)
else:
req = self.lookup_username(username=user)
return req.is_success()
def lookup_username(self, username: str) -> ApiResponse:
"""
Looks up a user by username and returns the API response, which contains the user ID if the user exists.
Args:
username: Username to lookup
Returns:
ApiResponse
"""
return self._s.pi_get(f'manage/users/lookup/login/{username}')
def get_user(self, user: Union[int, str], **kwargs) -> ApiResponse:
"""Return a user data
Args:
user: User ID or login of the user to get
Returns:
ApiResponse object
"""
if kwargs.get('user_id') is not None:
warnings.warn("\'user_id\' argument is deprecated, use \'user\' instead",
DeprecationWarning)
user = kwargs.get('user_id')
if isinstance(user, str):
return self.lookup_username(username=user)
return self._s.pi_get(f'manage/users/lookup/id/{user}')
def list_users(self) -> ApiResponse:
"""
Returns a list of the users with a restricted view so it can be called by unprivileged users.
Args:
Returns:
ApiResponse object
"""
return self._s.pi_get(f'manage/users/restricted/list')
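# --- Usage sketch (illustrative only, not part of this module) -------------
# Assuming an authenticated client session object ``session`` (construction
# elided here; see dfir_iris_client's session helper), the methods above are
# typically combined like this:
#
#     user = User(session)
#     if user.user_exists('analyst'):
#         resp = user.lookup_username('analyst')
#
# ``resp.is_success()`` can then be checked as in ``user_exists`` above; any
# other ApiResponse accessors would be assumptions not shown in this module.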
|
PypiClean
|
/python-pptx-fix-0.6.21.2.tar.gz/python-pptx-fix-0.6.21.2/pptx/oxml/ns.py
|
from __future__ import absolute_import
#: Maps namespace prefix to namespace name for all known PowerPoint XML
#: namespaces.
_nsmap = {
"a": ("http://schemas.openxmlformats.org/drawingml/2006/main"),
"c": ("http://schemas.openxmlformats.org/drawingml/2006/chart"),
"cp": (
"http://schemas.openxmlformats.org/package/2006/metadata/core-pro" "perties"
),
"ct": ("http://schemas.openxmlformats.org/package/2006/content-types"),
"dc": ("http://purl.org/dc/elements/1.1/"),
"dcmitype": ("http://purl.org/dc/dcmitype/"),
"dcterms": ("http://purl.org/dc/terms/"),
"ep": (
"http://schemas.openxmlformats.org/officeDocument/2006/extended-p" "roperties"
),
"i": (
"http://schemas.openxmlformats.org/officeDocument/2006/relationsh" "ips/image"
),
"m": ("http://schemas.openxmlformats.org/officeDocument/2006/math"),
"mo": ("http://schemas.microsoft.com/office/mac/office/2008/main"),
"mv": ("urn:schemas-microsoft-com:mac:vml"),
"o": ("urn:schemas-microsoft-com:office:office"),
"p": ("http://schemas.openxmlformats.org/presentationml/2006/main"),
"pd": ("http://schemas.openxmlformats.org/drawingml/2006/presentationDra" "wing"),
"pic": ("http://schemas.openxmlformats.org/drawingml/2006/picture"),
"pr": ("http://schemas.openxmlformats.org/package/2006/relationships"),
"r": ("http://schemas.openxmlformats.org/officeDocument/2006/relationsh" "ips"),
"sl": (
"http://schemas.openxmlformats.org/officeDocument/2006/relationsh"
"ips/slideLayout"
),
"v": ("urn:schemas-microsoft-com:vml"),
"ve": ("http://schemas.openxmlformats.org/markup-compatibility/2006"),
"w": ("http://schemas.openxmlformats.org/wordprocessingml/2006/main"),
"w10": ("urn:schemas-microsoft-com:office:word"),
"wne": ("http://schemas.microsoft.com/office/word/2006/wordml"),
"wp": ("http://schemas.openxmlformats.org/drawingml/2006/wordprocessingD" "rawing"),
"xsi": ("http://www.w3.org/2001/XMLSchema-instance"),
}
class NamespacePrefixedTag(str):
"""
Value object that knows the semantics of an XML tag having a namespace
prefix.
"""
def __new__(cls, nstag, *args):
return super(NamespacePrefixedTag, cls).__new__(cls, nstag)
def __init__(self, nstag):
self._pfx, self._local_part = nstag.split(":")
self._ns_uri = _nsmap[self._pfx]
@property
def clark_name(self):
return "{%s}%s" % (self._ns_uri, self._local_part)
@property
def local_part(self):
"""
Return the local part of the tag as a string. E.g. 'foobar' is
returned for tag 'f:foobar'.
"""
return self._local_part
@property
def nsmap(self):
"""
Return a dict having a single member, mapping the namespace prefix of
this tag to its namespace name (e.g. {'f': 'http://foo/bar'}). This
is handy for passing to xpath calls and other uses.
"""
return {self._pfx: self._ns_uri}
@property
def nspfx(self):
"""
Return the string namespace prefix for the tag, e.g. 'f' is returned
for tag 'f:foobar'.
"""
return self._pfx
@property
def nsuri(self):
"""
Return the namespace URI for the tag, e.g. 'http://foo/bar' would be
returned for tag 'f:foobar' if the 'f' prefix maps to
'http://foo/bar' in _nsmap.
"""
return self._ns_uri
def namespaces(*prefixes):
"""
Return a dict containing the subset namespace prefix mappings specified by
*prefixes*. Any number of namespace prefixes can be supplied, e.g.
namespaces('a', 'r', 'p').
"""
namespaces = {}
for prefix in prefixes:
namespaces[prefix] = _nsmap[prefix]
return namespaces
nsmap = namespaces # alias for more compact use with Element()
def nsdecls(*prefixes):
return " ".join(['xmlns:%s="%s"' % (pfx, _nsmap[pfx]) for pfx in prefixes])
def nsuri(nspfx):
"""
Return the namespace URI corresponding to *nspfx*. For example, it would
return 'http://foo/bar' for an *nspfx* of 'f' if the 'f' prefix maps to
'http://foo/bar' in _nsmap.
"""
return _nsmap[nspfx]
def qn(namespace_prefixed_tag):
"""
Return a Clark-notation qualified tag name corresponding to
*namespace_prefixed_tag*, a string like 'p:body'. 'qn' stands for
*qualified name*. As an example, ``qn('p:cSld')`` returns
``'{http://schemas.../main}cSld'``.
"""
nsptag = NamespacePrefixedTag(namespace_prefixed_tag)
return nsptag.clark_name
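# --- Usage sketch (illustrative only, not part of this module) -------------
# Typical combinations of the helpers above, based on the _nsmap entries:
#
#     qn('p:cSld')        -> '{http://schemas.openxmlformats.org/presentationml/2006/main}cSld'
#     nsdecls('a', 'r')   -> 'xmlns:a="..." xmlns:r="..."'
#     namespaces('a')     -> {'a': 'http://schemas.openxmlformats.org/drawingml/2006/main'}
#
# e.g. ``element.find(qn('a:blip'))`` when searching a DrawingML tree, where
# ``element`` is a hypothetical lxml element.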
|
PypiClean
|
/phuzzy-0.7.4.tar.gz/phuzzy-0.7.4/docs/shapes/superellipse.rst
|
Superellipse
------------
.. code-block:: python
:linenos:
import phuzzy.mpl as phm
se = phm.Superellipse(alpha0=[-1, 2.], alpha1=None, m=1.0, n=.5, number_of_alpha_levels=17)
se.plot(show=True, filepath="/tmp/superellipse.png", title=True)
.. figure:: superellipse.png
:scale: 90 %
:alt: Superellipse fuzzy number
Superellipse fuzzy number
.. figure:: superellipse_var.png
:scale: 90 %
:alt: Superellipse fuzzy number (variation m, n)
Superellipse fuzzy number (variation m, n)
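The variation figure above can be reproduced by sweeping the shape parameters; a minimal sketch, assuming the same ``phm.Superellipse`` constructor and ``plot`` signature shown in the example at the top of this page (the output paths are placeholders):
.. code-block:: python
    :linenos:
    import phuzzy.mpl as phm
    for m, n in [(0.5, 0.5), (1.0, 0.5), (2.0, 2.0)]:
        se = phm.Superellipse(alpha0=[-1, 2.], alpha1=None, m=m, n=n, number_of_alpha_levels=17)
        se.plot(show=False, filepath="/tmp/superellipse_m{}_n{}.png".format(m, n), title=True)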
.. figure:: operations_Superellipse.png
:scale: 90 %
:alt: Superellipse fuzzy number operations
Superellipse fuzzy number operations
|
PypiClean
|
/emencia-paste-django-1.8.2.tar.gz/emencia-paste-django-1.8.2/emencia_paste_django/django_buildout/project/webapp_statics/js/royalslider/dev/modules/jquery.rs.fullscreen.js
|
(function($) {
"use strict";
/**
*
* RoyalSlider fullscreen module
* @version 1.0.5:
*
* 1.0.1:
* - Added rsEnterFullscreen and rsExitFullscreen events
*
* 1.0.2
* - Added window scroll detection
*
* 1.0.3
* - Fullscreen button now is added to _controlsContainer element
*
* 1.0.4
* - Fixed issue that could cause small image be loaded in fullscreen
*
* 1.0.5
* - Fix "false" native fullscreen on Android
*
*/
$.extend($.rsProto, {
_initFullscreen: function() {
var self = this;
self._fullscreenDefaults = {
enabled: false,
keyboardNav: true,
buttonFS: true,
nativeFS: false,
doubleTap: true
};
self.st.fullscreen = $.extend({}, self._fullscreenDefaults, self.st.fullscreen);
if(self.st.fullscreen.enabled) {
self.ev.one('rsBeforeSizeSet', function() {
self._setupFullscreen();
});
}
},
_setupFullscreen: function() {
var self = this;
self._fsKeyboard = (!self.st.keyboardNavEnabled && self.st.fullscreen.keyboardNav);
if(self.st.fullscreen.nativeFS) {
// Thanks to John Dyer http://j.hn/
self._fullScreenApi = {
supportsFullScreen: false,
isFullScreen: function() { return false; },
requestFullScreen: function() {},
cancelFullScreen: function() {},
fullScreenEventName: '',
prefix: ''
};
var browserPrefixes = 'webkit moz o ms khtml'.split(' ');
// check for native support
if(!self.isAndroid) {
if (typeof document.cancelFullScreen != 'undefined') {
self._fullScreenApi.supportsFullScreen = true;
} else {
// check for fullscreen support by vendor prefix
for (var i = 0; i < browserPrefixes.length; i++ ) {
self._fullScreenApi.prefix = browserPrefixes[i];
if (typeof document[ self._fullScreenApi.prefix + 'CancelFullScreen' ] != 'undefined' ) {
self._fullScreenApi.supportsFullScreen = true;
break;
}
}
}
}
// update methods to do something useful
if ( self._fullScreenApi.supportsFullScreen) {
self.nativeFS = true;
self._fullScreenApi.fullScreenEventName = self._fullScreenApi.prefix + 'fullscreenchange' + self.ns;
self._fullScreenApi.isFullScreen = function() {
switch (this.prefix) {
case '':
return document.fullScreen;
case 'webkit':
return document.webkitIsFullScreen;
default:
return document[this.prefix + 'FullScreen'];
}
};
self._fullScreenApi.requestFullScreen = function(el) {
return (this.prefix === '') ? el.requestFullScreen() : el[this.prefix + 'RequestFullScreen']();
};
self._fullScreenApi.cancelFullScreen = function(el) {
return (this.prefix === '') ? document.cancelFullScreen() : document[this.prefix + 'CancelFullScreen']();
};
} else {
self._fullScreenApi = false;
}
}
if(self.st.fullscreen.buttonFS) {
self._fsBtn = $('<div class="rsFullscreenBtn"><div class="rsFullscreenIcn"></div></div>')
.appendTo(self._controlsContainer)
.on('click.rs', function() {
if(self.isFullscreen) {
self.exitFullscreen();
} else {
self.enterFullscreen();
}
});
}
},
enterFullscreen: function(preventNative) {
var self = this;
if( self._fullScreenApi ) {
if(!preventNative) {
self._doc.on( self._fullScreenApi.fullScreenEventName, function(e) {
if(!self._fullScreenApi.isFullScreen()) {
self.exitFullscreen(true);
} else {
self.enterFullscreen(true);
}
});
self._fullScreenApi.requestFullScreen($('html')[0]);
return;
} else {
self._fullScreenApi.requestFullScreen($('html')[0]);
}
}
if(self._isFullscreenUpdating) {
return;
}
self._isFullscreenUpdating = true;
self._doc.on('keyup' + self.ns + 'fullscreen', function(e) {
if(e.keyCode === 27) {
self.exitFullscreen();
}
});
if(self._fsKeyboard) {
self._bindKeyboardNav();
}
var win = $(window);
self._fsScrollTopOnEnter = win.scrollTop();
self._fsScrollLeftOnEnter = win.scrollLeft();
self._htmlStyle = $('html').attr('style');
self._bodyStyle = $('body').attr('style');
self._sliderStyle = self.slider.attr('style');
$('body, html').css({
overflow: 'hidden',
height: '100%',
width: '100%',
margin: '0',
padding: '0'
});
self.slider.addClass('rsFullscreen');
var item,
i;
for(i = 0; i < self.numSlides; i++) {
item = self.slides[i];
item.isRendered = false;
if(item.bigImage) {
item.isBig = true;
item.isMedLoaded = item.isLoaded;
item.isMedLoading = item.isLoading;
item.medImage = item.image;
item.medIW = item.iW;
item.medIH = item.iH;
item.slideId = -99;
if(item.bigImage !== item.medImage) {
item.sizeType = 'big';
}
item.isLoaded = item.isBigLoaded;
item.isLoading = false;
item.image = item.bigImage;
item.images[0] = item.bigImage;
item.iW = item.bigIW;
item.iH = item.bigIH;
item.isAppended = item.contentAdded = false;
self._updateItemSrc(item);
}
}
self.isFullscreen = true;
self._isFullscreenUpdating = false;
self.updateSliderSize();
self.ev.trigger('rsEnterFullscreen');
},
exitFullscreen: function(preventNative) {
var self = this;
if( self._fullScreenApi ) {
if(!preventNative) {
self._fullScreenApi.cancelFullScreen($('html')[0]);
return;
}
self._doc.off( self._fullScreenApi.fullScreenEventName );
}
if(self._isFullscreenUpdating) {
return;
}
self._isFullscreenUpdating = true;
self._doc.off('keyup' + self.ns + 'fullscreen');
if(self._fsKeyboard) {
self._doc.off('keydown' + self.ns);
}
$('html').attr('style', self._htmlStyle || '');
$('body').attr('style', self._bodyStyle || '');
var item,
i;
for(i = 0; i < self.numSlides; i++) {
item = self.slides[i];
item.isRendered = false;
if(item.bigImage) {
item.isBig = false;
item.slideId = -99;
item.isBigLoaded = item.isLoaded;
item.isBigLoading = item.isLoading;
item.bigImage = item.image;
item.bigIW = item.iW;
item.bigIH = item.iH;
item.isLoaded = item.isMedLoaded;
item.isLoading = false;
item.image = item.medImage;
item.images[0] = item.medImage;
item.iW = item.medIW;
item.iH = item.medIH;
item.isAppended = item.contentAdded = false;
self._updateItemSrc(item, true);
if(item.bigImage !== item.medImage) {
item.sizeType = 'med';
}
}
}
self.isFullscreen = false;
var win = $(window);
win.scrollTop( self._fsScrollTopOnEnter );
win.scrollLeft( self._fsScrollLeftOnEnter );
self._isFullscreenUpdating = false;
self.slider.removeClass('rsFullscreen');
self.updateSliderSize();
// fix overflow bug
setTimeout(function() {
self.updateSliderSize();
},1);
self.ev.trigger('rsExitFullscreen');
},
_updateItemSrc: function(item, exit) {
var newHTML = (!item.isLoaded && !item.isLoading) ? '<a class="rsImg rsMainSlideImage" href="'+item.image+'"></a>' : '<img class="rsImg rsMainSlideImage" src="'+item.image+'"/>';
if(item.content.hasClass('rsImg')) {
item.content = $(newHTML);
} else {
item.content.find('.rsImg').eq(0).replaceWith(newHTML);
}
if(!item.isLoaded && !item.isLoading && item.holder) {
item.holder.html(item.content);
}
}
});
$.rsModules.fullscreen = $.rsProto._initFullscreen;
})(jQuery);
|
PypiClean
|
/FoundryDataBrowser-190903.1.tar.gz/FoundryDataBrowser-190903.1/viewers/hyperspec_3d_h5.py
|
from ScopeFoundry.data_browser import HyperSpectralBaseView
import numpy as np
import h5py
import pyqtgraph as pg
from .scalebars import ConfocalScaleBar
from matplotlib.cm import ScalarMappable
from pyqtgraph.opengl import GLViewWidget, GLAxisItem, GLGridItem, GLVolumeItem
from scipy.interpolate import interp1d
from qtpy.QtWidgets import QPushButton
import time
class HyperSpec3DH5View(HyperSpectralBaseView):
name = 'hyperspec_3d_h5'
supported_measurements = ['oo_asi_hyperspec_3d_scan',
'andor_asi_hyperspec_3d_scan',]
def scan_specific_setup(self):
pass
def setup(self):
self.settings.New('sample', dtype=str, initial='')
self.settings.New('z_slice', dtype=float, choices=[0.0], initial=0.0)
self.settings.New('show_3d', dtype=bool, initial=False)
self.settings.New('vol_alpha', dtype=float, vmin=0.0, vmax=1.0,
initial=0.5)
self.settings.New(
'vol_colormap', dtype=str, initial='viridis',
choices=['viridis', 'plasma', 'inferno', 'magma', 'cividis',
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'])
# self.settings.New('vol_percentile', dtype=int, vmin=0, vmax=49,
# initial=5)
self.settings.New('vol_percentile_min', dtype=int, vmin=0, vmax=100,
initial=5)
self.settings.New('vol_percentile_max', dtype=int, vmin=0, vmax=100,
initial=95)
self.settings.New('vol_transparent_percentile', dtype=int, vmin=0,
vmax=100, initial=5)
self.settings.New('vol_transparent_min', dtype=bool, initial=False)
self.settings.z_slice.updated_choice_index_value.connect(
self.on_update_zslice_choice)
# self.settings.vol_colormap.updated_value.connect(self.calculate_volume)
# self.settings.vol_alpha.updated_value.connect(self.calculate_volume)
HyperSpectralBaseView.setup(self)
voldata = np.empty((1, 1, 1, 4), dtype=np.ubyte)
voldata[0, 0, 0, :] = [255, 255, 255, 0]
self.volitem = GLVolumeItem(data=voldata)
self.glview = GLViewWidget()
self.glaxis = GLAxisItem()
self.glgrid = GLGridItem()
self.glview.addItem(self.glgrid)
self.glview.addItem(self.glaxis)
self.glview.addItem(self.volitem)
self.gldock = self.dockarea.addDock(name='3D', widget=self.glview,
position='below',
relativeTo=self.image_dock)
self.calculate_3d_pushButton = QPushButton(text='calculate_3d')
self.settings_ui.layout().addWidget(self.calculate_3d_pushButton)
self.calculate_3d_pushButton.clicked.connect(self.calculate_volume)
self.image_dock.raiseDock()
def is_file_supported(self, fname):
return np.any([(meas_name in fname)
for meas_name in self.supported_measurements])
def reset(self):
if hasattr(self, 'dat'):
self.dat.close()
del self.dat
if hasattr(self, 'spec_map'):
del self.spec_map
if hasattr(self, 'scalebar'):
self.imview.getView().removeItem(self.scalebar)
del self.scalebar
if hasattr(self, 'volume'):
spoof_data = np.zeros((1, 1, 1, 4), dtype=np.ubyte)
self.volitem.setData(spoof_data)
del self.volume
self.settings.show_3d.update_value(False)
self.image_dock.raiseDock()
def load_data(self, fname):
self.dat = h5py.File(fname)
for meas_name in self.supported_measurements:
if meas_name in self.dat['measurement']:
self.M = self.dat['measurement'][meas_name]
for map_name in ['hyperspectral_map', 'spec_map']:
if map_name in self.M:
self.spec_map = np.array(self.M[map_name])
self.h_span = self.M['settings'].attrs['h_span']
self.x_array = np.array(self.M['h_array'])
self.z_array = np.array(self.M['z_array'])
units = self.M['settings/units'].attrs['h_span']
if units == 'mm':
self.h_span = self.h_span*1e-3
self.z_span = self.z_array*1e-3
self.settings.z_slice.change_unit('mm')
if 'dark_indices' in list(self.M.keys()):
print('dark indices found')
dark_indices = self.M['dark_indices']
if dark_indices.len() == 0:
self.spec_map = np.delete(
self.spec_map, list(dark_indices.shape), -1)
else:
self.spec_map = np.delete(
self.spec_map, np.array(dark_indices), -1)
else:
print('no dark indices')
self.hyperspec_data = self.spec_map[0, :, :, :]
self.display_image = self.hyperspec_data.sum(axis=-1)
self.settings.z_slice.change_choice_list(self.z_array.tolist())
self.settings.z_slice.update_value(self.z_array[0])
self.spec_x_array = np.arange(self.hyperspec_data.shape[-1])
for x_axis_name in ['wavelength', 'wls', 'wave_numbers',
'raman_shifts']:
if x_axis_name in self.M:
x_array = np.array(self.M[x_axis_name])
if 'dark_indices' in list(self.M.keys()):
dark_indices = self.M['dark_indices']
# The following is to read a dataset I initialized
# incorrectly for dark pixels. This can be replaced with
# the else statement entirely now that the measurement is
# fixed, but I still have a long measurement that will
# benefit from this.
if dark_indices.len() == 0:
x_array = np.delete(
x_array, list(dark_indices.shape), 0)
else:
x_array = np.delete(x_array, np.array(dark_indices), 0)
self.add_spec_x_array(x_axis_name, x_array)
self.x_axis.update_value(x_axis_name)
sample = self.dat['app/settings'].attrs['sample']
self.settings.sample.update_value(sample)
self.calculate_volume()
def on_update_zslice_choice(self, index):
if hasattr(self, 'spec_map'):
self.hyperspec_data = self.spec_map[index, :, :, :]
self.display_images['default'] = self.hyperspec_data
self.display_images['sum'] = self.hyperspec_data.sum(axis=-1)
self.spec_x_arrays['default'] = self.spec_x_array
self.spec_x_arrays['index'] = np.arange(
self.hyperspec_data.shape[-1])
self.recalc_bandpass_map()
self.recalc_median_map()
self.update_display()
def calculate_volume(self):
if not self.settings['show_3d']:
print('calculate_volume called without show_3d')
return
print('calculating 3d volume')
t0 = time.time()
if hasattr(self, 'volume'):
del self.volume
if hasattr(self, 'mappable'):
self.mappable.set_cmap(self.settings['vol_colormap'])
else:
self.mappable = ScalarMappable(cmap=self.settings['vol_colormap'])
z_span = self.z_array[-1] - self.z_array[0]
dx = self.x_array[1] - self.x_array[0]
z_interp_array = np.linspace(np.amin(self.z_array),
np.amax(self.z_array),
num=int(z_span / dx))
z_interp = None
self.volume = None
nz = len(z_interp_array)
if self.settings['display_image'] == 'bandpass_map':
print('bandpass_map selected')
x, slice = self.get_xhyperspec_data(apply_use_x_slice=True)
ind_min = np.nonzero(self.spec_x_array == x[0])[0][0]
ind_max = np.nonzero(self.spec_x_array == x[-1])[0][0]
data = np.zeros((len(self.z_array),) + slice.shape)
data = self.spec_map[:, :, :, ind_min:ind_max]
# for kk in range(len(self.z_array)):
# print(
# 'grabbing bandpass layer %d of %d' % (kk, len(self.z_array)))
# self.settings.z_slice.update_value(self.z_array[kk])
# x, data[kk, :, :, :] = self.get_xhyperspec_data(
# apply_use_x_slice=True)
z_interp = interp1d(self.z_array, data, axis=0)
else:
z_interp = interp1d(self.z_array, self.spec_map, axis=0)
data = z_interp(z_interp_array)
self.volume = np.zeros(data.shape[:-1] + (4,), dtype=np.ubyte)
pmin = self.settings['vol_percentile_min']
pmax = self.settings['vol_percentile_max']
self.mappable.set_array(data.sum(axis=-1))
vmin = np.percentile(data.sum(axis=-1), pmin)
vmax = np.percentile(data.sum(axis=-1), pmax)
tmin = np.percentile(
data.sum(axis=-1),
self.settings['vol_transparent_percentile'])
self.mappable.set_clim(vmin=vmin, vmax=vmax)
# self.mappable.autoscale()
for kk in range(nz):
print('calculating rgba vals for %d of %d layers' % (kk, nz))
sum_data = data[kk, :, :, :].sum(axis=-1)
# print(sum_data.shape, self.volume.shape)
self.volume[kk, :, :, :] = self.mappable.to_rgba(
sum_data,
alpha=self.settings['vol_alpha'],
bytes=True
)
if self.settings['vol_transparent_min']:
self.volume[kk, :, :, 3][np.nonzero(sum_data <= tmin)] = 0
print('3d volume calculation complete')
t1 = time.time()
print('time elapsed: %0.3f s' % (t1-t0))
kwargs = {'x': len(self.x_array), 'y': len(self.x_array), 'z': nz}
self.glaxis.setSize(**kwargs)
self.glgrid.setSize(**kwargs)
self.glgrid.setSpacing(x=1/dx*5, y=1/dx*5, z=1/dx*5)
# print(self.mappable.get_cmap().name)
# print(data.shape, self.volume.shape)
def update_display(self):
if hasattr(self, 'scalebar'):
self.imview.getView().removeItem(self.scalebar)
if self.display_image is not None:
# pyqtgraph axes are x,y, but data is stored in (frame, y,x, time),
# so we need to transpose
self.imview.getImageItem().setImage(self.display_image.T)
nn = self.display_image.shape
if hasattr(self, 'h_span'):
span = self.h_span
else:
span = -1
self.scalebar = ConfocalScaleBar(span=span, num_px=nn[0])
self.scalebar.setParentItem(self.imview.getView())
self.scalebar.anchor((1, 1), (1, 1), offset=(-20, -20))
if hasattr(self, 'volume') and self.settings['show_3d']:
self.volitem.setData(np.swapaxes(self.volume, 0, 2))
self.on_change_rect_roi()
self.on_update_circ_roi()
def matplotlib_colormap_to_pg_colormap(colormap_name, n_ticks=16):
'''
============= =========================================================
**Arguments**
colormap_name (string) name of a matplotlib colormap i.e. 'viridis'
n_ticks (int) Number of ticks to create when dict of functions
is used. Otherwise unused.
============= =========================================================
returns: (pgColormap) pyqtgraph colormap
primary Usage: <pg.ImageView>.setColorMap(pgColormap)
requires: cmapToColormap by Sebastian Hoefer
https://github.com/pyqtgraph/pyqtgraph/issues/561
'''
from matplotlib import cm
pos, rgba_colors = zip(*cmapToColormap(getattr(cm, colormap_name), n_ticks))
pgColormap = pg.ColorMap(pos, rgba_colors)
return pgColormap
def cmapToColormap(cmap, nTicks=16):
"""
Converts a Matplotlib cmap to pyqtgraphs colormaps. No dependency on
matplotlib.
Parameters:
*cmap*: Cmap object. Imported from matplotlib.cm.*
*nTicks*: Number of ticks to create when dict of functions is used.
Otherwise unused.
author: Sebastian Hoefer
"""
import collections
# Case #1: a dictionary with 'red'/'green'/'blue' values as list of ranges (e.g. 'jet')
# The parameter 'cmap' is a 'matplotlib.colors.LinearSegmentedColormap' instance ...
if hasattr(cmap, '_segmentdata'):
colordata = getattr(cmap, '_segmentdata')
if ('red' in colordata) and isinstance(colordata['red'], collections.Sequence):
# collect the color ranges from all channels into one dict to get unique indices
posDict = {}
for idx, channel in enumerate(('red', 'green', 'blue')):
for colorRange in colordata[channel]:
posDict.setdefault(colorRange[0], [-1, -1, -1])[idx] = colorRange[2]
indexList = list(posDict.keys())
indexList.sort()
# interpolate missing values (== -1)
for channel in range(3): # R,G,B
startIdx = indexList[0]
emptyIdx = []
for curIdx in indexList:
if posDict[curIdx][channel] == -1:
emptyIdx.append(curIdx)
elif curIdx != indexList[0]:
for eIdx in emptyIdx:
rPos = (eIdx - startIdx) / (curIdx - startIdx)
vStart = posDict[startIdx][channel]
vRange = (posDict[curIdx][channel] - posDict[startIdx][channel])
posDict[eIdx][channel] = rPos * vRange + vStart
startIdx = curIdx
del emptyIdx[:]
for channel in range(3): # R,G,B
for curIdx in indexList:
posDict[curIdx][channel] *= 255
rgb_list = [[i, posDict[i]] for i in indexList]
# Case #2: a dictionary with 'red'/'green'/'blue' values as functions (e.g. 'gnuplot')
elif ('red' in colordata) and isinstance(colordata['red'], collections.Callable):
indices = np.linspace(0., 1., nTicks)
luts = [np.clip(np.array(colordata[rgb](indices), dtype=np.float), 0, 1) * 255 \
for rgb in ('red', 'green', 'blue')]
rgb_list = zip(indices, list(zip(*luts)))
# If the parameter 'cmap' is a 'matplotlib.colors.ListedColormap' instance, with the attributes 'colors' and 'N'
elif hasattr(cmap, 'colors') and hasattr(cmap, 'N'):
colordata = getattr(cmap, 'colors')
# Case #3: a list with RGB values (e.g. 'seismic')
if len(colordata[0]) == 3:
indices = np.linspace(0., 1., len(colordata))
scaledRgbTuples = [(rgbTuple[0] * 255, rgbTuple[1] * 255, rgbTuple[2] * 255) for rgbTuple in colordata]
rgb_list = zip(indices, scaledRgbTuples)
# Case #4: a list of tuples with positions and RGB-values (e.g. 'terrain')
# -> this section is probably not needed anymore!?
elif len(colordata[0]) == 2:
rgb_list = [(idx, (vals[0] * 255, vals[1] * 255, vals[2] * 255)) for idx, vals in colordata]
# Case #X: unknown format or datatype was the wrong object type
else:
raise ValueError("[cmapToColormap] Unknown cmap format or not a cmap!")
# Convert the RGB float values to RGBA integer values
return list([(pos, (int(r), int(g), int(b), 255)) for pos, (r, g, b) in rgb_list])
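# --- Usage sketch (illustrative only, not part of this module) -------------
# As noted in the docstring of matplotlib_colormap_to_pg_colormap above:
#
#     pg_cmap = matplotlib_colormap_to_pg_colormap('viridis')
#     imview = pg.ImageView()
#     imview.setColorMap(pg_cmap)
#
# ``pg.ImageView()`` is shown here only to illustrate the setColorMap call.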
#
# class HyperSpecSpecMedianH5View(HyperSpectralBaseView):
#
# name = 'hyperspec_spec_median_npz'
#
# def is_file_supported(self, fname):
# return "_spec_scan.npz" in fname
#
#
# def load_data(self, fname):
# self.dat = np.load(fname)
#
# self.spec_map = self.dat['spec_map']
# self.wls = self.dat['wls']
# self.integrated_count_map = self.dat['integrated_count_map']
# self.spec_median_map = np.apply_along_axis(spectral_median, 2,
# self.spec_map[:,:,:],
# self.wls, 0)
# self.hyperspec_data = self.spec_map
# self.display_image = self.spec_median_map
# self.spec_x_array = self.wls
#
# def scan_specific_setup(self):
# self.spec_plot.setLabel('left', 'Intensity', units='counts')
# self.spec_plot.setLabel('bottom', 'Wavelength', units='nm')
#
# if __name__ == '__main__':
# import sys
#
# app = DataBrowser(sys.argv)
# app.load_view(HyperSpecH5View(app))
#
# sys.exit(app.exec_())
|
PypiClean
|
/acceldata_sdk-2.7.2.tar.gz/acceldata_sdk-2.7.2/acceldata_sdk/models/connection.py
|
from enum import Enum, auto
from dataclasses import dataclass
from typing import List
class ConnectionType:
def __init__(self, id, type):
self.id = id
self.type = type
def __eq__(self, other):
return self.id == other.id
def __repr__(self):
return f"ConnectionType({self.__dict__})"
class SecretsManagerType(Enum):
AWS_SECRETS_MANAGER = auto()
@dataclass
class SecretManagerConfiguration:
name: str = None
type: SecretsManagerType = None
def __init__(self,
name: str = None,
type: SecretsManagerType = None, *args, **kwargs):
self.name = name
if isinstance(type, dict):
self.type = SecretsManagerType(**type)
else:
self.type = type
@dataclass
class SecretManagerConfigurationWithOption:
secretManagerConfig: List[SecretManagerConfiguration] = None
defaultOption: bool = None
def __init__(self,
secretManagerConfig: List[SecretManagerConfiguration] = None,
defaultOption: bool = None, *args, **kwargs):
self.defaultOption = defaultOption
self.secretManagerConfig = list()
if secretManagerConfig is not None:
for obj in secretManagerConfig:
if isinstance(obj, dict):
self.secretManagerConfig.append(SecretManagerConfiguration(**obj))
else:
self.secretManagerConfig.append(obj)
@dataclass
class LivyComputeConfig:
isEnabled = None
executorMemory = None
executorCores = None
numExecutors = None
def __init__(self,
isEnabled: bool = None,
executorMemory: str = None,
executorCores=None,
numExecutors=None, *args, **kwargs):
self.isEnabled = isEnabled
self.executorMemory = executorMemory
self.executorCores = executorCores
self.numExecutors = numExecutors
def __repr__(self):
return f"LivyComputeConfig({self.__dict__})"
@dataclass
class DatabricksComputeConfig:
isEnabled = None
minWorkers = None
maxWorkers = None
clusterWorkerType = None
clusterDriverType = None
jobClusterType = None
def __init__(self,
isEnabled: bool = None,
minWorkers=None,
maxWorkers=None,
clusterWorkerType: str = None,
clusterDriverType: str = None,
jobClusterType: str = None,
*args, **kwargs):
self.isEnabled = isEnabled
self.minWorkers = minWorkers
self.maxWorkers = maxWorkers
self.clusterWorkerType = clusterWorkerType
self.clusterDriverType = clusterDriverType
self.jobClusterType = jobClusterType
def __repr__(self):
return f"DatabricksComputeConfig({self.__dict__})"
@dataclass
class YunikornComputeConfig:
isEnabled = None
executorMemory = None
executorCores = None
minExecutors = None
maxExecutors = None
def __init__(self,
isEnabled: bool = None,
executorMemory: str = None,
executorCores=None,
minExecutors=None,
maxExecutors=None, *args, **kwargs):
self.isEnabled = isEnabled
self.executorMemory = executorMemory
self.executorCores = executorCores
self.minExecutors = minExecutors
self.maxExecutors = maxExecutors
@dataclass
class PipelineComputeConfig:
livy: LivyComputeConfig = None
databricks: DatabricksComputeConfig = None
yunikorn: YunikornComputeConfig = None
def __init__(self,
livy: LivyComputeConfig = None,
databricks: DatabricksComputeConfig = None,
yunikorn: YunikornComputeConfig = None, *args, **kwargs):
if isinstance(livy, dict):
self.livy = LivyComputeConfig(**livy)
else:
self.livy = livy
if isinstance(databricks, dict):
self.databricks = DatabricksComputeConfig(**databricks)
else:
self.databricks = databricks
if isinstance(yunikorn, dict):
self.yunikorn = YunikornComputeConfig(**yunikorn)
else:
self.yunikorn = yunikorn
def __repr__(self):
return f"PipelineComputeConfig({self.__dict__})"
class PipelineType(Enum):
ANALYSIS = auto()
MONITOR = auto()
QUERY_ANALYSIS = auto()
@dataclass
class ProcessMetrics:
status = None
reportingTime = None
def __init__(self,
status=None,
reportingTime=None, *args, **kwargs):
self.status = status
self.reportingTime = reportingTime
def __repr__(self):
return f"ProcessMetrics({self.__dict__})"
@dataclass
class KubernetesMetrics:
status = None
reportingTime = None
def __init__(self,
status=None,
reportingTime=None, *args, **kwargs):
self.status = status
self.reportingTime = reportingTime
@dataclass
class DataplaneHealthMetrics:
reportingTime: str = None
process: ProcessMetrics = None
kubernetes: KubernetesMetrics = None
def __init__(self,
reportingTime=None,
process: ProcessMetrics = None,
kubernetes: KubernetesMetrics = None, *args, **kwargs):
self.reportingTime = reportingTime
if isinstance(process, dict):
self.process = ProcessMetrics(**process)
else:
self.process = process
if isinstance(kubernetes, dict):
self.kubernetes = KubernetesMetrics(**kubernetes)
else:
self.kubernetes = kubernetes
def __repr__(self):
return f"DataplaneHealthMetrics({self.__dict__})"
class AnalyticsPipeline:
def __init__(self, id, name, createdAt, updatedAt, url, externalUrl, description, hbaseEnabled,
hdfsEnabled, hiveEnabled, secretsManagerConfiguration=None, measureResultFsType=None,
tenantId = None, pipelineComputeConfig=None, pipelineType=None, version=None,
statusReportInterval=None, status=None, lastStatusReportTime=None, healthReport=None,
sparkMajorVersion=None, parentPipelineId=None, helmTemplateValues=None, **kwargs):
self.name = name
self.createdAt = createdAt
self.updatedAt = updatedAt
self.url = url
self.id = id
self.externalUrl = externalUrl
self.description = description
self.hbaseEnabled = hbaseEnabled
self.hdfsEnabled = hdfsEnabled
self.hiveEnable = hiveEnabled
if isinstance(secretsManagerConfiguration, dict):
self.secretsManagerConfiguration = SecretManagerConfigurationWithOption(**secretsManagerConfiguration)
else:
self.secretsManagerConfiguration = secretsManagerConfiguration
self.measureResultFsType = measureResultFsType
self.tenantId = tenantId
if isinstance(pipelineComputeConfig, dict):
self.pipelineComputeConfig = PipelineComputeConfig(**pipelineComputeConfig)
else:
self.pipelineComputeConfig = pipelineComputeConfig
if isinstance(pipelineType, dict):
self.pipelineType = PipelineType(**pipelineType)
else:
self.pipelineType = pipelineType
self.version = version
self.statusReportInterval = statusReportInterval
self.status = status
self.lastStatusReportTime = lastStatusReportTime
if isinstance(healthReport, dict):
self.healthReport = DataplaneHealthMetrics(**healthReport)
else:
self.healthReport = healthReport
self.sparkMajorVersion = sparkMajorVersion
self.parentPipelineId = parentPipelineId
self.helmTemplateValues = helmTemplateValues
def __eq__(self, other):
return self.name == other.name
def __repr__(self):
return f"AnalyticsPipeline({self.__dict__})"
class ConfigProperty:
def __init__(self, key, value, id=None, **kwargs):
self.key = key
self.value = value
self.id = id
def __eq__(self, other):
return self.id == other.id
def __repr__(self):
return f"ConfigProperty({self.__dict__})"
class Connection:
def __init__(self, name, connectionType, createdAt, updatedAt, analyticsPipeline, configuration, assemblyCount=0,
description=None, properties=None,
id=None, **kwargs):
self.name = name
self.configuration = configuration
self.connectionType = connectionType
self.createdAt = createdAt
self.updatedAt = updatedAt
self.assemblyCount = assemblyCount
self.description = description
self.id = id
if isinstance(connectionType, dict):
self.connectionType = ConnectionType(**connectionType)
else:
self.connectionType = connectionType
if isinstance(analyticsPipeline, dict):
self.analyticsPipeline = AnalyticsPipeline(**analyticsPipeline)
else:
self.analyticsPipeline = analyticsPipeline
self.properties = list()
if properties is not None:
for obj in properties:
if isinstance(obj, dict):
self.properties.append(ConfigProperty(**obj))
else:
self.properties.append(obj)
def __eq__(self, other):
return self.name == other.name
def __repr__(self):
return f"Connection({self.__dict__})"
class ConnectionCheckStatus(Enum):
SUCCESS = 1
FAILURE = 2
class ConnectionCheck:
def __init__(self, message, status, **kwargs):
self.message = message
self.status = status
def __repr__(self):
return f"ConnectionCheckResponse({self.__dict__})"
|
PypiClean
|
/cbw_api_toolbox-2.3.2-py3-none-any.whl/examples/host_detail_export_xlsx.py
|
import os
from configparser import ConfigParser
import xlsxwriter
from cbw_api_toolbox.cbw_api import CBWApi
def connect_api():
'''Connect to the API and test connection'''
conf = ConfigParser()
conf.read(os.path.join(os.path.abspath(
os.path.dirname(__file__)), '..', 'api.conf'))
client = CBWApi(conf.get('cyberwatch', 'url'), conf.get(
'cyberwatch', 'api_key'), conf.get('cyberwatch', 'secret_key'))
client.ping()
return client
def get_node_names(client):
"""Build a list with node and it's details"""
node_names = {}
for node in client.nodes():
node_names[node.id] = node.name
return node_names
def hosts_details(client):
"""Build a list with each host and it's details"""
hosts_list = []
for host in client.hosts():
host = client.host(str(host.id))
hosts_list.append(host)
return hosts_list
def export_xls(client):
"""Export differents categories to the XLSX file"""
file = xlsxwriter.Workbook('export.xlsx')
hosts = hosts_details(client)
computer_tab = file.add_worksheet("Hosts")
# Create each column
computer_tab.write(0, 0, "ID")
computer_tab.write(0, 1, "Hostname")
computer_tab.write(0, 2, "Address")
computer_tab.write(0, 3, "Source")
computer_tab.write(0, 4, "Category")
computer_tab.write(0, 5, "Associated asset")
host_details(computer_tab, hosts, get_node_names(client))
file.close()
def host_details(computer_tab, hosts, nodes):
"""Write each Host and it's details in `Computers` tab"""
row = 0
col = 0
for host in hosts:
computer_tab.write(row + 1, col, host.id)
computer_tab.write(row + 1, col + 1, host.hostname)
computer_tab.write(row + 1, col + 2, host.target)
computer_tab.write(row + 1, col + 3, nodes[host.node_id])
computer_tab.write(row + 1, col + 4, host.discovery.type)
if host.server_ids:
computer_tab.write(row + 1, col + 5, str(host.server_ids))
row += 2
def launch_script():
'''Launch script'''
client = connect_api()
export_xls(client)
print("INFO: Done.")
def main():
'''Main function'''
launch_script()
if __name__ == '__main__':
main()
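# Example ``api.conf`` layout assumed by connect_api() above (all values are
# placeholders):
#
#     [cyberwatch]
#     url = https://cyberwatch.example.org
#     api_key = <your-api-key>
#     secret_key = <your-secret-key>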
|
PypiClean
|
/sentry-arroyo-2.14.6.tar.gz/sentry-arroyo-2.14.6/arroyo/processing/strategies/guard.py
|
import logging
from typing import Callable, MutableMapping, Optional, Union, cast
from arroyo.dlq import InvalidMessage, InvalidMessageState
from arroyo.processing.strategies import MessageRejected, ProcessingStrategy
from arroyo.types import FilteredPayload, Message, Partition, TStrategyPayload
BasicStrategy = ProcessingStrategy[Union[FilteredPayload, TStrategyPayload]]
BasicMessage = Message[Union[FilteredPayload, TStrategyPayload]]
BuildProcessingStrategy = Callable[
[BasicStrategy[TStrategyPayload]], BasicStrategy[TStrategyPayload]
]
logger = logging.getLogger(__name__)
class _StrategyGuardAfter(BasicStrategy[TStrategyPayload]):
def __init__(self, next_step: BasicStrategy[TStrategyPayload]):
self.__next_step = next_step
self.__committable: MutableMapping[Partition, int] = {}
def submit(self, message: BasicMessage[TStrategyPayload]) -> None:
for partition, offset in message.committable.items():
if self.__committable.setdefault(partition, offset) > offset:
logger.warning(
"Submitted a message with committable {%s: %s}, "
"but we already submitted a message with a higher offset before.\n\n"
"Either Arroyo has a bug, or you are writing a custom "
"strategy that has odd control flow and cannot use "
"StrategyGuard",
partition,
offset,
)
self.__next_step.submit(message)
def poll(self) -> None:
self.__next_step.poll()
def join(self, timeout: Optional[float] = None) -> None:
self.__next_step.join(timeout=timeout)
def close(self) -> None:
self.__next_step.close()
def terminate(self) -> None:
self.__next_step.terminate()
class StrategyGuard(BasicStrategy[TStrategyPayload]):
"""
A wrapper around a strategy class that implements message filtering and
dead letter queue support for the strategy.
Can only be used in certain situations where the strategy's own control
flow is simple and immediate, i.e. does not involve any batching.
This is currently only an experiment in that we are not sure about its API.
Only for internal use.
Note that custom strategies generally don't have to deal with FilteredPayload and
InvalidMessage exceptions in simple consumers. If there is no filter step
or DLQing, there will be no filtered messages. If there is no explicit
handling of InvalidMessage, DLQed messages are not counted against commit
policy, but otherwise DLQing works.
"""
def __init__(
self,
build_inner_strategy: BuildProcessingStrategy[TStrategyPayload],
next_step: BasicStrategy[TStrategyPayload],
) -> None:
self.__next_step = next_step
self.__next_step_wrapper = _StrategyGuardAfter(next_step)
self.__inner_strategy = build_inner_strategy(self.__next_step_wrapper)
self.__invalid_messages = InvalidMessageState()
def submit(self, message: BasicMessage[TStrategyPayload]) -> None:
if isinstance(message.payload, FilteredPayload):
self.__next_step_wrapper.submit(cast(Message[FilteredPayload], message))
else:
try:
self.__inner_strategy.submit(message)
except InvalidMessage as e:
self.__invalid_messages.append(e)
raise e
def __forward_invalid_offsets(self) -> None:
if len(self.__invalid_messages):
self.__inner_strategy.poll()
filter_msg = self.__invalid_messages.build()
if filter_msg:
try:
self.__next_step_wrapper.submit(filter_msg)
self.__invalid_messages.reset()
except MessageRejected:
pass
def poll(self) -> None:
self.__forward_invalid_offsets()
try:
self.__inner_strategy.poll()
except InvalidMessage as e:
self.__invalid_messages.append(e)
raise e
def join(self, timeout: Optional[float] = None) -> None:
try:
self.__inner_strategy.join(timeout)
except InvalidMessage:
# We cannot forward offsets here since the inner strategy is already
# marked as closed. Log the exception and move on. The message will get
# reprocessed properly when the consumer is restarted.
logger.warning("Invalid message in join", exc_info=True)
def close(self) -> None:
# Forward invalid offsets first. Once the inner strategy is closed, we can no
# longer submit invalid messages to it.
self.__forward_invalid_offsets()
self.__inner_strategy.close()
def terminate(self) -> None:
self.__inner_strategy.terminate()
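# --- Usage sketch (illustrative only, not part of this module) -------------
# StrategyGuard wraps a strategy *factory* rather than a strategy instance,
# so that invalid-message bookkeeping sits between the inner strategy and its
# next step. ``MyInnerStrategy`` and ``final_step`` below are hypothetical:
#
#     def build_inner(next_step):
#         return MyInnerStrategy(next_step)
#
#     strategy = StrategyGuard(build_inner, next_step=final_step)
#
# FilteredPayload messages bypass the inner strategy entirely; InvalidMessage
# exceptions raised by it are recorded, re-raised, and then forwarded as
# filtered offsets on the next poll().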
|
PypiClean
|
/adafruit-blinka-pyportal-2.0.1.tar.gz/adafruit-blinka-pyportal-2.0.1/adafruit_pyportal/graphics.py
|
try:
import board
DISPLAY_ARG_REQUIRED = False
except AttributeError:
# okay to run Generic Linux
DISPLAY_ARG_REQUIRED = True
import displayio
import adafruit_ili9341
from PIL import Image
from adafruit_portalbase.graphics import GraphicsBase
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_Blinka_PyPortal.git"
class Graphics(GraphicsBase):
"""Graphics Helper Class for the PyPortal Library
:param default_bg: The path to your default background image file or a hex color.
Defaults to 0x000000.
:param debug: Turn on debug print outs. Defaults to False.
"""
# pylint: disable=too-few-public-methods
def __init__(self, *, default_bg=None, display=None, spi=None, debug=False):
if display is None:
if DISPLAY_ARG_REQUIRED:
raise RuntimeError(
"Display must be provided on platforms without board."
)
display_bus = displayio.FourWire(
spi, command=board.D25, chip_select=board.CE0
)
display = adafruit_ili9341.ILI9341(
display_bus, width=320, height=240, backlight_pin=board.D18
)
if display is None:
raise RuntimeError("Display not found or provided")
super().__init__(display, default_bg=default_bg, debug=debug)
# Tracks whether we've hidden the background when we showed the QR code.
self._qr_only = False
# pylint: disable=arguments-differ
def qrcode(self, qr_data, *, qr_size=1, x=0, y=0, hide_background=False):
"""Display a QR code
:param qr_data: The data for the QR code.
:param int qr_size: The scale of the QR code.
:param x: The x position of upper left corner of the QR code on the display.
:param y: The y position of upper left corner of the QR code on the display.
"""
super().qrcode(
qr_data, qr_size=qr_size, x=x, y=y,
)
if hide_background:
self.display.show(self._qr_group)
self._qr_only = hide_background
# pylint: enable=arguments-differ
def hide_QR(self): # pylint: disable=invalid-name
"""Clear any QR codes that are currently on the screen"""
if self._qr_only:
self.display.show(self.splash)
else:
try:
self._qr_group.pop()
except (IndexError, AttributeError): # later test if empty
pass
@staticmethod
def resize_image(filename, width, height):
"""Resize the image to be within the width and height while maintaining
proper scaling
param: str filename: The location of the image file to resize
param int width: The maximum width to resize to
param int height: The maximum height to resize to
"""
# Open image
image = Image.open(filename)
image_ratio = image.width / image.height
target_ratio = width / height
# Resize with sample
if target_ratio < image_ratio:
scaled_width = image.width * height // image.height
scaled_height = height
else:
scaled_width = width
scaled_height = image.height * width // image.width
image = image.resize((scaled_width, scaled_height), Image.BICUBIC)
# Save to same filename
image.save(filename)
# pylint: enable=no-self-use
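# --- Usage sketch (illustrative only, not part of this module) -------------
# On boards with displayio support the display can be created automatically;
# on generic Linux a ``display`` (or ``spi``) argument must be supplied.
# The data, coordinates and file path below are placeholders:
#
#     graphics = Graphics(default_bg=0x000000)
#     graphics.qrcode(b"https://adafruit.com", qr_size=3, x=10, y=10,
#                     hide_background=True)
#     graphics.hide_QR()
#     Graphics.resize_image("/tmp/photo.bmp", 320, 240)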
|
PypiClean
|
/flask_leaflet-0.1.3-py3-none-any.whl/flask_leaflet/basic_types.py
|
from uuid import UUID, uuid4
from markupsafe import Markup
from .mixins import Renderable, RenderOptions, RendersVarName
class LatLng(Renderable, RendersVarName):
"""Object representing Latitud and Longitud"""
lat: float
lng: float
alt: float = None
def __init__(self, lat: float, lng: float, alt: float = None, id: str | UUID = None) -> None:
self.id = id or uuid4()
self.lat = lat
self.lng = lng
self.alt = alt
def __render_html__(self, as_variable: bool = False) -> Markup:
string = f"[{self.lat}, {self.lng}]"
if as_variable:
string = f"var {self.var_name} = L.latlng({string});"
return Markup(string)
class LatLngBounds(Renderable, RendersVarName):
corner_1: LatLng
corner_2: LatLng
def __init__(self, corner_1: LatLng | list[float], corner_2: LatLng | list[float], id: str | UUID = None) -> None:
self.id = id or uuid4()
self.corner_1 = corner_1 if isinstance(corner_1, LatLng) else LatLng(*corner_1)
self.corner_2 = corner_2 if isinstance(corner_2, LatLng) else LatLng(*corner_2)
def __render_html__(self, as_variable: bool = False) -> Markup:
string = f"[{str(self.corner_1.__render_html__())}, {str(self.corner_2.__render_html__())}]"
if as_variable:
string = f"var {self.var_name} = L.latLngBounds({string});"
return Markup(string)
class Point(Renderable, RendersVarName):
x: int
y: int
def __init__(self, *args, id: str | UUID = None) -> None:
if isinstance(args[0], self.__class__):
self.id = args[0].id or (id or uuid4())
self.x = args[0].x
self.y = args[0].y
elif isinstance(args[0], (tuple, list)) and len(args[0]) == 2:
self.x, self.y = args[0]
elif isinstance(args[0], (int, float)) and isinstance(args[1], (int, float)) and len(args) == 2:
self.x, self.y = list(args)
else:
raise ValueError(f'Error trying to initialize Point with given args: {args}')
def __render_html__(self, as_variable: bool = False) -> Markup:
string = f"[{self.x}, {self.y}]"
if as_variable:
string = f"var {self.var_name} = L.point({string});"
return Markup(string)
class Icon(Renderable, RendersVarName, RenderOptions):
__not_render_options__ = ["id"]
icon_url: str = None
icon_retina_url: str = None
icon_size: Point = None
icon_anchor: Point = None
popup_anchor: Point = [0,0]
tooltip_anchor: Point = [0,0]
shadow_url: str = None
shadow_retina_url = None
shadow_size: Point = None
shadow_anchor: Point = None
class_name: str = ""
cross_origin: bool | str = False
def __init__(self,
id: str | UUID = None,
icon_url: str = None,
icon_retina_url: str = None,
icon_size: Point | list[int] = None,
icon_anchor: Point | list[int] = None,
popup_anchor: Point | list[int] = [0,0],
tooltip_anchor: Point | list[int] = [0,0],
shadow_url: str = None,
shadow_retina_url = None,
shadow_size: Point | list[int] = None,
shadow_anchor: Point | list[int] = None,
class_name: str = "",
cross_origin: bool | str = False) -> None:
self.id = id or uuid4()
self.icon_url = icon_url
self.icon_retina_url = icon_retina_url
self.icon_size = Point(icon_size) if icon_size else None
self.icon_anchor = Point(icon_anchor) if icon_anchor else None
self.popup_anchor = Point(popup_anchor) if popup_anchor else None
self.tooltip_anchor = Point(tooltip_anchor) if tooltip_anchor else None
self.shadow_url = shadow_url
self.shadow_retina_url = shadow_retina_url
self.shadow_size = Point(shadow_size) if shadow_size else None
self.shadow_anchor = Point(shadow_anchor) if shadow_anchor else None
self.class_name = class_name
self.cross_origin = cross_origin
def __render_html__(self, as_variable: bool = False) -> Markup:
string = f"L.icon({self.render_options()})"
if as_variable:
string = f"var {self.var_name} = {string};"
return Markup(string)
class DivIcon(Icon):
html: str = ""
bg_pos: Point = [0,0]
def __init__(self, html: str = "", bg_pos: Point | list[int] = [0,0], **kwargs) -> None:
super().__init__(**kwargs)
self.html = html
self.bg_pos = Point(bg_pos) if bg_pos else None
def __render_html__(self, as_variable: bool = False) -> Markup:
string = f"L.divIcon({self.render_options()})"
if as_variable:
string = f"var {self.var_name} = {string};"
return Markup(string)
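# --- Usage sketch (illustrative only, not part of this module) -------------
# The render methods above emit Leaflet JavaScript snippets, e.g.:
#
#     center = LatLng(51.505, -0.09)
#     center.__render_html__()                  # -> '[51.505, -0.09]'
#     center.__render_html__(as_variable=True)  # -> 'var <name> = L.latLng([51.505, -0.09]);'
#
#     icon = Icon(icon_url='/static/marker.png', icon_size=[25, 41])
#
# The coordinates, URL and size above are placeholder values; ``<name>``
# stands for the var_name supplied by the RendersVarName mixin.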
|
PypiClean
|
/bolt11-voltage-0.0.1.4.tar.gz/bolt11-voltage-0.0.1.4/bolt11/core.py
|
import base58 # type: ignore
import re
from bech32 import bech32_encode, bech32_decode, CHARSET # type: ignore
from binascii import unhexlify
from bitstring import ConstBitStream # type: ignore
from ecdsa import SECP256k1, VerifyingKey # type: ignore
from ecdsa.util import sigdecode_string # type: ignore
from hashlib import sha256
from .types import LightningInvoice, MilliSatoshi, Route, Signature
from .utils import amount_to_msat, trim_to_bytes, bitarray_to_u5, u5_to_bitarray
base58_prefix_map = {"bc": (0, 5), "tb": (111, 196)}
def _pull_tagged(stream):
tag = stream.read(5).uint
length = stream.read(5).uint * 32 + stream.read(5).uint
return (CHARSET[tag], stream.read(length * 5), stream)
def _parse_fallback(fallback, currency):
if currency in ["bc", "tb"]:
wver = fallback[0:5].uint
if wver == 17:
addr = base58.b58encode_check(bytes([base58_prefix_map[currency][0]]) + fallback[5:].tobytes())
elif wver == 18:
addr = base58.b58encode_check(bytes([base58_prefix_map[currency][1]]) + fallback[5:].tobytes())
elif wver <= 16:
addr = bech32_encode(currency, bitarray_to_u5(fallback))
else:
return None
else:
addr = fallback.tobytes()
return addr
def _readable_scid(short_channel_id: int) -> str:
return "{blockheight}x{transactionindex}x{outputindex}".format(
blockheight=((short_channel_id >> 40) & 0xFFFFFF),
transactionindex=((short_channel_id >> 16) & 0xFFFFFF),
outputindex=(short_channel_id & 0xFFFF),
)
def encode():
raise NotImplementedError
def decode(bech32_pr: str) -> LightningInvoice:
hrp, bech32_data = bech32_decode(bech32_pr)
route_hints = []
tags = {}
if not hrp or not bech32_data or not hrp.startswith("ln"):
raise ValueError("Bech32 is not valid.")
matches = re.match(r"ln(bc|bcrt|tb)(\w+)?", hrp)
assert matches, "Human readable part is not valid."
currency, amount_str = matches.groups()
data_part = u5_to_bitarray(bech32_data)
# Final signature is 65 bytes, split it off.
# signature =>
# "a valid 512-bit secp256k1 signature of the SHA2 256-bit hash of the human-readable part
# represented as UTF-8 bytes, concatenated with the data part (excluding the signature) with 0 bits appended
# to pad the data to the next byte boundary, with a trailing byte containing the recovery ID (0, 1, 2, or 3)"
if len(data_part) < 65 * 8:
raise ValueError("Too short to contain signature")
signature_data = data_part[-65 * 8 :].tobytes()
data = ConstBitStream(data_part[: -65 * 8])
signature = Signature(data=signature_data, signing_data=(hrp.encode("utf-8") + data.tobytes()))
timestamp = data.read(35).uint
# Look for tags in data
while data.pos != data.len:
tag, tagdata, data = _pull_tagged(data)
data_length = len(tagdata) // 5
if tag == "p" and data_length == 52:
# p (1): data_length 52. 256-bit SHA256 payment_hash. Preimage of this provides proof of payment.
tags["p"] = trim_to_bytes(tagdata).hex()
elif tag == "x":
# x (6): data_length variable. expiry time in seconds (big-endian). Default is 3600.
tags["x"] = tagdata.uint
elif tag == "d":
# d (13): data_length variable. Short description of purpose of payment (UTF-8).
tags["d"] = trim_to_bytes(tagdata).decode("utf-8")
elif tag == "h" and data_length == 52:
# h (23): data_length 52. 256-bit description of purpose of payment (SHA256).
tags["h"] = trim_to_bytes(tagdata).hex()
elif tag == "s" and data_length == 52:
# s (16): data_length 52. This 256-bit secret prevents forwarding nodes from probing the payment recipient.
tags["s"] = trim_to_bytes(tagdata).hex()
elif tag == "c":
# c (24): data_length variable. min_final_cltv_expiry to use for the last HTLC in the route. Default is 9.
tags["c"] = tagdata.uint
elif tag == "n" and data_length == 53:
# n (19): data_length 53. 33-byte public key of the payee node.
tags["n"] = trim_to_bytes(tagdata).hex()
elif tag == "f":
# f (9): data_length variable, depending on version. Fallback on-chain address.
tags["f"] = _parse_fallback(tagdata, currency)
elif tag == "r":
# r (3): `data_length` variable.
# One or more entries containing extra routing information for a private route;
# there may be more than one `r` field, too.
# * `pubkey` (264 bits)
# * `short_channel_id` (64 bits)
# * `feebase` (32 bits, big-endian)
# * `feerate` (32 bits, big-endian)
# * `cltv_expiry_delta` (16 bits, big-endian)
s = ConstBitStream(tagdata)
while s.pos + 264 + 64 + 32 + 32 + 16 < s.len:
route_hints.append(
Route(
public_key=s.read(264).tobytes().hex(),
short_channel_id=_readable_scid(s.read(64).intbe),
base_fee=MilliSatoshi(s.read(32).intbe),
ppm_fee=s.read(32).intbe,
cltv_expiry_delta=s.read(16).intbe,
)
)
# A reader MUST use the `n` field to validate the signature instead of
# performing signature recovery if a valid `n` field is provided.
message = bytearray([ord(c) for c in hrp]) + data.tobytes()
sig = signature_data[0:64]
if "n" in tags:
payee_public_key = tags["n"]
vk = VerifyingKey.from_string(unhexlify(payee_public_key), curve=SECP256k1)
if not vk.verify(sig, message, sha256, sigdecode=sigdecode_string):
raise ValueError("Could not verify public key")
else:
vk = VerifyingKey.from_public_key_recovery(sig, message, SECP256k1, sha256)
signaling_byte = signature_data[64]
key = vk[int(signaling_byte)]
payee_public_key = key.to_string("compressed").hex()
return LightningInvoice(
amount=amount_to_msat(amount_str) if amount_str else None,
currency=currency,
timestamp=timestamp,
payee_public_key=payee_public_key,
route_hints=route_hints,
signature=signature,
tags=tags,
)
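# --- Usage sketch (illustrative only, not part of this module) -------------
# ``"lnbc..."`` below is a placeholder for a full BOLT11 payment request:
#
#     invoice = decode("lnbc...")
#     invoice.amount            # MilliSatoshi, or None if no amount encoded
#     invoice.payee_public_key  # hex-encoded public key of the payee
#     invoice.tags["p"]         # payment hash (hex), when present
#     invoice.route_hints       # Route entries parsed from any 'r' fields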
|
PypiClean
|
/mantis-ml-1.6.5.tar.gz/mantis-ml-1.6.5/mantis_ml/modules/unsupervised_learn/dimens_reduction.py
|
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA, SparsePCA
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans, AgglomerativeClustering
from umap import UMAP
from time import time
import random
import sys
import seaborn as sns
from bokeh.plotting import figure, output_file, save
from bokeh.io import export_svgs, show
from bokeh.models import HoverTool, ColumnDataSource
class DimensionalityReduction:
def __init__(self, cfg):
self.cfg = cfg
def calc_principal_components(self, df, n_comp=20, method='PCA'):
'''
Run PCA and Sparse PCA on feature table
:param df:
:return:
'''
print(">> Running " + method + "...")
if df.shape[1] <= n_comp:
n_comp = df.shape[1] - 1
tmp_drop_cols = ['Gene_Name', self.cfg.Y]
X = df.drop(tmp_drop_cols, axis=1)
pca_data = X.copy()
pca = None
if method == 'SparsePCA':
pca = SparsePCA(n_components=n_comp)
else:
pca = PCA(n_components=n_comp)
principal_components = pca.fit_transform(pca_data)
columns = []
for i in range(1, n_comp+1):
columns.append('PC' + str(i))
pca_df = pd.DataFrame(data = principal_components, columns = columns)
pca_df = pd.concat([pca_df, df[tmp_drop_cols]], axis=1)
filepath = str(self.cfg.unsuperv_out / (method + ".table.tsv"))
pca_df.to_csv(filepath, sep='\t', index=None)
return pca, pca_df
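# Usage sketch (illustrative only): the typical call sequence is
#     dr = DimensionalityReduction(cfg)
#     pca, pca_df = dr.calc_principal_components(feature_df, n_comp=20, method='PCA')
#     dr.make_scree_plot(pca, method='PCA')
# where ``feature_df`` must contain a 'Gene_Name' column and the label column
# named by cfg.Y, and ``cfg`` is the mantis-ml configuration object.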
def make_scree_plot(self, pca, method='PCA'):
var = pca.explained_variance_ratio_
print(type(pca))
print(pca)
print(pca.n_components_)
n_comp_to_show = pca.n_components_
# cum_var = np.cumsum(np.round(var, decimals=4) * 100)
fig = plt.figure(figsize=(10, 10))
ax = fig.gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.bar(range(1, n_comp_to_show + 1), var * 100)
plt.title(method + ' - Scree Plot')
plt.xlabel('Principal Components')
plt.ylabel('% Variance explained')
plt.xticks(range(1, n_comp_to_show+1), ['PC' + str(i) for i in range(1, n_comp_to_show+1)], fontsize=8)
# plt.show()
plot_filename = method + "_Scree_plot.pdf"
fig.savefig(str(self.cfg.unsuperv_figs_out / plot_filename), bbox_inches='tight')
def calc_umap(self, df, n_neighbors=5, min_dist=0.3, metric='correlation', data_type='original_data'):
print(">> Running UMAP from " + data_type + "...")
tmp_drop_cols = ['Gene_Name', self.cfg.Y]
X = df.drop(tmp_drop_cols, axis=1)
umap = UMAP(n_neighbors=n_neighbors, min_dist=min_dist, metric=metric)
t0 = time()
X_umap = umap.fit_transform(X)
total_time = time() - t0
X_umap = pd.DataFrame(X_umap)
X_umap.columns = [('d' + str(c)) for c in X_umap.columns.values]
#print(X_umap)
X_umap = pd.concat([X_umap, df[tmp_drop_cols]], axis=1)
filepath = str(self.cfg.unsuperv_out / ("UMAP" + data_type + ".tsv"))
X_umap.to_csv(filepath, sep='\t', index=None)
return X_umap, total_time
def calc_tsne(self, df, n_comp=2, data_type='original_data', perplexity=30):
'''
Calculate t-SNE
:param df:
:param n_comp:
:param data_type: table used for t-SNE calculations - 'original_data' or 'principal_components'
:return:
'''
print(">> Running t-SNE from " + data_type + "...")
tmp_drop_cols = ['Gene_Name', self.cfg.Y]
X = df.drop(tmp_drop_cols, axis=1)
tsne = TSNE(n_comp, init='pca', random_state=0, perplexity=perplexity)
t0 = time()
X_tsne = tsne.fit_transform(X)
total_time = time() - t0
X_tsne = pd.DataFrame(X_tsne)
X_tsne.columns = [('d' + str(c)) for c in X_tsne.columns.values]
#print(X_tsne)
X_tsne = pd.concat([X_tsne, df[tmp_drop_cols]], axis=1)
filepath = str(self.cfg.unsuperv_out / ("tSNE.perplexity" + str(perplexity) + "." + data_type + ".tsv"))
X_tsne.to_csv(filepath, sep='\t', index=None)
return X_tsne, total_time
def get_clustering_from_tsne(self, X_tsne, n_clusters=15, perplexity=30):
gene_names = X_tsne['Gene_Name']
known_genes = X_tsne[self.cfg.Y]
tsne_repr = X_tsne.drop([self.cfg.Y, 'Gene_Name'], axis=1)
agglom_cl = AgglomerativeClustering(n_clusters)
agglom_cl.fit(tsne_repr)
tsne_repr.columns = ['x', 'y']
tsne_repr['cluster'] = agglom_cl.labels_
tsne_repr['Gene_Name'] = gene_names
tsne_repr['known_CKD_gene'] = known_genes
return agglom_cl, tsne_repr, gene_names
def plot_embedding_w_clusters(self, agglom_cl, tsne_repr, gene_list=[], gene_names=None, filename_prefix='embedding_w_clusters', figsize=(16, 16)):
plt.rc('font', size=14)
sns.set_style('white')
# define a custom palette
palette = sns.color_palette("Paired") + sns.color_palette("Set2")
palette = palette[:agglom_cl.n_clusters]
fig, ax = plt.subplots(figsize=figsize)
_ = plt.title('t-SNE plot with highlighted agglomerative clusters (k=%d)' % agglom_cl.n_clusters)
for i in range(agglom_cl.n_clusters):
_ = ax.scatter(x=tsne_repr.loc[tsne_repr.cluster == i, 'x'],
y=tsne_repr.loc[tsne_repr.cluster == i, 'y'],
color=palette[i], label=i, s=40, marker='.')
lgnd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), title='Clusters', fancybox=True)
for handle in lgnd.legendHandles:
handle.set_sizes([500])
compon1 = list(tsne_repr.loc[:, 'x'])
compon2 = list(tsne_repr.loc[:, 'y'])
for i, gene in enumerate(gene_list): # enumerate(list(gene_names)):
idx = gene_names[gene_names == gene].index[0]
_ = ax.text(compon1[idx], compon2[idx] + random.randint(1, 4), gene)
fig.savefig(str(self.cfg.unsuperv_figs_out / (filename_prefix + '.pdf')), bbox_inches='tight')
def plot_embedding_w_labels(self, df, highlighted_genes, x, y, plot_title, filename_prefix, figsize=(10, 10)):
'''
Plot a (static) dimensionality reduction embedding (e.g. PCA, t-SNE)
with label annotation for selected data points
'''
gene_names = df['Gene_Name']
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1, 1, 1)
ax.set_title(plot_title, fontsize=20)
targets = [0, 1]
colors = ['#bdbdbd', '#ef3b2c']
for target, color in zip(targets, colors):
indicesToKeep = df[self.cfg.Y] == target
ax.scatter(df.loc[indicesToKeep, x],
df.loc[indicesToKeep, y],
c=color,
s=20)
plt.xlabel("Dimension-1")
plt.ylabel("Dimension-2")
ax.legend(targets, loc=2)
compon1 = list(df.loc[:, x])
compon2 = list(df.loc[:, y])
for i, gene in enumerate(highlighted_genes):
try:
idx = gene_names[gene_names == gene].index[0]
ax.annotate(gene, (compon1[idx], compon2[idx]))
except Exception as e:
print('[Warning]:', gene, ' not found in gene list')
plot_filename = filename_prefix + "_plot.pdf"
fig.savefig(str(self.cfg.unsuperv_figs_out / plot_filename), bbox_inches='tight')
def plot_interactive_viz(self, data, highlighted_genes, method, pos_label, neg_label, show_plot=False, save_plot=False):
'''
Plot an interactive dimensionality reduction embedding (e.g. PCA, t-SNE)
with label annotation for selected data points
'''
# Highlight genes of interest
data['colors'] = data.known_gene.copy()
color_mapping = {pos_label: '#ef3b2c', neg_label: '#bdbdbd'}
data = data.replace({'colors': color_mapping})
data = data.sort_values(by=[self.cfg.Y], ascending=True)
known_genes_highlight_color = '#31a354'
data.loc[data['Gene_Name'] == 'PKD1', 'colors'] = known_genes_highlight_color
data.loc[data['Gene_Name'] == 'PKD2', 'colors'] = known_genes_highlight_color
selected_gene_rows = data.loc[data['Gene_Name'].isin(highlighted_genes), :]
data = data[~data.Gene_Name.isin(highlighted_genes)]
data = pd.concat([data, selected_gene_rows], axis=0)
data.loc[data['Gene_Name'].isin(highlighted_genes), 'colors'] = '#252525'
data['annotation'] = data.known_gene.copy()
data.loc[data.annotation == pos_label, 'annotation'] = 'Yes'
data.loc[data.annotation == neg_label, 'annotation'] = 'No'
# Plot
source = ColumnDataSource(dict(
x=data['x'],
y=data['y'],
color=data['colors'],
content=data['Gene_Name'],
annot=data['annotation'],
))
interact_viz = figure(plot_width=900, plot_height=900,
title=method, tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave",
x_axis_type=None, y_axis_type=None, min_border=1)
interact_viz.scatter(x='x', y='y',
source=source,
color='color',
alpha=0.8, size=10,
legend=method)
# hover tools
hover = interact_viz.select(dict(type=HoverTool))
hover.tooltips = [("gene", "@content")]
interact_viz.legend.location = "top_left"
plot_filename = method + "_interactive_plot.html"
output_file(str(self.cfg.unsuperv_figs_out / plot_filename))
save(interact_viz)
if show_plot:
show(interact_viz)
if save_plot:
interact_viz.output_backend = "svg"
plot_filename = method + '_interactive_plot.svg'
export_svgs(interact_viz, filename=str(self.cfg.unsuperv_figs_out / plot_filename))
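# --- Illustrative usage sketch (not part of the original module) -----------
# A minimal way to exercise calc_principal_components on synthetic data.
# The real mantis-ml config object exposes more than is stubbed here; only the
# attributes this module actually touches (Y, unsuperv_out, unsuperv_figs_out)
# and the 'Gene_Name' column are reproduced, everything else is assumed.
if __name__ == "__main__":
    import tempfile
    from pathlib import Path
    from types import SimpleNamespace

    rng = np.random.RandomState(0)
    demo_df = pd.DataFrame(rng.rand(50, 5), columns=["feat" + str(i) for i in range(5)])
    demo_df["Gene_Name"] = ["GENE" + str(i) for i in range(50)]
    demo_df["known_gene"] = rng.randint(0, 2, 50)

    out_dir = Path(tempfile.mkdtemp())
    cfg = SimpleNamespace(Y="known_gene", unsuperv_out=out_dir, unsuperv_figs_out=out_dir)

    dr = DimensionalityReduction(cfg)
    pca, pca_df = dr.calc_principal_components(demo_df, n_comp=2, method="PCA")
    print(pca_df.head())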
|
PypiClean
|
/python_packaging_distribution-0.1.0.tar.gz/python_packaging_distribution-0.1.0/README.rst
|
=============================
python-packaging-distribution
=============================
.. image:: https://img.shields.io/pypi/v/python_packaging_distribution.svg
:target: https://pypi.python.org/pypi/python_packaging_distribution
.. image:: https://img.shields.io/travis/premvikash/python_packaging_distribution.svg
:target: https://travis-ci.org/premvikash/python_packaging_distribution
.. image:: https://readthedocs.org/projects/python-packaging-distribution/badge/?version=latest
:target: https://python-packaging-distribution.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Basic Python Project for packaging and distribution
* Free software: GNU General Public License v3
* Documentation: https://python-packaging-distribution.readthedocs.io.
Features
--------
* TODO
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
|
PypiClean
|
/soc-faker-4.0.2.tar.gz/soc-faker-4.0.2/socfaker/data/windows-event/event-4750.md
|
# Event ID 4750: A security-disabled global group was changed
## Description
This event generates every time a security-disabled (distribution) global group is changed. This event generates only on domain controllers.
Some changes do not invoke a 4750 event, for example, changes made using the Active Directory Users and Computers management console in the Managed By tab of the group account properties.
If you change the name of the group (SAM Account Name), you also get “4781: The name of an account was changed” if “Audit User Account Management” subcategory success auditing is enabled.
If you change the group type, you get a change event from the new group type auditing subcategory instead of 4750. If you need to monitor for group type changes, it is better to monitor for “4764: A group’s type was changed.”
[MS Source](https://github.com/MicrosoftDocs/windows-itpro-docs/blob/master/windows/security/threat-protection/auditing/event-4750.md)
## Event Log Illustration & Event XML
[MS Source](https://github.com/MicrosoftDocs/windows-itpro-docs/blob/master/windows/security/threat-protection/auditing/event-4750.md)
## Data Dictionary
| Standard Name | Field Name | Type | Description | Sample Value |
| ---------------- | ---------------- | ---------------- | ---------------- | ---------------- |
| target_group_name | TargetUserName | string | the name of the group that was changed. For example: ServiceDesk | ServiceDeskMain |
| target_group_domain | TargetDomainName | string | domain name of changed group. | CONTOSO |
| target_group_sid | TargetSid | string | SID of changed group. Event Viewer automatically tries to resolve SIDs and show the group name. If the SID cannot be resolved, you will see the source data in the event. | S-1-5-21-3457937927-2839227994-823803824-6119 |
| user_sid | SubjectUserSid | string | SID of account that requested the “change group” operation. Event Viewer automatically tries to resolve SIDs and show the account name. | S-1-5-21-3457937927-2839227994-823803824-1104 |
| user_name | SubjectUserName | string | the name of the account that requested the “change group” operation. | dadmin |
| user_domain | SubjectDomainName | string | subject’s domain name. | CONTOSO |
| user_logon_id | SubjectLogonId | integer | hexadecimal value that can help you correlate this event with recent events that might contain the same Logon ID, for example, “4624: An account was successfully logged on.” | 0x3007b |
| user_privilege_list | PrivilegeList | string | the list of user privileges which were used during the operation, for example, SeBackupPrivilege. | - |
| target_group_sam_name | SamAccountName | string | This is a new name of changed group used to support clients and servers from previous versions of Windows (pre-Windows 2000 logon name). If the value of sAMAccountName attribute of group object was changed, you will see the new value here. For example: ServiceDesk. | ServiceDeskMain |
| target_group_sid_history | SidHistory | string | contains previous SIDs used for the object if the object was moved from another domain. Whenever an object is moved from one domain to another, a new SID is created and becomes the objectSID. The previous SID is added to the sIDHistory property. If the value of sIDHistory attribute of group object was changed, you will see the new value here. | - |
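As a rough illustration (not part of soc-faker's API), the mapping between the raw `EventData` field names and the standard names in the table above can be applied to an already-parsed event dictionary like this; the sample values are the ones shown in the table:

```python
# Field-name mapping taken from the data dictionary above.
FIELD_MAP = {
    "TargetUserName": "target_group_name",
    "TargetDomainName": "target_group_domain",
    "TargetSid": "target_group_sid",
    "SubjectUserSid": "user_sid",
    "SubjectUserName": "user_name",
    "SubjectDomainName": "user_domain",
    "SubjectLogonId": "user_logon_id",
    "PrivilegeList": "user_privilege_list",
    "SamAccountName": "target_group_sam_name",
    "SidHistory": "target_group_sid_history",
}

def normalize_4750(event_data: dict) -> dict:
    """Rename raw EventData keys of a 4750 event to the standard names."""
    return {FIELD_MAP.get(key, key): value for key, value in event_data.items()}

# Example using sample values from the table:
print(normalize_4750({"TargetUserName": "ServiceDeskMain", "SubjectUserName": "dadmin"}))
```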
|
PypiClean
|
/protopipe-0.4.0.post1.tar.gz/protopipe-0.4.0.post1/docs/contribute/instructions.rst
|
.. _instructions:
Instructions
============
.. contents::
:local:
:depth: 2
| These are some guidelines on how to contribute to *protopipe*.
| This of course makes sense only for the development branch, aka the *master*
branch.
This is usually done in a few steps:
1. you start using *protopipe*,
2. you find that either there is problem or *protopipe*
is missing a feature that is important for your research,
3. you open an issue (or pull-request, if you already have a solution!)
Open an issue
-------------
| It is always preferable to open an issue first, in order to warn other
users/developers and possibly trigger a discussion.
| This will be useful to identify more precisely what needs to be done.
| If you are not able to do it, the administrators of the repository should **label
your issue** depending on its nature.
| Labels are used to classify and prioritise issues within projects.
The labels normally used are quite self-explanatory, e.g.:
- bug
- fix
- wrong behaviour
- enhancement
- documentation
- dependency update
- summary
An issue can have multiple labels. You can propose new ones if needed.
Prepare and open a pull-request
-------------------------------
.. warning::
It is assumed that you installed *protopipe* as a developer (:ref:`install-development`).
1. update your **local** *master* branch with ``git pull upstream master``
2. create and move to a new **local** branch from your **local** *master* with
``git checkout -b your_branch``
3. develop inside it
4. push it to *origin*, thereby creating a copy of your branch also there
5. before pushing, please go through some checks (:ref:`beforepushing`)
6. start a *pull request* using the web interface from *origin/your_branch*
to *upstream/master*
1. wait for an outcome
2. if necessary, you can update or fix things in your branch because now
everything is traced!
(**local/your_branch** --> **origin/your_branch** --> **pull request**)
If your pull-request targets an issue, it should:
- have the same labels as that issue,
- if related to one or more open issues, its description should contain:
- the phrase `Closes #X #Y ...`, where X is the number associated with the issue(s), if any,
- a reference to the issue, e.g. "as reported in #X ..." or similar.
This will keep things clean and organised, so that when you or
someone else lands on the Projects page, the information is readily available
and up to date.
.. Note::
If your developments take a relatively long time, consider updating
your **local** *master* branch periodically.
If while doing this you see that the files on which you are working have been
modified *upstream*,
* move into your **local** branch,
* merge the new master into your branch with ``git merge master``,
* resolve any conflicts,
* push to origin
In this way, your pull request will be up-to-date with the master branch into
which you want to merge your changes.
If your changes are relatively small and
`you know what you are doing <https://www.atlassian.com/git/tutorials/merging-vs-rebasing>`_,
you can use ``git rebase master``, instead of merging.
Making your contribution visible
--------------------------------
Together with your changes, you should always check that:
- the email and name that you want to use are listed in the ``.mailmap``,
- your name appears in the ``CODEOWNERS`` file according to your contribution.
.. Note::
| It can happen that, if you forget, the maintainer(s) will do it for you, but
please remember that it can be overlooked.
| It is supposed to be a
responsibility of the authors of the pull request.
|
PypiClean
|
/thirdweb_sdk-3.1.1a0.tar.gz/thirdweb_sdk-3.1.1a0/thirdweb/abi/erc1155_receiver.py
|
# pylint: disable=too-many-arguments
import json
from typing import ( # pylint: disable=unused-import
Any,
List,
Optional,
Tuple,
Union,
)
from eth_utils import to_checksum_address
from mypy_extensions import TypedDict # pylint: disable=unused-import
from hexbytes import HexBytes
from web3 import Web3
from web3.contract import ContractFunction
from web3.datastructures import AttributeDict
from web3.providers.base import BaseProvider
from zero_ex.contract_wrappers.bases import ContractMethod, Validator
from zero_ex.contract_wrappers.tx_params import TxParams
# Try to import a custom validator class definition; if there isn't one,
# declare one that we can instantiate for the default argument to the
# constructor for ERC1155Receiver below.
try:
# both mypy and pylint complain about what we're doing here, but this
# works just fine, so their messages have been disabled here.
from . import ( # type: ignore # pylint: disable=import-self
ERC1155ReceiverValidator,
)
except ImportError:
class ERC1155ReceiverValidator(Validator): # type: ignore
"""No-op input validator."""
try:
from .middleware import MIDDLEWARE # type: ignore
except ImportError:
pass
class OnErc1155BatchReceivedMethod(
ContractMethod
): # pylint: disable=invalid-name
"""Various interfaces to the onERC1155BatchReceived method."""
def __init__(
self,
web3_or_provider: Union[Web3, BaseProvider],
contract_address: str,
contract_function: ContractFunction,
validator: Validator = None,
):
"""Persist instance data."""
super().__init__(web3_or_provider, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(
self,
operator: str,
_from: str,
ids: List[int],
values: List[int],
data: Union[bytes, str],
):
"""Validate the inputs to the onERC1155BatchReceived method."""
self.validator.assert_valid(
method_name="onERC1155BatchReceived",
parameter_name="operator",
argument_value=operator,
)
operator = self.validate_and_checksum_address(operator)
self.validator.assert_valid(
method_name="onERC1155BatchReceived",
parameter_name="from",
argument_value=_from,
)
_from = self.validate_and_checksum_address(_from)
self.validator.assert_valid(
method_name="onERC1155BatchReceived",
parameter_name="ids",
argument_value=ids,
)
self.validator.assert_valid(
method_name="onERC1155BatchReceived",
parameter_name="values",
argument_value=values,
)
self.validator.assert_valid(
method_name="onERC1155BatchReceived",
parameter_name="data",
argument_value=data,
)
return (operator, _from, ids, values, data)
def call(
self,
operator: str,
_from: str,
ids: List[int],
values: List[int],
data: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> Union[bytes, str]:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
(
operator,
_from,
ids,
values,
data,
) = self.validate_and_normalize_inputs(
operator, _from, ids, values, data
)
tx_params = super().normalize_tx_params(tx_params)
returned = self._underlying_method(
operator, _from, ids, values, data
).call(tx_params.as_dict())
# typing.Union is not callable; return the raw value from eth_call as-is.
return returned
def send_transaction(
self,
operator: str,
_from: str,
ids: List[int],
values: List[int],
data: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
(
operator,
_from,
ids,
values,
data,
) = self.validate_and_normalize_inputs(
operator, _from, ids, values, data
)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(
operator, _from, ids, values, data
).transact(tx_params.as_dict())
def build_transaction(
self,
operator: str,
_from: str,
ids: List[int],
values: List[int],
data: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> dict:
"""Construct calldata to be used as input to the method."""
(
operator,
_from,
ids,
values,
data,
) = self.validate_and_normalize_inputs(
operator, _from, ids, values, data
)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(
operator, _from, ids, values, data
).buildTransaction(tx_params.as_dict())
def estimate_gas(
self,
operator: str,
_from: str,
ids: List[int],
values: List[int],
data: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> int:
"""Estimate gas consumption of method call."""
(
operator,
_from,
ids,
values,
data,
) = self.validate_and_normalize_inputs(
operator, _from, ids, values, data
)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(
operator, _from, ids, values, data
).estimateGas(tx_params.as_dict())
class OnErc1155ReceivedMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the onERC1155Received method."""
def __init__(
self,
web3_or_provider: Union[Web3, BaseProvider],
contract_address: str,
contract_function: ContractFunction,
validator: Validator = None,
):
"""Persist instance data."""
super().__init__(web3_or_provider, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(
self,
operator: str,
_from: str,
_id: int,
value: int,
data: Union[bytes, str],
):
"""Validate the inputs to the onERC1155Received method."""
self.validator.assert_valid(
method_name="onERC1155Received",
parameter_name="operator",
argument_value=operator,
)
operator = self.validate_and_checksum_address(operator)
self.validator.assert_valid(
method_name="onERC1155Received",
parameter_name="from",
argument_value=_from,
)
_from = self.validate_and_checksum_address(_from)
self.validator.assert_valid(
method_name="onERC1155Received",
parameter_name="id",
argument_value=_id,
)
# safeguard against fractional inputs
_id = int(_id)
self.validator.assert_valid(
method_name="onERC1155Received",
parameter_name="value",
argument_value=value,
)
# safeguard against fractional inputs
value = int(value)
self.validator.assert_valid(
method_name="onERC1155Received",
parameter_name="data",
argument_value=data,
)
return (operator, _from, _id, value, data)
def call(
self,
operator: str,
_from: str,
_id: int,
value: int,
data: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> Union[bytes, str]:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
(
operator,
_from,
_id,
value,
data,
) = self.validate_and_normalize_inputs(
operator, _from, _id, value, data
)
tx_params = super().normalize_tx_params(tx_params)
returned = self._underlying_method(
operator, _from, _id, value, data
).call(tx_params.as_dict())
# typing.Union is not callable; return the raw value from eth_call as-is.
return returned
def send_transaction(
self,
operator: str,
_from: str,
_id: int,
value: int,
data: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
(
operator,
_from,
_id,
value,
data,
) = self.validate_and_normalize_inputs(
operator, _from, _id, value, data
)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(
operator, _from, _id, value, data
).transact(tx_params.as_dict())
def build_transaction(
self,
operator: str,
_from: str,
_id: int,
value: int,
data: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> dict:
"""Construct calldata to be used as input to the method."""
(
operator,
_from,
_id,
value,
data,
) = self.validate_and_normalize_inputs(
operator, _from, _id, value, data
)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(
operator, _from, _id, value, data
).buildTransaction(tx_params.as_dict())
def estimate_gas(
self,
operator: str,
_from: str,
_id: int,
value: int,
data: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> int:
"""Estimate gas consumption of method call."""
(
operator,
_from,
_id,
value,
data,
) = self.validate_and_normalize_inputs(
operator, _from, _id, value, data
)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(
operator, _from, _id, value, data
).estimateGas(tx_params.as_dict())
class SupportsInterfaceMethod(ContractMethod): # pylint: disable=invalid-name
"""Various interfaces to the supportsInterface method."""
def __init__(
self,
web3_or_provider: Union[Web3, BaseProvider],
contract_address: str,
contract_function: ContractFunction,
validator: Validator = None,
):
"""Persist instance data."""
super().__init__(web3_or_provider, contract_address, validator)
self._underlying_method = contract_function
def validate_and_normalize_inputs(self, interface_id: Union[bytes, str]):
"""Validate the inputs to the supportsInterface method."""
self.validator.assert_valid(
method_name="supportsInterface",
parameter_name="interfaceId",
argument_value=interface_id,
)
return interface_id
def call(
self,
interface_id: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> bool:
"""Execute underlying contract method via eth_call.
:param tx_params: transaction parameters
:returns: the return value of the underlying method.
"""
(interface_id) = self.validate_and_normalize_inputs(interface_id)
tx_params = super().normalize_tx_params(tx_params)
returned = self._underlying_method(interface_id).call(
tx_params.as_dict()
)
return bool(returned)
def send_transaction(
self,
interface_id: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> Union[HexBytes, bytes]:
"""Execute underlying contract method via eth_sendTransaction.
:param tx_params: transaction parameters
"""
(interface_id) = self.validate_and_normalize_inputs(interface_id)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(interface_id).transact(
tx_params.as_dict()
)
def build_transaction(
self,
interface_id: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> dict:
"""Construct calldata to be used as input to the method."""
(interface_id) = self.validate_and_normalize_inputs(interface_id)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(interface_id).buildTransaction(
tx_params.as_dict()
)
def estimate_gas(
self,
interface_id: Union[bytes, str],
tx_params: Optional[TxParams] = None,
) -> int:
"""Estimate gas consumption of method call."""
(interface_id) = self.validate_and_normalize_inputs(interface_id)
tx_params = super().normalize_tx_params(tx_params)
return self._underlying_method(interface_id).estimateGas(
tx_params.as_dict()
)
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class ERC1155Receiver:
"""Wrapper class for ERC1155Receiver Solidity contract.
All method parameters of type `bytes`:code: should be encoded as UTF-8,
which can be accomplished via `str.encode("utf_8")`:code:.
"""
on_erc1155_batch_received: OnErc1155BatchReceivedMethod
"""Constructor-initialized instance of
:class:`OnErc1155BatchReceivedMethod`.
"""
on_erc1155_received: OnErc1155ReceivedMethod
"""Constructor-initialized instance of
:class:`OnErc1155ReceivedMethod`.
"""
supports_interface: SupportsInterfaceMethod
"""Constructor-initialized instance of
:class:`SupportsInterfaceMethod`.
"""
def __init__(
self,
web3_or_provider: Union[Web3, BaseProvider],
contract_address: str,
validator: ERC1155ReceiverValidator = None,
):
"""Get an instance of wrapper for smart contract.
:param web3_or_provider: Either an instance of `web3.Web3`:code: or
`web3.providers.base.BaseProvider`:code:
:param contract_address: where the contract has been deployed
:param validator: for validation of method inputs.
"""
# pylint: disable=too-many-statements
self.contract_address = contract_address
if not validator:
validator = ERC1155ReceiverValidator(
web3_or_provider, contract_address
)
web3 = None
if isinstance(web3_or_provider, BaseProvider):
web3 = Web3(web3_or_provider)
elif isinstance(web3_or_provider, Web3):
web3 = web3_or_provider
else:
raise TypeError(
"Expected parameter 'web3_or_provider' to be an instance of either"
+ " Web3 or BaseProvider"
)
# if any middleware was imported, inject it
try:
MIDDLEWARE
except NameError:
pass
else:
try:
for middleware in MIDDLEWARE:
web3.middleware_onion.inject(
middleware["function"],
layer=middleware["layer"],
)
except ValueError as value_error:
if value_error.args == (
"You can't add the same un-named instance twice",
):
pass
self._web3_eth = web3.eth
functions = self._web3_eth.contract(
address=to_checksum_address(contract_address),
abi=ERC1155Receiver.abi(),
).functions
self.on_erc1155_batch_received = OnErc1155BatchReceivedMethod(
web3_or_provider,
contract_address,
functions.onERC1155BatchReceived,
validator,
)
self.on_erc1155_received = OnErc1155ReceivedMethod(
web3_or_provider,
contract_address,
functions.onERC1155Received,
validator,
)
self.supports_interface = SupportsInterfaceMethod(
web3_or_provider,
contract_address,
functions.supportsInterface,
validator,
)
@staticmethod
def abi():
"""Return the ABI to the underlying contract."""
return json.loads(
'[{"inputs":[{"internalType":"address","name":"operator","type":"address"},{"internalType":"address","name":"from","type":"address"},{"internalType":"uint256[]","name":"ids","type":"uint256[]"},{"internalType":"uint256[]","name":"values","type":"uint256[]"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"onERC1155BatchReceived","outputs":[{"internalType":"bytes4","name":"","type":"bytes4"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"operator","type":"address"},{"internalType":"address","name":"from","type":"address"},{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"onERC1155Received","outputs":[{"internalType":"bytes4","name":"","type":"bytes4"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"}]' # noqa: E501 (line-too-long)
)
# pylint: disable=too-many-lines
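# --- Illustrative usage sketch (not part of the generated wrapper) ---------
# How a caller might drive this wrapper. The RPC endpoint and contract address
# below are placeholders, and 0x4e2312e0 is the 4-byte interface id usually
# probed via supportsInterface for ERC-1155 receivers; adapt as needed.
if __name__ == "__main__":
    web3 = Web3(Web3.HTTPProvider("http://localhost:8545"))  # placeholder endpoint
    receiver = ERC1155Receiver(
        web3, "0x0000000000000000000000000000000000000000"  # placeholder address
    )
    # Per the class docstring, `bytes` parameters are passed as raw bytes
    # (or UTF-8 encoded strings); interface ids are 4 bytes.
    print(receiver.supports_interface.call(bytes.fromhex("4e2312e0")))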
|
PypiClean
|
/django_carbondesign-0.0.5-py3-none-any.whl/carbondesign/mithril-js/notification.js
|
import m from 'mithril/hyperscript';
//-
import messages from './messages.json';
import { Node } from './base';
export class Notification extends Node
{
WANT_CHILDREN = true
SLOTS = ['action']
MODES = ['inline', 'toast']
NODE_PROPS = ['variant', 'low_contrast']
prepare(vnode, values, context)
{
values.txt_close = gettext("close");
let variant = this.variant = vnode.attrs.variant || 'info';
if (variant === messages.DEBUG || variant === messages.INFO)
{
this.variant = 'info';
}
else if (variant === messages.SUCCESS)
{
this.variant = 'success';
}
else if (variant === messages.WARNING)
{
this.variant = 'warning';
}
else if (variant === messages.ERROR)
{
this.variant = 'error';
}
values['class'].push(`bx--${this.mode}-notification--${this.variant}`);
if (vnode.attrs.low_contrast)
{
values['class'].push(`bx--${this.mode}-notification--low-contrast`);
}
context.mode = this.mode;
}
render_inline(vnode, values, context)
{
return (
//##
m('div',
{
'data-notification': '',
'class': `bx--inline-notification ${values['class']}`,
role: 'alert',
},
[
m('div.bx--inline-notification__details', null,
[
this.tmpl('icon', vnode, values, context),
m('div.bx--inline-notification__text-wrapper', null, values.child),
]),
this.slot('action', vnode, values, context),
this.tmpl('close', vnode, values, context),
])
//##
);
}
render_toast(vnode, values, context)
{
return (
//##
m('div',
{
'data-notification': '',
'class': `bx--toast-notification ${values['class']}`,
role: 'alert',
},
[
this.tmpl('icon', vnode, values, context),
m('div.bx--toast-notification__details', null, values.child),
this.tmpl('close', vnode, values, context),
])
//##
);
}
render_tmpl_icon(vnode, values, context)
{
if (this.variant === 'info')
{
return (
//##
m('svg',
{
focusable: false,
preserveAspectRatio: 'xMidYMid meet',
xmlns: 'http://www.w3.org/2000/svg',
fill: 'currentColor',
'class': `bx--${this.mode}-notification__icon`,
width: 20,
height: 20,
viewBox: '0 0 32 32',
'aria-hidden': true,
},
[
m('path',
{
fill: 'none',
d: 'M16,8a1.5,1.5,0,1,1-1.5,1.5A1.5,1.5,0,0,1,16,8Zm4,13.875H17.\
125v-8H13v2.25h1.875v5.75H12v2.25h8Z',
'data-icon-path': 'inner-path',
}),
m('path',
{
d: 'M16,2A14,14,0,1,0,30,16,14,14,0,0,0,16,2Zm0,6a1.5,1.5,0,1,1-1.5,\
1.5A1.5,1.5,0,0,1,16,8Zm4,16.125H12v-2.25h2.875v-5.75H13v-2.25h4.\
125v8H20Z',
}),
])
//##
);
}
if (this.variant === 'error')
{
return (
//##
m('svg',
{
focusable: false,
preserveAspectRatio: 'xMidYMid meet',
xmlns: 'http://www.w3.org/2000/svg',
fill: 'currentColor',
'class': `bx--${this.mode}-notification__icon`,
width: 20,
height: 20,
viewBox: '0 0 20 20',
'aria-hidden': true,
},
[
m('path',
{
d: 'M10,1c-5,0-9,4-9,9s4,9,9,9s9-4,9-9S15,1,10,1z M13.5,14.5l-8-8l1-\
1l8,8L13.5,14.5z',
}),
m('path',
{
d: 'M13.5,14.5l-8-8l1-1l8,8L13.5,14.5z',
'data-icon-path': 'inner-path',
opacity: 0,
}),
])
//##
);
}
if (this.variant === 'success')
{
return (
//##
m('svg',
{
focusable: false,
preserveAspectRatio: 'xMidYMid meet',
xmlns: 'http://www.w3.org/2000/svg',
fill: 'currentColor',
'class': `bx--${this.mode}-notification__icon`,
width: 20,
height: 20,
viewBox: '0 0 20 20',
'aria-hidden': true,
},
[
m('path',
{
d: 'M10,1c-4.9,0-9,4.1-9,9s4.1,9,9,9s9-4,9-9S15,1,10,1z M8.7,13.5l-\
3.2-3.2l1-1l2.2,2.2l4.8-4.8l1,1L8.7,13.5z',
}),
m('path',
{
fill: 'none',
d: 'M8.7,13.5l-3.2-3.2l1-1l2.2,2.2l4.8-4.8l1,1L8.7,13.5z',
'data-icon-path': 'inner-path',
opacity: 0,
}),
])
//##
);
}
if (this.variant === 'warning')
{
return (
//##
m('svg',
{
focusable: false,
preserveAspectRatio: 'xMidYMid meet',
xmlns: 'http://www.w3.org/2000/svg',
fill: 'currentColor',
'class': `bx--${this.mode}-notification__icon`,
width: 20,
height: 20,
viewBox: '0 0 20 20',
'aria-hidden': true,
},
[
m('path',
{
d: 'M10,1c-5,0-9,4-9,9s4,9,9,9s9-4,9-9S15,1,10,1z M9.2,5h1.5v7H9.2V5z \
M10,16c-0.6,0-1-0.4-1-1s0.4-1,1-1 s1,0.4,1,1S10.6,16,10,16z',
}),
m('path',
{
d: 'M9.2,5h1.5v7H9.2V5z M10,16c-0.6,0-1-0.4-1-1s0.4-1,1-1s1,0.4,1,\
1S10.6,16,10,16z',
'data-icon-path': 'inner-path',
opacity: 0,
}),
])
//##
);
}
}
render_tmpl_close(vnode, values, context)
{
return (
//##
m('button',
{
'data-notification-btn': '',
'class': `bx--${this.mode}-notification__close-button`,
type: 'button',
'aria-label': values.txt_close,
},
m('svg',
{
focusable: false,
preserveAspectRatio: 'xMidYMid meet',
xmlns: 'http://www.w3.org/2000/svg',
fill: 'currentColor',
'class': `bx--${this.mode}-notification__close-icon`,
width: 20,
height: 20,
viewBox: '0 0 32 32',
'aria-hidden': true,
},
m('path',
{
d: 'M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 \
22.6 24 24 22.6 17.4 16 24 9.4z',
})))
//##
);
}
}
export class NotificationButton extends Node
{
WANT_CHILDREN = true
DEFAULT_TAG = 'button'
render_default(vnode, values, context)
{
return (
//##
m(values.astag,
{
tabindex: 0,
'class': `bx--inline-notification__action-button bx--btn bx--btn--sm bx--btn--ghost ${values['class']}`,
...values.props,
},
values.child)
//##
);
}
}
export class NotificationTitle extends Node
{
WANT_CHILDREN = true
DEFAULT_TAG = 'h3'
render_default(vnode, values, context)
{
return (
//##
m(values.astag,
{
'class': `bx--${context.mode}-notification__title ${values['class']}`,
...values.props,
},
values.child)
//##
);
}
}
export class NotificationSubtitle extends Node
{
WANT_CHILDREN = true
DEFAULT_TAG = 'p'
render_default(vnode, values, context)
{
return (
//##
m(values.astag,
{
'class': `bx--${context.mode}-notification__subtitle ${values['class']}`,
...values.props,
},
values.child)
//##
);
}
}
export class NotificationCaption extends Node
{
WANT_CHILDREN = true
DEFAULT_TAG = 'p'
render_default(vnode, values, context)
{
return (
//##
m(values.astag,
{
'class': `bx--${context.mode}-notification__caption ${values['class']}`,
...values.props,
},
values.child)
//##
);
}
}
|
PypiClean
|
/uniqgift_custom-1.0.7-py3-none-any.whl/django/db/models/sql/compiler.py
|
import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
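# As a rough illustration only (exact SQL is backend-dependent), the second
# example above compiles to roughly:
#   SELECT name, COUNT(somecol) FROM ... GROUP BY name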
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
for expr, _, _ in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (
getattr(expr, "target", None) == self.query.model._meta.pk
and getattr(expr, "alias", None) == self.query.base_table
):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias
for expr in expressions
if hasattr(expr, "target") and expr.target.primary_key
}
expressions = [pk] + [
expr
for expr in expressions
if expr in having
or (
getattr(expr, "alias", None) is not None
and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks or getattr(expr, "alias", None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
else:
sql, params = col.select_format(self, sql, params)
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif self.query.get_meta().ordering:
ordering = self.query.get_meta().ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
yield field, False
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
# combinated queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
# combinated queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator and self.select:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL("%d" % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError(
"ORDER BY term does not match any column in the result set."
)
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_name = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif not features.supports_slicing_ordering_in_compound:
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "({})" if features.supports_slicing_ordering_in_compound else "{}"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (
self.query.high_mark is not None or self.query.low_mark
)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
having, h_params = (
self.compile(self.having) if self.having is not None else ("", [])
)
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
elif with_col_aliases:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name("col%d" % col_idx),
)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols), "FROM", *from_]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append("ORDER BY %s" % ", ".join(ordering))
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if not alias and with_col_aliases:
alias = "col%d" % index
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions (Col instances), one per concrete
        field selected, suitable for inclusion in the SELECT clause.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
        Return a quoted list of fields to use in the DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append((item, False))
continue
results.extend(
self.find_ordering_name(item, opts, alias, order, already_seen)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target columns
        on the same input, as the prefixes of get_order_by() and get_distinct()
        must match. Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(
f, restricted, requested, only_load.get(field_model)
):
continue
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(
f, restricted, requested, only_load.get(model), reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1, next, restricted
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
Find the first selected column from a model. If it doesn't exist,
don't lock a model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), "AND")
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
output_formatter = (
json.dumps if self.query.explain_info.format == "json" else str
)
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = tuple()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
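        # Illustrative note (not part of the Django sources): for a plain field
        # and value 42 the method above returns ("%s", [42]); for a raw value
        # (field is None) such as "NOW()" it returns ("NOW()", []); and for an
        # expression object it returns the SQL and params from self.compile().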
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
ignore_conflicts=self.query.ignore_conflicts
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql(
ignore_conflicts=self.query.ignore_conflicts
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)]
where, params = self.compile(query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
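        # Illustrative example (assumed model/table names, not from the Django
        # sources): on a backend where update_can_self_select is False (e.g.
        # MySQL), the fallback path above emits SQL roughly of the form:
        #     DELETE FROM `app_book` WHERE `app_book`.`id` IN (
        #         SELECT * FROM (
        #             SELECT `app_book`.`id` FROM `app_book` WHERE ...
        #         ) subquery
        #     )
        # which avoids referencing the target table directly in the subquery.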
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params = params + inner_query_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
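# Minimal sketch (not part of Django): demonstrates how cursor_iter() chunks
# rows and trims them to col_count, using a hypothetical in-memory cursor.
class _FakeCursor:
    def __init__(self, rows):
        self._rows = list(rows)
        self.closed = False
    def fetchmany(self, size):
        batch, self._rows = self._rows[:size], self._rows[size:]
        return batch
    def close(self):
        self.closed = True
if __name__ == "__main__":
    fake = _FakeCursor([(1, "a", "x"), (2, "b", "y"), (3, "c", "z")])
    # sentinel [] matches the empty fetchmany() result; keep only 2 columns.
    for block in cursor_iter(fake, sentinel=[], col_count=2, itersize=2):
        print(block)  # [(1, 'a'), (2, 'b')] then [(3, 'c')]
    assert fake.closed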
|
PypiClean
|
/moai-mdk-0.1.5a16.tar.gz/moai-mdk-0.1.5a16/moai/visualization/visdom/density2d.py
|
from moai.visualization.visdom.base import Base
from moai.utils.arguments import assert_numeric
from moai.monads.execution.cascade import _create_accessor
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import torch
import seaborn
import typing
import logging
import numpy as np
import pytorch_lightning
log = logging.getLogger(__name__)
__all__ = ["Density2d"]
class Density2d(Base, pytorch_lightning.Callback):
def __init__(self,
key: typing.Union[str, typing.Sequence[str]],
fill: bool=True,
palette: str='coral',
levels: int=10,
width: int=720,
height: int=480,
name: str="default",
ip: str="http://localhost",
port: int=8097,
):
super().__init__(name, ip, port)
self.levels = levels
self.fill = fill
self.palette = palette
self.width = width / 300.0
self.height = height / 300.0
self.names = [key] if isinstance(key, str) else list(key)
self.keys = [_create_accessor(k) for k in self.names]
self.cache = {}
@property
def name(self) -> str:
return self.env_name
def __call__(self, tensors: typing.Dict[str, torch.Tensor]) -> None:
for n, k in zip(self.names, self.keys):
self.cache[n] = k(tensors)
def on_train_epoch_end(self,
trainer: pytorch_lightning.Trainer,
pl_module: pytorch_lightning.LightningModule,
) -> None:
for n in self.names:
            fig = Figure(figsize=(self.width, self.height), dpi=300.0)
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(111)
seaborn.kdeplot(
x=self.cache[n][:, 0],
y=self.cache[n][:, 1],
fill=self.fill, palette=self.palette,
ax=ax, levels=self.levels,
)
canvas.draw()
img = np.asarray(canvas.buffer_rgba())
self.visualizer.images(
img[np.newaxis, ..., :3].transpose(0, 3, 1, 2),
win=n, env=self.name
)
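# Minimal sketch (not part of moai): reproduces the figure-to-image path used
# by on_train_epoch_end() above on random data, without a running visdom server.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    points = rng.normal(size=(512, 2))
    fig = Figure(figsize=(720 / 300.0, 480 / 300.0), dpi=300.0)
    canvas = FigureCanvasAgg(fig)
    ax = fig.add_subplot(111)
    seaborn.kdeplot(x=points[:, 0], y=points[:, 1], fill=True, levels=10, ax=ax)
    canvas.draw()
    img = np.asarray(canvas.buffer_rgba())  # H x W x 4 (RGBA)
    # Visdom expects N x C x H x W; drop the alpha channel as the callback does.
    batch = img[np.newaxis, ..., :3].transpose(0, 3, 1, 2)
    print(batch.shape)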
|
PypiClean
|
/pulumi_nomad-0.4.2a1691129707.tar.gz/pulumi_nomad-0.4.2a1691129707/pulumi_nomad/get_job_parser.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetJobParserResult',
'AwaitableGetJobParserResult',
'get_job_parser',
'get_job_parser_output',
]
@pulumi.output_type
class GetJobParserResult:
"""
A collection of values returned by getJobParser.
"""
def __init__(__self__, canonicalize=None, hcl=None, id=None, json=None):
if canonicalize and not isinstance(canonicalize, bool):
raise TypeError("Expected argument 'canonicalize' to be a bool")
pulumi.set(__self__, "canonicalize", canonicalize)
if hcl and not isinstance(hcl, str):
raise TypeError("Expected argument 'hcl' to be a str")
pulumi.set(__self__, "hcl", hcl)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if json and not isinstance(json, str):
raise TypeError("Expected argument 'json' to be a str")
pulumi.set(__self__, "json", json)
@property
@pulumi.getter
def canonicalize(self) -> Optional[bool]:
"""
`(boolean: true)` - flag to enable setting any unset fields to their default values.
"""
return pulumi.get(self, "canonicalize")
@property
@pulumi.getter
def hcl(self) -> str:
"""
`(string)` - the HCL definition of the job.
"""
return pulumi.get(self, "hcl")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def json(self) -> str:
"""
`(string)` - the parsed job as JSON string.
"""
return pulumi.get(self, "json")
class AwaitableGetJobParserResult(GetJobParserResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetJobParserResult(
canonicalize=self.canonicalize,
hcl=self.hcl,
id=self.id,
json=self.json)
def get_job_parser(canonicalize: Optional[bool] = None,
hcl: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobParserResult:
"""
Parse a HCL jobspec and produce the equivalent JSON encoded job.
## Example Usage
```python
import pulumi
import pulumi_nomad as nomad
my_job = nomad.get_job_parser(hcl=(lambda path: open(path).read())(f"{path['module']}/jobspec.hcl"),
canonicalize=False)
```
:param bool canonicalize: `(boolean: true)` - flag to enable setting any unset fields to their default values.
:param str hcl: `(string)` - the HCL definition of the job.
"""
__args__ = dict()
__args__['canonicalize'] = canonicalize
__args__['hcl'] = hcl
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('nomad:index/getJobParser:getJobParser', __args__, opts=opts, typ=GetJobParserResult).value
return AwaitableGetJobParserResult(
canonicalize=__ret__.canonicalize,
hcl=__ret__.hcl,
id=__ret__.id,
json=__ret__.json)
@_utilities.lift_output_func(get_job_parser)
def get_job_parser_output(canonicalize: Optional[pulumi.Input[Optional[bool]]] = None,
hcl: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetJobParserResult]:
"""
Parse a HCL jobspec and produce the equivalent JSON encoded job.
## Example Usage
```python
import pulumi
import pulumi_nomad as nomad
my_job = nomad.get_job_parser(hcl=(lambda path: open(path).read())(f"{path['module']}/jobspec.hcl"),
canonicalize=False)
```
:param bool canonicalize: `(boolean: true)` - flag to enable setting any unset fields to their default values.
:param str hcl: `(string)` - the HCL definition of the job.
"""
...
|
PypiClean
|
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/caniuse-lite/data/regions/GG.js
|
module.exports={D:{"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0.006108,"35":0,"36":0,"37":0,"38":0.006108,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0.18324,"50":0,"51":0.006108,"52":0,"53":0.024432,"54":0,"55":0,"56":0,"57":0.012216,"58":0.012216,"59":0,"60":0,"61":0.006108,"62":0,"63":0.006108,"64":0,"65":0.048864,"66":0,"67":0.054972,"68":0.348156,"69":0.024432,"70":0.006108,"71":0.1527,"72":0.103836,"73":0.09162,"74":0.134376,"75":0.42756,"76":10.206468,"77":3.799176,"78":0.03054,"79":0.006108,"80":0},C:{"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0.225996,"46":0,"47":0,"48":0.097728,"49":0.006108,"50":0.024432,"51":0,"52":0.024432,"53":0,"54":0.006108,"55":0,"56":0,"57":0.042756,"58":0,"59":0,"60":0.024432,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0.018324,"67":0.054972,"68":0.677988,"69":1.551432,"70":0,"71":0,"3.5":0,"3.6":0},F:{"9":0,"11":0,"12":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0.012216,"54":0,"55":0,"56":0,"57":0,"58":0,"60":0,"62":0.006108,"9.5-9.6":0,"10.0-10.1":0,"10.5":0,"10.6":0,"11.1":0,"11.5":0,"11.6":0,"12.1":0},E:{"4":0,"5":0,"6":0,"7":0,"8":0.018324,"9":0.018324,"10":0.006108,"11":0.036648,"12":0.238212,"13":0.531396,_:"0","3.1":0,"3.2":0,"5.1":0,"6.1":0,"7.1":0,"9.1":0.128268,"10.1":0.195456,"11.1":0.299292,"12.1":2.638656,"13.1":0},G:{"8":0,"3.2":0.018207087269816,"4.0-4.1":0.0091035436349079,"4.2-4.3":0,"5.0-5.1":0.031862402722178,"6.0-6.1":0,"7.0-7.1":0.036414174539632,"8.1-8.4":0.068276577261809,"9.0-9.2":0.077380120896717,"9.3":0.55986793354684,"10.0-10.2":0.26855453722978,"10.3":0.67821400080064,"11.0-11.2":0.73738703442754,"11.3-11.4":1.333669142514,"12.0-12.1":2.2394717341874,"12.2-12.3":36.236655438751,"13.0-13.1":3.1862402722178},I:{"3":0.008845631840796,"4":0.049282805970149,_:"76","2.1":0,"2.2":0.013900278606965,"2.3":0.0075819701492537,"4.1":0.11246589054726,"4.2-4.3":0.66468604975124,"4.4":0,"4.4.3-4.4.4":0.41321737313433},P:{"4":0.061219359430605,"5.0-5.4":0,"6.2-6.4":0.012243871886121,"7.2-7.4":0,"8.2":0.012243871886121,"9.2":1.4570207544484,"10.1":1.8978001423488},B:{"12":0.024432,"13":0.024432,"14":0.03054,"15":0.006108,"16":0.06108,"17":1.044468,"18":1.752996,_:"76"},K:{_:"0 10 11 12 11.1 11.5 12.1"},A:{"6":0,"7":0,"8":0.006108,"9":0,"10":0,"11":4.892508,"5.5":0},N:{"10":0,"11":0.031136},J:{"7":0,"10":0},L:{"0":16.594316},R:{_:"0"},M:{"0":0.482608},O:{"0":0},Q:{"1.2":0},S:{"2.5":0},H:{"0":0.066324494845361}};
|
PypiClean
|
/siobrultech-protocols-0.12.0.tar.gz/siobrultech-protocols-0.12.0/siobrultech_protocols/gem/protocol.py
|
from __future__ import annotations
import asyncio
import logging
from collections import deque
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum, unique
from typing import Any, Callable, Deque, Generic, List, Optional, Set, TypeVar, Union
from .const import (
CMD_DELAY_NEXT_PACKET,
ESCAPE_SEQUENCE,
PACKET_DELAY_CLEAR_TIME_DEFAULT,
TARGET_SERIAL_NUMBER_PREFIX,
)
from .packets import (
BIN32_ABS,
BIN32_NET,
BIN48_ABS,
BIN48_NET,
BIN48_NET_TIME,
ECM_1220,
ECM_1240,
MalformedPacketException,
Packet,
PacketFormatType,
)
LOG = logging.getLogger(__name__)
PACKET_HEADER = bytes.fromhex("feff")
API_RESPONSE_WAIT_TIME = timedelta(seconds=3) # Time to wait for an API response
@dataclass(frozen=True)
class PacketProtocolMessage:
"""Base class for messages sent by a PacketProtocol."""
protocol: PacketProtocol
@dataclass(frozen=True)
class ConnectionMadeMessage(PacketProtocolMessage):
"""Message sent when a new connection has been made to a protocol. Sent once shortly after creation of the protocol instance."""
pass
@dataclass(frozen=True)
class PacketReceivedMessage(PacketProtocolMessage):
"""Message sent when a packet has been received by the protocol."""
packet: Packet
@dataclass(frozen=True)
class ConnectionLostMessage(PacketProtocolMessage):
"""Message sent when a protocol loses its connection. exc is the exception that caused the connection to drop, if any."""
exc: Optional[BaseException]
class PacketProtocol(asyncio.Protocol):
"""Protocol implementation for processing a stream of data packets from a GreenEye Monitor."""
def __init__(
self,
queue: asyncio.Queue[PacketProtocolMessage],
):
"""
Create a new protocol instance.
Whenever a data packet is received from the GEM, a `Packet` instance will be enqueued to `queue`.
"""
self._buffer = bytearray()
self._queue = queue
self._transport: Optional[asyncio.BaseTransport] = None
self._packet_type: PacketFormatType | None = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
LOG.info("%d: Connection opened", id(self))
assert self._transport is None
self._transport = transport
self._queue.put_nowait(ConnectionMadeMessage(protocol=self))
def connection_lost(self, exc: Optional[BaseException]) -> None:
if exc is not None:
LOG.warning("%d: Connection lost due to exception", id(self), exc_info=exc)
else:
LOG.info("%d: Connection closed", id(self))
self._transport = None
self._queue.put_nowait(ConnectionLostMessage(protocol=self, exc=exc))
def data_received(self, data: bytes) -> None:
LOG.debug("%d: Received %d bytes", id(self), len(data))
self._buffer.extend(data)
try:
should_continue = True
while should_continue:
should_continue = self._process_buffer()
self._ensure_transport()
except Exception:
LOG.exception("%d: Exception while attempting to parse a packet.", id(self))
def close(self) -> None:
"""Closes the underlying transport, if any."""
if self._transport:
self._transport.close()
self._transport = None
def _process_buffer(self) -> bool:
"""
Attempts to process one chunk of data in the buffer.
- If the buffer starts with a complete packet, delivers that packet to the queue and returns True
- If the buffer starts with an incomplete packet, returns False
- If the buffer starts with data that is not a packet, removes that data from the buffer, passes it
to unknown_data_received(), and returns True
Subclasses override unknown_data_received to process data in the buffer that is not packets (e.g. responses
to API calls).
Returns True if another call to _process_buffer might be able to process more of the buffer, False if the caller
should wait for more data to be added to the buffer before calling again.
"""
def skip_malformed_packet(msg: str, *args: Any, **kwargs: Any):
header_index = self._buffer.find(PACKET_HEADER, 1)
end = header_index if header_index != -1 else len(self._buffer)
LOG.debug(
"%d Skipping malformed packet due to " + msg + ". Buffer contents: %s",
id(self),
*args,
self._buffer[0:end],
)
del self._buffer[0:end]
header_index = self._buffer.find(PACKET_HEADER)
if header_index != 0:
end = header_index if header_index != -1 else len(self._buffer)
self.unknown_data_received(self._buffer[0:end])
del self._buffer[0:end]
return len(self._buffer) > 0
if len(self._buffer) < len(PACKET_HEADER) + 1:
# Not enough length yet
LOG.debug(
"%d: Not enough data in buffer yet (%d bytes): %s",
id(self),
len(self._buffer),
self._buffer,
)
return False
format_code = self._buffer[len(PACKET_HEADER)]
if format_code == 8:
packet_format = BIN32_ABS
elif format_code == 7:
packet_format = BIN32_NET
elif format_code == 6:
packet_format = BIN48_ABS
elif format_code == 5:
packet_format = BIN48_NET
elif format_code == 3:
packet_format = ECM_1240
elif format_code == 1:
packet_format = ECM_1220
else:
skip_malformed_packet("unknown format code 0x%x", format_code)
return len(self._buffer) > 0
if len(self._buffer) < packet_format.size:
# Not enough length yet
LOG.debug(
"%d: Not enough data in buffer yet (%d bytes)",
id(self),
len(self._buffer),
)
return False
try:
packet = None
try:
packet = packet_format.parse(self._buffer)
except MalformedPacketException:
if packet_format != BIN48_NET:
raise
if packet is None:
if len(self._buffer) < BIN48_NET_TIME.size:
# Not enough length yet
LOG.debug(
"%d: Not enough data in buffer yet (%d bytes)",
id(self),
len(self._buffer),
)
return False
packet = BIN48_NET_TIME.parse(self._buffer)
self._packet_type = packet.packet_format.type
LOG.debug("%d: Parsed one %s packet.", id(self), packet.packet_format.name)
del self._buffer[0 : packet.packet_format.size]
self._queue.put_nowait(PacketReceivedMessage(protocol=self, packet=packet))
except MalformedPacketException as e:
skip_malformed_packet(e.args[0])
return len(self._buffer) > 0
def unknown_data_received(self, data: bytes) -> None:
LOG.debug(
"%d: No header found. Discarding junk data: %s",
id(self),
data,
)
def _ensure_transport(self) -> asyncio.BaseTransport:
if not self._transport:
raise EOFError
return self._transport
@unique
class ProtocolState(Enum):
RECEIVING_PACKETS = 1 # Receiving packets from the GEM
SENT_PACKET_DELAY_REQUEST = 2 # Sent the packet delay request prior to an API request, waiting for any in-flight packets
SENDING_API_REQUEST = 3 # Sending a multi-part request
SENT_API_REQUEST = 4 # Sent an API request, waiting for a response
RECEIVED_API_RESPONSE = 5 # Received an API response, waiting for end call
class ProtocolStateException(Exception):
def __init__(
self,
actual: ProtocolState,
expected: Union[ProtocolState, Set[ProtocolState]],
*args: object,
) -> None:
self._actual = actual
self._expected = expected
super().__init__(*args)
def __str__(self) -> str:
if isinstance(self._expected, set):
expected = [s.name for s in self._expected]
if len(expected) > 1:
expected_str = ", ".join(expected[:-1]) + f", or {expected[-1]}"
else:
expected_str = expected[0]
else:
expected_str = self._expected.name
return f"Expected state to be {expected_str}; but got {self._actual.name}!"
# Argument type of an ApiCall.
T = TypeVar("T")
# Return type of an ApiCall response parser.
R = TypeVar("R")
@unique
class ApiType(Enum):
ECM = 1
GEM = 2
@dataclass
class ApiCall(Generic[T, R]):
"""
Helper class for making API calls with BidirectionalProtocol. There is one instance of
this class for each supported API call. This class handles the send_api_request and
receive_api_response parts of driving the protocol, since those are specific to each API
request type.
"""
def __init__(
self,
gem_formatter: Callable[[T], str],
gem_parser: Callable[[str], R | None] | None,
ecm_formatter: Callable[[T], List[bytes]] | None,
ecm_parser: Callable[[bytes], R | None] | None,
) -> None:
"""
Create a new APICall.
gem_formatter - a callable that, given a parameter of type T, returns the string to send to the GEM to make the API call
gem_parser - a callable that, given a string, parses it into a value of type R.
If there is not enough data to parse yet, it should return None.
If there is enough data to parse, but it is malformed, it should raise an Exception.
ecm_formatter - a callable that, given a parameter of type T, returns the series of bytes chunks to send to the ECM to make the API call
ecm_parser - a callable that, given a bytes, parses it into a value of type R.
If there is not enough data to parse yet, it should return None.
If there is enough data to parse, but it is malformed, it should raise an Exception.
"""
self._gem_formatter = gem_formatter
self._gem_parser = gem_parser
self._ecm_formatter = ecm_formatter
self._ecm_parser = ecm_parser
def format(
self,
api_type: ApiType,
arg: T,
serial_number: int | None,
) -> List[bytes]:
if api_type == ApiType.GEM:
result = self._gem_formatter(arg)
if serial_number:
result = result.replace(
ESCAPE_SEQUENCE,
f"{TARGET_SERIAL_NUMBER_PREFIX}{serial_number%100000:05}",
)
return [result.encode()]
elif api_type == ApiType.ECM:
assert self._ecm_formatter
result = self._ecm_formatter(arg)
return result
else:
assert False
def has_parser(self, api_type: ApiType) -> bool:
if api_type == ApiType.GEM:
return self._gem_parser is not None
elif api_type == ApiType.ECM:
return self._ecm_parser is not None
else:
assert False
def parse(self, api_type: ApiType, response: bytes) -> R | None:
if api_type == ApiType.GEM:
return self._gem_parser(response.decode()) if self._gem_parser else None
elif api_type == ApiType.ECM:
return self._ecm_parser(response) if self._ecm_parser else None
else:
assert False
class BidirectionalProtocol(PacketProtocol):
"""Protocol implementation for bi-directional communication with a GreenEye Monitor."""
"""
Create a new BidirectionalProtocol
The passed in queue contains full packets that have been received.
The packet_delay_clear_time plus API_RESPONSE_WAIT_TIME must be less than 15 seconds.
"""
def __init__(
self,
queue: asyncio.Queue[PacketProtocolMessage],
packet_delay_clear_time: timedelta = PACKET_DELAY_CLEAR_TIME_DEFAULT,
send_packet_delay: bool = True,
api_type: ApiType | None = None,
):
# Ensure that the clear time and the response wait time fit within the 15 second packet delay interval that is requested.
assert (packet_delay_clear_time + API_RESPONSE_WAIT_TIME) < timedelta(
seconds=15
)
super().__init__(queue)
self._api_buffer = bytearray()
self.send_packet_delay = send_packet_delay
self._packet_delay_clear_time = packet_delay_clear_time
self._state = ProtocolState.RECEIVING_PACKETS
self._api_call: ApiCall[Any, Any] | None = None
self._api_result: asyncio.Future[Any] | None = None
self._api_type = api_type
self._api_requests: Deque[bytes] = deque()
@property
def packet_delay_clear_time(self) -> timedelta:
return self._packet_delay_clear_time
@property
def api_type(self) -> ApiType:
if self._api_type is None:
if (
self._packet_type == PacketFormatType.ECM_1220
or self._packet_type == PacketFormatType.ECM_1240
):
self._api_type = ApiType.ECM
elif self._packet_type:
self._api_type = ApiType.GEM
result = self._api_type
assert result
return result
@api_type.setter
def api_type(self, type: ApiType) -> None:
self._api_type = type
def unknown_data_received(self, data: bytes) -> None:
if self._state == ProtocolState.SENDING_API_REQUEST:
# We're in the middle of an ECM API call, which
# has multiple roundtrips to send all the request chunks
assert self._api_call
if data.startswith(b"\xfc"):
# ECM acks each chunk with \xfc
if len(self._api_requests) > 0:
self._send_next_api_request_chunk()
else:
# No more chunks means that we've now completely sent the request
self._state = ProtocolState.SENT_API_REQUEST
if self._api_call.has_parser(self.api_type):
# This API call is expecting a response, and
# the ACK character might be immediately followed
# by response data, so we pull off the ACK character
# and fall through to the rest of the method,
# which then pulls the response from the data
data = data[1:]
else:
# Last ACK of a request with no response
self._set_result(result=None)
else:
self._set_result(
                    exception=Exception(f"Bad response from device: {data!r}")
)
if self._state == ProtocolState.SENT_API_REQUEST:
assert self._api_call is not None
self._api_buffer.extend(data)
response = bytes(self._api_buffer)
LOG.debug("%d: Attempting to parse API response: %s", id(self), response)
result = self._api_call.parse(self.api_type, response)
if result:
if self.api_type == ApiType.ECM:
self._ensure_write_transport().write(b"\xfc")
self._set_result(result=result)
elif self._state == ProtocolState.RECEIVING_PACKETS:
super().unknown_data_received(data)
def begin_api_request(self) -> timedelta:
"""
Begin the process of sending an API request.
Calls WriteTransport.write on the associated transport with bytes that need to be sent.
Returns a timedelta. Callers must wait for that amount of time, then call send_api_request with the actual request.
"""
self._expect_state(ProtocolState.RECEIVING_PACKETS)
self._state = ProtocolState.SENT_PACKET_DELAY_REQUEST
if self.api_type == ApiType.GEM and self.send_packet_delay:
LOG.debug("%d: Starting API request. Requesting packet delay...", id(self))
self._ensure_write_transport().write(
CMD_DELAY_NEXT_PACKET.encode()
) # Delay packets for 15 seconds
return self._packet_delay_clear_time
else:
return timedelta(seconds=0)
def invoke_api(
self,
api: ApiCall[T, R],
arg: T,
result: asyncio.Future[R],
serial_number: Optional[int] = None,
) -> None:
"""
Send the given API request, after having called begin_api_request.
Calls WriteTransport.write on the associated transport with bytes that need to be sent.
Returns a timedelta. Callers must wait for that amount of time, then call receive_api_response to receive the response.
"""
self._expect_state(ProtocolState.SENT_PACKET_DELAY_REQUEST)
assert len(self._api_requests) == 0
self._api_call = api
self._api_result = result
self._api_requests.extend(api.format(self.api_type, arg, serial_number))
self._send_next_api_request_chunk()
def _send_next_api_request_chunk(self) -> None:
assert len(self._api_requests) > 0
assert self._api_call
assert self._api_result
request = self._api_requests.popleft()
LOG.debug("%d: Sending API request %s...", id(self), request)
self._ensure_write_transport().write(request)
self._state = (
ProtocolState.SENT_API_REQUEST
if self.api_type == ApiType.GEM
else ProtocolState.SENDING_API_REQUEST
)
if (
not self._api_call.has_parser(self.api_type)
and self.api_type == ApiType.GEM
):
# GEM API calls without a response are just a single send and you're done
# (ECM API calls without a response still have multiple request chunks to send,
# and even the last chunk is still acked with \xfc, so we don't advance the
# state for them here)
assert len(self._api_requests) == 0
self._set_result(result=None)
def _set_result(
self, result: Any | None = None, exception: Exception | None = None
) -> None:
assert (
self._state == ProtocolState.SENT_API_REQUEST
or self._state == ProtocolState.SENDING_API_REQUEST
)
assert self._api_result
assert not self._api_result.done()
if exception is None:
self._state = ProtocolState.RECEIVED_API_RESPONSE
self._api_result.set_result(result)
else:
assert result is None
self._api_result.set_exception(exception)
self._api_result = None
self._api_call = None
def end_api_request(self) -> None:
"""
Ends an API request. Every begin_api_request call must have a matching end_api_request call,
even if an error occurred in between.
"""
self._expect_state(
{
ProtocolState.RECEIVED_API_RESPONSE,
ProtocolState.SENDING_API_REQUEST,
ProtocolState.SENT_API_REQUEST,
ProtocolState.SENT_PACKET_DELAY_REQUEST,
}
)
self._api_buffer.clear()
self._api_call = None
self._api_requests.clear()
self._api_result = None
LOG.debug("%d: Ended API request", id(self))
self._state = ProtocolState.RECEIVING_PACKETS
def _ensure_write_transport(self) -> asyncio.WriteTransport:
transport = self._ensure_transport()
assert isinstance(transport, asyncio.WriteTransport)
return transport
def _expect_state(self, expected_state: Union[ProtocolState, Set[ProtocolState]]):
if not isinstance(expected_state, set):
expected_state = {expected_state}
assert len(expected_state) > 0
if self._state not in expected_state:
raise ProtocolStateException(actual=self._state, expected=expected_state)
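# Hypothetical usage sketch (not part of the original library): it illustrates the
# begin_api_request / invoke_api / end_api_request sequence described in the
# docstrings above. The `protocol` argument is assumed to be a connected instance
# of the protocol class defined in this module; `api` and `arg` are assumed to be a
# valid ApiCall and its argument. Exact names outside this file may differ.
async def _example_invoke_api(protocol, api, arg, serial_number=None):
    delay = protocol.begin_api_request()  # may ask the device to delay packets
    try:
        # Wait for the requested quiet period before sending the actual request.
        await asyncio.sleep(delay.total_seconds())
        result: asyncio.Future = asyncio.get_running_loop().create_future()
        protocol.invoke_api(api, arg, result, serial_number)
        # The future is resolved by _set_result() once the response is parsed.
        return await result
    finally:
        # Every begin_api_request call must be paired with end_api_request.
        protocol.end_api_request()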
|
PypiClean
|
/ThreadFixProAPI-1.0.13.tar.gz/ThreadFixProAPI-1.0.13/_utils/_cicd.py
|
__author__ = "Evan Schlesinger"
__copyright__ = "(C) 2019 Denim group"
__contributors__ = ["Evan Schlesinger"]
__status__ = "Production"
__license__ = "MIT"
import requests
import urllib3
import requests.exceptions
import requests.packages.urllib3
from ._utilities import ThreadFixProResponse
class CICDAPI(object):
def __init__(self, host, api_key, verify_ssl=True, timeout=30, user_agent=None, cert=None, debug=False):
"""
Initialize a ThreadFix Pro CI/CD API instance.
:param host: The URL for the ThreadFix Pro server. (e.g., http://localhost:8080/threadfix/) NOTE: must include http:// TODO: make it so that it is required or implicitly added if forgotten
:param api_key: The API key generated on the ThreadFix Pro API Key page.
:param verify_ssl: Specify if API requests will verify the host's SSL certificate, defaults to true.
:param timeout: HTTP timeout in seconds, default is 30.
:param user_agent: HTTP user agent string, default is "threadfix_pro_api/[version]".
:param cert: You can also specify a local cert to use as client side certificate, as a single file (containing
        the private key and the certificate) or as a tuple of both files' paths
:param debug: Prints requests and responses, useful for debugging.
"""
self.host = host
self.api_key = api_key
self.verify_ssl = verify_ssl
self.timeout = timeout
if not user_agent:
self.user_agent = 'threadfix_pro_api/2.7.5'
else:
self.user_agent = user_agent
self.cert = cert
self.debug = debug # Prints request and response information.
if not self.verify_ssl:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # Disabling SSL warning messages if verification is disabled.
def create_cicd_pass_criteria(self, severity, max_allowed=None, max_introduced=None):
"""
Creates a new CI/CD pass criteria
:param severity: Name of severity
:param max_allowed: The maximum number of vulnerabilities allowed for the Pass Criteria. If no value is specified there is no limit.
:param max_introduced: The maximum number of new vulnerabilities in a scan for the Pass Criteria. If no value is specified there is no limit.
"""
params = {'severity' : severity}
if max_allowed:
params['maxAllowed'] = max_allowed
if max_introduced:
params['maxIntroduced'] = max_introduced
return self._request('POST', 'rest/cicd/passCriteria/create', params)
def update_ci_cd_pass_criteria(self, cicd_id, severity, max_allowed=None, max_introduced=None):
"""
Update CI/CD pass criteria
:param cicd_id: CI/CD identifier
:param severity: Name of severity
:param max_allowed: The maximum number of vulnerabilities allowed for the Pass Criteria. If no value is specified there is no limit.
:param max_introduced: The maximum number of new vulnerabilities in a scan for the Pass Criteria. If no value is specified there is no limit.
"""
params = {'severity' : severity}
if max_allowed:
params['maxAllowed'] = max_allowed
if max_introduced:
params['maxIntroduced'] = max_introduced
return self._request('POST', 'rest/cicd/passCriteria/' + str(cicd_id) + '/update', params)
def list_cicd_pass_criteria(self):
"""
Lists CI/CD pass criteria
"""
return self._request('GET', 'rest/cicd/passCriteria')
def get_cicd_pass_criteria_details(self, cicd_id):
"""
Returns detailed information about the specified CI/CD pass criteria
:param cicd_id: CI/CD identifier
"""
return self._request('GET', 'rest/cicd/passCriteria/' + str(cicd_id) + '/detail')
def delete_cicd_pass_criteria(self, cicd_id):
"""
Deletes the specified CI/CD pass criteria
:param cicd_id: CI/CD identifier
"""
return self._request('DELETE', 'rest/cicd/passCriteria/' + str(cicd_id) + '/delete')
def add_application_to_cicd_pass_criteria(self, pass_criteria_id, application_id):
"""
Attaches the specified application to the specified pass criteria
:param pass_criteria_id: Pass Criteria identifier
:param application_id: Application identifier
"""
return self._request('PUT', 'rest/cicd/passCriteria/' + str(pass_criteria_id) + '/addApplication/' + str(application_id))
def remove_application_from_cicd_pass_criteria(self, pass_criteria_id, application_id):
"""
        Removes the specified application from the specified pass criteria
:param pass_criteria_id: Pass Criteria identifier
:param application_id: Application identifier
"""
return self._request('DELETE', 'rest/cicd/passCriteria/' + str(pass_criteria_id) + '/removeApplication/' + str(application_id))
def evaluate_cicd_pass_criteria(self, application_id, from_date=None, to_date=None):
"""
Checks the specified application against all of the CI/CD pass criteria attached to it
:param application_id: Application identifier
:param from_date: Evaluate against any new open vulnerabilities from this date. If no date is specified, the start date will be December 31, 1969.
The time will be the start of day, 00:00:00. Format as yyyy-MM-dd
        :param to_date: Evaluate against any new open vulnerabilities until this date. If no date is specified, the end date will be the current date.
The time will be the end of day, 23:59:59. Format as yyyy-MM-dd
"""
params = {}
if from_date:
params['fromDate'] = from_date
if to_date:
            params['toDate'] = to_date
return self._request('GET', 'rest/policy/status/application/' + str(application_id) + '/evaluate', params)
def create_cicd_defect_reporter(self, severity, minimum=None, group_by=None):
"""
Creates a new CI/CD defect reporter
:param severity: Name of severity
:param minimum: If true, includes all severities greater than the specified one as well. Default value is false.
:param group_by: How to group vulnerabilities for defects
"""
params = {'severity' : severity}
if minimum:
params['minimum'] = minimum
if group_by:
params['groupBy'] = group_by
return self._request('POST', 'rest/cicd/defectReporting/create', params)
def update_cicd_defect_reporter(self, cicd_id, severity, minimum=None, group_by=None):
"""
        Updates the specified CI/CD defect reporter
:param cicd_id: CI/CD identifier
:param severity: Name of severity
:param minimum: If true, includes all severities greater than the specified one as well. Default value is false.
:param group_by: How to group vulnerabilities for defects
"""
params = {'severity' : severity}
if minimum:
params['minimum'] = minimum
if group_by:
params['groupBy'] = group_by
return self._request('PUT', 'rest/cicd/defectReporting/' + str(cicd_id) + '/update', params)
def list_cicd_defect_reporters(self):
"""
Lists CI/CD defect reporters
"""
return self._request('GET', 'rest/cicd/defectReporting')
def get_cicd_defect_reporter_details(self, cicd_id):
"""
Returns CI/CD defect reporter details
:param cicd_id: CI/CD identifier
"""
return self._request('GET', 'rest/cicd/defectReporting/' + str(cicd_id) + '/detail')
def delete_cicd_defect_reporter(self, cicd_id):
"""
Deletes the CI/CD defect reporter
:param cicd_id: CI/CD identifier
"""
return self._request('DELETE', 'rest/cicd/defectReporting/' + str(cicd_id) + '/delete')
def add_application_defect_tracker_to_cicd_defect_reporter(self, defect_reporter_id, app_defect_tracker_id):
"""
Attaches the specified Application Defect Tracker to the specified CI/CD Defect Reporter
:param defect_reporter_id: Defect Reporter identifier
:param app_defect_tracker_id: App Defect Tracker identifier
"""
return self._request('PUT', 'rest/cicd/defectReporting/' + str(defect_reporter_id) + '/addApplicationDefectTracker/' + str(app_defect_tracker_id))
def remove_application_defect_tracker_to_cicd_defect_reporter(self, defect_reporter_id, app_defect_tracker_id):
"""
        Removes the specified Application Defect Tracker from the specified CI/CD Defect Reporter
:param defect_reporter_id: Defect Reporter identifier
:param app_defect_tracker_id: App Defect Tracker identifier
"""
return self._request('PUT', 'rest/cicd/defectReporting/' + str(defect_reporter_id) + '/removeApplicationDefectTracker/' + str(app_defect_tracker_id))
# Utility
def _request(self, method, url, params=None, files=None):
"""Common handler for all HTTP requests."""
if not params:
params = {}
headers = {
'Accept': 'application/json',
'Authorization': 'APIKEY ' + self.api_key
}
try:
if self.debug:
print(method + ' ' + self.host + url)
print(params)
response = requests.request(method=method, url=self.host + url, params=params, files=files, headers=headers,
timeout=self.timeout, verify=self.verify_ssl, cert=self.cert)
if self.debug:
print(response.status_code)
print(response.text)
try:
json_response = response.json()
message = json_response['message']
success = json_response['success']
response_code = json_response['responseCode']
data = json_response['object']
return ThreadFixProResponse(message=message, success=success, response_code=response_code, data=data)
except ValueError:
return ThreadFixProResponse(message='JSON response could not be decoded.', success=False)
except requests.exceptions.SSLError:
return ThreadFixProResponse(message='An SSL error occurred.', success=False)
except requests.exceptions.ConnectionError:
return ThreadFixProResponse(message='A connection error occurred.', success=False)
except requests.exceptions.Timeout:
return ThreadFixProResponse(message='The request timed out after ' + str(self.timeout) + ' seconds.',
success=False)
except requests.exceptions.RequestException:
return ThreadFixProResponse(message='There was an error while handling the request.', success=False)
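# Hypothetical usage sketch (not part of the original module): shows how the CICDAPI
# class above can be driven. The host, API key, and application id are placeholders;
# each call returns a ThreadFixProResponse as built in _request().
if __name__ == '__main__':
    api = CICDAPI(host='http://localhost:8080/threadfix/', api_key='YOUR_API_KEY', verify_ssl=False)
    # Create a pass criteria that allows at most 5 vulnerabilities of the given severity.
    created = api.create_cicd_pass_criteria(severity='Critical', max_allowed=5)
    print(created.success, created.message)
    # Evaluate an application (id 42 is a placeholder) against its attached pass criteria.
    evaluation = api.evaluate_cicd_pass_criteria(application_id=42, from_date='2019-01-01')
    print(evaluation.data)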
|
PypiClean
|
/ctec_consumer-0.5.1.tar.gz/ctec_consumer-0.5.1/README.md
|
# E-channel RabbitMQ Consumer
## Environment
`Python2` or `Python3`
- `gevent`
- `pika`
- `kazoo`
- `psutil`
## Usage Guide
### Worker Function Development Guide
- The worker function takes exactly one argument, a WorkerMessage object
- The function must explicitly return a processing response code; the following are currently supported:
  - CONSUME_SUCCESS: processing succeeded
  - CONSUME_REDELIVER: processing failed, redeliver the message
  - CONSUME_REJECT: processing failed, discard the message
### Thread Version Usage Guide
```python
import ctec_consumer
import ctec_consumer.consumer_log as ctec_logging
from ctec_consumer.dummy import consumer

# Logger configuration, method 1:
# Parameter 1: application name
# Parameter 2: log path
# Parameter 3: whether to enable DEBUG, disabled by default.
ctec_logging.APP_NAME = 'app_name'
ctec_logging.LOG_PATH = '/opt/logs'
ctec_logging.DEBUG = True

# Define the worker function, which receives a single Message object argument.
# The function must return one of the fixed response codes; see the previous section for the valid values.
# The structure of the worker_message object is described below.
def worker(worker_message):
    print(worker_message.body)
    # Processing logic....
    return ctec_consumer.CONSUME_SUCCESS

try:
    # Logger creation, method 2: used for writing logs. If not specified, output goes to STDOUT by default.
    # Parameter 1: application name
    # Parameter 2: log path
    # Parameter 3: whether to enable DEBUG, disabled by default.
    logger = ctec_logging.get_logger('app_name', '/opt/logs', debug=True)
    # Set the Zookeeper address for the metrics collection thread; this can be skipped if the collection thread is disabled.
    data_agent.ZOOKEEPER_HOSTS = '127.0.0.1:2188'
    # Create the consumer object
    # Parameter 1: AMQP address of the queue
    # Parameter 2: queue name
    # All of the following parameters can be omitted
    # Parameter 3: logger object, defaults to STDOUT
    # Parameter 4: maximum number of messages the consumer prefetches, defaults to 30
    # Parameter 5: number of threads, defaults to 5
    # Parameter 6: heartbeat interval, defaults to 30 seconds
    # Parameter 7: consumer tag, defaults to None
    # Parameter 8: is_rpc, whether this is an RPC consumer, defaults to False
    # Parameter 9: agent, whether to start the monitoring thread, defaults to True
    # Parameter 10: app_name, application name, used by the monitoring thread, defaults to None
    # Parameter 11: app_desc, application description, used by the monitoring thread, defaults to None
    c = consumer.Consumer('amqp://smallrabbit:[email protected]:5673/journal',
                          'q.journal.loginsync.save',
                          logger)
    # Register the worker function
    c.register_worker(worker)
    c.run()
except Exception as e:
    print(e)
except KeyboardInterrupt:
    c.stop()
```
### Process Version Usage Guide
Usage is the same as the thread version, except that the import `from ctec_consumer.dummy import consumer` is replaced with `from ctec_consumer import consumer`.
**Note**: The process version cannot currently be used on Python versions below 3.
### Gevent Version Usage Guide
Usage is the same as the thread version, except that the import `from ctec_consumer.dummy import consumer` is replaced with `from ctec_consumer.gevent import consumer`.
### The WorkerMessage Object
The object has the following member variables:
- `body`: message content
- `basic_deliver`: `pika.Spec.Basic.Deliver`
- `basic_properties`: `pika.Spec.BasicProperties`
- `delivery_tag`
- `properties`
- `is_publish`: whether to forward the message
- `exchange`: the exchange to forward to
- `routing_key`: the routing key to forward with
- `publish_properties`: the properties of the forwarded message
- `publish_body`: the body of the forwarded message, defaults to this message's `body` attribute
## Batch Consumption Usage Guide
**Batch consumption is currently only supported by the async client**
### Example Code
```python
import ctec_consumer
import ctec_consumer.consumer_log as ctec_logging
from ctec_consumer.async import consumer

# Logger configuration, method 1:
# Parameter 1: application name
# Parameter 2: log path
# Parameter 3: whether to enable DEBUG, disabled by default.
ctec_logging.APP_NAME = 'app_name'
ctec_logging.LOG_PATH = '/opt/logs'
ctec_logging.DEBUG = True

# Define the worker function, which receives a single argument: a list of WorkerMessage objects.
# The function must return one of the fixed response codes; see the previous section for the valid values.
# The structure of the WorkerMessage object is described below.
def worker(messages):
    print(messages[0].body)
    # Processing logic....
    return ctec_consumer.CONSUME_SUCCESS

try:
    # Logger creation, method 2: used for writing logs. If not specified, output goes to STDOUT by default.
    # Parameter 1: application name
    # Parameter 2: log path
    # Parameter 3: whether to enable DEBUG, disabled by default.
    logger = ctec_logging.get_logger('app_name', '/opt/logs', debug=True)
    # Set the Zookeeper address for the metrics collection thread; this can be skipped if the collection thread is disabled.
    data_agent.ZOOKEEPER_HOSTS = '127.0.0.1:2188'
    # Create the consumer object
    # Parameter 1: AMQP address of the queue
    # Parameter 2: queue name
    # Parameter 3 (optional): logger object, defaults to STDOUT
    # Parameter 4 (optional): maximum number of messages the consumer prefetches, defaults to 30
    # Parameter 5 (optional): number of threads, defaults to 5
    # Parameter 6 (optional): heartbeat interval, defaults to 30 seconds
    # Parameter 7 (optional): consumer tag, defaults to None
    # Parameter 8 (optional): whether this is an RPC request, defaults to False
    # Parameter 9 (optional): number of messages consumed per batch, defaults to 1
    # Parameter 10 (optional): agent, whether to start the monitoring thread, defaults to True
    # Parameter 11 (optional): app_name, application name, used by the monitoring thread, defaults to None
    # Parameter 12 (optional): app_desc, application description, used by the monitoring thread, defaults to None
    c = consumer.Consumer('amqp://smallrabbit:[email protected]:5673/journal', 'q.journal.loginsync.save', logger)
    # Register the worker function
    c.register_worker(worker)
    c.run()
except Exception as e:
    print(e)
except KeyboardInterrupt:
    c.stop()
```
### WorkerMessage Object Properties
- `body`: message content
- `basic_deliver`: `pika.Spec.Basic.Deliver`
- `basic_properties`: `pika.Spec.BasicProperties`
- `delivery_tag`
- `properties`
- `is_publish`: whether to forward the message
- `exchange`: the exchange to forward to
- `routing_key`: the routing key to forward with
- `publish_properties`: the properties of the forwarded message
- `publish_body`: the body of the forwarded message, defaults to this message's `body` attribute
## Message Forwarding
A single message can be forwarded to other queues.
While processing a message, set `is_publish` to True and set the related forwarding parameters, as shown in the sketch below.
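A minimal sketch of a forwarding worker, based on the WorkerMessage attributes listed above (the exchange and routing key names are placeholders):
```python
import ctec_consumer

def forwarding_worker(worker_message):
    # Forward this message to another queue in addition to normal processing.
    worker_message.is_publish = True
    worker_message.exchange = 'x.forward.example'       # placeholder exchange
    worker_message.routing_key = 'forward.example.key'  # placeholder routing key
    worker_message.publish_body = worker_message.body   # defaults to the original body
    return ctec_consumer.CONSUME_SUCCESS
```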
## Application Status Monitoring
If the monitoring thread is enabled (it is enabled by default), the consumer's health and monitoring information is uploaded to Zookeeper once per minute. For consumers deployed in Docker, it is recommended to set the `HOST_IP` environment variable inside the container so that the host IP can be reported correctly.
The monitoring information currently includes:
- Application name
- Application description
- Deployment path
- Information about the consumed queues
- CPU usage per minute, kept for 24 hours
- Memory usage per minute, kept for 24 hours
- Current thread count
- Number of messages consumed per minute, kept for 24 hours
- IP of the host where the consumer is deployed
Staging 1 Zookeeper address: 172.16.50.216:2181
Staging 2 Zookeeper address: 10.128.91.87:2181
Production Zookeeper address: 10.128.2.93:2181,10.128.2.94:2181,10.128.2.95:2181
## Stopping the Consumer
The consumer registers handlers for signals 2 and 15 (SIGINT and SIGTERM). For the thread version you can send signal 2 or 15 directly to the consumer process, for example: `kill -15 <pid>`
The process version requires the signal to be sent to the process group, for example: `kill -- -<pid>`
## FAQ
- cx_Oracle occasionally causes the process to exit with an error like OCI xxxxxxxxxxxxxx
  When initializing the Connection or SessionPool, set the `threaded` parameter to `True`
|
PypiClean
|
/collective.js.ckeditor-4.0.3.zip/collective.js.ckeditor-4.0.3/collective/js/ckeditor/resources/lang/en-ca.js
|
/*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang['en-ca']={"dir":"ltr","editor":"Rich Text Editor","common":{"editorHelp":"Press ALT 0 for help","browseServer":"Browse Server","url":"URL","protocol":"Protocol","upload":"Upload","uploadSubmit":"Send it to the Server","image":"Image","flash":"Flash","form":"Form","checkbox":"Checkbox","radio":"Radio Button","textField":"Text Field","textarea":"Textarea","hiddenField":"Hidden Field","button":"Button","select":"Selection Field","imageButton":"Image Button","notSet":"<not set>","id":"Id","name":"Name","langDir":"Language Direction","langDirLtr":"Left to Right (LTR)","langDirRtl":"Right to Left (RTL)","langCode":"Language Code","longDescr":"Long Description URL","cssClass":"Stylesheet Classes","advisoryTitle":"Advisory Title","cssStyle":"Style","ok":"OK","cancel":"Cancel","close":"Close","preview":"Preview","resize":"Resize","generalTab":"General","advancedTab":"Advanced","validateNumberFailed":"This value is not a number.","confirmNewPage":"Any unsaved changes to this content will be lost. Are you sure you want to load new page?","confirmCancel":"Some of the options have been changed. Are you sure to close the dialog?","options":"Options","target":"Target","targetNew":"New Window (_blank)","targetTop":"Topmost Window (_top)","targetSelf":"Same Window (_self)","targetParent":"Parent Window (_parent)","langDirLTR":"Left to Right (LTR)","langDirRTL":"Right to Left (RTL)","styles":"Style","cssClasses":"Stylesheet Classes","width":"Width","height":"Height","align":"Align","alignLeft":"Left","alignRight":"Right","alignCenter":"Centre","alignTop":"Top","alignMiddle":"Middle","alignBottom":"Bottom","invalidValue":"Invalid value.","invalidHeight":"Height must be a number.","invalidWidth":"Width must be a number.","invalidCssLength":"Value specified for the \"%1\" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).","invalidHtmlLength":"Value specified for the \"%1\" field must be a positive number with or without a valid HTML measurement unit (px or %).","invalidInlineStyle":"Value specified for the inline style must consist of one or more tuples with the format of \"name : value\", separated by semi-colons.","cssLengthTooltip":"Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).","unavailable":"%1<span class=\"cke_accessibility\">, unavailable</span>"},"about":{"copy":"Copyright © $1. All rights reserved.","dlgTitle":"About CKEditor","help":"Check $1 for help.","moreInfo":"For licensing information please visit our web site:","title":"About CKEditor","userGuide":"CKEditor User's Guide"},"basicstyles":{"bold":"Bold","italic":"Italic","strike":"Strike Through","subscript":"Subscript","superscript":"Superscript","underline":"Underline"},"blockquote":{"toolbar":"Block Quote"},"clipboard":{"copy":"Copy","copyError":"Your browser security settings don't permit the editor to automatically execute copying operations. Please use the keyboard for that (Ctrl/Cmd+C).","cut":"Cut","cutError":"Your browser security settings don't permit the editor to automatically execute cutting operations. Please use the keyboard for that (Ctrl/Cmd+X).","paste":"Paste","pasteArea":"Paste Area","pasteMsg":"Please paste inside the following box using the keyboard (<strong>Ctrl/Cmd+V</strong>) and hit OK","securityMsg":"Because of your browser security settings, the editor is not able to access your clipboard data directly. 
You are required to paste it again in this window.","title":"Paste"},"contextmenu":{"options":"Context Menu Options"},"toolbar":{"toolbarCollapse":"Collapse Toolbar","toolbarExpand":"Expand Toolbar","toolbarGroups":{"document":"Document","clipboard":"Clipboard/Undo","editing":"Editing","forms":"Forms","basicstyles":"Basic Styles","paragraph":"Paragraph","links":"Links","insert":"Insert","styles":"Styles","colors":"Colors","tools":"Tools"},"toolbars":"Editor toolbars"},"elementspath":{"eleLabel":"Elements path","eleTitle":"%1 element"},"list":{"bulletedlist":"Insert/Remove Bulleted List","numberedlist":"Insert/Remove Numbered List"},"indent":{"indent":"Increase Indent","outdent":"Decrease Indent"},"format":{"label":"Format","panelTitle":"Paragraph Format","tag_address":"Address","tag_div":"Normal (DIV)","tag_h1":"Heading 1","tag_h2":"Heading 2","tag_h3":"Heading 3","tag_h4":"Heading 4","tag_h5":"Heading 5","tag_h6":"Heading 6","tag_p":"Normal","tag_pre":"Formatted"},"horizontalrule":{"toolbar":"Insert Horizontal Line"},"image":{"alertUrl":"Please type the image URL","alt":"Alternative Text","border":"Border","btnUpload":"Send it to the Server","button2Img":"Do you want to transform the selected image button on a simple image?","hSpace":"HSpace","img2Button":"Do you want to transform the selected image on a image button?","infoTab":"Image Info","linkTab":"Link","lockRatio":"Lock Ratio","menu":"Image Properties","resetSize":"Reset Size","title":"Image Properties","titleButton":"Image Button Properties","upload":"Upload","urlMissing":"Image source URL is missing.","vSpace":"VSpace","validateBorder":"Border must be a whole number.","validateHSpace":"HSpace must be a whole number.","validateVSpace":"VSpace must be a whole number."},"fakeobjects":{"anchor":"Anchor","flash":"Flash Animation","hiddenfield":"Hidden Field","iframe":"IFrame","unknown":"Unknown Object"},"link":{"acccessKey":"Access Key","advanced":"Advanced","advisoryContentType":"Advisory Content Type","advisoryTitle":"Advisory Title","anchor":{"toolbar":"Anchor","menu":"Edit Anchor","title":"Anchor Properties","name":"Anchor Name","errorName":"Please type the anchor name","remove":"Remove Anchor"},"anchorId":"By Element Id","anchorName":"By Anchor Name","charset":"Linked Resource Charset","cssClasses":"Stylesheet Classes","emailAddress":"E-Mail Address","emailBody":"Message Body","emailSubject":"Message Subject","id":"Id","info":"Link Info","langCode":"Language Code","langDir":"Language Direction","langDirLTR":"Left to Right (LTR)","langDirRTL":"Right to Left (RTL)","menu":"Edit Link","name":"Name","noAnchors":"(No anchors available in the document)","noEmail":"Please type the e-mail address","noUrl":"Please type the link URL","other":"<other>","popupDependent":"Dependent (Netscape)","popupFeatures":"Popup Window Features","popupFullScreen":"Full Screen (IE)","popupLeft":"Left Position","popupLocationBar":"Location Bar","popupMenuBar":"Menu Bar","popupResizable":"Resizable","popupScrollBars":"Scroll Bars","popupStatusBar":"Status Bar","popupToolbar":"Toolbar","popupTop":"Top Position","rel":"Relationship","selectAnchor":"Select an Anchor","styles":"Style","tabIndex":"Tab Index","target":"Target","targetFrame":"<frame>","targetFrameName":"Target Frame Name","targetPopup":"<popup window>","targetPopupName":"Popup Window Name","title":"Link","toAnchor":"Link to anchor in the text","toEmail":"E-mail","toUrl":"URL","toolbar":"Link","type":"Link Type","unlink":"Unlink","upload":"Upload"},"magicline":{"title":"Insert paragraph 
here"},"maximize":{"maximize":"Maximize","minimize":"Minimize"},"pastetext":{"button":"Paste as plain text","title":"Paste as Plain Text"},"pastefromword":{"confirmCleanup":"The text you want to paste seems to be copied from Word. Do you want to clean it before pasting?","error":"It was not possible to clean up the pasted data due to an internal error","title":"Paste from Word","toolbar":"Paste from Word"},"removeformat":{"toolbar":"Remove Format"},"sourcearea":{"toolbar":"Source"},"specialchar":{"options":"Special Character Options","title":"Select Special Character","toolbar":"Insert Special Character"},"scayt":{"about":"About SCAYT","aboutTab":"About","addWord":"Add Word","allCaps":"Ignore All-Caps Words","dic_create":"Create","dic_delete":"Delete","dic_field_name":"Dictionary name","dic_info":"Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type its name and click the Restore button.","dic_rename":"Rename","dic_restore":"Restore","dictionariesTab":"Dictionaries","disable":"Disable SCAYT","emptyDic":"Dictionary name should not be empty.","enable":"Enable SCAYT","ignore":"Ignore","ignoreAll":"Ignore All","ignoreDomainNames":"Ignore Domain Names","langs":"Languages","languagesTab":"Languages","mixedCase":"Ignore Words with Mixed Case","mixedWithDigits":"Ignore Words with Numbers","moreSuggestions":"More suggestions","opera_title":"Not supported by Opera","options":"Options","optionsTab":"Options","title":"Spell Check As You Type","toggle":"Toggle SCAYT","noSuggestions":"No suggestion"},"stylescombo":{"label":"Styles","panelTitle":"Formatting Styles","panelTitle1":"Block Styles","panelTitle2":"Inline Styles","panelTitle3":"Object Styles"},"table":{"border":"Border size","caption":"Caption","cell":{"menu":"Cell","insertBefore":"Insert Cell Before","insertAfter":"Insert Cell After","deleteCell":"Delete Cells","merge":"Merge Cells","mergeRight":"Merge Right","mergeDown":"Merge Down","splitHorizontal":"Split Cell Horizontally","splitVertical":"Split Cell Vertically","title":"Cell Properties","cellType":"Cell Type","rowSpan":"Rows Span","colSpan":"Columns Span","wordWrap":"Word Wrap","hAlign":"Horizontal Alignment","vAlign":"Vertical Alignment","alignBaseline":"Baseline","bgColor":"Background Color","borderColor":"Border Color","data":"Data","header":"Header","yes":"Yes","no":"No","invalidWidth":"Cell width must be a number.","invalidHeight":"Cell height must be a number.","invalidRowSpan":"Rows span must be a whole number.","invalidColSpan":"Columns span must be a whole number.","chooseColor":"Choose"},"cellPad":"Cell padding","cellSpace":"Cell spacing","column":{"menu":"Column","insertBefore":"Insert Column Before","insertAfter":"Insert Column After","deleteColumn":"Delete Columns"},"columns":"Columns","deleteTable":"Delete Table","headers":"Headers","headersBoth":"Both","headersColumn":"First column","headersNone":"None","headersRow":"First Row","invalidBorder":"Border size must be a number.","invalidCellPadding":"Cell padding must be a number.","invalidCellSpacing":"Cell spacing must be a number.","invalidCols":"Number of columns must be a number greater than 0.","invalidHeight":"Table height must be a number.","invalidRows":"Number of rows must be a number greater than 
0.","invalidWidth":"Table width must be a number.","menu":"Table Properties","row":{"menu":"Row","insertBefore":"Insert Row Before","insertAfter":"Insert Row After","deleteRow":"Delete Rows"},"rows":"Rows","summary":"Summary","title":"Table Properties","toolbar":"Table","widthPc":"percent","widthPx":"pixels","widthUnit":"width unit"},"undo":{"redo":"Redo","undo":"Undo"},"wsc":{"btnIgnore":"Ignore","btnIgnoreAll":"Ignore All","btnReplace":"Replace","btnReplaceAll":"Replace All","btnUndo":"Undo","changeTo":"Change to","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"Spell checker not installed. Do you want to download it now?","manyChanges":"Spell check complete: %1 words changed","noChanges":"Spell check complete: No words changed","noMispell":"Spell check complete: No misspellings found","noSuggestions":"- No suggestions -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"Not in dictionary","oneChange":"Spell check complete: One word changed","progress":"Spell check in progress...","title":"Spell Check","toolbar":"Check Spelling"}};
|
PypiClean
|
/py_to_mindustry-0.0.4.tar.gz/py_to_mindustry-0.0.4/README.md
|
# py_to_mindustry
Translator from Python to Mindustry processor language
The following features are currently supported:
- All built-in commands in Mindustry
- Arbitrarily complex mathematical and logical expressions
- Following keywords: `False`, `None`, `True`, `and`, `break`, `continue`, `def`, `elif`, `else`, `for`, `global`, `if`, `is`, `lambda`, `not`, `or`, `pass`, `return`, `while`
# Usage
```
python -m pip install py_to_mindustry
```
```py
from py_to_mindustry import py_to_mindustry
print(py_to_mindustry(your_program_text))
```
# How to use Mindustry commands
...
|
PypiClean
|
/py_mess_server_by_rufus-0.0.1-py3-none-any.whl/server/common/decos.py
|
import socket
import logging
import sys
sys.path.append('../')
import logs.config_client_log
import logs.config_server_log
# Determine which module started the application (the launch source).
if sys.argv[0].find('client_dist') == -1:
    # if it is not the client, it is the server
    logger = logging.getLogger('server_dist')
else:
    # otherwise it is the client
    logger = logging.getLogger('client_dist')


def log(func_to_log):
    """
    Decorator that logs function calls.
    Records debug-level events containing the name of the called
    function, the arguments the function was called with,
    and the module that called the function.
    """
    def log_saver(*args, **kwargs):
        logger.debug(
            f'Function {func_to_log.__name__} was called with arguments {args}, {kwargs}. '
            f'Called from module {func_to_log.__module__}')
        ret = func_to_log(*args, **kwargs)
        return ret
    return log_saver


def login_required(func):
    """
    Decorator that checks that the client is authorized on the server.
    Verifies that the socket object being passed is in the list of
    authorized clients, except when the message being passed is an
    authorization (presence) request. If the client is not authorized,
    a TypeError exception is raised.
    """
    def checker(*args, **kwargs):
        # Check that the first argument is a MessageProcessor instance.
        # The import has to happen here, otherwise we get a circular import error.
        from server_side.core import MessageProcessor
        from common.variables import ACTION, PRESENCE
        if isinstance(args[0], MessageProcessor):
            found = False
            for arg in args:
                if isinstance(arg, socket.socket):
                    # Check that this socket is present in the names list of the
                    # MessageProcessor class
                    for client in args[0].names:
                        if args[0].names[client] == arg:
                            found = True
            # Now check that the arguments do not contain a presence
            # message. If it is a presence message, allow it.
            for arg in args:
                if isinstance(arg, dict):
                    if ACTION in arg and arg[ACTION] == PRESENCE:
                        found = True
            # If the client is not authorized and this is not the start of an
            # authorization, raise an exception.
            if not found:
                raise TypeError
        return func(*args, **kwargs)
    return checker
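# Hypothetical usage sketch (not part of the original module): @log writes a debug
# record for every call of the wrapped function, including its arguments and the
# calling module; @login_required is intended for MessageProcessor handler methods
# that receive the client socket. The function below is purely illustrative.
if __name__ == '__main__':
    @log
    def example_multiply(first, second):
        """Small demo function; each call is logged with its arguments."""
        return first * second

    print(example_multiply(3, 4))  # the call itself is written to the debug log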
|
PypiClean
|
/graphite-datasets-1.0.5.tar.gz/graphite-datasets-1.0.5/tensorflow_datasets/summarization/booksum/booksum.py
|
import json
import logging
import os
from typing import Dict, Iterator, Text, Tuple
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """
BookSum: A Collection of Datasets for Long-form Narrative Summarization
This implementation currently only supports book and chapter summaries.
GitHub: https://github.com/salesforce/booksum
"""
_CITATION = """\
@article{kryscinski2021booksum,
title={BookSum: A Collection of Datasets for Long-form Narrative Summarization},
author={Wojciech Kry{\'s}ci{\'n}ski and Nazneen Rajani and Divyansh Agarwal and Caiming Xiong and Dragomir Radev},
year={2021},
eprint={2105.08209},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DOCUMENT = "document"
_SUMMARY = "summary"
_SPLIT_FILENAMES = {
"book": {
"train": "book_summaries_aligned_train.jsonl",
"validation": "book_summaries_aligned_val.jsonl",
"test": "book_summaries_aligned_test.jsonl",
},
"chapter": {
"train": "chapter_summary_aligned_train_split.jsonl",
"validation": "chapter_summary_aligned_val_split.jsonl",
"test": "chapter_summary_aligned_test_split.jsonl",
},
}
class BooksumConfig(tfds.core.BuilderConfig):
"""BuilderConfig for BooksumConfig."""
def __init__(self, *, granularity=None, **kwargs):
"""BuilderConfig for BooksumConfig.
Args:
granularity: str ("book", "chapter")
**kwargs: keyword arguments forwarded to super.
"""
super(BooksumConfig, self).__init__(**kwargs)
self.granularity = granularity
class Booksum(tfds.core.GeneratorBasedBuilder):
"""Booksum dataset builder."""
VERSION = tfds.core.Version("1.0.0")
RELEASE_NOTES = {
"1.0.0": "Initial release.",
}
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
1) Go to https://github.com/salesforce/booksum, and run steps 1-3. Place the
whole `booksum` git project in the manual folder.
2) Download the chapterized books from https://storage.cloud.google.com/sfr-books-dataset-chapters-research/all_chapterized_books.zip
and unzip to the manual folder.
The manual folder should contain the following directories:
- `booksum/`
- `all_chapterized_books/`
Note: Because the BookSum dataset is based on the availability of web-scraped
data and may be incomplete, the `_generate_examples` method will automatically
skip missing entries.
"""
BUILDER_CONFIGS = [
BooksumConfig(
name="book",
description="Book-level summarization",
granularity="book",
),
BooksumConfig(
name="chapter",
description="chapter-level summarization",
granularity="chapter",
),
]
def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
_DOCUMENT: tfds.features.Text(),
_SUMMARY: tfds.features.Text(),
}),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://github.com/salesforce/booksum",
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
granularity = self._builder_config.granularity
alignments_base_path = os.path.join(
dl_manager.manual_dir, "booksum", "alignments",
f"{granularity}-level-summary-alignments")
return {
"train":
self._generate_examples(
alignments_path=os.path.join(
alignments_base_path,
_SPLIT_FILENAMES[granularity]["train"]),
base_path=dl_manager.manual_dir,
granularity=granularity),
"validation":
self._generate_examples(
alignments_path=os.path.join(
alignments_base_path,
_SPLIT_FILENAMES[granularity]["validation"]),
base_path=dl_manager.manual_dir,
granularity=granularity),
"test":
self._generate_examples(
alignments_path=os.path.join(
alignments_base_path,
_SPLIT_FILENAMES[granularity]["test"]),
base_path=dl_manager.manual_dir,
granularity=granularity),
}
def _generate_examples(
self,
alignments_path: tfds.typing.PathLike,
base_path: tfds.typing.PathLike,
granularity: Text,
) -> Iterator[Tuple[Text, Dict[Text, Text]]]:
"""Yields examples."""
with tf.io.gfile.GFile(alignments_path, "r") as f:
for i, line in enumerate(f.read().strip().splitlines()):
example_data = json.loads(line)
input_path = os.path.join(base_path,
example_data[f"{granularity}_path"])
summary_path = os.path.join(base_path, "booksum", "scripts",
example_data["summary_path"])
if not tf.io.gfile.exists(input_path):
logging.info("Skipping missing input: %s", input_path)
continue
if not tf.io.gfile.exists(summary_path):
logging.info("Skipping missing summary: %s", summary_path)
continue
with tf.io.gfile.GFile(input_path, "r") as f:
input_text = f.read().strip()
with tf.io.gfile.GFile(summary_path, "r") as f:
summary_text = " ".join(json.loads(f.read())["summary"]).strip()
yield str(i), {
_DOCUMENT: input_text,
_SUMMARY: summary_text,
}
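# Hypothetical usage sketch (not part of the original module): after placing the
# `booksum/` checkout and `all_chapterized_books/` in the TFDS manual directory as
# described in MANUAL_DOWNLOAD_INSTRUCTIONS, the dataset builds like any other TFDS
# dataset. The manual_dir path below is a placeholder.
if __name__ == "__main__":
    builder = Booksum(config="chapter")  # or config="book"
    builder.download_and_prepare(
        download_config=tfds.download.DownloadConfig(
            manual_dir="/path/to/manual_dir"))
    ds = builder.as_dataset(split="train")
    for ex in ds.take(1):
        print(ex[_DOCUMENT].numpy()[:200], ex[_SUMMARY].numpy()[:200])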
|
PypiClean
|
/permedcoe-0.0.11.tar.gz/permedcoe-0.0.11/README.md
|
# HPC/Exascale Centre of Excellence in Personalised Medicine
## Base Building Block
This package provides the base for all **Building Blocks (BBs)** developed in the **HPC/Exascale Centre of Excellence in Personalised Medicine** ([PerMedCoE](https://permedcoe.eu/)) project.
## Table of Contents
- [HPC/Exascale Centre of Excellence in Personalised Medicine](#hpcexascale-centre-of-excellence-in-personalised-medicine)
- [Base Building Block](#base-building-block)
- [Table of Contents](#table-of-contents)
- [User instructions](#user-instructions)
- [Requirements](#requirements)
- [Installation](#installation)
- [Command line](#command-line)
- [Option execute:](#option-execute)
- [Option template:](#option-template)
- [Option deploy:](#option-deploy)
- [Public API](#public-api)
- [Uninstall](#uninstall)
- [Developer instructions](#developer-instructions)
- [Building block](#building-block)
- [License](#license)
- [Contact](#contact)
## User instructions
### Requirements
- Python >= 3.6
- [Singularity](https://singularity.lbl.gov/docs-installation)
### Installation
There are two ways to install this package (from Pypi and manually):
- From Pypi:
This package is publicly available in Pypi:
```shell
pip install permedcoe
```
or more specifically:
```shell
python3 -m pip install permedcoe
```
- From source code:
This package provides an automatic installation script:
```shell
./install.sh
```
### Command line
This package provides the `permedcoe` command:
```shell
$ permedcoe -h
usage: permedcoe [-h] [-d] [-l {debug,info,warning,error,critical}]
{execute,x,template,t,deploy,d} ...
positional arguments:
{execute,x,template,t,deploy,d}
execute (x) Execute a building block.
template (t) Shows an example of the requested template.
deploy (d) Download and deploy the requested workflow or building block.
options:
-h, --help show this help message and exit
-d, --debug Enable debug mode. Overrides log_level (default: False)
-l {debug,info,warning,error,critical}, --log_level {debug,info,warning,error,critical}
Set logging level. (default: error)
```
#### Option execute:
- It allows executing single building blocks or applications:
```shell
$ permedcoe execute -h
usage: permedcoe execute [-h] {building_block,bb,application,app} ...
positional arguments:
{building_block,bb,application,app}
building_block (bb)
Execute a building block.
application (app) Execute an application.
optional arguments:
-h, --help show this help message and exit
```
- In particular for building blocks:
```shell
$ permedcoe execute building_block -h
usage: permedcoe execute building_block [-h] name [parameters ...]
positional arguments:
name Building Block to execute
parameters Building Block parameters (default: None)
options:
-h, --help show this help message and exit
```
Specifying the particular building block to execute (must be installed), provides more detailed information:
```shell
$ permedcoe execute building_block MaBoSS_BB -h
usage: permedcoe [-h] [-c CONFIG] [-d] [-l {debug,info,warning,error,critical}] [--tmpdir TMPDIR]
[--processes PROCESSES] [--gpus GPUS] [--memory MEMORY] [--mount_points MOUNT_POINTS]
{default,sensitivity} ...
This building block uses MaBoSS to screen all the possible knockouts of a given Boolean model. It
produces a candidate gene list formatted as a text file (single gene per row). More information on
MaBoSS can be found in [Stoll G. et al. (2017)](https://academic.oup.com/bioinformatics/article-
lookup/doi/10.1093/bioinformatics/btx123) and in the [MaBoSS GitHub
repository](https://github.com/maboss-bkmc/MaBoSS-env-2.0).
positional arguments:
{default,sensitivity}
options:
-h, --help show this help message and exit
-c CONFIG, --config CONFIG
(CONFIG) Configuration file path
-d, --debug Enable Building Block debug mode. Overrides log_level
-l {debug,info,warning,error,critical}, --log_level {debug,info,warning,error,critical}
Set logging level
--tmpdir TMPDIR Temp directory to be mounted in the container
--processes PROCESSES
Number of processes for MPI executions
--gpus GPUS Requirements for GPU jobs
--memory MEMORY Memory requirement
--mount_points MOUNT_POINTS
Comma separated alias:folder to be mounted in the container
```
- In particular for applications:
```shell
$ permedcoe execute application -h
usage: permedcoe execute application [-h] [-w {none,pycompss,nextflow,snakemake}]
[-f FLAGS [FLAGS ...]]
name [parameters [parameters ...]]
positional arguments:
name Application to execute
parameters Application parameters (default: None)
optional arguments:
-h, --help show this help message and exit
-w {none,pycompss,nextflow,snakemake}, --workflow_manager {none,pycompss,nextflow,snakemake}
Workflow manager to use (default: none)
-f FLAGS [FLAGS ...], --flags FLAGS [FLAGS ...]
Workflow manager flags (default: None)
```
#### Option template:
- It allows creating a skeleton of a building block or an application:
```shell
$ permedcoe template -h
usage: permedcoe template [-h] [-t {all,pycompss,nextflow,snakemake}]
{bb,building_block,app,application} name
positional arguments:
{bb,building_block,app,application}
Creates a Building Block or Application template.
name Building Block or Application name.
optional arguments:
-h, --help show this help message and exit
-t {all,pycompss,nextflow,snakemake}, --type {all,pycompss,nextflow,snakemake}
Application type. (default: all)
```
#### Option deploy:
- It allows deploying an existing Building Block or Workflow on a **local machine (e.g. a laptop)**:
```shell
$ permedcoe deploy -h
usage: permedcoe deploy [-h] {building_block,bb,workflow,wf} ...
positional arguments:
{building_block,bb,workflow,wf}
building_block (bb)
A specific building block.
workflow (wf) A specific workflow.
options:
-h, --help show this help message and exit
```
For deployment on supercomputers, please contact PerMedCoE: <https://permedcoe.eu/contact/>.
### Public API
The `permedcoe` package provides a set of public decorators, parameter type definitions, and functions to be used in the Building Block implementation; a combined usage sketch is shown after the lists below.
- Public decorators:
```python
from permedcoe import container
from permedcoe import constraint
from permedcoe import binary
from permedcoe import mpi
from permedcoe import task
```
- Parameter type definition:
```python
from permedcoe import Type
from permedcoe import FILE_IN
from permedcoe import FILE_OUT
from permedcoe import FILE_INOUT
from permedcoe import DIRECTORY_IN
from permedcoe import DIRECTORY_OUT
from permedcoe import DIRECTORY_INOUT
from permedcoe import StdIOStream
from permedcoe import STDIN
from permedcoe import STDOUT
from permedcoe import STDERR
```
- Functions:
```python
from permedcoe import get_environment
from permedcoe import set_debug
from permedcoe import invoker
```
- Classes:
```python
from permedcoe import Arguments
```
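A minimal sketch of how these pieces are typically combined in a Building Block. The container image path, binary name, and decorator keyword arguments below are illustrative assumptions; check the PerMedCoE documentation and templates linked below for the exact signatures:
```python
from permedcoe import container, binary, task
from permedcoe import FILE_IN, FILE_OUT

SAMPLE_IMAGE = "/path/to/sample.sif"  # assumed container image path

@container(engine="SINGULARITY", image=SAMPLE_IMAGE)
@binary(binary="sample_tool.sh")      # assumed wrapped binary
@task(input_file=FILE_IN, output_file=FILE_OUT)
def sample_invocation(input_file=None, output_file=None, verbose="-v"):
    # The decorators turn this definition into a containerized binary call;
    # the function body is intentionally empty.
    pass
```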
### Uninstall
Uninstalling works the same as for any other `pip` package:
There are two ways to uninstall this package, depending on how it was installed (from Pypi or using `install.sh`):
- From Pypi:
```shell
pip uninstall permedcoe
```
or more specifically:
```shell
python3 -m pip uninstall permedcoe
```
- From manual installation (using `install.sh`):
```shell
./uninstall.sh
```
And then the folder can be cleaned as well using the `clean.sh` script.
```shell
./clean.sh
```
## Developer instructions
### Building block
If you are willing to implement your Building Block (BB), check the following repositories,
where you will find documentation, tutorials, and BB/application samples:
- [Documentation](https://permedcoe.readthedocs.io/en/latest/)
- [Tutorial](https://permedcoe.readthedocs.io/en/latest/04_creating/04_tutorial/tutorial.html)
- [basic_application](https://github.com/PerMedCoE/basic_application)
- [Lysozyme_in_water](https://github.com/PerMedCoE/Lysozyme_in_water)
## License
[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)
## Contact
<https://permedcoe.eu/contact/>
This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).

|
PypiClean
|
/apmondatalib-0.8.3.tar.gz/apmondatalib-0.8.3/README.md
|
# Python Datalib for the Plant and Animal Monitoring System
## Purpose
Data recorded in UTC must be converted to an appropriate query range before it can be retrieved in each country's local time zone. That conversion is tedious and can become an obstacle to data analysis. This library was developed to remove that obstacle and to support the data analysis process.
## Overview
The data currently stored in MongoDB is recorded in UTC. UTC is used so that data can be stored and consumed uniformly, regardless of time zone or location.
However, every country has its own time offset, so the query range must be converted when fetching data. For example, to fetch the data for the 15th in Korean time, you have to fetch the data from 3 PM UTC on the 14th to 3 PM UTC on the 15th, as illustrated in the sketch below.
This library provides this conversion along with a variety of APIs that make it easy to fetch data.
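For illustration only, the range conversion for Korea (UTC+9) can be expressed with the Python standard library alone; the library performs the equivalent conversion internally:
```python
from datetime import datetime, timedelta, timezone

KST = timezone(timedelta(hours=9))  # time_offset = 9 for Korea

# The local day 2018-12-15 in KST...
local_start = datetime(2018, 12, 15, tzinfo=KST)
local_end = local_start + timedelta(days=1)

# ...corresponds to 2018-12-14 15:00 ~ 2018-12-15 15:00 in UTC.
print(local_start.astimezone(timezone.utc))  # 2018-12-14 15:00:00+00:00
print(local_end.astimezone(timezone.utc))    # 2018-12-15 15:00:00+00:00
```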
## API Specification
The package name of this library is apmondatalib.
### The create_raw_data_fetcher method
create_raw_data_fetcher creates an apmondatalib instance and is the first entry point for using this library.
| Category | Name | Description |
| -------- | ---------------------- | ----------------------------------- |
| Parameters | host | MongoDB connection address |
| | port | MongoDB port (default: 27017) |
| | database | Database name (default: apmonv1) |
| | sensor_id | Sensor identifier |
| | default_page_size | Default page size |
| | time_offset | Time zone offset (default for Korea: 9) |
| Return value | A DataFetcher instance | |
### The SensorRawDataFetcher class
#### Constructor
| Category | Name | Description |
| -------- | ---------------------- | ----------------------------------- |
| Parameters | host | MongoDB connection address |
| | port | MongoDB port (default: 27017) |
| | database | Database name (default: apmonv1) |
| | sensor_id | Sensor identifier |
| | default_page_size | Default page size |
| | time_offset | Time zone offset (default for Korea: 9) |
| Return value | A DataFetcher instance | |
#### read
Fetches sensor values stored in the database.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | |
#### read_humidity
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | List of sensor values |
#### read_temperature
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | List of sensor values |
#### read_light
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | List of sensor values |
#### read_motion
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | List of sensor values |
#### count
Returns the number of sensor records stored in the database.
| Category | Name | Description |
| -------- | ------------- | ----------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| Return value | Number of records | |
#### count_humidity
Returns the number of humidity records.
| Category | Name | Description |
| -------- | ------------- | ---- |
| Return value | Number of records | |
#### count_temperature
Returns the number of temperature records.
| Category | Name | Description |
| -------- | ------------- | ---- |
| Return value | Number of records | |
#### count_light
Returns the number of light (illuminance) records.
| Category | Name | Description |
| -------- | ------------- | ---- |
| Return value | Number of records | |
#### count_motion
Returns the number of motion records.
| Category | Name | Description |
| -------- | ------------- | ---- |
| Return value | Number of records | |
#### count_total_pages
Returns the total number of pages of sensor data stored in the database.
| Category | Name | Description |
| -------- | ----------- | --------------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| | page_size | Number of records to fetch at once |
| Return value | Number of pages | |
#### count_total_humidity_pages
Returns the total number of pages of humidity data.
| Category | Name | Description |
| -------- | --------- | --------------------------- |
| Parameters | page_size | Number of records to fetch at once |
| Return value | Number of pages | |
#### count_total_temperature_pages
Returns the total number of pages of temperature data.
| Category | Name | Description |
| -------- | --------- | --------------------------- |
| Parameters | page_size | Number of records to fetch at once |
| Return value | Number of pages | |
#### count_total_light_pages
Returns the total number of pages of light data.
| Category | Name | Description |
| -------- | --------- | --------------------------- |
| Parameters | page_size | Number of records to fetch at once |
| Return value | Number of pages | |
#### count_total_motion_pages
Returns the total number of pages of motion data.
| Category | Name | Description |
| -------- | --------- | --------------------------- |
| Parameters | page_size | Number of records to fetch at once |
| Return value | Number of pages | |
#### read_in_range
Fetches sensor values within the specified time range from the database.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| | from_date | Start date |
| | to_date | End date |
| | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | |
#### read_humidity_in_range
Fetches humidity sensor values within the specified time range from the database.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | from_date | Start date |
| | to_date | End date |
| | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | |
#### read_temperature_in_range
Fetches temperature sensor values within the specified time range from the database.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | from_date | Start date |
| | to_date | End date |
| | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | |
#### read_light_in_range
Fetches light sensor values within the specified time range from the database.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | from_date | Start date |
| | to_date | End date |
| | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | |
#### read_motion_in_range
Fetches motion sensor values within the specified time range from the database.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | from_date | Start date |
| | to_date | End date |
| | page_number | Page number |
| | page_size | Number of records to fetch at once |
| Return value | A list of RawData objects | |
### The DailyDataFetcher class
#### Constructor
| Category | Name | Description |
| -------- | ---------------------- | ----------------------------------- |
| Parameters | host | MongoDB connection address |
| | port | MongoDB port (default: 27017) |
| | database | Database name (default: apmonv1) |
| | sensor_id | Sensor identifier |
| | default_page_size | Default page size |
| | time_offset | Time zone offset (default for Korea: 9) |
| Return value | A DataFetcher instance | |
#### read
Fetches the DailySummary for a specific date.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| | year | Year |
| | month | Month |
| | day | Day |
| Return value | A DailySummary object | |
#### read_with_date
Fetches the DailySummary for a specific date.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| | d | Date |
| Return value | A DailySummary object | |
#### read_in_range
Fetches an array of DailySummary objects for a specific date range.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| | from_year | Start year |
| | from_month | Start month |
| | from_day | Start day |
| | to_year | End year |
| | to_month | End month |
| | to_day | End day |
| Return value | An array of DailySummary objects | |
#### read_in_range_with_date
Fetches an array of DailySummary objects for a specific date range.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| | from_date | Start date |
| | to_date | End date |
| Return value | An array of DailySummary objects | |
#### append_prediction
Appends a prediction set to the DailySummary for a specific date.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | sensor_type | One of the SensorType enum values |
| | year | Year |
| | month | Month |
| | day | Day |
| | prediction_set | Prediction values |
| Return value | True | If the prediction was written to the DailySummary |
| | False | If the DailySummary does not exist or prediction_set == DailySummary.prediction_set |
Signature: `append_prediction(self, sensor_type, year, month, day, prediction_set)`
#### SensorType (Enum)
The sensor types supported by the system; the following values are available.
| Name | Description |
| ----------- | ---------------- |
| Humidity | Humidity sensor |
| Temperature | Temperature sensor |
| Light | Light (illuminance) sensor |
| Motion | Motion detection sensor |
#### The RawData class
A class representing a single sensor reading; it currently has the following fields.
| Name | Description |
| --------- | ---------------------------- |
| id | ID of the record stored in MongoDB |
| sensor_id | Identifier of the sensor board |
| type | One of the SensorType enum values |
| value | Measured value |
| timestamp | Time of recording |
#### The HourlySummary class
A class representing the hourly statistics of sensor values; it currently has four fields.
| Name | Description |
| --------- | ---------------------------- |
| hour | Hour (0-23) |
| average | Average value |
| min | Minimum value |
| max | Maximum value |
#### The DailySummary class
A class representing the daily summary of sensor values; it currently has the following fields.
| Name | Description |
| --------- | ---------------------------- |
| id | ID of the record stored in MongoDB |
| sensor_id | Identifier of the sensor board |
| type | One of the SensorType enum values |
| year | Year |
| month | Month |
| day | Day |
| average | Average value |
| min | Minimum value |
| max | Maximum value |
| data_set | Array of HourlySummary objects (24 entries) |
#### The Sensor class
A class representing a sensor; it currently has five fields.
##### Properties
| Name | Description |
| --------- | ---------------------------- |
| id | ID of the record stored in MongoDB |
| sensor_id | Identifier of the sensor board |
| current_values | Current sensor values |
| last_updated_time | Last updated time |
| time_offset | Time zone offset |
#### The AIslandClient class
An interface class that provides access to all AIsland features (a usage sketch follows the method tables below).
##### Static Methods
###### create_client
Creates an AIslandClient instance.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | host | MongoDB connection address |
| | port | MongoDB port (default: 27017) |
| | database | Database name (default: apmonv1) |
| Return value | An AIslandClient instance | |
##### Instance Methods
###### get_raw_data_fetcher
Returns a raw_data_fetcher instance that provides access to raw sensor values.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | None | |
| Return value | A SensorRawDataFetcher instance | |
###### get_daily_summary_fetcher
Returns a daily_summary_fetcher instance that provides access to daily statistics.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | None | |
| Return value | A DailyDataFetcher instance | |
###### get_sensors
Returns all sensors registered in the system.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | None | |
| Return value | An array of Sensor objects | |
###### get_sensor
Returns the sensor with the given identifier.
| Category | Name | Description |
| -------- | ----------------------- | --------------------------- |
| Parameters | sensorId | Identifier of the sensor to fetch |
| Return value | A Sensor object | |
| | False | If no sensor with that identifier is registered |
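A minimal sketch of using AIslandClient based on the methods described above. The import path and connection details are assumptions; see the Examples section below for the DataFetcher-based usage shipped with the package:
```python
from apmondatalib import AIslandClient  # assumed import path

client = AIslandClient.create_client("49.247.210.243", 27017, "apmonv1")

# List every registered sensor and print its latest values.
for sensor in client.get_sensors():
    print(sensor.sensor_id, sensor.current_values, sensor.last_updated_time)

# Fetchers for raw sensor values and daily summaries.
raw_fetcher = client.get_raw_data_fetcher()
daily_fetcher = client.get_daily_summary_fetcher()
```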
## Installation
```bash
pip install apmondatalib
```
Install the package by running the command above.
## Version History
### 0.0.1
- Implemented the read and read_* family of functions
### 0.0.3
- Implemented the count, count_*, count_total_pages, and count_*_total_pages families of functions
### 0.0.4
- Implemented the read_in_range family of functions
- Moved the page_size parameter to the end of each function signature, with its default value configurable in the constructor
- Changed the queries to return raw data sorted by timestamp in descending order
- Included the timestamp in the raw data output
### 0.0.5
- Added the DailyDataFetcher class and related classes
### 0.0.6
- Changed min, max, and average of a DailySummary to be None when some of the hourly data is missing
### 0.0.7
- Added read_in_range and read_in_range_with_date for reading DailySummary objects by date range
- Added the read_with_date function
### 0.0.8
- Added the Sensor class
- Added the AIslandClient class
- Added the DailyDataFetcher.append_prediction function
- Added the common sub-package
## Examples
The following example shows the basic usage for fetching each type of data.
```python
import sys
from datetime import datetime, timedelta, timezone, date
from apmondatalib import DataFetcher
def main():
d = DataFetcher.create_raw_data_fetcher("49.247.210.243", 27017, "apmonv1", "SEN03", 9)
sensor_types = [DataFetcher.SensorType.Humidity,
DataFetcher.SensorType.Temperature,
DataFetcher.SensorType.Light,
DataFetcher.SensorType.Motion]
test_read_methods(d, sensor_types)
test_count_methods(d, sensor_types)
test_count_total_page_methods(d, sensor_types)
test_read_in_range_methods(d, sensor_types)
d = DataFetcher.create_daily_summary_fetcher("49.247.210.243", 27017, "apmonv1", "SEN03", 9)
test_read_daily_summary(d, sensor_types)
test_read_in_range(d, sensor_types)
test_read_daily_summary_in_range_with_date(d, sensor_types)
def utc_to_local(utc_dt):
if sys.version_info >= (3, 7):
return utc_dt.astimezone()
else:
return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=None)
def test_read_in_range_methods(d, sensor_types):
date_from = utc_to_local(datetime(2018, 11, 10, 0, 0, 0, 0))
date_to = utc_to_local(datetime(2018, 11, 11, 0, 0, 0, 0))
for t in sensor_types:
print ('== read_in_range(%s) ==================================' % t)
for t in d.read_in_range(
DataFetcher.SensorType.Temperature,
date_from,
date_to, 1, 1000):
print(t)
for f in [d.read_humidity_in_range, d.read_temperature_in_range, d.read_light_in_range, d.read_motion_in_range]:
print('== read_in_range(%s, %s, %s) ==================================' % (f.__name__, date_from, date_to))
for x in f(date_from, date_to, 1, 1000):
print(x)
def test_read_methods(d, sensor_types):
for t in sensor_types:
print ('== read(%s) ==================================' % t)
for x in d.read(t, 50, 5):
print(x)
for f in [d.read_humidity, d.read_temperature, d.read_light, d.read_motion]:
print('== %s ==================================' % f.__name__)
for x in f(50, 5):
print(x)
def test_count_methods(d, sensor_types):
for t in sensor_types:
print("== count(%s) ==================================" % t)
print(d.count(t))
for f in [d.count_humidity, d.count_temperature, d.count_light, d.count_motion]:
print("== %s ==================================" % f.__name__)
print(f())
def test_count_total_page_methods(d, sensor_types):
for t in sensor_types:
print("== count_total_pages(%s) ==================================" % t)
print(d.count_total_pages(t, 100))
for f in [d.count_total_humidity_pages,
d.count_total_temperature_pages,
d.count_total_light_pages,
d.count_total_motion_pages]:
print("== %s ==================================" % f.__name__)
print(f(100))
def test_read_daily_summary(d, sensor_types):
for t in sensor_types:
print ('== test_read_daily_summary(%s, 2018,12, 15) ==================================' % t)
s = d.read(t, 2018, 12, 18)
if s is not None:
print(s)
for x in s.data_set:
print(x)
def test_read_in_range(d, sensor_types):
for t in sensor_types:
print ('== test_read_in_range(%s, 2018, 11, 5, 2018, 12, 30) ==================================' % t)
s = d.read_in_range(t, 2018, 11, 5, 2018, 12, 30)
if s is None:
print('No results.')
else:
for x in s:
print(x)
def test_read_daily_summary_in_range_with_date(d, sensor_types):
for t in sensor_types:
print ('== read_in_range_with_date(%s, 2018-11-5~2018-12-15) ==================================' % t)
from_date = date(2018, 11, 5)
to_date = date(2018, 12, 15)
s = d.read_in_range_with_date(t, from_date, to_date)
if s is None:
print('No results.')
else:
for x in s:
print(x)
if __name__ == "__main__":
main()
```
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/DeliveryPlayConfig.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.DeliveryFullSendConfig import DeliveryFullSendConfig
from alipay.aop.api.domain.DeliverySingleSendConfig import DeliverySingleSendConfig
class DeliveryPlayConfig(object):
def __init__(self):
self._delivery_full_send_config = None
self._delivery_single_send_config = None
@property
def delivery_full_send_config(self):
return self._delivery_full_send_config
@delivery_full_send_config.setter
def delivery_full_send_config(self, value):
if isinstance(value, DeliveryFullSendConfig):
self._delivery_full_send_config = value
else:
self._delivery_full_send_config = DeliveryFullSendConfig.from_alipay_dict(value)
@property
def delivery_single_send_config(self):
return self._delivery_single_send_config
@delivery_single_send_config.setter
def delivery_single_send_config(self, value):
if isinstance(value, DeliverySingleSendConfig):
self._delivery_single_send_config = value
else:
self._delivery_single_send_config = DeliverySingleSendConfig.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.delivery_full_send_config:
if hasattr(self.delivery_full_send_config, 'to_alipay_dict'):
params['delivery_full_send_config'] = self.delivery_full_send_config.to_alipay_dict()
else:
params['delivery_full_send_config'] = self.delivery_full_send_config
if self.delivery_single_send_config:
if hasattr(self.delivery_single_send_config, 'to_alipay_dict'):
params['delivery_single_send_config'] = self.delivery_single_send_config.to_alipay_dict()
else:
params['delivery_single_send_config'] = self.delivery_single_send_config
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = DeliveryPlayConfig()
if 'delivery_full_send_config' in d:
o.delivery_full_send_config = d['delivery_full_send_config']
if 'delivery_single_send_config' in d:
o.delivery_single_send_config = d['delivery_single_send_config']
return o
|
PypiClean
|
/vnpy_ctabacktester-1.1.1.tar.gz/vnpy_ctabacktester-1.1.1/vnpy_ctabacktester/ui/widget.py
|
import csv
import subprocess
from datetime import datetime, timedelta
from copy import copy
from typing import List, Tuple
import numpy as np
import pyqtgraph as pg
from pandas import DataFrame
from vnpy.trader.constant import Interval, Direction, Exchange
from vnpy.trader.engine import MainEngine, BaseEngine
from vnpy.trader.ui import QtCore, QtWidgets, QtGui
from vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell
from vnpy.event import Event, EventEngine
from vnpy.chart import ChartWidget, CandleItem, VolumeItem
from vnpy.trader.utility import load_json, save_json
from vnpy.trader.object import BarData, TradeData, OrderData
from vnpy.trader.database import DB_TZ
from vnpy_ctastrategy.backtesting import DailyResult
from ..engine import (
APP_NAME,
EVENT_BACKTESTER_LOG,
EVENT_BACKTESTER_BACKTESTING_FINISHED,
EVENT_BACKTESTER_OPTIMIZATION_FINISHED,
OptimizationSetting
)
class BacktesterManager(QtWidgets.QWidget):
""""""
setting_filename: str = "cta_backtester_setting.json"
signal_log: QtCore.Signal = QtCore.Signal(Event)
signal_backtesting_finished: QtCore.Signal = QtCore.Signal(Event)
signal_optimization_finished: QtCore.Signal = QtCore.Signal(Event)
def __init__(self, main_engine: MainEngine, event_engine: EventEngine) -> None:
""""""
super().__init__()
self.main_engine: MainEngine = main_engine
self.event_engine: EventEngine = event_engine
self.backtester_engine: BaseEngine = main_engine.get_engine(APP_NAME)
self.class_names: list = []
self.settings: dict = {}
self.target_display: str = ""
self.init_ui()
self.register_event()
self.backtester_engine.init_engine()
self.init_strategy_settings()
self.load_backtesting_setting()
def init_strategy_settings(self) -> None:
""""""
self.class_names = self.backtester_engine.get_strategy_class_names()
self.class_names.sort()
for class_name in self.class_names:
setting: dict = self.backtester_engine.get_default_setting(class_name)
self.settings[class_name] = setting
self.class_combo.addItems(self.class_names)
def init_ui(self) -> None:
""""""
self.setWindowTitle("CTA回测")
# Setting Part
self.class_combo: QtWidgets.QComboBox = QtWidgets.QComboBox()
self.symbol_line: QtWidgets.QLineEdit = QtWidgets.QLineEdit("IF88.CFFEX")
self.interval_combo: QtWidgets.QComboBox = QtWidgets.QComboBox()
for interval in Interval:
self.interval_combo.addItem(interval.value)
end_dt: datetime = datetime.now()
start_dt: datetime = end_dt - timedelta(days=3 * 365)
self.start_date_edit: QtWidgets.QDateEdit = QtWidgets.QDateEdit(
QtCore.QDate(
start_dt.year,
start_dt.month,
start_dt.day
)
)
self.end_date_edit: QtWidgets.QDateEdit = QtWidgets.QDateEdit(
QtCore.QDate.currentDate()
)
self.rate_line: QtWidgets.QLineEdit = QtWidgets.QLineEdit("0.000025")
self.slippage_line: QtWidgets.QLineEdit = QtWidgets.QLineEdit("0.2")
self.size_line: QtWidgets.QLineEdit = QtWidgets.QLineEdit("300")
self.pricetick_line: QtWidgets.QLineEdit = QtWidgets.QLineEdit("0.2")
self.capital_line: QtWidgets.QLineEdit = QtWidgets.QLineEdit("1000000")
backtesting_button: QtWidgets.QPushButton = QtWidgets.QPushButton("开始回测")
backtesting_button.clicked.connect(self.start_backtesting)
optimization_button: QtWidgets.QPushButton = QtWidgets.QPushButton("参数优化")
optimization_button.clicked.connect(self.start_optimization)
self.result_button: QtWidgets.QPushButton = QtWidgets.QPushButton("优化结果")
self.result_button.clicked.connect(self.show_optimization_result)
self.result_button.setEnabled(False)
downloading_button: QtWidgets.QPushButton = QtWidgets.QPushButton("下载数据")
downloading_button.clicked.connect(self.start_downloading)
self.order_button: QtWidgets.QPushButton = QtWidgets.QPushButton("委托记录")
self.order_button.clicked.connect(self.show_backtesting_orders)
self.order_button.setEnabled(False)
self.trade_button: QtWidgets.QPushButton = QtWidgets.QPushButton("成交记录")
self.trade_button.clicked.connect(self.show_backtesting_trades)
self.trade_button.setEnabled(False)
self.daily_button: QtWidgets.QPushButton = QtWidgets.QPushButton("每日盈亏")
self.daily_button.clicked.connect(self.show_daily_results)
self.daily_button.setEnabled(False)
self.candle_button: QtWidgets.QPushButton = QtWidgets.QPushButton("K线图表")
self.candle_button.clicked.connect(self.show_candle_chart)
self.candle_button.setEnabled(False)
edit_button: QtWidgets.QPushButton = QtWidgets.QPushButton("代码编辑")
edit_button.clicked.connect(self.edit_strategy_code)
reload_button: QtWidgets.QPushButton = QtWidgets.QPushButton("策略重载")
reload_button.clicked.connect(self.reload_strategy_class)
for button in [
backtesting_button,
optimization_button,
downloading_button,
self.result_button,
self.order_button,
self.trade_button,
self.daily_button,
self.candle_button,
edit_button,
reload_button
]:
button.setFixedHeight(button.sizeHint().height() * 2)
form: QtWidgets.QFormLayout = QtWidgets.QFormLayout()
form.addRow("交易策略", self.class_combo)
form.addRow("本地代码", self.symbol_line)
form.addRow("K线周期", self.interval_combo)
form.addRow("开始日期", self.start_date_edit)
form.addRow("结束日期", self.end_date_edit)
form.addRow("手续费率", self.rate_line)
form.addRow("交易滑点", self.slippage_line)
form.addRow("合约乘数", self.size_line)
form.addRow("价格跳动", self.pricetick_line)
form.addRow("回测资金", self.capital_line)
result_grid: QtWidgets.QGridLayout = QtWidgets.QGridLayout()
result_grid.addWidget(self.trade_button, 0, 0)
result_grid.addWidget(self.order_button, 0, 1)
result_grid.addWidget(self.daily_button, 1, 0)
result_grid.addWidget(self.candle_button, 1, 1)
left_vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
left_vbox.addLayout(form)
left_vbox.addWidget(backtesting_button)
left_vbox.addWidget(downloading_button)
left_vbox.addStretch()
left_vbox.addLayout(result_grid)
left_vbox.addStretch()
left_vbox.addWidget(optimization_button)
left_vbox.addWidget(self.result_button)
left_vbox.addStretch()
left_vbox.addWidget(edit_button)
left_vbox.addWidget(reload_button)
# Result part
self.statistics_monitor: StatisticsMonitor = StatisticsMonitor()
self.log_monitor: QtWidgets.QTextEdit = QtWidgets.QTextEdit()
self.chart: BacktesterChart = BacktesterChart()
chart: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
chart.addWidget(self.chart)
self.trade_dialog: BacktestingResultDialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测成交记录",
BacktestingTradeMonitor
)
self.order_dialog: BacktestingResultDialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测委托记录",
BacktestingOrderMonitor
)
self.daily_dialog: BacktestingResultDialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测每日盈亏",
DailyResultMonitor
)
# Candle Chart
self.candle_dialog: CandleChartDialog = CandleChartDialog()
# Layout
middle_vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
middle_vbox.addWidget(self.statistics_monitor)
middle_vbox.addWidget(self.log_monitor)
left_hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
left_hbox.addLayout(left_vbox)
left_hbox.addLayout(middle_vbox)
left_widget: QtWidgets.QWidget = QtWidgets.QWidget()
left_widget.setLayout(left_hbox)
right_vbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
right_vbox.addWidget(self.chart)
right_widget: QtWidgets.QWidget = QtWidgets.QWidget()
right_widget.setLayout(right_vbox)
hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox.addWidget(left_widget)
hbox.addWidget(right_widget)
self.setLayout(hbox)
def load_backtesting_setting(self) -> None:
""""""
setting: dict = load_json(self.setting_filename)
if not setting:
return
self.class_combo.setCurrentIndex(
self.class_combo.findText(setting["class_name"])
)
self.symbol_line.setText(setting["vt_symbol"])
self.interval_combo.setCurrentIndex(
self.interval_combo.findText(setting["interval"])
)
start_str: str = setting.get("start", "")
if start_str:
start_dt: QtCore.QDate = QtCore.QDate.fromString(start_str, "yyyy-MM-dd")
self.start_date_edit.setDate(start_dt)
self.rate_line.setText(str(setting["rate"]))
self.slippage_line.setText(str(setting["slippage"]))
self.size_line.setText(str(setting["size"]))
self.pricetick_line.setText(str(setting["pricetick"]))
self.capital_line.setText(str(setting["capital"]))
def register_event(self) -> None:
""""""
self.signal_log.connect(self.process_log_event)
self.signal_backtesting_finished.connect(
self.process_backtesting_finished_event)
self.signal_optimization_finished.connect(
self.process_optimization_finished_event)
self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit)
self.event_engine.register(
EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit)
self.event_engine.register(
EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit)
def process_log_event(self, event: Event) -> None:
""""""
msg = event.data
self.write_log(msg)
def write_log(self, msg) -> None:
""""""
timestamp: str = datetime.now().strftime("%H:%M:%S")
msg: str = f"{timestamp}\t{msg}"
self.log_monitor.append(msg)
def process_backtesting_finished_event(self, event: Event) -> None:
""""""
statistics: dict = self.backtester_engine.get_result_statistics()
self.statistics_monitor.set_data(statistics)
df: DataFrame = self.backtester_engine.get_result_df()
self.chart.set_data(df)
self.trade_button.setEnabled(True)
self.order_button.setEnabled(True)
self.daily_button.setEnabled(True)
# Tick data can not be displayed using candle chart
interval: str = self.interval_combo.currentText()
if interval != Interval.TICK.value:
self.candle_button.setEnabled(True)
def process_optimization_finished_event(self, event: Event) -> None:
""""""
self.write_log("请点击[优化结果]按钮查看")
self.result_button.setEnabled(True)
def start_backtesting(self) -> None:
""""""
class_name: str = self.class_combo.currentText()
if not class_name:
self.write_log("请选择要回测的策略")
return
vt_symbol: str = self.symbol_line.text()
interval: str = self.interval_combo.currentText()
start: datetime = self.start_date_edit.dateTime().toPython()
end: datetime = self.end_date_edit.dateTime().toPython()
rate: float = float(self.rate_line.text())
slippage: float = float(self.slippage_line.text())
size: float = float(self.size_line.text())
pricetick: float = float(self.pricetick_line.text())
capital: float = float(self.capital_line.text())
# Check validity of vt_symbol
if "." not in vt_symbol:
self.write_log("本地代码缺失交易所后缀,请检查")
return
_, exchange_str = vt_symbol.split(".")
if exchange_str not in Exchange.__members__:
self.write_log("本地代码的交易所后缀不正确,请检查")
return
# Save backtesting parameters
backtesting_setting: dict = {
"class_name": class_name,
"vt_symbol": vt_symbol,
"interval": interval,
"start": start.strftime("%Y-%m-%d"),
"rate": rate,
"slippage": slippage,
"size": size,
"pricetick": pricetick,
"capital": capital
}
save_json(self.setting_filename, backtesting_setting)
# Get strategy setting
old_setting: dict = self.settings[class_name]
dialog: BacktestingSettingEditor = BacktestingSettingEditor(class_name, old_setting)
i: int = dialog.exec()
if i != dialog.Accepted:
return
new_setting: dict = dialog.get_setting()
self.settings[class_name] = new_setting
result: bool = self.backtester_engine.start_backtesting(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
new_setting
)
if result:
self.statistics_monitor.clear_data()
self.chart.clear_data()
self.trade_button.setEnabled(False)
self.order_button.setEnabled(False)
self.daily_button.setEnabled(False)
self.candle_button.setEnabled(False)
self.trade_dialog.clear_data()
self.order_dialog.clear_data()
self.daily_dialog.clear_data()
self.candle_dialog.clear_data()
def start_optimization(self) -> None:
""""""
class_name: str = self.class_combo.currentText()
vt_symbol: str = self.symbol_line.text()
interval: str = self.interval_combo.currentText()
start: object = self.start_date_edit.dateTime().toPython()
end: object = self.end_date_edit.dateTime().toPython()
rate: float = float(self.rate_line.text())
slippage: float = float(self.slippage_line.text())
size: float = float(self.size_line.text())
pricetick: float = float(self.pricetick_line.text())
capital: float = float(self.capital_line.text())
parameters: dict = self.settings[class_name]
dialog: OptimizationSettingEditor = OptimizationSettingEditor(class_name, parameters)
i: int = dialog.exec()
if i != dialog.Accepted:
return
optimization_setting, use_ga, max_workers = dialog.get_setting()
self.target_display: str = dialog.target_display
self.backtester_engine.start_optimization(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
optimization_setting,
use_ga,
max_workers
)
self.result_button.setEnabled(False)
def start_downloading(self) -> None:
""""""
vt_symbol: str = self.symbol_line.text()
interval: str = self.interval_combo.currentText()
start_date: QtCore.QDate = self.start_date_edit.date()
end_date: QtCore.QDate = self.end_date_edit.date()
start: datetime = datetime(
start_date.year(),
start_date.month(),
start_date.day(),
)
start: datetime = start.replace(tzinfo=DB_TZ)
end: datetime = datetime(
end_date.year(),
end_date.month(),
end_date.day(),
23,
59,
59,
)
end: datetime = end.replace(tzinfo=DB_TZ)
self.backtester_engine.start_downloading(
vt_symbol,
interval,
start,
end
)
def show_optimization_result(self) -> None:
""""""
result_values: list = self.backtester_engine.get_result_values()
dialog: OptimizationResultMonitor = OptimizationResultMonitor(
result_values,
self.target_display
)
dialog.exec_()
def show_backtesting_trades(self) -> None:
""""""
if not self.trade_dialog.is_updated():
trades: List[TradeData] = self.backtester_engine.get_all_trades()
self.trade_dialog.update_data(trades)
self.trade_dialog.exec_()
def show_backtesting_orders(self) -> None:
""""""
if not self.order_dialog.is_updated():
orders: List[OrderData] = self.backtester_engine.get_all_orders()
self.order_dialog.update_data(orders)
self.order_dialog.exec_()
def show_daily_results(self) -> None:
""""""
if not self.daily_dialog.is_updated():
results: List[DailyResult] = self.backtester_engine.get_all_daily_results()
self.daily_dialog.update_data(results)
self.daily_dialog.exec_()
def show_candle_chart(self) -> None:
""""""
if not self.candle_dialog.is_updated():
history: list = self.backtester_engine.get_history_data()
self.candle_dialog.update_history(history)
trades: List[TradeData] = self.backtester_engine.get_all_trades()
self.candle_dialog.update_trades(trades)
self.candle_dialog.exec_()
def edit_strategy_code(self) -> None:
""""""
class_name: str = self.class_combo.currentText()
if not class_name:
return
file_path: str = self.backtester_engine.get_strategy_class_file(class_name)
cmd: list = ["code", file_path]
p: subprocess.CompletedProcess = subprocess.run(cmd, shell=True)
if p.returncode:
QtWidgets.QMessageBox.warning(
self,
"启动代码编辑器失败",
"请检查是否安装了Visual Studio Code,并将其路径添加到了系统全局变量中!"
)
def reload_strategy_class(self) -> None:
""""""
self.backtester_engine.reload_strategy_class()
current_strategy_name: str = self.class_combo.currentText()
self.class_combo.clear()
self.init_strategy_settings()
ix: int = self.class_combo.findText(current_strategy_name)
self.class_combo.setCurrentIndex(ix)
def show(self) -> None:
""""""
self.showMaximized()
class StatisticsMonitor(QtWidgets.QTableWidget):
""""""
KEY_NAME_MAP: dict = {
"start_date": "首个交易日",
"end_date": "最后交易日",
"total_days": "总交易日",
"profit_days": "盈利交易日",
"loss_days": "亏损交易日",
"capital": "起始资金",
"end_balance": "结束资金",
"total_return": "总收益率",
"annual_return": "年化收益",
"max_drawdown": "最大回撤",
"max_ddpercent": "百分比最大回撤",
"total_net_pnl": "总盈亏",
"total_commission": "总手续费",
"total_slippage": "总滑点",
"total_turnover": "总成交额",
"total_trade_count": "总成交笔数",
"daily_net_pnl": "日均盈亏",
"daily_commission": "日均手续费",
"daily_slippage": "日均滑点",
"daily_turnover": "日均成交额",
"daily_trade_count": "日均成交笔数",
"daily_return": "日均收益率",
"return_std": "收益标准差",
"sharpe_ratio": "夏普比率",
"return_drawdown_ratio": "收益回撤比"
}
def __init__(self) -> None:
""""""
super().__init__()
self.cells: dict = {}
self.init_ui()
def init_ui(self) -> None:
""""""
self.setRowCount(len(self.KEY_NAME_MAP))
self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))
self.setColumnCount(1)
self.horizontalHeader().setVisible(False)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch
)
self.setEditTriggers(self.NoEditTriggers)
for row, key in enumerate(self.KEY_NAME_MAP.keys()):
cell: QtWidgets.QTableWidgetItem = QtWidgets.QTableWidgetItem()
self.setItem(row, 0, cell)
self.cells[key] = cell
def clear_data(self) -> None:
""""""
for cell in self.cells.values():
cell.setText("")
def set_data(self, data: dict) -> None:
""""""
data["capital"] = f"{data['capital']:,.2f}"
data["end_balance"] = f"{data['end_balance']:,.2f}"
data["total_return"] = f"{data['total_return']:,.2f}%"
data["annual_return"] = f"{data['annual_return']:,.2f}%"
data["max_drawdown"] = f"{data['max_drawdown']:,.2f}"
data["max_ddpercent"] = f"{data['max_ddpercent']:,.2f}%"
data["total_net_pnl"] = f"{data['total_net_pnl']:,.2f}"
data["total_commission"] = f"{data['total_commission']:,.2f}"
data["total_slippage"] = f"{data['total_slippage']:,.2f}"
data["total_turnover"] = f"{data['total_turnover']:,.2f}"
data["daily_net_pnl"] = f"{data['daily_net_pnl']:,.2f}"
data["daily_commission"] = f"{data['daily_commission']:,.2f}"
data["daily_slippage"] = f"{data['daily_slippage']:,.2f}"
data["daily_turnover"] = f"{data['daily_turnover']:,.2f}"
data["daily_trade_count"] = f"{data['daily_trade_count']:,.2f}"
data["daily_return"] = f"{data['daily_return']:,.2f}%"
data["return_std"] = f"{data['return_std']:,.2f}%"
data["sharpe_ratio"] = f"{data['sharpe_ratio']:,.2f}"
data["return_drawdown_ratio"] = f"{data['return_drawdown_ratio']:,.2f}"
for key, cell in self.cells.items():
value = data.get(key, "")
cell.setText(str(value))
class BacktestingSettingEditor(QtWidgets.QDialog):
"""
For creating new strategy and editing strategy parameters.
"""
def __init__(
self, class_name: str, parameters: dict
) -> None:
""""""
super(BacktestingSettingEditor, self).__init__()
self.class_name: str = class_name
self.parameters: dict = parameters
self.edits: dict = {}
self.init_ui()
def init_ui(self) -> None:
""""""
form: QtWidgets.QFormLayout = QtWidgets.QFormLayout()
        # Add the vt_symbol and name edits when adding a new strategy
self.setWindowTitle(f"策略参数配置:{self.class_name}")
button_text: str = "确定"
parameters: dict = self.parameters
for name, value in parameters.items():
type_ = type(value)
edit: QtWidgets.QLineEdit = QtWidgets.QLineEdit(str(value))
if type_ is int:
validator: QtGui.QIntValidator = QtGui.QIntValidator()
edit.setValidator(validator)
elif type_ is float:
validator: QtGui.QDoubleValidator = QtGui.QDoubleValidator()
edit.setValidator(validator)
form.addRow(f"{name} {type_}", edit)
self.edits[name] = (edit, type_)
button: QtWidgets.QPushButton = QtWidgets.QPushButton(button_text)
button.clicked.connect(self.accept)
form.addRow(button)
widget: QtWidgets.QWidget = QtWidgets.QWidget()
widget.setLayout(form)
scroll: QtWidgets.QScrollArea = QtWidgets.QScrollArea()
scroll.setWidgetResizable(True)
scroll.setWidget(widget)
vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
vbox.addWidget(scroll)
self.setLayout(vbox)
def get_setting(self) -> dict:
""""""
setting: dict = {}
for name, tp in self.edits.items():
edit, type_ = tp
value_text = edit.text()
if type_ == bool:
if value_text == "True":
value = True
else:
value = False
else:
value = type_(value_text)
setting[name] = value
return setting
class BacktesterChart(pg.GraphicsLayoutWidget):
""""""
def __init__(self) -> None:
""""""
super().__init__(title="Backtester Chart")
self.dates: dict = {}
self.init_ui()
def init_ui(self) -> None:
""""""
pg.setConfigOptions(antialias=True)
# Create plot widgets
self.balance_plot = self.addPlot(
title="账户净值",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.drawdown_plot = self.addPlot(
title="净值回撤",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.pnl_plot = self.addPlot(
title="每日盈亏",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.distribution_plot = self.addPlot(title="盈亏分布")
# Add curves and bars on plot widgets
self.balance_curve = self.balance_plot.plot(
pen=pg.mkPen("#ffc107", width=3)
)
dd_color: str = "#303f9f"
self.drawdown_curve = self.drawdown_plot.plot(
fillLevel=-0.3, brush=dd_color, pen=dd_color
)
profit_color: str = 'r'
loss_color: str = 'g'
self.profit_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=profit_color, pen=profit_color
)
self.loss_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=loss_color, pen=loss_color
)
self.pnl_plot.addItem(self.profit_pnl_bar)
self.pnl_plot.addItem(self.loss_pnl_bar)
distribution_color: str = "#6d4c41"
self.distribution_curve = self.distribution_plot.plot(
fillLevel=-0.3, brush=distribution_color, pen=distribution_color
)
def clear_data(self) -> None:
""""""
self.balance_curve.setData([], [])
self.drawdown_curve.setData([], [])
self.profit_pnl_bar.setOpts(x=[], height=[])
self.loss_pnl_bar.setOpts(x=[], height=[])
self.distribution_curve.setData([], [])
def set_data(self, df) -> None:
""""""
if df is None:
return
count: int = len(df)
self.dates.clear()
for n, date in enumerate(df.index):
self.dates[n] = date
# Set data for curve of balance and drawdown
self.balance_curve.setData(df["balance"])
self.drawdown_curve.setData(df["drawdown"])
# Set data for daily pnl bar
profit_pnl_x: list = []
profit_pnl_height: list = []
loss_pnl_x: list = []
loss_pnl_height: list = []
for count, pnl in enumerate(df["net_pnl"]):
if pnl >= 0:
profit_pnl_height.append(pnl)
profit_pnl_x.append(count)
else:
loss_pnl_height.append(pnl)
loss_pnl_x.append(count)
self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height)
self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height)
# Set data for pnl distribution
hist, x = np.histogram(df["net_pnl"], bins="auto")
x = x[:-1]
self.distribution_curve.setData(x, hist)
class DateAxis(pg.AxisItem):
"""Axis for showing date data"""
def __init__(self, dates: dict, *args, **kwargs) -> None:
""""""
super().__init__(*args, **kwargs)
self.dates: dict = dates
def tickStrings(self, values, scale, spacing) -> list:
""""""
strings: list = []
for v in values:
dt = self.dates.get(v, "")
strings.append(str(dt))
return strings
class OptimizationSettingEditor(QtWidgets.QDialog):
"""
For setting up parameters for optimization.
"""
DISPLAY_NAME_MAP: dict = {
"总收益率": "total_return",
"夏普比率": "sharpe_ratio",
"收益回撤比": "return_drawdown_ratio",
"日均盈亏": "daily_net_pnl"
}
def __init__(
self, class_name: str, parameters: dict
) -> None:
""""""
super().__init__()
self.class_name: str = class_name
self.parameters: dict = parameters
self.edits: dict = {}
self.optimization_setting: OptimizationSetting = None
self.use_ga: bool = False
self.init_ui()
def init_ui(self) -> None:
""""""
QLabel: QtWidgets.QLabel = QtWidgets.QLabel
self.target_combo: QtWidgets.QComboBox = QtWidgets.QComboBox()
self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))
self.worker_spin: QtWidgets.QSpinBox = QtWidgets.QSpinBox()
self.worker_spin.setRange(0, 10000)
self.worker_spin.setValue(0)
self.worker_spin.setToolTip("设为0则自动根据CPU核心数启动对应数量的进程")
grid: QtWidgets.QGridLayout = QtWidgets.QGridLayout()
grid.addWidget(QLabel("优化目标"), 0, 0)
grid.addWidget(self.target_combo, 0, 1, 1, 3)
grid.addWidget(QLabel("进程上限"), 1, 0)
grid.addWidget(self.worker_spin, 1, 1, 1, 3)
grid.addWidget(QLabel("参数"), 2, 0)
grid.addWidget(QLabel("开始"), 2, 1)
grid.addWidget(QLabel("步进"), 2, 2)
grid.addWidget(QLabel("结束"), 2, 3)
        # Add the vt_symbol and name edits when adding a new strategy
self.setWindowTitle(f"优化参数配置:{self.class_name}")
validator: QtGui.QDoubleValidator = QtGui.QDoubleValidator()
row: int = 3
for name, value in self.parameters.items():
type_ = type(value)
if type_ not in [int, float]:
continue
start_edit: QtWidgets.QLineEdit = QtWidgets.QLineEdit(str(value))
step_edit: QtWidgets.QLineEdit = QtWidgets.QLineEdit(str(1))
end_edit: QtWidgets.QLineEdit = QtWidgets.QLineEdit(str(value))
for edit in [start_edit, step_edit, end_edit]:
edit.setValidator(validator)
grid.addWidget(QLabel(name), row, 0)
grid.addWidget(start_edit, row, 1)
grid.addWidget(step_edit, row, 2)
grid.addWidget(end_edit, row, 3)
self.edits[name] = {
"type": type_,
"start": start_edit,
"step": step_edit,
"end": end_edit
}
row += 1
parallel_button: QtWidgets.QPushButton = QtWidgets.QPushButton("多进程优化")
parallel_button.clicked.connect(self.generate_parallel_setting)
grid.addWidget(parallel_button, row, 0, 1, 4)
row += 1
ga_button: QtWidgets.QPushButton = QtWidgets.QPushButton("遗传算法优化")
ga_button.clicked.connect(self.generate_ga_setting)
grid.addWidget(ga_button, row, 0, 1, 4)
widget: QtWidgets.QWidget = QtWidgets.QWidget()
widget.setLayout(grid)
scroll: QtWidgets.QScrollArea = QtWidgets.QScrollArea()
scroll.setWidgetResizable(True)
scroll.setWidget(widget)
vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
vbox.addWidget(scroll)
self.setLayout(vbox)
def generate_ga_setting(self) -> None:
""""""
self.use_ga: bool = True
self.generate_setting()
def generate_parallel_setting(self) -> None:
""""""
self.use_ga: bool = False
self.generate_setting()
def generate_setting(self) -> None:
""""""
self.optimization_setting = OptimizationSetting()
self.target_display: str = self.target_combo.currentText()
target_name: str = self.DISPLAY_NAME_MAP[self.target_display]
self.optimization_setting.set_target(target_name)
for name, d in self.edits.items():
type_ = d["type"]
start_value = type_(d["start"].text())
step_value = type_(d["step"].text())
end_value = type_(d["end"].text())
if start_value == end_value:
self.optimization_setting.add_parameter(name, start_value)
else:
self.optimization_setting.add_parameter(
name,
start_value,
end_value,
step_value
)
self.accept()
def get_setting(self) -> Tuple[OptimizationSetting, bool, int]:
""""""
return self.optimization_setting, self.use_ga, self.worker_spin.value()
class OptimizationResultMonitor(QtWidgets.QDialog):
"""
For viewing optimization result.
"""
def __init__(
self, result_values: list, target_display: str
) -> None:
""""""
super().__init__()
self.result_values: list = result_values
self.target_display: str = target_display
self.init_ui()
def init_ui(self) -> None:
""""""
self.setWindowTitle("参数优化结果")
self.resize(1100, 500)
        # Create table to show result
table: QtWidgets.QTableWidget = QtWidgets.QTableWidget()
table.setColumnCount(2)
table.setRowCount(len(self.result_values))
table.setHorizontalHeaderLabels(["参数", self.target_display])
table.setEditTriggers(table.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.horizontalHeader().setSectionResizeMode(
0, QtWidgets.QHeaderView.ResizeToContents
)
table.horizontalHeader().setSectionResizeMode(
1, QtWidgets.QHeaderView.Stretch
)
for n, tp in enumerate(self.result_values):
setting, target_value, _ = tp
setting_cell: QtWidgets.QTableWidgetItem = QtWidgets.QTableWidgetItem(str(setting))
target_cell: QtWidgets.QTableWidgetItem = QtWidgets.QTableWidgetItem(f"{target_value:.2f}")
setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)
target_cell.setTextAlignment(QtCore.Qt.AlignCenter)
table.setItem(n, 0, setting_cell)
table.setItem(n, 1, target_cell)
# Create layout
button: QtWidgets.QPushButton = QtWidgets.QPushButton("保存")
button.clicked.connect(self.save_csv)
hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox.addStretch()
hbox.addWidget(button)
vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
vbox.addWidget(table)
vbox.addLayout(hbox)
self.setLayout(vbox)
def save_csv(self) -> None:
"""
Save table data into a csv file
"""
path, _ = QtWidgets.QFileDialog.getSaveFileName(
self, "保存数据", "", "CSV(*.csv)")
if not path:
return
with open(path, "w") as f:
writer = csv.writer(f, lineterminator="\n")
writer.writerow(["参数", self.target_display])
for tp in self.result_values:
setting, target_value, _ = tp
row_data: list = [str(setting), str(target_value)]
writer.writerow(row_data)
class BacktestingTradeMonitor(BaseMonitor):
"""
Monitor for backtesting trade data.
"""
headers: dict = {
"tradeid": {"display": "成交号 ", "cell": BaseCell, "update": False},
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "数量", "cell": BaseCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class BacktestingOrderMonitor(BaseMonitor):
"""
Monitor for backtesting order data.
"""
headers: dict = {
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"type": {"display": "类型", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "总数量", "cell": BaseCell, "update": False},
"traded": {"display": "已成交", "cell": BaseCell, "update": False},
"status": {"display": "状态", "cell": EnumCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class FloatCell(BaseCell):
"""
Cell used for showing pnl data.
"""
def __init__(self, content, data) -> None:
""""""
content: str = f"{content:.2f}"
super().__init__(content, data)
class DailyResultMonitor(BaseMonitor):
"""
Monitor for backtesting daily result.
"""
headers: dict = {
"date": {"display": "日期", "cell": BaseCell, "update": False},
"trade_count": {"display": "成交笔数", "cell": BaseCell, "update": False},
"start_pos": {"display": "开盘持仓", "cell": BaseCell, "update": False},
"end_pos": {"display": "收盘持仓", "cell": BaseCell, "update": False},
"turnover": {"display": "成交额", "cell": FloatCell, "update": False},
"commission": {"display": "手续费", "cell": FloatCell, "update": False},
"slippage": {"display": "滑点", "cell": FloatCell, "update": False},
"trading_pnl": {"display": "交易盈亏", "cell": FloatCell, "update": False},
"holding_pnl": {"display": "持仓盈亏", "cell": FloatCell, "update": False},
"total_pnl": {"display": "总盈亏", "cell": FloatCell, "update": False},
"net_pnl": {"display": "净盈亏", "cell": FloatCell, "update": False},
}
class BacktestingResultDialog(QtWidgets.QDialog):
""""""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
title: str,
table_class: QtWidgets.QTableWidget
) -> None:
""""""
super().__init__()
self.main_engine: MainEngine = main_engine
self.event_engine: EventEngine = event_engine
self.title: str = title
self.table_class: QtWidgets.QTableWidget = table_class
self.updated: bool = False
self.init_ui()
def init_ui(self) -> None:
""""""
self.setWindowTitle(self.title)
self.resize(1100, 600)
self.table: QtWidgets.QTableWidget = self.table_class(self.main_engine, self.event_engine)
vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
vbox.addWidget(self.table)
self.setLayout(vbox)
def clear_data(self) -> None:
""""""
self.updated = False
self.table.setRowCount(0)
def update_data(self, data: list) -> None:
""""""
self.updated = True
data.reverse()
for obj in data:
self.table.insert_new_row(obj)
def is_updated(self) -> bool:
""""""
return self.updated
class CandleChartDialog(QtWidgets.QDialog):
""""""
def __init__(self) -> None:
""""""
super().__init__()
self.updated: bool = False
self.dt_ix_map: dict = {}
self.ix_bar_map: dict = {}
self.high_price = 0
self.low_price = 0
self.price_range = 0
self.items: list = []
self.init_ui()
def init_ui(self) -> None:
""""""
self.setWindowTitle("回测K线图表")
self.resize(1400, 800)
# Create chart widget
self.chart: ChartWidget = ChartWidget()
self.chart.add_plot("candle", hide_x_axis=True)
self.chart.add_plot("volume", maximum_height=200)
self.chart.add_item(CandleItem, "candle", "candle")
self.chart.add_item(VolumeItem, "volume", "volume")
self.chart.add_cursor()
# Create help widget
text1: str = "红色虚线 —— 盈利交易"
label1: QtWidgets.QLabel = QtWidgets.QLabel(text1)
label1.setStyleSheet("color:red")
text2: str = "绿色虚线 —— 亏损交易"
label2: QtWidgets.QLabel = QtWidgets.QLabel(text2)
label2.setStyleSheet("color:#00FF00")
text3: str = "黄色向上箭头 —— 买入开仓 Buy"
label3: QtWidgets.QLabel = QtWidgets.QLabel(text3)
label3.setStyleSheet("color:yellow")
text4: str = "黄色向下箭头 —— 卖出平仓 Sell"
label4: QtWidgets.QLabel = QtWidgets.QLabel(text4)
label4.setStyleSheet("color:yellow")
text5: str = "紫红向下箭头 —— 卖出开仓 Short"
label5: QtWidgets.QLabel = QtWidgets.QLabel(text5)
label5.setStyleSheet("color:magenta")
text6: str = "紫红向上箭头 —— 买入平仓 Cover"
label6: QtWidgets.QLabel = QtWidgets.QLabel(text6)
label6.setStyleSheet("color:magenta")
hbox1: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox1.addStretch()
hbox1.addWidget(label1)
hbox1.addStretch()
hbox1.addWidget(label2)
hbox1.addStretch()
hbox2: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox2.addStretch()
hbox2.addWidget(label3)
hbox2.addStretch()
hbox2.addWidget(label4)
hbox2.addStretch()
hbox3: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox3.addStretch()
hbox3.addWidget(label5)
hbox3.addStretch()
hbox3.addWidget(label6)
hbox3.addStretch()
# Set layout
vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
vbox.addWidget(self.chart)
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
self.setLayout(vbox)
def update_history(self, history: list) -> None:
""""""
self.updated = True
self.chart.update_history(history)
for ix, bar in enumerate(history):
self.ix_bar_map[ix] = bar
self.dt_ix_map[bar.datetime] = ix
if not self.high_price:
self.high_price = bar.high_price
self.low_price = bar.low_price
else:
self.high_price = max(self.high_price, bar.high_price)
self.low_price = min(self.low_price, bar.low_price)
self.price_range = self.high_price - self.low_price
def update_trades(self, trades: list) -> None:
""""""
trade_pairs: list = generate_trade_pairs(trades)
candle_plot: pg.PlotItem = self.chart.get_plot("candle")
scatter_data: list = []
y_adjustment: float = self.price_range * 0.001
for d in trade_pairs:
open_ix = self.dt_ix_map[d["open_dt"]]
close_ix = self.dt_ix_map[d["close_dt"]]
open_price = d["open_price"]
close_price = d["close_price"]
# Trade Line
x: list = [open_ix, close_ix]
y: list = [open_price, close_price]
if d["direction"] == Direction.LONG and close_price >= open_price:
color: str = "r"
elif d["direction"] == Direction.SHORT and close_price <= open_price:
color: str = "r"
else:
color: str = "g"
pen: QtGui.QPen = pg.mkPen(color, width=1.5, style=QtCore.Qt.DashLine)
item: pg.PlotCurveItem = pg.PlotCurveItem(x, y, pen=pen)
self.items.append(item)
candle_plot.addItem(item)
# Trade Scatter
open_bar: BarData = self.ix_bar_map[open_ix]
close_bar: BarData = self.ix_bar_map[close_ix]
if d["direction"] == Direction.LONG:
scatter_color: str = "yellow"
open_symbol: str = "t1"
close_symbol: str = "t"
open_side: int = 1
close_side: int = -1
open_y: float = open_bar.low_price
close_y: float = close_bar.high_price
else:
scatter_color: str = "magenta"
open_symbol: str = "t"
close_symbol: str = "t1"
open_side: int = -1
close_side: int = 1
open_y: float = open_bar.high_price
close_y: float = close_bar.low_price
pen = pg.mkPen(QtGui.QColor(scatter_color))
brush: QtGui.QBrush = pg.mkBrush(QtGui.QColor(scatter_color))
size: int = 10
open_scatter: dict = {
"pos": (open_ix, open_y - open_side * y_adjustment),
"size": size,
"pen": pen,
"brush": brush,
"symbol": open_symbol
}
close_scatter: dict = {
"pos": (close_ix, close_y - close_side * y_adjustment),
"size": size,
"pen": pen,
"brush": brush,
"symbol": close_symbol
}
scatter_data.append(open_scatter)
scatter_data.append(close_scatter)
# Trade text
volume = d["volume"]
text_color: QtGui.QColor = QtGui.QColor(scatter_color)
open_text: pg.TextItem = pg.TextItem(f"[{volume}]", color=text_color, anchor=(0.5, 0.5))
close_text: pg.TextItem = pg.TextItem(f"[{volume}]", color=text_color, anchor=(0.5, 0.5))
open_text.setPos(open_ix, open_y - open_side * y_adjustment * 3)
close_text.setPos(close_ix, close_y - close_side * y_adjustment * 3)
self.items.append(open_text)
self.items.append(close_text)
candle_plot.addItem(open_text)
candle_plot.addItem(close_text)
trade_scatter: pg.ScatterPlotItem = pg.ScatterPlotItem(scatter_data)
self.items.append(trade_scatter)
candle_plot.addItem(trade_scatter)
def clear_data(self) -> None:
""""""
self.updated = False
candle_plot: pg.PlotItem = self.chart.get_plot("candle")
for item in self.items:
candle_plot.removeItem(item)
self.items.clear()
self.chart.clear_all()
self.dt_ix_map.clear()
self.ix_bar_map.clear()
def is_updated(self) -> bool:
""""""
return self.updated
def generate_trade_pairs(trades: list) -> list:
""""""
long_trades: list = []
short_trades: list = []
trade_pairs: list = []
for trade in trades:
trade: TradeData = copy(trade)
if trade.direction == Direction.LONG:
same_direction: list = long_trades
opposite_direction: list = short_trades
else:
same_direction: list = short_trades
opposite_direction: list = long_trades
while trade.volume and opposite_direction:
open_trade: TradeData = opposite_direction[0]
close_volume = min(open_trade.volume, trade.volume)
d: dict = {
"open_dt": open_trade.datetime,
"open_price": open_trade.price,
"close_dt": trade.datetime,
"close_price": trade.price,
"direction": open_trade.direction,
"volume": close_volume,
}
trade_pairs.append(d)
open_trade.volume -= close_volume
if not open_trade.volume:
opposite_direction.pop(0)
trade.volume -= close_volume
if trade.volume:
same_direction.append(trade)
return trade_pairs
|
PypiClean
|
/briantree-3.30.0.tar.gz/briantree-3.30.0/braintree/payment_method.py
|
import braintree
from braintree.address import Address
from braintree.resource import Resource
from braintree.configuration import Configuration
class PaymentMethod(Resource):
@staticmethod
def create(params={}):
return Configuration.gateway().payment_method.create(params)
@staticmethod
def find(payment_method_token):
return Configuration.gateway().payment_method.find(payment_method_token)
@staticmethod
def update(payment_method_token, params):
return Configuration.gateway().payment_method.update(payment_method_token, params)
@staticmethod
def delete(payment_method_token):
return Configuration.gateway().payment_method.delete(payment_method_token)
@staticmethod
def create_signature():
return PaymentMethod.signature("create")
@staticmethod
def signature(type):
signature = [
"billing_address_id",
"cardholder_name",
"customer_id",
"cvv",
"device_data",
"device_session_id",
"expiration_date",
"expiration_month",
"expiration_year",
"number",
"payment_method_nonce",
"token",
{"billing_address": Address.create_signature()},
{"options": [
"fail_on_duplicate_payment_method",
"make_default",
"verification_merchant_account_id",
"verify_card",
]
}
]
return signature
@staticmethod
def update_signature():
signature = [
"billing_address_id",
"cardholder_name",
"cvv",
"device_session_id",
"expiration_date",
"expiration_month",
"expiration_year",
"number",
"token",
"venmo_sdk_payment_method_code",
"device_data",
"fraud_merchant_id",
"payment_method_nonce",
{"options": [
"make_default",
"verify_card",
"verification_merchant_account_id",
"venmo_sdk_session"
]
},
{"billing_address" :
Address.update_signature() +
[{"options": ["update_existing"]}]
}
]
return signature
|
PypiClean
|
/aworda-rainfer-0.0.1.tar.gz/aworda-rainfer-0.0.1/src/aworda/rainfer/entry/message.py
|
# no error
from ..message.chain import MessageChain as MessageChain
from ..message.commander import Arg as Arg
from ..message.commander import Commander as Commander
from ..message.commander import Slot as Slot
from ..message.element import App as App
from ..message.element import At as At
from ..message.element import AtAll as AtAll
from ..message.element import Dice as Dice
from ..message.element import Element as Element
from ..message.element import Face as Face
from ..message.element import File as File
from ..message.element import FlashImage as FlashImage
from ..message.element import Forward as Forward
from ..message.element import ForwardNode as ForwardNode
from ..message.element import Image as Image
from ..message.element import ImageType as ImageType
from ..message.element import MultimediaElement as MultimediaElement
from ..message.element import MusicShare as MusicShare
from ..message.element import NotSendableElement as NotSendableElement
from ..message.element import Plain as Plain
from ..message.element import Poke as Poke
from ..message.element import PokeMethods as PokeMethods
from ..message.element import Quote as Quote
from ..message.element import Source as Source
from ..message.element import Voice as Voice
from ..message.formatter import Formatter as Formatter
from ..message.parser.base import ContainKeyword as ContainKeyword
from ..message.parser.base import DetectPrefix as DetectPrefix
from ..message.parser.base import DetectSuffix as DetectSuffix
from ..message.parser.base import MatchContent as MatchContent
from ..message.parser.base import MatchRegex as MatchRegex
from ..message.parser.base import Mention as Mention
from ..message.parser.base import MentionMe as MentionMe
from ..message.parser.twilight import FORCE as FORCE
from ..message.parser.twilight import NOSPACE as NOSPACE
from ..message.parser.twilight import PRESERVE as PRESERVE
from ..message.parser.twilight import ArgumentMatch as ArgumentMatch
from ..message.parser.twilight import ElementMatch as ElementMatch
from ..message.parser.twilight import FullMatch as FullMatch
from ..message.parser.twilight import Match as Match
from ..message.parser.twilight import RegexMatch as RegexMatch
from ..message.parser.twilight import Sparkle as Sparkle
from ..message.parser.twilight import Twilight as Twilight
from ..message.parser.twilight import UnionMatch as UnionMatch
from ..message.parser.twilight import WildcardMatch as WildcardMatch
from ..util.send import Bypass as Bypass
from ..util.send import Ignore as Ignore
from ..util.send import Safe as Safe
from ..util.send import Strict as Strict
|
PypiClean
|
/golos_lib_python-0.9.21-py3-none-any.whl/golos/amount.py
|
class Amount(dict):
"""
    This class helps to deal with and calculate the different assets on the chain.
    :param str amount_string: Amount string as used by the backend (e.g. "10 GBG")
"""
def __init__(self, amount_string="0 GBG"):
if isinstance(amount_string, Amount):
self["amount"] = amount_string["amount"]
self["asset"] = amount_string["asset"]
elif isinstance(amount_string, str):
self["amount"], self["asset"] = amount_string.split(" ")
else:
raise ValueError("Need an instance of 'Amount' or a string with amount and asset")
self["amount"] = float(self["amount"])
@property
def amount(self):
return self["amount"]
@property
def symbol(self):
return self["asset"]
@property
def asset(self):
return self["asset"]
def __str__(self):
# GOLOS
if self["asset"] == "GBG":
prec = 3
elif self["asset"] == "GOLOS":
prec = 3
elif self["asset"] == "GESTS":
prec = 6
# default
else:
prec = 6
return "{:.{prec}f} {}".format(self["amount"], self["asset"], prec=prec)
def __float__(self):
return self["amount"]
def __int__(self):
return int(self["amount"])
def __add__(self, other):
a = Amount(self)
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
a["amount"] += other["amount"]
else:
a["amount"] += float(other)
return a
def __sub__(self, other):
a = Amount(self)
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
a["amount"] -= other["amount"]
else:
a["amount"] -= float(other)
return a
def __mul__(self, other):
a = Amount(self)
if isinstance(other, Amount):
a["amount"] *= other["amount"]
else:
a["amount"] *= other
return a
def __floordiv__(self, other):
a = Amount(self)
if isinstance(other, Amount):
raise Exception("Cannot divide two Amounts")
else:
a["amount"] //= other
return a
def __div__(self, other):
a = Amount(self)
if isinstance(other, Amount):
raise Exception("Cannot divide two Amounts")
else:
a["amount"] /= other
return a
def __mod__(self, other):
a = Amount(self)
if isinstance(other, Amount):
a["amount"] %= other["amount"]
else:
a["amount"] %= other
return a
def __pow__(self, other):
a = Amount(self)
if isinstance(other, Amount):
a["amount"] **= other["amount"]
else:
a["amount"] **= other
return a
def __iadd__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
self["amount"] += other["amount"]
else:
self["amount"] += other
return self
def __isub__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
self["amount"] -= other["amount"]
else:
self["amount"] -= other
return self
def __imul__(self, other):
if isinstance(other, Amount):
self["amount"] *= other["amount"]
else:
self["amount"] *= other
return self
def __idiv__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
return self["amount"] / other["amount"]
else:
self["amount"] /= other
return self
def __ifloordiv__(self, other):
if isinstance(other, Amount):
self["amount"] //= other["amount"]
else:
self["amount"] //= other
return self
def __imod__(self, other):
if isinstance(other, Amount):
self["amount"] %= other["amount"]
else:
self["amount"] %= other
return self
def __ipow__(self, other):
self["amount"] **= other
return self
def __lt__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
return self["amount"] < other["amount"]
else:
return self["amount"] < float(other or 0)
def __le__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
return self["amount"] <= other["amount"]
else:
return self["amount"] <= float(other or 0)
def __eq__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
return self["amount"] == other["amount"]
else:
return self["amount"] == float(other or 0)
def __ne__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
return self["amount"] != other["amount"]
else:
return self["amount"] != float(other or 0)
def __ge__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
return self["amount"] >= other["amount"]
else:
return self["amount"] >= float(other or 0)
def __gt__(self, other):
if isinstance(other, Amount):
assert other["asset"] == self["asset"]
return self["amount"] > other["amount"]
else:
return self["amount"] > float(other or 0)
__repr__ = __str__
__truediv__ = __div__
__truemul__ = __mul__
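
# --- Illustrative usage sketch (not part of the original golos module): a
# minimal example of combining and comparing Amount instances. The asset
# codes ("GOLOS", "GBG") come from the precision table in __str__ above;
# the concrete numbers are made up for the example.
if __name__ == "__main__":
    balance = Amount("10.000 GOLOS")
    payout = Amount("2.500 GOLOS")
    total = balance + payout            # Amounts of the same asset can be added.
    print(total)                        # "12.500 GOLOS"
    print(total > Amount("5 GOLOS"))    # True
    print(float(total) / 2)             # Fall back to plain floats: 6.25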
|
PypiClean
|
/Djblets-3.3.tar.gz/Djblets-3.3/docs/releasenotes/1.0.10.rst
|
.. default-intersphinx:: django1.6 djblets1.0
============================
Djblets 1.0.10 Release Notes
============================
**Release date**: February 19, 2019
djblets.avatars
===============
* Changed some avatar backend behavior to no longer raise exceptions for
unimplemented methods. (:bug:`4634`, :bug:`4635`)
Now, when :py:meth:`AvatarService.get_avatar_urls_uncached()
<djblets.avatars.services.base.AvatarService.get_avatar_urls_uncached>` is
not implemented, it will just return empty string values and log a warning.
:py:meth:`AvatarService.get_etag_data()
<djblets.avatars.services.base.AvatarService.get_etag_data>` is also no
longer required, and will return a default ETag based on the avatar
backend's ID and user ID.
Patch by Florie Cai.
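
  As a rough illustration, a custom backend can now be a minimal sketch like
  the following. Only :py:class:`~djblets.avatars.services.base.AvatarService`
  and :py:meth:`get_avatar_urls_uncached` come from the notes above; the
  backend ID, name, and method signature are assumptions made for the example.

  .. code-block:: python

     from djblets.avatars.services.base import AvatarService


     class InitialsAvatarService(AvatarService):
         """Hypothetical avatar backend, for illustration only."""

         avatar_service_id = 'initials'  # Attribute names are assumed.
         name = 'Initials'

         # get_etag_data() is intentionally omitted; per the notes above,
         # a default ETag based on the backend ID and user ID is used.
         def get_avatar_urls_uncached(self, user, size):
             # Signature assumed; return a mapping of resolution to URL.
             return {'1x': 'https://example.com/avatars/%s.png' % user.pk}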
djblets.forms
=============
* Fixed a state sharing bug with
:py:class:`~djblets.forms.fields.ConditionsField`.
If the :py:attr:`ConditionsField.choice_kwargs
<djblets.forms.fields.ConditionsField.choice_kwargs>` attribute on one
instance of a field for a form was modified, that modification would apply
to that field on any other instance of the form. This could impact other
viewers to the page, depending on the consumer's use of this attribute.
djblets.mail
============
* Added better control over :mailheader:`From` header spoofing. (:bug:`4578`)
:py:class:`~djblets.mail.message.EmailMessage` now has the ability to
completely disable spoofing of the :mailheader:`From` header, instead using
the server's predefined e-mail address for all outgoing e-mail messages.
This is done through the new ``from_spoofing`` argument, which is the
successor to the now-deprecated ``enable_smart_spoofing``. This argument
allows consumers to smart-spoof when safe (equivalent to
``enable_smart_spoofing=True``), always spoof
(``enable_smart_spoofing=False``) or to never spoof.
Consumers should move to this and the new equivalent Django setting,
``settings.DJBLETS_EMAIL_FROM_SPOOFING``.
Based on work by Brian LeBlanc.
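
  As a minimal sketch, disabling spoofing for a single message might look
  like the following. The ``FROM_SPOOFING_NEVER`` constant name and the
  keyword arguments other than ``from_spoofing`` are assumptions for the
  example; check the :py:class:`~djblets.mail.message.EmailMessage`
  documentation for the exact accepted values.

  .. code-block:: python

     from djblets.mail.message import EmailMessage

     message = EmailMessage(
         subject='Build finished',
         text_body='The nightly build completed successfully.',
         from_email='user@example.com',
         to=['team@example.com'],
         from_spoofing=EmailMessage.FROM_SPOOFING_NEVER)
     message.send()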
djblets.webapi
==============
* Improved the capabilities for API test suites.
:py:class:`~djblets.webapi.testing.testcases.WebAPITestCaseMixin` now has
a better, more standardized, documented set of arguments for the various API
testing methods, helping to create test suites for API resources.
Each testing method has gained an ``expected_num_queries`` parameter for
checking that the API handler made a specific number of SQL queries, helping
keep APIs lean.
All the testing methods now wrap
:py:meth:`~djblets.webapi.testing.testcases.WebAPITestCaseMixin.api_call`,
which can be used to test other HTTP methods. It can also be overridden to
provide custom behavior.
Note that this deprecates the ``query=`` argument to these methods.
``data=`` should be used in its place.
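
  A rough sketch of what a resource test can now look like is shown below.
  The ``api_get`` helper, the URL, and the query budget are assumptions for
  the example; ``expected_num_queries`` and ``data=`` are the documented
  parameters, and
  :py:meth:`~djblets.webapi.testing.testcases.WebAPITestCaseMixin.api_call`
  is the method the helpers wrap.

  .. code-block:: python

     from django.test import TestCase

     from djblets.webapi.testing.testcases import WebAPITestCaseMixin


     class WidgetResourceTests(WebAPITestCaseMixin, TestCase):
         """Hypothetical resource tests, for illustration only."""

         def test_get_list(self):
             """Testing GET widgets/ stays within its query budget"""
             rsp = self.api_get('/api/widgets/',
                                data={'counts-only': True},
                                expected_num_queries=3)
             self.assertEqual(rsp['stat'], 'ok')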
Contributors
============
* Brian LeBlanc
* Christian Hammond
* David Trowbridge
* Florie Cai
|
PypiClean
|
/backends_matrix-1.3.0-py3-none-any.whl/matrix/ops/broadcast_batch_to.py
|
import lab as B
from ..constant import Constant, Zero
from ..diagonal import Diagonal
from ..kronecker import Kronecker
from ..lowrank import LowRank
from ..matrix import Dense
from ..triangular import LowerTriangular, UpperTriangular
from ..woodbury import Woodbury
__all__ = []
# We don't implement a fallback method for `broadcast_batch_to`, because the function
# should always preserve the matrix type, which the fallback method will not do.
@B.dispatch
def broadcast_batch_to(a: B.Numeric, *batch: B.Int):
"""Broadcast the batch dimensions of a batched matrix.
Args:
a (matrix): Batched matrix.
*batch (int): Desired batch dimensions.
Returns:
matrix: `a` with broadcasted batch dimensions.
"""
return B.broadcast_to(a, *batch, *B.shape(a, -2, -1))
@B.dispatch
def broadcast_batch_to(a: Zero, *batch: B.Int):
return Zero(a.dtype, *batch, a.rows, a.cols)
@B.dispatch
def broadcast_batch_to(a: Dense, *batch: B.Int):
return Dense(broadcast_batch_to(B.dense(a), *batch))
@B.dispatch
def broadcast_batch_to(a: Constant, *batch: B.Int):
return Constant(B.broadcast_to(a.const, *batch), a.rows, a.cols)
@B.dispatch
def broadcast_batch_to(a: Diagonal, *batch: B.Int):
return Diagonal(B.broadcast_to(a.diag, *batch, B.shape(a.diag, -1)))
@B.dispatch
def broadcast_batch_to(a: LowerTriangular, *batch: B.Int):
return LowerTriangular(B.broadcast_batch_to(a.mat, *batch))
@B.dispatch
def broadcast_batch_to(a: UpperTriangular, *batch: B.Int):
return UpperTriangular(B.broadcast_batch_to(a.mat, *batch))
@B.dispatch
def broadcast_batch_to(a: LowRank, *batch: B.Int):
return LowRank(
B.broadcast_batch_to(a.left, *batch),
B.broadcast_batch_to(a.right, *batch),
B.broadcast_batch_to(a.middle, *batch),
)
@B.dispatch
def broadcast_batch_to(a: Woodbury, *batch: B.Int):
return Woodbury(
B.broadcast_batch_to(a.diag, *batch),
B.broadcast_batch_to(a.lr, *batch),
)
@B.dispatch
def broadcast_batch_to(a: Kronecker, *batch: B.Int):
return Kronecker(
B.broadcast_batch_to(a.left, *batch),
B.broadcast_batch_to(a.right, *batch),
)
B.broadcast_batch_to = broadcast_batch_to
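
# --- Illustrative usage sketch (not part of the original module): broadcast a
# Diagonal's batch dimensions while preserving its structured type. The
# top-level `from matrix import Diagonal` import and the use of NumPy inputs
# are assumptions for the example.
if __name__ == "__main__":
    import numpy as np

    from matrix import Diagonal

    d = Diagonal(np.ones(3))                # no batch dimensions yet
    batched = B.broadcast_batch_to(d, 4)    # still a Diagonal, now batched
    print(type(batched), B.shape(batched))  # expected shape: (4, 3, 3)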
|
PypiClean
|
/django-mapstore-adapter-1.0.0.tar.gz/django-mapstore-adapter-1.0.0/mapstore2_adapter/api/models.py
|
import logging
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_noop as _
from jsonfield import JSONField
log = logging.getLogger(__name__)
class MapStoreResource(models.Model):
user = models.ForeignKey(get_user_model())
id = models.BigIntegerField(
primary_key=True,
unique=True,
editable=True,
null=True,
blank=True)
name = models.CharField(
max_length=255,
unique=False,
blank=False,
null=False)
creation_date = models.DateTimeField(
null=True,
blank=True,
auto_now_add=True)
last_update = models.DateTimeField(
null=True,
blank=True,
auto_now=True)
data = models.OneToOneField(
"MapStoreData",
related_name="data",
null=True,
blank=True)
attributes = models.ManyToManyField(
"MapStoreAttribute",
related_name="attributes",
null=True,
blank=True)
class Meta:
indexes = [
models.Index(fields=['id', ]),
models.Index(fields=['name', ]),
]
class MapStoreAttribute(models.Model):
TYPE_STRING = 'string'
TYPE_NUMBER = 'number'
TYPE_INTEGER = 'integer'
TYPE_BOOLEAN = 'boolean'
TYPE_BINARY = 'binary'
TYPES = ((TYPE_STRING, _("String"),),
(TYPE_NUMBER, _("Number"),),
(TYPE_INTEGER, _("Integer",),),
(TYPE_BOOLEAN, _("Boolean",),),
(TYPE_BINARY, _("Binary",),),
)
name = models.CharField(
max_length=255,
unique=False,
blank=False,
null=False)
label = models.CharField(
max_length=255,
unique=False,
blank=True,
null=True)
type = models.CharField(
max_length=80,
unique=False,
blank=False,
null=False,
choices=TYPES)
value = models.TextField(
db_column='value',
blank=True)
resource = models.ForeignKey(
MapStoreResource,
null=False,
blank=False,
on_delete=models.CASCADE)
class MapStoreData(models.Model):
blob = JSONField(
null=False,
default={})
resource = models.ForeignKey(
MapStoreResource,
null=False,
blank=False,
on_delete=models.CASCADE)
|
PypiClean
|
/djnago_file_explorer-0.0.2-py3-none-any.whl/explorer/views/main.py
|
from django.http import FileResponse
import os
import shutil
from django import http
from django.views.generic.base import TemplateView
from django.urls import reverse
from django.shortcuts import render
from django.core.paginator import Paginator
from .utils.operations import ExplorerOperations
from .utils.request_validator import RequestValidator
class Explorer(TemplateView):
template_name = 'explorer/index.html'
http_method_names = ['get']
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.message = None
self.isFile = False
self.abs_location = None
self.redirect_url = None
        # PAGINATOR STATE
self.max_row_on_page = 12
self.max_page_link = 3
# STATE VARIABLES
self.selected_volume = None
self.location = None
self.action = None
self.page_number = None
return None
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return http.HttpResponseRedirect(f'/admin/login/?next={reverse("explorer-main")}')
return super().dispatch(request, *args, **kwargs)
def _updateState(self, request):
"""Update the status of state variables."""
        # MAKING CLASS VARIABLE
validator = RequestValidator(request)
# UPDATING
self.selected_volume = validator.getSelectedVolume()
self.action = validator.getAction()
self.location = validator.getLocation()
self.page_number = validator.getPageNumber()
return None
def _getVolumeContext(self, volume_list):
"""Generating context for the volumes."""
# EXCEPTION FOR EMPTY VOLUME LIST
if len(volume_list) == 0:
self.message = 'No volume assigned. Please contact Admin.'
return None
# EXCEPTION FOR NO SELECTION OF VOLUME
if self.selected_volume is None:
self.selected_volume = volume_list[0]
        # EXCEPTION FOR NONEXISTENT VOLUME
if self.selected_volume not in volume_list:
            self.message = f'Volume {self.selected_volume} does not exist.'
return None
# LOOPING THROUGH EACH VOLUME
volumes = []
for volume in volume_list:
if self.selected_volume == volume:
volumes.append({'name': volume, 'selected': 'active'})
else:
volumes.append({'name': volume, 'selected': 'deactive'})
return volumes
def _getPaginatorContext(self, page_data):
"""Generating context for the paginator."""
# EMPTY PAGINATOR DATA
paginator_data = {}
# GETTING PREVIOUS PAGE LINK
if page_data.has_previous():
paginator_data['previous_page_number'] = page_data.previous_page_number()
# GETTING NEXT PAGE LINK
if page_data.has_next():
paginator_data['next_page_number'] = page_data.next_page_number()
# GETTING START PAGE LINK
current_page_number = page_data.number
mid_point = self.max_page_link // 2
start_page_link = current_page_number - mid_point
if start_page_link < 1:
start_page_link = 1
# GETTING LAST PAGE LINK
last_page_link = current_page_number + mid_point
if last_page_link > page_data.paginator.num_pages:
last_page_link = page_data.paginator.num_pages
# LOOPING TO GET MIDDLE PAGES
middle_pages = []
for page in range(start_page_link, last_page_link+1):
# GETTING SELECTED STATUS
if page == current_page_number:
selected = 'active'
else:
selected = 'deactive'
middle_pages.append({
'number': page,
'selected': selected
})
paginator_data['middle_pages'] = middle_pages
return paginator_data
def get(self, request, *args, **kwargs):
# GETTING CONTEXT
context = self.get_context_data()
# UPDATING STATE
self._updateState(request)
# MAKING EXPLORER OPERATION CLASS
xops = ExplorerOperations(request.user)
# GETTING VOLUME INFORMATION
volume_list = xops.getVolumesNameList()
volumes = self._getVolumeContext(volume_list)
if volumes is None: # Error in the case of unknown volume.
return self.render_to_response(context)
context['volumes'] = volumes
volume_root_path = xops.getVolumeLocation(self.selected_volume)
        if volume_root_path is None:  # Volume root path does not exist.
self.message = xops.message
return self.render_to_response(context)
context['selected_vol'] = self.selected_volume
# GETTING ABS PATH
if (self.location is None) or (self.location == ''):
self.abs_location = volume_root_path
self.location = ''
else:
self.abs_location = os.path.join(volume_root_path, self.location)
abs_location, rel_location = self.abs_location, self.location
context['rel_location'] = rel_location
# CHECKING IF ABS PATH EXISTS
if not os.path.exists(self.abs_location):
            self.message = f'{rel_location} does not exist.'
return self.render_to_response(context)
# GETTING ACTION CONTEXT
context['actions'] = xops.getActions(self.selected_volume)
if (self.action not in context['actions']) and (self.action is not None):
self.message = f'{self.action} action is not allowed.'
return self.render_to_response(context)
# DELETING FILE IF REQUIRED
if self.action == 'delete':
if os.path.exists(self.abs_location):
try:
if os.path.isdir(self.abs_location):
shutil.rmtree(self.abs_location)
else:
os.remove(self.abs_location)
                except OSError:
self.message = f'Unable to remove: {rel_location}'
return self.render_to_response(context)
rel_location = os.path.split(rel_location)[0]
self.redirect_url = f'/explorer/?volume={self.selected_volume}&location={rel_location}'
return self.render_to_response(context)
else:
                self.message = f'{rel_location} does not exist.'
return self.render_to_response(context)
# CHECKING DIR OR FILE
if os.path.isfile(abs_location):
self.isFile = True
return self.render_to_response(context)
# GETTING DIR LIST
list_dir, file_count = xops.listDir(abs_location, rel_location)
context['file_count'] = file_count
        # GETTING NAVIGATION BAR
nev_bar = xops.getNevigationBar(rel_location)
context['nev_location_list'] = nev_bar
        # DEFINING PAGINATOR
p = Paginator(list_dir, self.max_row_on_page)
page_data = p.get_page(self.page_number)
context['page_data'] = page_data
        # CHECKING WHETHER THE PAGINATOR IS REQUIRED
if len(list_dir) > self.max_row_on_page:
paginator_required = True
paginator_data = self._getPaginatorContext(page_data)
else:
paginator_required = False
paginator_data = None
context['paginator_required'] = paginator_required
context['paginator_data'] = paginator_data
return self.render_to_response(context)
def render_to_response(self, context, **response_kwargs) -> http.HttpResponse:
# EXCEPTION FOR 404 PAGE NOT FOUND
if self.message:
return render(self.request, 'explorer/error.html', {'message': self.message})
# PERFORMING REDIRECT
if self.redirect_url:
return http.HttpResponseRedirect(self.redirect_url)
# PERFORMING OPERATION
if self.action == 'download': # Download case
if not self.isFile:
zip_file_path = os.path.join(os.path.split(self.abs_location)[0], f'{os.path.split(self.abs_location)[-1]}')
shutil.make_archive(zip_file_path, 'zip', self.abs_location)
return FileResponse(open(zip_file_path+'.zip', 'rb'))
else:
return FileResponse(open(self.abs_location, 'rb'), as_attachment=True)
else:
# IF FILE THEN VIEW IT
if self.isFile:
return FileResponse(open(self.abs_location, 'rb'), as_attachment=False)
return super().render_to_response(context, **response_kwargs)
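# For illustration only (not part of the original source): the view redirects
# unauthenticated users to the admin login with ``next=reverse("explorer-main")``
# and builds redirect URLs under ``/explorer/``, so a matching URL configuration
# would look roughly like:
#
#     from django.urls import path
#     from explorer.views.main import Explorer
#
#     urlpatterns = [
#         path("explorer/", Explorer.as_view(), name="explorer-main"),
#     ]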
|
PypiClean
|
/lilacai-0.0.9.tar.gz/lilacai-0.0.9/lilac/web/_app/immutable/chunks/singletons.3acb1171.js
|
import{w as u}from"./index.d3776286.js";var p;const k=((p=globalThis.__sveltekit_11myv4l)==null?void 0:p.base)??"";var h;const m=((h=globalThis.__sveltekit_11myv4l)==null?void 0:h.assets)??k,w="1692630711493",R="sveltekit:snapshot",T="sveltekit:scroll",I="sveltekit:index",f={tap:1,hover:2,viewport:3,eager:4,off:-1};function S(e){let t=e.baseURI;if(!t){const n=e.getElementsByTagName("base");t=n.length?n[0].href:e.URL}return t}function x(){return{x:pageXOffset,y:pageYOffset}}function c(e,t){return e.getAttribute(`data-sveltekit-${t}`)}const d={...f,"":f.hover};function g(e){let t=e.assignedSlot??e.parentNode;return(t==null?void 0:t.nodeType)===11&&(t=t.host),t}function O(e,t){for(;e&&e!==t;){if(e.nodeName.toUpperCase()==="A"&&e.hasAttribute("href"))return e;e=g(e)}}function U(e,t){let n;try{n=new URL(e instanceof SVGAElement?e.href.baseVal:e.href,document.baseURI)}catch{}const o=e instanceof SVGAElement?e.target.baseVal:e.target,l=!n||!!o||E(n,t)||(e.getAttribute("rel")||"").split(/\s+/).includes("external"),r=(n==null?void 0:n.origin)===location.origin&&e.hasAttribute("download");return{url:n,external:l,target:o,download:r}}function L(e){let t=null,n=null,o=null,l=null,r=null,a=null,s=e;for(;s&&s!==document.documentElement;)o===null&&(o=c(s,"preload-code")),l===null&&(l=c(s,"preload-data")),t===null&&(t=c(s,"keepfocus")),n===null&&(n=c(s,"noscroll")),r===null&&(r=c(s,"reload")),a===null&&(a=c(s,"replacestate")),s=g(s);function i(v){switch(v){case"":case"true":return!0;case"off":case"false":return!1;default:return null}}return{preload_code:d[o??"off"],preload_data:d[l??"off"],keep_focus:i(t),noscroll:i(n),reload:i(r),replace_state:i(a)}}function _(e){const t=u(e);let n=!0;function o(){n=!0,t.update(a=>a)}function l(a){n=!1,t.set(a)}function r(a){let s;return t.subscribe(i=>{(s===void 0||n&&i!==s)&&a(s=i)})}return{notify:o,set:l,subscribe:r}}function y(){const{set:e,subscribe:t}=u(!1);let n;async function o(){clearTimeout(n);try{const l=await fetch(`${m}/_app/version.json`,{headers:{pragma:"no-cache","cache-control":"no-cache"}});if(!l.ok)return!1;const a=(await l.json()).version!==w;return a&&(e(!0),clearTimeout(n)),a}catch{return!1}}return{subscribe:t,check:o}}function E(e,t){return e.origin!==location.origin||!e.pathname.startsWith(t)}let b;function N(e){b=e.client}function P(e){return(...t)=>b[e](...t)}const V={url:_({}),page:_({}),navigating:u(null),updated:y()};export{I,f as P,T as S,R as a,U as b,L as c,x as d,k as e,O as f,S as g,N as h,E as i,P as j,V as s};
|
PypiClean
|
/graph_diff-0.1.1-py3-none-any.whl/graph_diff/graph/standard_graph_generator.py
|
from .graph_generator import GraphGenerator
from .graph_with_repetitive_nodes_with_root import GraphWithRepetitiveNodesWithRoot, lr_node, rnr_graph
class StandardGraphGenerator(GraphGenerator):
"""Simplest algorithm that generates graphs"""
def __init__(self,
min_node_num=2,
max_node_num=30,
node_number_expectation=None):
self.min_node_num = min_node_num
self.max_node_num = max_node_num
if node_number_expectation is None:
self.node_number_expectation = max_node_num * 0.3
else:
self.node_number_expectation = node_number_expectation
def generate_graph(self):
"""
        Generates a random graph.
        The number of nodes follows a geometric distribution with
        node_number_expectation as its expectation.
        The number of labels follows a triangular distribution with
        parameters (1, (node_number - 1) / 5, node_number).
        Each edge from a previously added (non-root) node to a new node
        is added with probability 1/2.
:return: graph
"""
graph = rnr_graph()
import numpy.random
import math
# Expectation is equal to self.node_number_expectation
node_number = numpy.random.geometric(p=1 / self.node_number_expectation) + 1
node_number = max(self.min_node_num, node_number)
node_number = min(self.max_node_num, node_number)
a_label_number = 1
        # Mode for the number of labels is 20% of the number of nodes.
mode_label_number = math.ceil((node_number - 1) / 5)
b_label_number = node_number
label_number = int(math.ceil(numpy.random.triangular(left=a_label_number,
mode=mode_label_number,
right=b_label_number)))
node_labels = numpy.random.multinomial(n=node_number, pvals=[1 / label_number] * label_number)
node_labels = [ls + 1 for ls in node_labels]
for label, label_size in enumerate(node_labels):
label += 1 # labels start from 1
for i in range(1, label_size + 1): # numbers start from 1
new_node = lr_node(str(label), i)
graph.add_node(new_node)
for node in graph:
if 1 == numpy.random.randint(2) \
and node not in [new_node, GraphWithRepetitiveNodesWithRoot.ROOT]:
graph.add_edge_exp(from_node=node,
to_node=new_node)
return graph
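# A minimal usage sketch (not part of the original module; run e.g. via
# ``python -m graph_diff.graph.standard_graph_generator`` with the package installed).
if __name__ == "__main__":
    generator = StandardGraphGenerator(min_node_num=5, max_node_num=15)
    graph = generator.generate_graph()
    print("generated a graph with", sum(1 for _ in graph), "nodes")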
|
PypiClean
|
/nmc_met_base-0.1.5.1-py3-none-any.whl/nmc_met_base/geographical.py
|
# Copyright (c) 2019 NMC Developers.
# Distributed under the terms of the GPL V3 License.
"""
Geodesy calculation.
"""
import numpy as np
from numba import jit
import nmc_met_base.constants as const
def haversine_np(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
All args must be of equal length.
:param lon1: point 1 longitudes.
:param lat1: point 1 latitudes.
:param lon2: point 2 longitudes.
:param lat2: point 2 latitudes.
:return: great circle distance in meters.
"""
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
c = 2 * np.arcsin(np.sqrt(a))
return 6371.e3 * c
def area_weighted_mean(lon, lat, data):
"""Calculate the mean of gridded data on a sphere.
Data points on the Earth's surface are often represented as a grid. As the
grid cells do not have a constant area they have to be weighted when
calculating statistical properties (e.g. mean).
This function returns the weighted mean assuming a perfectly spherical
globe.
refer to:
https://github.com/atmtools/typhon/blob/master/typhon/geographical.py
Parameters:
lon (ndarray): Longitude (M) angles [degree].
lat (ndarray): Latitude (N) angles [degree].
        data (ndarray): Data array (N x M).
Returns:
float: Area weighted mean.
"""
# Calculate coordinates and steradian (in rad).
lon = np.deg2rad(lon)
lat = np.deg2rad(lat)
dlon = np.diff(lon)
dlat = np.diff(lat)
    # Longitudinal mean
middle_points = (data[:, 1:] + data[:, :-1]) / 2
norm = np.sum(dlon)
lon_integral = np.sum(middle_points * dlon, axis=1) / norm
    # Latitudinal mean
lon_integral *= np.cos(lat) # Consider varying grid area (N-S).
middle_points = (lon_integral[1:] + lon_integral[:-1]) / 2
norm = np.sum(np.cos((lat[1:] + lat[:-1]) / 2) * dlat)
return np.sum(middle_points * dlat) / norm
def stations_mean_distance(lon, lat):
"""
Determine the mean separation distances of the observing stations.
https://www.atmos.illinois.edu/~jtrapp/Ex3.1.pdf
Arguments:
lon {numpy array} -- longitude array.
lat {numpy array} -- latitude array.
"""
# check input vector
if len(lon) != len(lat):
raise Exception("lon length is not equal to lat length.")
    # compute minimum distance
min_dist = np.full(len(lon), 0.0)
for i in range(len(lat)):
dx = const.Re * np.cos(lat) * (lon - lon[i]) * const.d2r
dy = const.Re * (lat - lat[i]) * const.d2r
d = np.sqrt(dx*dx + dy*dy)
min_dist[i] = np.min(d[d != 0])
# return mean distance
return np.mean(min_dist)
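# A minimal usage sketch (not part of the original module); the coordinates below
# are arbitrary example values, and the package must be installed so that
# nmc_met_base.constants resolves.
if __name__ == "__main__":
    # great-circle distance between two points, in meters
    print(haversine_np(116.41, 39.90, 121.47, 31.23))
    # mean separation of a small synthetic station network (in the units of const.Re)
    lons = np.array([116.0, 117.0, 118.0])
    lats = np.array([39.0, 40.0, 41.0])
    print(stations_mean_distance(lons, lats))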
|
PypiClean
|
/bourbaki.application-0.10.12-py3-none-any.whl/bourbaki/application/logging/timing.py
|
from typing import Callable, Union, Optional as Opt
from time import time
from datetime import datetime, timedelta
from functools import partial
from logging import Logger, getLogger
from .defaults import PROGRESS, ERROR, DEFAULT_LOG_DATE_FMT
from .helpers import validate_log_level_int
unit_names = dict(
s="s",
seconds="s",
ms="ms",
milliseconds="ms",
mus="μs",
us="μs",
μs="μs",
microseconds="μs",
ns="ns",
nanoseconds="ns",
m="m",
mins="m",
minutes="m",
h="h",
hrs="h",
hours="h",
days="d",
d="d",
)
timedelta_units_kw = dict(
s="seconds", ms="milliseconds", μs="microseconds", m="minutes", h="hours", d="days"
)
from_seconds_multipliers = dict(
s=1.0, ms=1e3, μs=1e6, ns=1e9, m=1.0 / 6e1, h=1.0 / 36e2, d=1.0 / 864e2
)
class TimedTaskContext:
def __init__(
self,
job_name: str,
total_tasks: Opt[int] = None,
task_units: Opt[str] = None,
logger_or_print_func: Opt[Union[Logger, Callable[[str], type(None)]]] = print,
level=PROGRESS,
error_level=ERROR,
time_units: str = "s",
date_fmt=DEFAULT_LOG_DATE_FMT,
):
try:
self.time_units = unit_names[time_units]
            # timedelta has no nanoseconds argument; represent nanoseconds as
            # microseconds (divided by 1000 via timedelta_multiplier below).
            if self.time_units == "ns":
                self.timedelta_kw = "microseconds"
                self.timedelta_multiplier = 1000.0
            else:
                self.timedelta_kw = timedelta_units_kw[self.time_units]
self.timedelta_multiplier = 1.0
self.multiplier = from_seconds_multipliers[self.time_units]
except KeyError:
            raise ValueError(
                "{} is not a valid time units identifier; choose one of {}".format(
time_units, set(unit_names)
)
)
if isinstance(logger_or_print_func, Logger) or logger_or_print_func is None:
level = validate_log_level_int(level)
error_level = validate_log_level_int(error_level)
if logger_or_print_func is None:
logger = getLogger(job_name)
else:
logger = logger_or_print_func
self.logger = logger
self.print = partial(logger.log, level)
self.error = partial(logger.log, error_level)
else:
assert callable(
logger_or_print_func
), "print_func must be callable or a name for a logger"
self.logger = None
self.print = logger_or_print_func
self.error = self.print
self.total = int(total_tasks) if total_tasks is not None else None
self.completed = 0
self.task_units = str(task_units) if task_units is not None else ""
self.date_fmt = date_fmt
self.job_name = str(job_name)
self.start = None
self.n_digits = 4
def elapsed_since(self, start):
end = time()
elapsed = self.multiplier * (end - start)
return elapsed
def total_elapsed(self):
return self.elapsed_since(self.start)
def report_elapsed(self):
self.print(self._elapsed_time_message())
def report_progress(self, n_tasks: int, timing_info=True):
self.completed += n_tasks
msg = self._progress_message()
if timing_info:
msg = "{}; {}".format(msg, self._timing_info_message(prefix=False))
self.print(msg)
@property
def is_finished(self):
if self.total is None:
return False
return self.completed >= self.total
def _elapsed_time_message(self, start=None, prefix=True):
if start is None:
start = self.start
elapsed = self.elapsed_since(start)
return "{}elapsed time {}{}".format(
"{}{}: ".format(
self.job_name, " " + self.task_units if self.task_units else ""
)
if prefix
else "",
round(elapsed, self.n_digits),
self.time_units,
)
def _progress_message(self):
if self.total:
return "{}: completed {} of {}".format(
self.job_name, self.completed, self.total
)
else:
return "{}: completed {}".format(self.job_name, self.completed)
def _timing_info_message(self, prefix=True, estimate=True, completed=None):
if completed is None:
completed = self.completed
rate = completed / self.total_elapsed()
if estimate and self.total:
remaining = self.total - self.completed
expected_end = datetime.now() + timedelta(
                **{self.timedelta_kw: remaining / rate / self.timedelta_multiplier}
)
msg = "average rate {} {}/{}; expected completion {}".format(
round(rate, self.n_digits),
self.task_units or "tasks",
self.time_units,
expected_end.strftime(self.date_fmt),
)
else:
msg = "average rate {} {}/{}".format(
round(rate, self.n_digits), self.task_units or "tasks", self.time_units
)
if prefix:
msg = "{}: {}".format(self.job_name, msg)
return msg
def _error_message(self):
if self.total is not None:
return "FAILED {}; completed {} {}of {}".format(
self.job_name,
self.completed,
self.task_units + " " if self.task_units else "",
self.total,
)
return "FAILED {}".format(self.job_name)
def _reset_time(self):
self.start = time()
def __enter__(self):
if self.total is None:
self.print(
"STARTED {} at {}".format(
self.job_name, datetime.now().strftime(self.date_fmt)
)
)
else:
self.print(
"STARTED {} {} {}at {}".format(
self.job_name,
self.total,
self.task_units + " " if self.task_units else "",
datetime.now().strftime(self.date_fmt),
)
)
self._reset_time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None or exc_val is not None:
msg = self._error_message()
self.error(msg + "; {}".format(exc_val or exc_type))
raise (exc_val or exc_type)
else:
if self.total is not None:
self.print(
"FINISHED {}; {}".format(
self._elapsed_time_message(),
self._timing_info_message(
prefix=False,
estimate=False,
completed=self.total or self.completed,
),
)
)
else:
self.print("FINISHED {}".format(self._elapsed_time_message()))
timed_context = TimedTaskContext
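# A minimal usage sketch (not part of the original module; run e.g. via
# ``python -m bourbaki.application.logging.timing`` with the package installed):
# time a small batch of tasks, reporting progress through plain print().
if __name__ == "__main__":
    with timed_context("demo job", total_tasks=3, task_units="items") as ctx:
        for _ in range(3):
            ctx.report_progress(1)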
|
PypiClean
|
/timi-sqlalchemy-0.0.1.tar.gz/timi-sqlalchemy-0.0.1/lib/sqlalchemy/connectors/mxodbc.py
|
import re
import sys
import warnings
from . import Connector
class MxODBCConnector(Connector):
driver = "mxodbc"
supports_sane_multi_rowcount = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_native_decimal = True
@classmethod
def dbapi(cls):
# this classmethod will normally be replaced by an instance
# attribute of the same name, so this is normally only called once.
cls._load_mx_exceptions()
platform = sys.platform
if platform == "win32":
from mx.ODBC import Windows as Module
# this can be the string "linux2", and possibly others
elif "linux" in platform:
from mx.ODBC import unixODBC as Module
elif platform == "darwin":
from mx.ODBC import iODBC as Module
else:
raise ImportError("Unrecognized platform for mxODBC import")
return Module
@classmethod
def _load_mx_exceptions(cls):
""" Import mxODBC exception classes into the module namespace,
as if they had been imported normally. This is done here
to avoid requiring all SQLAlchemy users to install mxODBC.
"""
global InterfaceError, ProgrammingError
from mx.ODBC import InterfaceError
from mx.ODBC import ProgrammingError
def on_connect(self):
def connect(conn):
conn.stringformat = self.dbapi.MIXED_STRINGFORMAT
conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT
conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT
conn.errorhandler = self._error_handler()
return connect
def _error_handler(self):
""" Return a handler that adjusts mxODBC's raised Warnings to
emit Python standard warnings.
"""
from mx.ODBC.Error import Warning as MxOdbcWarning
def error_handler(connection, cursor, errorclass, errorvalue):
if issubclass(errorclass, MxOdbcWarning):
errorclass.__bases__ = (Warning,)
warnings.warn(
message=str(errorvalue), category=errorclass, stacklevel=2
)
else:
raise errorclass(errorvalue)
return error_handler
def create_connect_args(self, url):
r"""Return a tuple of \*args, \**kwargs for creating a connection.
The mxODBC 3.x connection constructor looks like this:
connect(dsn, user='', password='',
clear_auto_commit=1, errorhandler=None)
This method translates the values in the provided uri
into args and kwargs needed to instantiate an mxODBC Connection.
The arg 'errorhandler' is not used by SQLAlchemy and will
not be populated.
"""
opts = url.translate_connect_args(username="user")
opts.update(url.query)
args = opts.pop("host")
opts.pop("port", None)
opts.pop("database", None)
return (args,), opts
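    # For illustration only (an assumption, not from the original source): given a
    # URL such as ``mxodbc://someuser:somepass@somedsn``, the translation above
    # yields roughly ``(("somedsn",), {"user": "someuser", "password": "somepass"})``,
    # i.e. the DSN becomes the single positional argument and the credentials become
    # keyword arguments for the mxODBC ``connect()`` call.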
def is_disconnect(self, e, connection, cursor):
# TODO: eGenix recommends checking connection.closed here
# Does that detect dropped connections ?
if isinstance(e, self.dbapi.ProgrammingError):
return "connection already closed" in str(e)
elif isinstance(e, self.dbapi.Error):
return "[08S01]" in str(e)
else:
return False
def _get_server_version_info(self, connection):
# eGenix suggests using conn.dbms_version instead
# of what we're doing here
dbapi_con = connection.connection
version = []
r = re.compile(r"[.\-]")
# 18 == pyodbc.SQL_DBMS_VER
for n in r.split(dbapi_con.getinfo(18)[1]):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def _get_direct(self, context):
if context:
native_odbc_execute = context.execution_options.get(
"native_odbc_execute", "auto"
)
# default to direct=True in all cases, is more generally
# compatible especially with SQL Server
return False if native_odbc_execute is True else True
else:
return True
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(
statement, parameters, direct=self._get_direct(context)
)
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters, direct=self._get_direct(context))
|
PypiClean
|
/ecape-0.1.1-py3-none-any.whl/docs/html/_static/searchtools.js
|
"use strict";
/**
* Simple result scoring code.
*/
if (typeof Scorer === "undefined") {
var Scorer = {
// Implement the following function to further tweak the score for each result
// The function takes a result array [docname, title, anchor, descr, score, filename]
// and returns the new score.
/*
score: result => {
const [docname, title, anchor, descr, score, filename] = result
return score
},
*/
// query matches the full name of an object
objNameMatch: 11,
// or matches in the last dotted part of the object name
objPartialMatch: 6,
// Additive scores depending on the priority of the object
objPrio: {
0: 15, // used to be importantResults
1: 5, // used to be objectResults
2: -5, // used to be unimportantResults
},
// Used when the priority is not in the mapping.
objPrioDefault: 0,
// query found in title
title: 15,
partialTitle: 7,
// query found in terms
term: 5,
partialTerm: 2,
};
}
const _removeChildren = (element) => {
while (element && element.lastChild) element.removeChild(element.lastChild);
};
/**
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
*/
const _escapeRegExp = (string) =>
string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
const _displayItem = (item, searchTerms) => {
const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT;
const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
const [docName, title, anchor, descr, score, _filename] = item;
let listItem = document.createElement("li");
let requestUrl;
let linkUrl;
if (docBuilder === "dirhtml") {
// dirhtml builder
let dirname = docName + "/";
if (dirname.match(/\/index\/$/))
dirname = dirname.substring(0, dirname.length - 6);
else if (dirname === "index/") dirname = "";
requestUrl = docUrlRoot + dirname;
linkUrl = requestUrl;
} else {
// normal html builders
requestUrl = docUrlRoot + docName + docFileSuffix;
linkUrl = docName + docLinkSuffix;
}
let linkEl = listItem.appendChild(document.createElement("a"));
linkEl.href = linkUrl + anchor;
linkEl.dataset.score = score;
linkEl.innerHTML = title;
if (descr)
listItem.appendChild(document.createElement("span")).innerHTML =
" (" + descr + ")";
else if (showSearchSummary)
fetch(requestUrl)
.then((responseData) => responseData.text())
.then((data) => {
if (data)
listItem.appendChild(
Search.makeSearchSummary(data, searchTerms)
);
});
Search.output.appendChild(listItem);
};
const _finishSearch = (resultCount) => {
Search.stopPulse();
Search.title.innerText = _("Search Results");
if (!resultCount)
Search.status.innerText = Documentation.gettext(
"Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
);
else
Search.status.innerText = _(
`Search finished, found ${resultCount} page(s) matching the search query.`
);
};
const _displayNextItem = (
results,
resultCount,
searchTerms
) => {
// results left, load the summary and display it
// this is intended to be dynamic (don't sub resultsCount)
if (results.length) {
_displayItem(results.pop(), searchTerms);
setTimeout(
() => _displayNextItem(results, resultCount, searchTerms),
5
);
}
// search finished, update title and status message
else _finishSearch(resultCount);
};
/**
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
* custom function per language.
*
* The regular expression works by splitting the string on consecutive characters
* that are not Unicode letters, numbers, underscores, or emoji characters.
* This is the same as ``\W+`` in Python, preserving the surrogate pair area.
*/
if (typeof splitQuery === "undefined") {
var splitQuery = (query) => query
.split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
.filter(term => term) // remove remaining empty strings
}
/**
* Search Module
*/
const Search = {
_index: null,
_queued_query: null,
_pulse_status: -1,
htmlToText: (htmlString) => {
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
const docContent = htmlElement.querySelector('[role="main"]');
if (docContent !== undefined) return docContent.textContent;
    console.warn(
      "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Check your theme or template."
    );
return "";
},
init: () => {
const query = new URLSearchParams(window.location.search).get("q");
document
.querySelectorAll('input[name="q"]')
.forEach((el) => (el.value = query));
if (query) Search.performSearch(query);
},
loadIndex: (url) =>
(document.body.appendChild(document.createElement("script")).src = url),
setIndex: (index) => {
Search._index = index;
if (Search._queued_query !== null) {
const query = Search._queued_query;
Search._queued_query = null;
Search.query(query);
}
},
hasIndex: () => Search._index !== null,
deferQuery: (query) => (Search._queued_query = query),
stopPulse: () => (Search._pulse_status = -1),
startPulse: () => {
if (Search._pulse_status >= 0) return;
const pulse = () => {
Search._pulse_status = (Search._pulse_status + 1) % 4;
Search.dots.innerText = ".".repeat(Search._pulse_status);
if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
};
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
performSearch: (query) => {
// create the required interface elements
const searchText = document.createElement("h2");
searchText.textContent = _("Searching");
const searchSummary = document.createElement("p");
searchSummary.classList.add("search-summary");
searchSummary.innerText = "";
const searchList = document.createElement("ul");
searchList.classList.add("search");
const out = document.getElementById("search-results");
Search.title = out.appendChild(searchText);
Search.dots = Search.title.appendChild(document.createElement("span"));
Search.status = out.appendChild(searchSummary);
Search.output = out.appendChild(searchList);
const searchProgress = document.getElementById("search-progress");
// Some themes don't use the search progress node
if (searchProgress) {
searchProgress.innerText = _("Preparing search...");
}
Search.startPulse();
// index already loaded, the browser was quick!
if (Search.hasIndex()) Search.query(query);
else Search.deferQuery(query);
},
/**
* execute search (requires search index to be loaded)
*/
query: (query) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const allTitles = Search._index.alltitles;
const indexEntries = Search._index.indexentries;
// stem the search terms and add them to the correct list
const stemmer = new Stemmer();
const searchTerms = new Set();
const excludedTerms = new Set();
const highlightTerms = new Set();
const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
splitQuery(query.trim()).forEach((queryTerm) => {
const queryTermLower = queryTerm.toLowerCase();
// maybe skip this "word"
// stopwords array is from language_data.js
if (
stopwords.indexOf(queryTermLower) !== -1 ||
queryTerm.match(/^\d+$/)
)
return;
// stem the word
let word = stemmer.stemWord(queryTermLower);
// select the correct list
if (word[0] === "-") excludedTerms.add(word.substr(1));
else {
searchTerms.add(word);
highlightTerms.add(queryTermLower);
}
});
if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
}
// console.debug("SEARCH: searching for:");
// console.info("required: ", [...searchTerms]);
// console.info("excluded: ", [...excludedTerms]);
// array of [docname, title, anchor, descr, score, filename]
let results = [];
_removeChildren(document.getElementById("search-progress"));
const queryLower = query.toLowerCase();
for (const [title, foundTitles] of Object.entries(allTitles)) {
if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
for (const [file, id] of foundTitles) {
let score = Math.round(100 * queryLower.length / title.length)
results.push([
docNames[file],
titles[file] !== title ? `${titles[file]} > ${title}` : title,
id !== null ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// search for explicit entries in index directives
for (const [entry, foundEntries] of Object.entries(indexEntries)) {
if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
for (const [file, id] of foundEntries) {
let score = Math.round(100 * queryLower.length / entry.length)
results.push([
docNames[file],
titles[file],
id ? "#" + id : "",
null,
score,
filenames[file],
]);
}
}
}
// lookup as object
objectTerms.forEach((term) =>
results.push(...Search.performObjectSearch(term, objectTerms))
);
// lookup as search terms in fulltext
results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
// now sort the results by score (in opposite order of appearance, since the
// display function below uses pop() to retrieve items) and then
// alphabetically
results.sort((a, b) => {
const leftScore = a[4];
const rightScore = b[4];
if (leftScore === rightScore) {
// same score: sort alphabetically
const leftTitle = a[1].toLowerCase();
const rightTitle = b[1].toLowerCase();
if (leftTitle === rightTitle) return 0;
return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
}
return leftScore > rightScore ? 1 : -1;
});
// remove duplicate search results
// note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
let seen = new Set();
results = results.reverse().reduce((acc, result) => {
let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
if (!seen.has(resultStr)) {
acc.push(result);
seen.add(resultStr);
}
return acc;
}, []);
results = results.reverse();
// for debugging
//Search.lastresults = results.slice(); // a copy
// console.info("search results:", Search.lastresults);
// print the results
_displayNextItem(results, results.length, searchTerms);
},
/**
* search for object names
*/
performObjectSearch: (object, objectTerms) => {
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const objects = Search._index.objects;
const objNames = Search._index.objnames;
const titles = Search._index.titles;
const results = [];
const objectSearchCallback = (prefix, match) => {
const name = match[4]
const fullname = (prefix ? prefix + "." : "") + name;
const fullnameLower = fullname.toLowerCase();
if (fullnameLower.indexOf(object) < 0) return;
let score = 0;
const parts = fullnameLower.split(".");
// check for different match types: exact matches of full name or
// "last name" (i.e. last dotted part)
if (fullnameLower === object || parts.slice(-1)[0] === object)
score += Scorer.objNameMatch;
else if (parts.slice(-1)[0].indexOf(object) > -1)
score += Scorer.objPartialMatch; // matches in last name
const objName = objNames[match[1]][2];
const title = titles[match[0]];
// If more than one term searched for, we require other words to be
// found in the name/title/description
const otherTerms = new Set(objectTerms);
otherTerms.delete(object);
if (otherTerms.size > 0) {
const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
if (
[...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
)
return;
}
let anchor = match[3];
if (anchor === "") anchor = fullname;
else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
const descr = objName + _(", in ") + title;
// add custom score for some objects according to scorer
if (Scorer.objPrio.hasOwnProperty(match[2]))
score += Scorer.objPrio[match[2]];
else score += Scorer.objPrioDefault;
results.push([
docNames[match[0]],
fullname,
"#" + anchor,
descr,
score,
filenames[match[0]],
]);
};
Object.keys(objects).forEach((prefix) =>
objects[prefix].forEach((array) =>
objectSearchCallback(prefix, array)
)
);
return results;
},
/**
* search for full-text terms in the index
*/
performTermsSearch: (searchTerms, excludedTerms) => {
// prepare search
const terms = Search._index.terms;
const titleTerms = Search._index.titleterms;
const filenames = Search._index.filenames;
const docNames = Search._index.docnames;
const titles = Search._index.titles;
const scoreMap = new Map();
const fileMap = new Map();
// perform the search on the required terms
searchTerms.forEach((word) => {
const files = [];
const arr = [
{ files: terms[word], score: Scorer.term },
{ files: titleTerms[word], score: Scorer.title },
];
// add support for partial matches
if (word.length > 2) {
const escapedWord = _escapeRegExp(word);
Object.keys(terms).forEach((term) => {
if (term.match(escapedWord) && !terms[word])
arr.push({ files: terms[term], score: Scorer.partialTerm });
});
Object.keys(titleTerms).forEach((term) => {
if (term.match(escapedWord) && !titleTerms[word])
arr.push({ files: titleTerms[word], score: Scorer.partialTitle });
});
}
// no match but word was a required one
if (arr.every((record) => record.files === undefined)) return;
// found search word in contents
arr.forEach((record) => {
if (record.files === undefined) return;
let recordFiles = record.files;
if (recordFiles.length === undefined) recordFiles = [recordFiles];
files.push(...recordFiles);
// set score for the word in each file
recordFiles.forEach((file) => {
if (!scoreMap.has(file)) scoreMap.set(file, {});
scoreMap.get(file)[word] = record.score;
});
});
// create the mapping
files.forEach((file) => {
if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
fileMap.get(file).push(word);
else fileMap.set(file, [word]);
});
});
// now check if the files don't contain excluded terms
const results = [];
for (const [file, wordList] of fileMap) {
// check if all requirements are matched
// as search terms with length < 3 are discarded
const filteredTermCount = [...searchTerms].filter(
(term) => term.length > 2
).length;
if (
wordList.length !== searchTerms.size &&
wordList.length !== filteredTermCount
)
continue;
// ensure that none of the excluded terms is in the search result
if (
[...excludedTerms].some(
(term) =>
terms[term] === file ||
titleTerms[term] === file ||
(terms[term] || []).includes(file) ||
(titleTerms[term] || []).includes(file)
)
)
break;
// select one (max) score for the file.
const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
// add result to the result list
results.push([
docNames[file],
titles[file],
"",
null,
score,
filenames[file],
]);
}
return results;
},
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words.
*/
makeSearchSummary: (htmlText, keywords) => {
const text = Search.htmlToText(htmlText);
if (text === "") return null;
const textLower = text.toLowerCase();
const actualStartPosition = [...keywords]
.map((k) => textLower.indexOf(k.toLowerCase()))
.filter((i) => i > -1)
.slice(-1)[0];
const startWithContext = Math.max(actualStartPosition - 120, 0);
const top = startWithContext === 0 ? "" : "...";
const tail = startWithContext + 240 < text.length ? "..." : "";
let summary = document.createElement("p");
summary.classList.add("context");
summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
return summary;
},
};
_ready(Search.init);
|
PypiClean
|
/azure_mgmt_compute-30.1.0-py3-none-any.whl/azure/mgmt/compute/v2021_10_01/models/__init__.py
|
from ._models_py3 import ApiError
from ._models_py3 import ApiErrorBase
from ._models_py3 import CommunityGalleryInfo
from ._models_py3 import DataDiskImageEncryption
from ._models_py3 import Disallowed
from ._models_py3 import DiskImageEncryption
from ._models_py3 import EncryptionImages
from ._models_py3 import Gallery
from ._models_py3 import GalleryApplication
from ._models_py3 import GalleryApplicationList
from ._models_py3 import GalleryApplicationUpdate
from ._models_py3 import GalleryApplicationVersion
from ._models_py3 import GalleryApplicationVersionList
from ._models_py3 import GalleryApplicationVersionPublishingProfile
from ._models_py3 import GalleryApplicationVersionUpdate
from ._models_py3 import GalleryArtifactPublishingProfileBase
from ._models_py3 import GalleryArtifactSource
from ._models_py3 import GalleryArtifactVersionSource
from ._models_py3 import GalleryDataDiskImage
from ._models_py3 import GalleryDiskImage
from ._models_py3 import GalleryExtendedLocation
from ._models_py3 import GalleryIdentifier
from ._models_py3 import GalleryImage
from ._models_py3 import GalleryImageFeature
from ._models_py3 import GalleryImageIdentifier
from ._models_py3 import GalleryImageList
from ._models_py3 import GalleryImageUpdate
from ._models_py3 import GalleryImageVersion
from ._models_py3 import GalleryImageVersionList
from ._models_py3 import GalleryImageVersionPublishingProfile
from ._models_py3 import GalleryImageVersionStorageProfile
from ._models_py3 import GalleryImageVersionUpdate
from ._models_py3 import GalleryList
from ._models_py3 import GalleryOSDiskImage
from ._models_py3 import GalleryTargetExtendedLocation
from ._models_py3 import GalleryUpdate
from ._models_py3 import ImagePurchasePlan
from ._models_py3 import InnerError
from ._models_py3 import ManagedArtifact
from ._models_py3 import OSDiskImageEncryption
from ._models_py3 import OSDiskImageSecurityProfile
from ._models_py3 import RecommendedMachineConfiguration
from ._models_py3 import RegionalReplicationStatus
from ._models_py3 import RegionalSharingStatus
from ._models_py3 import ReplicationStatus
from ._models_py3 import Resource
from ._models_py3 import ResourceRange
from ._models_py3 import SharingProfile
from ._models_py3 import SharingProfileGroup
from ._models_py3 import SharingStatus
from ._models_py3 import SharingUpdate
from ._models_py3 import SoftDeletePolicy
from ._models_py3 import TargetRegion
from ._models_py3 import UpdateResourceDefinition
from ._models_py3 import UserArtifactManage
from ._models_py3 import UserArtifactSource
from ._compute_management_client_enums import AggregatedReplicationState
from ._compute_management_client_enums import Architecture
from ._compute_management_client_enums import ConfidentialVMEncryptionType
from ._compute_management_client_enums import GalleryApplicationVersionPropertiesProvisioningState
from ._compute_management_client_enums import GalleryExpandParams
from ._compute_management_client_enums import GalleryExtendedLocationType
from ._compute_management_client_enums import GalleryImagePropertiesProvisioningState
from ._compute_management_client_enums import GalleryImageVersionPropertiesProvisioningState
from ._compute_management_client_enums import GalleryPropertiesProvisioningState
from ._compute_management_client_enums import GallerySharingPermissionTypes
from ._compute_management_client_enums import HostCaching
from ._compute_management_client_enums import HyperVGeneration
from ._compute_management_client_enums import OperatingSystemStateTypes
from ._compute_management_client_enums import OperatingSystemTypes
from ._compute_management_client_enums import ReplicationMode
from ._compute_management_client_enums import ReplicationState
from ._compute_management_client_enums import ReplicationStatusTypes
from ._compute_management_client_enums import SelectPermissions
from ._compute_management_client_enums import SharingProfileGroupTypes
from ._compute_management_client_enums import SharingState
from ._compute_management_client_enums import SharingUpdateOperationTypes
from ._compute_management_client_enums import StorageAccountType
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"ApiError",
"ApiErrorBase",
"CommunityGalleryInfo",
"DataDiskImageEncryption",
"Disallowed",
"DiskImageEncryption",
"EncryptionImages",
"Gallery",
"GalleryApplication",
"GalleryApplicationList",
"GalleryApplicationUpdate",
"GalleryApplicationVersion",
"GalleryApplicationVersionList",
"GalleryApplicationVersionPublishingProfile",
"GalleryApplicationVersionUpdate",
"GalleryArtifactPublishingProfileBase",
"GalleryArtifactSource",
"GalleryArtifactVersionSource",
"GalleryDataDiskImage",
"GalleryDiskImage",
"GalleryExtendedLocation",
"GalleryIdentifier",
"GalleryImage",
"GalleryImageFeature",
"GalleryImageIdentifier",
"GalleryImageList",
"GalleryImageUpdate",
"GalleryImageVersion",
"GalleryImageVersionList",
"GalleryImageVersionPublishingProfile",
"GalleryImageVersionStorageProfile",
"GalleryImageVersionUpdate",
"GalleryList",
"GalleryOSDiskImage",
"GalleryTargetExtendedLocation",
"GalleryUpdate",
"ImagePurchasePlan",
"InnerError",
"ManagedArtifact",
"OSDiskImageEncryption",
"OSDiskImageSecurityProfile",
"RecommendedMachineConfiguration",
"RegionalReplicationStatus",
"RegionalSharingStatus",
"ReplicationStatus",
"Resource",
"ResourceRange",
"SharingProfile",
"SharingProfileGroup",
"SharingStatus",
"SharingUpdate",
"SoftDeletePolicy",
"TargetRegion",
"UpdateResourceDefinition",
"UserArtifactManage",
"UserArtifactSource",
"AggregatedReplicationState",
"Architecture",
"ConfidentialVMEncryptionType",
"GalleryApplicationVersionPropertiesProvisioningState",
"GalleryExpandParams",
"GalleryExtendedLocationType",
"GalleryImagePropertiesProvisioningState",
"GalleryImageVersionPropertiesProvisioningState",
"GalleryPropertiesProvisioningState",
"GallerySharingPermissionTypes",
"HostCaching",
"HyperVGeneration",
"OperatingSystemStateTypes",
"OperatingSystemTypes",
"ReplicationMode",
"ReplicationState",
"ReplicationStatusTypes",
"SelectPermissions",
"SharingProfileGroupTypes",
"SharingState",
"SharingUpdateOperationTypes",
"StorageAccountType",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
PypiClean
|
/nats_python-0.8.0-py3-none-any.whl/pynats/client.py
|
import io
import json
import re
import socket
import ssl
from dataclasses import dataclass
from typing import BinaryIO, Callable, Dict, Match, Optional, Pattern, Tuple, Union
from urllib.parse import urlparse
import pkg_resources
from pynats.exceptions import (
NATSInvalidResponse,
NATSInvalidSchemeError,
NATSReadSocketError,
NATSTCPConnectionRequiredError,
NATSTLSConnectionRequiredError,
NATSUnexpectedResponse,
)
from pynats.nuid import NUID
__all__ = ("NATSSubscription", "NATSMessage", "NATSClient")
INFO_OP = b"INFO"
CONNECT_OP = b"CONNECT"
PING_OP = b"PING"
PONG_OP = b"PONG"
SUB_OP = b"SUB"
UNSUB_OP = b"UNSUB"
PUB_OP = b"PUB"
MSG_OP = b"MSG"
OK_OP = b"+OK"
ERR_OP = b"-ERR"
INFO_RE = re.compile(rb"^INFO\s+([^\r\n]+)\r\n")
PING_RE = re.compile(rb"^PING\r\n")
PONG_RE = re.compile(rb"^PONG\r\n")
MSG_RE = re.compile(
rb"^MSG\s+(?P<subject>[^\s\r\n]+)\s+(?P<sid>[^\s\r\n]+)\s+(?P<reply>([^\s\r\n]+)[^\S\r\n]+)?(?P<size>\d+)\r\n" # noqa
)
OK_RE = re.compile(rb"^\+OK\s*\r\n")
ERR_RE = re.compile(rb"^-ERR\s+('.+')?\r\n")
_CRLF_ = b"\r\n"
_SPC_ = b" "
COMMANDS = {
INFO_OP: INFO_RE,
PING_OP: PING_RE,
PONG_OP: PONG_RE,
MSG_OP: MSG_RE,
OK_OP: OK_RE,
ERR_OP: ERR_RE,
}
INBOX_PREFIX = bytearray(b"_INBOX.")
@dataclass
class NATSSubscription:
sid: int
subject: str
queue: str
callback: Callable
max_messages: Optional[int] = None
received_messages: int = 0
def is_wasted(self):
return (
self.max_messages is not None
and self.received_messages == self.max_messages
)
@dataclass
class NATSMessage:
sid: int
subject: str
reply: str
payload: bytes
@dataclass
class NATSConnOptions:
hostname: Optional[str]
port: Optional[int]
username: Optional[str]
password: Optional[str]
scheme: str
name: str = "nats-python"
lang: str = "python"
protocol: int = 0
tls_cacert: Optional[str] = None
tls_client_cert: Optional[str] = None
tls_client_key: Optional[str] = None
tls_hostname: Optional[str] = None
tls_verify: bool = False
version: str = pkg_resources.get_distribution("nats-python").version
verbose: bool = False
pedantic: bool = False
class NATSClient:
__slots__ = (
"_conn_options",
"_socket",
"_socket_file",
"_socket_options",
"_ssid",
"_subs",
"_nuid",
)
def __init__(
self,
url: str = "nats://127.0.0.1:4222",
*,
name: str = "nats-python",
verbose: bool = False,
pedantic: bool = False,
tls_cacert: Optional[str] = None,
tls_client_cert: Optional[str] = None,
tls_client_key: Optional[str] = None,
tls_hostname: Optional[str] = None,
tls_verify: bool = False,
        socket_timeout: Optional[float] = None,
socket_keepalive: bool = False,
) -> None:
parsed = urlparse(url)
self._conn_options = NATSConnOptions(
hostname=parsed.hostname,
port=parsed.port,
username=parsed.username,
password=parsed.password,
scheme=parsed.scheme,
name=name,
tls_cacert=tls_cacert,
tls_client_cert=tls_client_cert,
tls_client_key=tls_client_key,
tls_hostname=tls_hostname,
tls_verify=tls_verify,
verbose=verbose,
pedantic=pedantic,
)
self._socket: socket.socket
self._socket_file: BinaryIO
self._socket_options = {
"timeout": socket_timeout,
"keepalive": socket_keepalive,
}
self._ssid = 0
self._subs: Dict[int, NATSSubscription] = {}
self._nuid = NUID()
def __enter__(self) -> "NATSClient":
self.connect()
return self
def __exit__(self, type_, value, traceback) -> None:
self.close()
def connect(self) -> None:
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._socket_options["keepalive"]:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.settimeout(self._socket_options["timeout"])
sock.connect((self._conn_options.hostname, self._conn_options.port))
self._socket_file = sock.makefile("rb")
self._socket = sock
scheme = self._conn_options.scheme
if scheme == "nats":
self._try_connection(tls_required=False)
elif scheme == "tls":
self._try_connection(tls_required=True)
self._connect_tls()
else:
raise NATSInvalidSchemeError(f"got unsupported URI scheme: {scheme}")
self._send_connect_command()
if self._conn_options.verbose:
self._recv(OK_RE)
def _try_connection(self, *, tls_required: bool) -> None:
_, result = self._recv(INFO_RE)
server_info = json.loads(result.group(1))
server_tls_required = server_info.get("tls_required", False)
if not tls_required and server_tls_required:
raise NATSTLSConnectionRequiredError()
elif tls_required and not server_tls_required:
raise NATSTCPConnectionRequiredError()
def _connect_tls(self) -> None:
ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
if not self._conn_options.tls_verify:
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
if self._conn_options.tls_cacert is not None:
ctx.load_verify_locations(cafile=self._conn_options.tls_cacert)
if (
self._conn_options.tls_client_cert is not None
and self._conn_options.tls_client_key is not None
):
ctx.load_cert_chain(
certfile=self._conn_options.tls_client_cert,
keyfile=self._conn_options.tls_client_key,
)
hostname = self._conn_options.hostname
if self._conn_options.tls_hostname is not None:
hostname = self._conn_options.tls_hostname
self._socket = ctx.wrap_socket(self._socket, server_hostname=hostname)
self._socket_file = self._socket.makefile("rb")
def close(self) -> None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket_file.close()
self._socket.close()
def reconnect(self) -> None:
self.close()
self.connect()
def ping(self) -> None:
self._send(PING_OP)
self._recv(PONG_RE)
def subscribe(
self,
subject: str,
*,
callback: Callable,
queue: str = "",
max_messages: Optional[int] = None,
) -> NATSSubscription:
sub = NATSSubscription(
sid=self._ssid,
subject=subject,
queue=queue,
callback=callback,
max_messages=max_messages,
)
self._ssid += 1
self._subs[sub.sid] = sub
self._send(SUB_OP, sub.subject, sub.queue, sub.sid)
return sub
def unsubscribe(self, sub: NATSSubscription) -> None:
self._send(UNSUB_OP, sub.sid)
self._subs.pop(sub.sid)
def auto_unsubscribe(self, sub: NATSSubscription) -> None:
if sub.max_messages is None:
return
self._send(UNSUB_OP, sub.sid, sub.max_messages)
def publish(self, subject: str, *, payload: bytes = b"", reply: str = "") -> None:
self._send(PUB_OP, subject, reply, len(payload))
self._send(payload)
def request(self, subject: str, *, payload: bytes = b"") -> NATSMessage:
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next_())
reply_subject = next_inbox.decode()
reply_messages: Dict[int, NATSMessage] = {}
def callback(message: NATSMessage) -> None:
reply_messages[message.sid] = message
sub = self.subscribe(reply_subject, callback=callback, max_messages=1)
self.auto_unsubscribe(sub)
self.publish(subject, payload=payload, reply=reply_subject)
self.wait(count=1)
return reply_messages[sub.sid]
def wait(self, *, count=None) -> None:
total = 0
while True:
command, result = self._recv(MSG_RE, PING_RE, OK_RE)
if command is MSG_RE:
self._handle_message(result)
total += 1
if count is not None and total >= count:
break
elif command is PING_RE:
self._send(PONG_OP)
def _send_connect_command(self) -> None:
options = {
"name": self._conn_options.name,
"lang": self._conn_options.lang,
"protocol": self._conn_options.protocol,
"version": self._conn_options.version,
"verbose": self._conn_options.verbose,
"pedantic": self._conn_options.pedantic,
}
if self._conn_options.username and self._conn_options.password:
options["user"] = self._conn_options.username
options["pass"] = self._conn_options.password
elif self._conn_options.username:
options["auth_token"] = self._conn_options.username
self._send(CONNECT_OP, json.dumps(options))
def _send(self, *parts: Union[bytes, str, int]) -> None:
self._socket.sendall(_SPC_.join(self._encode(p) for p in parts) + _CRLF_)
def _encode(self, value: Union[bytes, str, int]) -> bytes:
if isinstance(value, bytes):
return value
elif isinstance(value, str):
return value.encode()
elif isinstance(value, int): # pragma: no branch
return f"{value:d}".encode()
raise RuntimeError(f"got unsupported type for encoding: type={type(value)}")
def _recv(self, *commands: Pattern[bytes]) -> Tuple[Pattern[bytes], Match[bytes]]:
line = self._readline()
command = self._get_command(line)
if command not in commands:
raise NATSUnexpectedResponse(line)
result = command.match(line)
if result is None:
raise NATSInvalidResponse(line)
return command, result
    def _readline(self, *, size: Optional[int] = None) -> bytes:
read = io.BytesIO()
while True:
line = self._socket_file.readline()
if not line:
raise NATSReadSocketError()
read.write(line)
if size is not None:
if read.tell() == size + len(_CRLF_):
break
elif line.endswith(_CRLF_): # pragma: no branch
break
return read.getvalue()
def _strip(self, line: bytes) -> bytes:
return line[: -len(_CRLF_)]
def _get_command(self, line: bytes) -> Optional[Pattern[bytes]]:
values = self._strip(line).split(b" ", 1)
return COMMANDS.get(values[0])
def _handle_message(self, result: Match[bytes]) -> None:
message_data = result.groupdict()
message_payload_size = int(message_data["size"])
message_payload = self._readline(size=message_payload_size)
message_payload = self._strip(message_payload)
message = NATSMessage(
sid=int(message_data["sid"].decode()),
subject=message_data["subject"].decode(),
reply=message_data["reply"].decode() if message_data["reply"] else "",
payload=message_payload,
)
sub = self._subs[message.sid]
sub.received_messages += 1
if sub.is_wasted():
self._subs.pop(sub.sid)
sub.callback(message)
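# A minimal usage sketch (not part of the original module); it assumes a NATS
# server is reachable on the default nats://127.0.0.1:4222 address.
if __name__ == "__main__":
    with NATSClient() as client:
        client.subscribe("demo.subject", callback=lambda msg: print(msg.payload))
        client.publish("demo.subject", payload=b"hello")
        client.wait(count=1)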
|
PypiClean
|
/avalon-colorbleed-1.0.2.tar.gz/avalon-colorbleed-1.0.2/colorbleed/launcher_actions.py
|
import json
import logging
import os
import sys
from avalon import api, lib, pipeline
from avalon.vendor import six
log = logging.getLogger(__name__)
class BaseProjectAction(api.Action):
"""A Base Action that mimics avalon core Application action.
However this one does not need an AVALON_WORKDIR or asset and task
to operate on. It can run just on the base of the project and will
not initialize any work folder.
    This allows using the same .toml setup for defining the application
    environment, so we can mimic the configuration while running the
    application in a different state.
"""
def __init__(self):
self.config = lib.get_application(self.name)
def environ(self, session):
"""Build application environment"""
session = session.copy()
# Construct application environment from .toml config
app_environment = self.config.get("environment", {})
for key, value in app_environment.copy().items():
if isinstance(value, list):
# Treat list values as paths, e.g. PYTHONPATH=[]
app_environment[key] = os.pathsep.join(value)
elif isinstance(value, six.string_types):
if lib.PY2:
# Protect against unicode in the environment
encoding = sys.getfilesystemencoding()
app_environment[key] = value.encode(encoding)
else:
app_environment[key] = value
else:
log.error(
"%s: Unsupported environment reference in %s for %s"
% (value, self.name, key)
)
# Build environment
env = os.environ.copy()
env.update(session)
app_environment = self._format(app_environment, **env)
env.update(app_environment)
return env
def launch(self, environment):
executable = lib.which(self.config["executable"])
if executable is None:
raise ValueError(
"'%s' not found on your PATH\n%s"
% (self.config["executable"], os.getenv("PATH"))
)
args = self.config.get("args", [])
return lib.launch(
executable=executable,
args=args,
environment=environment
)
def process(self, session, **kwargs):
"""Process the full Application action"""
environment = self.environ(session)
if kwargs.get("launch", True):
return self.launch(environment)
def _format(self, original, **kwargs):
"""Utility recursive dict formatting that logs the error clearly."""
try:
return lib.dict_format(original, **kwargs)
except KeyError as e:
log.error(
"One of the {variables} defined in the application "
"definition wasn't found in this session.\n"
"The variable was %s " % e
)
log.error(json.dumps(kwargs, indent=4, sort_keys=True))
raise ValueError(
"This is typically a bug in the pipeline, "
"ask your developer.")
class FusionRenderNode(BaseProjectAction):
name = "fusionrendernode9"
label = "F9 Render Node"
icon = "object-group"
order = 997
class VrayRenderSlave(BaseProjectAction):
name = "vrayrenderslave"
label = "V-Ray Slave"
icon = "object-group"
order = 996
def register_launcher_actions():
"""Register specific actions which should be accessible in the launcher"""
try:
pipeline.register_plugin(api.Action, FusionRenderNode)
pipeline.register_plugin(api.Action, VrayRenderSlave)
except Exception:
# Optional
pass
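# A minimal sketch (not part of the original source): an additional headless
# application action would follow the same pattern as the classes above; the
# ``name`` below is hypothetical and would have to match an application .toml
# definition known to ``lib.get_application``.
class ExampleRenderNode(BaseProjectAction):
    name = "examplerendernode"
    label = "Example Render Node"
    icon = "object-group"
    order = 995
# It would then be registered the same way, via
# ``pipeline.register_plugin(api.Action, ExampleRenderNode)`` inside
# ``register_launcher_actions()``.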
|
PypiClean
|
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/labservices/v20181015/outputs.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'EnvironmentDetailsResponse',
'EnvironmentSizeResponse',
'GalleryImageReferenceResponse',
'LabDetailsResponse',
'LatestOperationResultResponse',
'NetworkInterfaceResponse',
'OperationBatchStatusResponseItemResponse',
'ReferenceVmResponse',
'RegionalAvailabilityResponse',
'ResourceSetResponse',
'ResourceSettingsResponse',
'SizeAvailabilityResponse',
'SizeConfigurationPropertiesResponse',
'SizeInfoResponse',
'VirtualMachineDetailsResponse',
'VmStateDetailsResponse',
]
@pulumi.output_type
class EnvironmentDetailsResponse(dict):
"""
This represents the details about a User's environment and its state.
"""
def __init__(__self__, *,
description: str,
environment_state: str,
id: str,
latest_operation_result: 'outputs.LatestOperationResultResponse',
name: str,
password_last_reset: str,
provisioning_state: str,
total_usage: str,
virtual_machine_details: 'outputs.VirtualMachineDetailsResponse'):
"""
This represents the details about a User's environment and its state.
:param str description: Description of the Environment
        :param str environment_state: Publishing state of the environment setting. Possible values are Creating, Created, Failed
:param str id: Resource Id of the environment
:param 'LatestOperationResultResponse' latest_operation_result: The details of the latest operation. ex: status, error
:param str name: Name of the Environment
:param str password_last_reset: When the password was last reset on the environment.
:param str provisioning_state: The provisioning state of the environment. This also includes LabIsFull and NotYetProvisioned status.
:param str total_usage: How long the environment has been used by a lab user
:param 'VirtualMachineDetailsResponse' virtual_machine_details: Details of backing DTL virtual machine with compute and network details.
"""
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "environment_state", environment_state)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "latest_operation_result", latest_operation_result)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "password_last_reset", password_last_reset)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "total_usage", total_usage)
pulumi.set(__self__, "virtual_machine_details", virtual_machine_details)
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the Environment
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="environmentState")
def environment_state(self) -> str:
"""
        Publishing state of the environment setting. Possible values are Creating, Created, Failed
"""
return pulumi.get(self, "environment_state")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id of the environment
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="latestOperationResult")
def latest_operation_result(self) -> 'outputs.LatestOperationResultResponse':
"""
The details of the latest operation. ex: status, error
"""
return pulumi.get(self, "latest_operation_result")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the Environment
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="passwordLastReset")
def password_last_reset(self) -> str:
"""
When the password was last reset on the environment.
"""
return pulumi.get(self, "password_last_reset")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the environment. This also includes LabIsFull and NotYetProvisioned status.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="totalUsage")
def total_usage(self) -> str:
"""
How long the environment has been used by a lab user
"""
return pulumi.get(self, "total_usage")
@property
@pulumi.getter(name="virtualMachineDetails")
def virtual_machine_details(self) -> 'outputs.VirtualMachineDetailsResponse':
"""
Details of backing DTL virtual machine with compute and network details.
"""
return pulumi.get(self, "virtual_machine_details")
@pulumi.output_type
class EnvironmentSizeResponse(dict):
"""
Represents a size category supported by this Lab Account (small, medium or large)
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxPrice":
suggest = "max_price"
elif key == "minMemory":
suggest = "min_memory"
elif key == "minNumberOfCores":
suggest = "min_number_of_cores"
elif key == "vmSizes":
suggest = "vm_sizes"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EnvironmentSizeResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EnvironmentSizeResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EnvironmentSizeResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_price: float,
min_memory: float,
min_number_of_cores: int,
name: Optional[str] = None,
vm_sizes: Optional[Sequence['outputs.SizeInfoResponse']] = None):
"""
Represents a size category supported by this Lab Account (small, medium or large)
:param float max_price: The pay-as-you-go dollar price per hour this size will cost. It does not include discounts and may not reflect the actual price the size will cost. This is the maximum price of all prices within this tier.
:param float min_memory: The amount of memory available (in GB). This is the minimum amount of memory within this tier.
:param int min_number_of_cores: The number of cores a VM of this size has. This is the minimum number of cores within this tier.
:param str name: The size category
:param Sequence['SizeInfoResponse'] vm_sizes: Represents a set of compute sizes that can serve this given size type
"""
pulumi.set(__self__, "max_price", max_price)
pulumi.set(__self__, "min_memory", min_memory)
pulumi.set(__self__, "min_number_of_cores", min_number_of_cores)
if name is not None:
pulumi.set(__self__, "name", name)
if vm_sizes is not None:
pulumi.set(__self__, "vm_sizes", vm_sizes)
@property
@pulumi.getter(name="maxPrice")
def max_price(self) -> float:
"""
The pay-as-you-go dollar price per hour this size will cost. It does not include discounts and may not reflect the actual price the size will cost. This is the maximum price of all prices within this tier.
"""
return pulumi.get(self, "max_price")
@property
@pulumi.getter(name="minMemory")
def min_memory(self) -> float:
"""
The amount of memory available (in GB). This is the minimum amount of memory within this tier.
"""
return pulumi.get(self, "min_memory")
@property
@pulumi.getter(name="minNumberOfCores")
def min_number_of_cores(self) -> int:
"""
The number of cores a VM of this size has. This is the minimum number of cores within this tier.
"""
return pulumi.get(self, "min_number_of_cores")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The size category
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="vmSizes")
def vm_sizes(self) -> Optional[Sequence['outputs.SizeInfoResponse']]:
"""
Represents a set of compute sizes that can serve this given size type
"""
return pulumi.get(self, "vm_sizes")
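# Illustration only -- hypothetical use of the dict-compatibility shim above. These
# output types subclass dict, so legacy dict-style access with the original camelCase
# API key is routed through __getitem__/get and logs a warning pointing at the
# snake_case property getter:
#
#     size = EnvironmentSizeResponse(max_price=1.5, min_memory=4.0, min_number_of_cores=2)
#     cores = size.min_number_of_cores     # supported access via the property getter
#     size.get("minNumberOfCores")         # legacy path; triggers the __key_warning log
#
# Direct construction is shown only for illustration; the SDK normally builds these
# objects from RPC responses.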
@pulumi.output_type
class GalleryImageReferenceResponse(dict):
"""
The reference information for an Azure Marketplace image.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "osType":
suggest = "os_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in GalleryImageReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
GalleryImageReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
GalleryImageReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
offer: Optional[str] = None,
os_type: Optional[str] = None,
publisher: Optional[str] = None,
sku: Optional[str] = None,
version: Optional[str] = None):
"""
The reference information for an Azure Marketplace image.
:param str offer: The offer of the gallery image.
:param str os_type: The OS type of the gallery image.
:param str publisher: The publisher of the gallery image.
:param str sku: The SKU of the gallery image.
:param str version: The version of the gallery image.
"""
if offer is not None:
pulumi.set(__self__, "offer", offer)
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
if publisher is not None:
pulumi.set(__self__, "publisher", publisher)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def offer(self) -> Optional[str]:
"""
The offer of the gallery image.
"""
return pulumi.get(self, "offer")
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[str]:
"""
The OS type of the gallery image.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter
def publisher(self) -> Optional[str]:
"""
The publisher of the gallery image.
"""
return pulumi.get(self, "publisher")
@property
@pulumi.getter
def sku(self) -> Optional[str]:
"""
The SKU of the gallery image.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
The version of the gallery image.
"""
return pulumi.get(self, "version")
@pulumi.output_type
class LabDetailsResponse(dict):
"""
This represents the details about a lab that the User is in, and its state.
"""
def __init__(__self__, *,
usage_quota: str,
id: Optional[str] = None,
name: Optional[str] = None,
provisioning_state: Optional[str] = None):
"""
This represents the details about a lab that the User is in, and its state.
:param str usage_quota: The maximum duration a user can use a VM in this lab.
:param str id: The Id of the lab.
:param str name: Name of the lab
:param str provisioning_state: The provisioning state of the lab.
"""
pulumi.set(__self__, "usage_quota", usage_quota)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="usageQuota")
def usage_quota(self) -> str:
"""
The maximum duration a user can use a VM in this lab.
"""
return pulumi.get(self, "usage_quota")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The Id of the lab.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the lab
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the lab.
"""
return pulumi.get(self, "provisioning_state")
@pulumi.output_type
class LatestOperationResultResponse(dict):
"""
Details of the status of an operation.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "errorCode":
suggest = "error_code"
elif key == "errorMessage":
suggest = "error_message"
elif key == "httpMethod":
suggest = "http_method"
elif key == "operationUrl":
suggest = "operation_url"
elif key == "requestUri":
suggest = "request_uri"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in LatestOperationResultResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
LatestOperationResultResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
LatestOperationResultResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
error_code: str,
error_message: str,
http_method: str,
operation_url: str,
request_uri: str,
status: str):
"""
Details of the status of an operation.
:param str error_code: Error code on failure.
:param str error_message: The error message.
:param str http_method: The HttpMethod - PUT/POST/DELETE for the operation.
:param str operation_url: The URL to use to check long-running operation status
:param str request_uri: Request URI of the operation.
:param str status: The current status of the operation.
"""
pulumi.set(__self__, "error_code", error_code)
pulumi.set(__self__, "error_message", error_message)
pulumi.set(__self__, "http_method", http_method)
pulumi.set(__self__, "operation_url", operation_url)
pulumi.set(__self__, "request_uri", request_uri)
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="errorCode")
def error_code(self) -> str:
"""
Error code on failure.
"""
return pulumi.get(self, "error_code")
@property
@pulumi.getter(name="errorMessage")
def error_message(self) -> str:
"""
The error message.
"""
return pulumi.get(self, "error_message")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> str:
"""
The HttpMethod - PUT/POST/DELETE for the operation.
"""
return pulumi.get(self, "http_method")
@property
@pulumi.getter(name="operationUrl")
def operation_url(self) -> str:
"""
The URL to use to check long-running operation status
"""
return pulumi.get(self, "operation_url")
@property
@pulumi.getter(name="requestUri")
def request_uri(self) -> str:
"""
Request URI of the operation.
"""
return pulumi.get(self, "request_uri")
@property
@pulumi.getter
def status(self) -> str:
"""
The current status of the operation.
"""
return pulumi.get(self, "status")
@pulumi.output_type
class NetworkInterfaceResponse(dict):
"""
Network details of the environment
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "privateIpAddress":
suggest = "private_ip_address"
elif key == "rdpAuthority":
suggest = "rdp_authority"
elif key == "sshAuthority":
suggest = "ssh_authority"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkInterfaceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkInterfaceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
private_ip_address: str,
rdp_authority: str,
ssh_authority: str,
username: str):
"""
Network details of the environment
:param str private_ip_address: PrivateIp address of the Compute VM
:param str rdp_authority: Connection information for Windows
:param str ssh_authority: Connection information for Linux
:param str username: Username of the VM
"""
pulumi.set(__self__, "private_ip_address", private_ip_address)
pulumi.set(__self__, "rdp_authority", rdp_authority)
pulumi.set(__self__, "ssh_authority", ssh_authority)
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> str:
"""
PrivateIp address of the Compute VM
"""
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="rdpAuthority")
def rdp_authority(self) -> str:
"""
Connection information for Windows
"""
return pulumi.get(self, "rdp_authority")
@property
@pulumi.getter(name="sshAuthority")
def ssh_authority(self) -> str:
"""
Connection information for Linux
"""
return pulumi.get(self, "ssh_authority")
@property
@pulumi.getter
def username(self) -> str:
"""
Username of the VM
"""
return pulumi.get(self, "username")
@pulumi.output_type
class OperationBatchStatusResponseItemResponse(dict):
"""
Represents the status of an operation that used the batch API.
"""
def __init__(__self__, *,
operation_url: str,
status: str):
"""
Represents the status of an operation that used the batch API.
:param str operation_url: status of the long running operation for an environment
:param str status: status of the long running operation for an environment
"""
pulumi.set(__self__, "operation_url", operation_url)
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="operationUrl")
def operation_url(self) -> str:
"""
status of the long running operation for an environment
"""
return pulumi.get(self, "operation_url")
@property
@pulumi.getter
def status(self) -> str:
"""
status of the long running operation for an environment
"""
return pulumi.get(self, "status")
@pulumi.output_type
class ReferenceVmResponse(dict):
"""
Details of a Reference Vm
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "userName":
suggest = "user_name"
elif key == "vmResourceId":
suggest = "vm_resource_id"
elif key == "vmStateDetails":
suggest = "vm_state_details"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ReferenceVmResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ReferenceVmResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ReferenceVmResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
user_name: str,
vm_resource_id: str,
vm_state_details: 'outputs.VmStateDetailsResponse',
password: Optional[str] = None):
"""
Details of a Reference Vm
:param str user_name: The username of the virtual machine
:param str vm_resource_id: VM resource Id for the environment
:param 'VmStateDetailsResponse' vm_state_details: The state details for the reference virtual machine.
:param str password: The password of the virtual machine. This will be set to null in GET resource API
"""
pulumi.set(__self__, "user_name", user_name)
pulumi.set(__self__, "vm_resource_id", vm_resource_id)
pulumi.set(__self__, "vm_state_details", vm_state_details)
if password is not None:
pulumi.set(__self__, "password", password)
@property
@pulumi.getter(name="userName")
def user_name(self) -> str:
"""
The username of the virtual machine
"""
return pulumi.get(self, "user_name")
@property
@pulumi.getter(name="vmResourceId")
def vm_resource_id(self) -> str:
"""
VM resource Id for the environment
"""
return pulumi.get(self, "vm_resource_id")
@property
@pulumi.getter(name="vmStateDetails")
def vm_state_details(self) -> 'outputs.VmStateDetailsResponse':
"""
The state details for the reference virtual machine.
"""
return pulumi.get(self, "vm_state_details")
@property
@pulumi.getter
def password(self) -> Optional[str]:
"""
The password of the virtual machine. This will be set to null in GET resource API
"""
return pulumi.get(self, "password")
@pulumi.output_type
class RegionalAvailabilityResponse(dict):
"""
The availability information of sizes across regions
"""
def __init__(__self__, *,
region: Optional[str] = None,
size_availabilities: Optional[Sequence['outputs.SizeAvailabilityResponse']] = None):
"""
The availability information of sizes across regions
:param str region: Corresponding region
:param Sequence['SizeAvailabilityResponse'] size_availabilities: List of all the size information for the region
"""
if region is not None:
pulumi.set(__self__, "region", region)
if size_availabilities is not None:
pulumi.set(__self__, "size_availabilities", size_availabilities)
@property
@pulumi.getter
def region(self) -> Optional[str]:
"""
Corresponding region
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="sizeAvailabilities")
def size_availabilities(self) -> Optional[Sequence['outputs.SizeAvailabilityResponse']]:
"""
List of all the size information for the region
"""
return pulumi.get(self, "size_availabilities")
@pulumi.output_type
class ResourceSetResponse(dict):
"""
Represents a VM and the setting Id it was created for.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "resourceSettingId":
suggest = "resource_setting_id"
elif key == "vmResourceId":
suggest = "vm_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResourceSetResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResourceSetResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResourceSetResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
resource_setting_id: Optional[str] = None,
vm_resource_id: Optional[str] = None):
"""
Represents a VM and the setting Id it was created for.
:param str resource_setting_id: resourceSettingId for the environment
:param str vm_resource_id: VM resource Id for the environment
"""
if resource_setting_id is not None:
pulumi.set(__self__, "resource_setting_id", resource_setting_id)
if vm_resource_id is not None:
pulumi.set(__self__, "vm_resource_id", vm_resource_id)
@property
@pulumi.getter(name="resourceSettingId")
def resource_setting_id(self) -> Optional[str]:
"""
resourceSettingId for the environment
"""
return pulumi.get(self, "resource_setting_id")
@property
@pulumi.getter(name="vmResourceId")
def vm_resource_id(self) -> Optional[str]:
"""
VM resource Id for the environment
"""
return pulumi.get(self, "vm_resource_id")
@pulumi.output_type
class ResourceSettingsResponse(dict):
"""
Represents resource specific settings
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "imageName":
suggest = "image_name"
elif key == "referenceVm":
suggest = "reference_vm"
elif key == "galleryImageResourceId":
suggest = "gallery_image_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResourceSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResourceSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResourceSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cores: int,
id: str,
image_name: str,
reference_vm: 'outputs.ReferenceVmResponse',
gallery_image_resource_id: Optional[str] = None,
size: Optional[str] = None):
"""
Represents resource specific settings
:param int cores: The translated compute cores of the virtual machine
:param str id: The unique id of the resource setting
        :param str image_name: The name of the image used to create the environment setting
:param 'ReferenceVmResponse' reference_vm: Details specific to Reference Vm
:param str gallery_image_resource_id: The resource id of the gallery image used for creating the virtual machine
:param str size: The size of the virtual machine
"""
pulumi.set(__self__, "cores", cores)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "image_name", image_name)
pulumi.set(__self__, "reference_vm", reference_vm)
if gallery_image_resource_id is not None:
pulumi.set(__self__, "gallery_image_resource_id", gallery_image_resource_id)
if size is not None:
pulumi.set(__self__, "size", size)
@property
@pulumi.getter
def cores(self) -> int:
"""
The translated compute cores of the virtual machine
"""
return pulumi.get(self, "cores")
@property
@pulumi.getter
def id(self) -> str:
"""
The unique id of the resource setting
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="imageName")
def image_name(self) -> str:
"""
        The name of the image used to create the environment setting
"""
return pulumi.get(self, "image_name")
@property
@pulumi.getter(name="referenceVm")
def reference_vm(self) -> 'outputs.ReferenceVmResponse':
"""
Details specific to Reference Vm
"""
return pulumi.get(self, "reference_vm")
@property
@pulumi.getter(name="galleryImageResourceId")
def gallery_image_resource_id(self) -> Optional[str]:
"""
The resource id of the gallery image used for creating the virtual machine
"""
return pulumi.get(self, "gallery_image_resource_id")
@property
@pulumi.getter
def size(self) -> Optional[str]:
"""
The size of the virtual machine
"""
return pulumi.get(self, "size")
@pulumi.output_type
class SizeAvailabilityResponse(dict):
"""
Represents the size information
"""
def __init__(__self__, *,
is_available: Optional[bool] = None,
size_category: Optional[str] = None):
"""
Represents the size information
:param bool is_available: Whether or not this size category is available
:param str size_category: The category of the size (Basic, Standard, Performance).
"""
if is_available is not None:
pulumi.set(__self__, "is_available", is_available)
if size_category is not None:
pulumi.set(__self__, "size_category", size_category)
@property
@pulumi.getter(name="isAvailable")
def is_available(self) -> Optional[bool]:
"""
Whether or not this size category is available
"""
return pulumi.get(self, "is_available")
@property
@pulumi.getter(name="sizeCategory")
def size_category(self) -> Optional[str]:
"""
The category of the size (Basic, Standard, Performance).
"""
return pulumi.get(self, "size_category")
@pulumi.output_type
class SizeConfigurationPropertiesResponse(dict):
"""
Represents the size configuration under the lab account
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "environmentSizes":
suggest = "environment_sizes"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SizeConfigurationPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SizeConfigurationPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SizeConfigurationPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
environment_sizes: Optional[Sequence['outputs.EnvironmentSizeResponse']] = None):
"""
Represents the size configuration under the lab account
:param Sequence['EnvironmentSizeResponse'] environment_sizes: Represents a list of size categories supported by this Lab Account (Small, Medium, Large)
"""
if environment_sizes is not None:
pulumi.set(__self__, "environment_sizes", environment_sizes)
@property
@pulumi.getter(name="environmentSizes")
def environment_sizes(self) -> Optional[Sequence['outputs.EnvironmentSizeResponse']]:
"""
Represents a list of size categories supported by this Lab Account (Small, Medium, Large)
"""
return pulumi.get(self, "environment_sizes")
@pulumi.output_type
class SizeInfoResponse(dict):
"""
Contains detailed information about a size
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "computeSize":
suggest = "compute_size"
elif key == "numberOfCores":
suggest = "number_of_cores"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SizeInfoResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SizeInfoResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SizeInfoResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
compute_size: Optional[str] = None,
memory: Optional[float] = None,
number_of_cores: Optional[int] = None,
price: Optional[float] = None):
"""
Contains detailed information about a size
:param str compute_size: Represents the actual compute size, e.g. Standard_A2_v2.
:param float memory: The amount of memory available (in GB).
:param int number_of_cores: The number of cores a VM of this size has.
:param float price: The pay-as-you-go price per hour this size will cost. It does not include discounts and may not reflect the actual price the size will cost.
"""
if compute_size is not None:
pulumi.set(__self__, "compute_size", compute_size)
if memory is not None:
pulumi.set(__self__, "memory", memory)
if number_of_cores is not None:
pulumi.set(__self__, "number_of_cores", number_of_cores)
if price is not None:
pulumi.set(__self__, "price", price)
@property
@pulumi.getter(name="computeSize")
def compute_size(self) -> Optional[str]:
"""
Represents the actual compute size, e.g. Standard_A2_v2.
"""
return pulumi.get(self, "compute_size")
@property
@pulumi.getter
def memory(self) -> Optional[float]:
"""
The amount of memory available (in GB).
"""
return pulumi.get(self, "memory")
@property
@pulumi.getter(name="numberOfCores")
def number_of_cores(self) -> Optional[int]:
"""
The number of cores a VM of this size has.
"""
return pulumi.get(self, "number_of_cores")
@property
@pulumi.getter
def price(self) -> Optional[float]:
"""
The pay-as-you-go price per hour this size will cost. It does not include discounts and may not reflect the actual price the size will cost.
"""
return pulumi.get(self, "price")
@pulumi.output_type
class VirtualMachineDetailsResponse(dict):
"""
Details of the backing virtual machine.
"""
def __init__(__self__, *,
last_known_power_state: str,
private_ip_address: str,
provisioning_state: str,
rdp_authority: str,
ssh_authority: str,
user_name: str):
"""
Details of the backing virtual machine.
:param str last_known_power_state: Last known compute power state captured in DTL
:param str private_ip_address: PrivateIp address of the compute VM
:param str provisioning_state: Provisioning state of the Dtl VM
:param str rdp_authority: Connection information for Windows
:param str ssh_authority: Connection information for Linux
:param str user_name: Compute VM login user name
"""
pulumi.set(__self__, "last_known_power_state", last_known_power_state)
pulumi.set(__self__, "private_ip_address", private_ip_address)
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "rdp_authority", rdp_authority)
pulumi.set(__self__, "ssh_authority", ssh_authority)
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter(name="lastKnownPowerState")
def last_known_power_state(self) -> str:
"""
Last known compute power state captured in DTL
"""
return pulumi.get(self, "last_known_power_state")
@property
@pulumi.getter(name="privateIpAddress")
def private_ip_address(self) -> str:
"""
PrivateIp address of the compute VM
"""
return pulumi.get(self, "private_ip_address")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the Dtl VM
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="rdpAuthority")
def rdp_authority(self) -> str:
"""
Connection information for Windows
"""
return pulumi.get(self, "rdp_authority")
@property
@pulumi.getter(name="sshAuthority")
def ssh_authority(self) -> str:
"""
Connection information for Linux
"""
return pulumi.get(self, "ssh_authority")
@property
@pulumi.getter(name="userName")
def user_name(self) -> str:
"""
Compute VM login user name
"""
return pulumi.get(self, "user_name")
@pulumi.output_type
class VmStateDetailsResponse(dict):
"""
Details about the state of the reference virtual machine.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "lastKnownPowerState":
suggest = "last_known_power_state"
elif key == "powerState":
suggest = "power_state"
elif key == "rdpAuthority":
suggest = "rdp_authority"
elif key == "sshAuthority":
suggest = "ssh_authority"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in VmStateDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
VmStateDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
VmStateDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
last_known_power_state: str,
power_state: str,
rdp_authority: str,
ssh_authority: str):
"""
Details about the state of the reference virtual machine.
:param str last_known_power_state: Last known compute power state captured in DTL
:param str power_state: The power state of the reference virtual machine.
:param str rdp_authority: The RdpAuthority property is a server DNS host name or IP address followed by the service port number for RDP (Remote Desktop Protocol).
:param str ssh_authority: The SshAuthority property is a server DNS host name or IP address followed by the service port number for SSH.
"""
pulumi.set(__self__, "last_known_power_state", last_known_power_state)
pulumi.set(__self__, "power_state", power_state)
pulumi.set(__self__, "rdp_authority", rdp_authority)
pulumi.set(__self__, "ssh_authority", ssh_authority)
@property
@pulumi.getter(name="lastKnownPowerState")
def last_known_power_state(self) -> str:
"""
Last known compute power state captured in DTL
"""
return pulumi.get(self, "last_known_power_state")
@property
@pulumi.getter(name="powerState")
def power_state(self) -> str:
"""
The power state of the reference virtual machine.
"""
return pulumi.get(self, "power_state")
@property
@pulumi.getter(name="rdpAuthority")
def rdp_authority(self) -> str:
"""
The RdpAuthority property is a server DNS host name or IP address followed by the service port number for RDP (Remote Desktop Protocol).
"""
return pulumi.get(self, "rdp_authority")
@property
@pulumi.getter(name="sshAuthority")
def ssh_authority(self) -> str:
"""
The SshAuthority property is a server DNS host name or IP address followed by the service port number for SSH.
"""
return pulumi.get(self, "ssh_authority")
|
PypiClean
|
/regulome_web-2.0rc1.tar.gz/regulome_web-2.0rc1/regulome_app/webapp/tools/plot.py
|
import os
import datetime
import time
import subprocess
from regulome_app.config import logger, configs, REGULOME_R_SCRIPT
from regulome_app.webapp import models as models
from flask import request
import json
class Plot:
"""Create the regulome plot"""
CACHE_TEMPLATE = os.path.join(
configs['output']['cache'], 'cache', # Path of the cache
'{build}', '{region}', '{tfbs}', '{snps}', '{chromatin}', # Tree of directories
'{build}_{chromosome}_{start}-{end}' # Name of the file
)
NOCACHE_TEMPLATE = os.path.join(
configs['output']['cache'], 'nocache', # Path of the cache
'{user}_{build}_{chromosome}_{start}-{end}' # Name of the file
)
# Script
CMD = "{r_path} {r_script} "
# Outputs
CMD += "-p {cache_path}.pdf -o {cache_path}.txt -s {cache_path}.snp.txt "
CMD += "-M {cache_path}.motifs.txt "
# Coordinates
CMD += "-B {build} -c {chromosome} -b {start} -e {end} -r {ranges} "
# Data
CMD += "-d {diagram_plot} -m {magic_plot} -u {user_regions} -U {user_snp} -Mt {mapTF} "
CMD += "-Mr {mapRegion} -Mc {mapChromatin} "
CMD += "-f {format_plot}"
def __init__(self, messages, username):
"""Initiate the class. Load the message of the form"""
self.msg = messages
self.user = username
if self.msg['upload_regions'] != 'None' or self.msg['upload_snps'] != 'None':
self.cache = False
self.cache_path = self.NOCACHE_TEMPLATE.format(
user=self.user,
build=self.msg['build'],
chromosome=models.CHROMOSOMES_DICT[
self.msg['chromosome']
],
start=self.msg['start'],
end=self.msg['end']
)
else:
self.cache = True
self.cache_path = self.CACHE_TEMPLATE.format(
build=self.msg['build'],
region=self.msg['region'],
tfbs=self.msg['tfbs'],
snps=self.msg['select_snps'],
chromatin='none' if self.msg['chromatin_profile'] == 'disabled' else self.msg['chromatin'],
chromosome=models.CHROMOSOMES_DICT[
self.msg['chromosome']
],
start=self.msg['start'],
end=self.msg['end']
)
self.base_path = configs['output']['cache']
self.cmd = self.define_cmd()
self.ip = self.get_ip()
        # The creation of the plot is triggered by self.run()
def run(self):
"""Create the plot if needed"""
if (not os.path.exists(self.cache_path + ".png") or
self.msg['upload_regions'] != 'None' or
self.msg['upload_snps'] != 'None'):
exist = False
logger.info('Building plot...')
logger.info(self.cmd)
self.do_plot()
self.pdf_to_png()
else:
exist = True
# Write the report
self.report(exist=exist)
def report(self, exist):
"""Write a line in the log file"""
report = {
'date': time.strftime("%Y-%m-%d"),
'time': time.strftime("%H:%M:%S"),
'datetime': datetime.datetime.now().isoformat(),
'ip': self.ip,
'user': self.user,
'gene': None if self.msg['gene'] is None else self.msg['gene'],
'command_line': self.cmd,
'exist': exist
}
with open(configs['logs']['activity_log'], 'a') as log:
log.write(json.dumps(report) + "\n")
def define_cmd(self):
"""Define the R command line"""
d = dict(
r_path=configs['binaries']['Rscript_bin'],
r_script=REGULOME_R_SCRIPT,
cache_path=self.cache_path,
build=self.msg['build'],
chromosome=models.CHROMOSOMES_DICT[self.msg['chromosome']],
start=self.msg['start'],
end=self.msg['end'],
ranges=self.msg['range_region'],
diagram_plot='TRUE' if self.msg['select_snps'] == 'diagram' else 'FALSE',
magic_plot='TRUE' if self.msg['select_snps'] == 'magic' else 'FALSE',
user_regions='FALSE' if self.msg['upload_regions'] == 'None' else os.path.join(
configs['data']['uploads'],
"{}_{}".format(self.user, self.msg['upload_regions'])
),
user_snp='FALSE' if self.msg['upload_snps'] == 'None' else os.path.join(
configs['data']['uploads'],
"{}_{}".format(self.user, self.msg['upload_snps'])
),
format_plot='pdf',
mapTF=self.msg['tfbs'],
mapRegion=self.msg['region'],
mapChromatin='FALSE' if (self.msg['chromatin'] == 'none' or self.msg['chromatin_profile'] == 'disabled') else self.msg['chromatin']
)
return self.CMD.format_map(d)
def pdf_to_png(self):
"""Convert a pdf to a png"""
cmd = [configs['binaries']['imagemagick']]
cmd.extend('-geometry 1000 -quality 100 -density 150 -size 894x734'.split())
cmd.extend([self.cache_path + '.pdf', self.cache_path + '.png'])
with open(configs['logs']['regulome_log'], 'a') as log:
log.write("Converting PDF tp PNG\n")
result = subprocess.run(cmd, stdout=log)
def do_plot(self):
"""Create the plot by executing the R script"""
cmd = self.cmd.split()
with open(configs['logs']['regulome_log'], 'a') as log:
log.write("Creating plot\n")
log.write(self.cmd + "\n")
_ = subprocess.run(cmd, stdout=log)
def get_results_path(self):
"""Return the last layers of the results path"""
common_path = os.path.commonprefix([self.cache_path, configs['output']['cache']])
relative_path = os.path.relpath(self.cache_path, common_path)
return relative_path
def get_results_name(self):
"""Return the name the file will be given"""
name = os.path.split(self.get_results_path())[1]
if self.cache:
return name
else:
return name.split("_", 1)[-1]
@staticmethod
def get_ip():
"""Get the IP address of the user"""
        # Alternative lookups, kept for reference:
        # ip = cgi.escape(os.environ["REMOTE_ADDR"])
        # request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
        # request.environ.get('REMOTE_ADDR', request.remote_addr)
ip = (os.getenv("HTTP_CLIENT_IP") or
os.getenv("HTTP_X_FORWARDED_FOR") or
os.getenv("REMOTE_ADDR") or
"UNKNOWN")
return ip
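A rough usage sketch for the class above, as it might be driven from a Flask view; every field value in `messages` is illustrative rather than a validated form payload:
# Hypothetical invocation -- all field values are illustrative only.
messages = {
    'build': 'hg19', 'chromosome': '11', 'start': '17350000', 'end': '17500000',
    'region': 'islets', 'tfbs': 'TRUE', 'select_snps': 'diagram',
    'chromatin_profile': 'disabled', 'chromatin': 'none', 'range_region': '0',
    'upload_regions': 'None', 'upload_snps': 'None', 'gene': None,
}
plot = Plot(messages, username='anonymous')
plot.run()                        # builds the PDF/PNG only when not already cached
print(plot.get_results_path())    # path of the result relative to the cache root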
|
PypiClean
|
/bdbcontrib-0.1.8.1.tar.gz/bdbcontrib-0.1.8.1/examples/Index.ipynb
|
# Introduction
These example notebooks introduce probabilistic programming. We first look at applications: how generative models can be applied to populations in [BayesDB](http://probcomp.csail.mit.edu/bayesdb/). We then progress towards creating new generative models that better fit our intuitions about those populations in [Venture](http://probcomp.csail.mit.edu/venture). BayesDB and Venture are developed by the [MIT Probabilistic Computing Group](http://probcomp.csail.mit.edu/).
We invite you to watch a [presentation](http://video.media.mit.edu/media/play/mansinghka-2016-03-15_h264_512x288.mov)
([slides](https://docs.google.com/presentation/d/1LdO6SPAFyC99Gb2QHa8-ikLLYluOPOB9MTavuOLTY9I/edit))
on the subject of this tutorial by Vikash Mansinghka.
Before we get started...
## About you...
Signing up with your name and email helps build a community of support and helps improve your user experience. When you sign up, we collect information including the commands you tried, how long they took, what errors they resulted in, any additional data that you import, etc. If you provide your email, we will invite you to a low-traffic announcements list. Please include the name and email you use below in any reports of bugs or surprises. Send those reports to [email protected] or via [GitHub](https://github.com/probcomp/bdbcontrib/issues/new).
If security is a primary concern, then you should do a security audit (and share the results with us) before using the software. As this is alpha software, results may not be reliable.
DO NOT USE THIS SOFTWARE FOR HIPAA-COVERED, PERSONALLY IDENTIFIABLE, OR SIMILARLY SENSITIVE DATA!
**Please fill in your name and email,** then use shift-return (or the play button above) to run the cell.
```
name = ""
email = ""
with open('bayesdb-session-capture-opt.txt', 'w') as optfile:
optfile.write('%s <%s>\n' % (name, email))
# To opt out, use optfile.write('False') instead.
# Even if you opt out of sending details, you still allow us to count how often users opt out.
# You can opt-in or opt-out on a per-population basis using the session_capture_name option to Population.
# You must choose to either opt-in or opt-out.
```
# Background
For those unfamiliar with the software, languages, or concepts we will use in this tutorial, we recommend:
* External [Introduction to SQL](http://www.tutorialspoint.com/sql/sql-overview.htm) — we do not implement all of SQL, but the basics are the same.
* External [Introduction to Python](https://docs.python.org/3/tutorial/introduction.html). We will introduce the basics of [pandas](http://pandas.pydata.org/pandas-docs/stable/tutorials.html) and [seaborn](https://stanford.edu/~mwaskom/software/seaborn/tutorial.html) but getting familiar with them might be helpful too.
* External quick explanations of [statistical populations](https://en.wikipedia.org/wiki/Statistical_population), [probability models](http://www.stat.yale.edu/Courses/1997-98/101/probint.htm), [generative models](https://en.wikipedia.org/wiki/Generative_model), and [predictive probabilities](https://en.wikipedia.org/wiki/Posterior_predictive_distribution).
* This page is an IPython Notebook. Many people are happy clicking on cells and running them with shift-enter. If you want to learn more, start by pressing Escape then H for a quick reference. You can also dive deeper with an external [Introduction to iPython notebooks](https://ipython.org/ipython-doc/3/notebook/notebook.html#basic-workflow).
You do not need extensive knowledge of any of these to read our examples, so **feel free to skip ahead**. But if you are not very familiar with one of the technologies, then doing initial learning will be very helpful to you in playing around confidently and doing the suggested exercises.
## BayesDB
[BayesDB](http://probcomp.csail.mit.edu/) allows you to query your data as other SQL database systems do. It also allows you to query the **implications** of your data. We explore these capabilities using information about satellites orbiting our planet.
* [Querying and Plotting the Satellites Data](satellites/querying-and-plotting.ipynb) without doing any probabilistic analysis. This is a good place to start to get used to the language, *before* learning to explore the implications of the data.
* [Satellites Exploration](satellites/Satellites.ipynb) — a bit of the above, plus a short exploration of the results of probabilistic analysis.
**TODO**: The same in smaller chunks, with those chunks expanded, promised here.
<!--
Descriptive Analysis (quantitatively describe a population):
* [Basic BQL querying and plotting](satellites/querying-and-plotting.ipynb).
* [Population recipes](satellites/with-recipes.ipynb).
Exploratory Analysis (find relationships between variables, and suggest areas for future study):
* [Simulate for "What If?" questions](satellites/what-if.ipynb),
* [Estimate predictive relationships](satellites/predictive-relationships.ipynb),
* [Inferring missing values](satellites/missing-values.ipynb),
* [Finding unlikely values](satellites/unlikely-values.ipynb),
* [Mathematics of inference quality analysis](lab3),
* [Limitations of the default metamodel](satellites/cc-limitations.ipynb),
* [Analysis with a foreign model](satellites/foreign-model.ipynb).
And, more suitable for presentation than for learning:
* A [one-notebook summary](satellites/Satellites.ipynb) with fewer details.
-->
## Working with your own data
Because a default BayesDB model is unlikely to model your data plausibly, and because we do not yet have the tools to be confident that any model has captured the relationships in a population well, BayesDB is not ready for use for [higher levels of analysis](http://datascientistinsights.com/2013/01/29/six-types-of-analyses-every-data-scientist-should-know/).
As you work with your data, **do not attempt to use BayesDB for**:
* inferential analysis: drawing conclusions about a larger population from which the data you analyze are a sample,
* predictive analysis: using the population you have to make predictions outside of that population,
* causal analysis: understanding how interventions in one variable will affect other variables, or
* mechanistic analysis: understanding causal and structural relationships between variables.
For somewhat temporary technical reasons, BayesDB is not ready to handle very large populations, except by sub-sampling them (violating the caveat against inferential analysis!).
While the group's focus is on better model types and inference strategies, growing past some of these limitations is also in view. If these interest you, please work with us towards those goals.
With those caveats, we explore a "new" dataset using BayesDB:
* [ma-school-districts](ma-school-districts/MASchoolDistricts.ipynb)
**TODO**: the same in smaller chunks, with those chunks expanded, is promised here.
<!--
* [Preparing the data for analysis, and making initial modeling choices](ma-school-districts/data-prep.ipynb),
* [Describing the population](ma-school-districts/descriptions.ipynb),
* [Creating derived variables](ma-school-districts/derived-variables.ipynb),
* [Exploring relationships in the population](ma-school-districts/exploring-relationships.ipynb),
* [Finding and patching implausible model assumptions](ma-school-districts/implausible-assumptions.ipynb).
-->
To work with your own data, please [contact the group](mailto:[email protected]) to have a conversation about the population you want to explore, about appropriate types of analysis, and to learn how to unlock analysis. We lock this feature because users have frequently misunderstood the limitations of our software, drawing unwarranted inferences. The concepts are easy to misuse, the software is in an early alpha version, and working with our team will help keep egg off your face, or worse.
## Venture
[Venture](http://probcomp.csail.mit.edu/venture) is a prototype general-purpose probabilistic computing platform. In Venture, one can create novel probabilistic models, and inference strategies that allow efficient learning for those models. Venture is programmed primarily in VentureScript, but also supports applications written in other probabilistic or traditional programming languages. In this tutorial we will explore a mix of the VentureScript language and the Python API to Venture.
**TODO**: Tutorial examples promised here.
<!--
* A simple case: [Bayesian linear regression in Venture](venture/bayesian-linear-regression.ipynb)
* More generally: [Setting up a probabilistic model in Venture](venture/probabilistic-model.ipynb)
* [Probabilistic inference in Venture](venture/probabilistic-inference.ipynb)
* [Time-accuracy tradeoffs in inference](venture/time-accuracy-general.ipynb)
* [New techniques for quantifying time and accuracy tradeoffs of MCMC-based inference](venture/time-accuracy-marcoct.ipynb)
-->
## Notes
As I work in these notebooks, where is my work saved? Execute the following cell to find out:
```
import os
os.getcwd()
```
--------------------------------------------
Copyright (c) 2010-2016, MIT Probabilistic Computing Project
Licensed under Apache 2.0 (edit cell for details).
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
|
PypiClean
|
/dagster_graphql-1.4.11-py3-none-any.whl/dagster_graphql/client/query.py
|
ERROR_FRAGMENT = """
fragment errorFragment on PythonError {
message
className
stack
cause {
message
className
stack
cause {
message
className
stack
}
}
}
"""
METADATA_ENTRY_FRAGMENT = """
fragment metadataEntryFragment on MetadataEntry {
__typename
label
description
... on FloatMetadataEntry {
floatValue
}
... on IntMetadataEntry {
intRepr
}
... on BoolMetadataEntry {
boolValue
}
... on JsonMetadataEntry {
jsonString
}
... on MarkdownMetadataEntry {
mdStr
}
... on PathMetadataEntry {
path
}
... on NotebookMetadataEntry {
path
}
... on PythonArtifactMetadataEntry {
module
name
}
... on TextMetadataEntry {
text
}
... on UrlMetadataEntry {
url
}
... on PipelineRunMetadataEntry {
runId
}
... on AssetMetadataEntry {
assetKey {
path
}
}
... on TableMetadataEntry {
table {
records
schema {
constraints { other }
columns {
name
type
constraints { nullable unique other }
}
}
}
}
... on TableSchemaMetadataEntry {
schema {
constraints { other }
columns {
name
type
constraints { nullable unique other }
}
}
}
}
"""
STEP_EVENT_FRAGMENTS = ERROR_FRAGMENT + METADATA_ENTRY_FRAGMENT + """
fragment stepEventFragment on StepEvent {
stepKey
solidHandleID
... on EngineEvent {
metadataEntries {
...metadataEntryFragment
}
markerStart
markerEnd
engineError: error {
...errorFragment
}
}
... on ExecutionStepFailureEvent {
error {
...errorFragment
}
failureMetadata {
label
description
metadataEntries {
...metadataEntryFragment
}
}
}
... on ExecutionStepInputEvent {
inputName
typeCheck {
__typename
success
label
description
metadataEntries {
...metadataEntryFragment
}
}
}
... on ExecutionStepOutputEvent {
outputName
typeCheck {
__typename
success
label
description
metadataEntries {
...metadataEntryFragment
}
}
metadataEntries {
...metadataEntryFragment
}
}
... on ExecutionStepUpForRetryEvent {
retryError: error {
...errorFragment
}
secondsToWait
}
... on ObjectStoreOperationEvent {
stepKey
operationResult {
op
metadataEntries {
...metadataEntryFragment
}
}
}
... on StepExpectationResultEvent {
expectationResult {
success
label
description
metadataEntries {
...metadataEntryFragment
}
}
}
... on MaterializationEvent {
label
description
metadataEntries {
...metadataEntryFragment
}
}
... on MessageEvent {
runId
message
timestamp
level
eventType
}
}
"""
MESSAGE_EVENT_FRAGMENTS = """
fragment messageEventFragment on MessageEvent {
__typename
runId
message
timestamp
level
eventType
...stepEventFragment
... on MaterializationEvent {
label
description
metadataEntries {
__typename
...metadataEntryFragment
}
}
... on ExecutionStepFailureEvent {
stepKey
error {
...errorFragment
}
}
}
""" + STEP_EVENT_FRAGMENTS
SUBSCRIPTION_QUERY = MESSAGE_EVENT_FRAGMENTS + """
subscription subscribeTest($runId: ID!) {
pipelineRunLogs(runId: $runId) {
__typename
... on PipelineRunLogsSubscriptionSuccess {
run {
runId
}
messages {
...messageEventFragment
}
hasMorePastEvents
}
... on PipelineRunLogsSubscriptionFailure {
missingRunId
message
}
}
}
"""
RUN_EVENTS_QUERY = MESSAGE_EVENT_FRAGMENTS + """
query pipelineRunEvents($runId: ID!, $cursor: String) {
logsForRun(runId: $runId, afterCursor: $cursor) {
__typename
... on EventConnection {
events {
...messageEventFragment
}
cursor
}
}
}
"""
LAUNCH_PIPELINE_EXECUTION_MUTATION = ERROR_FRAGMENT + """
mutation($executionParams: ExecutionParams!) {
launchPipelineExecution(executionParams: $executionParams) {
__typename
... on InvalidStepError {
invalidStepKey
}
... on InvalidOutputError {
stepKey
invalidOutputName
}
... on LaunchRunSuccess {
run {
runId
pipeline {
name
}
tags {
key
value
}
status
runConfigYaml
mode
resolvedOpSelection
}
}
... on ConflictingExecutionParamsError {
message
}
... on PresetNotFoundError {
preset
message
}
... on RunConfigValidationInvalid {
pipelineName
errors {
__typename
message
path
reason
}
}
... on PipelineNotFoundError {
message
pipelineName
}
... on PythonError {
...errorFragment
}
}
}
"""
LAUNCH_PIPELINE_REEXECUTION_MUTATION = ERROR_FRAGMENT + """
mutation($executionParams: ExecutionParams, $reexecutionParams: ReexecutionParams) {
launchPipelineReexecution(executionParams: $executionParams, reexecutionParams: $reexecutionParams) {
__typename
... on PythonError {
...errorFragment
}
... on LaunchRunSuccess {
run {
runId
status
pipeline {
name
}
tags {
key
value
}
runConfigYaml
mode
rootRunId
parentRunId
}
}
... on PipelineNotFoundError {
message
pipelineName
}
... on RunConfigValidationInvalid {
pipelineName
errors {
__typename
message
path
reason
}
}
... on InvalidStepError {
invalidStepKey
}
... on InvalidOutputError {
stepKey
invalidOutputName
}
... on ConflictingExecutionParamsError {
message
}
... on PresetNotFoundError {
preset
message
}
}
}
"""
PIPELINE_REEXECUTION_INFO_QUERY = """
query ReexecutionInfoQuery($runId: ID!) {
pipelineRunOrError(runId: $runId) {
__typename
... on PipelineRun {
stepKeysToExecute
}
}
}
"""
LAUNCH_PARTITION_BACKFILL_MUTATION = ERROR_FRAGMENT + """
mutation($backfillParams: LaunchBackfillParams!) {
launchPartitionBackfill(backfillParams: $backfillParams) {
__typename
... on PythonError {
...errorFragment
}
... on PartitionSetNotFoundError {
message
}
... on LaunchBackfillSuccess {
backfillId
launchedRunIds
}
}
}
"""
|
PypiClean
|
/tiamat_pip-1.11.0-py3-none-any.whl/tiamatpip/ext/pip/_internal/resolution/resolvelib/base.py
|
from typing import FrozenSet, Iterable, Optional, Tuple, Union
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
from pip._vendor.packaging.version import LegacyVersion, Version
from pip._internal.models.link import Link, links_equivalent
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.hashes import Hashes
CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]]
CandidateVersion = Union[LegacyVersion, Version]
def format_name(project: str, extras: FrozenSet[str]) -> str:
if not extras:
return project
canonical_extras = sorted(canonicalize_name(e) for e in extras)
return "{}[{}]".format(project, ",".join(canonical_extras))
class Constraint:
def __init__(
self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link]
) -> None:
self.specifier = specifier
self.hashes = hashes
self.links = links
@classmethod
def empty(cls) -> "Constraint":
return Constraint(SpecifierSet(), Hashes(), frozenset())
@classmethod
def from_ireq(cls, ireq: InstallRequirement) -> "Constraint":
links = frozenset([ireq.link]) if ireq.link else frozenset()
return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links)
def __bool__(self) -> bool:
return bool(self.specifier) or bool(self.hashes) or bool(self.links)
def __and__(self, other: InstallRequirement) -> "Constraint":
if not isinstance(other, InstallRequirement):
return NotImplemented
specifier = self.specifier & other.specifier
hashes = self.hashes & other.hashes(trust_internet=False)
links = self.links
if other.link:
links = links.union([other.link])
return Constraint(specifier, hashes, links)
def is_satisfied_by(self, candidate: "Candidate") -> bool:
# Reject if there are any mismatched URL constraints on this package.
if self.links and not all(_match_link(link, candidate) for link in self.links):
return False
# We can safely always allow prereleases here since PackageFinder
# already implements the prerelease logic, and would have filtered out
# prerelease candidates if the user does not expect them.
return self.specifier.contains(candidate.version, prereleases=True)
class Requirement:
@property
def project_name(self) -> NormalizedName:
"""The "project name" of a requirement.
This is different from ``name`` if this requirement contains extras,
in which case ``name`` would contain the ``[...]`` part, while this
refers to the name of the project.
"""
raise NotImplementedError("Subclass should override")
@property
def name(self) -> str:
"""The name identifying this requirement in the resolver.
This is different from ``project_name`` if this requirement contains
extras, where ``project_name`` would not contain the ``[...]`` part.
"""
raise NotImplementedError("Subclass should override")
def is_satisfied_by(self, candidate: "Candidate") -> bool:
return False
def get_candidate_lookup(self) -> CandidateLookup:
raise NotImplementedError("Subclass should override")
def format_for_error(self) -> str:
raise NotImplementedError("Subclass should override")
def _match_link(link: Link, candidate: "Candidate") -> bool:
if candidate.source_link:
return links_equivalent(link, candidate.source_link)
return False
class Candidate:
@property
def project_name(self) -> NormalizedName:
"""The "project name" of the candidate.
This is different from ``name`` if this candidate contains extras,
in which case ``name`` would contain the ``[...]`` part, while this
refers to the name of the project.
"""
raise NotImplementedError("Override in subclass")
@property
def name(self) -> str:
"""The name identifying this candidate in the resolver.
This is different from ``project_name`` if this candidate contains
extras, where ``project_name`` would not contain the ``[...]`` part.
"""
raise NotImplementedError("Override in subclass")
@property
def version(self) -> CandidateVersion:
raise NotImplementedError("Override in subclass")
@property
def is_installed(self) -> bool:
raise NotImplementedError("Override in subclass")
@property
def is_editable(self) -> bool:
raise NotImplementedError("Override in subclass")
@property
def source_link(self) -> Optional[Link]:
raise NotImplementedError("Override in subclass")
def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
raise NotImplementedError("Override in subclass")
def get_install_requirement(self) -> Optional[InstallRequirement]:
raise NotImplementedError("Override in subclass")
def format_for_error(self) -> str:
raise NotImplementedError("Subclass should override")
|
PypiClean
|
/towhee.compiler-0.1.1.tar.gz/towhee.compiler-0.1.1/torchdynamo/variables/misc.py
|
import inspect
import sys
import types
from typing import Dict
from typing import List
from typing import Sequence
import torch._C
from typeguard import typechecked
from .. import variables as vars
from ..bytecode_transformation import create_instruction
from ..exc import unimplemented
from ..guards import Guard
from ..guards import GuardBuilder
from ..guards import GuardSource
from ..source import AttrSource
from ..utils import identity
from ..utils import proxy_args_kwargs
from ..variables import Variable
class SuperVariable(Variable):
def __init__(self, typevar, objvar=None, **kwargs):
super(SuperVariable, self).__init__(**kwargs)
self.typevar = typevar
self.objvar = objvar
def reconstruct(self, codegen):
codegen(vars.BuiltinVariable(super))
codegen(self.typevar)
if self.objvar is not None:
codegen(self.objvar)
return [create_instruction("CALL_FUNCTION", 2)]
else:
return [create_instruction("CALL_FUNCTION", 1)]
def const_getattr(self, tx, name):
assert self.objvar, "1-arg super not implemented"
search_type = self.typevar.as_python_constant()
# We default to the python type of the object. However,
# 1. If this is a `type`, then the original object represents the user
# defined type.
# 2. If this is `torch._C._TensorMeta`, the original object is the user
# defined type of a custom tensor subclass.
# TODO(future PR): figure out how to do this in a less hacky way
type_to_use = self.objvar.python_type()
if type_to_use is type or type_to_use is torch._C._TensorMeta:
type_to_use = self.objvar.value
# TODO(jansel): there is a small chance this could trigger user code, prevent that
return getattr(super(search_type, type_to_use), name)
@typechecked
def call_method(
self,
tx,
name: str,
args: Sequence[Variable],
kwargs: Dict[str, Variable],
) -> Variable:
options = vars.propagate(self, args, kwargs.values(), self.objvar, self.typevar)
inner_fn = self.const_getattr(self, name)
if inner_fn is object.__init__:
return LambdaVariable(identity, **options)
elif isinstance(inner_fn, types.FunctionType):
return vars.userfunc(inner_fn, **options).call_function(
tx, [self.objvar] + args, kwargs
)
elif isinstance(inner_fn, types.MethodType):
return vars.usermethod(
inner_fn.__func__, self.objvar, **options
).call_function(tx, args, kwargs)
else:
unimplemented(f"non-function or method super: {inner_fn}")
class UnknownVariable(Variable):
"""
It could be anything!
"""
class ClosureVariable(UnknownVariable):
def __init__(self, name, **kwargs):
super(ClosureVariable, self).__init__(**kwargs)
self.name = name
def reconstruct(self, codegen):
return [codegen.create_load_closure(self.name)]
class NewCellVariable(Variable):
pass
class NewGlobalVariable(Variable):
pass
class ContextManagerVariable(Variable):
pass
class ContextWrappingVariable(ContextManagerVariable):
"""represents torch.{no_grad,enable_grad,set_grad_mode}()"""
_guards_singleton = {Guard("", GuardSource.GLOBAL, GuardBuilder.GRAD_MODE)}
def __init__(self, target_value, initial_value=None, **kwargs):
super(ContextWrappingVariable, self).__init__(**kwargs)
self.guards = self.guards | self._guards_singleton
self.target_value = target_value
if initial_value is None:
initial_value = self._initial_value()
self.initial_value = initial_value
def enter(self, tx):
self._call_func(tx, self.target_value)
return vars.constant(None, **vars.propagate(self))
def exit(self, tx, *args):
self._call_func(tx, self.initial_value)
return vars.constant(None, **vars.propagate(self))
def reconstruct(self, codegen, target_inst=None):
"""
        Generate the following Python bytecode, with a `torch._C._set_grad_enabled` call
Python 3.8
0 LOAD_GLOBAL 0 (torch)
2 LOAD_ATTR 1 (_C)
        4 LOAD_METHOD 2 (_set_grad_enabled)
6 LOAD_CONST 1 (False)
8 CALL_METHOD 1
10 POP_TOP
12 SETUP_FINALLY 10 (to 24)
14 LOAD_GLOBAL 3 (user_inst)
16 CALL_FUNCTION 0
18 POP_TOP
20 POP_BLOCK
22 BEGIN_FINALLY
24 LOAD_GLOBAL 0 (torch)
26 LOAD_ATTR 1 (_C)
        28 LOAD_METHOD 2 (_set_grad_enabled)
30 LOAD_CONST 2 (True)
32 CALL_METHOD 1
34 POP_TOP
36 END_FINALLY
38 LOAD_CONST 0 (None)
40 RETURN_VALUE
        Instructions 0-10 and 24-34 call torch._C._set_grad_enabled(True/False)
Python 3.9, 3.10
0 LOAD_GLOBAL 0 (torch)
2 LOAD_ATTR 1 (_C)
        4 LOAD_METHOD 2 (_set_grad_enabled)
6 LOAD_CONST 1 (False)
8 CALL_METHOD 1
10 POP_TOP
12 SETUP_FINALLY 22 (to 36)
14 LOAD_GLOBAL 3 (user_inst)
16 CALL_FUNCTION 0
18 POP_TOP
20 POP_BLOCK
22 LOAD_GLOBAL 0 (torch)
24 LOAD_ATTR 1 (_C)
        26 LOAD_METHOD 2 (_set_grad_enabled)
28 LOAD_CONST 2 (True)
30 CALL_METHOD 1
32 POP_TOP
34 JUMP_FORWARD 14 (to 50)
36 LOAD_GLOBAL 0 (torch)
38 LOAD_ATTR 1 (_C)
        40 LOAD_METHOD 2 (_set_grad_enabled)
42 LOAD_CONST 2 (True)
44 CALL_METHOD 1
46 POP_TOP
48 RERAISE
50 LOAD_CONST 0 (None)
52 RETURN_VALUE
"""
if self.target_value == self.initial_value:
return ([], [])
def set_grad_insts(mode):
global_torch_source = codegen.tx.import_source("torch")
attr_source = AttrSource(global_torch_source, self._func_name())
load_set_grad_enabled_insts = attr_source.reconstruct(codegen)
return [
*load_set_grad_enabled_insts,
codegen.create_load_const(mode),
create_instruction("CALL_FUNCTION", 1),
create_instruction("POP_TOP"),
]
init_block = set_grad_insts(self.target_value)
finally_block = set_grad_insts(self.initial_value)
setup_final_inst = create_instruction("SETUP_FINALLY", target=finally_block[0])
prologue = init_block + [setup_final_inst]
# Generate the epilogue - starts with 20 POP_BLOCK and ends at 34 POP_TOP
if sys.version_info < (3, 9):
# Generate the prologue that ends with setup_finally
epilogue = [
create_instruction("POP_BLOCK"),
codegen.create_begin_finally(),
*finally_block,
create_instruction("END_FINALLY"),
]
else:
except_block = set_grad_insts(self.initial_value)
epilogue = [
create_instruction("POP_BLOCK"),
*except_block,
create_instruction("JUMP_FORWARD", target=target_inst),
*finally_block,
create_instruction("RERAISE"),
]
return (prologue, epilogue)
def _call_func(self, tx, initial_value):
raise NotImplementedError("_call_func called on base")
def _func_name(self):
raise NotImplementedError("_func_name called on base")
def _initial_value(self):
raise NotImplementedError("_initial_value called on base")
class GradModeVariable(ContextWrappingVariable):
def __init__(self, target_value, initial_value=None, **kwargs):
super(GradModeVariable, self).__init__(
target_value=target_value, initial_value=initial_value, **kwargs
)
def enter(self, tx):
assert self.initial_value == torch.is_grad_enabled()
return super(GradModeVariable, self).enter(tx)
def _call_func(self, tx, value):
if self.target_value == self.initial_value:
return
tx.output.graph.create_node(
"call_function", torch._C._set_grad_enabled, (value,), {}
),
torch._C._set_grad_enabled(value)
def _func_name(self):
return "_C._set_grad_enabled"
def _initial_value(self):
return torch.is_grad_enabled()
def fn_name(self):
if self.target_value:
return "enable_grad"
else:
return "no_grad"
class ProfileRecordFunctionVariable(ContextWrappingVariable):
def __init__(self, target_value, initial_value=None, **kwargs):
kwargs_edited = kwargs
super(ProfileRecordFunctionVariable, self).__init__(
target_value=target_value, initial_value=initial_value, **kwargs_edited
)
def enter(self, tx):
self.enter = True
super(ProfileRecordFunctionVariable, self).enter(tx)
def exit(self, tx, *args):
self.enter = False
super(ProfileRecordFunctionVariable, self).exit(tx)
def _call_func(self, tx, value):
if self.enter:
self.proxy_value = tx.output.create_proxy(
"call_function", torch.ops.profiler._record_function_enter, (value,), {}
)
else:
tx.output.create_proxy(
"call_function",
torch.ops.profiler._record_function_exit,
(self.proxy_value,),
{},
)
def _func_name(self):
if self.enter:
return "torch.ops.profiler._record_function_enter"
else:
return "torch.ops.profiler._record_function_exit"
def _initial_value(self):
return self.target_value
class WithExitFunctionVariable(Variable):
def __init__(self, ctx: Variable, target, **kwargs):
super(WithExitFunctionVariable, self).__init__(**kwargs)
self.ctx = ctx
self.target = target
@typechecked
def call_function(
self, tx, args: Sequence[Variable], kwargs: Dict[str, Variable]
) -> Variable:
assert not kwargs
return self.ctx.exit(tx, *args)
def reconstruct(self, codegen):
# Note here we reconstruct the context manager rather than the
# exit function. The handler generated by BlockStackEntry
# will re-enter the context in the resume function.
output = AttrSource(
codegen.tx.import_source("torch"), self.ctx.fn_name()
).reconstruct(codegen)
if codegen.tx.output.partial_convert:
output.extend(
[
create_instruction("CALL_FUNCTION", 0),
create_instruction("SETUP_WITH", target=self.target),
create_instruction("POP_TOP"),
]
)
return output
class InspectSignatureVariable(Variable):
"""represents inspect.signature(...)"""
@staticmethod
def create(callable, **kwargs):
if kwargs:
unimplemented(f"inspect.signature with {kwargs}")
return InspectSignatureVariable(callable)
def __init__(self, inspected, **kwargs):
super(InspectSignatureVariable, self).__init__(**kwargs)
self.inspected = inspected
class AutogradFunctionVariable(Variable):
"""represents a torch.autograd.Function subclass"""
def __init__(self, fn_cls, **kwargs):
super().__init__(**kwargs)
self.fn_cls = fn_cls
def call_apply(self, tx, args, kwargs):
requires_grad = False
def visit(node):
nonlocal requires_grad
if isinstance(node, vars.TensorVariable):
if node.requires_grad is not False:
requires_grad = True
if isinstance(node, vars.NNModuleVariable):
if node.is_training(tx):
requires_grad = True
return node
Variable.apply(visit, (args, kwargs))
if requires_grad and torch.is_grad_enabled():
# TODO(jansel): handle this in training mode
unimplemented("autograd.Function with requires_grad")
args = [BlackHoleVariable()] + list(args)
options = vars.propagate(self, args, kwargs.values())
return vars.userfunc(self.fn_cls.forward, **options).call_function(
tx, args, kwargs
)
class BlackHoleVariable(Variable):
"""A autograd.function context that just ignores everything (for forward extraction)"""
@typechecked
def call_method(
self,
tx,
name: str,
args: List[Variable],
kwargs: Dict[str, Variable],
) -> Variable:
assert name in ("__setattr__", "save_for_backward"), name
return vars.constant(None).trace(self, args, kwargs)
class LambdaVariable(Variable):
def __init__(self, fn, **kwargs):
super(LambdaVariable, self).__init__(**kwargs)
self.fn = fn
def call_function(
self, tx, args: List[Variable], kwargs: Dict[str, Variable]
) -> Variable:
return self.fn(*args, **kwargs).trace(self)
class GetAttrVariable(Variable):
def __init__(self, obj, name, **kwargs):
super(GetAttrVariable, self).__init__(**kwargs)
assert isinstance(obj, Variable)
assert isinstance(name, str)
self.obj = obj
self.name = name
def as_proxy(self):
return getattr(self.obj.as_proxy(), self.name)
def const_getattr(self, tx, name):
if not isinstance(self.obj, vars.NNModuleVariable):
raise NotImplementedError()
step1 = tx.output.get_submodule(self.obj.module_key)
if self.name not in step1.__dict__:
raise NotImplementedError()
step2 = inspect.getattr_static(step1, self.name)
if name not in step2.__dict__:
raise NotImplementedError()
return inspect.getattr_static(step2, name)
def reconstruct(self, codegen):
codegen(self.obj)
return codegen.create_load_attrs(self.name)
@typechecked
def call_function(
self, tx, args: Sequence[Variable], kwargs: Dict[str, Variable]
) -> Variable:
# This variable is True when it corresponds to user code such as
#
# super().__torch_function__(...)
#
# and the super().__torch_function__ attribute resolves
# to torch.Tensor.__torch_function__.
is_original_tensor_torch_function = (
self.name == "__torch_function__"
and isinstance(self.obj, SuperVariable)
# for now, only support one level of inheritance
and len(self.obj.objvar.value.__mro__) > 1
and self.obj.objvar.value.__mro__[1] == torch.Tensor
)
if is_original_tensor_torch_function:
# Instead of tracing inside torch.Tensor.__torch_function__,
# record the `call_function` or `call_method` call into the graph.
from . import TensorVariable
from . import TorchVariable
original_torch_or_getattr_variable = args[0]
new_args = args[2].items
new_kwargs = args[3].items
options = vars.propagate(self, new_args, new_kwargs.values())
# Disable __torch_function__ here to prevent the clone of the
# example tensor from going into the override.
with torch._C.DisableTorchFunction():
if isinstance(args[0], TorchVariable):
return TensorVariable.create(
tx=tx,
proxy=tx.output.create_proxy(
"call_function",
original_torch_or_getattr_variable.value,
*proxy_args_kwargs(new_args, new_kwargs),
),
**options,
)
elif isinstance(args[0], GetAttrVariable):
return TensorVariable.create(
tx=tx,
proxy=tx.output.create_proxy(
"call_method",
original_torch_or_getattr_variable.name,
*proxy_args_kwargs(new_args, new_kwargs),
),
**options,
)
else:
unimplemented(
f"GetAttrVariable.call_function original __torch_function__ {args}"
)
if isinstance(self.obj, AutogradFunctionVariable) and self.name == "apply":
return self.obj.call_apply(tx, args, kwargs).trace(self)
return self.obj.call_method(tx, self.name, args, kwargs).trace(self)
@typechecked
def call_method(
self,
tx,
name,
args: Sequence[Variable],
kwargs: Dict[str, Variable],
) -> Variable:
if (
name == "__len__"
and isinstance(self.obj, InspectSignatureVariable)
and self.name == "parameters"
):
return vars.constant(self.obj.inspected.num_parameters()).trace(
self, self.obj, self.obj.inspected
)
return super(GetAttrVariable, self).call_method(tx, name, args, kwargs)
class PythonModuleVariable(Variable):
_python_type_ = types.ModuleType
class SkipFilesVariable(Variable):
_python_type_ = "self"
_as_python_constant_ = "self"
@typechecked
def call_function(
self, tx, args: Sequence[Variable], kwargs: Dict[str, Variable]
) -> Variable:
if inspect.getattr_static(self.value, "_torchdynamo_disable", False):
unimplemented("call torchdynamo.disable() wrapped function")
else:
try:
path = inspect.getfile(self.value)
except TypeError:
path = f"Builtin {self.value.__name__}"
unimplemented("call_function in skip_files " + path)
class TypingVariable(Variable):
@typechecked
def call_method(
self,
tx,
name: str,
args: Sequence[Variable],
kwargs: Dict[str, Variable],
) -> Variable:
if name == "__getitem__" and len(args) == 1:
index = args[0].as_python_constant()
return vars.constant(self.value[index]).trace(self, args)
unimplemented("typing")
class NumpyVariable(Variable):
"""
Wrapper around `numpy.*` for better error messages.
"""
pass
|
PypiClean
|
/tb-rest-client-3.5.tar.gz/tb-rest-client-3.5/tb_rest_client/models/models_pe/queue_id.py
|
# Copyright 2023. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import re # noqa: F401
import six
class QueueId(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'entity_type': 'str'
}
attribute_map = {
'id': 'id',
'entity_type': 'entityType'
}
def __init__(self, id=None, entity_type=None): # noqa: E501
"""QueueId - a model defined in Swagger""" # noqa: E501
self._id = None
self._entity_type = None
self.discriminator = None
self.id = id
self.entity_type = entity_type
@property
def id(self):
"""Gets the id of this QueueId. # noqa: E501
ID of the entity, time-based UUID v1 # noqa: E501
:return: The id of this QueueId. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this QueueId.
ID of the entity, time-based UUID v1 # noqa: E501
:param id: The id of this QueueId. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def entity_type(self):
"""Gets the entity_type of this QueueId. # noqa: E501
:return: The entity_type of this QueueId. # noqa: E501
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""Sets the entity_type of this QueueId.
:param entity_type: The entity_type of this QueueId. # noqa: E501
:type: str
"""
if entity_type is None:
raise ValueError("Invalid value for `entity_type`, must not be `None`") # noqa: E501
allowed_values = ["ALARM", "API_USAGE_STATE", "ASSET", "ASSET_PROFILE", "BLOB_ENTITY", "CONVERTER", "CUSTOMER", "DASHBOARD", "DEVICE", "DEVICE_PROFILE", "EDGE", "ENTITY_GROUP", "ENTITY_VIEW", "GROUP_PERMISSION", "INTEGRATION", "NOTIFICATION", "NOTIFICATION_REQUEST", "NOTIFICATION_RULE", "NOTIFICATION_TARGET", "NOTIFICATION_TEMPLATE", "OTA_PACKAGE", "QUEUE", "ROLE", "RPC", "RULE_CHAIN", "RULE_NODE", "SCHEDULER_EVENT", "TB_RESOURCE", "TENANT", "TENANT_PROFILE", "USER", "WIDGETS_BUNDLE", "WIDGET_TYPE"] # noqa: E501
if entity_type not in allowed_values:
raise ValueError(
"Invalid value for `entity_type` ({0}), must be one of {1}" # noqa: E501
.format(entity_type, allowed_values)
)
self._entity_type = entity_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(QueueId, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, QueueId):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
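# --- Hedged usage sketch (editor's addition, not part of the generated client) ---
# Constructing a QueueId and serializing it with to_dict(); the UUID below is a
# placeholder, not a real queue identifier.
def _example_queue_id_roundtrip():
    queue_id = QueueId(
        id="784f394c-42b6-435a-983c-b7beff2784f9", entity_type="QUEUE"
    )
    # to_dict() walks swagger_types and returns plain Python values.
    return queue_id.to_dict()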
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/apimanagement/v20180101/_enums.py
|
from enum import Enum
__all__ = [
'ApiType',
'ApimIdentityType',
'AuthorizationMethod',
'BackendProtocol',
'BearerTokenSendingMethod',
'BearerTokenSendingMethods',
'ClientAuthenticationMethod',
'Confirmation',
'ContentFormat',
'GrantType',
'GroupType',
'HostnameType',
'IdentityProviderType',
'LoggerType',
'PolicyContentFormat',
'ProductState',
'Protocol',
'SkuType',
'SoapApiType',
'State',
'SubscriptionState',
'UserState',
'VersioningScheme',
'VirtualNetworkType',
]
class ApiType(str, Enum):
"""
Type of API.
"""
HTTP = "http"
SOAP = "soap"
class ApimIdentityType(str, Enum):
"""
The identity type. Currently the only supported type is 'SystemAssigned'.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
class AuthorizationMethod(str, Enum):
HEAD = "HEAD"
OPTIONS = "OPTIONS"
TRACE = "TRACE"
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
DELETE = "DELETE"
class BackendProtocol(str, Enum):
"""
Backend communication protocol.
"""
HTTP = "http"
SOAP = "soap"
class BearerTokenSendingMethod(str, Enum):
AUTHORIZATION_HEADER = "authorizationHeader"
QUERY = "query"
class BearerTokenSendingMethods(str, Enum):
"""
Form of an authorization grant, which the client uses to request the access token.
"""
AUTHORIZATION_HEADER = "authorizationHeader"
QUERY = "query"
class ClientAuthenticationMethod(str, Enum):
BASIC = "Basic"
BODY = "Body"
class Confirmation(str, Enum):
"""
Determines the type of confirmation e-mail that will be sent to the newly created user.
"""
SIGNUP = "signup"
INVITE = "invite"
class ContentFormat(str, Enum):
"""
Format of the Content in which the API is getting imported.
"""
WADL_XML = "wadl-xml"
WADL_LINK_JSON = "wadl-link-json"
SWAGGER_JSON = "swagger-json"
SWAGGER_LINK_JSON = "swagger-link-json"
WSDL = "wsdl"
WSDL_LINK = "wsdl-link"
class GrantType(str, Enum):
AUTHORIZATION_CODE = "authorizationCode"
IMPLICIT = "implicit"
RESOURCE_OWNER_PASSWORD = "resourceOwnerPassword"
CLIENT_CREDENTIALS = "clientCredentials"
class GroupType(str, Enum):
"""
Group type.
"""
CUSTOM = "custom"
SYSTEM = "system"
EXTERNAL = "external"
class HostnameType(str, Enum):
"""
Hostname type.
"""
PROXY = "Proxy"
PORTAL = "Portal"
MANAGEMENT = "Management"
SCM = "Scm"
class IdentityProviderType(str, Enum):
"""
Identity Provider Type identifier.
"""
FACEBOOK = "facebook"
GOOGLE = "google"
MICROSOFT = "microsoft"
TWITTER = "twitter"
AAD = "aad"
AAD_B2_C = "aadB2C"
class LoggerType(str, Enum):
"""
Logger type.
"""
AZURE_EVENT_HUB = "azureEventHub"
APPLICATION_INSIGHTS = "applicationInsights"
class PolicyContentFormat(str, Enum):
"""
Format of the policyContent.
"""
XML = "xml"
XML_LINK = "xml-link"
RAWXML = "rawxml"
RAWXML_LINK = "rawxml-link"
class ProductState(str, Enum):
"""
    Whether the product is published or not. Published products are discoverable by users of the developer portal. Non-published products are visible only to administrators. The default state of a Product is notPublished.
"""
NOT_PUBLISHED = "notPublished"
PUBLISHED = "published"
class Protocol(str, Enum):
HTTP = "http"
HTTPS = "https"
class SkuType(str, Enum):
"""
Name of the Sku.
"""
DEVELOPER = "Developer"
STANDARD = "Standard"
PREMIUM = "Premium"
BASIC = "Basic"
class SoapApiType(str, Enum):
"""
Type of Api to create.
* `http` creates a SOAP to REST API
     * `soap` creates a SOAP pass-through API.
"""
SOAP_TO_REST = "http"
SOAP_PASS_THROUGH = "soap"
class State(str, Enum):
"""
Status of the issue.
"""
PROPOSED = "proposed"
OPEN = "open"
REMOVED = "removed"
RESOLVED = "resolved"
CLOSED = "closed"
class SubscriptionState(str, Enum):
"""
Initial subscription state. If no value is specified, subscription is created with Submitted state. Possible states are * active – the subscription is active, * suspended – the subscription is blocked, and the subscriber cannot call any APIs of the product, * submitted – the subscription request has been made by the developer, but has not yet been approved or rejected, * rejected – the subscription request has been denied by an administrator, * cancelled – the subscription has been cancelled by the developer or administrator, * expired – the subscription reached its expiration date and was deactivated.
"""
SUSPENDED = "suspended"
ACTIVE = "active"
EXPIRED = "expired"
SUBMITTED = "submitted"
REJECTED = "rejected"
CANCELLED = "cancelled"
class UserState(str, Enum):
"""
Account state. Specifies whether the user is active or not. Blocked users are unable to sign into the developer portal or call any APIs of subscribed products. Default state is Active.
"""
ACTIVE = "active"
BLOCKED = "blocked"
PENDING = "pending"
DELETED = "deleted"
class VersioningScheme(str, Enum):
"""
    A value that determines where the API Version identifier will be located in an HTTP request.
"""
SEGMENT = "Segment"
QUERY = "Query"
HEADER = "Header"
class VirtualNetworkType(str, Enum):
"""
    The type of VPN in which the API Management service needs to be configured. None (Default Value) means the API Management service is not part of any Virtual Network, External means the API Management deployment is set up inside a Virtual Network having an Internet Facing Endpoint, and Internal means that the API Management deployment is set up inside a Virtual Network having an Intranet Facing Endpoint only.
"""
NONE = "None"
EXTERNAL = "External"
INTERNAL = "Internal"
|
PypiClean
|
/process_engine_client-0.3.tar.gz/process_engine_client-0.3/process_engine_client/core/loop_helper.py
|
import asyncio
import logging
import signal
logger = logging.getLogger(__name__)
_DEFAULT_DELAY = 1
class LoopHelper:
    # TODO: mm - replace kwargs with more readable parameters
    def __init__(self, loop=None, **kwargs):
        # Resolve the loop at call time; evaluating asyncio.get_event_loop()
        # as a default argument would bind a single loop at import time.
        self._loop = loop if loop is not None else asyncio.get_event_loop()
self._tasks = []
self.on_shutdown = kwargs.get('on_shutdown', self.__internal_on_shutdown)
def create_task(self, task_callback):
task = self._loop.create_task(task_callback())
self._tasks.append(task)
def register_delayed_task(self, task_func, **options):
logger.info(f"create delayed tasks with options ({options}).")
task = self._loop.create_task(self.__create_delayed_task(task_func, **options))
self._tasks.append(task)
return task
def unregister_delayed_task(self, delayed_task, msg=""):
return self.__unregister_task(delayed_task, msg)
async def __create_delayed_task(self, task_func, **options):
async def _worker(delay):
try:
await asyncio.sleep(delay)
if asyncio.iscoroutinefunction(task_func):
logger.debug("running delayed job (async)")
await task_func()
else:
logger.debug("running delayed job (sync)")
task_func()
except asyncio.CancelledError as ce:
logger.debug(f"Cancel the task {ce}")
delay = options.get('delay', _DEFAULT_DELAY)
return await _worker(delay)
def register_background_task(self, task_func, **options):
logger.info(f"create background worker with options ({options}).")
task = self._loop.create_task(self.__create_background_task(task_func, **options))
self._tasks.append(task)
return task
def unregister_background_task(self, background_task, msg=""):
return self.__unregister_task(background_task, msg)
def __unregister_task(self, task, msg):
can_unregister = True
        # list.index() raises ValueError when the task is missing, so test
        # membership instead and fall through to the warning below.
        if task in self._tasks:
logger.info(f"cancel and unregister task: {msg}")
self._tasks.remove(task)
try:
task.cancel()
logger.info(f"cancelled task: {msg}")
except asyncio.CancelledError as ce:
logger.error(f"__unregister_task: {ce}")
pass
else:
logger.warning("did'nt found task to unregister")
can_unregister = False
return can_unregister
async def __create_background_task(self, task_func, **options):
async def _task(delay):
running = True
while running:
try:
if with_delay:
logger.debug(f"background worker delay for {delay}")
await asyncio.sleep(delay)
if asyncio.iscoroutinefunction(task_func):
logger.debug("running background job (async)")
await task_func()
else:
logger.debug("running background job (sync)")
task_func()
except asyncio.CancelledError:
running = False
delay = options.get('delay', _DEFAULT_DELAY)
with_delay = True if delay > 0 else False
return await _task(delay)
def start(self):
logger.info("Starting event loop.")
try:
self.__register_shutdown()
self._loop.run_forever()
except KeyboardInterrupt:
self._loop.close()
def stop(self):
logger.info("Stopping event loop.")
for task in self._tasks:
try:
task.cancel()
except Exception as e:
logger.warning(f"Task stopped with exception {e}")
self._loop.stop()
async def __internal_on_shutdown(self):
logger.debug('only internal on_shutdown called')
await asyncio.sleep(0)
def __register_shutdown(self):
async def shutdown(sig):
logger.info(f"Received exit signal {sig.name}...")
await self.on_shutdown()
self.stop()
signal_handler = lambda sig: asyncio.create_task(shutdown(sig))
signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT, signal.SIGQUIT)
for s in signals:
self._loop.add_signal_handler(s, signal_handler, s)
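# --- Hedged usage sketch (editor's addition, not part of the package) ---
# Minimal way to drive the helper: register a periodic background task and a
# one-shot delayed task, then hand control to the event loop. ``poll`` and
# ``warm_up`` are hypothetical callables supplied by the application; the
# signal handlers registered in start() assume a Unix event loop.
def _example_loop_helper(poll, warm_up):
    helper = LoopHelper()
    helper.register_background_task(poll, delay=5)  # re-runs roughly every 5s
    helper.register_delayed_task(warm_up, delay=1)  # runs once after ~1s
    helper.start()  # blocks until stop() is called or a signal arrives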
|
PypiClean
|
/TPHATE-0.1-py3-none-any.whl/tphate/base.py
|
from future.utils import with_metaclass
from builtins import super
from copy import copy as shallow_copy
import numpy as np
import abc
import pygsp
from inspect import signature
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.preprocessing import normalize
from sklearn.utils.graph import graph_shortest_path
from scipy import sparse
import warnings
import numbers
import pickle
import sys
import tasklogger
from . import matrix, utils
_logger = tasklogger.get_tasklogger("graphtools")
class Base(object):
"""Class that deals with key-word arguments but is otherwise
just an object.
"""
def __init__(self):
super().__init__()
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [
p
for p in init_signature.parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
# Extract and sort argument names excluding 'self'
parameters = set([p.name for p in parameters])
# recurse
for superclass in cls.__bases__:
try:
parameters.update(superclass._get_param_names())
except AttributeError:
# object and pygsp.graphs.Graph don't have this method
pass
return parameters
def set_params(self, **kwargs):
# for k in kwargs:
# raise TypeError("set_params() got an unexpected "
# "keyword argument '{}'".format(k))
return self
class Data(Base):
"""Parent class that handles the import and dimensionality reduction of data
Parameters
----------
data : array-like, shape=[n_samples,n_features]
accepted types: `numpy.ndarray`, `scipy.sparse.spmatrix`.
`pandas.DataFrame`, `pandas.SparseDataFrame`.
n_pca : {`int`, `None`, `bool`, 'auto'}, optional (default: `None`)
number of PC dimensions to retain for graph building.
If n_pca in `[None, False, 0]`, uses the original data.
If 'auto' or `True` then estimate using a singular value threshold
Note: if data is sparse, uses SVD instead of PCA
TODO: should we subtract and store the mean?
rank_threshold : `float`, 'auto', optional (default: 'auto')
threshold to use when estimating rank for
`n_pca in [True, 'auto']`.
If 'auto', this threshold is
s_max * eps * max(n_samples, n_features)
where s_max is the maximum singular value of the data matrix
and eps is numerical precision. [press2007]_.
random_state : `int` or `None`, optional (default: `None`)
Random state for random PCA
Attributes
----------
data : array-like, shape=[n_samples,n_features]
Original data matrix
n_pca : int or `None`
data_nu : array-like, shape=[n_samples,n_pca]
Reduced data matrix
data_pca : sklearn.decomposition.PCA or sklearn.decomposition.TruncatedSVD
sklearn PCA operator
"""
def __init__(
self, data, n_pca=None, rank_threshold=None, random_state=None, **kwargs
):
self._check_data(data)
n_pca, rank_threshold = self._parse_n_pca_threshold(data, n_pca, rank_threshold)
if utils.is_SparseDataFrame(data):
data = data.to_coo()
elif utils.is_DataFrame(data):
try:
# sparse data
data = data.sparse.to_coo()
except AttributeError:
# dense data
data = np.array(data)
elif utils.is_Anndata(data):
data = data.X
self.data = data
self.n_pca = n_pca
self.rank_threshold = rank_threshold
self.random_state = random_state
self.data_nu = self._reduce_data()
super().__init__(**kwargs)
def _parse_n_pca_threshold(self, data, n_pca, rank_threshold):
if isinstance(n_pca, str):
n_pca = n_pca.lower()
if n_pca != "auto":
raise ValueError(
"n_pca must be an integer "
"0 <= n_pca < min(n_samples,n_features), "
"or in [None, False, True, 'auto']."
)
if isinstance(n_pca, numbers.Number):
if not float(n_pca).is_integer(): # cast it to integer
n_pcaR = np.round(n_pca).astype(int)
warnings.warn(
"Cannot perform PCA to fractional {} dimensions. "
"Rounding to {}".format(n_pca, n_pcaR),
RuntimeWarning,
)
n_pca = n_pcaR
if n_pca < 0:
raise ValueError(
"n_pca cannot be negative. "
"Please supply an integer "
"0 <= n_pca < min(n_samples,n_features) or None"
)
elif np.min(data.shape) <= n_pca:
warnings.warn(
"Cannot perform PCA to {} dimensions on "
"data with min(n_samples, n_features) = {}".format(
n_pca, np.min(data.shape)
),
RuntimeWarning,
)
n_pca = 0
if n_pca in [0, False, None]: # cast 0, False to None.
n_pca = None
elif n_pca is True: # notify that we're going to estimate rank.
n_pca = "auto"
_logger.info(
"Estimating n_pca from matrix rank. "
"Supply an integer n_pca "
"for fixed amount."
)
if not any([isinstance(n_pca, numbers.Number), n_pca is None, n_pca == "auto"]):
raise ValueError(
"n_pca was not an instance of numbers.Number, "
"could not be cast to False, and not None. "
"Please supply an integer "
"0 <= n_pca < min(n_samples,n_features) or None"
)
if rank_threshold is not None and n_pca != "auto":
warnings.warn(
"n_pca = {}, therefore rank_threshold of {} "
"will not be used. To use rank thresholding, "
"set n_pca = True".format(n_pca, rank_threshold),
RuntimeWarning,
)
if n_pca == "auto":
if isinstance(rank_threshold, str):
rank_threshold = rank_threshold.lower()
if rank_threshold is None:
rank_threshold = "auto"
if isinstance(rank_threshold, numbers.Number):
if rank_threshold <= 0:
raise ValueError(
"rank_threshold must be positive float or 'auto'. "
)
else:
if rank_threshold != "auto":
raise ValueError(
"rank_threshold must be positive float or 'auto'. "
)
return n_pca, rank_threshold
def _check_data(self, data):
if len(data.shape) != 2:
msg = "Expected 2D array, got {}D array " "instead (shape: {}.) ".format(
len(data.shape), data.shape
)
if len(data.shape) < 2:
msg += (
"\nReshape your data either using array.reshape(-1, 1) "
"if your data has a single feature or array.reshape(1, -1) if "
"it contains a single sample."
)
raise ValueError(msg)
def _reduce_data(self):
"""Private method to reduce data dimension.
If data is dense, uses randomized PCA. If data is sparse, uses
randomized SVD.
TODO: should we subtract and store the mean?
TODO: Fix the rank estimation so we do not compute the full SVD.
Returns
-------
Reduced data matrix
"""
if self.n_pca is not None and (
self.n_pca == "auto" or self.n_pca < self.data.shape[1]
):
with _logger.task("PCA"):
n_pca = self.data.shape[1] - 1 if self.n_pca == "auto" else self.n_pca
if sparse.issparse(self.data):
if (
isinstance(self.data, sparse.coo_matrix)
or isinstance(self.data, sparse.lil_matrix)
or isinstance(self.data, sparse.dok_matrix)
):
self.data = self.data.tocsr()
self.data_pca = TruncatedSVD(n_pca, random_state=self.random_state)
else:
self.data_pca = PCA(
n_pca, svd_solver="randomized", random_state=self.random_state
)
self.data_pca.fit(self.data)
if self.n_pca == "auto":
s = self.data_pca.singular_values_
smax = s.max()
if self.rank_threshold == "auto":
threshold = (
smax * np.finfo(self.data.dtype).eps * max(self.data.shape)
)
self.rank_threshold = threshold
threshold = self.rank_threshold
gate = np.where(s >= threshold)[0]
self.n_pca = gate.shape[0]
if self.n_pca == 0:
raise ValueError(
"Supplied threshold {} was greater than "
"maximum singular value {} "
"for the data matrix".format(threshold, smax)
)
_logger.info(
"Using rank estimate of {} as n_pca".format(self.n_pca)
)
# reset the sklearn operator
op = self.data_pca # for line-width brevity..
op.components_ = op.components_[gate, :]
op.explained_variance_ = op.explained_variance_[gate]
op.explained_variance_ratio_ = op.explained_variance_ratio_[gate]
op.singular_values_ = op.singular_values_[gate]
self.data_pca = (
                        op  # I'm not sure this is needed due to assignment rules
)
data_nu = self.data_pca.transform(self.data)
return data_nu
else:
data_nu = self.data
if sparse.issparse(data_nu) and not isinstance(
data_nu, (sparse.csr_matrix, sparse.csc_matrix, sparse.bsr_matrix)
):
data_nu = data_nu.tocsr()
return data_nu
def get_params(self):
"""Get parameters from this object
"""
return {"n_pca": self.n_pca, "random_state": self.random_state}
def set_params(self, **params):
"""Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
- n_pca
- random_state
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
"""
if "n_pca" in params and params["n_pca"] != self.n_pca:
raise ValueError("Cannot update n_pca. Please create a new graph")
if "random_state" in params:
self.random_state = params["random_state"]
super().set_params(**params)
return self
def transform(self, Y):
"""Transform input data `Y` to reduced data space defined by `self.data`
Takes data in the same ambient space as `self.data` and transforms it
to be in the same reduced space as `self.data_nu`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_features]
n_features must be the same as `self.data`.
Returns
-------
Transformed data, shape=[n_samples_y, n_pca]
Raises
------
ValueError : if Y.shape[1] != self.data.shape[1]
"""
try:
# try PCA first
return self.data_pca.transform(Y)
except ValueError:
# shape is wrong
raise ValueError(
"data of shape {0} cannot be transformed"
" to graph built on data of shape {1}. "
"Expected shape ({2}, {3})".format(
Y.shape, self.data.shape, Y.shape[0], self.data.shape[1]
)
)
except AttributeError: # no pca, try to return data
if len(Y.shape) < 2 or Y.shape[1] != self.data.shape[1]:
# shape is wrong
raise ValueError(
"data of shape {0} cannot be transformed"
" to graph built on data of shape {1}. "
"Expected shape ({2}, {3})".format(
Y.shape, self.data.shape, Y.shape[0], self.data.shape[1]
)
)
else:
return Y
def inverse_transform(self, Y, columns=None):
"""Transform input data `Y` to ambient data space defined by `self.data`
Takes data in the same reduced space as `self.data_nu` and transforms
it to be in the same ambient space as `self.data`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_pca]
n_features must be the same as `self.data_nu`.
columns : list-like
list of integers referring to column indices in the original data
space to be returned. Avoids recomputing the full matrix where only
a few dimensions of the ambient space are of interest
Returns
-------
Inverse transformed data, shape=[n_samples_y, n_features]
Raises
------
ValueError : if Y.shape[1] != self.data_nu.shape[1]
"""
try:
if not hasattr(self, "data_pca"):
# no pca performed
try:
if Y.shape[1] != self.data_nu.shape[1]:
# shape is wrong
raise ValueError
except IndexError:
# len(Y.shape) < 2
raise ValueError
if columns is None:
return Y
else:
columns = np.array([columns]).flatten()
return Y[:, columns]
else:
if columns is None:
return self.data_pca.inverse_transform(Y)
else:
# only return specific columns
columns = np.array([columns]).flatten()
Y_inv = np.dot(Y, self.data_pca.components_[:, columns])
if hasattr(self.data_pca, "mean_"):
Y_inv += self.data_pca.mean_[columns]
return Y_inv
except ValueError:
# more informative error
raise ValueError(
"data of shape {0} cannot be inverse transformed"
" from graph built on reduced data of shape ({1}, {2}). Expected shape ({3}, {2})".format(
Y.shape, self.data_nu.shape[0], self.data_nu.shape[1], Y.shape[0]
)
)
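# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Shows the intended flow of the Data class: reduce at construction, then move
# new points in and out of the reduced space. The random matrices are
# placeholders for real observations.
def _example_data_reduction():
    X = np.random.normal(size=(200, 50))
    data_op = Data(X, n_pca=10, random_state=0)
    reduced = data_op.data_nu  # shape (200, 10)
    new_points = np.random.normal(size=(5, 50))
    embedded = data_op.transform(new_points)  # shape (5, 10)
    restored = data_op.inverse_transform(embedded)  # back to shape (5, 50)
    return reduced, embedded, restored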
class BaseGraph(with_metaclass(abc.ABCMeta, Base)):
"""Parent graph class
Parameters
----------
kernel_symm : string, optional (default: '+')
Defines method of kernel symmetrization.
'+' : additive
'*' : multiplicative
'mnn' : min-max MNN symmetrization
'none' : no symmetrization
theta: float (default: 1)
Min-max symmetrization constant.
K = `theta * min(K, K.T) + (1 - theta) * max(K, K.T)`
anisotropy : float, optional (default: 0)
Level of anisotropy between 0 and 1
(alpha in Coifman & Lafon, 2006)
initialize : `bool`, optional (default : `True`)
if false, don't create the kernel matrix.
Attributes
----------
K : array-like, shape=[n_samples, n_samples]
kernel matrix defined as the adjacency matrix with
ones down the diagonal
kernel : synonym for `K`
P : array-like, shape=[n_samples, n_samples] (cached)
diffusion operator defined as a row-stochastic form
of the kernel matrix
diff_op : synonym for `P`
"""
def __init__(
self,
kernel_symm="+",
theta=None,
anisotropy=0,
gamma=None,
initialize=True,
**kwargs
):
if gamma is not None:
warnings.warn(
"gamma is deprecated. " "Setting theta={}".format(gamma), FutureWarning
)
theta = gamma
if kernel_symm == "gamma":
warnings.warn(
"kernel_symm='gamma' is deprecated. " "Setting kernel_symm='mnn'",
FutureWarning,
)
kernel_symm = "mnn"
if kernel_symm == "theta":
warnings.warn(
"kernel_symm='theta' is deprecated. " "Setting kernel_symm='mnn'",
FutureWarning,
)
kernel_symm = "mnn"
self.kernel_symm = kernel_symm
self.theta = theta
self._check_symmetrization(kernel_symm, theta)
if not (isinstance(anisotropy, numbers.Real) and 0 <= anisotropy <= 1):
raise ValueError(
"Expected 0 <= anisotropy <= 1. " "Got {}".format(anisotropy)
)
self.anisotropy = anisotropy
if initialize:
_logger.debug("Initializing kernel...")
self.K
else:
_logger.debug("Not initializing kernel.")
super().__init__(**kwargs)
def _check_symmetrization(self, kernel_symm, theta):
if kernel_symm not in ["+", "*", "mnn", None]:
raise ValueError(
"kernel_symm '{}' not recognized. Choose from "
"'+', '*', 'mnn', or 'none'.".format(kernel_symm)
)
elif kernel_symm != "mnn" and theta is not None:
warnings.warn(
"kernel_symm='{}' but theta is not None. "
"Setting kernel_symm='mnn'.".format(kernel_symm)
)
self.kernel_symm = kernel_symm = "mnn"
if kernel_symm == "mnn":
if theta is None:
self.theta = theta = 1
warnings.warn(
"kernel_symm='mnn' but theta not given. "
"Defaulting to theta={}.".format(self.theta)
)
elif not isinstance(theta, numbers.Number) or theta < 0 or theta > 1:
raise ValueError(
"theta {} not recognized. Expected "
"a float between 0 and 1".format(theta)
)
def _build_kernel(self):
"""Private method to build kernel matrix
Runs public method to build kernel matrix and runs
additional checks to ensure that the result is okay
Returns
-------
Kernel matrix, shape=[n_samples, n_samples]
Raises
------
RuntimeWarning : if K is not symmetric
"""
kernel = self.build_kernel()
kernel = self.symmetrize_kernel(kernel)
kernel = self.apply_anisotropy(kernel)
if (kernel - kernel.T).max() > 1e-5:
warnings.warn("K should be symmetric", RuntimeWarning)
if np.any(kernel.diagonal() == 0):
warnings.warn("K should have a non-zero diagonal", RuntimeWarning)
return kernel
def symmetrize_kernel(self, K):
# symmetrize
if self.kernel_symm == "+":
_logger.debug("Using addition symmetrization.")
K = (K + K.T) / 2
elif self.kernel_symm == "*":
_logger.debug("Using multiplication symmetrization.")
K = K.multiply(K.T)
elif self.kernel_symm == "mnn":
_logger.debug("Using mnn symmetrization (theta = {}).".format(self.theta))
K = self.theta * matrix.elementwise_minimum(K, K.T) + (
1 - self.theta
) * matrix.elementwise_maximum(K, K.T)
elif self.kernel_symm is None:
_logger.debug("Using no symmetrization.")
pass
else:
raise NotImplementedError
return K
def apply_anisotropy(self, K):
if self.anisotropy == 0:
# do nothing
return K
else:
if sparse.issparse(K):
d = np.array(K.sum(1)).flatten()
K = K.tocoo()
K.data = K.data / ((d[K.row] * d[K.col]) ** self.anisotropy)
K = K.tocsr()
else:
d = K.sum(1)
K = K / (np.outer(d, d) ** self.anisotropy)
return K
def get_params(self):
"""Get parameters from this object
"""
return {
"kernel_symm": self.kernel_symm,
"theta": self.theta,
"anisotropy": self.anisotropy,
}
def set_params(self, **params):
"""Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
Invalid parameters: (these would require modifying the kernel matrix)
- kernel_symm
- theta
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
"""
if "theta" in params and params["theta"] != self.theta:
raise ValueError("Cannot update theta. Please create a new graph")
if "anisotropy" in params and params["anisotropy"] != self.anisotropy:
raise ValueError("Cannot update anisotropy. Please create a new graph")
if "kernel_symm" in params and params["kernel_symm"] != self.kernel_symm:
raise ValueError("Cannot update kernel_symm. Please create a new graph")
super().set_params(**params)
return self
@property
def P(self):
"""Diffusion operator (cached)
Return or calculate the diffusion operator
Returns
-------
P : array-like, shape=[n_samples, n_samples]
diffusion operator defined as a row-stochastic form
of the kernel matrix
"""
try:
return self._diff_op
except AttributeError:
self._diff_op = normalize(self.kernel, "l1", axis=1)
return self._diff_op
@property
def kernel_degree(self):
"""Weighted degree vector (cached)
Return or calculate the degree vector from the affinity matrix
Returns
-------
degrees : array-like, shape=[n_samples]
Row sums of graph kernel
"""
try:
return self._kernel_degree
except AttributeError:
self._kernel_degree = matrix.to_array(self.kernel.sum(axis=1)).reshape(
-1, 1
)
return self._kernel_degree
@property
def diff_aff(self):
"""Symmetric diffusion affinity matrix
Return or calculate the symmetric diffusion affinity matrix
.. math:: A(x,y) = K(x,y) (d(x) d(y))^{-1/2}
where :math:`d` is the degrees (row sums of the kernel.)
Returns
-------
diff_aff : array-like, shape=[n_samples, n_samples]
symmetric diffusion affinity matrix defined as a
doubly-stochastic form of the kernel matrix
"""
row_degrees = self.kernel_degree
if sparse.issparse(self.kernel):
# diagonal matrix
degrees = sparse.csr_matrix(
(
1 / np.sqrt(row_degrees.flatten()),
np.arange(len(row_degrees)),
np.arange(len(row_degrees) + 1),
)
)
return degrees @ self.kernel @ degrees
else:
col_degrees = row_degrees.T
return (self.kernel / np.sqrt(row_degrees)) / np.sqrt(col_degrees)
@property
def diff_op(self):
"""Synonym for P
"""
return self.P
@property
def K(self):
"""Kernel matrix
Returns
-------
K : array-like, shape=[n_samples, n_samples]
kernel matrix defined as the adjacency matrix with
ones down the diagonal
"""
try:
return self._kernel
except AttributeError:
self._kernel = self._build_kernel()
return self._kernel
@property
def kernel(self):
"""Synonym for K
"""
return self.K
@property
def weighted(self):
return self.decay is not None
@abc.abstractmethod
def build_kernel(self):
"""Build the kernel matrix
Abstract method that all child classes must implement.
Must return a symmetric matrix
Returns
-------
K : kernel matrix, shape=[n_samples, n_samples]
symmetric matrix with ones down the diagonal
            with no negative entries.
"""
raise NotImplementedError
def to_pygsp(self, **kwargs):
"""Convert to a PyGSP graph
For use only when the user means to create the graph using
the flag `use_pygsp=True`, and doesn't wish to recompute the kernel.
Creates a graphtools.graphs.TraditionalGraph with a precomputed
affinity matrix which also inherits from pygsp.graphs.Graph.
Parameters
----------
kwargs
keyword arguments for graphtools.Graph
Returns
-------
G : graphtools.base.PyGSPGraph, graphtools.graphs.TraditionalGraph
"""
from . import api
if "precomputed" in kwargs:
if kwargs["precomputed"] != "affinity":
warnings.warn(
"Cannot build PyGSPGraph with precomputed={}. "
"Using 'affinity' instead.".format(kwargs["precomputed"]),
UserWarning,
)
del kwargs["precomputed"]
if "use_pygsp" in kwargs:
if kwargs["use_pygsp"] is not True:
warnings.warn(
"Cannot build PyGSPGraph with use_pygsp={}. "
"Use True instead.".format(kwargs["use_pygsp"]),
UserWarning,
)
del kwargs["use_pygsp"]
return api.Graph(self.K, precomputed="affinity", use_pygsp=True, **kwargs)
def to_igraph(self, attribute="weight", **kwargs):
"""Convert to an igraph Graph
Uses the igraph.Graph constructor
Parameters
----------
attribute : str, optional (default: "weight")
kwargs : additional arguments for igraph.Graph
"""
try:
import igraph as ig
except ImportError: # pragma: no cover
raise ImportError(
"Please install igraph with " "`pip install --user python-igraph`."
)
try:
W = self.W
except AttributeError:
# not a pygsp graph
W = self.K.copy()
W = matrix.set_diagonal(W, 0)
sources, targets = W.nonzero()
edgelist = list(zip(sources, targets))
g = ig.Graph(W.shape[0], edgelist, **kwargs)
weights = W[W.nonzero()]
weights = matrix.to_array(weights)
g.es[attribute] = weights.flatten().tolist()
return g
def to_pickle(self, path):
"""Save the current Graph to a pickle.
Parameters
----------
path : str
File path where the pickled object will be stored.
"""
pickle_obj = shallow_copy(self)
is_oldpygsp = all(
[isinstance(self, pygsp.graphs.Graph), int(sys.version.split(".")[1]) < 7]
)
if is_oldpygsp:
pickle_obj.logger = pickle_obj.logger.name
with open(path, "wb") as f:
pickle.dump(pickle_obj, f, protocol=pickle.HIGHEST_PROTOCOL)
def _check_shortest_path_distance(self, distance):
if distance == "data" and self.weighted:
raise NotImplementedError(
"Graph shortest path with constant or data distance only "
"implemented for unweighted graphs. "
"For weighted graphs, use `distance='affinity'`."
)
elif distance == "constant" and self.weighted:
raise NotImplementedError(
"Graph shortest path with constant distance only "
"implemented for unweighted graphs. "
"For weighted graphs, use `distance='affinity'`."
)
elif distance == "affinity" and not self.weighted:
raise ValueError(
"Graph shortest path with affinity distance only "
"valid for weighted graphs. "
"For unweighted graphs, use `distance='constant'` "
"or `distance='data'`."
)
def _default_shortest_path_distance(self):
if not self.weighted:
distance = "data"
_logger.info("Using ambient data distances.")
else:
distance = "affinity"
_logger.info("Using negative log affinity distances.")
return distance
def shortest_path(self, method="auto", distance=None):
"""
Find the length of the shortest path between every pair of vertices on the graph
Parameters
----------
method : string ['auto'|'FW'|'D']
method to use. Options are
'auto' : attempt to choose the best method for the current problem
'FW' : Floyd-Warshall algorithm. O[N^3]
'D' : Dijkstra's algorithm with Fibonacci stacks. O[(k+log(N))N^2]
distance : {'constant', 'data', 'affinity'}, optional (default: 'data')
Distances along kNN edges.
'constant' gives constant edge lengths.
'data' gives distances in ambient data space.
'affinity' gives distances as negative log affinities.
Returns
-------
D : np.ndarray, float, shape = [N,N]
D[i,j] gives the shortest distance from point i to point j
along the graph. If no path exists, the distance is np.inf
Notes
-----
Currently, shortest paths can only be calculated on kNNGraphs with
`decay=None`
"""
if distance is None:
distance = self._default_shortest_path_distance()
self._check_shortest_path_distance(distance)
if distance == "constant":
D = self.K
elif distance == "data":
D = sparse.coo_matrix(self.K)
D.data = np.sqrt(
np.sum((self.data_nu[D.row] - self.data_nu[D.col]) ** 2, axis=1)
)
elif distance == "affinity":
D = sparse.csr_matrix(self.K)
D.data = -1 * np.log(D.data)
else:
raise ValueError(
"Expected `distance` in ['constant', 'data', 'affinity']. "
"Got {}".format(distance)
)
P = graph_shortest_path(D, method=method)
# symmetrize for numerical error
P = (P + P.T) / 2
# sklearn returns 0 if no path exists
P[np.where(P == 0)] = np.inf
# diagonal should actually be zero
P[(np.arange(P.shape[0]), np.arange(P.shape[0]))] = 0
return P
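# --- Hedged illustration (editor's addition, not part of the original module) ---
# Numeric sketch of the two normalizations BaseGraph derives from a kernel K:
# the row-stochastic diffusion operator P (see the ``P``/``diff_op`` property)
# and the symmetric diffusion affinity A(x, y) = K(x, y) / sqrt(d(x) d(y))
# (see ``diff_aff``), computed directly with numpy on a tiny dense kernel.
def _example_kernel_normalizations():
    K = np.array([[1.0, 0.5, 0.2],
                  [0.5, 1.0, 0.1],
                  [0.2, 0.1, 1.0]])
    d = K.sum(axis=1)  # weighted degrees
    P = K / d[:, None]  # each row sums to 1
    A = K / np.sqrt(np.outer(d, d))  # symmetric, doubly-normalized affinity
    return P, A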
class PyGSPGraph(with_metaclass(abc.ABCMeta, pygsp.graphs.Graph, Base)):
"""Interface between BaseGraph and PyGSP.
All graphs should possess these matrices. We inherit a lot
of functionality from pygsp.graphs.Graph.
There is a lot of overhead involved in having both a weight and
kernel matrix
"""
def __init__(self, lap_type="combinatorial", coords=None, plotting=None, **kwargs):
if plotting is None:
plotting = {}
W = self._build_weight_from_kernel(self.K)
super().__init__(
W, lap_type=lap_type, coords=coords, plotting=plotting, **kwargs
)
@property
@abc.abstractmethod
def K():
"""Kernel matrix
Returns
-------
K : array-like, shape=[n_samples, n_samples]
kernel matrix defined as the adjacency matrix with
ones down the diagonal
"""
raise NotImplementedError
def _build_weight_from_kernel(self, kernel):
"""Private method to build an adjacency matrix from
a kernel matrix
Just puts zeroes down the diagonal in-place, since the
kernel matrix is ultimately not stored.
Parameters
----------
kernel : array-like, shape=[n_samples, n_samples]
Kernel matrix.
Returns
-------
Adjacency matrix, shape=[n_samples, n_samples]
"""
weight = kernel.copy()
self._diagonal = weight.diagonal().copy()
weight = matrix.set_diagonal(weight, 0)
return weight
class DataGraph(with_metaclass(abc.ABCMeta, Data, BaseGraph)):
"""Abstract class for graphs built from a dataset
Parameters
----------
data : array-like, shape=[n_samples,n_features]
accepted types: `numpy.ndarray`, `scipy.sparse.spmatrix`.
n_pca : {`int`, `None`, `bool`, 'auto'}, optional (default: `None`)
number of PC dimensions to retain for graph building.
If n_pca in `[None,False,0]`, uses the original data.
If `True` then estimate using a singular value threshold
Note: if data is sparse, uses SVD instead of PCA
TODO: should we subtract and store the mean?
rank_threshold : `float`, 'auto', optional (default: 'auto')
threshold to use when estimating rank for
`n_pca in [True, 'auto']`.
Note that the default kwarg is `None` for this parameter.
It is subsequently parsed to 'auto' if necessary.
If 'auto', this threshold is
smax * np.finfo(data.dtype).eps * max(data.shape)
where smax is the maximum singular value of the data matrix.
For reference, see, e.g.
W. Press, S. Teukolsky, W. Vetterling and B. Flannery,
“Numerical Recipes (3rd edition)”,
Cambridge University Press, 2007, page 795.
random_state : `int` or `None`, optional (default: `None`)
Random state for random PCA and graph building
verbose : `bool`, optional (default: `True`)
Verbosity.
n_jobs : `int`, optional (default : 1)
The number of jobs to use for the computation.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
"""
def __init__(self, data, verbose=True, n_jobs=1, **kwargs):
# kwargs are ignored
self.n_jobs = n_jobs
self.verbose = verbose
_logger.set_level(verbose)
super().__init__(data, **kwargs)
def get_params(self):
"""Get parameters from this object
"""
params = Data.get_params(self)
params.update(BaseGraph.get_params(self))
return params
@abc.abstractmethod
def build_kernel_to_data(self, Y):
"""Build a kernel from new input data `Y` to the `self.data`
Parameters
----------
Y: array-like, [n_samples_y, n_dimensions]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
K_yx: array-like, [n_samples_y, n_samples]
kernel matrix where each row represents affinities of a single
sample in `Y` to all samples in `self.data`.
Raises
------
ValueError: if this Graph is not capable of extension or
if the supplied data is the wrong shape
"""
raise NotImplementedError
def _check_extension_shape(self, Y):
"""Private method to check if new data matches `self.data`
Parameters
----------
Y : array-like, shape=[n_samples_y, n_features_y]
Input data
Returns
-------
Y : array-like, shape=[n_samples_y, n_pca]
(Potentially transformed) input data
Raises
------
ValueError : if `n_features_y` is not either `self.data.shape[1]` or
`self.n_pca`.
"""
if len(Y.shape) != 2:
raise ValueError("Expected a 2D matrix. Y has shape {}".format(Y.shape))
if not Y.shape[1] == self.data_nu.shape[1]:
# try PCA transform
if Y.shape[1] == self.data.shape[1]:
Y = self.transform(Y)
else:
# wrong shape
if self.data.shape[1] != self.data_nu.shape[1]:
# PCA is possible
msg = ("Y must be of shape either " "(n, {}) or (n, {})").format(
self.data.shape[1], self.data_nu.shape[1]
)
else:
# no PCA, only one choice of shape
msg = "Y must be of shape (n, {})".format(self.data.shape[1])
raise ValueError(msg)
return Y
def extend_to_data(self, Y):
"""Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of samples in `self.data`. Any
transformation of `self.data` can be trivially applied to `Y` by
performing
`transform_Y = self.interpolate(transform, transitions)`
Parameters
----------
Y: array-like, [n_samples_y, n_dimensions]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, shape=[n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data`
"""
Y = self._check_extension_shape(Y)
kernel = self.build_kernel_to_data(Y)
transitions = normalize(kernel, norm="l1", axis=1)
return transitions
def interpolate(self, transform, transitions=None, Y=None):
"""Interpolate new data onto a transformation of the graph data
One of either transitions or Y should be provided
Parameters
----------
transform : array-like, shape=[n_samples, n_transform_features]
transitions : array-like, optional, shape=[n_samples_y, n_samples]
Transition matrix from `Y` (not provided) to `self.data`
Y: array-like, optional, shape=[n_samples_y, n_dimensions]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
Y_transform : array-like, [n_samples_y, n_transform_features]
The `transform` values interpolated onto `Y`, i.e. `transitions.dot(transform)`
Raises
------
ValueError: if neither `transitions` nor `Y` is provided
"""
if transitions is None:
if Y is None:
raise ValueError("Either `transitions` or `Y` must be provided.")
else:
transitions = self.extend_to_data(Y)
Y_transform = transitions.dot(transform)
return Y_transform
def set_params(self, **params):
"""Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
- n_jobs
- verbose
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
"""
if "n_jobs" in params:
self.n_jobs = params["n_jobs"]
if "verbose" in params:
self.verbose = params["verbose"]
_logger.set_level(self.verbose)
super().set_params(**params)
return self
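# A minimal usage sketch (illustrative only): it assumes a concrete graph class such
# as `graphtools.Graph`, which implements `build_kernel_to_data`, and shows how new
# points are extended onto an existing graph and pushed through a per-sample
# transformation of the original data.
def _example_extend_and_interpolate():
    import numpy as np
    import graphtools  # assumed to provide a concrete Graph implementation

    X = np.random.normal(size=(100, 50))          # reference data
    Y = np.random.normal(size=(10, 50))           # new, out-of-sample points
    G = graphtools.Graph(X, n_pca=20)
    transitions = G.extend_to_data(Y)             # row-stochastic, shape (10, 100)
    embedding = np.random.normal(size=(100, 2))   # any per-sample transformation of X
    return G.interpolate(embedding, transitions=transitions)  # shape (10, 2)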
|
PypiClean
|
/csr_azure_agent-0.0.5.tar.gz/csr_azure_agent-0.0.5/waagent/azurelinuxagent/common/utils/fileutil.py
|
import glob
import os
import re
import shutil
import pwd
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.utils.textutil as textutil
def copy_file(from_path, to_path=None, to_dir=None):
if to_path is None:
to_path = os.path.join(to_dir, os.path.basename(from_path))
shutil.copyfile(from_path, to_path)
return to_path
def read_file(filepath, asbin=False, remove_bom=False, encoding='utf-8'):
"""
Read and return contents of 'filepath'.
"""
mode = 'rb'
with open(filepath, mode) as in_file:
data = in_file.read()
if data is None:
return None
if asbin:
return data
if remove_bom:
#Remove BOM from the bytes data before it is converted into a string.
data = textutil.remove_bom(data)
data = ustr(data, encoding=encoding)
return data
def write_file(filepath, contents, asbin=False, encoding='utf-8', append=False):
"""
Write 'contents' to 'filepath'.
"""
mode = "ab" if append else "wb"
data = contents
if not asbin:
data = contents.encode(encoding)
with open(filepath, mode) as out_file:
out_file.write(data)
def append_file(filepath, contents, asbin=False, encoding='utf-8'):
"""
Append 'contents' to 'filepath'.
"""
write_file(filepath, contents, asbin=asbin, encoding=encoding, append=True)
def base_name(path):
head, tail = os.path.split(path)
return tail
def get_line_startingwith(prefix, filepath):
"""
Return the first line in 'filepath' that starts with 'prefix'
"""
for line in read_file(filepath).split('\n'):
if line.startswith(prefix):
return line
return None
#End File operation util functions
def mkdir(dirpath, mode=None, owner=None):
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if mode is not None:
chmod(dirpath, mode)
if owner is not None:
chowner(dirpath, owner)
def chowner(path, owner):
if not os.path.exists(path):
logger.error("Path does not exist: {0}".format(path))
else:
owner_info = pwd.getpwnam(owner)
os.chown(path, owner_info[2], owner_info[3])
def chmod(path, mode):
if not os.path.exists(path):
logger.error("Path does not exist: {0}".format(path))
else:
os.chmod(path, mode)
def rm_files(*args):
for paths in args:
#Find all possible file paths
for path in glob.glob(paths):
if os.path.isfile(path):
os.remove(path)
def rm_dirs(*args):
"""
Remove the contents of each directory
"""
for p in args:
if not os.path.isdir(p):
continue
for pp in os.listdir(p):
path = os.path.join(p, pp)
if os.path.isfile(path):
os.remove(path)
elif os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def trim_ext(path, ext):
if not ext.startswith("."):
ext = "." + ext
return path.split(ext)[0] if path.endswith(ext) else path
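# For example, trim_ext("waagent.log", "log") and trim_ext("waagent.log", ".log")
# both return "waagent", while a path without that extension is returned unchanged.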
def update_conf_file(path, line_start, val, chk_err=False):
conf = []
if not os.path.isfile(path) and chk_err:
raise IOError("Can't find config file:{0}".format(path))
conf = read_file(path).split('\n')
conf = [x for x in conf if x is not None and len(x) > 0 and not x.startswith(line_start)]
conf.append(val)
write_file(path, '\n'.join(conf) + '\n')
def search_file(target_dir_name, target_file_name):
for root, dirs, files in os.walk(target_dir_name):
for file_name in files:
if file_name == target_file_name:
return os.path.join(root, file_name)
return None
def chmod_tree(path, mode):
for root, dirs, files in os.walk(path):
for file_name in files:
os.chmod(os.path.join(root, file_name), mode)
def findstr_in_file(file_path, pattern_str):
"""
Return match object if found in file.
"""
try:
pattern = re.compile(pattern_str)
for line in (open(file_path, 'r')).readlines():
match = re.search(pattern, line)
if match:
return match
except:
raise
return None
def get_all_files(root_path):
"""
Find all files under the given root path
"""
result = []
for root, dirs, files in os.walk(root_path):
result.extend([os.path.join(root, file) for file in files])
return result
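# A small usage sketch of the helpers above: write a config file, append a line,
# then fetch a single line back by prefix. Paths are created in a temp directory.
def _example_fileutil_usage():
    import tempfile
    conf = os.path.join(tempfile.mkdtemp(), "example.conf")
    write_file(conf, "ResourceDisk.Format=y\n")
    append_file(conf, "ResourceDisk.MountPoint=/mnt/resource\n")
    line = get_line_startingwith("ResourceDisk.MountPoint", conf)
    assert line == "ResourceDisk.MountPoint=/mnt/resource"
    return read_file(conf)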
|
PypiClean
|
/shell49-0.1.10.tar.gz/shell49-0.1.10/lib/esptool.py
|
from __future__ import division, print_function
import argparse
import base64
import binascii
import copy
import hashlib
import inspect
import io
import os
import shlex
import struct
import sys
import time
import zlib
import string
import serial.tools.list_ports as list_ports
import serial
# check 'serial' is 'pyserial' and not 'serial' https://github.com/espressif/esptool/issues/269
try:
if "serialization" in serial.__doc__ and "deserialization" in serial.__doc__:
raise ImportError("""
esptool.py depends on pyserial, but there is a conflict with a currently installed package named 'serial'.
You may be able to work around this by 'pip uninstall serial; pip install pyserial' \
but this may break other installed Python software that depends on 'serial'.
There is no good fix for this right now, apart from configuring virtualenvs. \
See https://github.com/espressif/esptool/issues/269#issuecomment-385298196 for discussion of the underlying issue(s).""")
except TypeError:
pass # __doc__ returns None for pyserial
__version__ = "2.6-beta1"
MAX_UINT32 = 0xffffffff
MAX_UINT24 = 0xffffff
DEFAULT_TIMEOUT = 3 # timeout for most flash operations
START_FLASH_TIMEOUT = 20 # timeout for starting flash (may perform erase)
CHIP_ERASE_TIMEOUT = 120 # timeout for full chip erase
MAX_TIMEOUT = CHIP_ERASE_TIMEOUT * 2 # longest any command can run
SYNC_TIMEOUT = 0.1 # timeout for syncing with bootloader
MD5_TIMEOUT_PER_MB = 8 # timeout (per megabyte) for calculating md5sum
ERASE_REGION_TIMEOUT_PER_MB = 30 # timeout (per megabyte) for erasing a region
MEM_END_ROM_TIMEOUT = 0.05 # special short timeout for ESP_MEM_END, as it may never respond
DEFAULT_SERIAL_WRITE_TIMEOUT = 10 # timeout for serial port write
def timeout_per_mb(seconds_per_mb, size_bytes):
""" Scales timeouts which are size-specific """
result = seconds_per_mb * (size_bytes / 1e6)
if result < DEFAULT_TIMEOUT:
return DEFAULT_TIMEOUT
return result
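# For example, timeout_per_mb(ERASE_REGION_TIMEOUT_PER_MB, 4 * 1024 * 1024) is roughly
# 126 seconds, while tiny sizes fall back to DEFAULT_TIMEOUT (3 seconds).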
DETECTED_FLASH_SIZES = {0x12: '256KB', 0x13: '512KB', 0x14: '1MB',
0x15: '2MB', 0x16: '4MB', 0x17: '8MB', 0x18: '16MB'}
def check_supported_function(func, check_func):
"""
Decorator implementation that wraps a check around an ESPLoader
bootloader function to check if it's supported.
This is used to capture the multidimensional differences in
functionality between the ESP8266 & ESP32 ROM loaders, and the
software stub that runs on both. Not possible to do this cleanly
via inheritance alone.
"""
def inner(*args, **kwargs):
obj = args[0]
if check_func(obj):
return func(*args, **kwargs)
else:
raise NotImplementedInROMError(obj, func)
return inner
def stub_function_only(func):
""" Attribute for a function only supported in the software stub loader """
return check_supported_function(func, lambda o: o.IS_STUB)
def stub_and_esp32_function_only(func):
""" Attribute for a function only supported by software stubs or ESP32 ROM """
return check_supported_function(func, lambda o: o.IS_STUB or o.CHIP_NAME == "ESP32")
PYTHON2 = sys.version_info[0] < 3 # True if on pre-Python 3
# Function to return nth byte of a bitstring
# Different behaviour on Python 2 vs 3
if PYTHON2:
def byte(bitstr, index):
return ord(bitstr[index])
else:
def byte(bitstr, index):
return bitstr[index]
# Provide a 'basestring' class on Python 3
try:
basestring
except NameError:
basestring = str
def esp8266_function_only(func):
""" Attribute for a function only supported on ESP8266 """
return check_supported_function(func, lambda o: o.CHIP_NAME == "ESP8266")
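# Illustrative sketch (not part of the original tool): how the guards above behave
# when applied to a method of a minimal object exposing IS_STUB / CHIP_NAME attributes.
def _example_function_guard():
    class _ToyLoader(object):
        IS_STUB = False
        CHIP_NAME = "ESP32"

        @stub_and_esp32_function_only
        def change_baud(self, baud):
            return baud

    # Allowed because CHIP_NAME == "ESP32"; on an ESP8266 ROM object this call
    # would raise NotImplementedInROMError instead.
    return _ToyLoader().change_baud(115200)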
class ESPLoader(object):
""" Base class providing access to ESP ROM & software stub bootloaders.
Subclasses provide ESP8266 & ESP32 specific functionality.
Don't instantiate this base class directly, either instantiate a subclass or
call ESPLoader.detect_chip() which will interrogate the chip and return the
appropriate subclass instance.
"""
CHIP_NAME = "Espressif device"
IS_STUB = False
DEFAULT_PORT = "/dev/ttyUSB0"
# Commands supported by ESP8266 ROM bootloader
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_MEM_BEGIN = 0x05
ESP_MEM_END = 0x06
ESP_MEM_DATA = 0x07
ESP_SYNC = 0x08
ESP_WRITE_REG = 0x09
ESP_READ_REG = 0x0a
# Some commands supported by ESP32 ROM bootloader (or ESP8266 w/ stub)
ESP_SPI_SET_PARAMS = 0x0B
ESP_SPI_ATTACH = 0x0D
ESP_CHANGE_BAUDRATE = 0x0F
ESP_FLASH_DEFL_BEGIN = 0x10
ESP_FLASH_DEFL_DATA = 0x11
ESP_FLASH_DEFL_END = 0x12
ESP_SPI_FLASH_MD5 = 0x13
# Some commands supported by stub only
ESP_ERASE_FLASH = 0xD0
ESP_ERASE_REGION = 0xD1
ESP_READ_FLASH = 0xD2
ESP_RUN_USER_CODE = 0xD3
# Maximum block sizes for RAM and Flash writes, respectively.
ESP_RAM_BLOCK = 0x1800
FLASH_WRITE_SIZE = 0x400
# Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
ESP_ROM_BAUD = 115200
# First byte of the application image
ESP_IMAGE_MAGIC = 0xe9
# Initial state for the checksum routine
ESP_CHECKSUM_MAGIC = 0xef
# Flash sector size, minimum unit of erase.
FLASH_SECTOR_SIZE = 0x1000
UART_DATA_REG_ADDR = 0x60000078
# Memory addresses
IROM_MAP_START = 0x40200000
IROM_MAP_END = 0x40300000
# The number of bytes in the UART response that signify command status
STATUS_BYTES_LENGTH = 2
def __init__(self, port=DEFAULT_PORT, baud=ESP_ROM_BAUD, trace_enabled=False):
"""Base constructor for ESPLoader bootloader interaction
Don't call this constructor, either instantiate ESP8266ROM
or ESP32ROM, or use ESPLoader.detect_chip().
This base class has all of the instance methods for bootloader
functionality supported across various chips & stub
loaders. Subclasses replace the functions they don't support
with ones which throw NotImplementedInROMError().
"""
if isinstance(port, basestring):
self._port = serial.serial_for_url(port)
else:
self._port = port
self._slip_reader = slip_reader(self._port, self.trace)
# setting baud rate in a separate step is a workaround for
# CH341 driver on some Linux versions (this opens at 9600 then
# sets), shouldn't matter for other platforms/drivers. See
# https://github.com/espressif/esptool/issues/44#issuecomment-107094446
self._set_port_baudrate(baud)
self._trace_enabled = trace_enabled
# set write timeout, to prevent esptool blocked at write forever.
self._port.write_timeout = DEFAULT_SERIAL_WRITE_TIMEOUT
def _set_port_baudrate(self, baud):
try:
self._port.baudrate = baud
except IOError:
raise FatalError("Failed to set baud rate %d. The driver may not support this rate." % baud)
@staticmethod
def detect_chip(port=DEFAULT_PORT, baud=ESP_ROM_BAUD, connect_mode='default_reset', trace_enabled=False):
""" Use serial access to detect the chip type.
We use the UART's datecode register for this: it's mapped at
the same address on ESP8266 & ESP32, so we can use one
memory read and compare the value against the datecode of each chip
type.
This routine automatically performs ESPLoader.connect() (passing
connect_mode parameter) as part of querying the chip.
"""
detect_port = ESPLoader(port, baud, trace_enabled=trace_enabled)
detect_port.connect(connect_mode)
try:
print('Detecting chip type...', end='')
sys.stdout.flush()
date_reg = detect_port.read_reg(ESPLoader.UART_DATA_REG_ADDR)
for cls in [ESP8266ROM, ESP32ROM]:
if date_reg == cls.DATE_REG_VALUE:
# don't connect a second time
inst = cls(detect_port._port, baud, trace_enabled=trace_enabled)
print(' %s' % inst.CHIP_NAME, end='')
return inst
finally:
print('') # end line
raise FatalError("Unexpected UART datecode value 0x%08x. Failed to autodetect chip type." % date_reg)
""" Read a SLIP packet from the serial port """
def read(self):
return next(self._slip_reader)
""" Write bytes to the serial port while performing SLIP escaping """
def write(self, packet):
buf = b'\xc0' \
+ (packet.replace(b'\xdb',b'\xdb\xdd').replace(b'\xc0',b'\xdb\xdc')) \
+ b'\xc0'
self.trace("Write %d bytes: %s", len(buf), HexFormatter(buf))
self._port.write(buf)
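    # Example of the framing above: payload b'\x01\xc0\x02' is escaped to
    # b'\x01\xdb\xdc\x02' and sent as b'\xc0\x01\xdb\xdc\x02\xc0'.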
def trace(self, message, *format_args):
if self._trace_enabled:
now = time.time()
try:
delta = now - self._last_trace
except AttributeError:
delta = 0.0
self._last_trace = now
prefix = "TRACE +%.3f " % delta
print(prefix + (message % format_args))
""" Calculate checksum of a blob, as it is defined by the ROM """
@staticmethod
def checksum(data, state=ESP_CHECKSUM_MAGIC):
for b in data:
if type(b) is int: # python 2/3 compat
state ^= b
else:
state ^= ord(b)
return state
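    # Example: checksum(b'\x01\x02') == 0xef ^ 0x01 ^ 0x02 == 0xec.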
""" Send a request and read the response """
def command(self, op=None, data=b"", chk=0, wait_response=True, timeout=DEFAULT_TIMEOUT):
saved_timeout = self._port.timeout
new_timeout = min(timeout, MAX_TIMEOUT)
if new_timeout != saved_timeout:
self._port.timeout = new_timeout
try:
if op is not None:
self.trace("command op=0x%02x data len=%s wait_response=%d timeout=%.3f data=%s",
op, len(data), 1 if wait_response else 0, timeout, HexFormatter(data))
pkt = struct.pack(b'<BBHI', 0x00, op, len(data), chk) + data
self.write(pkt)
if not wait_response:
return
# try to get a response until the response matches the
# requested operation or the retry limit is
# exceeded. This is needed for some ESP8266s that
# reply with more sync responses than expected.
for retry in range(100):
p = self.read()
if len(p) < 8:
continue
(resp, op_ret, len_ret, val) = struct.unpack('<BBHI', p[:8])
if resp != 1:
continue
data = p[8:]
if op is None or op_ret == op:
return val, data
finally:
if new_timeout != saved_timeout:
self._port.timeout = saved_timeout
raise FatalError("Response doesn't match request")
def check_command(self, op_description, op=None, data=b'', chk=0, timeout=DEFAULT_TIMEOUT):
"""
Execute a command with 'command', check the result code and throw an appropriate
FatalError if it fails.
Returns the "result" of a successful command.
"""
val, data = self.command(op, data, chk, timeout=timeout)
# things are a bit weird here, bear with us
# the status bytes are the last 2/4 bytes in the data (depending on chip)
if len(data) < self.STATUS_BYTES_LENGTH:
raise FatalError("Failed to %s. Only got %d byte status response." % (op_description, len(data)))
status_bytes = data[-self.STATUS_BYTES_LENGTH:]
# we only care if the first one is non-zero. If it is, the second byte is a reason.
if byte(status_bytes, 0) != 0:
raise FatalError.WithResult('Failed to %s' % op_description, status_bytes)
# if we had more data than just the status bytes, return it as the result
# (this is used by the md5sum command, maybe other commands?)
if len(data) > self.STATUS_BYTES_LENGTH:
return data[:-self.STATUS_BYTES_LENGTH]
else: # otherwise, just return the 'val' field which comes from the reply header (this is used by read_reg)
return val
def flush_input(self):
self._port.flushInput()
self._slip_reader = slip_reader(self._port, self.trace)
def sync(self):
self.command(self.ESP_SYNC, b'\x07\x07\x12\x20' + 32 * b'\x55',
timeout=SYNC_TIMEOUT)
for i in range(7):
self.command()
def _setDTR(self, state):
self._port.setDTR(state)
def _setRTS(self, state):
self._port.setRTS(state)
# Work-around for adapters on Windows using the usbser.sys driver:
# generate a dummy change to DTR so that the set-control-line-state
# request is sent with the updated RTS state and the same DTR state
self._port.setDTR(self._port.dtr)
def _connect_attempt(self, mode='default_reset', esp32r0_delay=False):
""" A single connection attempt, with esp32r0 workaround options """
# esp32r0_delay is a workaround for bugs with the most common auto reset
# circuit and Windows, if the EN pin on the dev board does not have
# enough capacitance.
#
# Newer dev boards shouldn't have this problem (higher value capacitor
# on the EN pin), and ESP32 revision 1 can't use this workaround as it
# relies on a silicon bug.
#
# Details: https://github.com/espressif/esptool/issues/136
last_error = None
# If we're doing no_sync, we're likely communicating as a pass through
# with an intermediate device to the ESP32
if mode == "no_reset_no_sync":
return last_error
# issue reset-to-bootloader:
# RTS = either CH_PD/EN or nRESET (both active low = chip in reset)
# DTR = GPIO0 (active low = boot to flasher)
#
# DTR & RTS are active low signals,
# ie True = pin @ 0V, False = pin @ VCC.
if mode != 'no_reset':
self._setDTR(False) # IO0=HIGH
self._setRTS(True) # EN=LOW, chip in reset
time.sleep(0.1)
if esp32r0_delay:
# Some chips are more likely to trigger the esp32r0
# watchdog reset silicon bug if they're held with EN=LOW
# for a longer period
time.sleep(1.2)
self._setDTR(True) # IO0=LOW
self._setRTS(False) # EN=HIGH, chip out of reset
if esp32r0_delay:
# Sleep longer after reset.
# This workaround only works on revision 0 ESP32 chips,
# it exploits a silicon bug spurious watchdog reset.
time.sleep(0.4) # allow watchdog reset to occur
time.sleep(0.05)
self._setDTR(False) # IO0=HIGH, done
for _ in range(5):
try:
self.flush_input()
self._port.flushOutput()
self.sync()
return None
except FatalError as e:
if esp32r0_delay:
print('_', end='')
else:
print('.', end='')
sys.stdout.flush()
time.sleep(0.05)
last_error = e
return last_error
def connect(self, mode='default_reset'):
""" Try connecting repeatedly until successful, or giving up """
print('Connecting...', end='')
sys.stdout.flush()
last_error = None
try:
for _ in range(7):
last_error = self._connect_attempt(mode=mode, esp32r0_delay=False)
if last_error is None:
return
last_error = self._connect_attempt(mode=mode, esp32r0_delay=True)
if last_error is None:
return
finally:
print('') # end 'Connecting...' line
raise FatalError('Failed to connect to %s: %s' % (self.CHIP_NAME, last_error))
""" Read memory address in target """
def read_reg(self, addr):
# we don't call check_command here because read_reg() function is called
# when detecting chip type, and the way we check for success (STATUS_BYTES_LENGTH) is different
# for different chip types (!)
val, data = self.command(self.ESP_READ_REG, struct.pack('<I', addr))
if byte(data, 0) != 0:
raise FatalError.WithResult("Failed to read register address %08x" % addr, data)
return val
""" Write to memory address in target """
def write_reg(self, addr, value, mask=0xFFFFFFFF, delay_us=0):
return self.check_command("write target memory", self.ESP_WRITE_REG,
struct.pack('<IIII', addr, value, mask, delay_us))
""" Start downloading an application image to RAM """
def mem_begin(self, size, blocks, blocksize, offset):
if self.IS_STUB: # check we're not going to overwrite a running stub with this data
stub = self.STUB_CODE
load_start = offset
load_end = offset + size
for (start, end) in [(stub["data_start"], stub["data_start"] + len(stub["data"])),
(stub["text_start"], stub["text_start"] + len(stub["text"]))]:
if load_start < end and load_end > start:
raise FatalError(("Software loader is resident at 0x%08x-0x%08x. " +
"Can't load binary at overlapping address range 0x%08x-0x%08x. " +
"Either change binary loading address, or use the --no-stub " +
"option to disable the software loader.") % (start, end, load_start, load_end))
return self.check_command("enter RAM download mode", self.ESP_MEM_BEGIN,
struct.pack('<IIII', size, blocks, blocksize, offset))
""" Send a block of an image to RAM """
def mem_block(self, data, seq):
return self.check_command("write to target RAM", self.ESP_MEM_DATA,
struct.pack('<IIII', len(data), seq, 0, 0) + data,
self.checksum(data))
""" Leave download mode and run the application """
def mem_finish(self, entrypoint=0):
# Sending ESP_MEM_END usually sends a correct response back, however sometimes
# (with ROM loader) the executed code may reset the UART or change the baud rate
# before the transmit FIFO is empty. So in these cases we set a short timeout and
# ignore errors.
timeout = DEFAULT_TIMEOUT if self.IS_STUB else MEM_END_ROM_TIMEOUT
data = struct.pack('<II', int(entrypoint == 0), entrypoint)
try:
return self.check_command("leave RAM download mode", self.ESP_MEM_END,
data=data, timeout=timeout)
except FatalError:
if self.IS_STUB:
raise
pass
""" Start downloading to Flash (performs an erase)
Returns number of blocks (of size self.FLASH_WRITE_SIZE) to write.
"""
def flash_begin(self, size, offset):
num_blocks = (size + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE
erase_size = self.get_erase_size(offset, size)
t = time.time()
if self.IS_STUB:
timeout = DEFAULT_TIMEOUT
else:
timeout = timeout_per_mb(ERASE_REGION_TIMEOUT_PER_MB, size) # ROM performs the erase up front
self.check_command("enter Flash download mode", self.ESP_FLASH_BEGIN,
struct.pack('<IIII', erase_size, num_blocks, self.FLASH_WRITE_SIZE, offset),
timeout=timeout)
if size != 0 and not self.IS_STUB:
print("Took %.2fs to erase flash block" % (time.time() - t))
return num_blocks
""" Write block to flash """
def flash_block(self, data, seq, timeout=DEFAULT_TIMEOUT):
self.check_command("write to target Flash after seq %d" % seq,
self.ESP_FLASH_DATA,
struct.pack('<IIII', len(data), seq, 0, 0) + data,
self.checksum(data),
timeout=timeout)
""" Leave flash mode and run/reboot """
def flash_finish(self, reboot=False):
pkt = struct.pack('<I', int(not reboot))
# stub sends a reply to this command
self.check_command("leave Flash mode", self.ESP_FLASH_END, pkt)
""" Run application code in flash """
def run(self, reboot=False):
# Fake flash begin immediately followed by flash end
self.flash_begin(0, 0)
self.flash_finish(reboot)
""" Read SPI flash manufacturer and device id """
def flash_id(self):
SPIFLASH_RDID = 0x9F
return self.run_spiflash_command(SPIFLASH_RDID, b"", 24)
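    # Note: the 24-bit RDID value returned here carries the JEDEC manufacturer ID
    # in its low byte and the device ID in the upper two bytes.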
def parse_flash_size_arg(self, arg):
try:
return self.FLASH_SIZES[arg]
except KeyError:
raise FatalError("Flash size '%s' is not supported by this chip type. Supported sizes: %s"
% (arg, ", ".join(self.FLASH_SIZES.keys())))
def run_stub(self, stub=None):
if stub is None:
if self.IS_STUB:
raise FatalError("Not possible for a stub to load another stub (memory likely to overlap.)")
stub = self.STUB_CODE
# Upload
print("Uploading stub...")
for field in ['text', 'data']:
if field in stub:
offs = stub[field + "_start"]
length = len(stub[field])
blocks = (length + self.ESP_RAM_BLOCK - 1) // self.ESP_RAM_BLOCK
self.mem_begin(length, blocks, self.ESP_RAM_BLOCK, offs)
for seq in range(blocks):
from_offs = seq * self.ESP_RAM_BLOCK
to_offs = from_offs + self.ESP_RAM_BLOCK
self.mem_block(stub[field][from_offs:to_offs], seq)
print("Running stub...")
self.mem_finish(stub['entry'])
p = self.read()
if p != b'OHAI':
raise FatalError("Failed to start stub. Unexpected response: %s" % p)
print("Stub running...")
return self.STUB_CLASS(self)
@stub_and_esp32_function_only
def flash_defl_begin(self, size, compsize, offset):
""" Start downloading compressed data to Flash (performs an erase)
Returns number of blocks (size self.FLASH_WRITE_SIZE) to write.
"""
num_blocks = (compsize + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE
erase_blocks = (size + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE
t = time.time()
if self.IS_STUB:
write_size = size # stub expects number of bytes here, manages erasing internally
timeout = DEFAULT_TIMEOUT
else:
write_size = erase_blocks * self.FLASH_WRITE_SIZE # ROM expects rounded up to erase block size
timeout = timeout_per_mb(ERASE_REGION_TIMEOUT_PER_MB, write_size) # ROM performs the erase up front
print("Compressed %d bytes to %d..." % (size, compsize))
self.check_command("enter compressed flash mode", self.ESP_FLASH_DEFL_BEGIN,
struct.pack('<IIII', write_size, num_blocks, self.FLASH_WRITE_SIZE, offset),
timeout=timeout)
if size != 0 and not self.IS_STUB:
# (stub erases as it writes, but ROM loaders erase on begin)
print("Took %.2fs to erase flash block" % (time.time() - t))
return num_blocks
""" Write block to flash, send compressed """
@stub_and_esp32_function_only
def flash_defl_block(self, data, seq, timeout=DEFAULT_TIMEOUT):
self.check_command("write compressed data to flash after seq %d" % seq,
self.ESP_FLASH_DEFL_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + data, self.checksum(data), timeout=timeout)
""" Leave compressed flash mode and run/reboot """
@stub_and_esp32_function_only
def flash_defl_finish(self, reboot=False):
if not reboot and not self.IS_STUB:
# skip sending flash_finish to ROM loader, as this
# exits the bootloader. Stub doesn't do this.
return
pkt = struct.pack('<I', int(not reboot))
self.check_command("leave compressed flash mode", self.ESP_FLASH_DEFL_END, pkt)
self.in_bootloader = False
@stub_and_esp32_function_only
def flash_md5sum(self, addr, size):
# the MD5 command returns additional bytes in the standard
# command reply slot
timeout = timeout_per_mb(MD5_TIMEOUT_PER_MB, size)
res = self.check_command('calculate md5sum', self.ESP_SPI_FLASH_MD5, struct.pack('<IIII', addr, size, 0, 0),
timeout=timeout)
if len(res) == 32:
return res.decode("utf-8") # already hex formatted
elif len(res) == 16:
return hexify(res).lower()
else:
raise FatalError("MD5Sum command returned unexpected result: %r" % res)
@stub_and_esp32_function_only
def change_baud(self, baud):
print("Changing baud rate to %d" % baud)
# stub takes the new baud rate and the old one
second_arg = self._port.baudrate if self.IS_STUB else 0
self.command(self.ESP_CHANGE_BAUDRATE, struct.pack('<II', baud, second_arg))
print("Changed.")
self._set_port_baudrate(baud)
time.sleep(0.05) # get rid of crap sent during baud rate change
self.flush_input()
@stub_function_only
def erase_flash(self):
# depending on flash chip model the erase may take this long (maybe longer!)
self.check_command("erase flash", self.ESP_ERASE_FLASH,
timeout=CHIP_ERASE_TIMEOUT)
@stub_function_only
def erase_region(self, offset, size):
if offset % self.FLASH_SECTOR_SIZE != 0:
raise FatalError("Offset to erase from must be a multiple of 4096")
if size % self.FLASH_SECTOR_SIZE != 0:
raise FatalError("Size of data to erase must be a multiple of 4096")
timeout = timeout_per_mb(ERASE_REGION_TIMEOUT_PER_MB, size)
self.check_command("erase region", self.ESP_ERASE_REGION, struct.pack('<II', offset, size), timeout=timeout)
@stub_function_only
def read_flash(self, offset, length, progress_fn=None):
# issue a standard bootloader command to trigger the read
self.check_command("read flash", self.ESP_READ_FLASH,
struct.pack('<IIII',
offset,
length,
self.FLASH_SECTOR_SIZE,
64))
# now we expect (length // block_size) SLIP frames with the data
data = b''
while len(data) < length:
p = self.read()
data += p
if len(data) < length and len(p) < self.FLASH_SECTOR_SIZE:
raise FatalError('Corrupt data, expected 0x%x bytes but received 0x%x bytes' % (self.FLASH_SECTOR_SIZE, len(p)))
self.write(struct.pack('<I', len(data)))
if progress_fn and (len(data) % 1024 == 0 or len(data) == length):
progress_fn(len(data), length)
if progress_fn:
progress_fn(len(data), length)
if len(data) > length:
raise FatalError('Read more than expected')
digest_frame = self.read()
if len(digest_frame) != 16:
raise FatalError('Expected digest, got: %s' % hexify(digest_frame))
expected_digest = hexify(digest_frame).upper()
digest = hashlib.md5(data).hexdigest().upper()
if digest != expected_digest:
raise FatalError('Digest mismatch: expected %s, got %s' % (expected_digest, digest))
return data
def flash_spi_attach(self, hspi_arg):
"""Send SPI attach command to enable the SPI flash pins
ESP8266 ROM does this when you send flash_begin, ESP32 ROM
has it as a SPI command.
"""
# last 3 bytes in ESP_SPI_ATTACH argument are reserved values
arg = struct.pack('<I', hspi_arg)
if not self.IS_STUB:
# ESP32 ROM loader takes additional 'is legacy' arg, which is not
# currently supported in the stub loader or esptool.py (as it's not usually needed.)
is_legacy = 0
arg += struct.pack('BBBB', is_legacy, 0, 0, 0)
self.check_command("configure SPI flash pins", ESP32ROM.ESP_SPI_ATTACH, arg)
def flash_set_parameters(self, size):
"""Tell the ESP bootloader the parameters of the chip
Corresponds to the "flashchip" data structure that the ROM
has in RAM.
'size' is in bytes.
All other flash parameters are currently hardcoded (on ESP8266
these are mostly ignored by ROM code, on ESP32 I'm not sure.)
"""
fl_id = 0
total_size = size
block_size = 64 * 1024
sector_size = 4 * 1024
page_size = 256
status_mask = 0xffff
self.check_command("set SPI params", ESP32ROM.ESP_SPI_SET_PARAMS,
struct.pack('<IIIIII', fl_id, total_size, block_size, sector_size, page_size, status_mask))
def run_spiflash_command(self, spiflash_command, data=b"", read_bits=0):
"""Run an arbitrary SPI flash command.
This function uses the "USR_COMMAND" functionality in the ESP
SPI hardware, rather than the precanned commands supported by
hardware. So the value of spiflash_command is an actual command
byte, sent over the wire.
After writing command byte, writes 'data' to MOSI and then
reads back 'read_bits' of reply on MISO. Result is a number.
"""
# SPI_USR register flags
SPI_USR_COMMAND = (1 << 31)
SPI_USR_MISO = (1 << 28)
SPI_USR_MOSI = (1 << 27)
# SPI registers, base address differs ESP32 vs 8266
base = self.SPI_REG_BASE
SPI_CMD_REG = base + 0x00
SPI_USR_REG = base + 0x1C
SPI_USR1_REG = base + 0x20
SPI_USR2_REG = base + 0x24
SPI_W0_REG = base + self.SPI_W0_OFFS
# following two registers are ESP32 only
if self.SPI_HAS_MOSI_DLEN_REG:
# ESP32 has a more sophisticated way to set up "user" commands
def set_data_lengths(mosi_bits, miso_bits):
SPI_MOSI_DLEN_REG = base + 0x28
SPI_MISO_DLEN_REG = base + 0x2C
if mosi_bits > 0:
self.write_reg(SPI_MOSI_DLEN_REG, mosi_bits - 1)
if miso_bits > 0:
self.write_reg(SPI_MISO_DLEN_REG, miso_bits - 1)
else:
def set_data_lengths(mosi_bits, miso_bits):
SPI_DATA_LEN_REG = SPI_USR1_REG
SPI_MOSI_BITLEN_S = 17
SPI_MISO_BITLEN_S = 8
mosi_mask = 0 if (mosi_bits == 0) else (mosi_bits - 1)
miso_mask = 0 if (miso_bits == 0) else (miso_bits - 1)
self.write_reg(SPI_DATA_LEN_REG,
(miso_mask << SPI_MISO_BITLEN_S) | (
mosi_mask << SPI_MOSI_BITLEN_S))
# SPI peripheral "command" bitmasks for SPI_CMD_REG
SPI_CMD_USR = (1 << 18)
# shift values
SPI_USR2_DLEN_SHIFT = 28
if read_bits > 32:
raise FatalError("Reading more than 32 bits back from a SPI flash operation is unsupported")
if len(data) > 64:
raise FatalError("Writing more than 64 bytes of data with one SPI command is unsupported")
data_bits = len(data) * 8
old_spi_usr = self.read_reg(SPI_USR_REG)
old_spi_usr2 = self.read_reg(SPI_USR2_REG)
flags = SPI_USR_COMMAND
if read_bits > 0:
flags |= SPI_USR_MISO
if data_bits > 0:
flags |= SPI_USR_MOSI
set_data_lengths(data_bits, read_bits)
self.write_reg(SPI_USR_REG, flags)
self.write_reg(SPI_USR2_REG,
(7 << SPI_USR2_DLEN_SHIFT) | spiflash_command)
if data_bits == 0:
self.write_reg(SPI_W0_REG, 0) # clear data register before we read it
else:
data = pad_to(data, 4, b'\00') # pad to 32-bit multiple
words = struct.unpack("I" * (len(data) // 4), data)
next_reg = SPI_W0_REG
for word in words:
self.write_reg(next_reg, word)
next_reg += 4
self.write_reg(SPI_CMD_REG, SPI_CMD_USR)
def wait_done():
for _ in range(10):
if (self.read_reg(SPI_CMD_REG) & SPI_CMD_USR) == 0:
return
raise FatalError("SPI command did not complete in time")
wait_done()
status = self.read_reg(SPI_W0_REG)
# restore some SPI controller registers
self.write_reg(SPI_USR_REG, old_spi_usr)
self.write_reg(SPI_USR2_REG, old_spi_usr2)
return status
def read_status(self, num_bytes=2):
"""Read up to 24 bits (num_bytes) of SPI flash status register contents
via RDSR, RDSR2, RDSR3 commands
Not all SPI flash supports all three commands. The upper 1 or 2
bytes may be 0xFF.
"""
SPIFLASH_RDSR = 0x05
SPIFLASH_RDSR2 = 0x35
SPIFLASH_RDSR3 = 0x15
status = 0
shift = 0
for cmd in [SPIFLASH_RDSR, SPIFLASH_RDSR2, SPIFLASH_RDSR3][0:num_bytes]:
status += self.run_spiflash_command(cmd, read_bits=8) << shift
shift += 8
return status
def write_status(self, new_status, num_bytes=2, set_non_volatile=False):
"""Write up to 24 bits (num_bytes) of new status register
num_bytes can be 1, 2 or 3.
Not all flash supports the additional commands to write the
second and third byte of the status register. When writing 2
bytes, esptool also sends a 16-bit WRSR command (as some
flash types use this instead of WRSR2.)
If the set_non_volatile flag is set, non-volatile bits will
be set as well as volatile ones (WREN used instead of WEVSR).
"""
SPIFLASH_WRSR = 0x01
SPIFLASH_WRSR2 = 0x31
SPIFLASH_WRSR3 = 0x11
SPIFLASH_WEVSR = 0x50
SPIFLASH_WREN = 0x06
SPIFLASH_WRDI = 0x04
enable_cmd = SPIFLASH_WREN if set_non_volatile else SPIFLASH_WEVSR
# try using a 16-bit WRSR (not supported by all chips)
# this may be redundant, but shouldn't hurt
if num_bytes == 2:
self.run_spiflash_command(enable_cmd)
self.run_spiflash_command(SPIFLASH_WRSR, struct.pack("<H", new_status))
# also try using individual commands (also not supported by all chips for num_bytes 2 & 3)
for cmd in [SPIFLASH_WRSR, SPIFLASH_WRSR2, SPIFLASH_WRSR3][0:num_bytes]:
self.run_spiflash_command(enable_cmd)
self.run_spiflash_command(cmd, struct.pack("B", new_status & 0xFF))
new_status >>= 8
self.run_spiflash_command(SPIFLASH_WRDI)
def hard_reset(self):
self._setRTS(True) # EN->LOW
time.sleep(0.1)
self._setRTS(False)
def soft_reset(self, stay_in_bootloader):
if not self.IS_STUB:
if stay_in_bootloader:
return # ROM bootloader is already in bootloader!
else:
# 'run user code' is as close to a soft reset as we can do
self.flash_begin(0, 0)
self.flash_finish(False)
else:
if stay_in_bootloader:
# soft resetting from the stub loader
# will re-load the ROM bootloader
self.flash_begin(0, 0)
self.flash_finish(True)
elif self.CHIP_NAME != "ESP8266":
raise FatalError("Soft resetting is currently only supported on ESP8266")
else:
# running user code from stub loader requires some hacks
# in the stub loader
self.command(self.ESP_RUN_USER_CODE, wait_response=False)
class ESP8266ROM(ESPLoader):
""" Access class for ESP8266 ROM bootloader
"""
CHIP_NAME = "ESP8266"
IS_STUB = False
DATE_REG_VALUE = 0x00062000
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3ff00050
ESP_OTP_MAC1 = 0x3ff00054
ESP_OTP_MAC3 = 0x3ff0005c
SPI_REG_BASE = 0x60000200
SPI_W0_OFFS = 0x40
SPI_HAS_MOSI_DLEN_REG = False
FLASH_SIZES = {
'512KB':0x00,
'256KB':0x10,
'1MB':0x20,
'2MB':0x30,
'4MB':0x40,
'2MB-c1': 0x50,
'4MB-c1':0x60,
'8MB':0x80,
'16MB':0x90,
}
BOOTLOADER_FLASH_OFFSET = 0
def get_efuses(self):
# Return the 128 bits of ESP8266 efuse as a single Python integer
return (self.read_reg(0x3ff0005c) << 96 |
self.read_reg(0x3ff00058) << 64 |
self.read_reg(0x3ff00054) << 32 |
self.read_reg(0x3ff00050))
def get_chip_description(self):
efuses = self.get_efuses()
is_8285 = (efuses & ((1 << 4) | 1 << 80)) != 0 # One or the other efuse bit is set for ESP8285
return "ESP8285" if is_8285 else "ESP8266EX"
def get_chip_features(self):
features = ["WiFi"]
if self.get_chip_description() == "ESP8285":
features += ["Embedded Flash"]
return features
def flash_spi_attach(self, hspi_arg):
if self.IS_STUB:
super(ESP8266ROM, self).flash_spi_attach(hspi_arg)
else:
# ESP8266 ROM has no flash_spi_attach command in serial protocol,
# but flash_begin will do it
self.flash_begin(0, 0)
def flash_set_parameters(self, size):
# not implemented in ROM, but OK to silently skip for ROM
if self.IS_STUB:
super(ESP8266ROM, self).flash_set_parameters(size)
def chip_id(self):
""" Read Chip ID from efuse - the equivalent of the SDK system_get_chip_id() function """
id0 = self.read_reg(self.ESP_OTP_MAC0)
id1 = self.read_reg(self.ESP_OTP_MAC1)
return (id0 >> 24) | ((id1 & MAX_UINT24) << 8)
def read_mac(self):
""" Read MAC from OTP ROM """
mac0 = self.read_reg(self.ESP_OTP_MAC0)
mac1 = self.read_reg(self.ESP_OTP_MAC1)
mac3 = self.read_reg(self.ESP_OTP_MAC3)
if (mac3 != 0):
oui = ((mac3 >> 16) & 0xff, (mac3 >> 8) & 0xff, mac3 & 0xff)
elif ((mac1 >> 16) & 0xff) == 0:
oui = (0x18, 0xfe, 0x34)
elif ((mac1 >> 16) & 0xff) == 1:
oui = (0xac, 0xd0, 0x74)
else:
raise FatalError("Unknown OUI")
return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff)
def get_erase_size(self, offset, size):
""" Calculate an erase size given a specific size in bytes.
Provides a workaround for the bootloader erase bug."""
sectors_per_block = 16
sector_size = self.FLASH_SECTOR_SIZE
num_sectors = (size + sector_size - 1) // sector_size
start_sector = offset // sector_size
head_sectors = sectors_per_block - (start_sector % sectors_per_block)
if num_sectors < head_sectors:
head_sectors = num_sectors
if num_sectors < 2 * head_sectors:
return (num_sectors + 1) // 2 * sector_size
else:
return (num_sectors - head_sectors) * sector_size
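    # For example, get_erase_size(0, 0x40000) returns 0x30000: the requested size is
    # shrunk so that the ROM loader's buggy erase ends up covering the intended region.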
def override_vddsdio(self, new_voltage):
raise NotImplementedInROMError("Overriding VDDSDIO setting only applies to ESP32")
class ESP8266StubLoader(ESP8266ROM):
""" Access class for ESP8266 stub loader, runs on top of ROM.
"""
FLASH_WRITE_SIZE = 0x4000 # matches MAX_WRITE_BLOCK in stub_loader.c
IS_STUB = True
def __init__(self, rom_loader):
self._port = rom_loader._port
self._trace_enabled = rom_loader._trace_enabled
self.flush_input() # resets _slip_reader
def get_erase_size(self, offset, size):
return size # stub doesn't have same size bug as ROM loader
ESP8266ROM.STUB_CLASS = ESP8266StubLoader
class ESP32ROM(ESPLoader):
"""Access class for ESP32 ROM bootloader
"""
CHIP_NAME = "ESP32"
IS_STUB = False
DATE_REG_VALUE = 0x15122500
IROM_MAP_START = 0x400d0000
IROM_MAP_END = 0x40400000
DROM_MAP_START = 0x3F400000
DROM_MAP_END = 0x3F800000
# ESP32 uses a 4 byte status reply
STATUS_BYTES_LENGTH = 4
SPI_REG_BASE = 0x60002000
EFUSE_REG_BASE = 0x6001a000
SPI_W0_OFFS = 0x80
SPI_HAS_MOSI_DLEN_REG = True
FLASH_SIZES = {
'1MB':0x00,
'2MB':0x10,
'4MB':0x20,
'8MB':0x30,
'16MB':0x40
}
BOOTLOADER_FLASH_OFFSET = 0x1000
OVERRIDE_VDDSDIO_CHOICES = ["1.8V", "1.9V", "OFF"]
def get_chip_description(self):
word3 = self.read_efuse(3)
chip_ver_rev1 = (word3 >> 15) & 0x1
pkg_version = (word3 >> 9) & 0x07
chip_name = {
0: "ESP32D0WDQ6",
1: "ESP32D0WDQ5",
2: "ESP32D2WDQ5",
5: "ESP32-PICO-D4",
}.get(pkg_version, "unknown ESP32")
return "%s (revision %d)" % (chip_name, chip_ver_rev1)
def get_chip_features(self):
features = ["WiFi"]
word3 = self.read_efuse(3)
# names of variables in this section are lowercase
# versions of EFUSE names as documented in TRM and
# ESP-IDF efuse_reg.h
chip_ver_dis_bt = word3 & (1 << 1)
if chip_ver_dis_bt == 0:
features += ["BT"]
chip_ver_dis_app_cpu = word3 & (1 << 0)
if chip_ver_dis_app_cpu:
features += ["Single Core"]
else:
features += ["Dual Core"]
chip_cpu_freq_rated = word3 & (1 << 13)
if chip_cpu_freq_rated:
chip_cpu_freq_low = word3 & (1 << 12)
if chip_cpu_freq_low:
features += ["160MHz"]
else:
features += ["240MHz"]
pkg_version = (word3 >> 9) & 0x07
if pkg_version in [2, 4, 5]:
features += ["Embedded Flash"]
word4 = self.read_efuse(4)
adc_vref = (word4 >> 8) & 0x1F
if adc_vref:
features += ["VRef calibration in efuse"]
blk3_part_res = word3 >> 14 & 0x1
if blk3_part_res:
features += ["BLK3 partially reserved"]
word6 = self.read_efuse(6)
coding_scheme = word6 & 0x3
features += ["Coding Scheme %s" % {
0: "None",
1: "3/4",
2: "Repeat (UNSUPPORTED)",
3: "Invalid"}[coding_scheme]]
return features
def read_efuse(self, n):
""" Read the nth word of the ESP3x EFUSE region. """
return self.read_reg(self.EFUSE_REG_BASE + (4 * n))
def chip_id(self):
raise NotSupportedError(self, "chip_id")
def read_mac(self):
""" Read MAC from EFUSE region """
words = [self.read_efuse(2), self.read_efuse(1)]
bitstring = struct.pack(">II", *words)
bitstring = bitstring[2:8] # trim the 2 byte CRC
try:
return tuple(ord(b) for b in bitstring)
except TypeError: # Python 3, bitstring elements are already bytes
return tuple(bitstring)
def get_erase_size(self, offset, size):
return size
def override_vddsdio(self, new_voltage):
new_voltage = new_voltage.upper()
if new_voltage not in self.OVERRIDE_VDDSDIO_CHOICES:
raise FatalError("The only accepted VDDSDIO overrides are '1.8V', '1.9V' and 'OFF'")
RTC_CNTL_SDIO_CONF_REG = 0x3ff48074
RTC_CNTL_XPD_SDIO_REG = (1 << 31)
RTC_CNTL_DREFH_SDIO_M = (3 << 29)
RTC_CNTL_DREFM_SDIO_M = (3 << 27)
RTC_CNTL_DREFL_SDIO_M = (3 << 25)
# RTC_CNTL_SDIO_TIEH = (1 << 23) # not used here, setting TIEH=1 would set 3.3V output, not safe for esptool.py to do
RTC_CNTL_SDIO_FORCE = (1 << 22)
RTC_CNTL_SDIO_PD_EN = (1 << 21)
reg_val = RTC_CNTL_SDIO_FORCE # override efuse setting
reg_val |= RTC_CNTL_SDIO_PD_EN
if new_voltage != "OFF":
reg_val |= RTC_CNTL_XPD_SDIO_REG # enable internal LDO
if new_voltage == "1.9V":
reg_val |= (RTC_CNTL_DREFH_SDIO_M | RTC_CNTL_DREFM_SDIO_M | RTC_CNTL_DREFL_SDIO_M) # boost voltage
self.write_reg(RTC_CNTL_SDIO_CONF_REG, reg_val)
print("VDDSDIO regulator set to %s" % new_voltage)
class ESP32StubLoader(ESP32ROM):
""" Access class for ESP32 stub loader, runs on top of ROM.
"""
FLASH_WRITE_SIZE = 0x4000 # matches MAX_WRITE_BLOCK in stub_loader.c
STATUS_BYTES_LENGTH = 2 # same as ESP8266, different to ESP32 ROM
IS_STUB = True
def __init__(self, rom_loader):
self._port = rom_loader._port
self._trace_enabled = rom_loader._trace_enabled
self.flush_input() # resets _slip_reader
ESP32ROM.STUB_CLASS = ESP32StubLoader
class ESPBOOTLOADER(object):
""" These are constants related to software ESP bootloader, working with 'v2' image files """
# First byte of the "v2" application image
IMAGE_V2_MAGIC = 0xea
# First 'segment' value in a "v2" application image, appears to be a constant version value?
IMAGE_V2_SEGMENT = 4
def LoadFirmwareImage(chip, filename):
""" Load a firmware image. Can be for ESP8266 or ESP32. ESP8266 images will be examined to determine if they are
original ROM firmware images (ESP8266ROMFirmwareImage) or "v2" OTA bootloader images.
Returns a BaseFirmwareImage subclass, either ESP8266ROMFirmwareImage (v1) or ESP8266V2FirmwareImage (v2).
"""
with open(filename, 'rb') as f:
if chip.lower() == 'esp32':
return ESP32FirmwareImage(f)
else: # Otherwise, ESP8266 so look at magic to determine the image type
magic = ord(f.read(1))
f.seek(0)
if magic == ESPLoader.ESP_IMAGE_MAGIC:
return ESP8266ROMFirmwareImage(f)
elif magic == ESPBOOTLOADER.IMAGE_V2_MAGIC:
return ESP8266V2FirmwareImage(f)
else:
raise FatalError("Invalid image magic number: %d" % magic)
class ImageSegment(object):
""" Wrapper class for a segment in an ESP image
(very similar to a section in an ELFImage also) """
def __init__(self, addr, data, file_offs=None):
self.addr = addr
self.data = data
self.file_offs = file_offs
self.include_in_checksum = True
if self.addr != 0:
self.pad_to_alignment(4) # pad all "real" ImageSegments 4 byte aligned length
def copy_with_new_addr(self, new_addr):
""" Return a new ImageSegment with same data, but mapped at
a new address. """
return ImageSegment(new_addr, self.data, 0)
def split_image(self, split_len):
""" Return a new ImageSegment which splits "split_len" bytes
from the beginning of the data. Remaining bytes are kept in
this segment object (and the start address is adjusted to match.) """
result = copy.copy(self)
result.data = self.data[:split_len]
self.data = self.data[split_len:]
self.addr += split_len
self.file_offs = None
result.file_offs = None
return result
def __repr__(self):
r = "len 0x%05x load 0x%08x" % (len(self.data), self.addr)
if self.file_offs is not None:
r += " file_offs 0x%08x" % (self.file_offs)
return r
def pad_to_alignment(self, alignment):
self.data = pad_to(self.data, alignment, b'\x00')
class ELFSection(ImageSegment):
""" Wrapper class for a section in an ELF image, has a section
name as well as the common properties of an ImageSegment. """
def __init__(self, name, addr, data):
super(ELFSection, self).__init__(addr, data)
self.name = name.decode("utf-8")
def __repr__(self):
return "%s %s" % (self.name, super(ELFSection, self).__repr__())
class BaseFirmwareImage(object):
SEG_HEADER_LEN = 8
SHA256_DIGEST_LEN = 32
""" Base class with common firmware image functions """
def __init__(self):
self.segments = []
self.entrypoint = 0
self.elf_sha256 = None
self.elf_sha256_offset = 0
def load_common_header(self, load_file, expected_magic):
(magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', load_file.read(8))
if magic != expected_magic:
raise FatalError('Invalid firmware image magic=0x%x' % (magic))
return segments
def verify(self):
if len(self.segments) > 16:
raise FatalError('Invalid segment count %d (max 16). Usually this indicates a linker script problem.' % len(self.segments))
def load_segment(self, f, is_irom_segment=False):
""" Load the next segment from the image file """
file_offs = f.tell()
(offset, size) = struct.unpack('<II', f.read(8))
self.warn_if_unusual_segment(offset, size, is_irom_segment)
segment_data = f.read(size)
if len(segment_data) < size:
raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data)))
segment = ImageSegment(offset, segment_data, file_offs)
self.segments.append(segment)
return segment
def warn_if_unusual_segment(self, offset, size, is_irom_segment):
if not is_irom_segment:
if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536:
print('WARNING: Suspicious segment 0x%x, length %d' % (offset, size))
def maybe_patch_segment_data(self, f, segment_data):
"""If SHA256 digest of the ELF file needs to be inserted into this segment, do so. Returns segment data."""
segment_len = len(segment_data)
file_pos = f.tell()
if self.elf_sha256_offset >= file_pos and self.elf_sha256_offset < file_pos + segment_len:
# SHA256 digest needs to be patched into this segment,
# calculate offset of the digest inside the segment.
patch_offset = self.elf_sha256_offset - file_pos
# Sanity checks
if patch_offset < self.SEG_HEADER_LEN or patch_offset + self.SHA256_DIGEST_LEN > segment_len:
raise FatalError('Can not place SHA256 digest on segment boundary ' +
'(elf_sha256_offset=%d, file_pos=%d, segment_size=%d)' %
(self.elf_sha256_offset, file_pos, segment_len))
assert(len(self.elf_sha256) == self.SHA256_DIGEST_LEN)
# offset relative to the data part
patch_offset -= self.SEG_HEADER_LEN
segment_data = segment_data[0:patch_offset] + self.elf_sha256 + \
segment_data[patch_offset + self.SHA256_DIGEST_LEN:]
return segment_data
def save_segment(self, f, segment, checksum=None):
""" Save the next segment to the image file, return next checksum value if provided """
segment_data = self.maybe_patch_segment_data(f, segment.data)
f.write(struct.pack('<II', segment.addr, len(segment_data)))
f.write(segment_data)
if checksum is not None:
return ESPLoader.checksum(segment_data, checksum)
def read_checksum(self, f):
""" Return ESPLoader checksum from end of just-read image """
# Skip the padding. The checksum is stored in the last byte so that the
# file is a multiple of 16 bytes.
align_file_position(f, 16)
return ord(f.read(1))
def calculate_checksum(self):
""" Calculate checksum of loaded image, based on segments in
segment array.
"""
checksum = ESPLoader.ESP_CHECKSUM_MAGIC
for seg in self.segments:
if seg.include_in_checksum:
checksum = ESPLoader.checksum(seg.data, checksum)
return checksum
def append_checksum(self, f, checksum):
""" Append ESPLoader checksum to the just-written image """
align_file_position(f, 16)
f.write(struct.pack(b'B', checksum))
def write_common_header(self, f, segments):
f.write(struct.pack('<BBBBI', ESPLoader.ESP_IMAGE_MAGIC, len(segments),
self.flash_mode, self.flash_size_freq, self.entrypoint))
def is_irom_addr(self, addr):
""" Returns True if an address starts in the irom region.
Valid for ESP8266 only.
"""
return ESP8266ROM.IROM_MAP_START <= addr < ESP8266ROM.IROM_MAP_END
def get_irom_segment(self):
irom_segments = [s for s in self.segments if self.is_irom_addr(s.addr)]
if len(irom_segments) > 0:
if len(irom_segments) != 1:
raise FatalError('Found %d segments that could be irom0. Bad ELF file?' % len(irom_segments))
return irom_segments[0]
return None
def get_non_irom_segments(self):
irom_segment = self.get_irom_segment()
return [s for s in self.segments if s != irom_segment]
class ESP8266ROMFirmwareImage(BaseFirmwareImage):
""" 'Version 1' firmware image, segments loaded directly by the ROM bootloader. """
ROM_LOADER = ESP8266ROM
def __init__(self, load_file=None):
super(ESP8266ROMFirmwareImage, self).__init__()
self.flash_mode = 0
self.flash_size_freq = 0
self.version = 1
if load_file is not None:
segments = self.load_common_header(load_file, ESPLoader.ESP_IMAGE_MAGIC)
for _ in range(segments):
self.load_segment(load_file)
self.checksum = self.read_checksum(load_file)
self.verify()
def default_output_name(self, input_file):
""" Derive a default output name from the ELF name. """
return input_file + '-'
def save(self, basename):
""" Save a set of V1 images for flashing. Parameter is a base filename. """
# IROM data goes in its own plain binary file
irom_segment = self.get_irom_segment()
if irom_segment is not None:
with open("%s0x%05x.bin" % (basename, irom_segment.addr - ESP8266ROM.IROM_MAP_START), "wb") as f:
f.write(irom_segment.data)
# everything but IROM goes at 0x00000 in an image file
normal_segments = self.get_non_irom_segments()
with open("%s0x00000.bin" % basename, 'wb') as f:
self.write_common_header(f, normal_segments)
checksum = ESPLoader.ESP_CHECKSUM_MAGIC
for segment in normal_segments:
checksum = self.save_segment(f, segment, checksum)
self.append_checksum(f, checksum)
class ESP8266V2FirmwareImage(BaseFirmwareImage):
""" 'Version 2' firmware image, segments loaded by software bootloader stub
(ie Espressif bootloader or rboot)
"""
ROM_LOADER = ESP8266ROM
def __init__(self, load_file=None):
super(ESP8266V2FirmwareImage, self).__init__()
self.version = 2
if load_file is not None:
segments = self.load_common_header(load_file, ESPBOOTLOADER.IMAGE_V2_MAGIC)
if segments != ESPBOOTLOADER.IMAGE_V2_SEGMENT:
# segment count is not really segment count here, but we expect to see '4'
print('Warning: V2 header has unexpected "segment" count %d (usually 4)' % segments)
# irom segment comes before the second header
#
# the file is saved in the image with a zero load address
# in the header, so we need to calculate a load address
irom_segment = self.load_segment(load_file, True)
irom_segment.addr = 0 # for actual mapped addr, add ESP8266ROM.IROM_MAP_START + flashing_addr + 8
irom_segment.include_in_checksum = False
first_flash_mode = self.flash_mode
first_flash_size_freq = self.flash_size_freq
first_entrypoint = self.entrypoint
# load the second header
segments = self.load_common_header(load_file, ESPLoader.ESP_IMAGE_MAGIC)
if first_flash_mode != self.flash_mode:
print('WARNING: Flash mode value in first header (0x%02x) disagrees with second (0x%02x). Using second value.'
% (first_flash_mode, self.flash_mode))
if first_flash_size_freq != self.flash_size_freq:
print('WARNING: Flash size/freq value in first header (0x%02x) disagrees with second (0x%02x). Using second value.'
% (first_flash_size_freq, self.flash_size_freq))
if first_entrypoint != self.entrypoint:
print('WARNING: Entrypoint address in first header (0x%08x) disagrees with second header (0x%08x). Using second value.'
% (first_entrypoint, self.entrypoint))
# load all the usual segments
for _ in range(segments):
self.load_segment(load_file)
self.checksum = self.read_checksum(load_file)
self.verify()
def default_output_name(self, input_file):
""" Derive a default output name from the ELF name. """
irom_segment = self.get_irom_segment()
if irom_segment is not None:
irom_offs = irom_segment.addr - ESP8266ROM.IROM_MAP_START
else:
irom_offs = 0
return "%s-0x%05x.bin" % (os.path.splitext(input_file)[0],
irom_offs & ~(ESPLoader.FLASH_SECTOR_SIZE - 1))
def save(self, filename):
with open(filename, 'wb') as f:
# Save first header for irom0 segment
f.write(struct.pack(b'<BBBBI', ESPBOOTLOADER.IMAGE_V2_MAGIC, ESPBOOTLOADER.IMAGE_V2_SEGMENT,
self.flash_mode, self.flash_size_freq, self.entrypoint))
irom_segment = self.get_irom_segment()
if irom_segment is not None:
# save irom0 segment, make sure it has load addr 0 in the file
irom_segment = irom_segment.copy_with_new_addr(0)
irom_segment.pad_to_alignment(16) # irom_segment must end on a 16 byte boundary
self.save_segment(f, irom_segment)
# second header, matches V1 header and contains loadable segments
normal_segments = self.get_non_irom_segments()
self.write_common_header(f, normal_segments)
checksum = ESPLoader.ESP_CHECKSUM_MAGIC
for segment in normal_segments:
checksum = self.save_segment(f, segment, checksum)
self.append_checksum(f, checksum)
# calculate a crc32 of entire file and append
# (algorithm used by recent 8266 SDK bootloaders)
with open(filename, 'rb') as f:
crc = esp8266_crc32(f.read())
with open(filename, 'ab') as f:
f.write(struct.pack(b'<I', crc))
# Backwards compatibility for previous API, remove in esptool.py V3
ESPFirmwareImage = ESP8266ROMFirmwareImage
OTAFirmwareImage = ESP8266V2FirmwareImage
def esp8266_crc32(data):
"""
CRC32 algorithm used by 8266 SDK bootloader (and gen_appbin.py).
"""
crc = binascii.crc32(data, 0) & 0xFFFFFFFF
if crc & 0x80000000:
return crc ^ 0xFFFFFFFF
else:
return crc + 1
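# For example, esp8266_crc32(b'') == binascii.crc32(b'') + 1 == 1; inputs whose
# standard CRC32 has the top bit set are returned as (crc ^ 0xFFFFFFFF) instead.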
class ESP32FirmwareImage(BaseFirmwareImage):
""" ESP32 firmware image is very similar to V1 ESP8266 image,
except with an additional 16 byte reserved header at top of image,
and because of new flash mapping capabilities the flash-mapped regions
can be placed in the normal image (just @ 64kB padded offsets).
"""
ROM_LOADER = ESP32ROM
# ROM bootloader will read the wp_pin field if SPI flash
# pins are remapped via flash. IDF actually enables QIO only
# from software bootloader, so this can be ignored. But needs
# to be set to this value so ROM bootloader will skip it.
WP_PIN_DISABLED = 0xEE
EXTENDED_HEADER_STRUCT_FMT = "B" * 16
def __init__(self, load_file=None):
super(ESP32FirmwareImage, self).__init__()
self.secure_pad = False
self.flash_mode = 0
self.flash_size_freq = 0
self.version = 1
self.wp_pin = self.WP_PIN_DISABLED
# SPI pin drive levels
self.clk_drv = 0
self.q_drv = 0
self.d_drv = 0
self.cs_drv = 0
self.hd_drv = 0
self.wp_drv = 0
self.append_digest = True
if load_file is not None:
start = load_file.tell()
segments = self.load_common_header(load_file, ESPLoader.ESP_IMAGE_MAGIC)
self.load_extended_header(load_file)
for _ in range(segments):
self.load_segment(load_file)
self.checksum = self.read_checksum(load_file)
if self.append_digest:
end = load_file.tell()
self.stored_digest = load_file.read(32)
load_file.seek(start)
calc_digest = hashlib.sha256()
calc_digest.update(load_file.read(end - start))
self.calc_digest = calc_digest.digest() # TODO: decide what to do here?
self.verify()
def is_flash_addr(self, addr):
return (ESP32ROM.IROM_MAP_START <= addr < ESP32ROM.IROM_MAP_END) \
or (ESP32ROM.DROM_MAP_START <= addr < ESP32ROM.DROM_MAP_END)
def default_output_name(self, input_file):
""" Derive a default output name from the ELF name. """
return "%s.bin" % (os.path.splitext(input_file)[0])
def warn_if_unusual_segment(self, offset, size, is_irom_segment):
pass # TODO: add warnings for ESP32 segment offset/size combinations that are wrong
def save(self, filename):
total_segments = 0
with io.BytesIO() as f: # write file to memory first
self.write_common_header(f, self.segments)
# first 4 bytes of header are read by ROM bootloader for SPI
# config, but currently unused
self.save_extended_header(f)
checksum = ESPLoader.ESP_CHECKSUM_MAGIC
# split segments into flash-mapped vs ram-loaded, and take copies so we can mutate them
flash_segments = [copy.deepcopy(s) for s in sorted(self.segments, key=lambda s:s.addr) if self.is_flash_addr(s.addr)]
ram_segments = [copy.deepcopy(s) for s in sorted(self.segments, key=lambda s:s.addr) if not self.is_flash_addr(s.addr)]
IROM_ALIGN = 65536
# check for multiple ELF sections that are mapped in the same flash mapping region.
# this is usually a sign of a broken linker script, but if you have a legitimate
# use case then let us know (we can merge segments here, but as a rule you probably
# want to merge them in your linker script.)
if len(flash_segments) > 0:
last_addr = flash_segments[0].addr
for segment in flash_segments[1:]:
if segment.addr // IROM_ALIGN == last_addr // IROM_ALIGN:
raise FatalError(("Segment loaded at 0x%08x lands in same 64KB flash mapping as segment loaded at 0x%08x. " +
"Can't generate binary. Suggest changing linker script or ELF to merge sections.") %
(segment.addr, last_addr))
last_addr = segment.addr
def get_alignment_data_needed(segment):
# Actual alignment (in data bytes) required for a segment header: positioned so that
# after we write the next 8 byte header, file_offs % IROM_ALIGN == segment.addr % IROM_ALIGN
#
# (this is because the segment's vaddr may not be IROM_ALIGNed; more likely it is aligned to
# IROM_ALIGN+0x18 to account for the binary file header)
align_past = (segment.addr % IROM_ALIGN) - self.SEG_HEADER_LEN
pad_len = (IROM_ALIGN - (f.tell() % IROM_ALIGN)) + align_past
if pad_len == 0 or pad_len == IROM_ALIGN:
return 0 # already aligned
# subtract SEG_HEADER_LEN a second time, as the padding block has a header as well
pad_len -= self.SEG_HEADER_LEN
if pad_len < 0:
pad_len += IROM_ALIGN
return pad_len
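# Worked example (illustrative, assuming SEG_HEADER_LEN == 8): with the file at offset 32 and a
# segment whose addr % IROM_ALIGN == 0x20, align_past = 0x18 and the initial pad_len is
# (65536 - 32) + 0x18 = 65528; subtracting the padding segment's own 8 byte header gives 65520,
# so after the pad segment and the next 8 byte segment header the flash data starts at offset
# 65568, which is 0x20 modulo IROM_ALIGN as required.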
# try to fit each flash segment on a 64kB aligned boundary
# by padding with parts of the non-flash segments...
while len(flash_segments) > 0:
segment = flash_segments[0]
pad_len = get_alignment_data_needed(segment)
if pad_len > 0: # need to pad
if len(ram_segments) > 0 and pad_len > self.SEG_HEADER_LEN:
pad_segment = ram_segments[0].split_image(pad_len)
if len(ram_segments[0].data) == 0:
ram_segments.pop(0)
else:
pad_segment = ImageSegment(0, b'\x00' * pad_len, f.tell())
checksum = self.save_segment(f, pad_segment, checksum)
total_segments += 1
else:
# write the flash segment
assert (f.tell() + 8) % IROM_ALIGN == segment.addr % IROM_ALIGN
checksum = self.save_segment(f, segment, checksum)
flash_segments.pop(0)
total_segments += 1
# flash segments all written, so write any remaining RAM segments
for segment in ram_segments:
checksum = self.save_segment(f, segment, checksum)
total_segments += 1
if self.secure_pad:
# pad the image so that after signing it will end on a 64KB boundary.
# This ensures all mapped flash content will be verified.
if not self.append_digest:
raise FatalError("secure_pad only applies if a SHA-256 digest is also appended to the image")
align_past = (f.tell() + self.SEG_HEADER_LEN) % IROM_ALIGN
# 16 byte aligned checksum (force the alignment to simplify calculations)
checksum_space = 16
# after checksum: SHA-256 digest + (to be added by signing process) version, signature + 12 trailing bytes due to alignment
space_after_checksum = 32 + 4 + 64 + 12
pad_len = (IROM_ALIGN - align_past - checksum_space - space_after_checksum) % IROM_ALIGN
pad_segment = ImageSegment(0, b'\x00' * pad_len, f.tell())
checksum = self.save_segment(f, pad_segment, checksum)
total_segments += 1
# done writing segments
self.append_checksum(f, checksum)
image_length = f.tell()
if self.secure_pad:
assert ((image_length + space_after_checksum) % IROM_ALIGN) == 0
# kinda hacky: go back to the initial header and write the new segment count
# that includes padding segments. This header is not checksummed
f.seek(1)
try:
f.write(chr(total_segments))
except TypeError: # Python 3
f.write(bytes([total_segments]))
if self.append_digest:
# calculate the SHA256 of the whole file and append it
f.seek(0)
digest = hashlib.sha256()
digest.update(f.read(image_length))
f.write(digest.digest())
with open(filename, 'wb') as real_file:
real_file.write(f.getvalue())
def load_extended_header(self, load_file):
def split_byte(n):
return (n & 0x0F, (n >> 4) & 0x0F)
fields = list(struct.unpack(self.EXTENDED_HEADER_STRUCT_FMT, load_file.read(16)))
self.wp_pin = fields[0]
# SPI pin drive strengths are two per byte
self.clk_drv, self.q_drv = split_byte(fields[1])
self.d_drv, self.cs_drv = split_byte(fields[2])
self.hd_drv, self.wp_drv = split_byte(fields[3])
if fields[15] in [0, 1]:
self.append_digest = (fields[15] == 1)
else:
raise RuntimeError("Invalid value for append_digest field (0x%02x). Should be 0 or 1.", fields[15])
# remaining fields in the middle should all be zero
if any(f for f in fields[4:15] if f != 0):
print("Warning: some reserved header fields have non-zero values. This image may be from a newer esptool.py?")
def save_extended_header(self, save_file):
def join_byte(ln,hn):
return (ln & 0x0F) + ((hn & 0x0F) << 4)
append_digest = 1 if self.append_digest else 0
fields = [self.wp_pin,
join_byte(self.clk_drv, self.q_drv),
join_byte(self.d_drv, self.cs_drv),
join_byte(self.hd_drv, self.wp_drv)]
fields += [0] * 11
fields += [append_digest]
packed = struct.pack(self.EXTENDED_HEADER_STRUCT_FMT, *fields)
save_file.write(packed)
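# Layout sketch of the 16 byte extended header written above (illustrative): byte 0 is wp_pin,
# bytes 1-3 pack the six SPI pin drive strengths two per byte (first of each pair in the low
# nibble), bytes 4-14 are reserved as zero and byte 15 is the append_digest flag. For the
# defaults (wp_pin=0xEE, drives 0, digest appended) the packed bytes are
# EE 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01.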
class ELFFile(object):
SEC_TYPE_PROGBITS = 0x01
SEC_TYPE_STRTAB = 0x03
LEN_SEC_HEADER = 0x28
def __init__(self, name):
# Load sections from the ELF file
self.name = name
with open(self.name, 'rb') as f:
self._read_elf_file(f)
def get_section(self, section_name):
for s in self.sections:
if s.name == section_name:
return s
raise ValueError("No section %s in ELF file" % section_name)
def _read_elf_file(self, f):
# read the ELF file header
LEN_FILE_HEADER = 0x34
try:
(ident,_type,machine,_version,
self.entrypoint,_phoff,shoff,_flags,
_ehsize, _phentsize,_phnum, shentsize,
shnum,shstrndx) = struct.unpack("<16sHHLLLLLHHHHHH", f.read(LEN_FILE_HEADER))
except struct.error as e:
raise FatalError("Failed to read a valid ELF header from %s: %s" % (self.name, e))
if byte(ident, 0) != 0x7f or ident[1:4] != b'ELF':
raise FatalError("%s has invalid ELF magic header" % self.name)
if machine != 0x5e:
raise FatalError("%s does not appear to be an Xtensa ELF file. e_machine=%04x" % (self.name, machine))
if shentsize != self.LEN_SEC_HEADER:
raise FatalError("%s has unexpected section header entry size 0x%x (not 0x28)" % (self.name, shentsize, self.LEN_SEC_HEADER))
if shnum == 0:
raise FatalError("%s has 0 section headers" % (self.name))
self._read_sections(f, shoff, shnum, shstrndx)
def _read_sections(self, f, section_header_offs, section_header_count, shstrndx):
f.seek(section_header_offs)
len_bytes = section_header_count * self.LEN_SEC_HEADER
section_header = f.read(len_bytes)
if len(section_header) == 0:
raise FatalError("No section header found at offset %04x in ELF file." % section_header_offs)
if len(section_header) != (len_bytes):
raise FatalError("Only read 0x%x bytes from section header (expected 0x%x.) Truncated ELF file?" % (len(section_header), len_bytes))
# walk through the section header and extract all sections
section_header_offsets = range(0, len(section_header), self.LEN_SEC_HEADER)
def read_section_header(offs):
name_offs,sec_type,_flags,lma,sec_offs,size = struct.unpack_from("<LLLLLL", section_header[offs:])
return (name_offs, sec_type, lma, size, sec_offs)
all_sections = [read_section_header(offs) for offs in section_header_offsets]
prog_sections = [s for s in all_sections if s[1] == ELFFile.SEC_TYPE_PROGBITS]
# search for the string table section
if not (shstrndx * self.LEN_SEC_HEADER) in section_header_offsets:
raise FatalError("ELF file has no STRTAB section at shstrndx %d" % shstrndx)
_,sec_type,_,sec_size,sec_offs = read_section_header(shstrndx * self.LEN_SEC_HEADER)
if sec_type != ELFFile.SEC_TYPE_STRTAB:
print('WARNING: ELF file has incorrect STRTAB section type 0x%02x' % sec_type)
f.seek(sec_offs)
string_table = f.read(sec_size)
# build the real list of ELFSections by reading the actual section names from the
# string table section, and actual data for each section from the ELF file itself
def lookup_string(offs):
raw = string_table[offs:]
return raw[:raw.index(b'\x00')]
def read_data(offs,size):
f.seek(offs)
return f.read(size)
prog_sections = [ELFSection(lookup_string(n_offs), lma, read_data(offs, size)) for (n_offs, _type, lma, size, offs) in prog_sections
if lma != 0 and size > 0]
self.sections = prog_sections
def sha256(self):
# return SHA256 hash of the input ELF file
sha256 = hashlib.sha256()
with open(self.name, 'rb') as f:
sha256.update(f.read())
return sha256.digest()
def slip_reader(port, trace_function):
"""Generator to read SLIP packets from a serial port.
Yields one full SLIP packet at a time, raises exception on timeout or invalid data.
Designed to avoid too many calls to serial.read(1), which can bog
down on slow systems.
"""
partial_packet = None
in_escape = False
while True:
waiting = port.inWaiting()
read_bytes = port.read(1 if waiting == 0 else waiting)
if read_bytes == b'':
waiting_for = "header" if partial_packet is None else "content"
trace_function("Timed out waiting for packet %s", waiting_for)
raise FatalError("Timed out waiting for packet %s" % waiting_for)
trace_function("Read %d bytes: %s", len(read_bytes), HexFormatter(read_bytes))
for b in read_bytes:
if type(b) is int:
b = bytes([b]) # python 2/3 compat
if partial_packet is None: # waiting for packet header
if b == b'\xc0':
partial_packet = b""
else:
trace_function("Read invalid data: %s", HexFormatter(read_bytes))
trace_function("Remaining data in serial buffer: %s", HexFormatter(port.read(port.inWaiting())))
raise FatalError('Invalid head of packet (0x%s)' % hexify(b))
elif in_escape: # part-way through escape sequence
in_escape = False
if b == b'\xdc':
partial_packet += b'\xc0'
elif b == b'\xdd':
partial_packet += b'\xdb'
else:
trace_function("Read invalid data: %s", HexFormatter(read_bytes))
trace_function("Remaining data in serial buffer: %s", HexFormatter(port.read(port.inWaiting())))
raise FatalError('Invalid SLIP escape (0xdb, 0x%s)' % (hexify(b)))
elif b == b'\xdb': # start of escape sequence
in_escape = True
elif b == b'\xc0': # end of packet
trace_function("Received full packet: %s", HexFormatter(partial_packet))
yield partial_packet
partial_packet = None
else: # normal byte in packet
partial_packet += b
def arg_auto_int(x):
return int(x, 0)
def div_roundup(a, b):
""" Return a/b rounded up to nearest integer,
equivalent result to int(math.ceil(float(int(a)) / float(int(b)))), only
without possible floating point accuracy errors.
"""
return (int(a) + int(b) - 1) // int(b)
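# For example div_roundup(1024, 1024) == 1 and div_roundup(1025, 1024) == 2.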
def align_file_position(f, size):
""" Align the position in the file to the next block of specified size """
align = (size - 1) - (f.tell() % size)
f.seek(align, 1)
def flash_size_bytes(size):
""" Given a flash size of the type passed in args.flash_size
(ie 512KB or 1MB) then return the size in bytes.
"""
if "MB" in size:
return int(size[:size.index("MB")]) * 1024 * 1024
elif "KB" in size:
return int(size[:size.index("KB")]) * 1024
else:
raise FatalError("Unknown size %s" % size)
def hexify(s, uppercase=True):
format_str = '%02X' if uppercase else '%02x'
if not PYTHON2:
return ''.join(format_str % c for c in s)
else:
return ''.join(format_str % ord(c) for c in s)
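# For example hexify(b'\x01\xab') == '01AB' and hexify(b'\x01\xab', uppercase=False) == '01ab'.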
class HexFormatter(object):
"""
Wrapper class which takes binary data in its constructor
and returns a hex string from its __str__ method.
This is intended for "lazy formatting" of trace() output
in hex format. It avoids the overhead (significant on slow computers)
of generating long hex strings when tracing is disabled.
Note that this doesn't save any overhead if passed as an
argument to "%", only when passed to trace().
If auto_split is set (default), any long line (> 16 bytes) will be
printed as separately indented lines, with ASCII decoding at the end
of each line.
"""
def __init__(self, binary_string, auto_split=True):
self._s = binary_string
self._auto_split = auto_split
def __str__(self):
if self._auto_split and len(self._s) > 16:
result = ""
s = self._s
while len(s) > 0:
line = s[:16]
ascii_line = "".join(c if (c == ' ' or (c in string.printable and c not in string.whitespace))
else '.' for c in line.decode('ascii', 'replace'))
s = s[16:]
result += "\n %-16s %-16s | %s" % (hexify(line[:8], False), hexify(line[8:], False), ascii_line)
return result
else:
return hexify(self._s, False)
def pad_to(data, alignment, pad_character=b'\xFF'):
""" Pad to the next alignment boundary """
pad_mod = len(data) % alignment
if pad_mod != 0:
data += pad_character * (alignment - pad_mod)
return data
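# For example pad_to(b'abc', 4) == b'abc\xff', while already-aligned data is returned unchanged.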
class FatalError(RuntimeError):
"""
Wrapper class for runtime errors that aren't caused by internal bugs, but by
ESP8266 responses or input content.
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
@staticmethod
def WithResult(message, result):
"""
Return a fatal error object that appends the hex values of
'result' as a string formatted argument.
"""
message += " (result was %s)" % hexify(result)
return FatalError(message)
class NotImplementedInROMError(FatalError):
"""
Wrapper class for the error thrown when a particular ESP bootloader function
is not implemented in the ROM bootloader.
"""
def __init__(self, bootloader, func):
FatalError.__init__(self, "%s ROM does not support function %s." % (bootloader.CHIP_NAME, func.__name__))
class NotSupportedError(FatalError):
def __init__(self, esp, function_name):
FatalError.__init__(self, "Function %s is not supported for %s." % (function_name, esp.CHIP_NAME))
# "Operation" commands, executable at command line. One function each
#
# Each function takes either two args (<ESPLoader instance>, <args>) or a single <args>
# argument.
def load_ram(esp, args):
image = LoadFirmwareImage(esp.CHIP_NAME, args.filename)
print('RAM boot...')
for seg in image.segments:
size = len(seg.data)
print('Downloading %d bytes at %08x...' % (size, seg.addr), end=' ')
sys.stdout.flush()
esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, seg.addr)
seq = 0
while len(seg.data) > 0:
esp.mem_block(seg.data[0:esp.ESP_RAM_BLOCK], seq)
seg.data = seg.data[esp.ESP_RAM_BLOCK:]
seq += 1
print('done!')
print('All segments done, executing at %08x' % image.entrypoint)
esp.mem_finish(image.entrypoint)
def read_mem(esp, args):
print('0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address)))
def write_mem(esp, args):
esp.write_reg(args.address, args.value, args.mask, 0)
print('Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address))
def dump_mem(esp, args):
f = open(args.filename, 'wb')
for i in range(args.size // 4):
d = esp.read_reg(args.address + (i * 4))
f.write(struct.pack(b'<I', d))
if f.tell() % 1024 == 0:
print('\r%d bytes read... (%d %%)' % (f.tell(),
f.tell() * 100 // args.size),
end=' ')
sys.stdout.flush()
print('Done!')
def detect_flash_size(esp, args):
if args.flash_size == 'detect':
flash_id = esp.flash_id()
size_id = flash_id >> 16
args.flash_size = DETECTED_FLASH_SIZES.get(size_id)
if args.flash_size is None:
print('Warning: Could not auto-detect Flash size (FlashID=0x%x, SizeID=0x%x), defaulting to 4MB' % (flash_id, size_id))
args.flash_size = '4MB'
else:
print('Auto-detected Flash size:', args.flash_size)
def _update_image_flash_params(esp, address, args, image):
""" Modify the flash mode & size bytes if this looks like an executable bootloader image """
if len(image) < 8:
return image # not long enough to be a bootloader image
# unpack the (potential) image header
magic, _, flash_mode, flash_size_freq = struct.unpack("BBBB", image[:4])
if address != esp.BOOTLOADER_FLASH_OFFSET or magic != esp.ESP_IMAGE_MAGIC:
return image # not flashing a bootloader, so don't modify this
if args.flash_mode != 'keep':
flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
flash_freq = flash_size_freq & 0x0F
if args.flash_freq != 'keep':
flash_freq = {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
flash_size = flash_size_freq & 0xF0
if args.flash_size != 'keep':
flash_size = esp.parse_flash_size_arg(args.flash_size)
flash_params = struct.pack(b'BB', flash_mode, flash_size + flash_freq)
if flash_params != image[2:4]:
print('Flash params set to 0x%04x' % struct.unpack(">H", flash_params))
image = image[0:2] + flash_params + image[4:]
return image
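# Header byte sketch (illustrative): the two bytes rewritten above are image[2] (flash mode,
# e.g. 2 for 'dio') and image[3] (size code in the high nibble plus frequency code in the low
# nibble), so a size/freq byte of 0x2f with 'dio' mode would be packed as b'\x02\x2f'.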
def write_flash(esp, args):
# set args.compress based on default behaviour:
# -> if either --compress or --no-compress is set, honour that
# -> otherwise, set --compress unless --no-stub is set
if args.compress is None and not args.no_compress:
args.compress = not args.no_stub
# verify file sizes fit in flash
flash_end = flash_size_bytes(args.flash_size)
for address, argfile in args.addr_filename:
argfile.seek(0,2) # seek to end
if address + argfile.tell() > flash_end:
raise FatalError(("File %s (length %d) at offset %d will not fit in %d bytes of flash. " +
"Use --flash-size argument, or change flashing address.")
% (argfile.name, argfile.tell(), address, flash_end))
argfile.seek(0)
for address, argfile in args.addr_filename:
if args.no_stub:
print('Erasing flash...')
image = pad_to(argfile.read(), 4)
if len(image) == 0:
print('WARNING: File %s is empty' % argfile.name)
continue
image = _update_image_flash_params(esp, address, args, image)
calcmd5 = hashlib.md5(image).hexdigest()
uncsize = len(image)
if args.compress:
uncimage = image
image = zlib.compress(uncimage, 9)
ratio = uncsize / len(image)
blocks = esp.flash_defl_begin(uncsize, len(image), address)
else:
ratio = 1.0
blocks = esp.flash_begin(uncsize, address)
argfile.seek(0) # in case we need it again
seq = 0
written = 0
t = time.time()
while len(image) > 0:
print('\rWriting at 0x%08x... (%d %%)' % (address + seq * esp.FLASH_WRITE_SIZE, 100 * (seq + 1) // blocks), end='')
sys.stdout.flush()
block = image[0:esp.FLASH_WRITE_SIZE]
if args.compress:
esp.flash_defl_block(block, seq, timeout=DEFAULT_TIMEOUT * ratio)
else:
# Pad the last block
block = block + b'\xff' * (esp.FLASH_WRITE_SIZE - len(block))
esp.flash_block(block, seq)
image = image[esp.FLASH_WRITE_SIZE:]
seq += 1
written += len(block)
t = time.time() - t
speed_msg = ""
if args.compress:
if t > 0.0:
speed_msg = " (effective %.1f kbit/s)" % (uncsize / t * 8 / 1000)
print('\rWrote %d bytes (%d compressed) at 0x%08x in %.1f seconds%s...' % (uncsize, written, address, t, speed_msg))
else:
if t > 0.0:
speed_msg = " (%.1f kbit/s)" % (written / t * 8 / 1000)
print('\rWrote %d bytes at 0x%08x in %.1f seconds%s...' % (written, address, t, speed_msg))
try:
res = esp.flash_md5sum(address, uncsize)
if res != calcmd5:
print('File md5: %s' % calcmd5)
print('Flash md5: %s' % res)
print('MD5 of 0xFF is %s' % (hashlib.md5(b'\xFF' * uncsize).hexdigest()))
raise FatalError("MD5 of file does not match data in flash!")
else:
print('Hash of data verified.')
except NotImplementedInROMError:
pass
print('\nLeaving...')
if esp.IS_STUB:
# skip sending flash_finish to ROM loader here,
# as it causes the loader to exit and run user code
esp.flash_begin(0, 0)
if args.compress:
esp.flash_defl_finish(False)
else:
esp.flash_finish(False)
if args.verify:
print('Verifying just-written flash...')
print('(This option is deprecated, flash contents are now always read back after flashing.)')
verify_flash(esp, args)
def image_info(args):
image = LoadFirmwareImage(args.chip, args.filename)
print('Image version: %d' % image.version)
print('Entry point: %08x' % image.entrypoint if image.entrypoint != 0 else 'Entry point not set')
print('%d segments' % len(image.segments))
print()
idx = 0
for seg in image.segments:
idx += 1
print('Segment %d: %r' % (idx, seg))
calc_checksum = image.calculate_checksum()
print('Checksum: %02x (%s)' % (image.checksum,
'valid' if image.checksum == calc_checksum else 'invalid - calculated %02x' % calc_checksum))
try:
digest_msg = 'Not appended'
if image.append_digest:
is_valid = image.stored_digest == image.calc_digest
digest_msg = "%s (%s)" % (hexify(image.calc_digest).lower(),
"valid" if is_valid else "invalid")
print('Validation Hash: %s' % digest_msg)
except AttributeError:
pass # ESP8266 image has no append_digest field
def make_image(args):
image = ESP8266ROMFirmwareImage()
if len(args.segfile) == 0:
raise FatalError('No segments specified')
if len(args.segfile) != len(args.segaddr):
raise FatalError('Number of specified files does not match number of specified addresses')
for (seg, addr) in zip(args.segfile, args.segaddr):
data = open(seg, 'rb').read()
image.segments.append(ImageSegment(addr, data))
image.entrypoint = args.entrypoint
image.save(args.output)
def elf2image(args):
e = ELFFile(args.input)
if args.chip == 'auto': # Default to ESP8266 for backwards compatibility
print("Creating image for ESP8266...")
args.chip = 'esp8266'
if args.chip == 'esp32':
image = ESP32FirmwareImage()
image.secure_pad = args.secure_pad
elif args.version == '1': # ESP8266
image = ESP8266ROMFirmwareImage()
else:
image = ESP8266V2FirmwareImage()
image.entrypoint = e.entrypoint
image.segments = e.sections # ELFSection is a subclass of ImageSegment
image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
image.flash_size_freq = image.ROM_LOADER.FLASH_SIZES[args.flash_size]
image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
if args.elf_sha256_offset:
image.elf_sha256 = e.sha256()
image.elf_sha256_offset = args.elf_sha256_offset
image.verify()
if args.output is None:
args.output = image.default_output_name(args.input)
image.save(args.output)
def read_mac(esp, args):
mac = esp.read_mac()
def print_mac(label, mac):
print('%s: %s' % (label, ':'.join(map(lambda x: '%02x' % x, mac))))
print_mac("MAC", mac)
def chip_id(esp, args):
try:
chipid = esp.chip_id()
print('Chip ID: 0x%08x' % chipid)
except NotSupportedError:
print('Warning: %s has no Chip ID. Reading MAC instead.' % esp.CHIP_NAME)
read_mac(esp, args)
def erase_flash(esp, args):
print('Erasing flash (this may take a while)...')
t = time.time()
esp.erase_flash()
print('Chip erase completed successfully in %.1fs' % (time.time() - t))
def erase_region(esp, args):
print('Erasing region (may be slow depending on size)...')
t = time.time()
esp.erase_region(args.address, args.size)
print('Erase completed successfully in %.1f seconds.' % (time.time() - t))
def run(esp, args):
esp.run()
def flash_id(esp, args):
flash_id = esp.flash_id()
print('Manufacturer: %02x' % (flash_id & 0xff))
flid_lowbyte = (flash_id >> 16) & 0xFF
print('Device: %02x%02x' % ((flash_id >> 8) & 0xff, flid_lowbyte))
print('Detected flash size: %s' % (DETECTED_FLASH_SIZES.get(flid_lowbyte, "Unknown")))
def read_flash(esp, args):
if args.no_progress:
flash_progress = None
else:
def flash_progress(progress, length):
msg = '%d (%d %%)' % (progress, progress * 100.0 / length)
padding = '\b' * len(msg)
if progress == length:
padding = '\n'
sys.stdout.write(msg + padding)
sys.stdout.flush()
t = time.time()
data = esp.read_flash(args.address, args.size, flash_progress)
t = time.time() - t
print('\rRead %d bytes at 0x%x in %.1f seconds (%.1f kbit/s)...'
% (len(data), args.address, t, len(data) / t * 8 / 1000))
open(args.filename, 'wb').write(data)
def verify_flash(esp, args):
differences = False
for address, argfile in args.addr_filename:
image = pad_to(argfile.read(), 4)
argfile.seek(0) # rewind in case we need it again
image = _update_image_flash_params(esp, address, args, image)
image_size = len(image)
print('Verifying 0x%x (%d) bytes @ 0x%08x in flash against %s...' % (image_size, image_size, address, argfile.name))
# Try digest first, only read if there are differences.
digest = esp.flash_md5sum(address, image_size)
expected_digest = hashlib.md5(image).hexdigest()
if digest == expected_digest:
print('-- verify OK (digest matched)')
continue
else:
differences = True
if getattr(args, 'diff', 'no') != 'yes':
print('-- verify FAILED (digest mismatch)')
continue
flash = esp.read_flash(address, image_size)
assert flash != image
diff = [i for i in range(image_size) if flash[i] != image[i]]
print('-- verify FAILED: %d differences, first @ 0x%08x' % (len(diff), address + diff[0]))
for d in diff:
flash_byte = flash[d]
image_byte = image[d]
if PYTHON2:
flash_byte = ord(flash_byte)
image_byte = ord(image_byte)
print(' %08x %02x %02x' % (address + d, flash_byte, image_byte))
if differences:
raise FatalError("Verify failed.")
def read_flash_status(esp, args):
print('Status value: 0x%04x' % esp.read_status(args.bytes))
def write_flash_status(esp, args):
fmt = "0x%%0%dx" % (args.bytes * 2)
args.value = args.value & ((1 << (args.bytes * 8)) - 1)
print(('Initial flash status: ' + fmt) % esp.read_status(args.bytes))
print(('Setting flash status: ' + fmt) % args.value)
esp.write_status(args.value, args.bytes, args.non_volatile)
print(('After flash status: ' + fmt) % esp.read_status(args.bytes))
def version(args):
print(__version__)
#
# End of operations functions
#
def main():
parser = argparse.ArgumentParser(description='esptool.py v%s - ESP8266 ROM Bootloader Utility' % __version__, prog='esptool')
parser.add_argument('--chip', '-c',
help='Target chip type',
choices=['auto', 'esp8266', 'esp32'],
default=os.environ.get('ESPTOOL_CHIP', 'auto'))
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', None))
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate used when flashing/reading',
type=arg_auto_int,
default=os.environ.get('ESPTOOL_BAUD', ESPLoader.ESP_ROM_BAUD))
parser.add_argument(
'--before',
help='What to do before connecting to the chip',
choices=['default_reset', 'no_reset', 'no_reset_no_sync'],
default=os.environ.get('ESPTOOL_BEFORE', 'default_reset'))
parser.add_argument(
'--after', '-a',
help='What to do after esptool.py is finished',
choices=['hard_reset', 'soft_reset', 'no_reset'],
default=os.environ.get('ESPTOOL_AFTER', 'hard_reset'))
parser.add_argument(
'--no-stub',
help="Disable launching the flasher stub, only talk to ROM bootloader. Some features will not be available.",
action='store_true')
parser.add_argument(
'--trace', '-t',
help="Enable trace-level output of esptool.py interactions.",
action='store_true')
parser.add_argument(
'--override-vddsdio',
help="Override ESP32 VDDSDIO internal voltage regulator (use with care)",
choices=ESP32ROM.OVERRIDE_VDDSDIO_CHOICES,
nargs='?')
subparsers = parser.add_subparsers(
dest='operation',
help='Run esptool {command} -h for additional help')
def add_spi_connection_arg(parent):
parent.add_argument('--spi-connection', '-sc', help='ESP32-only argument. Override default SPI Flash connection. ' +
'Value can be SPI, HSPI or a comma-separated list of 5 I/O numbers to use for SPI flash (CLK,Q,D,HD,CS).',
action=SpiConnectionAction)
parser_load_ram = subparsers.add_parser(
'load_ram',
help='Download an image to RAM and execute')
parser_load_ram.add_argument('filename', help='Firmware image')
parser_dump_mem = subparsers.add_parser(
'dump_mem',
help='Dump arbitrary memory to disk')
parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int)
parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_dump_mem.add_argument('filename', help='Name of binary dump')
parser_read_mem = subparsers.add_parser(
'read_mem',
help='Read arbitrary memory location')
parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int)
parser_write_mem = subparsers.add_parser(
'write_mem',
help='Read-modify-write to arbitrary memory location')
parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int)
parser_write_mem.add_argument('value', help='Value', type=arg_auto_int)
parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int)
def add_spi_flash_subparsers(parent, is_elf2image):
""" Add common parser arguments for SPI flash properties """
extra_keep_args = [] if is_elf2image else ['keep']
auto_detect = not is_elf2image
parent.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
choices=extra_keep_args + ['40m', '26m', '20m', '80m'],
default=os.environ.get('ESPTOOL_FF', '40m' if is_elf2image else 'keep'))
parent.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
choices=extra_keep_args + ['qio', 'qout', 'dio', 'dout'],
default=os.environ.get('ESPTOOL_FM', 'qio' if is_elf2image else 'keep'))
parent.add_argument('--flash_size', '-fs', help='SPI Flash size in MegaBytes (1MB, 2MB, 4MB, 8MB, 16MB)'
' plus ESP8266-only (256KB, 512KB, 2MB-c1, 4MB-c1)',
action=FlashSizeAction, auto_detect=auto_detect,
default=os.environ.get('ESPTOOL_FS', 'detect' if auto_detect else '1MB'))
add_spi_connection_arg(parent)
parser_write_flash = subparsers.add_parser(
'write_flash',
help='Write a binary blob to flash')
parser_write_flash.add_argument('addr_filename', metavar='<address> <filename>', help='Address followed by binary filename, separated by space',
action=AddrFilenamePairAction)
add_spi_flash_subparsers(parser_write_flash, is_elf2image=False)
parser_write_flash.add_argument('--no-progress', '-p', help='Suppress progress output', action="store_true")
parser_write_flash.add_argument('--verify', help='Verify just-written data on flash ' +
'(mostly superfluous, data is read back during flashing)', action='store_true')
compress_args = parser_write_flash.add_mutually_exclusive_group(required=False)
compress_args.add_argument('--compress', '-z', help='Compress data in transfer (default unless --no-stub is specified)',action="store_true", default=None)
compress_args.add_argument('--no-compress', '-u', help='Disable data compression during transfer (default if --no-stub is specified)',action="store_true")
subparsers.add_parser(
'run',
help='Run application code in flash')
parser_image_info = subparsers.add_parser(
'image_info',
help='Dump headers from an application image')
parser_image_info.add_argument('filename', help='Image file to parse')
parser_make_image = subparsers.add_parser(
'make_image',
help='Create an application image from binary files')
parser_make_image.add_argument('output', help='Output image file')
parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file')
parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int)
parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0)
parser_elf2image = subparsers.add_parser(
'elf2image',
help='Create an application image from ELF file')
parser_elf2image.add_argument('input', help='Input ELF file')
parser_elf2image.add_argument('--output', '-o', help='Output filename prefix (for version 1 image), or filename (for version 2 single image)', type=str)
parser_elf2image.add_argument('--version', '-e', help='Output image version', choices=['1','2'], default='1')
parser_elf2image.add_argument('--secure-pad', action='store_true', help='Pad image so once signed it will end on a 64KB boundary. For ESP32 images only.')
parser_elf2image.add_argument('--elf-sha256-offset', help='If set, insert SHA256 hash (32 bytes) of the input ELF file at specified offset in the binary.',
type=arg_auto_int, default=None)
add_spi_flash_subparsers(parser_elf2image, is_elf2image=True)
subparsers.add_parser(
'read_mac',
help='Read MAC address from OTP ROM')
subparsers.add_parser(
'chip_id',
help='Read Chip ID from OTP ROM')
parser_flash_id = subparsers.add_parser(
'flash_id',
help='Read SPI flash manufacturer and device ID')
add_spi_connection_arg(parser_flash_id)
parser_read_status = subparsers.add_parser(
'read_flash_status',
help='Read SPI flash status register')
add_spi_connection_arg(parser_read_status)
parser_read_status.add_argument('--bytes', help='Number of bytes to read (1-3)', type=int, choices=[1,2,3], default=2)
parser_write_status = subparsers.add_parser(
'write_flash_status',
help='Write SPI flash status register')
add_spi_connection_arg(parser_write_status)
parser_write_status.add_argument('--non-volatile', help='Write non-volatile bits (use with caution)', action='store_true')
parser_write_status.add_argument('--bytes', help='Number of status bytes to write (1-3)', type=int, choices=[1,2,3], default=2)
parser_write_status.add_argument('value', help='New value', type=arg_auto_int)
parser_read_flash = subparsers.add_parser(
'read_flash',
help='Read SPI flash content')
add_spi_connection_arg(parser_read_flash)
parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int)
parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_read_flash.add_argument('filename', help='Name of binary dump')
parser_read_flash.add_argument('--no-progress', '-p', help='Suppress progress output', action="store_true")
parser_verify_flash = subparsers.add_parser(
'verify_flash',
help='Verify a binary blob against flash')
parser_verify_flash.add_argument('addr_filename', help='Address and binary file to verify there, separated by space',
action=AddrFilenamePairAction)
parser_verify_flash.add_argument('--diff', '-d', help='Show differences',
choices=['no', 'yes'], default='no')
add_spi_flash_subparsers(parser_verify_flash, is_elf2image=False)
parser_erase_flash = subparsers.add_parser(
'erase_flash',
help='Perform Chip Erase on SPI flash')
add_spi_connection_arg(parser_erase_flash)
parser_erase_region = subparsers.add_parser(
'erase_region',
help='Erase a region of the flash')
add_spi_connection_arg(parser_erase_region)
parser_erase_region.add_argument('address', help='Start address (must be multiple of 4096)', type=arg_auto_int)
parser_erase_region.add_argument('size', help='Size of region to erase (must be multiple of 4096)', type=arg_auto_int)
subparsers.add_parser(
'version', help='Print esptool version')
# internal sanity check - every operation matches a module function of the same name
for operation in subparsers.choices.keys():
assert operation in globals(), "%s should be a module function" % operation
expand_file_arguments()
args = parser.parse_args()
print('esptool.py v%s' % __version__)
# operation function can take 1 arg (args), 2 args (esp, args)
# or be a member function of the ESPLoader class.
if args.operation is None:
parser.print_help()
sys.exit(1)
operation_func = globals()[args.operation]
if PYTHON2:
# This function is deprecated in Python 3
operation_args = inspect.getargspec(operation_func).args
else:
operation_args = inspect.getfullargspec(operation_func).args
if operation_args[0] == 'esp': # operation function takes an ESPLoader connection object
if args.before != "no_reset_no_sync":
initial_baud = min(ESPLoader.ESP_ROM_BAUD, args.baud) # don't sync faster than the default baud rate
else:
initial_baud = args.baud
if args.port is None:
ser_list = sorted(ports.device for ports in list_ports.comports())
print("Found %d serial ports" % len(ser_list))
else:
ser_list = [args.port]
esp = None
for each_port in reversed(ser_list):
print("Serial port %s" % each_port)
try:
if args.chip == 'auto':
esp = ESPLoader.detect_chip(each_port, initial_baud, args.before, args.trace)
else:
chip_class = {
'esp8266': ESP8266ROM,
'esp32': ESP32ROM,
}[args.chip]
esp = chip_class(each_port, initial_baud, args.trace)
esp.connect(args.before)
break
except (FatalError, OSError) as err:
if args.port is not None:
raise
print("%s failed to connect: %s" % (each_port, err))
esp = None
if esp is None:
raise FatalError("All of the %d available serial ports could not connect to a Espressif device." % len(ser_list))
print("Chip is %s" % (esp.get_chip_description()))
print("Features: %s" % ", ".join(esp.get_chip_features()))
read_mac(esp, args)
if not args.no_stub:
esp = esp.run_stub()
if args.override_vddsdio:
esp.override_vddsdio(args.override_vddsdio)
if args.baud > initial_baud:
try:
esp.change_baud(args.baud)
except NotImplementedInROMError:
print("WARNING: ROM doesn't support changing baud rate. Keeping initial baud rate %d" % initial_baud)
# override common SPI flash parameter stuff if configured to do so
if hasattr(args, "spi_connection") and args.spi_connection is not None:
if esp.CHIP_NAME != "ESP32":
raise FatalError("Chip %s does not support --spi-connection option." % esp.CHIP_NAME)
print("Configuring SPI flash mode...")
esp.flash_spi_attach(args.spi_connection)
elif args.no_stub:
print("Enabling default SPI flash mode...")
# ROM loader doesn't enable flash unless we explicitly do it
esp.flash_spi_attach(0)
if hasattr(args, "flash_size"):
print("Configuring flash size...")
detect_flash_size(esp, args)
esp.flash_set_parameters(flash_size_bytes(args.flash_size))
operation_func(esp, args)
# Handle post-operation behaviour (reset or other)
if operation_func == load_ram:
# the ESP is now running the loaded image, so let it run
print('Exiting immediately.')
elif args.after == 'hard_reset':
print('Hard resetting via RTS pin...')
esp.hard_reset()
elif args.after == 'soft_reset':
print('Soft resetting...')
# flash_finish will trigger a soft reset
esp.soft_reset(False)
else:
print('Staying in bootloader.')
if esp.IS_STUB:
esp.soft_reset(True) # exit stub back to ROM loader
esp._port.close()
else:
operation_func(args)
def expand_file_arguments():
""" Any argument starting with "@" gets replaced with all values read from a text file.
Text file arguments can be split by newline or by space.
Values are added "as-is", as if they were specified in this order on the command line.
"""
new_args = []
expanded = False
for arg in sys.argv:
if arg.startswith("@"):
expanded = True
with open(arg[1:],"r") as f:
for line in f.readlines():
new_args += shlex.split(line)
else:
new_args.append(arg)
if expanded:
print("esptool.py %s" % (" ".join(new_args[1:])))
sys.argv = new_args
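# Usage sketch (file name is hypothetical): if flash_args.txt contains the line
# "write_flash 0x0 app.bin", then running "esptool.py @flash_args.txt" behaves as if
# "write_flash 0x0 app.bin" had been typed directly on the command line.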
class FlashSizeAction(argparse.Action):
""" Custom flash size parser class to support backwards compatibility with megabit size arguments.
(At the next major release, remove deprecated sizes and this can become a 'normal' choices= argument again.)
"""
def __init__(self, option_strings, dest, nargs=1, auto_detect=False, **kwargs):
super(FlashSizeAction, self).__init__(option_strings, dest, nargs, **kwargs)
self._auto_detect = auto_detect
def __call__(self, parser, namespace, values, option_string=None):
try:
value = {
'2m': '256KB',
'4m': '512KB',
'8m': '1MB',
'16m': '2MB',
'32m': '4MB',
'16m-c1': '2MB-c1',
'32m-c1': '4MB-c1',
}[values[0]]
print("WARNING: Flash size arguments in megabits like '%s' are deprecated." % (values[0]))
print("Please use the equivalent size '%s'." % (value))
print("Megabit arguments may be removed in a future release.")
except KeyError:
value = values[0]
known_sizes = dict(ESP8266ROM.FLASH_SIZES)
known_sizes.update(ESP32ROM.FLASH_SIZES)
if self._auto_detect:
known_sizes['detect'] = 'detect'
if value not in known_sizes:
raise argparse.ArgumentError(self, '%s is not a known flash size. Known sizes: %s' % (value, ", ".join(known_sizes.keys())))
setattr(namespace, self.dest, value)
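# For example a legacy "--flash_size 32m" argument is mapped to '4MB' with a deprecation
# warning, while "--flash_size 4MB" (or 'detect', when auto-detection is enabled) passes
# the known-sizes check unchanged.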
class SpiConnectionAction(argparse.Action):
""" Custom action to parse 'spi connection' override. Values are SPI, HSPI, or a sequence of 5 pin numbers separated by commas.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.upper() == "SPI":
value = 0
elif value.upper() == "HSPI":
value = 1
elif "," in value:
values = value.split(",")
if len(values) != 5:
raise argparse.ArgumentError(self, '%s is not a valid list of comma-separated pin numbers. Must be 5 numbers - CLK,Q,D,HD,CS.' % value)
try:
values = tuple(int(v,0) for v in values)
except ValueError:
raise argparse.ArgumentError(self, '%s is not a valid argument. All pins must be numeric values' % values)
if any([v for v in values if v > 33 or v < 0]):
raise argparse.ArgumentError(self, 'Pin numbers must be in the range 0-33.')
# encode the pin numbers as a 32-bit integer with packed 6-bit values, the same way ESP32 ROM takes them
# TODO: make this less ESP32 ROM specific somehow...
clk,q,d,hd,cs = values
value = (hd << 24) | (cs << 18) | (d << 12) | (q << 6) | clk
else:
raise argparse.ArgumentError(self, ('%s is not a valid spi-connection value. ' +
'Values are SPI, HSPI, or a sequence of 5 pin numbers (CLK,Q,D,HD,CS).') % value)
setattr(namespace, self.dest, value)
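# Worked example of the packing above (pin numbers are illustrative): "--spi-connection
# 6,17,8,11,16" gives clk=6, q=17, d=8, hd=11, cs=16, so the encoded value is
# (11<<24) | (16<<18) | (8<<12) | (17<<6) | 6 == 0x0B408446.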
class AddrFilenamePairAction(argparse.Action):
""" Custom parser class for the address/filename pairs passed as arguments """
def __init__(self, option_strings, dest, nargs='+', **kwargs):
super(AddrFilenamePairAction, self).__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
# validate pair arguments
pairs = []
for i in range(0,len(values),2):
try:
address = int(values[i],0)
except ValueError:
raise argparse.ArgumentError(self,'Address "%s" must be a number' % values[i])
try:
argfile = open(values[i + 1], 'rb')
except IOError as e:
raise argparse.ArgumentError(self, e)
except IndexError:
raise argparse.ArgumentError(self,'Must be pairs of an address and the binary filename to write there')
pairs.append((address, argfile))
# Sort the addresses and check for overlapping
end = 0
for address, argfile in sorted(pairs):
argfile.seek(0,2) # seek to end
size = argfile.tell()
argfile.seek(0)
sector_start = address & ~(ESPLoader.FLASH_SECTOR_SIZE - 1)
sector_end = ((address + size + ESPLoader.FLASH_SECTOR_SIZE - 1) & ~(ESPLoader.FLASH_SECTOR_SIZE - 1)) - 1
if sector_start < end:
message = 'Detected overlap at address: 0x%x for file: %s' % (address, argfile.name)
raise argparse.ArgumentError(self, message)
end = sector_end
setattr(namespace, self.dest, pairs)
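# Overlap check sketch (illustrative, assuming FLASH_SECTOR_SIZE == 0x1000): a file written at
# 0x1000 with length 0x2000 occupies sectors 0x1000-0x2FFF, so pairing it with a second file at
# address 0x2000 triggers the "Detected overlap" error above.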
# Binary stub code (see flasher_stub dir for source & details)
ESP8266ROM.STUB_CODE = eval(zlib.decompress(base64.b64decode(b"""
eNrNPHt/1Da2X2XshJCE0Fq2x5bTsMxMwvAo3PLYpHQ3bWPLNpTbdpMhv03K0vvZr89Lkj0TAn3d+0dgZFvS0XmfoyP95+Z5c3l+c3dU3Ty+LPXxpYqOL6No0v2jji/bFv7m9+BR/093f21z96sH04ddv7T7q+DT\
u91bw43mLn2mvW5NN0NbwCwT+pJenAwmUKt/K+8bAs0DyPRnohkGULtOk5XLOb4szC1eRxnJr27am97AqQe1HZAh6WFi0JDhqh62eggabXmwdqjSNYL10gMQaGR7F9BovEZhEZ96b6CzqtzQZXS8GCBHWxCOz+Xn\
0+6fxmuo2BvCeGBUkddQrV3EXve4YIAiH1QgVll70EUedFHvpaG57Dxq7KFI9XkgijzWw4bMXhmhUUd4rb1G6Rovsddk5zH+F32B/10+sOzyiH9V6UP+Zcxn/Et1EzQxN2pd4K/X9lk3SC0zFiAHyNWTxxsCEg8Z\
EKiwqKLrWSpieuiiut8mCsu1EAlIC47L/e5pXM668eNyCvOV3XBtXN4l0WlyGs1YFMEUMTzs3lYJIxDQA3yd+QIHIMVfhjl8rXlWbbYD+H7/HvWOVCB07SZRQjklD7d3YO4RDWkALfFM4JeFjGnSulwBKlKwZVRE\
yg4GVImjdGQf4NA78A+Plg5Hu+o5wIoImcqPZ90PxMyZ/GAUxRUDwoOZdhXIEWMABLLDwJqFURN4Knsr3QQx/AYnSfproqcqDqfAJiF/BECYdXw2nj3bj8twg1gGJlEmyUlcVTeLznz5lgESmS4GCQyBv6Jw1M6g\
36Yj2iZ8EZbhXBiDidbB82yffxvGRpUNsSFMkwIbhzBRzDTX90g/opWxKw99mGgwpUbwY9QhVsescHNiYZUT8xSgpBTP1TQ0aGmuAEagLctlBjEZCC9znMMzCBeYgCIh2OAzjXooj5+1AnDGkADg2du1Fj+efytP\
9o9/btfk08c8J/SpfQXW7mOvM/z9wpsucRAWuq8six5wUfaOIAMst2+9IcTWZfwxqD3qNb90o1XeaPOf4HlHe81aVpkBIgqaa7NldYCrm8jH8KAD5GcauR3A+ZlDgs5dF+UDMJPnoffwG4Eq9uhaK/hCJTvdhOfy\
LKJn3Z8GaH8d8EKfrDq74b9L+u+63yBIYx9XqgYbIGJ2wrossxa0ZczF81Lenfnq6HyZBZvuy6YGR4QkWGc/WdO8M4VOZ8NORyT5qsQFXdaggXHxn9FX2jwZAQumRy/AHLKmjcEKaVZuGet/54mA1KFi/mFpuueE\
ziaiuQzO9Tn/yP9Ga4A50PgtD03LKK9aRrZyGUdsWhqg49eOBOCh1LGI9oZF+Eth9pYxLwSpWqFoLTT5YunVPr0y2X/5xN6QT+ZPUJ0/DtY94+ADEgvfeeI+v8vzwVgpT6fibpTuv+bjBpqLFvgbjQVIitL51Mcv\
DFf4w8UscJE3bkaugRNk9H30Dui/58QdaNyAb8giWfXJwxRgQZD5xTdo2rmbqSn3Z47y4H02zSSE4fLPYPznDN24Evdjxj9QVR2C5hclnJ8cojE6gIdPD0bwAfoek2QEcBkxK0bJmkcowB2Q81seXiQGcGjwcKOy\
5PgcVIp9q49Yq+mY9RAix2Qjp600qRBii3VHYfSckWvmI8t9X+FXm8J3JLM91ouyLWG9QRzEvYELNvvTOPYnPaXyXjfLtBvevDjylEQVZMoR/JKoZkzeZ5fW/EtxAOfPWbwml6XNsx3oPqIIyYepsIxC/nzB+jRC\
OLe7RsFOuf4ypFY03v56RF8Z5BnQVu03BBp+kMw5HhxbJcur156Y1Q5LywglBSW+tECM/BGNQIePz9mJxqYZv/X8DHEj2oqVktNjnZUxpXXUZIoWXbLOSCUo/jOhN80cpTCQmOoaleYGMuc8xBhceAW4bi1nXy+e\
E2ajNBaDhrM8ZEsqbqDMpMQpytxwKBgmoVXgauvhigKSLHroPNmAidOMEvTvwPtORmsgE74njiFIb7w78PI2xqDwfjx8H5LbDbyGH6jhB2xWjB/+4fQ0cTKDf9OXx8fgp/0bRpkTu9Hnq/3NBJ3NZBQzOlCzINve\
+yezTubSIlH0cR51iEqcFKlTmOUtZtCGXQtg2DQgn7htpg7hVRwyfyoORpqKOSoJQd/Ei+BWVd6g6Jjt8c2cVa9IXYwfxCVMM37C7nocjM8kZQAMFdw6/ZKteHlAMTl47nX531W5hYPsTF5yQNoxT9mSOKvxHBy8\
9KIb0vLwzwQAuKqNWexQLw3UUePNH2G2cmNRruOo23svQLO+B3mED9LXoG1ArVaJx7swdtrBrRFgskeb9B7RETOr5i3zsq+FgZ0Kkhr4X8cFCEtBEV+Z393F0Hn9J+rS/dxgBwcsSYca8AzNWoSpq+DfjDHDBhQN\
+sPqW8KZqSmjoDMXWJc5aeEGBJXSDcEpjQLrMm3B3mzGfxE/oMHmvg8Vu3RTr+8S/OgcNDD53g6D48c6OPKCVnbjaY4rM25lUfKNXZG46nYZmVvG94NlFLKMZJh/E/mIgn96fQz2Cb7hRwBBAgmv+DVbiugGiNKb\
cFZGz8u0Kj8HooEkMD8rio6WNASSfRZEz4O0CrBTkBDXkxg2pKRGLTGnMf9+OkpFXPUIg66nf4cIuzqUIPzsKfMWZqIwzsGUwKvcESUaTyaQ5hFfJJuAICVRC5xk5jyUuHKOFMEdGHtnvyC0zylX0UcjE6TN7txF\
um6xXfNfWv5jrg1Hmyy0hhYXmSSANTZFJcxLw5iS5rNkjoWuoSyP53CJotDlcYmc/LueiCaHAL4BMQLBRZ4E5xMzJ2ebhM7OwK53S6nyUTCFJeUkfE0yJesIS2rro3ANTfM6qoZ6DzFw4wMYKCwGtthZMC+OwJyW\
3Ckd5A4te/8J657Lolmdwzo4iTfh5QKN1Hug3IgwpSLnE6vsCTk2LXBmXeDq165c/UKWjgEEjmOOyAa1qadP7Jr1n0Jr0Dpo+9qAHU2wYxF6kW/C3RCYbEZGyhKaY8tGT2XlQzLtTT+e78GOgnrgmJoyO4stQyNU\
HHg5BR0wneopMUpbW133JnwUCpYIap7aebNTCpvb+k3yKAn34RuL7IA4i0ZWMrKRkQUPgloF8LTZvO8oYmpY0ZDgCpwXYRe1gJqkZFnbeRznX8TH57u+FyKJzzoKMSVSsJ3R7BQOkLjzdi6pa0lfAc9dJ3EDnttL\
jsi+wqr/cnmjzm3kRK7BvZZz9Dk2PB5zjOfFr1G0tz+nXGMUL+uVTqEUAD3QOloEEFpV7eKeAHjoXEGyQ0+oUUf74DOcrKHncJMcLEh+ISFqL6eX0LPGrJhf4/zazj8X7vNU3g44UOcSFr1zfv8Kuu2QBYX5Kn2F\
VCGtIBPaLIKtpwC6wSjoHfxzQQnZSEGCLb6gVDC1stugtcDjN4CDnEz4At2ugRWvIxJTZ8UXQSzWm+w22HCY0bw7PG05LYQypTmnAdDGnYwv5sNob7Ae3GVKOKGKDLLhYiWdBY6rwlcgRDvsoBeoEmbwLBbPc/yB\
RYFLvLQoSZjo7QtF4gyfuCV2K+7WDZjLZbdEz/aFuZIL4g+Mik18SvuEEXuiRTqTpGkVcqDY5qFsXMiuEtDZnA/D4tkD25XkTGsZYhzTnk6jdzlah+xHLuO12w8WB64zO++YQsRgQHkpuyx4jyoyDK28faRFX/Jp\
MGMtesaYvp653rbppaTAb/VnJIEXBW+ImiT33hqnr/kFbp/gcOBrqcmcSQwIazAMjUYutbrCRXiAvAhpuvpRMtqA7rAzBUkdNBpVBupaBd85SLqYGEF/jpvSPR79dcigQqWOBftecyTpFWsV7vwmJww8uyMyMCb+\
f2Aagjla5XTIlB26o/Xt9ek+Ych5os5YWze1Q3xwG216iIkX3i9QJkHSgHBi3k6dfeFT5XQBTI1CPjm9BPu0AP/3JcQh6kDinc5gKZ9sixIrEtTzZd1yfHOK6oSWvX1800txRerJsAPBjFTCIRGo7a0ZZSJwywjY\
bcggnRyqjj02IUmK6hkLPXgqHYNwqrC8jZB/5myhz0woemOf6gD8Nu5bJb4WQimJZmvC4AczQz87ID4jM4hhfSrzAJ+pLV6uM8HsAGCaZ4Ho7Vol2fFCg62ElgaFDNm7lpwnVLhbROyiPOB9LshZyXLRw8zW2XJY\
dCtyHnB70HTuWChwjEh7o4UHccacwnvyTBVsqOFO/xWWOgnYdzRFxdt6hcyMFCxKzakEQWL2kiRHYQkG7pwnkliflShfB7DVnyxKfZqPOOGvVFzmcZDNghzhSBdBFpebk2Q0C/TpGXL8waLMZqW+T3memvMdILo6\
z9XpA1pKER247P1komblpgs7EULNG4ZYtIKpOoR91i1o8hYGmAWbQJ1k9jW0FrIrDzoCQkVU5DbLDt2iUzCEkwvszCQGX0HjlCEOM5rZ5FwTFVNBicstdsayW/wpLP70APh/QUmvtp3cR2aFpzh+51ufd8uCmVLJ\
JoAEdXCHf0d97s3VIRKQd9Ehr/sZqWmHaBB9Y81Zzs5luQhyYAx9ihTAtHwU6Yils0AgJshHpwlViRTxC6cJi8IXElW4BLMrMglub2y55HmZuz1i7bmjoKpr3nanb2U/GvL3AHmZhfJqS8pMKmL/Bh3T9Iyzm2Z7\
7TRkxNktHuYOkJpC9qfV5KESobkP/oHbj7ccLNKdT2i3NudnGDHdcIuxBUaGXAVazBY5X1iqYt6TG+nZW8zigNSYCDI6UfoIsFwmhOzYZf+A9ZtE8pSzW53/Ax4venApB8GZBJp768NCmw8H7hifJRy7F39h7O4I\
720h1LkXSuRLqb2Zq5vQ1e3eHImrnWH3Zx1nXRvRxLcIKrGyClgINLLPQguff2Kff1BlRSPmH6oMiAoFY2uqbPqZIyTFlSSGuSFlwqtKfJDUUR/8TEv9EE0DTR5yphOpj8z1hGt3Mlb8bmcCos1ODjdsGQS9SHpO\
nbebjl616lT9MYAJxQWtoWQJugpKkgOGdq7dX2EncWZvRnVXbYSxGehTnZzEIitSJwRd9E9iSQDgKx06ZcSf+SfmdRrCJc3d4cfueFhHKiw5ZXdX+P9jk1bKrB0RS+hk2VG0/J+v5v/5CuY3HCdE5ECsL4uBY8FI\
W5HI78BUwTNaKxGzoO6Gdlx1yRs1qsCxLmCsZVwAXBdA8wtY4fZFAO6gmX6OjL3xiMY473EGaXZiDh5QGdGDLcbs52ofmeRg9yAmHKlrwu1DWolz6jo3jsLtd63vqM2HUcEZ4bo1MySSwlhPxdtE4kzKhtr2lbcX\
kMnyl3jpBUfbbaugLsAEVE3TmuVcP66iswpDyDuwA6JBy/USasZAJe/AYVIpvcJ9PUQdFkiYvk87DL8DgrmF4jxMm1dJQb80q7HWbFLtHmqACp3aF1KmyHaxyWZcfmfIRqDlSUSEsp7wbj0txJquicr9yhV+tYVT\
K569BXcqgqqglLajGwgfi7RnZKs/xcjOr7ewrFyxmjWlTcrfamHbj7awNz5Fw7zlbUx2jdC1rFdZ2ewvtLL6j7eyrFLiZUML2fQ+70zExjnewWhodCIGNqN6dXVywknMzBaOY2btPVlB7dFdVWs9y4fK7eQVOrVo\
dnchw1O2E3BMAa9FJsSf27KBFQa2nxwZeaqS6jleid86f+gVD8ReDQ/bFgwho2uSrZWf5EW+8/1k2FbOIYYrbAWpxxNSZLA2uoovlmjD8WQVWdpMqc60rXbDHpkij0yR6khDUlMmToKBEqpaf0/CMvR1osyXw4Uj\
HJogEIvqHcrlsnC+EAdI6DMW+vzkbWFeQSgu/7SlaOvDuAMMXjFbiU/UaKP1j8fnbAmfZ1xsXD0Kr4xL4LHCqoUu4HrImG29+uwytzkUVd1/73l5OS76VAw5lqudYlD5DmbDGtDKIvuddS6dhxlitqdGQ4JeHMQA\
qOzbsJsx9o/npD0UTwYotqUUhz85WpEkxDKSugWbF9krISAJeMssAxVHVSlIphL/NvhVlEdLW/VcNb4e/AdevKmgnVXgLalsg0ev6gMGQrYWIhRzfZXOXkNnlfexS7bkRQLldW39IacwHqhrl/KOh+raeju3eX9s\
6CGyeiXlHfW8w7LnESY9Z1C0spfi9b26FU6f5It8l4EolkrkvTvwFUy2jZ7AHaEGxkzWlEI9ZNRKMtJwbaBSJxPW6VSPf/KD+APpK/YH3JGNvmOg8hOsIb3vpFcjfdlf6bkEdh3bsDxIRFZ+xE0m4QS59gEsDPgu\
St8cYNrNZlqKVFTOAUcvlbmVAP5pkynr+wKkf3o85hvVa9lMM5uVUG7YcD7DcljxB3DYkLeiVe4B4s47fFCPP+geVL578ORK9yCdcB19iYW0q/XlGVecXMlQM5+h+g4mHVro9KUk95yTgOq/ZUDLzIbggp7MsQO5\
h8wOV7qH+0LsdIWHcIVWnNJxMlobWubdA9xbmZXrU1oWvAMlsJtqLPGDyFhlr92phaqefTyLLZWS9bmsYgcXHY0/ltGq6/XYK1BiZrGkwfJeB8oweHpMdgOdrRM8n3rWBxEHFhDL+ssK4+eICh9NF8HBmQA8llCe\
YAh0imNjNBCX29H+6GtRgU/vjcgp2z64D6eIsJZVav8MDrHzgpLjeKx070VAFUVQzBGZ9aKaf7+aMN6eflMsgm0vzt9BHwE3vZUbjVyvaHvNi+5VdRGiQV6U22hpRc6r6ogAheDCHr+ciXtUcaagnt3znytsKmnG\
XyT4IP7iEMQmkxBI1B4zfclVM3joisYnoFqsAZeHCh+qN4fspysXzZWlK3SjU20QkUDmHl3pnMtD0KH4kbjVVWbdwSAM95M+Os+5QzBRJDZ/w3mWqxI+4z8lFDPeRu5vqM+S4xhSLvO3j0HD0p72Dm8NESIO/08Q\
obN+sRoVwK8o0Jo7I/9p28A75BG4wHsGC/2jom5QPSCgqmof8ERsmnB6DPEzrppBe1rKWntQ41Z+ubNWVK6SV19NSjBiKaZDS6wIIuexW3inCOpiASID3lf6C/z6gUIEjY8w+wM/xu+5Sg5cQ85PQahixpxNytkF\
QpwhAV8CYRQ42uVTokuLKsE8hVrVMVXN9oplM66AtLmvX9l3k11p8wuVzbp9/00mireviAfVSu+Z+sd3pDdAhOxT2JktD1e8SK56kV71YnzVi+yqF/ngBTY0uqBlcoH+8+naFFAcEp7xJDteR+ACI1+3h7t2oO3d\
EeBndAELadQ/CKlN9NmoQzmmKmnXvrNfnxP6n5ODbYbo7zCNO/ml1C/dlkPR3w8/XQS008vl+6dke+mQTfD52SP4vqPdt0xQ8/oOMWgVbbnzQY0caaZTVniIDYl5+A0JecNqDDgQNkVgf6mM35POR8kXduRdf1BY\
2EnPvXDfOOU4PHhjq57lDBvsvtWNukPOwa1NrPRt5XBhKMcSWDcrLhQvt9b08eJMWmT/TXK653ZT1biSwxZYJFOjMji5wd5p9d3T70ZPv+czV8Xx4mkIetAsGL78Njty49SeYceSjeAWHzfTPLN5QFkB3ch5E8TD\
5h6YhplnlxsRX0PupWF5bFs5WpRvwc6P8YrosbKpgbGM1ZgvoZt00ayZbPUifOvZEPth0Y09f8HeUAQnvfkr2NooA04yQ2ZDrQ25Dx5uLj80cL4RH2oBEMn+pczrneVR9Yr+wizR/D4DBrcK9D8b0Q0KmuW9MLIE\
GBuPLe4BP94ewY4fMK22B2h5241RjQV85eYOwjoGRNtwNR4yqXemCNhjPDgq5o4Z2OOG7sBra6+0oP4hH4mq+YgSVQ5+zqUimPAtOtEVzwLOPRUGo6xbYD7WRv/lVWnmsoYHj18eH7/+8fI9QsInqlq+ZaV3rrj2\
LxPgwy2xA7nNBPVcL10slYQtvLop5h5E4tJlBXgokQ+f2GJ/4C0wY23x4PhY7zyUE2Q4V+GSUxS6ywmoQkT3pXfKzWgoMYBEKKQn2uIp7w/SkT98F7ldQ1R5AnMc7Pev1jBxgJdnBHh5RoCXZwR3iU06IixovXIh\
C7HICXt+sX93TbzqIhutyH/zbnvYxFNyo/WjGV8TQbs6Um3uPoR1YLlP3nv8kNWHdw/G0XnvC+1laGKKf0drkV2K9u6vUauv4IG4295Xo1P/ipqJvTnnMWdEmrv+zTzQVVNXD3bUWIr9GilxktsvakLsOTwrgoD5\
r3c/hhyCo5rjc1Cp77GDXAHDn4ZrLE/kvY7AbdXjr6TwlU+GFRmwcqL+B5A/xX+7Lyf7tE4e6iWzGJ54bR8lkudADLSYSK8hKdGmtkuFafBH9xw4Bzwz9Qz4LGOL7JTSZtpe8C8AQOG/U2bVdteRGA/YPiNr3C1V\
qvc5/dO2S0zjAPqGD8qKZvMep30+U3K6YDdcuoYEz2rzJT14iATv1uBtXUyEw+GOWsopdY+2UnSkAyI0VtQxN6jIly//Lqa+Gp64ZbqUji+MHneZuOQZ0XvC6fFYE+rflOI0dBDze8zALVtojAbGfCa6pS+x9LmV\
+Be5Le0hvOiJI3LyEV7vsvmY7U5yfHONnQiUyOkjhsva982d0SiVwznN1IOAVN9OyWarnRz2ZCfGZGsUrR8lDB0epm3RsbNna9s+PygKIbzHM9NjD6wQJJ2iH4+QD4rHemc33NoR8gI1r6LiY7knST5o0O8Aa67Y\
MNXFswdgrjt+Pz9D7ncGCg0HbDFjOQ7E3jVc86Oyb9muaufutXxHRGum/QOOcDsLaREgQ0Mpop/ped0OyTv0ndgzukkao+GSwLYS9oM43YBn3Tyxh5jN8lUe2kihv0k7rwt7bd+G8NeAd1azjULPttgD1J4BauHy\
DP2Mz6a02srLSNDsXziGJ44QHUVPDliZig/8HBVRdHw8/Wb9mVga6JFupVj7kP4jtJJv8IYOU/x9C3qpvRXOgFa4GSvLG0d8hOSDjOEDKJesVAhppJY8DvkA89jRShA+bqZuAnsXgZybH99j3pG7T8ZiIRCm3F2l\
FOGtMqtBw1KJG58M2gA8IhMbRSQbCsIuM8eH0jh67LgS9lbQKU59Gb1e4y4Lae1TtkPfWVy6G6+WCYVIuk3AGQvQ4JYEYNdlNAm7D/ibPnjpexWv/cap3zj3G5d+433/wkA9uECwGLb9W+Gw2lvbu92+xF+pfVZ/\
4d0hWBRysZtyJToOrXzXDioHwC8oPdR4oPk6Rejf7ga3MuD1eVARJZlIDALYW1VliAZHrmloOahQmNLywhIK4Z8zw7Txj3zhw0pVFfA5SE3KzhX5Ppb9A95NaooVXeuG/VfKK2w/g6kOxMfI9uTTkpP/La7ytQsm\
6kzuDiI4IiXhgyLBwBvZ2P2BpW6S4VTl4S+y76esJq6Xlle6qtVu4ktOlxt38cyPZAxwJyFhnUre0G3m+vIAZkrvHMMRP/ItES2DqePh1M8oNEcBFZteHr7kxEfZPGOZoec/wPOS9xc8aCVMwke0uY6Jdqj9gzgG\
NUl7+rmwUKLFhMautLtu3J5hI0iV46/d5LPGu9cuXoHDyi4AIyG+A41uknlGJwflziFF1HlA66xwr9aelsOzDuPRYYRpl5QvcYswl+j441EW2gNvB35GGtJQCWvoqtg4gIChRCf0Dk+OhZiQIVOrbudLPNcKwYRN\
EPKTvEtL0N3TS7fD4e4lXmdR2tMfF3LBjl2gVOi0LtzuOGiG+wn3c0+9RhFcl4JaKK88UqV85UtTuwO/BSt6Dkw/2lso5IRhu0LZf1gZNWIKRP0YOWfojeljqMXKUO1Q5fawDefzrXCjADV9AdIrvKiWbj8UIO7I\
WdEnI+5TLPepOd9bS+osOoIUVR2847v9PH3Ko4yXeOViiVFIWEmXsFZ0TieS1EhFz28glPMPnovxRE/8a/ZGKsdNBZzctIedooRSszjOBp+rGW+s8ykpW4gxE/8GDuzE27gbsyURon+noP3S4JfH5zIzfk3Z6R7Q\
8n296nvn6tk+VB+FzZs7I7yC+Pu35+UCLiJWUZ7qNM/TtHvT/Hy++MV/qLuHdXleyo3FNkuBOmTs7ZJ5ewBUYMJ/iIovOSjAu1cjr4Ha0b7JuYwIL4qtKejFRt3aN3yrMXXIvIbX4ZL38DGS1V6j4nBzeQav8YLi\
1OFjvuySLjquSD12jX376+oRKW+y+jO68o0bsB/eKnlT8yE1RFRLEZaSS3QxJfLhSa9uoEyNV70pjG38h3yX7tczjy5YLS5di9ZDLm7UCDWKsX1zy454/umQ/u4G1KkzIE/sr7G3hqW9kmFaenixzuDobP9QpV8g\
RGVmvdbgHmg1mBvPjEWDG/Tstbmu0buAsBzkcwZjGrXitm81+H54A3g8aCeDdjpoZ4O2HrRNv60G8Kje9yO/0fvSv0ZcnSzvcf1pf+qadvyJPHQdT13HY8N2dk07v6atP9g+/0Dr5w+0+veMr2qbD7YXH5Kda/8+\
VW6zT8LR+Sesewh5e40WGECuBpCoARZVb7w1v3HLb/SG7R1l2/cbL/xGjyBvB5pmAGc5aJtBu0lWSIn6C6X4z9YCv1dL/F4t8nu1zO/VQte1P/FPRS6bZiUwR8mj46VjlrTU7qEsGGuNy4OtsnlLlOmt9CY7vb6P\
nORxF3HqX/8XohBRvg==\
""")))
ESP32ROM.STUB_CODE = eval(zlib.decompress(base64.b64decode(b"""
eNqNWnt31LgV/yqOYfJaskeyPbZEz0IS6BBg2xIoIdDp2bFlOyFbcoDOQjgL/ezVfVnyzNDtHxNkPa/u43cf4vedZXez3LmbNDvzm94k/k9xH1oKW+XR/Eb5ptX+s/W/fn7jVEKdxsyX/i+01O2zExrFmfX/M1PD\
foomyE8roUBFrehnhKJu6ocd7WTgzIbayvepbDh7D9bEVG2tkZe+pR35c3e+PPu4Tj9uA5vrTG7iv4tkV22+iVKHRGoX6NSlp6QONHdtxDO3cqa1dGboQCLOvnyfe8NPh7ZRYbUDofb3aQP5KbpHJO4toSZLYQjo\
fuAbU7iJCTfpahqtp8L982NiUS+syo9gWxh65edBb3OeAk0vQdSeIDeFGRlvCqLLgdvp4bn/1BPfn0ciVtyGa01hh9PQGWQFfJvSijYbDR5fjiR9gvxc8qbm+CRlUTt1t4CNjm26wmhD/6JeIrlqRUnxw6qI18oc\
cstE4kBKi/j78FBaJ9SNa/Ro32LYN0gK2QvcqaBxJI3hludgD08egNrp0cCu/1MmyZKlmYGJuvIWDPnJXUeTGx3aA4/rbJsb+FtOWLqgZ34vx5oBet3F+t5EezaRMC3928CP28NZjm3PelnqZnQscZqgigZBy6HP\
cl8wnuwfzHRPj3Gju5zwSHQAUq4jCnk/W8EZ9+Llhu6K91PBnlu07YoMXtsZoZJSX/00v8T4EQ1s9iPLsfXy1mvmDx3FsM/3kcIGglo9gOKSr56PmHLOxzbpNm8M0OlF2FoWObQLQQqvb26sC69pyRiNnh2AajH+\
+T81bnBeqKljZcvB0sHGizcvns3nfo4pZXVHHCKjfOhX+xEtXDa3iXcIOhm5AWF7DK8gFl0Al/LEE9pkCWMII0IXWbNxd1PSKVfs/R2puvvyNfwDJMNVwfDGODB2Tmi679GIvHe5f3Ib7w/zU+JEHZyWcLZuCahN\
BP6Bqp/m18HddI7sCD2AJk2ts4DvYD9aoE8Tc9ou8i1ZBKL5qi0H31wnMY5m7BebbItngor363gc87JWt0jJVv0TKpC4GyVevNY3EZnsDvF6Sq76BGZmV/CJsjpKoZ0jZWCRNTgUu79PxxqaE+IAY5/svWF9Qa06\
mF/z/m7K9NoRvXcIawYHHggC7EWxZaSEOK0jWcB424xjkRFjZI5jXc/He+Na2dPwPtX/2KflOcX6nPXYgG5yF/YuA4LIfvKtm5QDq0bAK2Ns3RirSfs8/vAA14K6T73+t+ZHtgTrom7wnHBf/1FubRENAFUo4Ch+\
HBlpGXAS3H7JrgSt6NkDQPSW4xGRkA7T4p3AmuAEmd+wIa5Rka+ufZIeInyeOVJWNAg23SZaDcBe1+wTug0yhH4bBT2NrJkELUUNIb4jOrlVKQw2FGknnOjaP9KIy1he7+OPZfxxE38ASF0w1gGes4nAEZdsLFsg\
Nhvhg1yx7ul+xjwGdfgYOIVWWt6ZX4MfMc0Fz/uO5PB2CECPPMOhM5ux20ChV/GUeOlf4JRTQS+QfSOkvbyiRRI2qvIgitAGQR06YjBdI2QEpDyLS9G/MgGBsLMBgFl3NozlUYSBXJg+h9uX74gMW5/O5h+Fkonw\
Y+FPxCNYZ9H9sQYixPGJRAvxuNGJl3FdMVHNmnFdpVcPK0JT5zhwwNOetZsvbSo/XOd0RBfnEcyzttgsPIlHNAfaGPRlu+j1f2Mtq5A/kIGVs/k1XHhKE+vsqcBUT0cTPn6lYLLhWKuttmfzHTIyZEn/HgaTsVnW\
a9DahavgUrXJPpnj08Bxsu874K5R5Pn3jO85EKXhXLhQq18Rx01HQdaQBG1AWoz5u/t/Ozl6TMZHwfP9AmP65SFrEKUJmG4U91cSvA3ZIWiiGeH54Sh93UgDQebw4QW5E+1QRMnKcDyTENEt4Spv0oR06eslH90x\
2JxLZHj4r310JSZjj+LcHWz9TP8UlBPiYtBiS5dQJFXvac4HzPqZvC5O1QUth6xFc4gr8aRn6nUKPsscc0AikLAGMFvk0DARa9ke0dPinseYVTxNK5Bx1XGciCc8Jfx2OplEoeFUsOGIqemErI6ypyEkB7m7fYg3\
3ZsheHscHGC3Fu/CarxgSooOHVGqmHtVTibJqICSM0R+z6qbDexwdhRnGk4tHAEQWJXLklugQSk046JBQuAC4Zxi6/seqAC/HV+x1c/5yKizERRgrtdSGnGDxfYbOIQZS+f3a/Mfjng3QO0GZXt1e7jXJec/5afR\
dW84qHKkBtz7NvTqLD3iy4PbQj1xu9iHUe/pfPn2dDuEg9qVF5QM6H4W7q+GDfKGG1iegrqFXqRJfwyNXTDSKOvP9k5n4yqRdmnqj+zGmmUa0U4gdSJucziS7Wh8KsfgWcKBHUYDFQVIWsfhiiBfHzq7bhI6MVtC\
M5hJPt3E7ML6zg6VHBSHthYvdZH9Fe/AyWzNENSXn5K+lP5OOpGf/bb0o5fxfTVO6CXjLn/HzzO+bEnQzaEpugbWK5DZClUEYRcoy/5TIMw00Q6IXn1YiBPysG0TbTv7IkRqEeDoUFdaOnEXbGDofCCTocOTwzlm\
v0Lwj3xoybUFXqJH00z5dNCdiDDHhNmCtROTFY3OrzzwZy6lT1EfJfd42T5SapyycuAkHsujQ2FAz6jCIexCW8Hd70lOcIA2X4qn2OaIRsTJvEb1M2PvB6pYI1AsZskiJR3X5T4nqeV/ooQKfpCBGv1xFVnOKAnS\
3TEbGkbTGbk/526zpuhgnLVagD89o7t4DHrDFhWlkDX/eqx6Xm04FGtQbvEQDhpOfMQ13LWtiHi3YR8IgnSzTvyZIOaMAhk1hPZSWAnMnl0Fne+zmM29BPU8KB4XDXR1tG1EWotIK5TeDtNU+YvUuTBKwaLYQ4aR\
OlY0xwm9tmIp/+Tah6HC9U5HJj6UxPINcRUoXYFk3oFFj+C4P0V+ulw9rUZ+vJQ3hio6LyF2ac5MHMf6FLZ6ZNUV+TQsV+u3kLi32Q8wGQ8BW8gP+M7l41DU1BmtE89UZ5RWC2ngZWwtbD4hF7ez+5qgHmqxPRad\
etJZO4D5Yg/qKJ6xriKLtpyCNALtzLpBszNJ6LFwa2lW04DC15bT2WqffPzgZ6ZcsYB2Id6gZJ/DDKvxuSNhpYEeLBOXicyvIsI130ZRI1kQKuPpao9qQ8SsWT5OcjC9bQMrxVWvMHNQqR/lIcbIuLkIKgdZsB9v\
kOW7EIkhujD0GkJCMiDOvVQ4mvButj1Y0olMrATKkxUbQuFbBmuSCa2Kh9asmLPKbrTZ6Byu1TJrMQmI1c1IzMoPaZFSicIY1ivgfA24YSEitJbDpYizzym+Q0F070CdWnxjklnwlFD99GfaspMEZXwj2Ki7O+jt\
AVX03FAl2ycn41m/fJoiGddP2d3DHWxCz3ikrRl9oNuxt5mLjmoIGMZGUcOAebEPstnYgQ0BZDmO15RahbC+T/hNBAIJK9UC39PJ50QCsjPG7WolnOrlYaLUF2BL0xD8nCGxQ0yFjynbJUtfVyHjkReUtcgAhp+t\
OXcNUUQILnQU+2jFod/w6iMveh0nBKxcwUXVHDB2UbmhG8XqAEhwRcMeSEvhDLFj0U4ZDug5iovRDQEy1nXwjapkEIczpqtEcDSb80NPRHSYM4uK4sws4ge+NwzuikI+cM7gEyZURhX3j0XaTmr0EpOncUAOeAr3\
ArqbhKZhDK9EA0DnJ9+Y0f007Dho2eZQv5dQv09S2afh2En01zKLW34GajBE+EA8BOTs8FXn7QfgzGeQw5f6BkI5Zz+zHtjoKQAQoOWCnNNS9QJMbPUHCvF6fjsC6MEijuOUmUvClh9IeklqDCUWcC8nfVwCa/J1\
wXqIugZ1wPfOD+KxSIMaC73iGBt+sKrheaI2XDyCuWi57O1qgI+GH+ywA2sV78OuWMqoiTp8IhDmtuE5Ap9bohxEaqHEsMkyZtQXjgkgnyhvQKzuM/nHqkdH+Zkde0mPnVBig2IglkcH+K32XoAt3RM1OYMIo99l\
OgC4mmxwcnElZCXAd3bvxQye3YLegaKfUZGgYUdrN5mY1CPY1K1an4P92Xo/KCkm8t1lxe+RwL0uLt2a9o/NGjBjdU7HTlAi3CYyKQnkpNy8Rq9oLj8LoQMEWZoZmWDf+0z+slpgeh0eppuCgZPXO85w+FlCEIRa\
2QRELfWfOHQtB3c74wAHppnPcOELcK2vQJqPGEBBTpEfBwsc3+cdZ5fXC36aU+W34W12Jy7t/LrOCnm9U1JXgcLFDohrvZRCFXu4f/kbRsevJTR+dQkHoepmW6+g922IcOSBweqXUYxiLocE1ZnFLlD+Dpef8qYW\
X0ttXMmhabDrUHElU0gheOW0FBZvfr+x8l41xNK7jLhswxiQuCxwG6OBMuYUVum+UomLTtnmAGK6nXKrDGXi0aOj2rslZ8E2LuWSnypX4tnWoTe38f+FwrkHXKOKrjec0H4luB0u04yWpoGQsJyXjNm1cyfB/6v2\
y7+X9Uf4H2taVUVuc2WMH+mulx+/DJ1FUUJnWy9r/q9tUcF5h0fijfIqL0udffsvV73qkg==\
""")))
def _main():
try:
main()
except FatalError as e:
print('\nA fatal error occurred: %s' % e)
sys.exit(2)
if __name__ == '__main__':
_main()
|
PypiClean
|
/jlcards-0.0.8-py3-none-any.whl/jlcards-0.0.8.data/data/share/jupyter/labextensions/jlmc/static/172.5c064f9af0b278ea71e8.js
|
"use strict";(self.webpackChunkjlmc=self.webpackChunkjlmc||[]).push([[172],{18172:(t,e,r)=>{function n(t){for(var e=arguments.length,r=Array(e>1?e-1:0),n=1;n<e;n++)r[n-1]=arguments[n];throw Error("[Immer] minified error nr: "+t+(r.length?" "+r.map((function(t){return"'"+t+"'"})).join(","):"")+". Find the full error at: https://bit.ly/3cXEKWf")}function o(t){return!!t&&!!t[Z]}function i(t){return!!t&&(function(t){if(!t||"object"!=typeof t)return!1;var e=Object.getPrototypeOf(t);if(null===e)return!0;var r=Object.hasOwnProperty.call(e,"constructor")&&e.constructor;return r===Object||"function"==typeof r&&Function.toString.call(r)===et}(t)||Array.isArray(t)||!!t[Y]||!!t.constructor[Y]||h(t)||v(t))}function u(t){return o(t)||n(23,t),t[Z].t}function a(t,e,r){void 0===r&&(r=!1),0===c(t)?(r?Object.keys:rt)(t).forEach((function(n){r&&"symbol"==typeof n||e(n,t[n],t)})):t.forEach((function(r,n){return e(n,r,t)}))}function c(t){var e=t[Z];return e?e.i>3?e.i-4:e.i:Array.isArray(t)?1:h(t)?2:v(t)?3:0}function f(t,e){return 2===c(t)?t.has(e):Object.prototype.hasOwnProperty.call(t,e)}function s(t,e){return 2===c(t)?t.get(e):t[e]}function l(t,e,r){var n=c(t);2===n?t.set(e,r):3===n?(t.delete(e),t.add(r)):t[e]=r}function p(t,e){return t===e?0!==t||1/t==1/e:t!=t&&e!=e}function h(t){return H&&t instanceof Map}function v(t){return L&&t instanceof Set}function y(t){return t.o||t.t}function d(t){if(Array.isArray(t))return Array.prototype.slice.call(t);var e=nt(t);delete e[Z];for(var r=rt(e),n=0;n<r.length;n++){var o=r[n],i=e[o];!1===i.writable&&(i.writable=!0,i.configurable=!0),(i.get||i.set)&&(e[o]={configurable:!0,writable:!0,enumerable:i.enumerable,value:t[o]})}return Object.create(Object.getPrototypeOf(t),e)}function g(t,e){return void 0===e&&(e=!1),P(t)||o(t)||!i(t)||(c(t)>1&&(t.set=t.add=t.clear=t.delete=b),Object.freeze(t),e&&a(t,(function(t,e){return g(e,!0)}),!0)),t}function b(){n(2)}function P(t){return null==t||"object"!=typeof t||Object.isFrozen(t)}function m(t){var e=ot[t];return e||n(18,t),e}function O(t,e){ot[t]||(ot[t]=e)}function w(){return B}function A(t,e){e&&(m("Patches"),t.u=[],t.s=[],t.v=e)}function j(t){D(t),t.p.forEach(k),t.p=null}function D(t){t===B&&(B=t.l)}function S(t){return B={p:[],l:B,h:t,m:!0,_:0}}function k(t){var e=t[Z];0===e.i||1===e.i?e.j():e.O=!0}function _(t,e){e._=e.p.length;var r=e.p[0],o=void 0!==t&&t!==r;return e.h.g||m("ES5").S(e,t,o),o?(r[Z].P&&(j(e),n(4)),i(t)&&(t=E(e,t),e.l||M(e,t)),e.u&&m("Patches").M(r[Z],t,e.u,e.s)):t=E(e,r,[]),j(e),e.u&&e.v(e.u,e.s),t!==V?t:void 0}function E(t,e,r){if(P(e))return e;var n=e[Z];if(!n)return a(e,(function(o,i){return x(t,n,e,o,i,r)}),!0),e;if(n.A!==t)return e;if(!n.P)return M(t,n.t,!0),n.t;if(!n.I){n.I=!0,n.A._--;var o=4===n.i||5===n.i?n.o=d(n.k):n.o;a(3===n.i?new Set(o):o,(function(e,i){return x(t,n,o,e,i,r)})),M(t,o,!1),r&&t.u&&m("Patches").R(n,r,t.u,t.s)}return n.o}function x(t,e,r,n,u,a){if(o(u)){var c=E(t,u,a&&e&&3!==e.i&&!f(e.D,n)?a.concat(n):void 0);if(l(r,n,c),!o(c))return;t.m=!1}if(i(u)&&!P(u)){if(!t.h.F&&t._<1)return;E(t,u),e&&e.A.l||M(t,u)}}function M(t,e,r){void 0===r&&(r=!1),t.h.F&&t.m&&g(e,r)}function z(t,e){var r=t[Z];return(r?y(r):t)[e]}function F(t,e){if(e in t)for(var r=Object.getPrototypeOf(t);r;){var n=Object.getOwnPropertyDescriptor(r,e);if(n)return n;r=Object.getPrototypeOf(r)}}function I(t){t.P||(t.P=!0,t.l&&I(t.l))}function R(t){t.o||(t.o=d(t.t))}function C(t,e,r){var n=h(e)?m("MapSet").N(e,r):v(e)?m("MapSet").T(e,r):t.g?function(t,e){var 
r=Array.isArray(t),n={i:r?1:0,A:e?e.A:w(),P:!1,I:!1,D:{},l:e,t,k:null,o:null,j:null,C:!1},o=n,i=it;r&&(o=[n],i=ut);var u=Proxy.revocable(o,i),a=u.revoke,c=u.proxy;return n.k=c,n.j=a,c}(e,r):m("ES5").J(e,r);return(r?r.A:w()).p.push(n),n}function K(t){return o(t)||n(22,t),function t(e){if(!i(e))return e;var r,n=e[Z],o=c(e);if(n){if(!n.P&&(n.i<4||!m("ES5").K(n)))return n.t;n.I=!0,r=N(e,o),n.I=!1}else r=N(e,o);return a(r,(function(e,o){n&&s(n.t,e)===o||l(r,e,t(o))})),3===o?new Set(r):r}(t)}function N(t,e){switch(e){case 2:return new Map(t);case 3:return Array.from(t)}return d(t)}function W(){function t(t,e){var r=i[t];return r?r.enumerable=e:i[t]=r={configurable:!0,enumerable:e,get:function(){var e=this[Z];return it.get(e,t)},set:function(e){var r=this[Z];it.set(r,t,e)}},r}function e(t){for(var e=t.length-1;e>=0;e--){var o=t[e][Z];if(!o.P)switch(o.i){case 5:n(o)&&I(o);break;case 4:r(o)&&I(o)}}}function r(t){for(var e=t.t,r=t.k,n=rt(r),o=n.length-1;o>=0;o--){var i=n[o];if(i!==Z){var u=e[i];if(void 0===u&&!f(e,i))return!0;var a=r[i],c=a&&a[Z];if(c?c.t!==u:!p(a,u))return!0}}var s=!!e[Z];return n.length!==rt(e).length+(s?0:1)}function n(t){var e=t.k;if(e.length!==t.t.length)return!0;var r=Object.getOwnPropertyDescriptor(e,e.length-1);return!(!r||r.get)}var i={};O("ES5",{J:function(e,r){var n=Array.isArray(e),o=function(e,r){if(e){for(var n=Array(r.length),o=0;o<r.length;o++)Object.defineProperty(n,""+o,t(o,!0));return n}var i=nt(r);delete i[Z];for(var u=rt(i),a=0;a<u.length;a++){var c=u[a];i[c]=t(c,e||!!i[c].enumerable)}return Object.create(Object.getPrototypeOf(r),i)}(n,e),i={i:n?5:4,A:r?r.A:w(),P:!1,I:!1,D:{},l:r,t:e,k:o,o:null,O:!1,C:!1};return Object.defineProperty(o,Z,{value:i,writable:!0}),o},S:function(t,r,i){i?o(r)&&r[Z].A===t&&e(t.p):(t.u&&function t(e){if(e&&"object"==typeof e){var r=e[Z];if(r){var o=r.t,i=r.k,u=r.D,c=r.i;if(4===c)a(i,(function(e){e!==Z&&(void 0!==o[e]||f(o,e)?u[e]||t(i[e]):(u[e]=!0,I(r)))})),a(o,(function(t){void 0!==i[t]||f(i,t)||(u[t]=!1,I(r))}));else if(5===c){if(n(r)&&(I(r),u.length=!0),i.length<o.length)for(var s=i.length;s<o.length;s++)u[s]=!1;else for(var l=o.length;l<i.length;l++)u[l]=!0;for(var p=Math.min(i.length,o.length),h=0;h<p;h++)void 0===u[h]&&t(i[h])}}}}(t.p[0]),e(t.p))},K:function(t){return 4===t.i?r(t):n(t)}})}function U(){function t(e){if(!i(e))return e;if(Array.isArray(e))return e.map(t);if(h(e))return new Map(Array.from(e.entries()).map((function(e){return[e[0],t(e[1])]})));if(v(e))return new Set(Array.from(e).map(t));var r=Object.create(Object.getPrototypeOf(e));for(var n in e)r[n]=t(e[n]);return f(e,Y)&&(r[Y]=e[Y]),r}function e(e){return o(e)?t(e):e}var r="add";O("Patches",{$:function(e,o){return o.forEach((function(o){for(var i=o.path,u=o.op,a=e,f=0;f<i.length-1;f++){var l=c(a),p=""+i[f];0!==l&&1!==l||"__proto__"!==p&&"constructor"!==p||n(24),"function"==typeof a&&"prototype"===p&&n(24),"object"!=typeof(a=s(a,p))&&n(15,i.join("/"))}var h=c(a),v=t(o.value),y=i[i.length-1];switch(u){case"replace":switch(h){case 2:return a.set(y,v);case 3:n(16);default:return a[y]=v}case r:switch(h){case 1:return a.splice(y,0,v);case 2:return a.set(y,v);case 3:return a.add(v);default:return a[y]=v}case"remove":switch(h){case 1:return a.splice(y,1);case 2:return a.delete(y);case 3:return a.delete(o.value);default:return delete a[y]}default:n(17,u)}})),e},R:function(t,n,o,i){switch(t.i){case 0:case 4:case 2:return function(t,n,o,i){var u=t.t,c=t.o;a(t.D,(function(t,a){var l=s(u,t),p=s(c,t),h=a?f(u,t)?"replace":r:"remove";if(l!==p||"replace"!==h){var 
v=n.concat(t);o.push("remove"===h?{op:h,path:v}:{op:h,path:v,value:p}),i.push(h===r?{op:"remove",path:v}:"remove"===h?{op:r,path:v,value:e(l)}:{op:"replace",path:v,value:e(l)})}}))}(t,n,o,i);case 5:case 1:return function(t,n,o,i){var u=t.t,a=t.D,c=t.o;if(c.length<u.length){var f=[c,u];u=f[0],c=f[1];var s=[i,o];o=s[0],i=s[1]}for(var l=0;l<u.length;l++)if(a[l]&&c[l]!==u[l]){var p=n.concat([l]);o.push({op:"replace",path:p,value:e(c[l])}),i.push({op:"replace",path:p,value:e(u[l])})}for(var h=u.length;h<c.length;h++){var v=n.concat([h]);o.push({op:r,path:v,value:e(c[h])})}u.length<c.length&&i.push({op:"replace",path:n.concat(["length"]),value:u.length})}(t,n,o,i);case 3:return function(t,e,n,o){var i=t.t,u=t.o,a=0;i.forEach((function(t){if(!u.has(t)){var i=e.concat([a]);n.push({op:"remove",path:i,value:t}),o.unshift({op:r,path:i,value:t})}a++})),a=0,u.forEach((function(t){if(!i.has(t)){var u=e.concat([a]);n.push({op:r,path:u,value:t}),o.unshift({op:"remove",path:u,value:t})}a++}))}(t,n,o,i)}},M:function(t,e,r,n){r.push({op:"replace",path:[],value:e===V?void 0:e}),n.push({op:"replace",path:[],value:t.t})}})}function $(){function t(t,e){function r(){this.constructor=t}u(t,e),t.prototype=(r.prototype=e.prototype,new r)}function e(t){t.o||(t.D=new Map,t.o=new Map(t.t))}function r(t){t.o||(t.o=new Set,t.t.forEach((function(e){if(i(e)){var r=C(t.A.h,e,t);t.p.set(e,r),t.o.add(r)}else t.o.add(e)})))}function o(t){t.O&&n(3,JSON.stringify(y(t)))}var u=function(t,e){return(u=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,e){t.__proto__=e}||function(t,e){for(var r in e)e.hasOwnProperty(r)&&(t[r]=e[r])})(t,e)},c=function(){function r(t,e){return this[Z]={i:2,l:e,A:e?e.A:w(),P:!1,I:!1,o:void 0,D:void 0,t,k:this,C:!1,O:!1},this}t(r,Map);var n=r.prototype;return Object.defineProperty(n,"size",{get:function(){return y(this[Z]).size}}),n.has=function(t){return y(this[Z]).has(t)},n.set=function(t,r){var n=this[Z];return o(n),y(n).has(t)&&y(n).get(t)===r||(e(n),I(n),n.D.set(t,!0),n.o.set(t,r),n.D.set(t,!0)),this},n.delete=function(t){if(!this.has(t))return!1;var r=this[Z];return o(r),e(r),I(r),r.D.set(t,!1),r.o.delete(t),!0},n.clear=function(){var t=this[Z];o(t),y(t).size&&(e(t),I(t),t.D=new Map,a(t.t,(function(e){t.D.set(e,!1)})),t.o.clear())},n.forEach=function(t,e){var r=this;y(this[Z]).forEach((function(n,o){t.call(e,r.get(o),o,r)}))},n.get=function(t){var r=this[Z];o(r);var n=y(r).get(t);if(r.I||!i(n))return n;if(n!==r.t.get(t))return n;var u=C(r.A.h,n,r);return e(r),r.o.set(t,u),u},n.keys=function(){return y(this[Z]).keys()},n.values=function(){var t,e=this,r=this.keys();return(t={})[tt]=function(){return e.values()},t.next=function(){var t=r.next();return t.done?t:{done:!1,value:e.get(t.value)}},t},n.entries=function(){var t,e=this,r=this.keys();return(t={})[tt]=function(){return e.entries()},t.next=function(){var t=r.next();if(t.done)return t;var n=e.get(t.value);return{done:!1,value:[t.value,n]}},t},n[tt]=function(){return this.entries()},r}(),f=function(){function e(t,e){return this[Z]={i:3,l:e,A:e?e.A:w(),P:!1,I:!1,o:void 0,t,k:this,p:new Map,O:!1,C:!1},this}t(e,Set);var n=e.prototype;return Object.defineProperty(n,"size",{get:function(){return y(this[Z]).size}}),n.has=function(t){var e=this[Z];return o(e),e.o?!!e.o.has(t)||!(!e.p.has(t)||!e.o.has(e.p.get(t))):e.t.has(t)},n.add=function(t){var e=this[Z];return o(e),this.has(t)||(r(e),I(e),e.o.add(t)),this},n.delete=function(t){if(!this.has(t))return!1;var e=this[Z];return 
o(e),r(e),I(e),e.o.delete(t)||!!e.p.has(t)&&e.o.delete(e.p.get(t))},n.clear=function(){var t=this[Z];o(t),y(t).size&&(r(t),I(t),t.o.clear())},n.values=function(){var t=this[Z];return o(t),r(t),t.o.values()},n.entries=function(){var t=this[Z];return o(t),r(t),t.o.entries()},n.keys=function(){return this.values()},n[tt]=function(){return this.values()},n.forEach=function(t,e){for(var r=this.values(),n=r.next();!n.done;)t.call(e,n.value,n.value,this),n=r.next()},e}();O("MapSet",{N:function(t,e){return new c(t,e)},T:function(t,e){return new f(t,e)}})}function J(){W(),$(),U()}function T(t){return t}function X(t){return t}r.r(e),r.d(e,{default:()=>dt,Immer:()=>at,applyPatches:()=>ht,castDraft:()=>T,castImmutable:()=>X,createDraft:()=>vt,current:()=>K,enableAllPlugins:()=>J,enableES5:()=>W,enableMapSet:()=>$,enablePatches:()=>U,finishDraft:()=>yt,freeze:()=>g,immerable:()=>Y,isDraft:()=>o,isDraftable:()=>i,nothing:()=>V,original:()=>u,produce:()=>ft,produceWithPatches:()=>st,setAutoFreeze:()=>lt,setUseProxies:()=>pt});var q,B,G="undefined"!=typeof Symbol&&"symbol"==typeof Symbol("x"),H="undefined"!=typeof Map,L="undefined"!=typeof Set,Q="undefined"!=typeof Proxy&&void 0!==Proxy.revocable&&"undefined"!=typeof Reflect,V=G?Symbol.for("immer-nothing"):((q={})["immer-nothing"]=!0,q),Y=G?Symbol.for("immer-draftable"):"__$immer_draftable",Z=G?Symbol.for("immer-state"):"__$immer_state",tt="undefined"!=typeof Symbol&&Symbol.iterator||"@@iterator",et=""+Object.prototype.constructor,rt="undefined"!=typeof Reflect&&Reflect.ownKeys?Reflect.ownKeys:void 0!==Object.getOwnPropertySymbols?function(t){return Object.getOwnPropertyNames(t).concat(Object.getOwnPropertySymbols(t))}:Object.getOwnPropertyNames,nt=Object.getOwnPropertyDescriptors||function(t){var e={};return rt(t).forEach((function(r){e[r]=Object.getOwnPropertyDescriptor(t,r)})),e},ot={},it={get:function(t,e){if(e===Z)return t;var r=y(t);if(!f(r,e))return function(t,e,r){var n,o=F(e,r);return o?"value"in o?o.value:null===(n=o.get)||void 0===n?void 0:n.call(t.k):void 0}(t,r,e);var n=r[e];return t.I||!i(n)?n:n===z(t.t,e)?(R(t),t.o[e]=C(t.A.h,n,t)):n},has:function(t,e){return e in y(t)},ownKeys:function(t){return Reflect.ownKeys(y(t))},set:function(t,e,r){var n=F(y(t),e);if(null==n?void 0:n.set)return n.set.call(t.k,r),!0;if(!t.P){var o=z(y(t),e),i=null==o?void 0:o[Z];if(i&&i.t===r)return t.o[e]=r,t.D[e]=!1,!0;if(p(r,o)&&(void 0!==r||f(t.t,e)))return!0;R(t),I(t)}return t.o[e]===r&&"number"!=typeof r&&(void 0!==r||e in t.o)||(t.o[e]=r,t.D[e]=!0,!0)},deleteProperty:function(t,e){return void 0!==z(t.t,e)||e in t.t?(t.D[e]=!1,R(t),I(t)):delete t.D[e],t.o&&delete t.o[e],!0},getOwnPropertyDescriptor:function(t,e){var r=y(t),n=Reflect.getOwnPropertyDescriptor(r,e);return n?{writable:!0,configurable:1!==t.i||"length"!==e,enumerable:n.enumerable,value:r[e]}:n},defineProperty:function(){n(11)},getPrototypeOf:function(t){return Object.getPrototypeOf(t.t)},setPrototypeOf:function(){n(12)}},ut={};a(it,(function(t,e){ut[t]=function(){return arguments[0]=arguments[0][0],e.apply(this,arguments)}})),ut.deleteProperty=function(t,e){return it.deleteProperty.call(this,t[0],e)},ut.set=function(t,e,r){return it.set.call(this,t[0],e,r,t[0])};var at=function(){function t(t){var e=this;this.g=Q,this.F=!0,this.produce=function(t,r,o){if("function"==typeof t&&"function"!=typeof r){var u=r;r=t;var a=e;return function(t){var e=this;void 0===t&&(t=u);for(var n=arguments.length,o=Array(n>1?n-1:0),i=1;i<n;i++)o[i-1]=arguments[i];return a.produce(t,(function(t){var 
n;return(n=r).call.apply(n,[e,t].concat(o))}))}}var c;if("function"!=typeof r&&n(6),void 0!==o&&"function"!=typeof o&&n(7),i(t)){var f=S(e),s=C(e,t,void 0),l=!0;try{c=r(s),l=!1}finally{l?j(f):D(f)}return"undefined"!=typeof Promise&&c instanceof Promise?c.then((function(t){return A(f,o),_(t,f)}),(function(t){throw j(f),t})):(A(f,o),_(c,f))}if(!t||"object"!=typeof t){if((c=r(t))===V)return;return void 0===c&&(c=t),e.F&&g(c,!0),c}n(21,t)},this.produceWithPatches=function(t,r){return"function"==typeof t?function(r){for(var n=arguments.length,o=Array(n>1?n-1:0),i=1;i<n;i++)o[i-1]=arguments[i];return e.produceWithPatches(r,(function(e){return t.apply(void 0,[e].concat(o))}))}:[e.produce(t,r,(function(t,e){n=t,o=e})),n,o];var n,o},"boolean"==typeof(null==t?void 0:t.useProxies)&&this.setUseProxies(t.useProxies),"boolean"==typeof(null==t?void 0:t.autoFreeze)&&this.setAutoFreeze(t.autoFreeze)}var e=t.prototype;return e.createDraft=function(t){i(t)||n(8),o(t)&&(t=K(t));var e=S(this),r=C(this,t,void 0);return r[Z].C=!0,D(e),r},e.finishDraft=function(t,e){var r=(t&&t[Z]).A;return A(r,e),_(void 0,r)},e.setAutoFreeze=function(t){this.F=t},e.setUseProxies=function(t){t&&!Q&&n(20),this.g=t},e.applyPatches=function(t,e){var r;for(r=e.length-1;r>=0;r--){var n=e[r];if(0===n.path.length&&"replace"===n.op){t=n.value;break}}var i=m("Patches").$;return o(t)?i(t,e):this.produce(t,(function(t){return i(t,e.slice(r+1))}))},t}(),ct=new at,ft=ct.produce,st=ct.produceWithPatches.bind(ct),lt=ct.setAutoFreeze.bind(ct),pt=ct.setUseProxies.bind(ct),ht=ct.applyPatches.bind(ct),vt=ct.createDraft.bind(ct),yt=ct.finishDraft.bind(ct);const dt=ft}}]);
|
PypiClean
|
/bulkdata-0.6.1.tar.gz/bulkdata-0.6.1/docs/bulkdata.rst
|
bulkdata
========
Subpackages
^^^^^^^^^^^
.. toctree::
bulkdata.pyNastran
Submodules
^^^^^^^^^^
bulkdata.card
-------------
.. automodule:: bulkdata.card
:members:
:undoc-members:
:special-members: __getitem__, __setitem__, __delitem__, __str__, __len__, __iter__, __bool__
bulkdata.cli
------------
.. automodule:: bulkdata.cli
:members:
:undoc-members:
bulkdata.deck
-------------
.. automodule:: bulkdata.deck
:members:
:undoc-members:
:special-members: __getitem__, __setitem__, __str__, __len__, __iter__, __bool__
bulkdata.error
--------------
.. automodule:: bulkdata.error
:members:
:undoc-members:
bulkdata.field
--------------
.. automodule:: bulkdata.field
:members:
:undoc-members:
bulkdata.format
---------------
.. automodule:: bulkdata.format
:members:
:undoc-members:
bulkdata.parse
--------------
.. automodule:: bulkdata.parse
:members:
:undoc-members:
bulkdata.util
-------------
.. automodule:: bulkdata.util
:members:
:undoc-members:
Module
^^^^^^
.. automodule:: bulkdata
:members:
:undoc-members:
The :class:`~bulkdata.card.Card` and :class:`~bulkdata.deck.Deck`
classes are accessible via the top-level :mod:`bulkdata` module.
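A minimal import sketch follows. Only the fact that both classes are exposed at
the package top level comes from this documentation; the constructor call and
attribute access shown are illustrative assumptions (see :mod:`bulkdata.card`
and :mod:`bulkdata.deck` above for the real signatures):

.. code-block:: python

   from bulkdata import Card, Deck   # exposed at the package top level

   deck = Deck()                     # assumed no-argument constructor
   print(len(deck), bool(deck))      # Deck documents __len__ and __bool__
   for card in deck:                 # Deck documents __iter__
       print(str(card))              # Card documents __str__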
|
PypiClean
|
/sierra-research-1.3.5.tar.gz/sierra-research-1.3.5/sierra/core/pipeline/stage4/intra_exp_graph_generator.py
|
# Core packages
import os
import copy
import typing as tp
import logging
import pathlib
# 3rd party packages
import json
# Project packages
from sierra.core.graphs.stacked_line_graph import StackedLineGraph
from sierra.core.graphs.heatmap import Heatmap
from sierra.core.models.graphs import IntraExpModel2DGraphSet
import sierra.core.variables.batch_criteria as bc
import sierra.core.plugin_manager as pm
from sierra.core import types, config, utils
class BatchIntraExpGraphGenerator:
def __init__(self, cmdopts: types.Cmdopts) -> None:
# Copy because we are modifying it and don't want to mess up the
# arguments for graphs that are generated after us
self.cmdopts = copy.deepcopy(cmdopts)
self.logger = logging.getLogger(__name__)
def __call__(self,
main_config: types.YAMLDict,
controller_config: types.YAMLDict,
LN_config: types.YAMLDict,
HM_config: types.YAMLDict,
criteria: bc.IConcreteBatchCriteria) -> None:
"""Generate all intra-experiment graphs for a :term:`Batch Experiment`.
Parameters:
main_config: Parsed dictionary of main YAML configuration
controller_config: Parsed dictionary of controller YAML
configuration.
LN_config: Parsed dictionary of intra-experiment linegraph
configuration.
HM_config: Parsed dictionary of intra-experiment heatmap
configuration.
criteria: The :term:`Batch Criteria` used for the batch
experiment.
"""
exp_to_gen = utils.exp_range_calc(self.cmdopts,
self.cmdopts['batch_output_root'],
criteria)
for exp in exp_to_gen:
batch_output_root = pathlib.Path(self.cmdopts["batch_output_root"])
batch_stat_root = pathlib.Path(self.cmdopts["batch_stat_root"])
batch_input_root = pathlib.Path(self.cmdopts["batch_input_root"])
batch_graph_root = pathlib.Path(self.cmdopts["batch_graph_root"])
batch_model_root = pathlib.Path(self.cmdopts["batch_model_root"])
cmdopts = copy.deepcopy(self.cmdopts)
cmdopts["exp_input_root"] = str(batch_input_root / exp.name)
cmdopts["exp_output_root"] = str(batch_output_root / exp.name)
cmdopts["exp_graph_root"] = str(batch_graph_root / exp.name)
cmdopts["exp_model_root"] = str(batch_model_root / exp.name)
cmdopts["exp_stat_root"] = str(batch_stat_root / exp.name)
if os.path.isdir(cmdopts["exp_stat_root"]):
generator = pm.module_load_tiered(project=self.cmdopts['project'],
path='pipeline.stage4.intra_exp_graph_generator')
generator.IntraExpGraphGenerator(main_config,
controller_config,
LN_config,
HM_config,
cmdopts)(criteria)
else:
self.logger.warning("Skipping experiment '%s': %s does not exist",
exp,
cmdopts['exp_stat_root'])
class IntraExpGraphGenerator:
"""Generates graphs from :term:`Averaged .csv` files for a single experiment.
Which graphs are generated is controlled by YAML configuration files parsed
in :class:`~sierra.core.pipeline.stage4.pipeline_stage4.PipelineStage4`.
This class can be extended/overridden using a :term:`Project` hook. See
:ref:`ln-sierra-tutorials-project-hooks` for details.
Attributes:
cmdopts: Dictionary of parsed cmdline attributes.
main_config: Parsed dictionary of main YAML configuration
controller_config: Parsed dictionary of controller YAML
configuration.
LN_config: Parsed dictionary of intra-experiment linegraph
configuration.
HM_config: Parsed dictionary of intra-experiment heatmap
configuration.
criteria: The :term:`Batch Criteria` used for the batch
experiment.
logger: The handle to the logger for this class. If you extend this
class, save/restore this variable in tandem with overriding it so that
logging messages carry unique logger names for this class and your
derived class, which reduces confusion.
"""
def __init__(self,
main_config: types.YAMLDict,
controller_config: types.YAMLDict,
LN_config: types.YAMLDict,
HM_config: types.YAMLDict,
cmdopts: types.Cmdopts) -> None:
# Copy because we are modifying it and don't want to mess up the
# arguments for graphs that are generated after us
self.cmdopts = copy.deepcopy(cmdopts)
self.main_config = main_config
self.LN_config = LN_config
self.HM_config = HM_config
self.controller_config = controller_config
self.logger = logging.getLogger(__name__)
utils.dir_create_checked(self.cmdopts["exp_graph_root"], exist_ok=True)
def __call__(self, criteria: bc.IConcreteBatchCriteria) -> None:
"""
Generate graphs.
Performs the following steps:
#. :class:`~sierra.core.pipeline.stage4.intra_exp_graph_generator.LinegraphsGenerator`
   to generate linegraphs for each experiment in the batch.
#. :class:`~sierra.core.pipeline.stage4.intra_exp_graph_generator.HeatmapsGenerator`
   to generate heatmaps for each experiment in the batch.
"""
LN_targets, HM_targets = self.calc_targets()
self.generate(LN_targets, HM_targets)
def generate(self,
LN_targets: tp.List[types.YAMLDict],
HM_targets: tp.List[types.YAMLDict]):
if not self.cmdopts['project_no_LN']:
LinegraphsGenerator(self.cmdopts, LN_targets).generate()
if not self.cmdopts['project_no_HM']:
HeatmapsGenerator(self.cmdopts, HM_targets).generate()
def calc_targets(self) -> tp.Tuple[tp.List[types.YAMLDict],
tp.List[types.YAMLDict]]:
"""Calculate what intra-experiment graphs should be generated.
Uses YAML configuration for controller and intra-experiment graphs.
Returns a tuple of dictionaries (intra-experiment linegraphs,
intra-experiment heatmaps) defining which graphs to generate. The enabled
graphs exist in their respective YAML configuration `and` are
enabled by the YAML configuration for the selected controller.
"""
keys = []
for category in list(self.controller_config.keys()):
if category not in self.cmdopts['controller']:
continue
for controller in self.controller_config[category]['controllers']:
if controller['name'] not in self.cmdopts['controller']:
continue
# valid to specify no graphs, and only to inherit graphs
keys = controller.get('graphs', [])
if 'graphs_inherit' in controller:
for inherit in controller['graphs_inherit']:
keys.extend(inherit) # optional
# Get keys for enabled graphs
LN_keys = [k for k in self.LN_config if k in keys]
self.logger.debug("Enabled linegraph categories: %s", LN_keys)
HM_keys = [k for k in self.HM_config if k in keys]
self.logger.debug("Enabled heatmap categories: %s", HM_keys)
# Strip out all configured graphs which are not enabled
LN_targets = [self.LN_config[k] for k in LN_keys]
HM_targets = [self.HM_config[k] for k in HM_keys]
return LN_targets, HM_targets
class LinegraphsGenerator:
"""
Generates linegraphs from :term:`Averaged .csv` files within an experiment.
"""
def __init__(self,
cmdopts: types.Cmdopts,
targets: tp.List[types.YAMLDict]) -> None:
self.cmdopts = cmdopts
self.targets = targets
self.logger = logging.getLogger(__name__)
self.graph_root = pathlib.Path(self.cmdopts['exp_graph_root'])
self.stats_root = pathlib.Path(self.cmdopts['exp_stat_root'])
def generate(self) -> None:
self.logger.info("Linegraphs from %s", self.cmdopts['exp_stat_root'])
# For each category of linegraphs we are generating
for category in self.targets:
# For each graph in each category
for graph in category['graphs']:
output_fpath = self.graph_root / ('SLN-' + graph['dest_stem'] +
config.kImageExt)
try:
self.logger.trace('\n' + # type: ignore
json.dumps(graph, indent=4))
StackedLineGraph(stats_root=self.stats_root,
input_stem=graph['src_stem'],
output_fpath=output_fpath,
stats=self.cmdopts['dist_stats'],
dashstyles=graph.get('dashes', None),
linestyles=graph.get('styles', None),
cols=graph.get('cols', None),
title=graph.get('title', None),
legend=graph.get('legend', None),
xlabel=graph.get('xlabel', None),
ylabel=graph.get('ylabel', None),
logyscale=self.cmdopts['plot_log_yscale'],
large_text=self.cmdopts['plot_large_text']).generate()
except KeyError:
self.logger.fatal(("Could not generate linegraph. "
"Possible reasons include: "))
self.logger.fatal(("1. The YAML configuration entry is "
"missing required fields"))
missing_cols = graph.get('cols', "MISSING_KEY")
missing_stem = graph.get('src_stem', "MISSING_KEY")
self.logger.fatal(("2. 'cols' is present in YAML "
"configuration but some of %s are "
"missing from %s"),
missing_cols,
missing_stem)
raise
class HeatmapsGenerator:
"""
Generates heatmaps from :term:`Averaged .csv` files for a single experiment.
"""
def __init__(self,
cmdopts: types.Cmdopts,
targets: tp.List[types.YAMLDict]) -> None:
self.exp_stat_root = pathlib.Path(cmdopts['exp_stat_root'])
self.exp_graph_root = pathlib.Path(cmdopts["exp_graph_root"])
self.exp_model_root = pathlib.Path(cmdopts["exp_model_root"])
self.large_text = cmdopts['plot_large_text']
self.targets = targets
self.logger = logging.getLogger(__name__)
def generate(self) -> None:
self.logger.info("Heatmaps from %s", self.exp_stat_root)
# For each category of heatmaps we are generating
for category in self.targets:
# For each graph in each category
for graph in category['graphs']:
self.logger.trace('\n' + # type: ignore
json.dumps(graph, indent=4))
if IntraExpModel2DGraphSet.model_exists(self.exp_model_root,
graph['src_stem']):
IntraExpModel2DGraphSet(self.exp_stat_root,
self.exp_model_root,
self.exp_graph_root,
graph['src_stem'],
graph.get('title', None)).generate()
else:
input_fpath = self.exp_stat_root / (graph['src_stem'] +
config.kStats['mean'].exts['mean'])
output_fpath = self.exp_graph_root / ('HM-' + graph['src_stem'] +
config.kImageExt)
Heatmap(input_fpath=input_fpath,
output_fpath=output_fpath,
title=graph.get('title', None),
xlabel='X',
ylabel='Y',
large_text=self.large_text).generate()
__api__ = [
'BatchIntraExpGraphGenerator',
'IntraExpGraphGenerator',
'LinegraphsGenerator',
'HeatmapsGenerator'
]
|
PypiClean
|
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/wu-ya-jiu-shu/乌鸦救赎《终极火箭班》:乌鸦经典语录:不要凭感觉要靠测试.md
|
# 乌鸦救赎 (Wuya Jiushu) "Ultimate Rocket Class" — Classic Quotes: Don't Go by Feelings, Go by Testing
Guessing by feeling gets things wrong. A lot of people talk themselves into a worse and worse read of the situation the more they guess. What you need to do is test. Test. Never just go on your own feelings; trust yourself to test. If you want to know the real answer, you test. Is she no longer into you? Run a test and you will know.
Going by feeling is a very harmful thing. I have seen plenty of people lose a girl they could have had because they went by feeling, or decide by feeling that she did not love them and tell her it was over, or convince themselves by feeling that a girl was into them and go confess to her.
With women you have to know how to test, always. If you do not know how to test, you will take a lot of losses in this area. Testing is a tool. What counts as a test? It can be a screening question, a compliance test, or a "shit test" — anything that shows whether the other person has real interest in you.
For example, a playful hint is a test: "I've noticed you like following me around today, chatting with me, staying next to me wherever I go. Do you like me?" That is a test. If she says "no" but keeps talking and acts coy, she almost certainly likes you. If she does not even want to answer, then do not bother confessing, and you avoid the awkwardness.
Likewise, if a girl is interested in you, then when you ask her to do something for you she will be willing to do it. If she is not willing, her compliance is not there, and compliance is lacking because attraction is lacking.
So always learn to test. Do not learn these things and then throw them away and fall back on deciding by feeling roughly where things stand; feelings tell you nothing. Trust the testing. Only testing can tell you what point the two of you have really reached.
Trust the things that can be backed by data.
|
PypiClean
|
/scottbrian_utils-2.4.0.tar.gz/scottbrian_utils-2.4.0/src/scottbrian_utils/stop_watch.py
|
import threading
import time
from typing import Optional, Type, TYPE_CHECKING, Union
########################################################################
# Third Party
########################################################################
########################################################################
# Local
########################################################################
########################################################################
# type aliases
########################################################################
IntFloat = Union[int, float]
OptIntFloat = Optional[IntFloat]
########################################################################
# StopWatch Exceptions classes
########################################################################
class StopWatchError(Exception):
"""Base class for exception in this module."""
pass
########################################################################
# StopWatch Class
########################################################################
class StopWatch:
"""StopWatch class for testing.
The StopWatch class is used to assist in the testing of
multi-threaded functions. It provides a set of methods that help
with verification of timed events. The test case setup
involves a mainline thread that starts one or more remote threads.
The start_clock and duration methods are used to verify event times.
"""
####################################################################
# __init__
####################################################################
def __init__(self) -> None:
"""Initialize the object."""
self.clock_lock = threading.Lock()
self.start_time: float = 0.0
self.previous_start_time: float = 0.0
self.clock_in_use = False
self.clock_iter = 0
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
:Example: repr of StopWatch
>>> from scottbrian_utils.stop_watch import StopWatch
>>> stop_watch = StopWatch()
>>> repr(stop_watch)
'StopWatch()'
"""
if TYPE_CHECKING:
__class__: Type[StopWatch] # noqa: F842
classname = self.__class__.__name__
parms = ''
return f'{classname}({parms})'
####################################################################
# pause
####################################################################
def pause(self,
seconds: IntFloat,
clock_iter: int) -> None:
"""Sleep for the number of input seconds relative to start_time.
Args:
seconds: number of seconds to pause from the start_time for
the given clock_iter.
clock_iter: clock clock_iter to pause on
Notes:
1) The clock_iter is used to identify the clock that is
currently in use. A remote thread wants to pause
for a given number of seconds relative to the StopWatch
start_time for a given iteration of the clock. We will do
a sleep loop until the given clock_iter matches
the StopWatch clock_iter.
"""
while clock_iter != self.clock_iter:
time.sleep(0.1)
remaining_seconds = seconds - (time.time() - self.start_time)
while remaining_seconds > 0:
time.sleep(remaining_seconds)
remaining_seconds = seconds - (time.time() - self.start_time)
####################################################################
# start_clock
####################################################################
def start_clock(self,
clock_iter: int) -> None:
"""Set the start_time to the current time.
Args:
clock_iter: clock_iter to set for the clock
"""
while True:
with self.clock_lock:
if not self.clock_in_use: # if clock is free to use
self.clock_in_use = True # claim for us
break
time.sleep(0.01) # wait until we can have the clock
self.start_time = time.time()
self.clock_iter = clock_iter
####################################################################
# duration
####################################################################
def duration(self) -> float:
"""Return the number of seconds from the start_time.
Returns:
number of seconds from the start_time
"""
ret_duration = time.time() - self.start_time
# no need to get clock_lock to reset the clock_in_use flag
self.clock_in_use = False # make available to others
return ret_duration
|
PypiClean
|
/dsin100daysv36-6.0.1.tar.gz/dsin100daysv36-6.0.1/notebook/static/components/MathJax/localization/vi/TeX.js
|
MathJax.Localization.addTranslation("vi","TeX",{version:"2.7.8",isLoaded:true,strings:{ExtraOpenMissingClose:"D\u1EA5u ngo\u1EB7c m\u1EDF c\u00F2n d\u01B0 ho\u1EB7c d\u1EA5u ngo\u1EB7c \u0111\u00F3ng b\u1ECB thi\u1EBFu",ExtraCloseMissingOpen:"D\u1EA5u ngo\u1EB7c \u0111\u00F3ng c\u00F2n d\u01B0 ho\u1EB7c d\u1EA5u ngo\u1EB7c m\u1EDF b\u1ECB thi\u1EBFu",MissingLeftExtraRight:"Thi\u1EBFu \\left ho\u1EB7c d\u01B0 \\right",MissingScript:"Thi\u1EBFu \u0111\u1ED1i s\u1ED1 ch\u1EC9 s\u1ED1",ExtraLeftMissingRight:"D\u01B0 \\left ho\u1EB7c thi\u1EBFu \\right",MissingOpenForSub:"Thi\u1EBFu d\u1EA5u ngo\u1EB7c \u0111\u01A1n m\u1EDF cho ch\u1EC9 s\u1ED1 d\u01B0\u1EDBi",MissingOpenForSup:"Thi\u1EBFu d\u1EA5u ngo\u1EB7c \u0111\u01A1n m\u1EDF cho ch\u1EC9 s\u1ED1 tr\u00EAn",AmbiguousUseOf:"%1 \u0111\u01B0\u1EE3c s\u1EED d\u1EE5ng m\u1ED9t c\u00E1ch kh\u00F4ng r\u00F5 r\u00E0ng",EnvBadEnd:"\\begin{%1} k\u1EBFt th\u00FAc v\u1EDBi \\end{%2}",EnvMissingEnd:"Thi\u1EBFu \\end{%1}",MissingBoxFor:"Thi\u1EBFu h\u1ED9p cho %1",MissingCloseBrace:"Thi\u1EBFu d\u1EA5u ngo\u1EB7c \u0111\u00F3ng",MisplacedMiddle:"%1 ph\u1EA3i n\u1EB1m gi\u1EEFa \\left v\u00E0 \\right",MisplacedLimits:"%1 ch\u1EC9 \u0111\u01B0\u1EE3c cho ph\u00E9p \u0111\u1ED1i v\u1EDBi ph\u00E9p to\u00E1n",MultipleCommand:"Nhi\u1EC1u %1",InvalidMathMLAttr:"Thu\u1ED9c t\u00EDnh MathML kh\u00F4ng h\u1EE3p l\u1EC7: %1",UnknownAttrForElement:"%1 kh\u00F4ng \u0111\u01B0\u1EE3c c\u00F4ng nh\u1EADn l\u00E0 thu\u1ED9c t\u00EDnh cho %2",MissingArgFor:"Thi\u1EBFu \u0111\u1ED1i s\u1ED1 cho %1",InvalidEnv:"T\u00EAn m\u00F4i tr\u01B0\u1EDDng \u201C%1\u201D kh\u00F4ng h\u1EE3p l\u1EC7",UnknownEnv:"M\u00F4i tr\u01B0\u1EDDng kh\u00F4ng r\u00F5 \u201C%1\u201D",TokenNotFoundForCommand:"Kh\u00F4ng t\u00ECm th\u1EA5y %1 cho %2",CommandNotAllowedInEnv:"Kh\u00F4ng cho ph\u00E9p %1 trong m\u00F4i tr\u01B0\u1EDDng %2",MultipleLabel:"Nh\u00E3n \u201C%1\u201D \u0111\u01B0\u1EE3c \u0111\u1ECBnh r\u00F5 nhi\u1EC1u l\u1EA7n",CommandAtTheBeginingOfLine:"%1 ph\u1EA3i n\u1EB1m v\u00E0o \u0111\u1EA7u d\u00F2ng",MultipleBBoxProperty:"%1 \u0111\u01B0\u1EE3c \u0111\u1ECBnh r\u00F5 hai l\u1EA7n trong %2",InvalidDecimalNumber:"S\u1ED1 th\u1EADp ph\u00E2n kh\u00F4ng h\u1EE3p l\u1EC7",InvalidNumber:"S\u1ED1 kh\u00F4ng h\u1EE3p l\u1EC7",NoClosingChar:"Kh\u00F4ng t\u00ECm th\u1EA5y %1 \u0111\u00F3ng",Misplaced:"\u0110\u00E3 \u0111\u1EB7t sai ch\u1ED7 %1",UndefinedControlSequence:"Tr\u00ECnh t\u1EF1 ki\u1EC3m so\u00E1t kh\u00F4ng x\u00E1c \u0111\u1ECBnh %1",DoubleExponent:"Ch\u1EC9 s\u1ED1 tr\u00EAn ch\u1EC9 s\u1ED1 tr\u00EAn: d\u00F9ng d\u1EA5u ngo\u1EB7c m\u00F3c \u0111\u1EC3 l\u00E0m r\u00F5",DoubleSubscripts:"Ch\u1EC9 s\u1ED1 d\u01B0\u1EDBi ch\u1EC9 s\u1ED1 d\u01B0\u1EDBi: d\u00F9ng d\u1EA5u ngo\u1EB7c m\u00F3c \u0111\u1EC3 l\u00E0m r\u00F5",DoubleExponentPrime:"D\u1EA5u ph\u1EA9y tr\u00EAn g\u00E2y ra ch\u1EC9 s\u1ED1 tr\u00EAn ch\u1EC9 s\u1ED1 tr\u00EAn: d\u00F9ng d\u1EA5u ngo\u1EB7c m\u00F3c \u0111\u1EC3 l\u00E0m r\u00F5",CantUseHash1:"B\u1EA1n kh\u00F4ng th\u1EC3 s\u1EED d\u1EE5ng \u201Ck\u00FD t\u1EF1 tham bi\u1EBFn macro #\u201D trong ch\u1EBF \u0111\u1ED9 to\u00E1n",MisplacedMoveRoot:"%1 ch\u1EC9 c\u00F3 th\u1EC3 xu\u1EA5t hi\u1EC7n trong ph\u00E9p c\u0103n",IntegerArg:"\u0110\u1ED1i s\u1ED1 c\u1EE7a %1 ph\u1EA3i l\u00E0 s\u1ED1 nguy\u00EAn",NotMathMLToken:"%1 kh\u00F4ng ph\u1EA3i l\u00E0 ph\u1EA7n t\u1EED d\u1EA5u hi\u1EC7u",MaxMacroSub1:"\u0110\u00E3 v\u01B0\u1EE3t qu\u00E1 s\u1ED1 l\u1EA7n thay th\u1EBF macro t\u1ED1i \u0111a c\u1EE7a MathJax; c\u00F3 ph\u1EA3i g\u1ECDi macro 
\u0111\u1EC7 quy?",MaxMacroSub2:"\u0110\u00E3 v\u01B0\u1EE3t qu\u00E1 s\u1ED1 l\u1EA7n thay th\u1EBF t\u1ED1i \u0111a c\u1EE7a MathJax; m\u00F4i tr\u01B0\u1EDDng LaTeX c\u00F3 ph\u1EA3i \u0111\u1EC7 quy?",ExtraAlignTab:"Th\u1EBB c\u0103n ch\u1EC9nh d\u01B0 trong v\u0103n b\u1EA3n \\cases",BracketMustBeDimension:"\u0110\u1ED1i s\u1ED1 trong d\u1EA5u ngo\u1EB7c c\u1EE7a %1 ph\u1EA3i l\u00E0 chi\u1EC1u",ExtraCloseLooking:"D\u1EA5u \u0111\u00F3ng b\u1EA5t ng\u1EDD trong khi t\u00ECm ki\u1EBFm %1",MissingCloseBracket:"Kh\u00F4ng t\u00ECm th\u1EA5y d\u1EA5u \u201C]\u201D \u0111\u00F3ng cho \u0111\u1ED1i s\u1ED1 c\u1EE7a %1",MissingOrUnrecognizedDelim:"D\u1EA5u t\u00E1ch b\u1ECB thi\u1EBFu ho\u1EB7c kh\u00F4ng r\u00F5 cho %1",MissingDimOrUnits:"Thi\u1EBFu chi\u1EC1u ho\u1EB7c \u0111\u01A1n v\u1ECB c\u1EE7a %1",MathNotTerminated:"To\u00E1n kh\u00F4ng ch\u1EA5m d\u1EE9t trong h\u1ED9p v\u0103n b\u1EA3n",IllegalMacroParam:"Tham chi\u1EBFu tham bi\u1EBFn macro kh\u00F4ng h\u1EE3p l\u1EC7",MaxBufferSize:"\u0110\u00E3 v\u01B0\u1EE3t qu\u00E1 k\u00EDch th\u01B0\u1EDBc b\u1ED9 \u0111\u1EC7m n\u1ED9i b\u1ED9 c\u1EE7a MathJax; c\u00F3 ph\u1EA3i g\u1ECDi macro \u0111\u1EC7 quy?",IllegalAlign:"\u0110\u00E3 x\u00E1c \u0111\u1ECBnh s\u1EF1 c\u0103n ch\u1EC9nh kh\u00F4ng h\u1EE3p l\u1EC7 trong %1",BadMathStyleFor:"Ki\u1EC3u to\u00E1n h\u1ECFng v\u1EDBi %1",PositiveIntegerArg:"\u0110\u1ED1i s\u1ED1 c\u1EE7a %1 ph\u1EA3i l\u00E0 s\u1ED1 nguy\u00EAn d\u01B0\u01A1ng",ErroneousNestingEq:"C\u1EA5u tr\u00FAc \u0111\u1EB3ng th\u1EE9c b\u1ECB x\u1EBFp l\u1ED3ng sai l\u1EA7m",MultlineRowsOneCol:"C\u00E1c h\u00E0ng trong m\u00F4i tr\u01B0\u1EDDng %1 ph\u1EA3i c\u00F3 \u0111\u00FAng m\u1ED9t c\u1ED9t",InvalidBBoxProperty:"\u201C%1\u201D kh\u00F4ng tr\u00F4ng gi\u1ED1ng nh\u01B0 m\u00E0u, chi\u1EC1u l\u00F3t, ho\u1EB7c ki\u1EC3u",ExtraEndMissingBegin:"D\u01B0 %1 ho\u1EB7c thi\u1EBFu \\begingroup",GlobalNotFollowedBy:"%1 kh\u00F4ng c\u00F3 \\let, \\def, ho\u1EB7c \\newcommand ti\u1EBFp theo",UndefinedColorModel:"M\u00F4 h\u00ECnh m\u00E0u \u201C%1\u201D kh\u00F4ng \u0111\u1ECBnh ngh\u0129a",ModelArg1:"Gi\u00E1 tr\u1ECB m\u00E0u cho m\u00F4 h\u00ECnh %1 \u0111\u00F2i h\u1ECFi 3 s\u1ED1",ModelArg2:"Gi\u00E1 tr\u1ECB m\u00E0u cho m\u00F4 h\u00ECnh %1 ph\u1EA3i \u1EDF gi\u1EEFa %2 v\u00E0 %3",NewextarrowArg1:"\u0110\u1ED1i s\u1ED1 \u0111\u1EA7u ti\u00EAn c\u1EE7a %1 ph\u1EA3i l\u00E0 t\u00EAn tr\u00ECnh t\u1EF1 \u0111i\u1EC1u khi\u1EC3n",NewextarrowArg2:"\u0110\u1ED1i s\u1ED1 th\u1EE9 hai c\u1EE7a %1 ph\u1EA3i l\u00E0 hai s\u1ED1 nguy\u00EAn ph\u00E2n t\u00E1ch b\u1EB1ng d\u1EA5u ph\u1EA9y",NewextarrowArg3:"\u0110\u1ED1i s\u1ED1 th\u1EE9 ba c\u1EE7a %1 ph\u1EA3i l\u00E0 s\u1ED1 k\u00FD t\u1EF1 Unicode",IllegalControlSequenceName:"T\u00EAn tr\u00ECnh t\u1EF1 \u0111i\u1EC1u khi\u1EC3n kh\u00F4ng h\u1EE3p l\u1EC7 cho %1",IllegalParamNumber:"\u0110\u00E3 x\u00E1c \u0111\u1ECBnh s\u1ED1 tham bi\u1EBFn kh\u00F4ng h\u1EE3p l\u1EC7 cho %1",MissingCS:"%1 ph\u1EA3i c\u00F3 tr\u00ECnh t\u1EF1 \u0111i\u1EC1u khi\u1EC3n ti\u1EBFp theo",CantUseHash2:"\u0110\u00E3 s\u1EED d\u1EE5ng # m\u1ED9t c\u00E1ch kh\u00F4ng h\u1EE3p l\u1EC7 trong khu\u00F4n m\u1EABu c\u1EE7a %1",SequentialParam:"Tham bi\u1EBFn c\u1EE7a %1 ph\u1EA3i \u0111\u01B0\u1EE3c \u0111\u00E1nh s\u1ED1 li\u00EAn t\u1EE5c",MissingReplacementString:"Thi\u1EBFu chu\u1ED7i thay th\u1EBF khi \u0111\u1ECBnh ngh\u0129a %1",MismatchUseDef:"\u0110\u00E3 s\u1EED d\u1EE5ng %1 m\u1ED9t c\u00E1ch kh\u00F4ng ph\u00F9 h\u1EE3p v\u1EDBi \u0111\u1ECBnh ngh\u0129a c\u1EE7a 
n\u00F3",RunawayArgument:"\u0110\u1ED1i s\u1ED1 c\u1EE7a %1 kh\u00F4ng ng\u1EEBng?",NoClosingDelim:"Kh\u00F4ng t\u00ECm th\u1EA5y d\u1EA5u k\u1EBFt th\u00FAc %1"}});MathJax.Ajax.loadComplete("[MathJax]/localization/vi/TeX.js");
|
PypiClean
|
/dbpedia_ent-0.1.9-py3-none-any.whl/dbpedia_ent/dto/syn/n1/p/rev_po.py
|
d_rev_po = {'po-2': 'polikarpov_po-2',
'po-chu-i': 'bai_juyi',
'po-eun': 'jeong_mongju',
'po-hai': 'balhae',
'po-lay': 'pu-erh_tea',
'po-lei': 'pu-erh_tea',
'po-lu-chia': 'aksu',
'po-mo': 'postmodernism',
'po-pis': 'popis',
'po-po': 'police',
'po-po-po': 'popopo',
'po0wned': 'pwn',
'poa1/4egnania': 'farewells',
'poaaf': 'portrait_of_an_american_family',
'poacae': 'poaceae',
'poacaea': 'poaceae',
'poacea': 'poaceae',
'poachard': 'diving_duck',
'poachers': 'poaching',
'poaf': 'portuguese_air_force',
'poago': 'puao',
'poal': 'el_poal',
'poas': 'principal_officials_accountability_system',
'poasm': 'pelles_c',
'pobbles': 'three_cliffs_bay',
'pobedonostsev': 'konstantin_pobedonostsev',
'pobedy': 'pobeda',
'pobico': 'pyeongchang_county',
'pobjoy': 'pobjoy_airmotors',
'poblenou': 'el_poblenou',
'poblet': 'poblet_monastery',
'pobox.com': 'pobox',
'pobs': 'pirates_of_the_burning_sea',
'poc-fu': 'fu-schnickens',
'poca': 'proceeds_of_crime_act_2002',
'pocahantas': 'pocahontas',
'pocahantos': 'pocahontas',
'pocahauntus': 'pocahontas',
'pocahontes': 'pocahontas',
'pocari': 'pocari_sweat',
'poccnr': 'russia',
'pocdol': 'pocdol_mountains',
'pocei': 'pocsaj',
'pochahantas': 'pocahontas',
'pochahontas': 'pocahontas',
'pochampalli': 'bhoodhan_pochampally',
'pochampally': 'bhoodhan_pochampally',
'pochayiv': 'pochaiv',
'pochekin': 'ivan_pochekin',
'pochentong': 'phnom_penh_international_airport',
'pochette': 'kit_violin',
'pochhammer': 'leo_august_pochhammer',
'pochinkovski': 'pochinkovsky_district',
'pochinkovskii': 'pochinkovsky_district',
'pochinkovskiy': 'pochinkovsky_district',
'pochinkovsky': 'pochinkovsky_district',
'pochoir': 'stencil',
'pochongyo': 'bocheon-gyo',
'pochote': 'bombacopsis_quinata',
'pochtecatl': 'pochteca',
'pochutec': 'pochutec_language',
'pochutla': 'san_pedro_pochutla',
'pochy': 'pochi',
'poci': 'posterior_circulation_infarct',
'pocker': 'poker',
'pocket-bike': 'minibike',
'pocket-knife': 'pocket_knife',
'pocket-mouse': 'pocket_mouse',
'pocket-veto': 'pocket_veto',
'pocketbike': 'minibike',
'pocketdelta': 'pocketdelta_robot',
'pocketfritz': 'pocket_fritz',
'pocketknife': 'pocket_knife',
'pocketmodel': 'constructible_strategy_game',
'pocketmodels': 'constructible_strategy_game',
'pocketpaks': 'listerine',
'pocketpc': 'pocket_pc',
'pocketref': 'pocket_ref',
'pocketrock': 'pocket_rock',
'pockets': 'pocket',
'pocketwatch': 'pocket_watch',
'pocketzip': 'iomega_pocket_zip_drive',
'pockle': 'giftpia',
'pockmarked': 'pockmark',
'pockmarks': 'pockmark',
'pocl3': 'phosphoryl_chloride',
'poco': 'plain_old_clr_object',
'pocohantas': 'pocahontas',
'pocohantus': 'pocahontas',
'pocohontas': 'pocahontas',
'pocomam': 'poqomam',
'poconos': 'the_poconos',
'pocoughtraonack': 'mingo',
'pocsesti': 'donici',
'poct': 'point-of-care_testing',
'pocte': 'nebbiolo',
'pocumtuc': 'pocomtuc',
'pocumtuck': 'pocomtuc',
'pocuttya': 'pokuttya',
'pocztowi': 'poczet',
'pocztowy': 'poczet',
'poda': 'poda_protected_area',
'poda-poda': 'share_taxi',
'podager': 'nacunda_nighthawk',
'podalarius': 'podalirius',
'podandos': 'padyandus',
'podandus': 'padyandus',
'podanthera': 'epipogium',
'podar': 'padar',
'podargidae': 'frogmouth',
'podargiformes': 'frogmouth',
'podaria': 'tubercle',
'podarium': 'tubercle',
'podarke': 'aello',
'podbonesec': 'pulfero',
'podc': 'symposium_on_principles_of_distributed_computing',
'podcar': 'personal_rapid_transit',
'podcasted': 'podcast',
'podcaster': 'podcast',
'podcasters': 'podcast',
'podcasting': 'podcast',
'podcasts': 'podcast',
'podcatcher': 'podcast',
'podcatchers': 'podcast',
'podcatching': 'podcast',
'podcomplex': 'podcomplex_records',
'poddington': 'podington',
'podebusk': 'henning_podebusk',
'podengo': 'portuguese_podengo',
'podervianskiy': 'les_podervyansky',
'poderviansky': 'les_podervyansky',
'podestamattoon': 'podesta_group',
'podfade': 'podcast',
'podfather': 'adam_curry',
'podgaytsy': 'pidhaitsi',
'podgecast': 'the_podge_cast',
'podgoritsa': 'podgorica',
'podhajce': 'pidhaitsi',
'podhalanczycy': 'podhale_rifles',
'podharcmistrz': 'ranks_in_polish_scouting',
'podharcmistrzyni': 'ranks_in_polish_scouting',
'podhayce': 'pidhaitsi',
'podhgayce': 'pidhaitsi',
'podhorze': 'pidhirtsi',
'podia': 'podium',
'podiatric': 'podiatry',
'podiatrist': 'podiatry',
'podiatrists': 'podiatry',
'podica': 'african_finfoot',
'podicepidae': 'grebe',
'podicipedidae': 'grebe',
'podicipediformes': 'grebe',
'podience': 'podcast',
'podilia': 'podolia',
'podillia': 'podolia',
'podillya': 'podolia',
'podillya-khmelnytsky': 'fc_dynamo_khmelnytskyi',
'podillya-khmelnytskyi': 'fc_dynamo_khmelnytskyi',
'podilya': 'podolia',
'podimore': 'yeovilton',
'podinton': 'podington',
'podiums': 'podium',
'podjack': 'podcast',
'podjacking': 'podcast',
'podkanclercy': 'kanclerz',
'podkanclerz': 'kanclerz',
'podkanclerzy': 'kanclerz',
'podkarpacie': 'carpathian_mountains',
'podkarpackie': 'subcarpathian_voivodeship',
'podkletnov': 'eugene_podkletnov',
'podkrepa': 'confederation_of_labour_podkrepa',
'podkumok': 'podkumok_river',
'podlachian': 'podlaskie_voivodeship',
'podlachien': 'podlachia',
'podlakur': 'podalakur',
'podlasia': 'podlachia',
'podlasie': 'podlachia',
'podlaskie': 'podlaskie_voivodeship',
'podlesia': 'podlachia',
'podleski': 'greta_and_janet_podleski',
'podnography': 'podcast',
'podocalycinae': 'podocalyx',
'podocarp': 'podocarpaceae',
'podocarps': 'podocarpaceae',
'podoces': 'ground_jay',
'podocnemidinae': 'podocnemididae',
'podoconiosis': 'elephantiasis',
'podocytes': 'podocyte',
'podocytosis': 'transcytosis',
'podofilox': 'podophyllotoxin',
'podokeesaurus': 'podokesaurus',
'podokesauridae': 'coelophysidae',
'podokkhep': 'padakshep',
'podol': 'podil',
'podola': 'guenther_podola',
'podolian': 'podolia',
'podolja': 'podolia',
'podolly': 'mori_chack',
'podolski': 'podolsky',
'podoluma': 'pouteria',
'podomys': 'florida_mouse',
'podopetalum': 'ormosia',
'podophile': 'foot_fetishism',
'podophilia': 'foot_fetishism',
'podophyllaceae': 'berberidaceae',
'podophyllu': 'podophyllum',
'podopterisi': 'jianzi',
'podosomes': 'podosome',
'podosphere': 'podcast',
'podostemales': 'podostemaceae',
'podostemineae': 'podostemaceae',
'podostemonaceae': 'podostemaceae',
'podostemonales': 'podostemaceae',
'podotrochleosis': 'navicular_disease',
'podoxymys': 'roraima_mouse',
'podpolkovnik': 'lieutenant_colonel',
'podporuchik': 'poruchik',
'podpulkownik': 'lieutenant_colonel',
'podranki': 'wounded_game',
'podravje': 'podravina',
'podrobnosti.ua': 'podrobnosti',
'podrunner': 'steve_boyett',
'podsednik': 'scott_podsednik',
'podshock': 'doctor_who:_podshock',
'podshow': 'mevio',
'podslurping': 'pod_slurping',
'podsnap': 'our_mutual_friend',
'podsolization': 'podsol',
'podstolitz': 'standesamt_podstolitz',
'podujeva': 'podujevo',
'podujeve': 'podujevo',
'poduridae': 'podura',
'poduroidea': 'podura',
'poduval': 'pothuval',
'podxt': 'line_6',
'podyachiy': 'podyachy',
'podzilla2': 'ipodlinux',
'podzinger': 'everyzing',
'podzol': 'podsol',
'podzolic': 'podsol',
'podzolisation': 'podsol',
'podzolization': 'podsol',
'podzols': 'podsol',
'poea': 'polyethoxylated_tallow_amine',
'poechos': 'poechos_reservoir',
'poecilidae': 'poeciliidae',
'poeciliid': 'poeciliidae',
'poecilitic': 'poikilitic',
'poecilmitis': 'chrysoritis',
'poeciloconger': 'barred_conger',
'poecilogale': 'african_striped_weasel',
'poeciloneuronpauciflorum': 'poeciloneuron_pauciflorum',
'poecilospondylus': 'varanosaurus',
'poecilostolus': 'atheris',
'poecilostulus': 'atheris',
'poecilozonites': 'bermuda_land_snail',
'poeck': 'pock',
'poehlman': 'eric_poehlman',
'poehlmann': 'eric_poehlman',
'poekoot': 'pokot',
'poelaert': 'joseph_poelaert',
'poelagus': 'bunyoro_rabbit',
'poem': 'poetry',
'poema': 'symphonic_poem',
'poemandres': 'poimandres',
'poemas': 'poetry',
'poen': 'poenit',
'poenitentiaria': 'apostolic_penitentiary',
'poenites': 'poenit',
'poepp.': 'eduard_friedrich_poeppig',
'poer': 'concrete_masonry_unit',
'poes': 'polar_operational_environmental_satellites',
'poesias': 'poetry',
'poesy': 'poetry',
'poet-saint': 'sant_mat',
'poet-saints': 'sant_mat',
'poet-smoothing': 'phonological_history_of_english_diphthongs',
'poetato': 'potato',
'poetatoe': 'potato',
'poetovio': 'ptuj',
'poetress': 'poetess',
'poets': 'poet',
'poetschendorf': 'pieckowo',
'poett': 'nigel_poett',
'poetv': 'portal_of_evil',
'poeu': 'post_office_engineering_union',
'poey': 'felipe_poey',
'poff': 'point-to-point_protocol_daemon',
'poffabro': 'frisanco',
'pofs': 'pakistan_ordnance_factories',
'pofv': 'phantasmagoria_of_flower_view',
'pogadi': 'angloromani_language',
'pogatetz': 'emanuel_pogatetz',
'pogatschar': 'helga_pogatschar',
'poges': 'the_pogues',
'pogesania': 'pogesanians',
'pogesanien': 'pogesanians',
'pogey': 'welfare',
'pogezania': 'pogesanians',
'pogezanians': 'pogesanians',
'pogezanien': 'pogesanians',
'poggeophyton': 'erythrococca',
'poggiobonizio': 'poggibonsi',
'pogl': 'perl_opengl',
'poglietti': 'alessandro_poglietti',
'poglizza': 'poljica',
'pognoncini': 'pagnoncini',
'pogo-stick': 'pogo_stick',
'pogobal': 'lolo_ball',
'pogobat': 'youtube',
'pogodin': 'mikhail_pogodin',
'pogolo': 'pogoro_people',
'pogonias': 'black_drum',
'pogonina': 'natalia_pogonina',
'pogoniulus': 'tinkerbird',
'pogonocichla': 'white-starred_robin',
'pogonology': 'beard',
'pogonophoran': 'pogonophora',
'pogonophore': 'siboglinidae',
'pogonophyllum': 'micrandra',
'pogonopomoides': 'pogonopoma',
'pogonoscorpius': 'pogonoscorpius_sechellensis',
'pogonotriccus': 'phylloscartes',
'pogonotrophy': 'facial_hair',
'pogorom': 'pogrom',
'pogosort': 'bogosort',
'pogosta': 'pogost',
'pogostick': 'pogo_stick',
'pogradeci': 'pogradec',
'pogram': 'pogrom',
'pogranychny': 'pogranichny_volcano',
'pogrebischtsche': 'pohrebysche',
'pogrebnyak': 'pavel_pogrebnyak',
'pogromchiki': 'pogrom',
'pogromnacht': 'kristallnacht',
'pogromni': 'pogromni_volcano',
'pogroms': 'pogrom',
'pogromshchiki': 'pogrom',
'pogson': 'norman_robert_pogson',
'pogues': 'the_pogues',
'pogy': 'menhaden',
'pohai': 'balhae',
'pohamba': 'hifikepunye_pohamba',
'pohc': 'pittsburgh_and_ohio_central_railroad',
'pohe': 'flattened_rice',
'pohio': 'horomona_pohio',
'pohjanheimo': 'mervi_pohjanheimo',
'pohjanlahti': 'gulf_of_bothnia',
'pohjois-inkeri': 'north_ingria',
'pohjois-karjala': 'north_karelia',
'pohjois-pohjanmaa': 'northern_ostrobothnia',
'pohjoisesplanadi': 'esplanadi',
'pohjoiskalotti': 'cap_of_the_north',
'pohl-weary': 'emily_pohl-weary',
'pohlin': 'marko_pohlin',
'pohlman': 'pohlmann',
'pohlschmidt': 'manfred_pohlschmidt',
'pohlsepia': 'pohlsepia_mazonensis',
'pohnpeian': 'pohnpeian_language',
'pohokura': 'pohokura_field',
'pohr': 'pre-determined_overhead_rate',
'pohrebyszcze': 'pohrebysche',
'pohuism': 'apathy',
'pohutakawa': 'metrosideros_excelsa',
'pohutukawa': 'metrosideros_excelsa',
'pohyon-sa': 'pohyonsa',
'poias': 'poeas',
'poictou': 'poitou',
'poietic': 'esthesic_and_poietic',
'poikile': 'stoa_poikile',
'poikiloblastic': 'poikiloblast',
'poikilocyte': 'poikilocytosis',
'poikilohydric': 'poikilohydry',
'poikilothermal': 'poikilotherm',
'poikilothermia': 'poikilotherm',
'poikilothermic': 'poikilotherm',
'poikilothermism': 'cold-blooded',
'poikilotherms': 'poikilotherm',
'poikilothermy': 'poikilotherm',
'poilaniella': 'trigonostemon',
'poilus': 'poilu',
'poine': 'poena',
'poinsetta': 'euphorbia_pulcherrima',
'poinsettia': 'euphorbia_pulcherrima',
'poinsettias': 'euphorbia_pulcherrima',
'poinsot': 'louis_poinsot',
'poinsur': 'poisar',
'point-and-shoot': 'point-and-shoot_camera',
'point-blank': 'point-blank_range',
'point-blank-range': 'point-blank_range',
'point-defense': 'point-defence',
'point-guard': 'point_guard',
'point-in-polygon': 'point_in_polygon',
'point-in-time': 'tuple-versioning',
'point-noire': 'pointe-noire',
'point-of-no-return': 'point_of_no_return',
'point-of-presence': 'point_of_presence',
'point-of-sale': 'point_of_sale',
'point-of-sales': 'point_of_sale',
'point-of-service': 'point_of_service_plan',
'point-of-view': 'point_of_view',
'point-pedro': 'point_pedro',
'point-shaving': 'point_shaving',
'point-slope': 'linear_equation',
'point-to-multipoint': 'point-to-multipoint_communication',
'point/counterpoint': 'weekend_update',
'pointalisim': 'pointillism',
'pointalism': 'pointillism',
'pointblank': 'point-blank_range',
'pointclasses': 'pointclass',
'pointcode': 'point_code',
'pointdexter': 'john_poindexter',
'pointe': 'en_pointe',
'pointe-du-hoc': 'pointe_du_hoc',
'pointe-saint-charles': 'pointe_saint-charles',
'pointe-sainte-anne': 'fredericton',
'pointe-work': 'en_pointe',
'pointework': 'en_pointe',
'pointfree': 'point-free',
'pointilism': 'pointillism',
'pointilist': 'pointillism',
'pointillist': 'pointillism',
'pointillistic': 'pointillism',
'pointism': 'pointillism',
'pointlessly': 'pointless_topology',
'pointlessness': 'pointless_topology',
'points': 'point',
'pointsetta': 'euphorbia_pulcherrima',
'pointsettas': 'euphorbia_pulcherrima',
'pointsettia': 'euphorbia_pulcherrima',
'pointshooting': 'point_shooting',
'pointwork': 'railroad_switch',
'pointy-haired': 'pointy-haired_boss',
'poip': 'post_over_ip',
'poiphet': 'poipet',
'poir.': 'jean_louis_marie_poiret',
'poirot': 'hercule_poirot',
'pois': 'postorgasmic_illness_syndrome',
'poiseidon': 'poseidon',
'poiser': 'halteres',
'poiseuille': 'jean_louis_marie_poiseuille',
'poisha': 'paisa',
'poisin': 'poison',
'poision': 'poison',
'poisk': 'poisk_centre',
'poison-apple': 'solanum_aculeastrum',
'poison-flower': 'atropa_belladonna',
'poison-ivy': 'poison_ivy',
'poison-oak': 'poison_oak',
'poison-sego': 'zigadenus',
'poisonberries': 'solanum_dulcamara',
'poisonberry': 'solanum_dulcamara',
'poisonflower': 'solanum_dulcamara',
'poisonflowers': 'solanum_dulcamara',
'poisonings': 'poisoning',
'poisonous': 'poison',
'poisons': 'poison',
'poisonwood': 'metopium_toxiferum',
'poissonella': 'pouteria',
'poissonian': 'poisson_distribution',
'poissy-sur-seine': 'poissy',
'poit.': 'pierre_antoine_poiteau',
'poitevins': 'poitou',
'poito': 'chief_winnemucca',
'poitou-charente': 'poitou-charentes',
'poitvin': 'poitevin',
'poiuyt': 'blivet',
'poivrea': 'combretum',
'pojagi': 'bojagi',
'pojarski': 'pozharsky',
'pojarskii': 'pozharsky',
'pojarskiy': 'pozharsky',
'pojarsky': 'pozharsky',
'pojo': 'plain_old_java_object',
'pojulu-people': 'pojulu_people',
'pok-a-tok': 'mesoamerican_ballgame',
'pok-ta-pok': 'mesoamerican_ballgame',
'pokachu': 'pikachu',
'pokah': 'bwin',
'pokahontas': 'pocahontas',
'pokak': 'bokak_atoll',
'pokaran': 'pokhran',
'pokayoke': 'poka-yoke',
'pokcy': 'pocky',
'poke-bonnet': 'poke_bonnet',
'poke-root': 'pokeweed',
'pokeberries': 'pokeweed',
'pokeberry': 'pokeweed',
'pokefami': 'pocketfami',
'pokemeboy': 'acacia_anegadensis',
'pokemon/charizard': 'charizard',
'pokemon/mewtwo': 'mewtwo',
'pokemon/pikachu': 'pikachu',
'poker-work': 'pyrography',
'pokerbot': 'computer_poker_players',
'pokerbots': 'computer_poker_players',
'pokerchamps.com': 'betfair',
'pokerface': 'poker_face',
'pokerhost': 'microgaming',
'pokermanager': 'poker_tools',
'pokernews.com': 'tony_g',
'pokernordica': 'casinokontanter.com',
'pokernordica.com': 'casinokontanter.com',
'pokeroot': 'pokeweed',
'pokerpro': 'pokertek',
'pokerprobe': 'poker_tools',
'pokerroom': 'bwin',
'pokerroom.com': 'bwin',
'pokerroom.tv': 'bwin',
'pokerspot': 'dutch_boyd',
'pokerstars.com': 'pokerstars',
'pokerstars.net': 'pokerstars',
'pokerstars.tv': 'pokerstars',
'pokerstat': 'poker_tools',
'pokerwork': 'pyrography',
'pokes': 'poke',
'pokeyoke': 'poka-yoke',
'pokfulam': 'pok_fu_lam',
'pokhalde': 'pokalde',
'pokhalo': 'pakhal',
'pokharan': 'pokhran',
'pokharan-i': 'smiling_buddha',
'pokhlebkin': 'william_pokhlyobkin',
'pokhran-i': 'smiling_buddha',
'poki': 'computer_poker_players',
'pokia': 'hulger',
'pokie': 'slot_machine',
'pokies': 'slot_machine',
'pokiest': 'speed',
'pokkali': 'pokkali_rice',
'pokker': 'p._k._pokker',
'pokkuri': 'okobo',
'poklad': 'lastovo_poklad',
'poklon': 'zemnoy_poklon',
'poklos': 'alba_iulia',
'pokolenie': 'a_generation',
'pokolpatak': 'cleja',
'pokomi': 'bobobo-bo_bo-bobo',
'pokomoko': 'slowly_i_turned',
'pokoot': 'pokot',
'pokr-mazra': 'pokr_masrik',
'pokravan': 'zoravan',
'pokrov': 'the_protection_of_the_mother_of_god',
'pokrovskaya': 'pokrovsky',
'pokrovski': 'pokrovsky',
'pokrovskii': 'pokrovsky',
'pokrovskoye': 'pokrovsky',
'pokryshkin': 'alexander_pokryshkin',
'poktamui': 'lepidocaryum',
'pokucie': 'pokuttya',
'pokulus': 'rayman_2:_the_great_escape',
'pokutia': 'pokuttya',
'pokuttia': 'pokuttya',
'pokutye': 'pokuttya',
'pol-an-ionian': 'pol_an_ionain',
'pol-e-charkhi': 'pul-e-charkhi_prison',
'pol-e-kohneh': 'kohneh_bridge',
'pol-e-vahid': 'vahid_bridge',
'pol.': 'polish_language',
'pol.dk': 'politiken',
'pola': 'dna_polymerase_i',
'polab': 'polabian_slavs',
'polabians': 'polabian_slavs',
'polabs': 'polabian_slavs',
'polacak': 'polotsk',
'polacanthinae': 'ankylosauridae',
'polacanthoides': 'hylaeosaurus',
'polaco': 'lito_y_polaco',
'polacre': 'polacca',
'polacy': 'poles',
'polad': 'khachardzan',
'polamalu': 'troy_polamalu',
'polamory': 'polyamory',
'poland-saxony': 'electorate_of_saxony',
'poland.': 'poland',
'poland/communications': 'telecommunications_in_poland',
'poland/economy': 'economy_of_poland',
'poland/geography': 'geography_of_poland',
'poland/government': 'politics_of_poland',
'poland/history': 'history_of_poland',
'poland/media': 'media_of_poland',
'poland/military': 'polish_armed_forces',
'poland/music': 'music_of_poland',
'poland/people': 'demographics_of_poland',
'poland/politicians': 'politics_of_poland',
'poland/transportation': 'transport_in_poland',
'polander': 'poles',
'polanes': 'polans',
'polangen': 'palanga',
'polannaruwa': 'polonnaruwa',
'polanski': 'roman_polanski',
'polansky': 'roman_polanski',
'polar-bear': 'polar_bear',
'polaramine': 'dexchlorpheniramine',
'polarbanen': 'polar_line',
'polarbear': 'polar_bear',
'polareasterlies': 'polar_easterlies',
'polarfront': 'ms_polarfront',
'polarimetric': 'polarimetry',
'polarinstitutt': 'norwegian_polar_institute',
'polariod': 'polaroid',
'polarion': 'polarion_software',
'polarisability': 'polarizability',
'polarisation': 'polarization',
'polariscope': 'polarimetry',
'polarise': 'polarization',
'polarised': 'polarization',
'polariser': 'polarizer',
'polarities': 'polarity',
'polaritons': 'polariton',
'polarizations': 'polarization',
'polarize': 'polarization',
'polarized': 'polarization',
'polarizing': 'polarization',
'polarman': 'real-life_superhero',
'polarmis': 'pelamis_wave_energy_converter',
'polarmount': 'polar_mount',
'polarnoje': 'polyarny',
'polarstern': 'rv_polarstern',
'polartec': 'malden_mills',
'polarus': 'polaris',
'polarware': 'penguin_software',
'polaski': 'deborah_polaski',
'polatsk': 'polotsk',
'polb': 'dna_polymerase_beta',
'polcalypso': 'the_original_danish_polcalypso_orchestra',
'polcari': 'pier_paolo_polcari',
'polchinsky': 'joseph_polchinski',
'poldark': 'the_poldark_novels',
'poldek': 'poldek_pfefferberg',
'poldering': 'polder',
'poldermodel': 'polder_model',
'polders': 'polder',
'poldi': 'lukas_podolski',
'poldice': 'poldice_mine',
'pole-arm': 'pole_weapon',
'pole-axe': 'pollaxe',
'pole-cat': 'polecat',
'pole-cats': 'polecat',
'pole-dancing': 'pole_dance',
'pole-placement': 'nonlinear_control',
'pole-position': 'pole_position',
'pole-star': 'polaris',
'pole-vaulting': 'pole_vault',
'poleand': 'poland',
'polearm': 'pole_weapon',
'polearms': 'pole_weapon',
'poleaxe': 'pollaxe',
'poleboat': 'barge',
'polecats': 'polecat',
'poledancing': 'pole_dance',
'polehinke': 'comair_flight_191',
'polei': 'pula',
'poleis': 'polis',
'polelu': 'puran_poli',
'polemaetus': 'martial_eagle',
'polemarchos': 'polemarch',
'polemic': 'polemics',
'polemical': 'polemics',
'polemically': 'polemics',
'polemicalness': 'polemics',
'polemicism': 'polemics',
'polemicisms': 'polemics',
'polemicist': 'polemics',
'polemicistic': 'polemics',
'polemicists': 'polemics',
'polemicizing': 'polemics',
'polemies': 'polemics',
'polemism': 'polemics',
'polemist': 'polemics',
'polemistic': 'polemics',
'polemists': 'polemics',
'polemo': 'polemon',
'polemology': 'war',
'polemoniales': 'solanales',
'polemy': 'polemics',
'polen': 'poland',
'polenka': 'palinka',
'polentani': 'da_polenta_family',
'polerio': 'giulio_cesare_polerio',
'poles': 'pole',
'poleshuks': 'poleszuk',
'polesie': 'polesia',
'poleski': 'polesie_national_park',
'polesski': 'polessky_district',
'polesskii': 'polessky_district',
'polesskiy': 'polessky_district',
'polesskoye': 'poliske',
'polessky': 'polessky_district',
'polestar': 'pole_star',
'polestars': 'pole_star',
'polesye': 'polesia',
'poleszuks': 'poleszuk',
'polet': 'polet_airlines',
'polevault': 'pole_vault',
'polevaulting': 'pole_vault',
'polevoi': 'slavic_fairies',
'polewig': 'tadpole',
'polewiki': 'polevik',
'poleyns': 'poleyn',
'polhem': 'christopher_polhem',
'polhengoda': 'narahenpita',
'poli': 'centricom',
'poli-sci': 'political_science',
'poliachi': 'pagliacci',
'poliakov': 'polyakov',
'poliamory': 'polyamory',
'polianite': 'pyrolusite',
'polians': 'polans',
'poliarny': 'polyarny_airport',
'polias': 'athena',
'policarp': 'polycarp',
'policarpa': 'policarpa_salavarrieta',
'policast': 'evaporative-pattern_casting',
'policastro': 'policastro_bussentino',
'police-man': 'police_officer',
'police-men': 'police',
'police-state': 'police_state',
'police-woman': 'police',
'police-women': 'police',
'policeforces': 'police',
'policeman': 'police_officer',
'policemen': 'police_officer',
'policenaughts': 'policenauts',
'policestory-2': 'police_story_2',
'policewoman': 'police_officer',
'policharki': 'pul-e-charkhi_prison',
'polichrono': 'polychrono',
'policies': 'policy',
'policing': 'police',
'policium': 'darmstadtium',
'policki': 'police_county',
'policlinic': 'polyclinic',
'policlinics': 'polyclinic',
'policy-maker': 'policy',
'policy-makers': 'policy',
'policy-mix': 'policy_mix',
'policy-shops': 'numbers_game',
'policymaker': 'politician',
'policymakers': 'politician',
'polidarius': 'podalirius',
'polideportivo': 'sports_club',
'polidesportivo': 'sports_club',
'polidoro': 'polydoro',
'polidoron': 'polydoro',
'poliez': 'poliez-le-grand',
'poligar': 'palaiyakkarar',
'poligars': 'palaiyakkarar',
'poligeenan': 'carrageenan',
'poligiros': 'polygyros',
'polignano': 'polignano_a_mare',
'poligodial': 'polygodial',
'polihale': 'polihale_state_park',
'polihrono': 'polychrono',
'polihronon': 'polychrono',
'poliii': 'dna_polymerase_iii_holoenzyme',
'polija': 'poland',
'polik': 'haloprogin',
'polimi': 'politecnico_di_milano',
'polimorphism': 'polymorphism',
'polimotor': 'plastic_automotive_engine',
'polination': 'pollination',
'polinium': 'gottfried_osann',
'polinka': 'palinka',
'polinya': 'polynya',
'polio': 'poliomyelitis',
'polioencephalomalasia': 'polioencephalomalacia',
'poliolais': 'white-tailed_warbler',
'poliomyelitiss': 'poliomyelitis',
'poliomyletis': 'poliomyelitis',
'poliomylitis': 'poliomyelitis',
'polioptilidae': 'gnatcatcher',
'poliorketika': 'poliorcetica',
'poliorketikon': 'poliorcetica',
'poliosaurus': 'ophiacodon',
'polioviruses': 'poliovirus',
'polioxolmis': 'rufous-webbed_bush-tyrant',
'polip': 'polyp',
'poliphili': 'hypnerotomachia_poliphili',
'poliphilo': 'hypnerotomachia_poliphili',
'polirinia': 'polyrrhenia',
'polisar': 'barry_louis_polisar',
'polisario': 'polisario_front',
'polisen': 'swedish_police_service',
'polisfoerbundet': 'swedish_police_union',
'polisforbundet': 'swedish_police_union',
'polish-american': 'polish_american',
'polish-americans': 'polish_american',
'polish-brazilians': 'polish_brazilian',
'polish-canadian': 'polish_canadians',
'polish-canadians': 'polish_canadians',
'polish-czech': 'polish_minority_in_the_czech_republic',
'polish-filipino': 'demographics_of_the_philippines',
'polish-filipinos': 'demographics_of_the_philippines',
'polish-germans': 'poles_in_germany',
'polish-jew': 'history_of_the_jews_in_poland',
'polish-jewish': 'history_of_the_jews_in_poland',
'polish-korean': 'korean_polish',
'polish-koreans': 'korean_polish',
'polish-silesian': 'silesian_language',
'polishlanguage': 'polish_language',
'polisia': 'polesia',
'polissena': 'polissena_of_san_macario',
'polissia': 'polesia',
'polissya': 'polesia',
'polist': 'polist_river',
'polistini': 'polistes',
'politbiuro': 'politburo',
'politboro': 'politburo',
'politbuero': 'politburo',
'politbureau': 'politburo',
'politbyro': 'politburo',
'politea': 'politeia',
'politechnic': 'institute_of_technology',
'politechnika': 'vocational_university',
'politeism': 'polytheism',
'politeuma': 'polity',
'politi': 'police_of_denmark',
'politian': 'poliziano',
'politianus': 'poliziano',
'politic': 'politics',
'political': 'politics',
'political-correctness': 'political_correctness',
'political-military': 'stratocracy',
'politicalarena': 'politicalarena.com',
'politicalbetting': 'politicalbetting.com',
'politicalcompass.org': 'political_compass',
'politically': 'politics',
'politically-correct': 'political_correctness',
'politicalphilosophy': 'political_philosophy',
'politicalspectrum': 'political_spectrum',
'politican': 'politician',
'politicians': 'politician',
'politicisation': 'politicized_issue',
'politicisations': 'politicized_issue',
'politicise': 'politicized_issue',
'politicised': 'politicized_issue',
'politiciser': 'politicized_issue',
'politicisers': 'politicized_issue',
'politicises': 'politicized_issue',
'politicising': 'politicized_issue',
'politicization': 'politicized_issue',
'politicizations': 'politicized_issue',
'politicize': 'politicized_issue',
'politicized': 'politicized_issue',
'politicizer': 'politicized_issue',
'politicizers': 'politicized_issue',
'politicizes': 'politicized_issue',
'politicizing': 'politicized_issue',
'politico.com': 'the_politico',
'politicology': 'political_science',
'politicorps': 'bus_project',
'politicos': 'the_politico',
'politics/wanted': 'politics',
'politicspa.com': 'politicspa',
'politidirektoratet': 'national_police_directorate',
'polities': 'polity',
'politifact': 'st._petersburg_times',
'politifact.com': 'st._petersburg_times',
'politik': 'realpolitik',
'politiken.dk': 'politiken',
'politio': 'politico',
'politiques': 'politique',
'politition': 'politician',
'politizdat': 'publishing_houses_in_the_soviet_union',
'politkovskaja': 'anna_politkovskaya',
'politkovskaya': 'anna_politkovskaya',
'politna': 'nonsense_verse',
'politologist': 'political_science',
'politology': 'political_science',
'politropic': 'polytropic_process',
'politruk': 'political_commissar',
'polizia': 'polizia_di_stato',
'poliziesco': 'poliziotteschi',
'poliziottesco': 'poliziotteschi',
'poljicica': 'bosnian_cyrillic',
'poljud': 'gradski_stadion_u_poljudu',
'polka-dot': 'polka_dot',
'polka-dots': 'polka_dot',
'polkadot': 'polka_dot',
'polkadotroo': 'polka_dot_door',
'polkaroo': 'polka_dot_door',
'polkas': 'polka',
'polke': 'sigmar_polke',
'polkinghorne': 'john_polkinghorne',
'polkomtel': 'plus_gsm',
'polkowicki': 'polkowice_county',
'polkweed': 'pokeweed',
'poll': 'dna_polymerase_lambda',
'poll-tax': 'poll_tax',
'poll-watcher': 'scrutineer',
'pollachius': 'pollock',
'pollaganda': 'propaganda',
'polland': 'poland',
'pollanism': 'michael_pollan',
'pollaphuca': 'poulaphouca',
'pollarded': 'pollarding',
'pollards': 'pollarding',
'pollasecco': 'grignolino',
'polled': 'polled_livestock',
'pollenation': 'pollination',
'pollenia': 'cluster_fly',
'polleniser': 'pollenizer',
'pollenizers': 'pollenizer',
'pollens': 'pollen',
'pollenzo': 'pollentia',
'pollex': 'thumb',
'pollical': 'thumb',
'pollich': 'johan_adam_pollich',
'pollies': 'pollie_awards',
'pollighautcherry': 'palakkad',
'pollinaria': 'pollinium',
'pollinarium': 'pollinium',
'pollinate': 'pollination',
'pollinated': 'pollination',
'pollinater': 'pollinator',
'pollinating': 'pollination',
'pollinators': 'pollinator',
'polling': 'poll',
'pollini': 'maurizio_pollini',
'pollinia': 'pollinium',
'pollinisation': 'pollination',
'pollinizer': 'pollenizer',
'pollish': 'polishing',
'pollisimo': 'ocellated_turkey',
'pollitarianism': 'pollotarianism',
'polliwig': 'tadpole',
'polliwog': 'tadpole',
'polljavatnet': 'degnepollvatnet',
'pollo-vegetarianism': 'pollotarianism',
'pollockshields': 'pollokshields',
'polloksheilds': 'pollokshields',
'pollonarrua': 'polonnaruwa',
'pollotarian': 'pollotarianism',
'pollovegetarianism': 'pollotarianism',
'pollox': 'pollux',
'polls': 'poll',
'pollsmoor': 'pollsmoor_prison',
'pollsters': 'pollster',
'polltax': 'poll_tax',
'pollutants': 'pollution',
'pollute': 'pollution',
'polluted': 'pollution',
'polluter': 'pollution',
'polluter-pays': 'polluter_pays_principle',
'polluting': 'pollution',
'pollyana': 'pollyanna',
'pollyannaism': 'pollyanna_principle',
'pollyannish': 'pollyanna_principle',
'pollybait': 'sand_goby',
'pollyfish': 'parrotfish',
'pollywog': 'tadpole',
'polm': 'dna_polymerase_mu',
'polnd': 'poland',
'polnocny': 'polnocny_class_landing_ship',
'polnoglasie': 'slavic_liquid_metathesis_and_pleophony',
'polo-neck': 'polo_neck',
'polock': 'polack',
'polockas': 'polotsk',
'polocross': 'polocrosse',
'polocyte': 'polar_body',
'pologne': 'poland',
'polokwane': 'pietersburg',
'polonaises': 'polonaise',
'polonez': 'fso_polonez',
'polonisation': 'polonization',
'polonita': 'feliceni',
'polonize': 'polonization',
'polonized': 'polonization',
'polonnaruva': 'polonnaruwa',
'polonnoye': 'polonne',
'polonophobe': 'anti-polish_sentiment',
'polonophobia': 'anti-polish_sentiment',
'polonophobic': 'anti-polish_sentiment',
'polonska-vasylenko': 'nataliia_polonska-vasylenko',
'poloroid': 'polaroid',
'poloskogo': 'khabomai',
'polotsky': 'hans_jakob_polotsky',
'polovchi': 'cumans',
'polovci': 'cumans',
'polovets': 'kipchaks',
'polovtsi': 'kipchaks',
'polovtsian': 'cumans',
'polovtsians': 'cumans',
'polovtsky': 'cumans',
'polovtsy': 'cumans',
'polow': 'pilaf',
'poloxamers': 'poloxamer',
'poloxamine': 'poloxamer',
'polozk': 'polotsk',
'polp': 'principle_of_least_privilege',
'polpescetarian': 'pescetarianism',
'polpot': 'pol_pot',
'polr2': 'polr2a',
'polr2d': 'rna_polymerase_ii_subunit_b4',
'polra': 'polr2a',
'polrevkom': 'provisional_polish_revolutionary_committee',
'polrewkom': 'provisional_polish_revolutionary_committee',
'polri': 'indonesian_national_police',
'polsham': 'st_cuthbert_out',
'polski': 'polish_language',
'polskor': 'polska',
'polsstokverspringen': 'fierljeppen',
'polszczyzna': 'polish_language',
'poltavshchyna': 'poltava_oblast',
'poltergeists': 'poltergeist',
'poltergiest': 'poltergeist',
'poltics': 'politics',
'poltix': 'disadvantage',
'poltoratsk': 'ashgabat',
'poltrigeist': 'poltergeist',
'poludeukeis': 'castor_and_pollux',
'poludnica': 'lady_midday',
'poludnioworoztoczanski': 'south_roztocze_landscape_park',
'poludnitsa': 'slavic_fairies',
'polundica': 'slavic_fairies',
'polution': 'pollution',
'polwig': 'tadpole',
'poly-': 'poly',
'poly-3-hydroxybutyrate': 'polyhydroxybutyrate',
'poly-800': 'korg_poly-800',
'poly-a': 'polyadenylation',
'poly-adp-ribosylation': 'adp-ribosylation',
'poly-alpha-olefin': 'polyolefin',
'poly-b-hydroxybutyrate': 'polyhydroxybutyrate',
'poly-drug': 'poly_drug_use',
'poly-lysine': 'polylysine',
'poly-play': 'poly_play',
'poly-si': 'crystalline_silicon',
'poly-trauma': 'polytrauma',
'polya': 'polyadenylation',
'polyacene': 'acene',
'polyacetal': 'polyoxymethylene_plastic',
'polyacrylate': 'acrylate_polymer',
'polyaddition': 'chain_growth_polymerisation',
'polyadenation': 'polyadenylation',
'polyadenosine': 'polyadenylation',
'polyadenylate': 'polyadenylation',
'polyagreement': 'polypersonal_agreement',
'polyainos': 'polyaenus',
'polyalcohol': 'sugar_alcohol',
'polyalkene': 'polyolefin',
'polyalpha-olefin': 'polyolefin',
'polyalpha-olefins': 'polyolefin',
'polyalphaolefin': 'polyolefin',
'polyalphaolefins': 'polyolefin',
'polyamideimide': 'polyamide-imide',
'polyamides': 'polyamide',
'polyamines': 'polyamine',
'polyamond': 'polyiamond',
'polyamonds': 'polyiamond',
'polyamor': 'polyamory',
'polyamore': 'polyamory',
'polyamorie': 'polyamory',
'polyamoris': 'polyamory',
'polyamorist': 'polyamory',
'polyamorists': 'polyamory',
'polyamorous': 'polyamory',
'polyamorphic': 'polyamorphism',
'polyamourous': 'polyamory',
'polyamoury': 'polyamory',
'polyan': 'polian',
'polyandrous': 'polyandry',
'polyangiaceae': 'myxobacteria',
'polyanion': 'polyelectrolyte',
'polyanitsa': 'bogatyr',
'polyanna': 'pollyanna',
'polyans': 'polans',
'polyanthos': 'george_washington_dixon',
'polyaramid': 'aramid',
'polyarbonate': 'polycarbonate',
'polyarmory': 'polyamory',
'polyarnoe': 'polyarny',
'polyarnyy': 'kilpyavr',
'polyaromatic': 'aromatic_hydrocarbon',
'polyarteritis': 'polyarteritis_nodosa',
'polyarthralgia': 'arthralgia',
'polyarthropathies': 'polyarthritis',
'polyatomic': 'polyatomic_ion',
'polybaramin': 'baraminology',
'polybenzimidazole': 'polybenzimidazole_fiber',
'polybenzobisoxazole': 'zylon',
'polybenzoxazole': 'zylon',
'polybezier': 'beziergon',
'polybiini': 'epiponini',
'polybios': 'polybius',
'polybolus': 'polybolos',
'polybor': 'disodium_octaborate_tetrahydrate',
'polybos': 'polybus',
'polybotum': 'polybotus',
'polybromobiphenyl': 'polybrominated_biphenyl',
'polybromobiphenyls': 'polybrominated_biphenyl',
'polycameral': 'multicameralism',
'polycameralism': 'multicameralism',
'polycaprolactam': 'nylon_6',
'polycaps': 'camelcase',
'polycarb': 'polycarbonate',
'polycarbonates': 'polycarbonate',
'polycarpous': 'polycarpic',
'polycarpus': 'polycarp',
'polycation': 'polyelectrolyte',
'polycentrid': 'polycentridae',
'polycephal': 'polycephaly',
'polycephalic': 'polycephaly',
'polyceratidae': 'polyceridae',
'polychaeta': 'polychaete',
'polychaetes': 'polychaete',
'polycheles': 'polychelidae',
'polychilos': 'phalaenopsis',
'polychlorbiphenyl': 'polychlorinated_biphenyl',
'polychlorbiphenyls': 'polychlorinated_biphenyl',
'polychlorobiphenyl': 'polychlorinated_biphenyl',
'polychlorocamphene': 'toxaphene',
'polychloroethene': 'polyvinyl_chloride',
'polychloroprene': 'neoprene',
'polychondritis': 'relapsing_polychondritis',
'polychora': 'polychoron',
'polychoral': 'antiphon',
'polychordal': 'polychord',
'polychotomy': 'polytomy',
'polychroa': 'nessaea',
'polychromy': 'polychrome',
'polychronic': 'chronemics',
'polychronicon': 'ranulf_higdon',
'polychrotinae': 'polychrotidae',
'polycillin': 'ampicillin',
'polyclad': 'polycladida',
'polyclar': 'polyvinylpyrrolidone',
'polycleitus': 'polykleitos',
'polycletus': 'polyclitus',
'polyclimax': 'climax_community',
'polyclinics': 'polyclinic',
'polycoa': 'bryozoa',
'polycomb': 'polycomb-group_proteins',
'polycomb-group': 'polycomb-group_proteins',
'polycondensation': 'step-growth_polymerization',
'polycontexturality': 'subjectivity',
'polycopoidea': 'polycopidae',
'polycosanol': 'policosanol',
'polycotylid': 'polycotylidae',
'polycrystalline': 'polycrystal',
'polycrystals': 'polycrystal',
'polycyma': 'candalides',
'polycystin-1': 'pkd1',
'polycystin-2': 'polycystic_kidney_disease_2',
'polycystina': 'polycystine',
'polycystinea': 'polycystine',
'polycystinia': 'polycystine',
'polycytemia': 'polycythemia',
'polycythaemia': 'polycythemia',
'polycythemic': 'polycythemia',
'polydactyl': 'polydactyly',
'polydactylic': 'polydactyly',
'polydactylism': 'polydactyly',
'polydactylus': 'threadfin',
'polydama': 'polydamna',
'polydegmon': 'hades',
'polydektes': 'polydectes',
'polydeukes': 'castor_and_pollux',
'polydiacetylene': 'polydiacetylenes',
'polydisk': 'polydisc',
'polydispersed': 'polydisperse',
'polydispersity': 'polydisperse',
'polydnaviridae': 'polydnavirus',
'polydor': 'polydor_records',
'polydoron': 'polydoro',
'polydrug': 'poly_drug_use',
'polyedge': 'polystick',
'polyektone': 'polyketone',
'polyeleon': 'polyeleos',
'polyembryonic': 'polyembryony',
'polyenes': 'polyene',
'polyenos': 'polyaenus',
'polyenus': 'polyaenus',
'polyepoxide': 'epoxy',
'polyerata': 'amazilia',
'polyesters': 'polyester',
'polyestrous': 'estrous_cycle',
'polyethelene': 'polyethylene',
'polyethene': 'polyethylene',
'polyetheretherketone': 'peek',
'polyetherethersulfone': 'polysulfone',
'polyetherimides': 'polyetherimide',
'polyethers': 'ether',
'polyethersulfone': 'polysulfone',
'polyethoxylated': 'ethoxylation',
'polyethyelene': 'polyethylene',
'polyethyleneglycol': 'polyethylene_glycol',
'polyethylenes': 'polyethylene',
'polyethyleneterephthalate': 'polyethylene_terephthalate',
'polyethyne': 'polyacetylene',
'polyetilene': 'polyethylene',
'polyeuctes': 'polyeuctus',
'polyeuctos': 'patriarch_polyeuctus_of_constantinople',
'polyface': 'polyface_farm',
'polyfax': 'polymyxin',
'polyfidelitous': 'polyfidelity',
'polyfilla': 'spackling_paste',
'polyfluorenes': 'polyfluorene',
'polyformaldehyde': 'polyoxymethylene_plastic',
'polygalales': 'polygalaceae',
'polygamic': 'polygamy',
'polygamist': 'polygamy',
'polygamists': 'polygamy',
'polygamma': 'polygamma_function',
'polygamous': 'polygamy',
'polygany': 'polygyny',
'polygar': 'palaiyakkarar',
'polygars': 'palaiyakkarar',
'polygeenan': 'carrageenan',
'polygeline': 'haemaccel',
'polygeneration': 'cogeneration',
'polygenesis': 'polygenism',
'polygenetic': 'quantitative_trait_locus',
'polygenic': 'quantitative_trait_locus',
'polygenist': 'polygenism',
'polygenists': 'polygenism',
'polyglandular': 'gland',
'polyglas': 'goodyear_polyglas_tire',
'polyglucosan': 'glucan',
'polygnotos': 'polygnotus',
'polygonaceous': 'polygonaceae',
'polygonal': 'polygon',
'polygons': 'polygon',
'polygraphia': 'cryptography',
'polygraphs': 'polygraph',
'polygraphy': 'polygraph',
'polygynandrous': 'polygynandry',
'polygyne': 'gyne',
'polygynous': 'polygyny',
'polyhedra': 'polyhedron',
'polyhedral': 'dihedral',
'polyhedroid': 'polychoron',
'polyhedrons': 'polyhedron',
'polyhexanide': 'papb',
'polyhrono': 'polychrono',
'polyhydramnion': 'polyhydramnios',
'polyhydramniosis': 'polyhydramnios',
'polyhydroamniosis': 'polyhydramnios',
'polyhydroxyaldehyde': 'aldose',
'polyhydroxyaldehydes': 'aldose',
'polyhydroxyalkanoate': 'polyhydroxyalkanoates',
'polyhydroxybutanoate': 'polyhydroxybutyrate',
'polyhydroxybutyrates': 'polyhydroxybutyrate',
'polyhydroxyketone': 'carbohydrate',
'polyidus': 'polyeidos',
'polyimides': 'polyimide',
'polyisobutene': 'butyl_rubber',
'polyisobutylene': 'butyl_rubber',
'polyisoprene': 'natural_rubber',
'polykaon': 'polycaon',
'polykletus': 'polyclitus',
'polylactide': 'polylactic_acid',
'polyline': 'polygonal_chain',
'polylingual': 'multilingualism',
'polylinker': 'multiple_cloning_site',
'polylog': 'polylogarithm',
'polymannoacetate': 'acemannan',
'polymascotfoamalate': 'homestar_runner',
'polymastism': 'polymastia',
'polymathic': 'polymath',
'polymaths': 'polymath',
'polymathy': 'polymath',
'polymegethism': 'corneal_endothelium',
'polymenorrhea': 'menstrual_disorder',
'polymerases': 'polymerase',
'polymeric': 'polymer',
'polymerisation': 'polymerization',
'polymerised': 'polymerization',
'polymerizations': 'polymerization',
'polymerize': 'polymerization',
'polymerized': 'polymer',
'polymerizes': 'polymerization',
'polymers': 'polymer',
'polymetallic': 'polymetal',
'polymethines': 'polymethine',
'polymethylene': 'polyethylene',
'polymethylenes': 'polyethylene',
'polymetre': 'polyrhythm',
'polymetrics': 'plyometrics',
'polymictic': 'polymictic_lake',
'polymide': 'polyimide',
'polymino': 'polyomino',
'polymixiid': 'beardfish',
'polymixiidae': 'beardfish',
'polymixiiformes': 'beardfish',
'polymixin': 'polymyxin',
'polymixins': 'polymyxin',
'polymnos': 'prosymnus',
'polymnus': 'prosymnus',
'polymorphic': 'polymorphism',
'polymorphisms': 'polymorphism',
'polymorphonuclearcyte': 'neutrophil_granulocyte',
'polymorphous': 'polymorphism',
'polymorphs': 'polymorph',
'polymox': 'amoxicillin',
'polymyalgia': 'polymyalgia_rheumatica',
'polymyxins': 'polymyxin',
'polymyxiomorpha': 'beardfish',
'polyna': 'polynya',
'polyneices': 'polynices',
'polyneikes': 'polynices',
'polynemid': 'threadfin',
'polynemidae': 'threadfin',
'polynephritis': 'pyelonephritis',
'polynesie': 'french_polynesia',
'polyneuritis': 'neuritis',
'polyneuropathies': 'polyneuropathy',
'polyneutron': 'neutronium',
'polynia': 'polynya',
'polynomial-time': 'polynomial_time',
'polynomials': 'polynomial',
'polynorbornene': 'norbornene',
'polynormand': 'polynormande',
'polynucleate': 'polynuclear',
'polynucleotidase': 'nuclease',
'polynumber': 'multicomplex_number',
'polynumbers': 'multicomplex_number',
'polynyas': 'polynya',
'polyodon': 'american_paddlefish',
'polyodontid': 'paddlefish',
'polyodontidae': 'paddlefish',
'polyodontosaurus': 'troodon',
'polyogamy': 'polygamy',
'polyolbion': 'poly-olbion',
'polyolefine': 'polyolefin',
'polyolefins': 'polyolefin',
'polyols': 'polyol',
'polyoma': 'polyomavirus',
'polyomaviridae': 'polyomavirus',
'polyominoes': 'polyomino',
'polyonymus': 'bronze-tailed_comet',
'polyorchid': 'polyorchidism',
'polyosmaceae': 'polyosma',
'polyovyk': 'slavic_fairies',
'polyoxometallate': 'polyoxometalate',
'polyoxybenzylmethylenglycolanhydride': 'bakelite',
'polyoxylation': 'ethoxylation',
'polyoxymethylene': 'polyoxymethylene_plastic',
'polyoxymetylene': 'polyoxymethylene_plastic',
'polypaudio': 'pulseaudio',
'polype': 'polyp',
'polypemon': 'procrustes',
'polypeptide': 'peptide',
'polypeptides': 'peptide',
'polypersonalism': 'polypersonal_agreement',
'polyphagan': 'polyphaga',
'polyphagic': 'polyphagia',
'polyphaginae': 'polyphagidae',
'polyphagy': 'polyphagia',
'polyphasic': 'polyphasic_sleep',
'polyphem': 'polyphem_missile',
'polyphemo': 'polyphemus',
'polyphemos': 'polyphemus',
'polyphenol-antioxidant': 'polyphenol_antioxidant',
'polyphenolic': 'polyphenol',
'polyphenolics': 'polyphenol',
'polyphenols': 'polyphenol',
'polyphenylethene': 'polystyrene',
'polyphenylsulphone': 'polyphenylsulfone',
'polyphone': 'polyphony',
'polyphonic': 'polyphony',
'polyphonies': 'polyphony',
'polyphyletic': 'polyphyly',
'polyphyony': 'polyphony',
'polypipnus': 'polyipnus',
'polyplacophora': 'chiton',
'polyplacophoran': 'chiton',
'polyplay': 'poly_play',
'polyploid': 'polyploidy',
'polypodiophyta': 'fern',
'polypodiophyte': 'fern',
'polypodiopsida': 'pteridopsida',
'polypody': 'polypodiaceae',
'polypoetes': 'polypoites',
'polypoidy': 'polyploidy',
'polypolyhedra': 'polyhedral_compound',
'polypompholyx': 'utricularia_subg._polypompholyx',
'polypores': 'polypore',
'polyporolithon': 'mesophyllum',
'polyporus': 'polypore',
'polyprenols': 'polyprenol',
'polyprionid': 'wreckfish',
'polyprionidae': 'wreckfish',
'polypro': 'polypropylene',
'polypropelyne': 'polypropylene',
'polypropene': 'polypropylene',
'polypropylenes': 'polypropylene',
'polyprotein': 'protein',
'polyprotic': 'acid',
'polyps': 'polyp',
'polypterid': 'bichir',
'polypteridae': 'bichir',
'polypteriformes': 'bichir',
'polyq': 'trinucleotide_repeat_disorder',
'polyquaternium-47': 'polyquaternium',
'polyquaternium-7': 'polyquaternium',
'polyradiculopathy': 'radiculopathy',
'polyrectangle': 'jordan_measure',
'polyrhythmic': 'polyrhythm',
'polyrhythmics': 'polyrhythm',
'polyrhythms': 'polyrhythm',
'polyribosome': 'polysome',
'polyribosomes': 'polysome',
'polyrythm': 'polyrhythm',
'polys': 'granulocyte',
'polysaccharides': 'polysaccharide',
'polysalt': 'polyelectrolyte',
'polysci': 'political_science',
'polyseme': 'polysemy',
'polysemes': 'polysemy',
'polysemia': 'polysemy',
'polysemic': 'polysemy',
'polysemous': 'polysemy',
'polysendeton': 'polysyndeton',
'polyserositis': 'familial_mediterranean_fever',
'polysexual': 'polysexuality',
'polysilicon': 'crystalline_silicon',
'polysiloxane': 'silicone',
'polysiphionia': 'polysiphonia',
'polysleep': 'polyphasic_sleep',
'polysomes': 'polysome',
'polysomnia': 'polysomnography',
'polysomnogram': 'polysomnography',
'polysomnograph': 'polysomnography',
'polysomnographic': 'polysomnography',
'polysorbates': 'polysorbate',
'polysperchon': 'polyperchon',
'polyspiro': 'spiro_compound',
'polysporangiomorpha': 'polysporangiophyte',
'polysporangiophytes': 'polysporangiophyte',
'polysporinopsis': 'acarospora',
'polysquare': 'polyomino',
'polyssena': 'polyxena',
'polystachyeae': 'polystachya',
'polysticks': 'polygon',
'polystipos': 'polystypos',
'polystylistic': 'polystylism',
'polystylus': 'phalaenopsis',
'polystyrenes': 'polystyrene',
'polystyrol': 'polystyrene',
'polysulfides': 'polysulfide',
'polysulfones': 'polysulfone',
'polysulphone': 'polysulfone',
'polyswitch': 'resettable_fuse',
'polysyllabic': 'syllable',
'polysyllable': 'syllable',
'polysyllogisms': 'polysyllogism',
'polysynaptic': 'neural_pathway',
'polysynthesis': 'polysynthetic_language',
'polysynthetic': 'polysynthetic_language',
'polysyto': 'polysito',
'polytarp': 'tarpaulin',
'polytech': 'polytechnic',
'polytechnical': 'institute_of_technology',
'polytechnics': 'institute_of_technology',
'polytechnik': 'veb_polytechnik',
'polytecnic': 'institute_of_technology',
'polytene': 'polytene_chromosome',
'polyterpenoid': 'terpene',
'polytetracfluoroethane': 'polytetrafluoroethylene',
'polytetraflouroethylene': 'polytetrafluoroethylene',
'polytetrafluorethylene': 'polytetrafluoroethylene',
'polytetrafluoroethene': 'polytetrafluoroethylene',
'polytetrafluroethylene': 'polytetrafluoroethylene',
'polythanol': 'calcium_phosphide',
'polytheist': 'polytheism',
'polytheistic': 'polytheism',
'polytheists': 'polytheism',
'polythelia': 'supernumerary_nipple',
'polythene': 'polyethylene',
'polytherapy': 'polypharmacy',
'polythiophenes': 'polythiophene',
'polytimetus': 'zeravshan_river',
'polytomography': 'tomography',
'polytomous': 'polytomy',
'polytonal': 'polytonality',
'polytonic': 'greek_diacritics',
'polytopes': 'polytope',
'polytrack': 'synthetic_racetrack_surfaces_for_horse_racing',
'polytrichales': 'polytrichaceae',
'polytrichidae': 'polytrichaceae',
'polytrichopsida': 'polytrichaceae',
'polytrioxane': 'polyoxymethylene_plastic',
'polytropic': 'polytropic_process',
'polytropism': 'monotropism',
'polytype': 'polymorphs_of_silicon_carbide',
'polytypes': 'polymorphs_of_silicon_carbide',
'polytypism': 'polymorphs_of_silicon_carbide',
'polyu': 'hong_kong_polytechnic_university',
'polyubiquitination': 'ubiquitin',
'polyunsaturate': 'unsaturated_fat',
'polyunsaturated': 'polyunsaturated_fat',
'polyunsaturation': 'polyunsaturated_fat',
'polyurathane': 'polyurethane',
'polyurethan': 'polyurethane',
'polyurethanes': 'polyurethane',
'polyushko-pole': 'polyushko_pole',
'polyushko-polye': 'polyushko_pole',
'polyvector': 'multivector',
'polyvend': 'glasco_polyvend_lektrovend',
'polyvidone': 'polyvinylpyrrolidone',
'polyvinylacetate': 'polyvinyl_acetate',
'polyvinylchloride': 'polyvinyl_chloride',
'polyvinylidenedifluoride': 'polyvinylidene_fluoride',
'polyvinylphenol': 'poly-4-vinylphenol',
'polyvinylpolypyrrolidone': 'polyvinylpyrrolidone',
'polyvinylpyrrolidonee': 'polyvinylpyrrolidone',
'polyvinyls': 'polyvinyl',
'polyvinyltoluene': 'polyvinyl_toluene',
'polyviol': 'polyvinyl_alcohol',
'polyvox': 'polivoks',
'polywere': 'otherkin',
'polywog': 'tadpole',
'polywood': 'engineered_wood',
'polyxeinus': 'polyxenus',
'polyxene': 'polyxena',
'polyxenos': 'telegony',
'polyzalus': 'charioteer_of_delphi',
'polyzoa': 'bryozoa',
'polyzoan': 'bryozoa',
'polyzoon': 'bryozoa',
'polzunov': 'ivan_polzunov',
'pom-a-poo': 'poodle_hybrid',
'pom-pom': 'pom-pon',
'pom-ponning': 'pom-pon',
'pom.xml': 'apache_maven',
'pomac': 'pommac',
'pomacanthid': 'marine_angelfish_group',
'pomacanthidae': 'marine_angelfish_group',
'pomaceae': 'ampullariidae',
'pomacentrid': 'pomacentridae',
'pomadasyidae': 'haemulidae',
'pomades': 'pomade',
'pomagalski': 'poma',
'pomak': 'pomaks',
'pomaki': 'pomaks',
'pomaklar': 'pomaks',
'pomapoo': 'poodle_hybrid',
'pomarosa': 'syzygium',
'pomarrosa': 'syzygium',
'pomassl': 'franz_pomassl',
'pomata': 'pomade',
'pomatomid': 'bluefish',
'pomatomidae': 'bluefish',
'pomatomus': 'bluefish',
'pomatorhinidae': 'scimitar-babbler',
'pomatostomidae': 'australo-papuan_babbler',
'pomatostomus': 'australo-papuan_babbler',
'pomatsi': 'pomaks',
'pomatum': 'pomade',
'pomatums': 'pomade',
'pombe': 'schizosaccharomyces_pombe',
'pomc': 'proopiomelanocortin',
'pomchi': 'dog_hybrid',
'pomcite': 'spondias_dulcis',
'pomdp': 'partially_observable_markov_decision_process',
'pomegranade': 'pomegranate',
'pomegranates': 'pomegranate',
'pomegranete': 'pomegranate',
'pomegranetes': 'pomegranate',
'pomegranite': 'pomegranate',
'pomegrante': 'pomegranate',
'pomel': 'auguste_pomel',
'pomello': 'pomelo',
'pomellos': 'pomelo',
'pomeloes': 'grapefruit',
'pomelos': 'pomelo',
'pomeralia': 'pomerelia',
'pomerance': 'carl_pomerance',
'pomerania-stettin': 'duchy_of_pomerania',
'pomerania-wolgast': 'duchy_of_pomerania',
'pomeranians': 'pomeranian',
'pomeranus': 'johannes_bugenhagen',
'pomerellen': 'pomerelia',
'pomerellia': 'pomerelia',
'pomeroon': 'pomeroon_river',
'pomers': 'scarlett_pomers',
'pomes': 'pome',
'pomesania': 'pomesanians',
'pomesanien': 'pomesanians',
'pometia': 'suessa_pometia',
'pomezanians': 'pomesanians',
'pomezanien': 'pomesanians',
'pomgranate': 'pomegranate',
'pomgranite': 'pomegranate',
'pomiferin': 'maclura_pomifera',
'pomilio': 'fabbrica_aeroplani_ing._o._pomilio',
'pominville': 'jason_pominville',
'pomlazka': 'easter_monday',
'pommade': 'pomade',
'pomme': 'apple',
'pommecythere': 'spondias_dulcis',
'pommegranate': 'pomegranate',
'pommegranite': 'pomegranate',
'pommel-horse': 'pommel_horse',
'pommelo': 'pomelo',
'pommels': 'pommel',
'pommerac': 'syzygium',
'pommerania': 'pomerania',
'pommeranisch': 'pomeranian_language',
'pommerats': 'les_pommerats',
'pommerellen': 'pomerelia',
'pommern': 'pomerania',
'pommernschaf': 'pomeranian_coarsewool_sheep',
'pommernstellung': 'pomeranian_wall',
'pommerstellung': 'pomeranian_wall',
'pommes': 'french_fries',
'pommie': 'alternative_names_for_the_british',
'pommies': 'alternative_names_for_the_british',
'pomo': 'postmodernism',
'pomoan': 'pomoan_languages',
'pomoerium': 'pomerium',
'pomoideae': 'maloideae',
'pomola': 'pamola',
'pomolobus': 'shad',
'pomor': 'pomors',
'pomoranians': 'pomeranian',
'pomoravski': 'pomoravlje_district',
'pomorians': 'pomorian_old-orthodox_church',
'pomornik': 'zubr_class_lcac',
'pomorskie': 'pomeranian_voivodeship',
'pomortsy': 'pomorian_old-orthodox_church',
'pomorye': 'pomors',
'pomorze': 'pomerania',
'pomosexuality': 'pomosexual',
'pomoxis': 'crappie',
'pompador': 'pompadour',
'pompanos': 'pompano',
'pompe': 'glycogen_storage_disease_type_ii',
'pompeian': 'pompey',
'pompeians': 'pompey',
'pompeij': 'pompeii',
'pompej': 'pompeii',
'pompeji': 'pompeii',
'pompelmous': 'pomelo',
'pompetus': 'pompatus',
'pompeya': 'pompeii',
'pompeysaurus': 'xenoposeidon',
'pompholyx': 'dyshidrosis',
'pompilid': 'spider_wasp',
'pompilidae': 'spider_wasp',
'pompilius': 'numa_pompilius',
'pompion': 'pompion_hill_chapel',
'pompitous': 'pompatus',
'pompitus': 'pompatus',
'pompoko': 'pom_poko',
'pompom': 'pom-pon',
'pompoms': 'pom-pon',
'pompon': 'pom-pon',
'pomponatius': 'pietro_pomponazzi',
'pomponazzi': 'pietro_pomponazzi',
'pomponi': 'pomponius',
'pomposity': 'pompous',
'pompously': 'pompous',
'pompousness': 'pompous',
'pomr': 'revolutionary_marxist_workers_party',
'poms': 'alternative_names_for_the_british',
'pomum': 'pomade',
'pomus': 'thyroid_cartilage',
'pomz-2': 'pomz',
'pomz-2m': 'pomz',
'pon-cee-cee': 'paladine_roye',
'pon-chan': 'guru_guru_pon-chan',
'ponagansett': 'ponaganset',
'ponage': 'pwn',
'ponam': 'pakistan_oppressed_nations_movement',
'ponamu': 'pounamu',
'ponani': 'ponnani',
'ponapean': 'pohnpeian_language',
'ponaren': 'paneriai',
'ponch': 'chips',
'ponchielli': 'amilcare_ponchielli',
'ponchos': 'poncho',
'ponchozero': 'lake_ponchozero',
'poncirus': 'trifoliate_orange',
'pond-apple': 'annona_glabra',
'pond-lilies': 'nuphar',
'pond-skater': 'gerridae',
'pondberry': 'lindera_melissifolia',
'pondcypress': 'taxodium_ascendens',
'pondegi': 'beondegi',
'pondeljak': 'tom_pondeljak',
'ponderation': 'contrapposto',
'pondhawk': 'erythemis',
'pondicherry': 'puducherry',
'pondichery': 'puducherry',
'pondikonisi': 'pontikos',
'pondimin': 'fenfluramine',
'pondinil': 'mefenorex',
'pondlife': 'aquatic_ecosystem',
'pondoland': 'maputaland-pondoland_bushland_and_thickets',
'pondos': 'pondo',
'ponds': 'pond',
'pondscum': 'algae',
'pondskater': 'gerridae',
'pondy': 'puducherry',
'poned': 'pwn',
'ponedelnik': 'viktor_ponedelnik',
'ponerichthys': 'dunkleosteus',
'ponet': 'john_ponet',
'ponevezh': 'ponevezh_yeshiva',
'poney': 'pony',
'ponferradina': 'sd_ponferradina',
'pongal': 'thai_pongal',
'pongdong-ni': 'pongdong-ri',
'ponge': 'francis_ponge',
'ponggol': 'punggol',
'pongid': 'orangutan',
'pongidae': 'hominidae',
'pongids': 'orangutan',
'pongini': 'orangutan',
'ponglish': 'poglish',
'pongoid': 'orangutan',
'pongu': 'pongu_language',
'pongwe': 'pongwe_pogwe',
'pongwiffy': 'kaye_umansky',
'poni-tails': 'the_poni-tails',
'ponia': 'mrs.',
'poniard': 'poignard',
'poniarded': 'poignard',
'poniarding': 'poignard',
'poniards': 'poignard',
'poniatowski': 'poniatowski_family',
'ponicki': 'ponice',
'ponies': 'pony',
'ponikarovsky': 'alexei_ponikarovsky',
'ponikvica': 'ponikovica',
'ponil': 'fallugia',
'ponjhe': 'panaji',
'ponmela': 'en_lo_claro',
'ponmudy': 'k._ponmudy',
'ponnaganset': 'ponaganset',
'ponnagansett': 'ponaganset',
'ponnaiyan': 'c._ponnaiyan',
'ponnaiyar': 'ponnaiyar_river',
'ponniyinselvan': 'ponniyin_selvan',
'ponnje': 'panaji',
'ponoko': 'direct_digital_manufacturing',
'ponomarev': 'ponomaryov',
'ponomariov': 'ponomaryov',
'ponovezh': 'ponevezh_yeshiva',
'ponoy': 'ponoy_river',
'pons': 'pon',
'ponselle': 'rosa_ponselle',
'ponseti': 'ignacio_ponseti',
'ponsetti': 'ponseti_method',
'ponsi': 'ponzi_scheme',
'ponson': 'ponson_island',
'ponsse': 'ponsse_group',
'ponstan': 'mefenamic_acid',
'ponstel': 'mefenamic_acid',
'pont-au-change': 'pont_au_change',
'pont-de-beauvoisin': 'le_pont-de-beauvoisin',
'pont-de-braye': 'lavenay',
'pont-de-claix': 'le_pont-de-claix',
'pont-de-montvert': 'le_pont-de-montvert',
'pont-neuf': 'pont_neuf',
'pont-royal': 'pont_royal',
'pont-y-clun': 'pontyclun',
'pont-y-cymer': 'crosskeys',
'pont-y-pwl': 'pontypool',
'ponta': 'guru_guru_pon-chan',
'pontafel': 'pontebba',
'pontano': 'iovianus_pontanus',
'pontardulais': 'pontarddulais',
'pontargothi': 'pont-ar-gothi',
'pontastacus': 'astacus',
'pontault': 'pontault-combault',
'pontchatrain': 'lake_pontchartrain',
'pontcysyllte': 'pontcysyllte_aqueduct',
'ponte-leccia': 'morosaglia',
'pontecagnano': 'pontecagnano_faiano',
'pontecagnano-faiano': 'pontecagnano_faiano',
'pontecuti': 'todi',
'pontee': 'glassblowing',
'ponteibe': 'pontebba',
'pontekroaz': 'pont-croix',
'pontelagoscuro': 'ferrara',
'pontenova': 'a_pontenova',
'pontes': 'staines',
'pontfraverger-moronvilliers': 'pontfaverger-moronvilliers',
'pontiaka': 'pontic_greek',
'pontians': 'pontic_greeks',
'pontianus': 'pope_pontian',
'pontic-caspian': 'pontic-caspian_steppe',
'pontic-greek-language': 'pontic_greek',
'pontic-language': 'pontic_greek',
'pontica': 'pontus',
'pontics': 'pontic',
'pontides': 'pontic_mountains',
'pontiets': 'pontyates',
'pontif': 'pontiff',
'pontifex': 'college_of_pontiffs',
'pontiffs': 'pontiff',
'pontifical': 'roman_pontifical',
'pontificale': 'roman_pontifical',
'pontificalia': 'pontifical_vestments',
'pontificality': 'pontiff',
'pontificals': 'pontifical_vestments',
'pontificate': 'pontiff',
'pontificated': 'pontiff',
'pontificates': 'pontiff',
'pontificating': 'pontiff',
'pontification': 'pontiff',
'pontifications': 'pontiff',
'pontificator': 'pontiff',
'pontificators': 'pontiff',
'pontificatory': 'pontiff',
'pontifices': 'college_of_pontiffs',
'pontigibaud': 'pontgibaud',
'pontil': 'glassblowing',
'pontile': 'pontic',
'pontillism': 'pointillism',
'pontine': 'pons',
'pontio': 'pietro_pontio',
'pontipines': 'in_the_night_garden',
'pontirama': 'rhamma',
'pontllan-fraith': 'pontllanfraith',
'pontlliw': 'pont-lliw',
'pontneathvaughan': 'pontneddfechan',
'ponto-caspian': 'pontic-caspian_steppe',
'pontocaine': 'tetracaine',
'pontocaspian': 'pontic-caspian_steppe',
'pontomedusa': 'nereid',
'pontonier': 'sapper',
'pontoons': 'pontoon',
'pontopiddan': 'erik_pontoppidan',
'pontoporiidae': 'la_plata_dolphin',
'pontoppidan': 'erik_pontoppidan',
'pontos': 'pontus',
'pontrhydygroes': 'pont-rhyd-y-groes',
'pontrjagin': 'lev_pontryagin',
'pontryagin': 'lev_pontryagin',
'ponts-de-martel': 'les_ponts-de-martel',
'pontycymmer': 'pontycymer',
'pontymoel': 'pontypool',
'pontypandy': 'fireman_sam',
'ponugode': 'garidepalli',
'ponury': 'jan_piwnik',
'ponv': 'postoperative_nausea_and_vomiting',
'pony-car': 'pony_car',
'pony-hawk': 'mohawk_hairstyle',
'ponyca': 'pony_canyon',
'ponyfishes': 'ponyfish',
'ponygirl': 'animal_roleplay',
'ponyism': 'animal_roleplay',
'ponyplay': 'animal_roleplay',
'ponytails': 'ponytail',
'ponzi': 'charles_ponzi',
'ponzi-style': 'ponzi_scheme',
'ponziani': 'domenico_lorenzo_ponziani',
'poo-poo': 'feces',
'poo-shi': 'poodle_hybrid',
'poobah': 'grand_poobah',
'poochi': 'poo-chi',
'poochin': 'poodle_hybrid',
'poochon': 'poodle_hybrid',
'poochy': 'poochie',
'poocuohhunkkunnah': 'cuttyhunk',
'pood': 'principle_of_orthogonal_design',
'poodles': 'poodle',
'pooecetes': 'vesper_sparrow',
'poof': 'terminology_of_homosexuality',
'pooftah': 'terminology_of_homosexuality',
'poofy': 'homosexuality',
'poogal': 'pugal',
'poogie': 'monster_hunter',
'poogy': 'kaveret',
'pooh': 'poo',
'pooh-bah': 'grand_poobah',
'poohbah': 'grand_poobah',
'poohbear': 'winnie-the-pooh',
'poohman': 'pooh-man',
'pooing': 'defecation',
'poojagandhi': 'pooja_gandhi',
'poojah': 'puja',
'poojas': 'pooja_umashankar',
'pooki': 'garfield',
'pookie': 'pooky',
'pooktre': 'tree_shaping',
'pool-and-riffle': 'riffle-pool_sequence',
'pool.com': 'michael_arrington',
'pool.ntp.org': 'ntp_pool',
'pooladshahr': 'fooladshahr',
'poolball': 'cue_sports',
'poolbeg': 'poolbeg_generating_station',
'poolhall': 'billiard_hall',
'pooling': 'pool',
'poolish': 'pre-ferment',
'pooloku': 'poomaikelani',
'poolpo': 'poulpo',
'poolroom': 'billiard_room',
'pools': 'pool',
'poolshark': 'pool_shark',
'pooltown': 'luxborough',
'poolway': 'stechford_and_yardley_north',
'poom': 'mart_poom',
'poomaikalani': 'poomaikelani',
'poompuhar': 'puhar',
'poomsae': 'hyeong',
'poomse': 'hyeong',
'poomully': 'poomulli',
'poona': 'pune',
'poonac': 'copra',
'poonah': 'pune',
'poonakari': 'pooneryn',
'poonal': 'upanayana',
'poonamalee': 'poonamallee',
'poonani': 'punani',
'pooned': 'pwn',
'poongmul': 'pungmul',
'poongothai': 'poongothai_aladi_aruna',
'poonia': 'puniya',
'poonool': 'upanayana',
'poontalai': 'sterculia_lychnophora',
'pooop': 'poop',
'poopdeck': 'poop_deck',
'pooped': 'poop',
'pooper': 'anus',
'poopertoot': 'flatulence',
'poopertooter': 'flatulence',
'poophead': 'shithead',
'poophole': 'anus',
'poopie': 'feces',
'pooping': 'defecation',
'poopoo': 'pupu',
'pooppathi': 'poo_pathi',
'poopsock': 'video_game_addiction',
'poopsocking': 'video_game_addiction',
'poopsykins': 'south_park',
'pooptoot': 'flatulence',
'poopy': 'feces',
'poor-fen': 'poor_fen',
'poor-house': 'poorhouse',
'poor-houses': 'poorhouse',
'poor-me-one': 'potoo',
'poor-me-ones': 'potoo',
'poor-will': 'common_poorwill',
'poorams': 'pooram',
'poorer': 'poverty',
'poorest': 'poverty',
'poorhouses': 'poorhouse',
'poorijagannadh': 'puri_jagannadh',
'poorijagannath': 'puri_jagannadh',
'poorism': 'poverty_tourism',
'poorjoe': 'diodia_teres',
'poorkkalam': 'porkkaalam',
'poorly': 'poverty',
'poormouth': 'a_rare_conundrum',
'poornaprajna': 'madhvacharya',
'poorness': 'poverty',
'poornima': 'purnima',
'poortman': 'johannes_jacobus_poortman',
'poorvanchal': 'purvanchal',
'poorvi': 'purvi',
'poorwill': 'nightjar',
'pootan': 'cromartie_high_school',
'pooterism': 'charles_pooter',
'poothan': 'poothan_and_thira',
'poothara': 'puttara',
'pootiki': 'pontiki',
'pootschki': 'cow_parsnip',
'poottara': 'kalari',
'poovarasu': 'thespesia_populnea',
'poovey': 'mary_poovey',
'pooviyoor': 'puviyur',
'pop-1': 'cowsel',
'pop-art': 'pop_art',
'pop-behind': 'pop-up_ad',
'pop-corn': 'popcorn',
'pop-country': 'country_pop',
'pop-culture': 'popular_culture',
'pop-dance': 'dance-pop',
'pop-filter': 'pop_filter',
'pop-folk': 'balkan_pop',
'pop-gun': 'pop_gun',
'pop-ice': 'fla-vor-ice',
'pop-justice': 'popjustice',
'pop-lock': 'popping',
'pop-locking': 'popping',
'pop-out': 'batted_ball',
'pop-port': 'nokia_pop-port',
'pop-promo': 'music_video',
'pop-psych': 'psychedelic_pop',
'pop-psychology': 'popular_psychology',
'pop-punk': 'pop_punk',
'pop-rap': 'hip_hop_music',
'pop-rock': 'pop_rock',
'pop-sike': 'psychedelic_pop',
'pop-star': 'celebrity',
'pop-stars': 'popstars',
'pop-surf': 'surf_music',
'pop-tart': 'pop-tarts',
'pop-under': 'pop-up_ad',
'pop-up': 'popup',
'pop-ups': 'popup',
'pop...booya': 'tony_eveready',
'pop/rock': 'pop_rock',
'pop1': 'post_office_protocol',
'pop100': 'pop_100',
'pop11': 'pop-11',
'pop3': 'post_office_protocol',
'pop3s': 'post_office_protocol',
'pop:t2t': 'prince_of_persia:_the_two_thrones',
'pop:ww': 'prince_of_persia:_warrior_within',
'popa': 'porin_palloilijat',
'popadam': 'papadum',
'popadija': 'presbytera',
'popadom': 'papadum',
'popadum': 'papadum',
'popadya': 'presbytera',
'popal': 'popalzai',
'popali': 'popalzai',
'popalzada': 'popalzai',
'popalzay': 'popalzai',
'popalzy': 'popalzai',
'popart': 'pop_art',
'popb': 'palais_omnisports_de_paris-bercy',
'popbtch': 'popbitch',
'popcap': 'pop_cap',
'popcaps': 'pop_cap',
'popcicle': 'popsicle',
'popclient': 'fetchmail',
'popcnt': 'hamming_weight',
'popcorn.net': 'movieland',
'popcornflower': 'plagiobothrys',
'popcount': 'hamming_weight',
'popcron': 'popcorn',
'popcru': 'police_and_prisons_civil_rights_union',
'popculture': 'popular_culture',
'popd': 'pushd_and_popd',
'pope-joan': 'pope_joan',
'pope-waverly': 'pope-waverley',
'popeable': 'papabile',
'popean': 'alexander_pope',
'popehat': 'mitre',
'popeil': 'ron_popeil',
'popelairia': 'discosura',
'popeman': 'the_incredible_popeman',
'popera': 'operatic_pop',
'poperinghe': 'poperinge',
'poperos': 'meir_ben_judah_leib_poppers',
'popery': 'papist',
'popess': 'la_papessa',
'popeyed': 'pop_eyed',
'popfly': 'microsoft_popfly',
'popfnn': 'neuro-fuzzy',
'popgirl': 'pop_girl',
'popgun': 'pop_gun',
'popguns': 'the_popguns',
'popheart': 'please:_popheart_live_ep',
'popia': 'popiah',
'popielidzi': 'popielids',
'popigai': 'popigai_crater',
'popigay': 'popigai_crater',
'popil': 'magical_puzzle_popils',
'popils': 'magical_puzzle_popils',
'popinac': 'vachellia_farnesiana',
'popintzi': 'popintsi',
'popish': 'papist',
'popishly': 'papist',
'popishness': 'papist',
'popka': 'klonoa',
'popkart': 'crazyracing_kartrider',
'popl': 'symposium_on_principles_of_programming_languages',
'popland': 'kevin_mathews',
'poplar': 'populus',
'poplarism': 'poplar_rates_rebellion',
'poplars': 'populus',
'poplavsky': 'poplawski',
'poplicani': 'catharism',
'poplicola': 'publius_valerius_publicola',
'popliteus': 'popliteus_muscle',
'poplmark': 'poplmark_challenge',
'poplock': 'popping',
'poplocking': 'popping',
'popmart': 'popmart_tour',
'popmatters.com': 'popmatters',
'popmusic': 'pop_music',
'popmusiikkia': 'pop-musiikkia',
'popmuzik': 'pop_muzik',
'popoff': 'popov',
'popoie': 'secret_of_mana',
'popojan': 'popovjani',
'popol-vuh': 'popol_vuh',
'popolo': 'piazza_del_popolo',
'popoloca': 'popoluca',
'popomanaseu': 'mount_popomanaseu',
'popora': 'popora_people',
'poposaurid': 'poposauridae',
'popova': 'popov',
'popoveni': 'craiova',
'popovers': 'popover',
'popovici-ureche': 'v._a._urechia',
'popoy': 'filemon_lagman',
'poppadam': 'papadum',
'poppadom': 'papadum',
'poppadum': 'papadum',
'poppadums': 'papadum',
'poppaea': 'poppaea_sabina',
'poppant': 'super_smash_bros._brawl',
'poppea': 'poppaea_sabina',
'poppentiner-see': 'poppentiner_see',
'popperazzi': 'karl_popper',
'popperian': 'karl_popper',
'popperos': 'meir_ben_judah_leib_poppers',
'poppers/nitrites': 'poppers',
'poppets': 'paynes_poppets',
'poppie': 'poppy',
'poppils': 'magical_puzzle_popils',
'poppingawier': 'poppenwier',
'poppinjay': 'popinjay',
'popplers': 'the_problem_with_popplers',
'poppunk': 'pop_punk',
'poppy-head': 'poppy',
'poppyflowers': 'poppy',
'poppyland': 'overstrand',
'poppyseed': 'poppy_seed',
'poppywort': 'stylophorum_diphyllum',
'popra': 'operatic_pop',
'poprad-tatry': 'poprad',
'popradzki': 'poprad_landscape_park',
'poprete': 'romanichthys_valsanicola',
'poprock': 'pop_rock',
'pops': 'persistent_organic_pollutant',
'popsci': 'popular_science',
'popsci.com': 'popular_science',
'popsco': 'posco',
'popsicles': 'popsicle',
'popski': 'vladimir_peniakoff',
'popstar': 'celebrity',
'popstation': 'pop_station',
'poptart': 'pop-tarts',
'poptarts': 'pop-tarts',
'poptext.org': 'poptext',
'popti': 'jakaltek_language',
'poptop': 'poptop_software',
'poptronics': 'popular_electronics',
'poptropica.com': 'poptropica',
'popu': 'atho-popu',
'populaces': 'populace',
'popular-culture': 'popular_culture',
'popular-science': 'popular_science',
'popular-udeur': 'udeur_populars',
'populare': 'populares',
'popularis': 'populares',
'popularisation': 'popularity',
'popularisational': 'popularity',
'popularisations': 'popularity',
'popularise': 'popularity',
'popularised': 'popularity',
'populariser': 'popularity',
'popularisers': 'popularity',
'popularises': 'popularity',
'popularising': 'popularity',
'popularism': 'popolarismo',
'popularismo': 'popolarismo',
'popularisms': 'popolarismo',
'popularist': 'populism',
'popularistic': 'populism',
'popularistical': 'populism',
'popularistically': 'populism',
'popularists': 'populism',
'popularization': 'popularity',
'popularizational': 'popularity',
'popularizations': 'popularity',
'popularize': 'popularity',
'popularized': 'popularity',
'popularizer': 'popularity',
'popularizers': 'popularity',
'popularizes': 'popularity',
'popularizing': 'popularity',
'popularly': 'popularity',
'popularmechanics.com': 'popular_mechanics',
'popularmusic': 'lists_of_musicians',
'populate': 'population',
'populated': 'population',
'populates': 'population',
'populating': 'population',
'populational': 'population',
'populationally': 'population',
'populationism': 'population',
'populations': 'population',
'populator': 'sancho_i_of_portugal',
'populatorily': 'population',
'populators': 'population',
'populicide': 'democide',
'populise': 'popularity',
'populised': 'popularity',
'populiser': 'popularity',
'populisers': 'popularity',
'populises': 'popularity',
'populising': 'popularity',
'populisms': 'populism',
'populistic': 'populism',
'populistical': 'populism',
'populistically': 'populism',
'populists': 'populist',
'populize': 'popularity',
'populized': 'popularity',
'populizer': 'popularity',
'populizers': 'popularity',
'populizes': 'popularity',
'populizing': 'popularity',
'populnetin': 'kaempferol',
'populonium': 'populonia',
'populum': 'argumentum_ad_populum',
'populuxe': 'googie_architecture',
'popunder': 'pop-up_ad',
'popups': 'popup',
'popweed': 'fucus',
'popwhore': 'tatum_reed',
'popyright': 'sound_recording_copyright_symbol',
'popyura': 'the_new_transistor_heroes',
'poqet': 'poqet_pc',
'porage': 'porridge',
'porajarvi': 'porosozero',
'porajmo': 'porajmos',
'porality': 'porosity',
'poran': 'borandukht',
'porbander': 'porbandar',
'porbeagles': 'porbeagle',
'porbunder': 'porbandar',
'porc': 'pork',
'porcelainflower': 'wax-plant',
'porcelainite': 'mullite',
'porcelan': 'porcelain',
'porcelein': 'porcelain',
'porcelin': 'porcelain',
'porcell': 'john_porcelly',
'porcellanidae': 'porcelain_crab',
'porcellian': 'porcellian_club',
'porcfest': 'free_state_project',
'porche': 'porsche',
'porchow': 'burkau',
'porcine': 'pig',
'porcini': 'boletus_edulis',
'porcula': 'pygmy_hog',
'porcupinefishes': 'diodon',
'porcupines': 'porcupine',
'porcus': 'pig',
'pordenon': 'pordenone',
'pordenun': 'pordenone',
'pordoi': 'pordoi_pass',
'pore-water': 'groundwater',
'porecanka': 'parenzana',
'porecka': 'parenzana',
'poreiton': 'manipur',
'poreleaf': 'porophyllum',
'pores': 'pore',
'poreska': 'parenzana',
'porewater': 'groundwater',
'porferia': 'porphyria',
'porfimer': 'porfimer_sodium',
'porfiria': 'mammillaria',
'porfit': 'profit',
'porgera': 'porgera_gold_mine',
'porgie': 'sparidae',
'porgies': 'sparidae',
'pori': 'pattern_oriented_rule_implementation',
'poriah': 'poria',
'porichthys': 'midshipman_fish',
'poridge': 'porridge',
'porifera': 'sponge',
'porifera/temp': 'sponge',
'poriferan': 'sponge',
'poriot': 'hercule_poirot',
'porisms': 'porism',
'poriya': 'poria',
'pork-barrel': 'pork_barrel',
'pork-n-beans': 'pork_and_beans',
'pork-pie': 'pork_pie_hat',
'porkaalam': 'porkkaalam',
'porkage': 'sex',
'porkala': 'porkkala',
'porkbarrel': 'pork_barrel',
'porkbarrelling': 'pork_barrel',
'porkberry': 'town',
'porkburger': 'hamburger',
'porkbuster': 'porkbusters',
'porkchop': 'pork_chop',
'porker': 'pig',
'porketta': 'porchetta',
'porkfish': 'anisotremus_virginicus',
'porkies': 'rhyming_slang',
'porking': 'sexual_intercourse',
'porkkalam': 'porkkaalam',
'porkpie': 'pork_pie',
'porkrinds': 'pork_rind',
'porkulus': 'american_recovery_and_reinvestment_act_of_2009',
'porl': 'porl_thompson',
'porlampi': 'sveklovichnoye',
'porn': 'progressive_outer_retinal_necrosis',
'porn-star': 'pornographic_actor',
'porn-stars': 'pornographic_actor',
'porn2ube': 'amateur_pornography',
'pornagraphy': 'pornography',
'pornagrophy': 'pornography',
'pornai': 'prostitution_in_ancient_greece',
'pornaim': 'parnaim',
'porncast': 'podcast',
'porncasting': 'podcast',
'porngrind': 'pornogrind',
'pornhub': 'amateur_pornography',
'porno-chic': 'golden_age_of_porn',
'porno-star': 'pornographic_actor',
'pornoaction': 'bill_against_pornography_and_pornoaction',
'pornocore': 'dirty_rap',
'pornocracy': 'saeculum_obscurum',
'pornograffiti': 'pornograffitti',
'pornografiti': 'pornograffitti',
'pornografitti': 'pornograffitti',
'pornograph': 'pornography',
'pornographer': 'pornography',
'pornographers': 'pornography',
'pornographic': 'pornography',
'pornomation': 'zuma:_tales_of_a_sexual_gladiator',
'pornos': 'pornography',
'pornosec': 'prolefeed',
'pornostar': 'pornographic_actor',
'pornostars': 'pornographic_actor',
'pornotanz': 'uwe_schmidt',
'pornotube.com': 'pornotube',
'pornstar': 'pornographic_actor',
'pornstars': 'pornographic_actor',
'pornthip': 'pornthip_rojanasunand',
'porntipsguzzardo': 'simcity_2000',
'porntoy': 'portnoy',
'porntube': 'pornotube',
'porocarcinoma': 'malignant_acrospiroma',
'porocyte': 'porocytes',
'poroelasticity': 'poromechanics',
'porohanons': 'porohanon',
'porolepiform': 'porolepiformes',
'porolepiforms': 'porolepiformes',
'poroma': 'acrospiroma',
'poromeric': 'artificial_leather',
'poronaisk': 'poronaysk',
'porong': 'sidoarjo',
'porongurups': 'porongurup_national_park',
'poronography': 'pornography',
'poroporo': 'solanum_aviculare',
'pororo': 'pororo_the_little_penguin',
'poroshenko': 'petro_poroshenko',
'poroshuram': 'rajshekhar_basu',
'porosomes': 'porosome',
'porothelion': 'porto_cheli',
'poroto': 'common_bean',
'porotta': 'kerala_porotta',
'porous': 'porosity',
'porousity': 'porosity',
'porousness': 'porosity',
'porphilia': 'porphyria',
'porphine': 'porphin',
'porphiria': 'porphyria',
'porphirine': 'porphyrin',
'porphyran': 'porphyra',
'porphyrian': 'porphyry',
'porphyrias': 'porphyria',
'porphyrine': 'porphyrin',
'porphyrins': 'porphyrin',
'porphyrius': 'porphyry',
'porphyroblastic': 'porphyroblast',
'porphyroblasts': 'porphyroblast',
'porphyrochitonium': 'anthurium_sect._porphyrochitonium',
'porphyrocoma': 'justicia',
'porphyrogenetos': 'byzantine_aristocracy_and_bureaucracy',
'porphyrogenitus': 'porphyrogenitos',
'porphyrogennetos': 'porphyrogenitos',
'porphyrolaema': 'purple-throated_cotinga',
'porphyromonas': 'porphyromonas_gingivalis',
'porphyropsin': 'photopsin',
'porphyrospiza': 'yellow-billed_blue_finch',
'porpise': 'porpoise',
'porpitidae': 'chondrophore',
'porpoises': 'porpoise',
'porpora': 'nicola_porpora',
'porqueras': 'porqueres',
'porr': 'penang_outer_ring_road',
'porra': 'churro',
'porraimos': 'porajmos',
'porrajmos': 'porajmos',
'porrasturvat': 'stair_dismount',
'porretta': 'porretta_terme',
'porsangen': 'porsangerfjorden',
'porsangerfjord': 'porsangerfjorden',
'porsanggu': 'porsanger',
'porsanki': 'porsanger',
'porsche911': 'porsche_911',
'porschwitz': 'parszowice',
'porsena': 'lars_porsena',
'porsenna': 'lars_porsena',
'porsgrund': 'porsgrunn',
'porshe': 'porsche',
'porshuram': 'parshuram_upazila',
'porson': 'richard_porson',
'porsulae': 'mosynopolis',
'port-a-john': 'portable_toilet',
'port-a-potties': 'portable_toilet',
'port-a-potty': 'portable_toilet',
'port-a-san': 'portable_toilet',
'port-arthur': 'port_arthur',
'port-bail': 'portbail',
'port-bouet': 'port_bouet_airport',
'port-en-bessin': 'port-en-bessin-huppain',
'port-etienne': 'nouadhibou',
'port-francqui': 'ilebo',
'port-glasgow': 'port_glasgow',
'port-havannah': 'port_havannah',
'port-leucate': 'leucate',
'port-louis': 'port_louis',
'port-lyautey': 'kenitra',
'port-marly': 'le_port-marly',
'port-mathurin': 'port_mathurin',
'port-o-head': 'spongebob_squarepants',
'port-o-let': 'portable_toilet',
'port-o-potty': 'portable_toilet',
'port-o-spain': 'port_of_spain',
'port-of-spain': 'port_of_spain',
'port-said': 'port_said',
'port-vila': 'port_vila',
'port-wine': 'port_wine',
'porta-ledge': 'portaledge',
'porta-potty': 'portable_toilet',
'portabello': 'portobello',
'portableapps': 'portableapps.com',
'portables': 'portable_building',
'portabletor': 'portable_tor',
'portaboat': 'porta-bote',
'portacabin': 'portable_building',
'portacot': 'infant_bed',
'portada': 'main_page',
'portagaf': 'shandy',
'portagaff': 'shandy',
'portage-la-prairie': 'portage_la_prairie',
'portaging': 'portage',
'portainer': 'container_crane',
'portaiture': 'portrait',
'portakabin': 'portable_building',
'portalada': 'main_page',
'portalban': 'delley-portalban',
'portalbot': 'port_talbot',
'portalet': 'le_portalet',
'portaloo': 'portable_toilet',
'portals': 'portal',
'portapack': 'portapak',
'portapotty': 'portable_toilet',
'portaputty': 'putty',
'portarico': 'puerto_rico',
'portative': 'portative_organ',
'portau': 'isaac_de_porthau',
'portaulun': 'ngarrindjeri',
'portaut': 'isaac_de_porthau',
'portcullises': 'portcullis',
'porte-coton': 'groom_of_the_stool',
'ported': 'porting',
'portegal': 'portugal',
'porteguese': 'portuguese',
'portekiz': 'portugal',
'portemento': 'portamento',
'portend': 'omen',
'portended': 'omen',
'portending': 'omen',
'portends': 'omen',
'portentious': 'omen',
'portentiously': 'omen',
'portentous': 'omen',
'portentously': 'omen',
'portents': 'omen',
'porter-duff': 'alpha_compositing',
'porter-gaud': 'porter-gaud_school',
'porteranthus': 'gillenia',
'porters': 'porters_ski_area',
'portfolios': 'portfolio',
'portglen1': 'portglenone',
'portgual': 'portugal',
'porthaethwy': 'menai_bridge',
'porthan': 'henrik_gabriel_porthan',
'porthau': 'isaac_de_porthau',
'porthcurnow': 'porthcurno',
'porthemmet': 'jonty_haywood',
'porthkerry': 'porthkerry_park',
'porthocyon': 'borophagus',
'porthoer': 'aberdaron',
'porthor': 'aberdaron',
'portianou': 'portiano',
'porticello': 'villa_san_giovanni',
'porticoes': 'portico',
'porticos': 'portico',
'porticullis': 'portcullis',
'porticus': 'portico',
'portie': 'portuguese_water_dog',
'portimonense': 'portimonense_s.c.',
'portinari': 'candido_portinari',
'portincross': 'portencross',
'portingles': 'porglish',
'portio': 'vaginal_portion_of_cervix',
'portis': 'portis/sportis',
'portiuncola': 'porziuncola',
'portiuncula': 'porziuncola',
'portiwgal': 'portugal',
'portlander': 'portland',
'portlandian': 'late_jurassic',
'portlandite': 'calcium_hydroxide',
'portlands': 'port_lands',
'portlaois': 'portlaoise',
'portlets': 'portlet',
'portliner': 'port_island_line',
'portlligat': 'port_lligat',
'portlooe': 'looe',
'portmadoc': 'porthmadog',
'portmann': 'portman',
'portmans': 'just_group',
'portmantuae': 'portmanteau',
'portmantuea': 'portmanteau',
'portmeiron': 'portmeirion',
'portmerion': 'portmeirion',
'portmonteau': 'portmanteau',
'portnoff': 'leo_portnoff',
'porto-potty': 'portable_toilet',
'portobella': 'agaricus_bisporus',
'portocheli': 'porto_cheli',
'portochelio': 'porto_cheli',
'portochelion': 'porto_cheli',
'portogallo': 'portugal',
'portogalo': 'portugal',
'portoheli': 'porto_cheli',
'portohelio': 'porto_cheli',
'portokheli': 'porto_cheli',
'portokhelion': 'porto_cheli',
'portolan': 'portolan_chart',
'portolano': 'portolan_chart',
'portonic': 'port_wine',
'portopotty': 'portable_toilet',
'portorico': 'puerto_rico',
'portoriko': 'puerto_rico',
'portoryko': 'puerto_rico',
'portovesme': 'portoscuso',
'portpass': 'port_passenger_accelerated_service_system',
'portpoole': 'portpool',
'portrait-painter': 'portrait',
'portraitist': 'portrait',
'portraits': 'portrait',
'portraiture': 'portrait',
'portreitz': 'port_reitz',
'ports': 'port',
'portscan': 'port_scanner',
'portscanner': 'port_scanner',
'portscanning': 'port_scanner',
'portslade-by-sea': 'portslade',
'portuary': 'breviary',
'portucel': 'portucel_soporcel',
'portuga': 'portugal',
'portugais': 'blauer_portugieser',
'portugal/communications': 'telecommunications_in_portugal',
'portugal/economy': 'economy_of_portugal',
'portugal/geography': 'geography_of_portugal',
'portugal/government': 'politics_of_portugal',
'portugal/history': 'history_of_portugal',
'portugal/military': 'portuguese_armed_forces',
'portugal/people': 'demographics_of_portugal',
'portugal/transportation': 'transport_in_portugal',
'portugale': 'portugal',
'portugaleje': 'portugal',
'portugali': 'portugal',
'portugalija': 'portugal',
'portugalio': 'portugal',
'portugaljka': 'blauer_portugieser',
'portugalka': 'blauer_portugieser',
'portugall': 'portugal',
'portugallu': 'portugal',
'portugalska': 'portugal',
'portugalsko': 'portugal',
'portugalujo': 'portugal',
'portugaul': 'portugal',
'portugese': 'portuguese',
'portugese-american': 'luso_american',
'portugieser': 'blauer_portugieser',
'portugreek': 'waterworld',
'portugual': 'portugal',
'portugues': 'portugal',
'portuguese-american': 'portuguese_american',
'portuguese-americans': 'luso_american',
'portuguese-brazilian': 'portuguese_brazilian',
'portuguese-brazilians': 'portuguese_brazilian',
'portuguese-canadian': 'portuguese_canadians',
'portuguese-canadians': 'portuguese_canadians',
'portuguese-chinese': 'chinese_portuguese',
'portuguese-galician': 'galician-portuguese',
'portuguese-jewish': 'history_of_the_jews_in_portugal',
'portuguese-language': 'portuguese_language',
'portuguese-speaking': 'lusophone',
'portuguesesql': 'postgresql',
'portugul': 'portugal',
'portulacas': 'portulaca',
'portulaceae': 'portulacaceae',
'portulan': 'portolan_chart',
'portulan-chart': 'portolan_chart',
'portumnes': 'portunes',
'portunalia': 'portunes',
'portunga': 'portugal',
'portvale': 'port_vale_f.c.',
'portwine': 'port_wine',
'portyanki': 'sock',
'portyngal': 'portugal',
'porutchik': 'poruchik',
'porv': 'pilot-operated_relief_valve',
'porvad': 'porwad',
'porwal': 'porwad',
'porwiggle': 'tadpole',
'porwigle': 'tadpole',
'porz-am-rhein': 'porz',
'porz-am-rhine': 'porz',
'porzioncula': 'porziuncola',
'porziuncula': 'porziuncola',
'pos-tagging': 'part-of-speech_tagging',
'posadism': 'fourth_international_posadist',
'posadist': 'fourth_international_posadist',
'posans': 'phi_omega_sigma',
'posap': 'atom_probe',
'posat': 'posat-1',
'posaune': 'trombone',
'poschi': 'postauto',
'poschti': 'postauto',
'posdnous': 'kelvin_mercer',
'posdnuos': 'kelvin_mercer',
'pose': 'palm_os_emulator',
'posedian': 'poseidon',
'posedonius': 'posidonius',
'poseideon': 'attic_calendar',
'poseidippos': 'poseidippus',
'poseidon-2': 'jason-1',
'poseidone': 'ss_empire_ballad',
'poseidonios': 'posidonius',
'poseidonius': 'posidonius',
'posekim': 'posek',
'posel': 'sejm_of_the_republic_of_poland',
'posemeter': 'light_meter',
'posen-west-prussia': 'posen-west_prussia',
'posenergy': 'posco',
'poseq': 'posek',
'posessions': 'ownership',
'poset': 'partially_ordered_set',
'posette': 'poser',
'poseurs': 'poseur',
'poseys': 'nosegay',
'posh': 'windows_powershell',
'posha': 'lom_people',
'poshang': 'pashang',
'poshchimbonggo': 'west_bengal',
'poshlust': 'poshlost',
'posho': 'ugali',
'posi': 'badagry_division',
'posi-core': 'positive_hardcore',
'posicor': 'mibefradil',
'posicore': 'positive_hardcore',
'posiden': 'poseidon',
'posidippos': 'poseidippus_of_pella',
'posidippus': 'poseidippus_of_pella',
'posidon': 'poseidon',
'posidoniaceae': 'posidonia',
'posidonios': 'posidonius',
'posidriv': 'pozidriv',
'posidrive': 'pozidriv',
'posiedon': 'poseidon',
'posiet': 'posyet',
'posilac': 'bovine_somatotropin',
'posilge': 'prussian_chronicle',
'posilipo': 'posillipo',
'posilonic': 'pozzolan',
'posing': 'human_position',
'posion': 'poison',
'posit': 'axiom',
'positional': 'positional_notation',
'positionality': 'positional_good',
'positioner': 'position',
'positiva': 'positiva_records',
'positive-definite': 'definite_bilinear_form',
'positive-real': 'positive-real_function',
'positiveness': 'norman_vincent_peale',
'positivist': 'positivism',
'positivistic': 'positivism',
'positivistically': 'positivism',
'positivists': 'positivism',
'positivusab': 'positivus_festival',
'positonium': 'positronium',
'positronic': 'positronic_brain',
'positrons': 'positron',
'posiwid': 'the_purpose_of_a_system_is_what_it_does',
'posix.1': 'posix',
'poskam': 'poskam_county',
'poskim': 'posek',
'posky': 'microsoft_flight_simulator',
'poslq': 'posslq',
'posm': 'styrene',
'posmox': 'amoxicillin',
'posnovis': 'unovis',
'posokh': 'crosier',
'posole': 'pozole',
'posology': 'pharmacology',
'posp.': 'eduard_pospichal',
'pospelov': 'pyotr_pospelov',
'pospelovsky': 'dimitry_pospielovsky',
'pospia': 'posco',
'pospielovsky': 'dimitry_pospielovsky',
'posquieres': 'vauvert',
'possability': 'possibility',
'possebon': 'rodrigo_possebon',
'posses': 'posse',
'possesion': 'possession',
'possess': 'possession',
'possessions': 'possession',
'possessor': 'possession',
'possevin': 'antonio_possevino',
'possi': 'boussy',
'possibilian': 'possibilianism',
'possibilist': 'possibilism',
'possibilists': 'possibilism',
'possible': 'possibility',
'possidius': 'saint_possidius',
'possiet': 'posyet',
'possil': 'possilpark',
'possinja': 'pasiene',
'possinus': 'pierre_poussines',
'possitions': 'position',
'possl': 'prolifics_open_source_software_for_linux',
'possom': 'possum',
'possums': 'possum',
'post-1945': 'post-war',
'post-agb': 'protoplanetary_nebula',
'post-agilism': 'agile_software_development',
'post-alveolar': 'postalveolar_consonant',
'post-american': 'sojourners_magazine',
'post-amerikan': 'post_amerikan',
'post-anarchist': 'post-anarchism',
'post-and-beam': 'timber_framing',
'post-and-lintel': 'timber_framing',
'post-apoc': 'apocalyptic_and_post-apocalyptic_fiction',
'post-apocalyptic': 'apocalyptic_and_post-apocalyptic_fiction',
'post-apocalyse': 'apocalyptic_and_post-apocalyptic_fiction',
'post-atheism': 'post-theism',
'post-atheistic': 'post-theism',
'post-bebop': 'post-bop',
'post-behavioralism': 'postpositivism',
'post-black': 'black_metal',
'post-bulletin': 'rochester_post-bulletin',
'post-chaise': 'chaise',
'post-christendom': 'postchristianity',
'post-christian': 'postchristianity',
'post-christianity': 'postchristianity',
'post-classic': 'mesoamerican_chronology',
'post-cognitivism': 'postcognitivism',
'post-cognitivist': 'postcognitivism',
'post-colonial': 'postcolonialism',
'post-colonialism': 'postcolonialism',
'post-colonialist': 'postcolonialism',
'post-colonialists': 'postcolonialism',
'post-communion': 'postcommunion',
'post-communisms': 'post-communism',
'post-communist': 'post-communism',
'post-communistic': 'post-communism',
'post-communists': 'post-communism',
'post-condition': 'postcondition',
'post-consumerism': 'post-materialism',
'post-courier': 'papua_new_guinea_post-courier',
'post-crescent': 'the_post-crescent',
'post-crisis': 'crisis_on_infinite_earths',
'post-dated': 'postdated',
'post-death': 'melodic_death_metal',
'post-dispatch': 'st._louis_post-dispatch',
'post-doc': 'postdoctoral_research',
'post-doctoral': 'postdoctoral_research',
'post-doctorate': 'doctorate',
'post-doctural': 'postdoctoral_research',
'post-emo': 'emo',
'post-exilic': 'babylonian_captivity',
'post-expressionism': 'new_objectivity',
'post-fordist': 'post-fordism',
'post-gazette': 'pittsburgh_post-gazette',
'post-genderism': 'george_dvorsky',
'post-graduate': 'postgraduate_education',
'post-graduation': 'postgraduate_education',
'post-graffiti': 'street_art',
'post-historical': 'the_end_of_history_and_the_last_man',
'post-hittite': 'syro-hittite_states',
'post-hoc': 'post-hoc_analysis',
'post-hockery': 'post_hoc_ergo_propter_hoc',
'post-holocaust': 'apocalyptic_and_post-apocalyptic_fiction',
'post-horn': 'post_horn',
'post-human': 'posthuman',
'post-humanism': 'posthumanism',
'post-ictal': 'postictal_state',
'post-impressionist': 'post-impressionism',
'post-impressionistic': 'post-impressionism',
'post-impressionistically': 'post-impressionism',
'post-impressionists': 'post-impressionism',
'post-increment': 'increment',
'post-industrial': 'post-industrial_society',
'post-industrialism': 'post-industrial_society',
'post-intelligencer': 'seattle_post-intelligencer',
'post-it': 'post-it_note',
'post-its': 'post-it_note',
'post-journal': 'the_post-journal',
'post-keynesian': 'post-keynesian_economics',
'post-keynesians': 'post-keynesian_economics',
'post-kyoto': 'post-kyoto_protocol_negotiations_on_greenhouse_gas_emissions',
'post-lapsarian': 'fall_of_man',
'post-leftism': 'post-left_anarchy',
'post-leftist': 'post-left_anarchy',
'post-leg': 'post-legged',
'post-literacy': 'post_literacy',
'post-man': 'mail',
'post-marxist': 'post-marxism',
'post-medieval': 'post-medieval_archaeology',
'post-miltonic': 'john_milton',
'post-minimal': 'postminimalism',
'post-minimalism': 'postminimalism',
'post-minimalist': 'postminimalism',
'post-mix': 'premix_and_postmix',
'post-modern': 'postmodernism',
'post-modernest': 'postmodernism',
'post-modernism': 'postmodernism',
'post-modernist': 'postmodernism',
'post-modernists': 'postmodernism',
'post-modernity': 'postmodernity',
'post-mortem': 'autopsy',
'post-natal': 'postnatal',
'post-national': 'postnationalism',
'post-nationalism': 'postnationalism',
'post-naturalism': 'antinaturalism',
'post-newsweek': 'the_washington_post_company',
'post-newtonian': 'post-newtonian_expansion',
'post-nominal': 'post-nominal_letters',
'post-nominals': 'post-nominal_letters',
'post-nuclear': 'nuclear_holocaust',
'post-office': 'post_office',
'post-panamax': 'panamax',
'post-partisan': 'postpartisan',
'post-partum': 'postnatal',
'post-positivism': 'postpositivism',
'post-post-modernism': 'post-postmodernism',
'post-postmodern': 'post-postmodernism',
'post-postmodernist': 'post-postmodernism',
'post-prandial': 'postprandial',
'post-processualism': 'post-processual_archaeology',
'post-road': 'post_road',
'post-roads': 'post_road',
'post-romantic': 'post-romanticism',
'post-scarcity': 'post_scarcity',
'post-script': 'postscript',
'post-scriptum': 'postscript',
'post-secondary': 'higher_education',
'post-secular': 'secularism',
'post-see': 'postsee',
'post-soviet': 'post-soviet_states',
'post-stall': 'post_stall',
'post-standard': 'the_post-standard',
'post-star': 'the_post-star',
'post-structural': 'post-structuralism',
'post-structuralisms': 'post-structuralism',
'post-structuralist': 'post-structuralism',
'post-structuralistic': 'post-structuralism',
'post-structuralistical': 'post-structuralism',
'post-structuralistically': 'post-structuralism',
'post-structuralists': 'post-structuralism',
'post-structurally': 'post-structuralism',
'post-surgical': 'perioperative_mortality',
'post-synaptic': 'chemical_synapse',
'post-theistic': 'post-theism',
'post-thrash': 'groove_metal',
'post-tonal': 'atonality',
'post-translation': 'posttranslational_modification',
'post-translational': 'posttranslational_modification',
'post-uvular': 'uvular_consonant',
'post-vulgate': 'post-vulgate_cycle',
'post-work': 'the_end_of_work',
'post-yba': 'post-ybas',
'post-zionist': 'post-zionism',
'postage': 'mail',
'postal2': 'postal_2',
'postal3': 'postal_iii',
'postalveolar': 'postalveolar_consonant',
'postalwatch': 'postalwatch_incorporated',
'postanarchism': 'post-anarchism',
'postanarchist': 'post-anarchism',
'postapocalyptic': 'apocalyptic_and_post-apocalyptic_fiction',
'postarization': 'posterization',
'postbox': 'post_box',
'postbus': 'postauto',
'postcapitalism': 'post-capitalism',
'postcava': 'inferior_vena_cava',
'postchristendom': 'postchristianity',
'postchristian': 'postchristianity',
'postclassic': 'postclassical_era',
'postclassical': 'postclassical_era',
'postclassicalism': 'postclassical_era',
'postclassicism': 'postclassical_era',
'postcode': 'postal_code',
'postcodes': 'postal_code',
'postcognition': 'retrocognition',
'postcognitive': 'retrocognition',
'postcolonial': 'postcolonialism',
'postcolonialist': 'postcolonialism',
'postcolonialists': 'postcolonialism',
'postcomm': 'postal_services_commission',
'postcommunism': 'post-communism',
'postcommunisms': 'post-communism',
'postcommunist': 'post-communism',
'postcommunistic': 'post-communism',
'postcommunists': 'post-communism',
'postcranial': 'postcrania',
'postdenominational': 'post-denominational_churches',
'postdenominationalism': 'post-denominational_churches',
'postdoc': 'postdoctoral_research',
'postdoctoral': 'postdoctoral_research',
'postdoctorate': 'postdoctoral_research',
'postdramatic': 'postdramatic_theatre',
'postec': 'pohang_university_of_science_and_technology',
'postech': 'pohang_university_of_science_and_technology',
'posteel': 'posco',
'postegresql': 'postgresql',
'postelnicie': 'postelnic',
'postempiricism': 'postpositivism',
'postemus': 'apostomus',
'posterboard': 'display_board',
'posterisation': 'posterization',
'posterise': 'posterization',
'posterize': 'posterization',
'posterizing': 'posterization',
'posterns': 'postern',
'posters': 'poster',
'postestrum': 'estrous_cycle',
'postexilic': 'babylonian_captivity',
'postfordism': 'post-fordism',
'postgangionic': 'ganglion',
'postganglionic': 'postganglionic_fibers',
'postgender': 'postgenderism',
'postgirot': 'plusgirot',
'postglacial': 'holocene',
'postgrad': 'post_grad',
'postgraduate': 'postgraduate_education',
'postgraduates': 'postgraduate_education',
'postgraduation': 'postgraduate_education',
'postgre': 'postgresql',
'postgres': 'postgresql',
'postgres95': 'postgresql',
'postgress': 'postgresql',
'postgressql': 'postgresql',
'postgrunge': 'post-grunge',
'posthardcore': 'post-hardcore',
'posthectomy': 'circumcision',
'posthip': 'post-hipster',
'posthipsterism': 'post-hipster',
'postholes': 'posthole',
'posthomously': 'posthumous',
'posthorn': 'post_horn',
'posthosyrphus': 'eupeodes',
'posthumanist': 'posthumanism',
'posthumanity': 'posthuman',
'posthumously': 'posthumous',
'posthumus': 'posthumous',
'posti': 'itella',
'postiche': 'pastiche',
'postictal': 'postictal_state',
'postie': 'mail_carrier',
'postiebike': 'honda_ct110',
'postilla': 'postil',
'postillion': 'postilion',
'postillionage': 'anal_masturbation',
'postimpressionism': 'post-impressionism',
'postimpressionist': 'post-impressionism',
'postimpressionistic': 'post-impressionism',
'postimpressionistically': 'post-impressionism',
'postimpressionists': 'post-impressionism',
'postincrement': 'increment',
'postindustrial': 'post-industrial_society',
'postindustrialism': 'post-industrial_society',
'posting': 'post',
'postinor': 'emergency_contraception',
'postirier': 'butt',
'postitschtraube': 'pinot_meunier',
'postivistic': 'positivism',
'postkeynsian': 'post-keynesian_economics',
'postkom': 'norwegian_post_and_communications_union',
'postlapsarian': 'fall_of_man',
'postlewayt': 'malachy_postlewayt',
'postliberal': 'postliberal_theology',
'postliberalism': 'postliberal_theology',
'postman': 'mail_carrier',
'postmarking': 'postmark',
'postmarks': 'postmark',
'postmarks.': 'postmark',
'postmarxism': 'post-marxism',
'postmaster-general': 'postmaster_general',
'postmaterialism': 'post-materialism',
'postmaturity': 'postmature_birth',
'postmenopausal': 'menopause',
'postmile': 'california_postmile',
'postmilennialism': 'millennialism',
'postmill': 'post_mill',
'postmillenialism': 'postmillennialism',
'postmillennial': 'postmillennialism',
'postmillennialist': 'postmillennialism',
'postmillennialists': 'postmillennialism',
'postminimal': 'postminimalism',
'postminimalist': 'postminimalism',
'postmistress': 'postmaster',
'postmitotic': 'g0_phase',
'postmix': 'premix_and_postmix',
'postmodemity': 'postmodernity',
'postmoderism': 'postmodernism',
'postmodern': 'postmodernism',
'postmodern/music': 'postmodern_music',
'postmodernism/music': 'postmodern_music',
'postmodernism/philosophy': 'postmodern_philosophy',
'postmodernism/to-merge': 'postmodernism',
'postmodernist': 'postmodernism',
'postmodernists': 'postmodernism',
'postmodernity/to-merge': 'postmodernity',
'postmormon': 'postmormon_community',
'postmortem': 'autopsy',
'postnasal': 'nasal_scale',
'postnasals': 'nasal_scale',
'postnational': 'postnationalism',
'postnaturalism': 'antinaturalism',
'postnomial': 'post-nominal_letters',
'postnominal': 'post-nominal_letters',
'postnominals': 'post-nominal_letters',
'postnuke': 'zikula',
'postocular': 'ocular_scales',
'postoculars': 'ocular_scales',
'postoestrus': 'estrous_cycle',
'postopia': 'post_foods',
'postorder': 'tree_traversal',
'postpanamax': 'panamax',
'postparenthood': 'empty_nest_syndrome',
'postpartum': 'postnatal',
'postperovskite': 'post-perovskite',
'postphilosophy': 'postanalytic_philosophy',
'postpoint': 'an_post',
'postponent': 'postponement',
'postposition': 'preposition_and_postposition',
'postpositions': 'preposition_and_postposition',
'postpositivist': 'postpositivism',
'postpost': 'postpost_records',
'postpostivist': 'postpositivism',
'postpostmodern': 'post-postmodernism',
'postpostmodernism': 'post-postmodernism',
'postprint': 'postprints',
'postprocessing': 'post-processing',
'postprocessualism': 'post-processual_archaeology',
'postproduction': 'post-production',
'postprorocentrum': 'prorocentrales',
'postpunk': 'post-punk',
'postrade': 'posco',
'postrel': 'virginia_postrel',
'postremogeniture': 'ultimogeniture',
'postrevolution': 'revolution',
'postrevolutionary': 'revolution',
'postriders': 'post_riders',
'postrig': 'tonsure',
'postriziny': 'cutting_it_short',
'postrock': 'post-rock',
'postromanticism': 'post-romanticism',
'postscarcity': 'post_scarcity',
'postscipt': 'postscript',
'postscriptum': 'postscript',
'postseason': 'playoffs',
'postsecondary': 'higher_education',
'postsecret.com': 'postsecret',
'postsecular': 'postsecularism',
'postshop': 'new_zealand_post',
'postsql': 'postgresql',
'poststructralist': 'post-structuralism',
'poststructural': 'post-structuralism',
'poststructuralism': 'post-structuralism',
'poststructuralisms': 'post-structuralism',
'poststructuralist': 'post-structuralism',
'poststructuralistic': 'post-structuralism',
'poststructuralistical': 'post-structuralism',
'poststructuralistically': 'post-structuralism',
'poststructuralists': 'post-structuralism',
'poststructurally': 'post-structuralism',
'poststrucuturalist': 'post-structuralism',
'postsurrealism': 'post-surrealism',
'postsynaptic': 'chemical_synapse',
'posttheism': 'post-theism',
'posttheistic': 'post-theism',
'posttranslational': 'posttranslational_modification',
'postulancy': 'postulant',
'postulants': 'postulant',
'postulantship': 'postulant',
'postulare': 'postulant',
'postulate': 'axiom',
'postulated': 'axiom',
'postulates': 'axiom',
'postulating': 'axiom',
'postulation': 'axiom',
'postulations': 'axiom',
'postulators': 'postulator',
'posturologists': 'the_boy_who_knew_too_much',
'postverket': 'posten_norge',
'postvesenet': 'posten_norge',
'postwar': 'post-war',
'postwatch': 'consumer_focus',
'postwhore': 'internet_forum',
'postwick': 'postwick_with_witton',
'postyshev': 'pavel_postyshev',
'postzygapophysis': 'articular_processes',
'poswol': 'pasvalys',
'posy': 'nosegay',
'posynomials': 'posynomial',
'posyolok': 'types_of_inhabited_localities_in_russia',
'poszony': 'bratislava',
'pot-bellied': 'pot-bellied_pig',
'pot-boilers': 'potboiler',
'pot-bound': 'container_garden',
'pot-head': 'stoner',
'pot-herb': 'leaf_vegetable',
'pot-herbs': 'leaf_vegetable',
'pot-house': 'tavern',
'pot-houses': 'tavern',
'pot-noodle': 'pot_noodle',
'pot-sticker': 'jiaozi',
'pot-walloper': 'potwalloper',
'pot-worm': 'enchytraeidae',
'pota': 'prevention_of_terrorist_activities_act',
'potaaaym': 'a_portrait_of_the_artist_as_a_young_man',
'potability': 'drinking_water',
'potabilization': 'portable_water_purification',
'potable': 'drinking_water',
'potacan': 'potash_corporation_of_saskatchewan',
'potager': 'kitchen_garden',
'potaissa': 'turda',
'potala': 'potala_palace',
'potamiaena': 'basilides_and_potamiana',
'potamiana': 'basilides_and_potamiana',
'potamodromous': 'fish_migration',
'potamogale': 'giant_otter_shrew',
'potamogetonales': 'najadales',
'potamophis': 'acrochordidae',
'potamos': 'potamus',
'potamotrygonid': 'potamotrygonidae',
'potana': 'pothana',
'potangaroa': 'paora_te_potangaroa',
'potaroo': 'potoroidae',
'potashcorp': 'potash_corporation_of_saskatchewan',
'potashes': 'potash',
'potasium': 'potassium',
'potassa': 'potassium_hydroxide',
'potassemia': 'hyperkalemia',
'potassia': 'potassium_hydroxide',
'potasside': 'alkalide',
'potassium-sparing': 'potassium-sparing_diuretic',
'potato-bean': 'pachyrhizus',
'potatoes': 'potato',
'potatos': 'potato',
'potawatami': 'potawatomi',
'potawatomies': 'potawatomi',
'potawatomis': 'potawatomi',
'potawotomi': 'potawatomi',
'potawotomie': 'potawatomi',
'potbelleez': 'the_potbelleez',
'potbellied': 'potbelly',
'potboilers': 'potboiler',
'potbs': 'pirates_of_the_burning_sea',
'potc1': 'pirates_of_the_caribbean:_the_curse_of_the_black_pearl',
'potc4': 'pirates_of_the_caribbean:_on_stranger_tides',
'potc:tcobtp': 'pirates_of_the_caribbean:_the_curse_of_the_black_pearl',
'potch': 'opal',
'potco': 'pirates_of_the_caribbean_online',
'potcrate': 'potassium_chlorate',
'potd': 'wikipedia:picture_of_the_day',
'potehi': 'glove_puppetry',
'potens': 'potentate',
'potente': 'franka_potente',
'potentia': 'potenza',
'potential/temp': 'potential',
'potentialities': 'potentiality_and_actuality',
'potentiality': 'potentiality_and_actuality',
'potentially': 'potential',
'potentials': 'potential',
'potentian': 'savinian_and_potentian',
'potentiators': 'potentiator',
'potentimeter': 'potentiometer',
'potentiometers': 'potentiometer',
'potentiometric': 'potentiometer',
'poter': 'potter',
'potercha': 'slavic_fairies',
'poteriidae': 'neocyclotidae',
'potesy': 'potyosy',
'potetball': 'raspeball',
'potfest': 'beerfest',
'potga': 'progenies_of_the_great_apocalypse',
'potgietersrus': 'mokopane',
'potgietersrust': 'mokopane',
'pothanicad': 'pothanikkad',
'pothawar': 'pothohar_plateau',
'potheen': 'moonshine',
'potheinos': 'pothinus',
'potheinus': 'pothinus',
'pother': 'annoyance',
'potherb': 'leaf_vegetable',
'potherbs': 'leaf_vegetable',
'pothers': 'annoyance',
'pothohar': 'pothohar_plateau',
'pothohari/mirpuri': 'potwari_language',
'potholder': 'pot-holder',
'potholers': 'caving',
'potholes': 'pothole',
'potholing': 'caving',
'pothouse': 'tavern',
'pothouses': 'tavern',
'pothuava': 'aechmea',
'pothundi': 'pothundi_dam',
'pothwari': 'pothohari',
'poti-pherah': 'potiphar',
'potica': 'nut_roll',
'potidaia': 'potidaea',
'potide': 'potassium_iodide',
'potidea': 'potidaea',
'potifar': 'potiphar',
'potigal': 'portugal',
'potiguaras': 'potiguara',
'potika': 'nut_roll',
'potiomkin': 'potemkin',
'potions': 'potion',
'potiorek': 'oskar_potiorek',
'potipher': 'potiphar',
'potiti': 'portugal',
'potiza': 'nut_roll',
'potlach': 'potlatch',
'potlikker': 'collard_liquor',
'potlucks': 'potluck',
'potnoodle': 'pot_noodle',
'poto': 'planet_of_the_ood',
'potoccy': 'potocki_family',
'potocka': 'potocki_family',
'potocki': 'potocki_family',
'potohar': 'pothohar_plateau',
'potol': 'trichosanthes_dioica',
'potolemy': 'ptolemy',
'potomotyphlus': 'potomotyphlus_kaupii',
'potoos': 'potoo',
'potoplenyk': 'slavic_fairies',
'potoplenytsia': 'slavic_fairies',
'potoprens': 'port-au-prince',
'potoridae': 'potoroidae',
'potoroid': 'potoroidae',
'potoroos': 'potoroo',
'potorous': 'potoroo',
'potos': 'kinkajou',
'pototschnig': 'heinz_pototschnig',
'potowari': 'potwari_language',
'potowatami': 'potawatomi',
'potowatomi': 'potawatomi',
'potp': 'puppetry_of_the_penis',
'potpie': 'pot_pie',
'potresina': 'pontresina',
'potroast': 'braising',
'pots': 'pot',
'potschendorf': 'pieckowo',
'potsdamer': 'shandy',
'potsdamerplatz': 'potsdamer_platz',
'potshard': 'sherd',
'potsherd': 'sherd',
'potsticker': 'jiaozi',
'potstickers': 'jiaozi',
'potstone': 'paramoudra',
'pottawatami': 'potawatomi',
'pottawatamie': 'potawatomi',
'pottawatomi': 'potawatomi',
'pottawatomie': 'potawatomi',
'pottawattamie': 'potawatomi',
'pottawattomie': 'potawatomi',
'potted': 'potting',
'pottekkadu': 's._k._pottekkatt',
'pottekkatt': 's._k._pottekkatt',
'pottenkant': 'antwerp_lace',
'potter7': 'harry_potter_and_the_deathly_hallows',
'pottermania': 'harry_potter_fandom',
'potterrow': 'potterrow_student_centre',
'potters': 'potter',
'potterybarn': 'pottery_barn',
'potthast': 'august_potthast',
'pottock': 'pottok',
'pottoka': 'pottok',
'pottowatomic': 'potawatomi',
'pottowattomie': 'potawatomi',
'potty': 'chamber_pot',
'pottygate': 'larry_craig',
'potulny': 'ryan_potulny',
'potupa': 'alexander_s._potupa',
'potus': 'president_of_the_united_states',
'potusa': 'president_of_the_united_states',
'potusoa': 'president_of_the_united_states',
'potvis': 'sperm_whale',
'potw': 'sewage_treatment',
'potwaller': 'potwalloper',
'potwalloping': 'potwalloper',
'potwar': 'pothohar_plateau',
'potwari': 'pothohari',
'potworm': 'enchytraeidae',
'poty': 'time_person_of_the_year',
'potyomkin': 'potemkin',
'potzolcalli': 'potzollcalli',
'potzolli': 'pozole',
'potzrebie': 'potrzebie',
'pou1f1': 'pituitary-specific_positive_transcription_factor_1',
'pou2f1': 'octamer_transcription_factor',
'pou5f1': 'oct-4',
'pouchekine': 'alexander_pushkin',
'pouchequine': 'alexander_pushkin',
'pouchkine': 'alexander_pushkin',
'pouchou': 'merille',
'poudichery': 'puducherry',
'pouet-pouet-boat': 'pop_pop_boat',
'poufed': 'pouf',
'pouffe': 'tuffet',
'pouffes': 'tuffet',
'poufing': 'pouf',
'poufs': 'pouf',
'pougnadoire': 'sainte-enimie',
'poui': 'tabebuia',
'pouishnoff': 'leff_pouishnoff',
'poujade': 'pierre_poujade',
'poujadism': 'pierre_poujade',
'poujadisme': 'pierre_poujade',
'poujadist': 'pierre_poujade',
'poulantzas': 'nicos_poulantzas',
'pouldron': 'pauldron',
'poulenc': 'francis_poulenc',
'poulet': 'chicken',
'pouliche': 'pre-ferment',
'poulidor': 'raymond_poulidor',
'poulo-condore': 'con_dao',
'poulpoulak': 'pulpulak',
'poulsham': 'st_cuthbert_out',
'poultrygeist': 'poultrygeist:_night_of_the_chicken_dead',
'poulunoe': 'nereid',
'poumai': 'poumei',
'pound-drever-hall': 'pound-drever-hall_technique',
'pound-foot': 'foot-pound_force',
'pound-for-pound': 'pound_for_pound',
'poundals': 'poundal',
'poundffald': 'gower_peninsula',
'poundfield': 'pinfold',
'poundmaker': 'pitikwahanapiwiyin',
'pounds': 'pound',
'pounds-force': 'pound-force',
'poundsterling': 'pound_sterling',
'pounou': 'bapounou',
'poupelin': 'sailor_moon_supers:_the_movie',
'poupon': 'grey_poupon',
'poupou': 'raymond_poulidor',
'pour': 'rain',
'pour-soi': 'pour_soi',
'pourbaix': 'pourbaix_diagram',
'pourbaix-lundin': 'marietta_de_pourbaix-lundin',
'pourewa': 'pourewa_island',
'pourganji': 'hossein_pourganji',
'pournario': 'pournari',
'pournarion': 'pournari',
'pournelle': 'jerry_pournelle',
'pourpirar': 'nasser_pourpirar',
'pourquoi-pas': 'pourquoi_pas',
'pourthiaea': 'photinia',
'pourtout': 'carrosserie_pourtout',
'pous': 'workers_party_of_socialist_unity',
'pousaat': 'pursat',
'pousada': 'pousadas_de_portugal',
'poussard': 'horace_poussard',
'pousse-pousse': 'rickshaw',
'poussin': 'nicolas_poussin',
'poussinistes': 'poussinist',
'poussins': 'nicolas_poussin',
'poussy': 'peyo',
'poustinik': 'poustinia',
'poutargue': 'botargo',
'pouters': 'pouter',
'pouthisat': 'pursat',
'pouto': 'pouto_peninsula',
'poutot-en-bessin': 'putot-en-bessin',
'pouts': 'pout',
'poux': 'jean-baptiste_poux',
'pova': 'pend_oreille_valley_railroad',
'povaliy': 'taisia_povaliy',
'povarotti': 'luciano_pavarotti',
'poveka': 'maria_martinez',
'povel': 'povel_ramel',
'poventsa': 'povenets',
'pover': 'poverty',
'poverties': 'poverty',
'poverty-stricken': 'poverty',
'povertystricken': 'poverty',
'poviat': 'powiat',
'povidla': 'powidl',
'povidle': 'powidl',
'povidlo': 'powidl',
'povidone': 'polyvinylpyrrolidone',
'povitica': 'nut_roll',
'povolzhye': 'volga_region',
'povp': 'point_of_view',
'povray': 'pov-ray',
'pow-mia': 'pow/mia_flag',
'pow/mia': 'pow/mia_flag',
'powa': 'phowa',
'powatan': 'powhatan',
'powder': 'protocol_for_web_description_resources',
'powder-actuated': 'powder-actuated_tool',
'powder-finger': 'powderfinger',
'powderblue': 'x11_color_names',
'powderbox': 'powder_box',
'powdercoating': 'powder_coating',
'powderginfer': 'powderfinger',
'powderham': 'powderham_castle',
'powdermonkey': 'powder_monkey',
'powderpuff': 'powder_puff',
'powders': 'powder',
'powdershire': 'hundreds_of_cornwall',
'powderworks': 'powder_magazine',
'powell-peralta': 'powell_peralta',
'powell-pressburger': 'powell_and_pressburger',
'powelldoctrine': 'powell_doctrine',
'powen': 'gwyniad',
'power': 'ibm_power',
'power--lens': 'optical_power',
'power-associative': 'power_associativity',
'power-associativity': 'power_associativity',
'power-blade': 'power_blade',
'power-bocking': 'powerbocking',
'power-heat-set': 'heatsetting',
'power-heat-setting': 'heatsetting',
'power-law': 'power_law',
'power-lifter': 'powerlifting',
'power-nap': 'power_nap',
'power-number': 'power_number',
'power-on-lan': 'wake-on-lan',
'power-pill': 'aphex_twin',
'power-pop': 'power_pop',
'power-sharing': 'consociationalism',
'power-supply': 'power_supply',
'power-to-weight': 'power-to-weight_ratio',
'power-tool': 'power_tool',
'power-ups': 'power-up',
'power/knowledge': 'power-knowledge',
'power/mass': 'power-to-weight_ratio',
'power/weight': 'power-to-weight_ratio',
'power95.3': 'wpyo',
'power:weight': 'power-to-weight_ratio',
'poweramc': 'powerdesigner',
'powerband': 'power_band',
'powerboard': 'power_strip',
'powerbock': 'powerbocking',
'powerbocker': 'powerbocking',
'powerbocks': 'powerbocking',
'powerbok': 'powerbocking',
'powerbokers': 'powerbocking',
'powerbottom': 'top_and_bottom',
'powerboy': 'power_boy',
'powerbroker': 'power_broker',
'powerburn': 'optical_disc_recording_technologies',
'powercage': 'power_cage',
'powercell': 'power_cell',
'powercenter': 'power_computing_corporation',
'powerchair': 'wheelchair',
'powerchord': 'power_chord',
'powerconnect': 'dell_powerconnect',
'powercor': 'spark_infrastructure',
'powercord': 'power_cord',
'powercreep': 'power_creep',
'powercut': 'power_outage',
'powerdolls': 'power_dolls:_detachment_of_limited_line_service',
'powerdomain': 'power_domains',
'powerdrink': 'resq',
'poweredge': 'dell_poweredge',
'poweredusb': 'powered_usb',
'powerfet': 'power_mosfet',
'powerfolk': 'atsushi_shindo',
'powergamer': 'powergaming',
'powergamers': 'power_gamer',
'powergen': 'e.on_uk',
'powergirl': 'power_girl',
'powerglove': 'power_glove',
'powergrid': 'powergrid_corporation_of_india',
'poweriser': 'powerbocking',
'powerisers': 'powerbocking',
'powerising': 'powerbocking',
'powerizer': 'powerbocking',
'powerizers': 'powerbocking',
'powerkite': 'power_kite',
'powerland': 'powerpark',
'powerlevel': 'experience_point',
'powerlevelling': 'experience_point',
'powerlifter': 'powerlifting',
'powerlineblog': 'power_line',
'powerlinx': 'powerlink',
'powermac': 'power_macintosh',
'powerman': 'power_man',
'powermaster': 'powermasters',
'powermetal': 'power_metal',
'powermeter': 'power_meter',
'powermotoring': 'powered_paragliding',
'powernap': 'power_nap',
'powernapping': 'power_nap',
'powernoise': 'power_noise',
'powernoize': 'power_noise',
'poweropen': 'poweropen_environment',
'powerpad': 'power_pad',
'powerplant': 'power_station',
'powerplay': 'power_play',
'powerpnt': 'microsoft_powerpoint',
'powerpnt.exe': 'microsoft_powerpoint',
'powerpoint': 'microsoft_powerpoint',
'powerpointy': 'microsoft_powerpoint',
'powerpop': 'power_pop',
'powerpuff': 'the_powerpuff_girls',
'powerrangers': 'power_rangers',
'powersat': 'space-based_solar_power',
'powersaver': 'longhaul',
'powerscan': 'psc_inc.',
'powerschool': 'pearson_education',
'powerset': 'power_set',
'powershare': 'apple_open_collaboration_environment',
'powershell': 'windows_powershell',
'powershift': 'powershifting',
'powershot': 'canon_powershot',
'powerskip': 'powerbocking',
'powerskips': 'powerbocking',
'powersmooth': 'smooth_number',
'powersoccer': 'powerchair_football',
'powersong': 'ipod',
'powerstat': 'transformer',
'powerstone': 'power_stone',
'powerstrip': 'power_strip',
'powerstroke': 'ford_power_stroke_engine',
'powersuit': 'power_suit',
'powersupply': 'power_supply',
'powersurge': 'power_surge',
'powertab': 'power_tab_editor',
'powertalk': 'apple_open_collaboration_environment',
'powertap': 'cycling_power_meter',
'powerteacher': 'powerschool',
'powertool': 'power_tool',
'powertoy': 'microsoft_powertoys',
'powertoys': 'microsoft_powertoys',
'powertrains': 'powertrain',
'powerup': 'power-up',
'powerups': 'power-up',
'poweruser': 'power_user',
'powervault': 'dell_powervault',
'powerwasher': 'pressure_washer',
'powerwheels': 'power_wheels',
'powhatans': 'powhatan',
'powhaten': 'powhatan',
'powhattan': 'powhatan',
'powiats': 'powiat',
'powidel': 'powidl',
'powidzki': 'powidz_landscape_park',
'powis': 'powys',
'powl': 'public_offering_without_listing',
'powldron': 'pauldron',
'pownage': 'pwn',
'pownce.com': 'pownce',
'powne': 'pwn',
'powned': 'pwn',
'pownzored': 'pwn',
'powow': 'pow-wow',
'powrbook': 'powerbook',
'pows': 'prisoner_of_war',
'powwow': 'pow-wow',
'powys/powys': 'powys',
'poxvirus': 'poxviridae',
'poxviruses': 'poxviridae',
'poyais': 'gregor_macgregor',
'poyli': 'poylu',
'poynton-with-worth': 'poynton',
'poyntonia': 'poyntonia_paludicola',
'poyopoyozaurus': 'mama_loves_the_poyopoyo-saurus',
'poypifer': 'coral',
'poythress': 'vern_poythress',
'pozan': 'wielkopolski',
'pozcum': 'bugchasing',
'pozezena': 'pojejena',
'pozharskaya': 'pozharsky',
'pozharski': 'pozharsky',
'pozharskii': 'pozharsky',
'pozharskiy': 'pozharsky',
'pozhichalur': 'polichalur',
'pozhou': 'paju',
'pozi': 'pozidriv',
'pozidrive': 'pozidriv',
'poznyaky': 'pozniaky',
'pozolli': 'pozole',
'pozsony': 'bratislava',
'pozun': 'bratislava',
'pozzolanic': 'pozzolan',
'pozzolans': 'pozzolan',
'pozzomoretto': 'villafranca_di_verona',
'pozzulana': 'pozzolana',
'pozzuolana': 'pozzolana'}
|
PypiClean
|
/grease-0.3.zip/grease-0.3/doc/build/html/_static/searchtools.js
|
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
* of stemmed words, hlwords is the list of normal, unstemmed
* words. the first one is used to find the occurrence, the
* latter for highlighting it.
*/
jQuery.makeSearchSummary = function(text, keywords, hlwords) {
var textLower = text.toLowerCase();
var start = 0;
$.each(keywords, function() {
var i = textLower.indexOf(this.toLowerCase());
if (i > -1)
start = i;
});
start = Math.max(start - 120, 0);
var excerpt = ((start > 0) ? '...' : '') +
$.trim(text.substr(start, 240)) +
((start + 240 < text.length) ? '...' : '');
var rv = $('<div class="context"></div>').text(excerpt);
$.each(hlwords, function() {
rv = rv.highlightText(this, 'highlighted');
});
return rv;
}
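/* Descriptive note (added; not part of the original file): makeSearchSummary is called
 * by the result renderer further down with the raw page source, the stemmed search
 * terms and the unstemmed terms, e.g. $.makeSearchSummary(data, searchterms, hlterms).
 * It returns a <div class="context"> node holding a roughly 240-character excerpt
 * around the first keyword hit, with the unstemmed terms highlighted. */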
/**
* Porter Stemmer
*/
var PorterStemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}
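/* Illustrative usage sketch (added; not part of the original file): the Search module
 * below constructs one stemmer per query and runs every non-stopword term through
 * stemWord() before the index lookup, e.g.
 *   var stemmer = new PorterStemmer();
 *   stemmer.stemWord("running");   // -> "run"
 *   stemmer.stemWord("searches");  // -> "search"
 */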
/**
* Search Module
*/
var Search = {
_index : null,
_queued_query : null,
_pulse_status : -1,
init : function() {
var params = $.getQueryParameters();
if (params.q) {
var query = params.q[0];
$('input[name="q"]')[0].value = query;
this.performSearch(query);
}
},
loadIndex : function(url) {
$.ajax({type: "GET", url: url, data: null, success: null,
dataType: "script", cache: true});
},
setIndex : function(index) {
var q;
this._index = index;
if ((q = this._queued_query) !== null) {
this._queued_query = null;
Search.query(q);
}
},
hasIndex : function() {
return this._index !== null;
},
deferQuery : function(query) {
this._queued_query = query;
},
stopPulse : function() {
this._pulse_status = 0;
},
startPulse : function() {
if (this._pulse_status >= 0)
return;
function pulse() {
Search._pulse_status = (Search._pulse_status + 1) % 4;
var dotString = '';
for (var i = 0; i < Search._pulse_status; i++)
dotString += '.';
Search.dots.text(dotString);
if (Search._pulse_status > -1)
window.setTimeout(pulse, 500);
};
pulse();
},
/**
* perform a search for something
*/
performSearch : function(query) {
// create the required interface elements
this.out = $('#search-results');
this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
this.dots = $('<span></span>').appendTo(this.title);
this.status = $('<p style="display: none"></p>').appendTo(this.out);
this.output = $('<ul class="search"/>').appendTo(this.out);
$('#search-progress').text(_('Preparing search...'));
this.startPulse();
// index already loaded, the browser was quick!
if (this.hasIndex())
this.query(query);
else
this.deferQuery(query);
},
query : function(query) {
var stopwords = ['and', 'then', 'into', 'it', 'as', 'are', 'in',
'if', 'for', 'no', 'there', 'their', 'was', 'is',
'be', 'to', 'that', 'but', 'they', 'not', 'such',
'with', 'by', 'a', 'on', 'these', 'of', 'will',
'this', 'near', 'the', 'or', 'at'];
// stem the searchterms and add them to the correct list
var stemmer = new PorterStemmer();
var searchterms = [];
var excluded = [];
var hlterms = [];
var tmp = query.split(/\s+/);
var object = (tmp.length == 1) ? tmp[0].toLowerCase() : null;
for (var i = 0; i < tmp.length; i++) {
if ($u.indexOf(stopwords, tmp[i]) != -1 || tmp[i].match(/^\d+$/) ||
tmp[i] == "") {
// skip this "word"
continue;
}
// stem the word
var word = stemmer.stemWord(tmp[i]).toLowerCase();
// select the correct list
if (word[0] == '-') {
var toAppend = excluded;
word = word.substr(1);
}
else {
var toAppend = searchterms;
hlterms.push(tmp[i].toLowerCase());
}
// only add if not already in the list
if (!$.contains(toAppend, word))
toAppend.push(word);
};
var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
// console.debug('SEARCH: searching for:');
// console.info('required: ', searchterms);
// console.info('excluded: ', excluded);
// prepare search
var filenames = this._index.filenames;
var titles = this._index.titles;
var terms = this._index.terms;
var objects = this._index.objects;
var objtypes = this._index.objtypes;
var objnames = this._index.objnames;
var fileMap = {};
var files = null;
// different result priorities
var importantResults = [];
var objectResults = [];
var regularResults = [];
var unimportantResults = [];
$('#search-progress').empty();
// lookup as object
if (object != null) {
for (var prefix in objects) {
for (var name in objects[prefix]) {
var fullname = (prefix ? prefix + '.' : '') + name;
if (fullname.toLowerCase().indexOf(object) > -1) {
match = objects[prefix][name];
descr = objnames[match[1]] + _(', in ') + titles[match[0]];
// XXX the generated anchors are not generally correct
// XXX there may be custom prefixes
result = [filenames[match[0]], fullname, '#'+fullname, descr];
switch (match[2]) {
case 1: objectResults.push(result); break;
case 0: importantResults.push(result); break;
case 2: unimportantResults.push(result); break;
}
}
}
}
}
// sort results descending
objectResults.sort(function(a, b) {
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
});
importantResults.sort(function(a, b) {
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
});
unimportantResults.sort(function(a, b) {
return (a[1] > b[1]) ? -1 : ((a[1] < b[1]) ? 1 : 0);
});
// perform the search on the required terms
for (var i = 0; i < searchterms.length; i++) {
var word = searchterms[i];
// no match but word was a required one
if ((files = terms[word]) == null)
break;
if (files.length == undefined) {
files = [files];
}
// create the mapping
for (var j = 0; j < files.length; j++) {
var file = files[j];
if (file in fileMap)
fileMap[file].push(word);
else
fileMap[file] = [word];
}
}
// now check if the files don't contain excluded terms
for (var file in fileMap) {
var valid = true;
// check if all requirements are matched
if (fileMap[file].length != searchterms.length)
continue;
// ensure that none of the excluded terms is in the
// search result.
for (var i = 0; i < excluded.length; i++) {
if (terms[excluded[i]] == file ||
$.contains(terms[excluded[i]] || [], file)) {
valid = false;
break;
}
}
// if we have still a valid result we can add it
// to the result list
if (valid)
regularResults.push([filenames[file], titles[file], '', null]);
}
// delete unused variables in order to not waste
// memory until list is retrieved completely
delete filenames, titles, terms;
// now sort the regular results descending by title
regularResults.sort(function(a, b) {
var left = a[1].toLowerCase();
var right = b[1].toLowerCase();
return (left > right) ? -1 : ((left < right) ? 1 : 0);
});
// combine all results
var results = unimportantResults.concat(regularResults)
.concat(objectResults).concat(importantResults);
// print the results
var resultCount = results.length;
function displayNextItem() {
// results left, load the summary and display it
if (results.length) {
var item = results.pop();
var listItem = $('<li style="display:none"></li>');
if (DOCUMENTATION_OPTIONS.FILE_SUFFIX == '') {
// dirhtml builder
var dirname = item[0] + '/';
if (dirname.match(/\/index\/$/)) {
dirname = dirname.substring(0, dirname.length-6);
} else if (dirname == 'index/') {
dirname = '';
}
listItem.append($('<a/>').attr('href',
DOCUMENTATION_OPTIONS.URL_ROOT + dirname +
highlightstring + item[2]).html(item[1]));
} else {
// normal html builders
listItem.append($('<a/>').attr('href',
item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
highlightstring + item[2]).html(item[1]));
}
if (item[3]) {
listItem.append($('<span> (' + item[3] + ')</span>'));
Search.output.append(listItem);
listItem.slideDown(5, function() {
displayNextItem();
});
} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
$.get(DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' +
item[0] + '.txt', function(data) {
if (data != '') {
listItem.append($.makeSearchSummary(data, searchterms, hlterms));
Search.output.append(listItem);
}
listItem.slideDown(5, function() {
displayNextItem();
});
});
} else {
// no source available, just display title
Search.output.append(listItem);
listItem.slideDown(5, function() {
displayNextItem();
});
}
}
// search finished, update title and status message
else {
Search.stopPulse();
Search.title.text(_('Search Results'));
if (!resultCount)
Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
else
Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
Search.status.fadeIn(500);
}
}
displayNextItem();
}
}
$(document).ready(function() {
Search.init();
});
|
PypiClean
|
/kucoin-python-1.0.14.tar.gz/kucoin-python-1.0.14/README.rst
|
===============================
Welcome to python-kucoin-sdk
===============================
.. image:: https://img.shields.io/pypi/l/python-kucoin
:target: https://github.com/Kucoin/kucoin-python-sdk/blob/master/LICENSE
.. image:: https://img.shields.io/badge/python-3.6%2B-green
:target: https://pypi.org/project/python-kucoin
Features
--------
- Implementation of REST endpoints
- Simple handling of authentication
- Response exception handling
- Implementation of websockets (note: Python 3.6+ only)
Update
----------
- 2022-02-21
Quick Start
-----------
Register an account with `KuCoin <https://www.kucoin.com/ucenter/signup>`_.
To test, use the Sandbox at `KuCoin Sandbox <https://sandbox.kucoin.com/>`_.
`Generate an API Key <https://www.kucoin.com/account/api>`_
or `Generate an API Key in Sandbox <https://sandbox.kucoin.com/account/api>`_ and enable it.
.. code:: bash
pip install kucoin-python
.. code:: python
# MarketData
from kucoin.client import Market
client = Market(url='https://api.kucoin.com')
# client = Market()
# or connect to Sandbox
# client = Market(url='https://openapi-sandbox.kucoin.com')
# client = Market(is_sandbox=True)
# get symbol kline
klines = client.get_kline('BTC-USDT','1min')
# get server timestamp
server_time = client.get_server_timestamp()
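# (illustrative addition, not in the original README) fetch the level-1 ticker for a
# symbol; the method name get_ticker is an assumption about the Market client, so
# verify it against the installed SDK before relying on it
ticker = client.get_ticker('BTC-USDT')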
api_key = '<api_key>'
api_secret = '<api_secret>'
api_passphrase = '<api_passphrase>'
# Trade
from kucoin.client import Trade
client = Trade(key='', secret='', passphrase='', is_sandbox=False, url='')
# or connect to Sandbox
# client = Trade(api_key, api_secret, api_passphrase, is_sandbox=True)
# place a limit buy order
order_id = client.create_limit_order('BTC-USDT', 'buy', '1', '8000')
# place a market buy order Use cautiously
order_id = client.create_market_order('BTC-USDT', 'buy', size='1')
# cancel limit order
client.cancel_order('5bd6e9286d99522a52e458de')
# User
from kucoin.client import User
client = User(api_key, api_secret, api_passphrase)
# or connect to Sandbox
# client = User(api_key, api_secret, api_passphrase, is_sandbox=True)
address = client.get_withdrawal_quota('KCS')
Websockets
----------
.. code:: python
import asyncio
from kucoin.client import WsToken
from kucoin.ws_client import KucoinWsClient
async def main():
async def deal_msg(msg):
if msg['topic'] == '/spotMarket/level2Depth5:BTC-USDT':
print(msg["data"])
elif msg['topic'] == '/spotMarket/level2Depth5:KCS-USDT':
print(f'Get KCS level2 depth5:{msg["data"]}')
# is public
client = WsToken()
#is private
# client = WsToken(key='', secret='', passphrase='', is_sandbox=False, url='')
# is sandbox
# client = WsToken(is_sandbox=True)
ws_client = await KucoinWsClient.create(None, client, deal_msg, private=False)
# await ws_client.subscribe('/market/ticker:BTC-USDT,ETH-USDT')
await ws_client.subscribe('/spotMarket/level2Depth5:BTC-USDT,KCS-USDT')
while True:
await asyncio.sleep(60)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
PypiClean
|
/pystick-0.1.6.tar.gz/pystick-0.1.6/src/SCons/Tool/MSCommon/sdk.py
|
__revision__ = "src/engine/SCons/Tool/MSCommon/sdk.py 2014/03/02 14:18:15 garyo"
__doc__ = """Module to detect the Platform/Windows SDK
PSDK 2003 R1 is the earliest version detected.
"""
import os
import SCons.Errors
import SCons.Util
import common
debug = common.debug
# SDK Checks. This is of course a mess as everything else on MS platforms. Here
# is what we do to detect the SDK:
#
# For Windows SDK >= 6.0: just look into the registry entries:
# HKLM\Software\Microsoft\Microsoft SDKs\Windows
# All the keys in there are the available versions.
#
# For Platform SDK before 6.0 (2003 server R1 and R2, etc...), there does not
# seem to be any sane registry key, so the precise location is hardcoded.
#
# For versions below 2003R1, it seems the PSDK is included with Visual Studio?
#
# Also, per the following:
# http://benjamin.smedbergs.us/blog/tag/atl/
# VC++ Professional comes with the SDK, VC++ Express does not.
# Location of the SDK (checked for 6.1 only)
_CURINSTALLED_SDK_HKEY_ROOT = \
r"Software\Microsoft\Microsoft SDKs\Windows\CurrentInstallFolder"
class SDKDefinition(object):
"""
An abstract base class for trying to find installed SDK directories.
"""
def __init__(self, version, **kw):
self.version = version
self.__dict__.update(kw)
def find_sdk_dir(self):
"""Try to find the MS SDK from the registry.
Return None if failed or the directory does not exist.
"""
if not SCons.Util.can_read_reg:
debug('find_sdk_dir(): can not read registry')
return None
hkey = self.HKEY_FMT % self.hkey_data
debug('find_sdk_dir(): checking registry:%s'%hkey)
try:
sdk_dir = common.read_reg(hkey)
except WindowsError as e:
debug('find_sdk_dir(): no SDK registry key %s' % repr(hkey))
return None
debug('find_sdk_dir(): Trying SDK Dir: %s'%sdk_dir)
if not os.path.exists(sdk_dir):
debug('find_sdk_dir(): %s not on file system' % sdk_dir)
return None
ftc = os.path.join(sdk_dir, self.sanity_check_file)
if not os.path.exists(ftc):
debug("find_sdk_dir(): sanity check %s not found" % ftc)
return None
return sdk_dir
def get_sdk_dir(self):
"""Return the MSSSDK given the version string."""
try:
return self._sdk_dir
except AttributeError:
sdk_dir = self.find_sdk_dir()
self._sdk_dir = sdk_dir
return sdk_dir
def get_sdk_vc_script(self,host_arch, target_arch):
""" Return the script to initialize the VC compiler installed by SDK
"""
if (host_arch == 'amd64' and target_arch == 'x86'):
# No cross tools needed compiling 32 bits on 64 bit machine
host_arch=target_arch
arch_string=target_arch
if (host_arch != target_arch):
arch_string='%s_%s'%(host_arch,target_arch)
debug("sdk.py: get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s"%(arch_string,
host_arch,
target_arch))
file=self.vc_setup_scripts.get(arch_string,None)
debug("sdk.py: get_sdk_vc_script():file:%s"%file)
return file
class WindowsSDK(SDKDefinition):
"""
A subclass for trying to find installed Windows SDK directories.
"""
HKEY_FMT = r'Software\Microsoft\Microsoft SDKs\Windows\v%s\InstallationFolder'
def __init__(self, *args, **kw):
SDKDefinition.__init__(self, *args, **kw)
self.hkey_data = self.version
class PlatformSDK(SDKDefinition):
"""
A subclass for trying to find installed Platform SDK directories.
"""
HKEY_FMT = r'Software\Microsoft\MicrosoftSDK\InstalledSDKS\%s\Install Dir'
def __init__(self, *args, **kw):
SDKDefinition.__init__(self, *args, **kw)
self.hkey_data = self.uuid
#
# The list of VC initialization scripts installed by the SDK
# These should be tried if the vcvarsall.bat TARGET_ARCH fails
preSDK61VCSetupScripts = { 'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvarsamd64.bat',
'x86_amd64': r'bin\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
'ia64' : r'bin\vcvarsia64.bat'}
SDK61VCSetupScripts = {'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\amd64\vcvarsamd64.bat',
'x86_amd64': r'bin\x86_amd64\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\x86_ia64\vcvarsx86_ia64.bat',
'ia64' : r'bin\ia64\vcvarsia64.bat'}
SDK70VCSetupScripts = { 'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvars64.bat',
'x86_amd64': r'bin\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
'ia64' : r'bin\vcvarsia64.bat'}
# The list of support SDKs which we know how to detect.
#
# The first SDK found in the list is the one used by default if there
# are multiple SDKs installed. Barring good reasons to the contrary,
# this means we should list SDKs with from most recent to oldest.
#
# If you update this list, update the documentation in Tool/mssdk.xml.
SupportedSDKList = [
WindowsSDK('7.0',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('6.1',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK61VCSetupScripts,
),
WindowsSDK('6.0A',
sanity_check_file=r'include\windows.h',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = preSDK61VCSetupScripts,
),
WindowsSDK('6.0',
sanity_check_file=r'bin\gacutil.exe',
include_subdir='include',
lib_subdir='lib',
vc_setup_scripts = preSDK61VCSetupScripts,
),
PlatformSDK('2003R2',
sanity_check_file=r'SetEnv.Cmd',
uuid="D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1",
vc_setup_scripts = preSDK61VCSetupScripts,
),
PlatformSDK('2003R1',
sanity_check_file=r'SetEnv.Cmd',
uuid="8F9E5EF3-A9A5-491B-A889-C58EFFECE8B3",
vc_setup_scripts = preSDK61VCSetupScripts,
),
]
SupportedSDKMap = {}
for sdk in SupportedSDKList:
SupportedSDKMap[sdk.version] = sdk
# Finding installed SDKs isn't cheap, because it goes not only to the
# registry but also to the disk to sanity-check that there is, in fact,
# an SDK installed there and that the registry entry isn't just stale.
# Find this information once, when requested, and cache it.
InstalledSDKList = None
InstalledSDKMap = None
def get_installed_sdks():
global InstalledSDKList
global InstalledSDKMap
debug('sdk.py:get_installed_sdks()')
if InstalledSDKList is None:
InstalledSDKList = []
InstalledSDKMap = {}
for sdk in SupportedSDKList:
debug('MSCommon/sdk.py: trying to find SDK %s' % sdk.version)
if sdk.get_sdk_dir():
debug('MSCommon/sdk.py:found SDK %s' % sdk.version)
InstalledSDKList.append(sdk)
InstalledSDKMap[sdk.version] = sdk
return InstalledSDKList
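# Illustrative usage sketch (added to this copy; not in the original module), using
# only names defined in this file:
#
#     import SCons.Tool.MSCommon.sdk as sdk
#     for s in sdk.get_installed_sdks():
#         print("%s -> %s" % (s.version, s.get_sdk_dir()))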
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
SDKEnvironmentUpdates = {}
def set_sdk_by_directory(env, sdk_dir):
global SDKEnvironmentUpdates
debug('set_sdk_by_directory: Using dir:%s'%sdk_dir)
try:
env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
except KeyError:
env_tuple_list = []
SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
include_path = os.path.join(sdk_dir, 'include')
mfc_path = os.path.join(include_path, 'mfc')
atl_path = os.path.join(include_path, 'atl')
if os.path.exists(mfc_path):
env_tuple_list.append(('INCLUDE', mfc_path))
if os.path.exists(atl_path):
env_tuple_list.append(('INCLUDE', atl_path))
env_tuple_list.append(('INCLUDE', include_path))
env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
for variable, directory in env_tuple_list:
env.PrependENVPath(variable, directory)
# TODO(sgk): currently unused; remove?
def get_cur_sdk_dir_from_reg():
"""Try to find the platform sdk directory from the registry.
Return None if failed or the directory does not exist"""
if not SCons.Util.can_read_reg:
debug('SCons cannot read registry')
return None
try:
val = common.read_reg(_CURINSTALLED_SDK_HKEY_ROOT)
debug("Found current sdk dir in registry: %s" % val)
    except WindowsError as e:
debug("Did not find current sdk in registry")
return None
if not os.path.exists(val):
debug("Current sdk dir %s not on fs" % val)
return None
return val
def get_sdk_by_version(mssdk):
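    """Return the installed SDK object for the given version string, or
    None if that (supported) version is not installed.  Raises a
    UserError for versions this module does not know about."""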
if mssdk not in SupportedSDKMap:
msg = "SDK version %s is not supported" % repr(mssdk)
raise SCons.Errors.UserError(msg)
get_installed_sdks()
return InstalledSDKMap.get(mssdk)
def get_default_sdk():
"""Set up the default Platform/Windows SDK."""
get_installed_sdks()
if not InstalledSDKList:
return None
return InstalledSDKList[0]
def mssdk_setup_env(env):
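    """Set up env for the Microsoft SDK selected by MSSDK_DIR,
    MSSDK_VERSION or MSVS_VERSION, falling back to the default
    installed SDK when none of those variables is set."""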
debug('sdk.py:mssdk_setup_env()')
if 'MSSDK_DIR' in env:
sdk_dir = env['MSSDK_DIR']
if sdk_dir is None:
return
sdk_dir = env.subst(sdk_dir)
debug('sdk.py:mssdk_setup_env: Using MSSDK_DIR:%s'%sdk_dir)
elif 'MSSDK_VERSION' in env:
sdk_version = env['MSSDK_VERSION']
if sdk_version is None:
msg = "SDK version %s is not installed" % repr(mssdk)
raise SCons.Errors.UserError(msg)
sdk_version = env.subst(sdk_version)
mssdk = get_sdk_by_version(sdk_version)
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: Using MSSDK_VERSION:%s'%sdk_dir)
elif 'MSVS_VERSION' in env:
msvs_version = env['MSVS_VERSION']
debug('sdk.py:mssdk_setup_env:Getting MSVS_VERSION from env:%s'%msvs_version)
if msvs_version is None:
debug('sdk.py:mssdk_setup_env thinks msvs_version is None')
return
msvs_version = env.subst(msvs_version)
import vs
msvs = vs.get_vs_by_version(msvs_version)
debug('sdk.py:mssdk_setup_env:msvs is :%s'%msvs)
if not msvs:
            debug('sdk.py:mssdk_setup_env: no VS version detected, bailing out:%s'%msvs)
return
sdk_version = msvs.sdk_version
debug('sdk.py:msvs.sdk_version is %s'%sdk_version)
if not sdk_version:
return
mssdk = get_sdk_by_version(sdk_version)
if not mssdk:
mssdk = get_default_sdk()
if not mssdk:
return
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: Using MSVS_VERSION:%s'%sdk_dir)
else:
mssdk = get_default_sdk()
if not mssdk:
return
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: not using any env values. sdk_dir:%s'%sdk_dir)
set_sdk_by_directory(env, sdk_dir)
#print "No MSVS_VERSION: this is likely to be a bug"
def mssdk_exists(version=None):
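    """Return whether any supported SDK is installed, or, if a version
    string is given, whether that particular SDK version is installed."""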
sdks = get_installed_sdks()
if version is None:
return len(sdks) > 0
    return version in [sdk.version for sdk in sdks]
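# A minimal usage sketch (hypothetical SConstruct fragment, not part of this
# module): pick a specific SDK version when it is installed and otherwise let
# SCons fall back to its default SDK detection.  The 'mssdk' tool and the
# MSSDK_VERSION construction variable are the documented SCons hooks; the
# version string '6.1' is only an example.
#
#     import SCons.Tool.MSCommon.sdk as sdk
#     wanted = '6.1'
#     kw = {'tools': ['mssdk', 'msvc']}
#     if sdk.mssdk_exists(wanted):
#         kw['MSSDK_VERSION'] = wanted
#     env = Environment(**kw)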
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
PypiClean
|