/nm_transformers-1.5.1.42301-py3-none-any.whl/transformers/models/cpm/tokenization_cpm_fast.py
"""Tokenization classes."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import AddedToken, PreTrainedTokenizerFast
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
},
"tokenizer_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/tokenizer.json",
},
}
class CpmTokenizerFast(PreTrainedTokenizerFast):
"""Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models."""
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=False,
remove_space=True,
keep_accents=False,
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
sep_token="<sep>",
pad_token="<pad>",
cls_token="<cls>",
mask_token="<mask>",
additional_special_tokens=["<eop>", "<eod>"],
**kwargs,
):
"""
Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and
[SentencePiece](https://github.com/google/sentencepiece).
        This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
        refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary necessary to instantiate a tokenizer.
            do_lower_case (`bool`, *optional*, defaults to `False`):
Whether to lowercase the input when tokenizing.
remove_space (`bool`, *optional*, defaults to `True`):
Whether to strip the text when tokenizing (removing excess spaces before and after the string).
keep_accents (`bool`, *optional*, defaults to `False`):
Whether to keep accents when tokenizing.
bos_token (`str`, *optional*, defaults to `"<s>"`):
                The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
                token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
this token instead.
sep_token (`str`, *optional*, defaults to `"<sep>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
for sequence classification or for a text and a question for question answering. It is also used as the
last token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"<cls>"`):
The classifier token which is used when doing sequence classification (classification of the whole
sequence instead of per-token classification). It is the first token of the sequence when built with
special tokens.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
Additional special tokens used by the tokenizer.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
        # Mask token behaves like a normal word, i.e. includes the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
vocab_file=vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
remove_space=remove_space,
keep_accents=keep_accents,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self._pad_token_type_id = 3
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = bool(self.vocab_file)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation."
)
self.jieba = jieba
self.translator = str.maketrans(" \n", "\u2582\u2583")
# Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An XLNet sequence has the following format:
- single sequence: `X <sep> <cls>`
- pair of sequences: `A <sep> B <sep> <cls>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return token_ids_0 + sep + cls
return token_ids_0 + sep + token_ids_1 + sep + cls
# Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.create_token_type_ids_from_sequences
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
sequence pair mask has the following format:
```
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
```
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
"""
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0] + cls_segment_id
return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
# Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.save_vocabulary
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
batch_text_or_text_pairs = [
" ".join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)])
for text in batch_text_or_text_pairs
]
return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)
def _decode(self, *args, **kwargs):
text = super()._decode(*args, **kwargs)
text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
return text
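
# Usage sketch (not part of the original module): a hedged illustration of how
# the jieba pre-tokenization and the space/newline translation round-trip.
# Loading "TsinghuaAI/CPM-Generate" needs network access plus the `jieba` and
# `tokenizers` packages, and the relative imports above mean this only runs
# when the module is executed as part of the package (e.g. via `python -m`).
if __name__ == "__main__":
    tokenizer = CpmTokenizerFast.from_pretrained("TsinghuaAI/CPM-Generate")
    # Passing a list goes through _batch_encode_plus, where jieba segmentation
    # and the " \n" -> "\u2582\u2583" translation are applied.
    encoded = tokenizer(["清华大学 自然语言处理"])
    decoded = tokenizer.decode(encoded["input_ids"][0])
    # Spaces and newlines survive the round trip via "\u2582" and "\u2583".
    print(decoded)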
/ganariya_neat-0.96.0-py3-none-any.whl/neat/threaded.py
from __future__ import print_function
import warnings
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
HAVE_THREADS = False
else:
HAVE_THREADS = True
try:
# pylint: disable=import-error
import Queue as queue
except ImportError:
# pylint: disable=import-error
import queue
class ThreadedEvaluator(object):
"""
A threaded genome evaluator.
Useful on python implementations without GIL (Global Interpreter Lock).
"""
def __init__(self, num_workers, eval_function):
"""
eval_function should take two arguments (a genome object and the
configuration) and return a single float (the genome's fitness).
"""
self.num_workers = num_workers
self.eval_function = eval_function
self.workers = []
self.working = False
self.inqueue = queue.Queue()
self.outqueue = queue.Queue()
if not HAVE_THREADS: # pragma: no cover
warnings.warn("No threads available; use ParallelEvaluator, not ThreadedEvaluator")
def __del__(self):
"""
Called on deletion of the object. We stop our workers here.
WARNING: __del__ may not always work!
Please stop the threads explicitly by calling self.stop()!
TODO: ensure that there are no reference-cycles.
"""
if self.working:
self.stop()
def start(self):
"""Starts the worker threads"""
if self.working:
return
self.working = True
for i in range(self.num_workers):
w = threading.Thread(
name="Worker Thread #{i}".format(i=i),
target=self._worker,
)
w.daemon = True
w.start()
self.workers.append(w)
def stop(self):
"""Stops the worker threads and waits for them to finish"""
self.working = False
for w in self.workers:
w.join()
self.workers = []
def _worker(self):
"""The worker function"""
while self.working:
try:
genome_id, genome, config = self.inqueue.get(
block=True,
timeout=0.2,
)
except queue.Empty:
continue
f = self.eval_function(genome, config)
self.outqueue.put((genome_id, genome, f))
def evaluate(self, genomes, config):
"""Evaluate the genomes"""
if not self.working:
self.start()
p = 0
for genome_id, genome in genomes:
p += 1
self.inqueue.put((genome_id, genome, config))
# assign the fitness back to each genome
while p > 0:
p -= 1
ignored_genome_id, genome, fitness = self.outqueue.get()
genome.fitness = fitness
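
# Usage sketch (not part of the original module): how this evaluator is
# typically plugged into a neat-python run. The config file path and the
# fitness computed in eval_genome are placeholders/assumptions.
if __name__ == "__main__":
    import neat

    def eval_genome(genome, config):
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        # Placeholder fitness: reward small outputs for an all-zero input.
        return -abs(net.activate([0.0] * config.genome_config.num_inputs)[0])

    config_path = "config-feedforward"  # assumed to exist next to this script
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)
    population = neat.Population(config)
    evaluator = ThreadedEvaluator(num_workers=4, eval_function=eval_genome)
    winner = population.run(evaluator.evaluate, 10)
    evaluator.stop()
    print(winner)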
/github_python-1.2.3-py3-none-any.whl/github/client.py
from __future__ import annotations
import functools
import aiohttp
from typing import (
Awaitable,
Callable,
Literal,
Any,
Coroutine,
Dict,
Generator,
Optional,
Tuple,
Union,
List,
overload,
TypeVar,
)
from typing_extensions import Self, ParamSpec, Concatenate
from . import exceptions
from .cache import ObjectCache
from .http import http
from .objects import Gist, Issue, Organization, Repository, User, File
__all__: Tuple[str, ...] = ('GHClient', 'Client')
T = TypeVar('T')
P = ParamSpec('P')
class GHClient:
"""The main client, used to start most use-cases.
Parameters
----------
username: Optional[:class:`str`]
An optional username to be provided along with a token to make authenticated API calls.
If you provide a username, the token must be provided as well.
user_cache_size: Optional[:class:`int`]
Determines the maximum number of User objects that will be cached in memory.
        Defaults to 30, must be between 0 and 30 inclusive.
repo_cache_size: Optional[:class:`int`]
Determines the maximum number of Repository objects that will be cached in memory.
        Defaults to 15, must be between 0 and 30 inclusive.
custom_headers: Optional[:class:`dict`]
        A way to pass custom headers into the client session that drives the client, e.g. a user-agent.
Attributes
----------
username: Optional[:class:`str`]
The authenticated Client's username, if applicable.
token: Optional[:class:`str`]
The authenticated Client's token, if applicable.
"""
has_started: bool = False
def __init__(
self,
*,
username: Optional[str] = None,
token: Optional[str] = None,
user_cache_size: int = 30,
repo_cache_size: int = 15,
custom_headers: Dict[str, Union[str, int]] = {},
):
self._headers = custom_headers
if username and token:
self.username = username
self.__token = token
self.__auth = aiohttp.BasicAuth(username, token)
else:
self.__auth = None
self.username = None
self.__token = None
self.http = http(headers=custom_headers, auth=self.__auth)
self._user_cache = ObjectCache[Any, User](user_cache_size)
self._repo_cache = ObjectCache[Any, Repository](repo_cache_size)
        # Cache management
self._cache(type='user')(self.get_self) # type: ignore
self._cache(type='user')(self.get_user) # type: ignore
self._cache(type='repo')(self.get_repo) # type: ignore
def __call__(self, *args: Any, **kwargs: Any) -> Coroutine[Any, Any, Self]:
return self.start(*args, **kwargs)
def __await__(self) -> Generator[Any, Any, Self]:
return self.start().__await__()
async def __aenter__(self) -> Self:
await self.start()
return self
async def __aexit__(self, *args: Any, **kwargs: Any) -> None:
try:
session = self.http.session
await session.close()
except Exception as exc:
raise Exception('HTTP Session doesn\'t exist') from exc
def __repr__(self) -> str:
return f'<Client has_auth={bool(self.__auth)}>'
@overload
def check_limits(self, as_dict: Literal[True] = True) -> Dict[str, Union[str, int]]:
...
@overload
def check_limits(self, as_dict: Literal[False] = False) -> List[str]:
...
def check_limits(self, as_dict: bool = False) -> Union[Dict[str, Union[str, int]], List[str]]:
"""Returns the remaining number of API calls per timeframe.
Parameters
----------
as_dict: Optional[:class:`bool`]
Set to True to return the remaining calls in a dictionary.
Set to False to return the remaining calls in a list.
Defaults to False
"""
if not self.has_started:
raise exceptions.NotStarted
if not as_dict:
output: List[str] = []
for key, value in self.http.session._rates._asdict().items(): # type: ignore
output.append(f"{key} : {value}")
return output
return self.http.session._rates # type: ignore
async def update_auth(self, *, username: str, token: str) -> None:
"""Allows you to input auth information after instantiating the client.
Parameters
----------
username: :class:`str`
The username to update the authentication to.
Must also be provided with the valid token.
token: :class:`str`
The token to update the authentication to.
            Must also be provided with a valid username.
"""
# check if username and token is valid
await self.http.update_auth(username=username, token=token)
try:
await self.http.get_self()
except exceptions.InvalidToken as exc:
raise exceptions.InvalidToken from exc
async def start(self) -> Self:
"""Main entry point to the wrapper, this creates the ClientSession.
Parameters
----------
"""
if self.has_started:
raise exceptions.AlreadyStarted
if self.__auth:
self.http = await http(auth=self.__auth, headers=self._headers)
try:
await self.http.get_self()
except exceptions.InvalidToken as exc:
raise exceptions.InvalidToken from exc
else:
self.http = await http(auth=None, headers=self._headers)
self.has_started = True
return self
def _cache(
self: Self, *, type: str
) -> Callable[
[Callable[Concatenate[Self, P], Awaitable[T]]],
Callable[Concatenate[Self, P], Awaitable[Optional[Union[T, User, Repository]]]],
]:
def wrapper(
func: Callable[Concatenate[Self, P], Awaitable[T]]
) -> Callable[Concatenate[Self, P], Awaitable[Optional[Union[T, User, Repository]]]]:
@functools.wraps(func)
async def wrapped(self: Self, *args: P.args, **kwargs: P.kwargs) -> Optional[Union[T, User, Repository]]:
if type == 'user':
obj = self._user_cache.get(kwargs.get('user'))
if obj:
return obj
user: User = await func(self, *args, **kwargs) # type: ignore
self._user_cache[kwargs.get("user")] = user
return user
if type == 'repo':
obj = self._repo_cache.get(kwargs.get('repo'))
if obj:
return obj
repo: Repository = await func(self, *args, **kwargs) # type: ignore
self._repo_cache[kwargs.get('repo')] = repo
return repo
return wrapped
return wrapper
# @_cache(type='User')
async def get_self(self) -> User:
""":class:`User`: Returns the authenticated User object."""
if self.__auth:
return User(await self.http.get_self(), self.http)
else:
raise exceptions.NoAuthProvided
async def get_user(self, *, user: str) -> User:
""":class:`User`: Fetch a Github user from their username.
Parameters
----------
user: :class:`str`
The name of the user to fetch.
"""
return User(await self.http.get_user(user), self.http)
async def get_repo(self, *, owner: str, repo: str) -> Repository:
""":class:`Repository`: Fetch a Github repository from it's name.
Parameters
----------
owner: :class:`str`
            The name of the owner of a given repository.
repo: :class:`str`
The name of the repository to fetch.
"""
return Repository(await self.http.get_repo(owner, repo), self.http) # type: ignore
async def get_issue(self, *, owner: str, repo: str, issue: int) -> Issue:
""":class:`Issue`: Fetch a Github Issue from it's name.
Parameters
----------
owner: :class:`str`
The name of the owner of the repository for which the issue relates to.
repo: :class:`str`
The name of the repository to which the issue is related to.
issue: :class:`int`
The ID of the issue to fetch.
"""
return Issue(await self.http.get_repo_issue(owner, repo, issue), self.http) # type: ignore #fwiw, this shouldn't error but pyright <3
async def create_repo(
self,
name: str,
description: str = 'Repository created using Github-Api-Wrapper.',
public: bool = False,
gitignore: Optional[str] = None,
license: Optional[str] = None,
) -> Repository:
"""Creates a Repository with supplied data.
Requires API authentication.
Parameters
----------
name: :class:`str`
The name of the repository to be created.
description: :class:`str`
A description of the repository to be created.
public: :class:`bool`
            Determines whether the repository will be visible to the public.
Defaults to False (private repository).
gitignore: Optional[:class:`str`]
.gitignore template to use.
See https://github.com/github/gitignore for GitHub's own templates.
Defaults to None.
license: Optional[:class:`str`]
TODO: Document this.
Returns
-------
:class:`Repository`
"""
return Repository(
await self.http.create_repo(name, description, public, gitignore, license),
self.http,
)
async def delete_repo(self, repo: str) -> Optional[str]:
"""Delete a Github repository, requires authorisation.
Parameters
----------
repo: :class:`str`
The name of the repository to delete.
Returns
-------
Optional[:class:`str`]
"""
return await self.http.delete_repo(self.username, repo)
async def get_gist(self, gist: str) -> Gist:
"""Fetch a Github gist from it's id.
Parameters
----------
gist: :class:`str`
The id of the gist to fetch.
Returns
-------
:class:`Gist`
"""
return Gist(await self.http.get_gist(gist), self.http)
async def create_gist(
self, *, files: List[File], description: str = 'Gist from Github-Api-Wrapper', public: bool = True
) -> Gist:
"""Creates a Gist with the given files, requires authorisation.
Parameters
----------
files: List[:class:`File`]
A list of File objects to upload to the gist.
description: :class:`str`
A description of the gist.
public: :class:`bool`
Determines whether the gist will be visible to the public.
            Defaults to True (public).
Returns
-------
:class:`Gist`
"""
return Gist(
await self.http.create_gist(files=files, description=description, public=public),
self.http,
)
async def delete_gist(self, gist: int) -> Optional[str]:
"""Delete a Github gist, requires authorisation.
Parameters
----------
gist: :class:`int`
The ID of the gist to delete.
Returns
-------
Optional[:class:`str`]
"""
return await self.http.delete_gist(gist)
async def get_org(self, org: str) -> Organization:
"""Fetch a Github organization from it's name.
Parameters
----------
org: :class:`str`
The name of the organization to fetch.
Returns
-------
:class:`Organization`
"""
return Organization(await self.http.get_org(org), self.http)
async def latency(self) -> float:
""":class:`float`: Returns the latency of the client."""
return await self.http.latency()
async def close(self) -> None:
"""Close the session."""
await self.http.session.close()
class Client(GHClient):
pass
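
# Usage sketch (not part of the original module): the async entry points above
# used as a context manager. No credentials are passed here, so only public
# data is reachable; the user/repo names are placeholders. Because of the
# relative imports, run this as part of the installed package (python -m).
if __name__ == "__main__":
    import asyncio

    async def main() -> None:
        async with GHClient() as client:
            user = await client.get_user(user='github')
            repo = await client.get_repo(owner='python', repo='cpython')
            print(user, repo)

    asyncio.run(main())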
/jac_vision-1.4.1.9.tar.gz/jac_vision-1.4.1.9/jac_vision/dpt/model.py
import numpy as np
import torch
from PIL import Image
from transformers import DPTFeatureExtractor, DPTForDepthEstimation
class DPTLarge:
    def __init__(self, device=None, model="dpt-large"):
        if device is None:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        # Load the requested checkpoint (e.g. "dpt-large") from the Intel
        # namespace instead of silently ignoring the `model` argument.
        self.feature_extractor = DPTFeatureExtractor.from_pretrained(f"Intel/{model}")
        self.model = DPTForDepthEstimation.from_pretrained(f"Intel/{model}").to(
            self.device
        )
def estimate(self, image):
inputs = self.feature_extractor(images=image, return_tensors="pt").to(
self.device
)
outputs = self.model(**inputs)
predicted_depth = outputs.predicted_depth
prediction = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1),
size=image.size[::-1],
mode="bicubic",
align_corners=False,
)
output = prediction.squeeze().cpu().detach().numpy()
formatted = (output * 255 / np.max(output)).astype("uint8")
depth = Image.fromarray(formatted)
return depth
def estimate_batch(self, images):
inputs = self.feature_extractor(images=images, return_tensors="pt").to(
self.device
)
outputs = self.model(**inputs)
predicted_depth = outputs.predicted_depth
prediction = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1),
size=images[0].size[::-1],
mode="bicubic",
align_corners=False,
)
output = prediction.squeeze().cpu().detach().numpy()
formatted = (output * 255 / np.max(output)).astype("uint8")
depth_imgs = [Image.fromarray(formatted[i]) for i in range(len(formatted))]
del inputs, outputs
return depth_imgs
def get_labels(self):
return self.model.config.id2label.values()
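
# Usage sketch (not part of the original module): single-image depth
# estimation. "example.jpg" is a placeholder path; the first call downloads
# the Intel/dpt-large weights from the Hugging Face Hub.
if __name__ == "__main__":
    dpt = DPTLarge()
    image = Image.open("example.jpg").convert("RGB")
    depth = dpt.estimate(image)
    depth.save("example_depth.png")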
/Netzob-2.0.0.tar.gz/Netzob-2.0.0/src/netzob/Inference/Vocabulary/FormatOperations/FieldSplitAligned/FieldSplitAligned.py
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#| ANSSI, https://www.ssi.gouv.fr |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Georges Bossert <georges.bossert (a) supelec.fr> |
#| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
from collections import OrderedDict
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
from netzob.Model.Vocabulary.AbstractField import AbstractField
from netzob.Common.C_Extensions.WrapperArgsFactory import WrapperArgsFactory
from netzob.Model.Vocabulary.Types.AbstractType import AbstractType, UnitSize
from netzob.Model.Vocabulary.Types.TypeConverter import TypeConverter
from netzob.Model.Vocabulary.Types.HexaString import HexaString
from netzob.Model.Vocabulary.Types.Raw import Raw
from netzob.Model.Vocabulary.Field import Field
from netzob.Model.Vocabulary.Messages.AbstractMessage import AbstractMessage
from netzob.Inference.Vocabulary.Search.SearchEngine import SearchEngine
from netzob import _libNeedleman # type: ignore
@NetzobLogger
class FieldSplitAligned(object):
"""This class align the data attached to a specified field
and build a field definition based on the result of the alignment.
The alignement is based on Needleman & Wunsch sequence alignement.
>>> import binascii
>>> from netzob.all import *
>>> samples = [b"01ff00ff", b"0222ff0000ff", b"03ff000000ff", b"0444ff00000000ff", b"05ff0000000000ff", b"06ff000000000000ff"]
>>> messages = [RawMessage(data=binascii.unhexlify(sample)) for sample in samples]
>>> symbol = Symbol(messages=messages)
>>> symbol.addEncodingFunction(TypeEncodingFunction(HexaString))
>>> print(symbol.str_data())
Field
--------------------
'01ff00ff'
'0222ff0000ff'
'03ff000000ff'
'0444ff00000000ff'
'05ff0000000000ff'
'06ff000000000000ff'
--------------------
>>> fs = FieldSplitAligned()
>>> fs.execute(symbol)
>>> print(symbol.str_data())
Field00 | Field01 | Field02 | Field03
------- | ------- | ------------ | -------
'01' | 'ff00' | '' | 'ff'
'0222' | 'ff00' | '00' | 'ff'
'03' | 'ff00' | '0000' | 'ff'
'0444' | 'ff00' | '000000' | 'ff'
'05' | 'ff00' | '00000000' | 'ff'
'06' | 'ff00' | '0000000000' | 'ff'
------- | ------- | ------------ | -------
>>> samples = [b"hello toto, what's up in France ?", b"hello netzob, what's up in UK ?", b"hello sygus, what's up in Germany ?"]
>>> messages = [RawMessage(data=sample) for sample in samples]
>>> symbol = Symbol(messages=messages)
>>> print(symbol.str_data())
Field
-------------------------------------
"hello toto, what's up in France ?"
"hello netzob, what's up in UK ?"
"hello sygus, what's up in Germany ?"
-------------------------------------
>>> fs = FieldSplitAligned()
>>> fs.execute(symbol, useSemantic = False)
>>> print(symbol.str_data())
Field00 | Field01 | Field02 | Field03 | Field04
-------- | -------- | ----------------- | --------- | -------
'hello ' | 'toto' | ", what's up in " | 'France' | ' ?'
'hello ' | 'netzob' | ", what's up in " | 'UK' | ' ?'
'hello ' | 'sygus' | ", what's up in " | 'Germany' | ' ?'
-------- | -------- | ----------------- | --------- | -------
# Let's illustrate the use of semantic constrained sequence alignment with a simple example
>>> samples = [b"[email protected]", b"Mathieu-0908070605-31 rue de Paris, 75000 Paris, [email protected]", b"Olivia-0348234556-7 allee des peupliers, 13000 Marseille, [email protected]"]
>>> messages = [RawMessage(data=sample) for sample in samples]
>>> symbol = Symbol(messages=messages)
>>> print(symbol.str_data())
Field
-------------------------------------------------------------------------------------------
'[email protected]'
'Mathieu-0908070605-31 rue de Paris, 75000 Paris, [email protected]'
'Olivia-0348234556-7 allee des peupliers, 13000 Marseille, [email protected]'
-------------------------------------------------------------------------------------------
>>> fs = FieldSplitAligned(doInternalSlick=True)
>>> fs.execute(symbol, useSemantic = False)
>>> print(symbol.str_data())
Field00 | Field01 | Field02
--------- | ------- | -----------------------------------------------------------------------------------
'John' | '-0' | '[email protected]'
'Mathieu' | '-0' | '908070605-31 rue de Paris, 75000 Paris, [email protected]'
'Olivia' | '-0' | '348234556-7 allee des peupliers, 13000 Marseille, [email protected]'
--------- | ------- | -----------------------------------------------------------------------------------
>>> applicativeDatas = []
>>> applicativeDatas.append(ApplicativeData("Firstname", String("John")))
>>> applicativeDatas.append(ApplicativeData("Firstname", String("Mathieu")))
>>> applicativeDatas.append(ApplicativeData("Firstname", String("Olivia")))
>>> applicativeDatas.append(ApplicativeData("PhoneNumber", String("0108030405")))
>>> applicativeDatas.append(ApplicativeData("PhoneNumber", String("0348234556")))
>>> applicativeDatas.append(ApplicativeData("PhoneNumber", String("0908070605")))
>>> applicativeDatas.append(ApplicativeData("StreetAddress", String("31 rue de Paris")))
>>> applicativeDatas.append(ApplicativeData("StreetAddress", String("7 allee des peupliers")))
>>> applicativeDatas.append(ApplicativeData("CityAddress", String("Paris")))
>>> applicativeDatas.append(ApplicativeData("CityAddress", String("marseille")))
>>> applicativeDatas.append(ApplicativeData("Email", String("[email protected]")))
>>> applicativeDatas.append(ApplicativeData("Email", String("[email protected]")))
>>> applicativeDatas.append(ApplicativeData("Email", String("[email protected]")))
>>> session = Session(messages, applicativeData=applicativeDatas)
>>> symbol = Symbol(messages=messages)
>>> fs = FieldSplitAligned()
>>> fs.execute(symbol, useSemantic=True)
>>> print(symbol.str_data())
Field00 | Field01 | Field02 | Field03 | Field04 | Field05 | Field06 | Field07 | Field08
--------- | ------- | ------- | ------- | -------- | ------- | ------------------------------------------------ | ------- | --------------------------
'John' | '-0' | '10' | '8' | '030405' | '-' | '' | '-' | '[email protected]'
'Mathieu' | '-0' | '90' | '8' | '070605' | '-' | '31 rue de Paris, 75000 Paris, France' | '-' | '[email protected]'
'Olivia' | '-0' | '34' | '8' | '234556' | '-' | '7 allee des peupliers, 13000 Marseille, France' | '-' | '[email protected]'
--------- | ------- | ------- | ------- | -------- | ------- | ------------------------------------------------ | ------- | --------------------------
"""
def __init__(self, unitSize=UnitSize.SIZE_8,
doInternalSlick=False):
"""Constructor.
"""
self.doInternalSlick = doInternalSlick
self.unitSize = unitSize
@typeCheck(AbstractField, bool)
def execute(self, field, useSemantic=True):
"""Execute the alignement on the specified field.
:parameter field: the field that will be aligned
:type field: :class:`AbstractField <netzob.Model.Vocabulary.AbstractField.AbstractField>`
"""
if field is None:
raise TypeError("Field cannot be None")
if useSemantic is None:
raise TypeError("useSemantic cannot be None")
# First step: we clean and reset the field
from netzob.Inference.Vocabulary.Format import Format
Format.resetFormat(field)
# Retrieve all the segment of messages to align
messageValues = field.getMessageValues(encoded=False, styled=False)
# Semantic tags (a.k.a applicative data)
semanticTags = None
if useSemantic:
semanticTags = [
self.__searchApplicativeDataInMessage(message)
for message, values in list(messageValues.items())
]
if len(list(messageValues.values())) == 0:
return
        # Execute the alignment
(alignment, semanticTags, score) = self._alignData(
list(messageValues.values()), semanticTags)
# Check the results
if alignment is None:
raise ValueError(
"Impossible to compute an alignment for the specifed data")
        # Build Fields based on computed alignment and semantic tags
self._updateFieldsFromAlignment(field, alignment, semanticTags)
#if useSemantic:
# self._createSubFieldsFollowingSemanticTags(field, alignment, semanticTags)
@typeCheck(AbstractField, bytes, dict)
def _updateFieldsFromAlignment(self, field, alignment, semanticTags):
"""This methods creates a regex based on the computed alignment
by the Needleman&Wunsch algorithm and the attached semantic tags.
@param field : the field for which it creates the regex (and the sub fields)
@param align : the string representing the common alignment between messages of the symbol.
@param semanticTags : the list of tags attached to each half-bytes of the provided alignment."""
if field is None:
raise TypeError("Field cannot be None")
if alignment is None:
raise TypeError("Alignment cannot be None")
if semanticTags is None:
raise TypeError("SemanticTags cannot be None")
self._logger.debug("Semantic Tags : {0}".format(semanticTags))
self._logger.debug("Alignment: {0}".format(alignment))
# Create fields following the alignment
self._splitFieldFollowingAlignment(field, alignment)
def _splitFieldFollowingAlignment(self, field, align):
"""Update the field definition with new fields following the
specified align."""
# STEP 1 : Create a field separation based on static and dynamic fields
leftAlign, rightAlign = self._splitAlignment(align)
splited = self._mergeAlign(leftAlign, rightAlign)
step1Fields = []
for (entryVal, entryDyn) in splited:
if entryDyn:
newField = Field(Raw(nbBytes=(0, int(len(entryVal) / 2))))
else:
newField = Field(
Raw(TypeConverter.convert(entryVal, HexaString, Raw)))
step1Fields.append(newField)
for idx,f in enumerate(step1Fields):
f.encodingFunctions = field.encodingFunctions.values()
f.name="Field%02d"%idx
field.fields = step1Fields
def _splitAlignment(self, align):
"""Splits the specified alignment which is composed of hexastring and of dynamic sections ("-")
        The idea is to split the alignment in two (left and right) and then to merge the split left and split right halves.
>>> import random
>>> fs = FieldSplitAligned()
>>> data = b"-----01987640988765--876--678987--67898-------6789789-87987978----"
>>> print(fs._mergeAlign(*fs._splitAlignment(data)))
[['-----', True], ['01987640988765', False], ['--', True], ['876', False], ['--', True], ['678987', False], ['--', True], ['67898', False], ['-------', True], ['6789789', False], ['-', True], ['87987978', False], ['----', True]]
>>> data = b"-------------------------------"
>>> print(fs._mergeAlign(*fs._splitAlignment(data)))
[['-------------------------------', True]]
>>> data = b"98754678998765467890875645"
>>> print(fs._mergeAlign(*fs._splitAlignment(data)))
[['98754678998765467890875645', False]]
>>> data = b"---------------987-----6789765--568767---568776897689---567876------------------5678657865-9876579867789-9876879-9876787678657865467876546"
>>> print(fs._mergeAlign(*fs._splitAlignment(data)))
[['---------------', True], ['987', False], ['-----', True], ['6789765', False], ['--', True], ['568767', False], ['---', True], ['568776897689', False], ['---', True], ['567876', False], ['------------------', True], ['5678657865', False], ['-', True], ['9876579867789', False], ['-', True], ['9876879', False], ['-', True], ['9876787678657865467876546', False]]
>>> nbField = random.randint(50000, 200000)
>>> tab = []
>>> for i in range(nbField):
... if i%2 == 0:
... tab.append(b"-"*random.randint(1, 20))
... else:
... tab.append("".join([random.choice('0123456789abcdef') for x in range(random.randint(1, 20))]).encode('utf-8'))
>>> data = b"".join(tab)
>>> nbField == len(fs._mergeAlign(*fs._splitAlignment(data)))
True
"""
if len(align) == 1:
return ([[chr(align[0]), chr(align[0]) == "-"]], [])
elif len(align) == 2:
return ([[chr(align[0]), chr(align[0]) == "-"]],
[[chr(align[1]), chr(align[1]) == "-"]])
indexHalf = int(len(align) / 2)
leftAlign = align[0:indexHalf]
rightAlign = align[indexHalf:]
leftLeftAlign, rightLeftAlign = self._splitAlignment(leftAlign)
mergedLeftAlign = self._mergeAlign(leftLeftAlign, rightLeftAlign)
leftRightAlign, rightRightAlign = self._splitAlignment(rightAlign)
mergedRightAlign = self._mergeAlign(leftRightAlign, rightRightAlign)
return (mergedLeftAlign, mergedRightAlign)
def _mergeAlign(self, leftAlign, rightAlign):
if len(leftAlign) == 0:
return rightAlign
if len(rightAlign) == 0:
return leftAlign
if leftAlign[-1][1] == rightAlign[0][1]:
leftAlign[-1][0] = leftAlign[-1][0] + rightAlign[0][0]
align = leftAlign + rightAlign[1:]
else:
align = leftAlign + rightAlign
return align
# def _temp(self, align):
# self._logger.debug("Align = {0}".format(align))
# for i, val in enumerate(align):
# if (val == b"-"):
# if (found is False):
# start = i
# found = True
# else:
# if (found is True):
# found = False
# nbTiret = i - start
# self._logger.debug("Add dyn raw : {0}".format(nbTiret / 2))
# domains.append(Raw(nbBytes=(0, nbTiret / 2)))
# self._logger.debug("Converting : {0}".format(val))
# domains.append(Raw(TypeConverter.convert(val, HexaString, Raw)))
# else:
# if len(domains) == 0:
# domains.append(Raw(TypeConverter.convert(val, HexaString, Raw)))
# else:
# prevVal = TypeConverter.convert(domains[-1].value, BitArray, Raw)
# domains[-1] += Raw(prevVal + TypeConverter.convert(val, HexaString, Raw))
# if (found is True):
# nbTiret = i - start + 1
# domains.append(Raw(nbBytes=(0, nbTiret)))
# # We have a computed the 'simple' regex,
# # and represent it using the field representation
# step1Fields = []
# for domainElt in domains:
# if domainElt is None:
# pass
# innerField = Field(domain=domainElt)
# step1Fields.append(innerField)
# field.fields.append(innerField)
@typeCheck(list, list)
def _alignData(self, values, semanticTags=None):
"""Align the specified data with respect to the semantic tags
identified over the data.
:parameter values: values to align
:type values: a list of hexastring.
:keyword semanticTags: semantic tags to consider when aligning
:type semanticTags: a dict of :class:`SemanticTag <netzob.Model.Vocabulary.SemanticTag.SemanticTag>`
        :return: the alignment, the semantic tags and the scores
        :rtype: a tuple (alignment, semanticTags, score)
"""
if values is None or len(values) == 0:
raise TypeError("At least one value must be provided.")
for val in values:
if val is None or not isinstance(val, bytes):
raise TypeError(
"At least one value is None or not a str which is not authorized."
)
if semanticTags is None:
semanticTags = [OrderedDict() for v in values]
if len(semanticTags) != len(values):
raise TypeError(
"There should be a list of semantic tags for each value")
# Prepare the argument to send to the C wrapper
toSend = [(values[iValue], semanticTags[iValue])
for iValue in range(len(values))]
wrapper = WrapperArgsFactory("_libNeedleman.alignMessages")
wrapper.typeList[wrapper.function](toSend)
debug = False
(score1, score2, score3, regex, mask,
semanticTags) = _libNeedleman.alignMessages(
self.doInternalSlick, self._cb_executionStatus, debug, wrapper)
scores = (score1, score2, score3)
# Deserialize returned info
alignment = self._deserializeAlignment(regex, mask, self.unitSize)
semanticTags = self._deserializeSemanticTags(semanticTags,
self.unitSize)
return (alignment, semanticTags, scores)
@typeCheck(AbstractMessage)
def __searchApplicativeDataInMessage(self, message):
"""This internal method search any applicative data that could be identified
in the specified message and returns results in a dict that shows the position
of the applicative data identified.
:parameter message: the message in which we search any applicative data
:type message: :class:`AbstractMessage <netzob.Model.Vocabulary.Messages.AbstractMessage.AbstractMessage>`
:return: a dict that describes the position of identified applicative data
:rtype: :class:`dict`
"""
if message is None:
raise TypeError("Message cannot be None")
self._logger.debug("Search app data in {0}".format(message.data))
results = OrderedDict()
appValues = OrderedDict()
if message.session is not None:
for applicativeD in message.session.applicativeData:
appValues[applicativeD.value] = applicativeD.name
else:
self._logger.debug(
"Message is not attached to a session, so no applicative data will be considered while computing the alignment."
)
if len(appValues) > 0:
searchResults = SearchEngine.searchInMessage(
list(appValues.keys()), message, addTags=False)
for searchResult in searchResults:
for (startResultRange, endResultRange) in searchResult.ranges:
appDataName = appValues[searchResult.searchTask.properties[
"data"]]
for pos in range(
int(startResultRange / 4), int(endResultRange /
4)):
results[pos] = appDataName
return results
@typeCheck(AbstractField, str, dict)
def _createSubFieldsFollowingSemanticTags(self, rootField, align,
semanticTags):
"""Searches for subfields which should be created because of identified semantic boundaries.
"""
if rootField is None:
raise TypeError("RootField cannot be None")
if align is None:
raise TypeError("Align cannot be None")
if semanticTags is None:
raise TypeError("SemanticTags cannot be None")
self._logger.debug("original semantic tags: ")
self._logger.debug(semanticTags)
originalFields = rootField.getLeafFields()
if len(originalFields) == 1 and rootField == originalFields[0]:
# We are dealing with a specific field
self._logger.debug(
"Analyze sub fields for {0}".format(rootField.regex))
if len(set(rootField.getValues())) == 1:
self._createSubFieldsForAStaticField(rootField, align,
semanticTags)
else:
self._createSubFieldsForADynamicField(rootField, align,
semanticTags)
for f in rootField.fields:
self._logger.debug("\t {0} : {1}".format(f.name, f.regex))
else:
# We are dealing with multiple fields, lets split them
currentIndex = 0
for field in originalFields:
self._logger.debug("field regex = {0} (maxSize={1})".format(
field.regex, field.domain.maxSize()))
# Retrieve the size of the current field
lengthField = (int(field.domain.maxSize() / 4))
# Find semantic tags related to the current section
sectionSemanticTags = OrderedDict(
(k, semanticTags[k])
for k in range(currentIndex, currentIndex + lengthField))
                # recursive call
self._logger.debug("Working on field : {0}".format(field.name))
self._createSubFieldsFollowingSemanticTags(
field, align[currentIndex:currentIndex + lengthField],
sectionSemanticTags)
currentIndex += lengthField
    def _createSubFieldsForAStaticField(self, field, align, semanticTags):
        """Analyzes the static field provided and creates sub fields following
        the provided semantic tags."""
self._logger.debug("Create subfields for static field {0} : {1}".
format(field.getName(), align))
if len(field.getLocalFields()) > 0:
self._logger.warning(
"Impossible to create sub fields for this field since its not cleaned"
)
return
subFields = []
currentTag = None
currentTagLength = 0
for index, tag in list(semanticTags.items()):
if tag != currentTag:
# Create a sub field
subFieldValue = align[index - currentTagLength:index]
if len(subFieldValue) > 0:
subFields.append(subFieldValue)
currentTagLength = 0
currentTag = tag
currentTagLength += 1
if currentTagLength > 0:
subFieldValue = align[-currentTagLength:]
if len(subFieldValue) > 0:
subFields.append(subFieldValue)
if len(subFields) > 1:
for iSubField, subFieldValue in enumerate(subFields):
subField = Field(b"{0}_{1}".format(field.getName(), iSubField),
b"({0})".format(subFieldValue),
field.getSymbol())
field.addLocalField(subField)
def _createSubFieldsForADynamicField(self, field, align, semanticTags):
"""Analyzes the dynamic field provided and create sub fields following
the provided semantic tags."""
if field is None:
raise TypeError("Field cannot be None")
if align is None:
raise TypeError("Align cannot be None")
if semanticTags is None:
raise TypeError("SemanticTags cannot be None")
self._logger.debug("Create subfields for dynamic field {0} : {1}".
format(field.name, field.regex))
subFields = []
currentTag = None
currentTagLength = 0
semanticTagsForEachMessage = field.getSemanticTagsByMessage()
for index, tag in list(semanticTags.items()):
if tag != currentTag:
# Create a sub field
if currentTagLength > 0:
values = self._getFieldValuesWithTag(
field, semanticTagsForEachMessage, currentTag)
subFields.append((currentTag, values))
currentTagLength = 0
currentTag = tag
currentTagLength += 1
if currentTagLength > 0:
values = self._getFieldValuesWithTag(
field, semanticTagsForEachMessage, currentTag)
subFields.append((currentTag, values))
self._logger.debug("Identified subFields : {0}".format(subFields))
for iSubField, (tag, values) in enumerate(subFields):
if len(values) > 0:
if tag == b"None":
minValue = None
maxValue = None
for v in values:
if minValue is None or len(v) < minValue:
minValue = len(v)
if maxValue is None or len(v) > maxValue:
maxValue = len(v)
subField = Field(
b"{0}_{1}".format(field.getName(), iSubField),
b"(.{" + str(minValue) + b"," + str(maxValue) + b"})",
field.getSymbol())
field.addLocalField(subField)
else:
# create regex based on unique values
newRegex = '|'.join(list(set(values)))
newRegex = b"({0})".format(newRegex)
subField = Field(b"{0}_{1}".format(field.getName(),
iSubField), newRegex,
field.getSymbol())
field.addLocalField(subField)
@typeCheck(AbstractField, dict, str)
def _getFieldValuesWithTag(self, field, semanticTagsForEachMessage, tag):
if field is None:
raise TypeError("Field cannot be None")
if semanticTagsForEachMessage is None:
raise TypeError("SemanticTagsForEachMessage cannot be None")
if tag is None:
raise TypeError("Tag cannot be None")
values = []
# Retrieve value of each message in current field tagged with requested tag
for message, tagsInMessage in list(semanticTagsForEachMessage.items()):
initial = None
end = None
for tagIndex in sorted(tagsInMessage.keys()):
tagName = tagsInMessage[tagIndex]
if initial is None and tagName == tag:
initial = tagIndex
elif initial is not None and tagName != tag:
end = tagIndex
break
if initial is not None and end is None:
end = sorted(tagsInMessage.keys())[-1] + 1
if initial is not None and end is not None:
values.append(message.getStringData()[initial:end])
for i in range(initial, end):
del tagsInMessage[i]
if b"" not in values and len(
list(semanticTagsForEachMessage.keys())) > len(values):
values.append(b"")
return values
def _cb_executionStatus(self, stage, donePercent, currentMessage):
"""Callback function called by the C extension to provide
info on the current status.
"""
def _deserializeSemanticTags(self, tags, unitSize=UnitSize.SIZE_8):
"""Deserialize the information returned from the C library
and build the semantic tags definitions from it.
"""
result = OrderedDict()
arTags = tags.split(';')
j = 0
for iTag, tag in enumerate(arTags):
if tag != b"None":
result[j] = tag[2:-2]
else:
result[j] = tag
if unitSize == UnitSize.SIZE_8:
j = j + 1
result[j] = result[j - 1]
else:
raise ValueError("Unsupported unitsize.")
j += 1
return result
def _deserializeAlignment(self,
regex,
mask,
unitSize=UnitSize.SIZE_8):
"""
        Transforms the C extension results into a Python-readable form.
@param regex the C returned regex
@param mask the C returned mask
@param unitSize the unitSize
@returns the python alignment
"""
if not (unitSize == UnitSize.SIZE_8 or
unitSize == UnitSize.SIZE_4):
raise ValueError(
"Deserializing with unitSize {0} not yet implemented, only 4 and 8 supported.".
format(unitSize))
align = b""
for i, c in enumerate(mask):
if c != 2:
if c == 1:
if unitSize == UnitSize.SIZE_8:
align += b"--"
elif unitSize == UnitSize.SIZE_4:
align += b"-"
else:
if unitSize == UnitSize.SIZE_8:
align += TypeConverter.convert(regex[i:i + 1], Raw,
HexaString)
elif unitSize == UnitSize.SIZE_4:
align += TypeConverter.convert(regex[i:i + 1], Raw,
HexaString)[1:]
return align
@property
def doInternalSlick(self):
return self.__doInternalSlick
@doInternalSlick.setter # type: ignore
@typeCheck(bool)
def doInternalSlick(self, doInternalSlick):
if doInternalSlick is None:
raise TypeError("doInternalSlick cannot be None")
self.__doInternalSlick = doInternalSlick
@property
def unitSize(self):
return self.__unitSize
@unitSize.setter # type: ignore
@typeCheck(UnitSize)
def unitSize(self, unitSize):
if unitSize is None:
raise TypeError("Unitsize cannot be None")
if unitSize not in AbstractType.supportedUnitSizes():
raise TypeError(
"Specified unitsize is not supported, refers to AbstractType.supportedUnitSizes() for the list."
)
self.__unitSize = unitSize
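
# Illustrative sketch (not part of Netzob): the recursive _splitAlignment /
# _mergeAlign pair above produces the same grouping as a single linear pass
# with itertools.groupby, where every run of '-' becomes a dynamic section and
# every other run stays a static hexastring section.
if __name__ == "__main__":
    from itertools import groupby

    def merge_align_linear(align):
        """Group an alignment mask into [value, isDynamic] pairs."""
        return [
            ["".join(group), is_dynamic]
            for is_dynamic, group in groupby(align.decode(), key=lambda c: c == "-")
        ]

    print(merge_align_linear(b"--ff00--aa"))
    # [['--', True], ['ff00', False], ['--', True], ['aa', False]]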
/sev_snp_measure-0.0.7-py3-none-any.whl/sevsnpmeasure/vmsa.py
import ctypes
from ctypes import c_uint8, c_uint16, c_uint32, c_uint64
from typing import Iterator
from .sev_mode import SevMode
from .vmm_types import VMMType
# VMCB Segment (struct vmcb_seg in the linux kernel)
class VmcbSeg(ctypes.Structure):
_pack_ = 1
_fields_ = [
("selector", c_uint16),
("attrib", c_uint16),
("limit", c_uint32),
("base", c_uint64),
]
# VMSA page
#
# The names of the fields are taken from struct sev_es_work_area in the linux kernel:
# https://github.com/AMDESE/linux/blob/sev-snp-v12/arch/x86/include/asm/svm.h#L318
# (following the definitions in AMD APM Vol 2 Table B-4)
class SevEsSaveArea(ctypes.Structure):
_pack_ = 1
_fields_ = [
("es", VmcbSeg),
("cs", VmcbSeg),
("ss", VmcbSeg),
("ds", VmcbSeg),
("fs", VmcbSeg),
("gs", VmcbSeg),
("gdtr", VmcbSeg),
("ldtr", VmcbSeg),
("idtr", VmcbSeg),
("tr", VmcbSeg),
("vmpl0_ssp", c_uint64),
("vmpl1_ssp", c_uint64),
("vmpl2_ssp", c_uint64),
("vmpl3_ssp", c_uint64),
("u_cet", c_uint64),
("reserved_1", c_uint8 * 2),
("vmpl", c_uint8),
("cpl", c_uint8),
("reserved_2", c_uint8 * 4),
("efer", c_uint64),
("reserved_3", c_uint8 * 104),
("xss", c_uint64),
("cr4", c_uint64),
("cr3", c_uint64),
("cr0", c_uint64),
("dr7", c_uint64),
("dr6", c_uint64),
("rflags", c_uint64),
("rip", c_uint64),
("dr0", c_uint64),
("dr1", c_uint64),
("dr2", c_uint64),
("dr3", c_uint64),
("dr0_addr_mask", c_uint64),
("dr1_addr_mask", c_uint64),
("dr2_addr_mask", c_uint64),
("dr3_addr_mask", c_uint64),
("reserved_4", c_uint8 * 24),
("rsp", c_uint64),
("s_cet", c_uint64),
("ssp", c_uint64),
("isst_addr", c_uint64),
("rax", c_uint64),
("star", c_uint64),
("lstar", c_uint64),
("cstar", c_uint64),
("sfmask", c_uint64),
("kernel_gs_base", c_uint64),
("sysenter_cs", c_uint64),
("sysenter_esp", c_uint64),
("sysenter_eip", c_uint64),
("cr2", c_uint64),
("reserved_5", c_uint8 * 32),
("g_pat", c_uint64),
("dbgctrl", c_uint64),
("br_from", c_uint64),
("br_to", c_uint64),
("last_excp_from", c_uint64),
("last_excp_to", c_uint64),
("reserved_7", c_uint8 * 80),
("pkru", c_uint32),
("reserved_8", c_uint8 * 20),
("reserved_9", c_uint64),
("rcx", c_uint64),
("rdx", c_uint64),
("rbx", c_uint64),
("reserved_10", c_uint64),
("rbp", c_uint64),
("rsi", c_uint64),
("rdi", c_uint64),
("r8", c_uint64),
("r9", c_uint64),
("r10", c_uint64),
("r11", c_uint64),
("r12", c_uint64),
("r13", c_uint64),
("r14", c_uint64),
("r15", c_uint64),
("reserved_11", c_uint8 * 16),
("guest_exit_info_1", c_uint64),
("guest_exit_info_2", c_uint64),
("guest_exit_int_info", c_uint64),
("guest_nrip", c_uint64),
("sev_features", c_uint64),
("vintr_ctrl", c_uint64),
("guest_exit_code", c_uint64),
("virtual_tom", c_uint64),
("tlb_id", c_uint64),
("pcpu_id", c_uint64),
("event_inj", c_uint64),
("xcr0", c_uint64),
("reserved_12", c_uint8 * 16),
("x87_dp", c_uint64),
("mxcsr", c_uint32),
("x87_ftw", c_uint16),
("x87_fsw", c_uint16),
("x87_fcw", c_uint16),
("x87_fop", c_uint16),
("x87_ds", c_uint16),
("x87_cs", c_uint16),
("x87_rip", c_uint64),
("fpreg_x87", c_uint8 * 80),
("fpreg_xmm", c_uint8 * 256),
("fpreg_ymm", c_uint8 * 256),
("unused", c_uint8 * 2448),
]
class VMSA(object):
BSP_EIP = 0xfffffff0
@staticmethod
def build_save_area(eip: int, sev_features: int, vcpu_sig: int, vmm_type: VMMType = VMMType.QEMU):
# QEMU and EC2 differ slightly on initial register state
if vmm_type == VMMType.QEMU:
cs_flags = 0x9b
ss_flags = 0x93
tr_flags = 0x8b
rdx = vcpu_sig
elif vmm_type == VMMType.ec2:
cs_flags = 0x9b
if eip == 0xfffffff0:
cs_flags = 0x9a
ss_flags = 0x92
tr_flags = 0x83
rdx = 0
else:
raise ValueError("unknown VMM type")
return SevEsSaveArea(
es=VmcbSeg(0, 0x93, 0xffff, 0),
cs=VmcbSeg(0xf000, cs_flags, 0xffff, eip & 0xffff0000),
ss=VmcbSeg(0, ss_flags, 0xffff, 0),
ds=VmcbSeg(0, 0x93, 0xffff, 0),
fs=VmcbSeg(0, 0x93, 0xffff, 0),
gs=VmcbSeg(0, 0x93, 0xffff, 0),
gdtr=VmcbSeg(0, 0, 0xffff, 0),
idtr=VmcbSeg(0, 0, 0xffff, 0),
ldtr=VmcbSeg(0, 0x82, 0xffff, 0),
tr=VmcbSeg(0, tr_flags, 0xffff, 0),
efer=0x1000, # KVM enables EFER_SVME
cr4=0x40, # KVM enables X86_CR4_MCE
cr0=0x10,
dr7=0x400,
dr6=0xffff0ff0,
rflags=0x2,
rip=eip & 0xffff,
g_pat=0x7040600070406, # PAT MSR: See AMD APM Vol 2, Section A.3
rdx=rdx,
sev_features=sev_features,
xcr0=0x1,
)
def __init__(self, sev_mode: SevMode, ap_eip: int, vcpu_sig: int, vmm_type: VMMType = VMMType.QEMU):
if sev_mode == SevMode.SEV_SNP:
sev_features = 0x1
else:
sev_features = 0x0
self.bsp_save_area = VMSA.build_save_area(self.BSP_EIP, sev_features, vcpu_sig, vmm_type)
if ap_eip:
self.ap_save_area = VMSA.build_save_area(ap_eip, sev_features, vcpu_sig, vmm_type)
def pages(self, vcpus: int) -> Iterator[bytes]:
"""
Generate VMSA pages
"""
for i in range(vcpus):
if i == 0:
yield bytes(self.bsp_save_area)
else:
yield bytes(self.ap_save_area)
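
# Usage sketch (not part of the original module): building the VMSA pages that
# feed an SEV-SNP launch measurement. The AP entry point and the vcpu signature
# (CPUID Fn0000_0001 EAX-style value) below are placeholder values, and the
# relative imports mean this should be run as a module (python -m).
if __name__ == "__main__":
    vmsa = VMSA(SevMode.SEV_SNP, ap_eip=0x100000, vcpu_sig=0x800F12,
                vmm_type=VMMType.QEMU)
    for page in vmsa.pages(vcpus=2):
        print(len(page))  # each serialized save area is one 4096-byte page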
/ReviewBoard-5.0.5-py3-none-any.whl/reviewboard/admin/cache_stats.py
import logging
import socket
from django.conf import settings
from djblets.cache.forwarding_backend import DEFAULT_FORWARD_CACHE_ALIAS
logger = logging.getLogger(__name__)
def get_memcached_hosts():
"""Return the hosts currently configured for memcached.
Returns:
list of unicode:
A list of memcached hostnames or UNIX paths.
"""
cache_info = settings.CACHES[DEFAULT_FORWARD_CACHE_ALIAS]
backend = cache_info['BACKEND']
locations = cache_info.get('LOCATION', [])
if 'memcached' not in backend or not locations:
locations = []
elif not isinstance(locations, list):
locations = [locations]
return locations
def get_has_cache_stats():
"""Return whether or not cache stats are supported.
Returns:
bool:
``True`` if cache stats are supported for the current cache setup.
``False`` if cache stats are not supported.
"""
return len(get_memcached_hosts()) > 0
def get_cache_stats():
"""Return statistics for all supported cache backends.
This only supports memcached backends.
Returns:
list of tuple:
Each list item corresponds to one configured memcached server.
The item is a tuple in the form of ``(hostname, stats)``, where
``stats`` is a dictionary with statistics from the cache server.
If no memcached servers are configured, this will return ``None``
instead.
"""
hostnames = get_memcached_hosts()
if not hostnames:
return None
all_stats = []
for hostname in hostnames:
try:
host, port = hostname.split(':')
except ValueError:
# Assume this is a hostname without a port.
socket_af = socket.AF_INET
host = hostname
port = 11211
if host == 'unix':
socket_af = socket.AF_UNIX
connect_param = port
else:
socket_af = socket.AF_INET
connect_param = (host, int(port))
s = socket.socket(socket_af, socket.SOCK_STREAM)
try:
s.connect(connect_param)
except socket.error:
logger.error('Unable to connect to "%s"' % hostname)
s.close()
continue
s.send(b'stats\r\n')
data = s.recv(2048).decode('ascii')
s.close()
stats = {}
for line in data.splitlines():
info = line.split(' ')
if info[0] == 'STAT' and len(info) == 3:
try:
value = int(info[2])
except ValueError:
value = info[2]
stats[info[1]] = value
if stats['cmd_get'] == 0:
stats['hit_rate'] = 0
stats['miss_rate'] = 0
else:
stats['hit_rate'] = 100 * stats['get_hits'] / stats['cmd_get']
stats['miss_rate'] = 100 * stats['get_misses'] / stats['cmd_get']
all_stats.append((hostname, stats))
return all_stats
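
# Usage sketch (not part of the original module): these helpers require a
# configured Django settings module with a memcached backend registered under
# DEFAULT_FORWARD_CACHE_ALIAS, so this is illustrative only.
if __name__ == "__main__":
    if get_has_cache_stats():
        for hostname, stats in get_cache_stats():
            print(hostname, stats.get('hit_rate'), stats.get('miss_rate'))
    else:
        print('No memcached servers are configured.')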
/autopilot_tools-0.3.0-py3-none-any.whl/autopilot_tools/vehicle.py
import math
import os
import sys
import time
from functools import partial
from typing import List
import serial
import yaml
from pymavlink import mavutil
from pymavlink.dialects.v20.ardupilotmega import \
MAVLink_mission_item_int_message, MAVLink_mission_count_message, \
MAV_MISSION_TYPE_FENCE, MAV_MISSION_TYPE_RALLY, \
MAV_MISSION_TYPE_MISSION
from pymavlink.mavutil import mavlink
from autopilot_tools.log_analyzer.color_logging import log_warn
from .configurator.mavftputils import MavFTP
from .configurator.mavlink_params import deserialize_param_value, \
float_to_integer, serialize_param_value
from .configurator.mission_file import Plan, MissionItem, ParamList
from .configurator.mission_result import MissionResult, StatusCode, StatusText
from .exceptions import MavlinkTimeoutError
from .utils import retry_command
SOURCE_SYSTEM = 2
SOURCE_COMPONENT = 1
MAV_PARAM_TYPE_INT32 = 6
MAX_REQUESTED_SIZE = 90
class Vehicle:
def __init__(self) -> None:
self.device_path = None
self.autopilot = None
self.master = None
self.params = None
self.mav_ftp = None
def connect(self, device="serial"):
if device == "serial":
self._connect_serial()
elif device == 'udp':
self.device_path = 'udpin:localhost:14540'
self._connect()
print(f"Connected: {self.device_path}")
else:
print(f"Unkown device {device}: it should be serial or udp")
def configure(self, file_with_params, reboot=True):
self._read_yaml_parameters(file_with_params)
num_of_recv_params = 0
print(f"Trying to write {len(self.params)} params...")
for param_name in self.params:
set_param_value = self.params[param_name]
if self.set_specific_param(param_name, set_param_value):
num_of_recv_params += 1
print(f"Successfully written {num_of_recv_params}/{len(self.params)} params.")
if reboot:
time.sleep(2)
self.reboot()
time.sleep(2)
self.connect()
def download_px4_log(self, output_dir, output_file_name=""):
myfile = open("log.ulg", "wb")
self.master.mav.log_request_list_send(
self.master.target_system,
self.master.target_component,
0,
1024)
log_entry_msg = self.master.recv_match(type='LOG_ENTRY', blocking=True)
last_log_num = log_entry_msg.last_log_num
last_log_size_kbytes = int(log_entry_msg.size / 1024)
print(f"Last log number is {last_log_num}. The size is {last_log_size_kbytes} KBytes.")
print(f"Output file will be: {output_dir}/{output_file_name}")
start_time = time.time()
for ofs in range(0, log_entry_msg.size, MAX_REQUESTED_SIZE):
self.master.mav.log_request_data_send(
self.master.target_system,
self.master.target_component,
id=last_log_num,
ofs=ofs,
count=MAX_REQUESTED_SIZE)
log_data_msg = self.master.recv_match(type='LOG_DATA', blocking=True)
data = bytearray(log_data_msg.data)
myfile.write(data)
sys.stdout.write("\033[K")
identifier = log_data_msg.id
ofs_kbytes = int(log_data_msg.ofs / 1024)
elapsed_time = int(time.time() - start_time)
msg = f"\r{identifier}, {elapsed_time} sec: {ofs_kbytes} / {last_log_size_kbytes} KB."
print(msg, end='', flush=True)
if log_data_msg.count < MAX_REQUESTED_SIZE:
break
myfile.close()
print("")
def read_all_params(self):
self.master.mav.param_request_list_send(
self.master.target_system, self.master.target_component
)
self.params = {}
prev_recv_time_sec = time.time()
while prev_recv_time_sec + 1.0 > time.time():
time.sleep(0.01)
recv_msg = self.master.recv_match(type='PARAM_VALUE', blocking=False)
if recv_msg is not None:
if recv_msg.param_type == MAV_PARAM_TYPE_INT32:
recv_msg.param_value = float_to_integer(recv_msg.param_value)
recv_msg = recv_msg.to_dict()
self.params[recv_msg['param_id']] = recv_msg['param_value']
print(f"name: {recv_msg['param_id']} value: {recv_msg['param_value']}")
prev_recv_time_sec = time.time()
print("Done!")
def read_specific_param(self, param_name, verbose=False, number_of_attempts=100):
"""Non-blocking read of the specific parameter. Several attemps until fail."""
if verbose:
print(f"{param_name: <18}", end='', flush=True)
recv_msg = None
param_value = None
for _ in range(number_of_attempts):
self.master.mav.param_request_read_send(
self.master.target_system,
self.master.target_component,
bytes(param_name, 'utf-8'),
-1
)
recv_msg = self.master.recv_match(type='PARAM_VALUE', blocking=False)
if recv_msg is None:
time.sleep(0.1)
continue
recv_param_name, param_type, param_value = deserialize_param_value(recv_msg)
if recv_param_name == param_name:
if verbose:
print(f"{param_type: <6} {param_value}")
break
if recv_msg is None:
log_warn(f'Reading {param_name} has failed {number_of_attempts} times.')
return param_value
def set_specific_param(self, param_name, param_value, number_of_attempts=50):
"""Non-blocking set of the specific parameter. Return True in success, otherwise False."""
self.master.mav.param_set_send(
self.master.target_system,
self.master.target_component,
bytes(param_name, 'utf-8'),
*serialize_param_value(param_value)
)
for _ in range(number_of_attempts):
recv_msg = self.master.recv_match(type='PARAM_VALUE', blocking=False)
if recv_msg is None:
time.sleep(0.01)
continue
recv_param_name, recv_param_type, recv_param_value = deserialize_param_value(recv_msg)
if recv_param_name != param_name:
time.sleep(0.01)
continue
if math.isclose(recv_param_value, param_value, rel_tol=1e-4):
print(f"{recv_param_name: <18} {recv_param_type: <6} {recv_param_value}")
return True
log_warn(f'{param_name}: expected {param_value}, received {recv_param_value}.')
return False
log_warn(f'Writing {param_name} has failed {number_of_attempts} times.')
return False
def reset_params_to_default(self):
self._reset_params_to_default()
self.reboot()
time.sleep(2)
self.connect()
def force_calibrate(self):
param2 = 76
param5 = 76
self.master.mav.command_long_send(self.master.target_system, self.master.target_component,
mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
0, param2, 0, 0, param5, 0, 0)
def reboot(self):
self.master.reboot_autopilot()
self.master.close()
def download_mission(self) -> List[MAVLink_mission_item_int_message]:
def get_count() -> MAVLink_mission_count_message:
self.master.mav.mission_request_list_send(
self.master.target_system, self.master.target_component)
return self.master.recv_match(type='MISSION_COUNT', blocking=True, timeout=1)
count = retry_command(get_count)
if count is None:
raise MavlinkTimeoutError
data = []
i = 0
while i < count.count:
def get_mission_item() -> MAVLink_mission_item_int_message:
self.master.mav.mission_request_int_send(
self.master.target_system, self.master.target_component, i)
return self.master.recv_match(type='MISSION_ITEM_INT', blocking=True, timeout=1)
data_item = retry_command(get_mission_item)
if data_item is None:
raise MavlinkTimeoutError
if data_item.seq == i:
i += 1
data.append(data_item)
return data
def load_mission(self, path: str) -> StatusCode:
mission_file = Plan(path)
fence_items = mission_file.geofence.get_mission_item_representation()
rally_points_length = mission_file.rally_points.get_mission_item_representation()
mission_length = mission_file.mission.get_mission_item_representation()
def send_mission_items(
count: int, item_list: List[MissionItem], mission_type: int) -> StatusCode:
self.master.mav.mission_count_send(
self.master.target_system, self.master.target_component,
count, mission_type
)
if not item_list:
return StatusCode.EMPTY_MISSION_ITEM_LIST
reached_last_item = False
next_item = -1
while not reached_last_item:
res = self.master.recv_match(
type=['MISSION_REQUEST_INT', 'MISSION_REQUEST'], blocking=True, timeout=0.5)
if res is None:
return StatusCode.MAVLINK_ERROR
next_item = res.seq
print(f"Sending {item_list[next_item]} with id {next_item}")
to_send = item_list[next_item]
params = ParamList(
*[x if x is not None else math.nan for x in to_send.params]
)
self.master.mav.mission_item_int_send(
self.master.target_system, self.master.target_component,
to_send.arguments.seq,
to_send.arguments.frame,
to_send.arguments.command,
to_send.arguments.current,
to_send.arguments.auto_continue,
params.param1,
params.param2,
params.param3,
params.param4,
params.x,
params.y,
params.z,
to_send.mission_type
)
if next_item == count - 1:
reached_last_item = True
res = self.master.recv_match(type='MISSION_ACK', blocking=True, timeout=0.5)
return StatusCode.OK if res is not None else StatusCode.MAVLINK_ERROR
result = retry_command(
partial(send_mission_items, *fence_items, MAV_MISSION_TYPE_FENCE),
test=lambda x: x in [StatusCode.OK, StatusCode.EMPTY_MISSION_ITEM_LIST])
if result is None:
raise MavlinkTimeoutError
result = retry_command(
partial(send_mission_items, *rally_points_length, MAV_MISSION_TYPE_RALLY),
test=lambda x: x in [StatusCode.OK, StatusCode.EMPTY_MISSION_ITEM_LIST])
if result is None:
raise MavlinkTimeoutError
result = retry_command(
partial(send_mission_items, *mission_length, MAV_MISSION_TYPE_MISSION),
test=lambda x: x in [StatusCode.OK, StatusCode.EMPTY_MISSION_ITEM_LIST])
if result is None:
raise MavlinkTimeoutError
return StatusCode.OK
def run_mission(self, path: str = None, timeout: int = 100):
if path is not None:
self.load_mission(path)
mission_data = self.download_mission()
seq = 0
start_time = time.time()
time_elapsed = 0
self.master.mav.command_long_send(
self.master.target_system, self.master.target_component,
mavutil.mavlink.MAV_CMD_MISSION_START,
0,
0, len(mission_data) - 1, 0, 0, 0, 0, 0
)
seq_zeroed = False
print(f"starting mission from {seq} mission_item")
status_texts = []
while not seq_zeroed and time_elapsed < timeout:
msg = self.master.recv_match(type='MISSION_CURRENT', blocking=False)
status = self.master.recv_match(type='STATUSTEXT', blocking=False)
if status:
status_texts.append(StatusText(status.severity, status.text))
if msg is None:
time.sleep(0.01)
continue
if msg.seq != seq:
if msg.seq == 0:
seq_zeroed = True
else:
seq = msg.seq
print(f"mission_item {msg.seq} reached")
time_elapsed = time.time() - start_time
return MissionResult(
StatusCode.OK if time_elapsed < timeout else StatusCode.MISSION_TIMEOUT,
int(time_elapsed),
len(mission_data),
status_texts
)
def _connect_serial(self):
while True:
try:
self.device_path, self.autopilot = Vehicle._get_autopilot_serial_path()
if self.device_path is not None:
self._connect()
print(f"Connected: {self.device_path}")
break
except serial.serialutil.SerialException:
pass
time.sleep(1)
print(f"Waiting for the Autopilot {self.device_path}...")
@staticmethod
def _get_autopilot_serial_path():
serial_devices = os.popen('ls /dev/serial/by-id').read().splitlines()
return Vehicle.get_autopilot_type_by_serial_devices(serial_devices)
@staticmethod
def get_autopilot_type_by_serial_devices(serial_devices):
if len(serial_devices) < 1:
return None, None
device_path = None
autopilot_type = None
for serial_device in serial_devices:
if -1 != serial_device.find("ArduPilot"):
device_path = f"/dev/serial/by-id/{serial_device}"
autopilot_type = "ArduPilot"
break
if -1 != serial_device.find("PX4"):
device_path = f"/dev/serial/by-id/{serial_device}"
autopilot_type = "PX4"
break
return device_path, autopilot_type
def _reset_params_to_default(self):
self.master.mav.command_long_send(
self.master.target_system,
self.master.target_component,
mavutil.mavlink.MAV_CMD_PREFLIGHT_STORAGE,
0,
2, -1, 0, 0, 0, 0, 0)
def _read_yaml_parameters(self, filename, verbose=False):
with open(filename, encoding='UTF-8') as file_descriptor:
self.params = yaml.load(file_descriptor, Loader=yaml.FullLoader)
if verbose:
print(f"{filename} has : {self.params}")
def _connect(self):
self.master = mavutil.mavlink_connection(
self.device_path,
source_component=SOURCE_COMPONENT,
source_system=SOURCE_SYSTEM)
self.master.mav.heartbeat_send(
type=mavlink.MAV_TYPE_CHARGING_STATION,
autopilot=6,
base_mode=12,
custom_mode=0,
system_status=4)
self.master.wait_heartbeat()
self.mav_ftp = MavFTP(self.master)
system_str = f"system {self.master.target_system}"
component_str = f"component {self.master.target_component}"
print(f"Heartbeat from system ({system_str} {component_str})")
|
PypiClean
|
/pollination-annual-daylight.viz-0.10.10.tar.gz/pollination-annual-daylight.viz-0.10.10/pollination/annual_daylight/_raytracing.py
|
from pollination_dsl.dag import Inputs, DAG, task
from dataclasses import dataclass
from pollination.honeybee_radiance.coefficient import DaylightCoefficient
from pollination.honeybee_radiance_postprocess.post_process import \
AnnualDaylightMetricsFile
# input/output alias
from pollination.alias.inputs.radiancepar import daylight_thresholds_input
from pollination.alias.inputs.schedule import schedule_csv_input
@dataclass
class AnnualDaylightRayTracing(DAG):
# inputs
radiance_parameters = Inputs.str(
description='The radiance parameters for ray tracing',
default='-ab 2 -ad 5000 -lw 2e-05'
)
octree_file = Inputs.file(
description='A Radiance octree file.',
extensions=['oct']
)
grid_name = Inputs.str(
description='Sensor grid file name. This is useful to rename the final result '
'file to {grid_name}.ill'
)
sensor_grid = Inputs.file(
description='Sensor grid file.',
extensions=['pts']
)
sensor_count = Inputs.int(
description='Number of sensors in the input sensor grid.'
)
sky_matrix = Inputs.file(
description='Path to total sky matrix file.'
)
sky_dome = Inputs.file(
description='Path to sky dome file.'
)
bsdfs = Inputs.folder(
description='Folder containing any BSDF files needed for ray tracing.',
optional=True
)
sun_up_hours = Inputs.file(
description='A text file that includes all the sun up hours. Each '
'hour is separated by a new line.'
)
schedule = Inputs.file(
description='Path to an annual schedule file. Values should be 0-1 separated '
'by new line. If not provided an 8-5 annual schedule will be created.',
extensions=['txt', 'csv'], optional=True, alias=schedule_csv_input
)
thresholds = Inputs.str(
description='A string to change the threshold for daylight autonomy and useful '
'daylight illuminance. Valid keys are -t for daylight autonomy threshold, -lt '
'for the lower threshold for useful daylight illuminance and -ut for the upper '
'threshold. The default is -t 300 -lt 100 -ut 3000. The order of the keys is '
'not important and you can include one or all of them. For instance if you only '
'want to change the upper threshold to 2000 lux you should use -ut 2000 as '
'the input.', default='-t 300 -lt 100 -ut 3000',
alias=daylight_thresholds_input
)
@task(template=DaylightCoefficient)
def total_sky(
self,
name=grid_name,
radiance_parameters=radiance_parameters,
fixed_radiance_parameters='-aa 0.0 -I -c 1 -faf',
sensor_count=sensor_count,
sky_matrix=sky_matrix,
sky_dome=sky_dome,
sensor_grid=sensor_grid,
scene_file=octree_file,
conversion='47.4 119.9 11.6',
bsdf_folder=bsdfs
):
return [
{
'from': DaylightCoefficient()._outputs.result_file,
'to': 'final/{{self.name}}.ill'
}
]
@task(
template=AnnualDaylightMetricsFile,
needs=[total_sky]
)
def annual_metrics_file(
self,
file=total_sky._outputs.result_file,
sun_up_hours=sun_up_hours,
schedule=schedule,
thresholds=thresholds,
grid_name=grid_name
):
return [
{
'from': AnnualDaylightMetricsFile()._outputs.annual_metrics,
'to': 'metrics'
}
]
|
PypiClean
|
/dynatrace_metric_utils-0.2.1-py3-none-any.whl/dynatrace/metric/utils/_metric_values.py
|
import math
from abc import ABC, abstractmethod
from typing import Union
from . import (
metric_error,
)
def _raise_if_nan_or_inf(value: Union[int, float]):
if math.isnan(value):
raise metric_error.MetricError("Value is NaN")
if math.isinf(value):
raise metric_error.MetricError("Value is Infinite")
def _format_number(value: Union[int, float]):
if abs(value) > 1e15:
as_string = "{:.8e}".format(value)
elif 0 < abs(value) < 1e-15:
as_string = "{:.8e}".format(value)
else:
as_string = str(value)
if "0e" in as_string:
# remove trailing zeroes in exponential notation
start, end = as_string.split("e")
start = str(start).rstrip("0")
if start.endswith("."):
start = start + "0"
return start + "e" + end
if as_string.endswith(".0"):
no_trailing_zero = as_string[0:len(as_string) - 2]
if no_trailing_zero == "-0":
return "0"
return no_trailing_zero
return as_string
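# Illustrative values for _format_number (assumed; they follow from the rules
# above and standard CPython str() formatting):
#   _format_number(200)   -> "200"
#   _format_number(2.0)   -> "2"        (trailing ".0" is stripped)
#   _format_number(1e20)  -> "1.0e+20"  (trailing zeroes removed from the mantissa)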
class MetricValue(ABC):
@abstractmethod
def serialize_value(self) -> str:
pass
class GaugeValue(MetricValue):
def __init__(self,
value: Union[float, int]
) -> None:
_raise_if_nan_or_inf(value)
self._value = value
def serialize_value(self) -> str:
return "gauge,{}".format(_format_number(self._value))
class CounterValueDelta(MetricValue):
def __init__(self,
value: Union[float, int]
) -> None:
_raise_if_nan_or_inf(value)
self._value = value
def serialize_value(self) -> str:
return "count,delta={}".format(_format_number(self._value))
class SummaryValue(MetricValue):
def __init__(self,
minimum: Union[float, int],
maximum: Union[float, int],
total: Union[float, int],
count: int
) -> None:
_raise_if_nan_or_inf(minimum)
_raise_if_nan_or_inf(maximum)
_raise_if_nan_or_inf(total)
if count < 0:
raise metric_error.MetricError("Count must be 0 or above.")
if minimum > maximum:
raise metric_error.MetricError("Min cannot be larger than max.")
self._min = minimum
self._max = maximum
self._sum = total
self._count = count
def serialize_value(self) -> str:
return "gauge,min={},max={},sum={},count={}".format(
_format_number(self._min),
_format_number(self._max),
_format_number(self._sum),
self._count
)
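# Hypothetical usage sketch (not part of the original module): serializing the
# value portion of a metric line for each supported value type.
def _example_serialize_values():  # pragma: no cover
    print(GaugeValue(20.5).serialize_value())            # gauge,20.5
    print(CounterValueDelta(7).serialize_value())        # count,delta=7
    print(SummaryValue(1, 10, 34, 7).serialize_value())  # gauge,min=1,max=10,sum=34,count=7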
|
PypiClean
|
/PyFADO-0.0.1.tar.gz/PyFADO-0.0.1/src/python/ReadFADOFitsv01.py
|
from __future__ import print_function, division, absolute_import
import os
# Import astropy library
from astropy.io import fits
import numpy as np
from astropy import units as u
import matplotlib.pyplot as plt
from pylab import *
#from .PlotFADOv01 import PlotFADO
from PlotFADOv01 import PlotFADO
# Class of objects
class mean_stellar(object):
def __init__( self, light, mass, oneGyr=False, solarmet=False ):
self.light = light
self.mass = mass
if oneGyr:
self.light_1Gyr = light / 1.0e9
self.mass_1Gyr = mass / 1.0e9
if solarmet:
self.light_solar = light / 0.02
self.mass_solar = mass / 0.02
class mass(object):
def __init__( self, ever, corr ):
self.ever = ever
self.corrected = corr
class redshift(object):
def __init__( self, value, v ):
c = 299792.458 # [km/s]
# Convert the shifts to final redshift and equivalent velocity in km/s
self.firstvalue = value
self.value = (1.0+value) * (1.0+v/c) - 1.0
self.velocity = self.value * c
# It should not matter for low velocities
self.relativistic_velocity = c * ( ( (1.+self.value)**2 - 1. ) / ( (1.+self.value)**2 + 1. ) )
class ReadFADOFits(object):
""" ============================================================
@author: Jean Gomes
@date : Mon Apr 29 14:40:24 2019
License: pyFADO module is freely available under the General Public License (GPL).
Summary: pyFADO class for reading the FITS output files produced by a FADO run.
How to use it
First import this reading library containing all the classes
from ReadFADOFitsv01 import ReadFADOFits
A FADO run for a given galaxy produces 4 files:
* galaxy_1D.fits
* galaxy_DE.fits
* galaxy_EL.fits
* galaxy_ST.fits
name = 'galaxy'
galaxy = ReadFADOFits( name, path=path, showheader=False )
path and showheader arguments are optional.
The files are then read and stored in the galaxy object.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDI-
RECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBS-
TITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
============================================================
"""
def __init__( self, name, path='./', showheader=False, printspectrum=False, printpopvector=False, printstavector=False ):
self.name_1D = name + '_1D.fits'
self.name_DE = name + '_DE.fits'
self.name_ST = name + '_ST.fits'
if path[len(path)-1] != '/':
path = path + '/'
self.path = path
# Check if directory and FADO files exist
path__to__dir = os.path.isdir(self.path)
name_1D_exist = os.path.isfile(self.path + self.name_1D)
name_DE_exist = os.path.isfile(self.path + self.name_DE)
name_ST_exist = os.path.isfile(self.path + self.name_ST)
#print(path__to__dir,name_1D_exist,name_DE_exist)
if path__to__dir and name_1D_exist and name_DE_exist and name_ST_exist:
self.showheader = showheader
self.printspectrum = printspectrum
self.printpopvector = printpopvector
self.printstavector = printstavector
self.Read1DFits( self.showheader, self.printspectrum )
print("")
self.ReadDEFits( self.showheader, self.printpopvector )
print("")
self.ReadSTFits( self.showheader, self.printstavector )
else:
print("Error: verify if directory and/or files exist")
print(" Directory: {} ==> {}".format(self.path,path__to__dir))
print("_1D extension: {} ==> {}".format(self.name_1D,name_1D_exist))
print("_DE extension: {} ==> {}".format(self.name_DE,name_DE_exist))
print("_ST extension: {} ==> {}".format(self.name_ST,name_ST_exist))
# Reading FADO one-dimensional spectra image ######################################################################################
def Read1DFits( self, showheader=False, printspectrum=False ):
# open fits file
full_path = self.path + self.name_1D
fits_file = fits.open(full_path)
print('File info from FADO: _1D.fits extension')
print(fits_file.info())
header = fits_file[0].header
#print("Print header of file")
if showheader:
print('')
print('showheader: {}'.format(showheader))
print(repr(header))
#with fits_file as hdul:
# hdul.info()
#for i in header.info():
# print(i)
data = fits_file[0].data
self.Naxis1_1D = fits_file[0].header['NAXIS1']
self.Naxis2_1D = fits_file[0].header['NAXIS2']
print("Naxis1: {} x Naxis2: {} pixels".format(self.Naxis1_1D,self.Naxis2_1D))
self.CRVAL1_1D = header['CRVAL1']
self.CRPIX1_1D = header['CRPIX1']
self.CDELT1_1D = header['CDELT1']
self.arq_base = header['ARQ_BASE']
self.arq_conf = header['ARQ_CONF']
self.olsynini = header['OLSYNINI']
self.olsynfin = header['OLSYNFIN']
self.olsyndel = header['OLSYNDEL']
self.galsnorm = header['GALSNORM']
self.lambda_0 = header['LAMBDA_0']
self.fluxunit = header['FLUXUNIT']
self.fluxunit = 10**(self.fluxunit)
# Redshift
self.redshift_aux = header['REDSHIFT']
l = []
j = 0
for i in range(self.Naxis1_1D):
l.append( self.CRVAL1_1D + (j-1) * self.CDELT1_1D )
j = j + 1
#f = data[0]
#e = data[1]
#m = data[2]
#b = data[3]
#s = data[7]
#n = data[8]
# Residual - raw, without any cosmetic treatment
#r = f - b
r = data[0] - data[3]
# Close fits file
fits_file.close()
# Store everything in spectrum
self.spectrum = np.zeros((self.Naxis1_1D,8))
self.spectrum[:,0] = l[:] # lambda
self.spectrum[:,1] = data[0] # Flux
self.spectrum[:,2] = data[1] # Error
self.spectrum[:,3] = data[2] # Mask
self.spectrum[:,4] = data[3] # Best-fit
self.spectrum[:,5] = data[7] # Smoothed
self.spectrum[:,6] = data[8] # Nebular
self.spectrum[:,7] = r[:] # Residual, raw (no cosmetic treatment)
# Delete
del data
del r
del l
###--------------------------------------------------------------------------------------------------------------------------------
if printspectrum:
print('')
print('Print extracted spectra')
print(self.spectrum)
# return spectrum
# Reading FADO one-dimensional spectra image ######################################################################################
# Reading FADO DE population vectors ##############################################################################################
def ReadDEFits( self, showheader=False, printpopvector=False ):
# open fits file
full_path = self.path + self.name_DE
fits_file = fits.open(full_path)
print('File info from FADO: _DE.fits extension')
print(fits_file.info())
header = fits_file[0].header
#print("Print header of file")
if showheader:
print('')
print('showheader: {}'.format(showheader))
print(repr(header))
#print(repr(header))
#with fits_file as hdul:
# hdul.info()
#for i in header.info():
# print(i)
data = fits_file[0].data
self.Naxis1_DE = fits_file[0].header['NAXIS1']
self.Naxis2_DE = fits_file[0].header['NAXIS2']
print("Naxis1: {} x Naxis2: {} pixels".format(self.Naxis1_DE,self.Naxis2_DE))
self.CRVAL1_DE = header['CRVAL1']
self.CRPIX1_DE = header['CRPIX1']
self.CDELT1_DE = header['CDELT1']
self.numparam = header['NUMPARAM']
self.num_base = header['NUM_BASE']
self.nindivid = header['NINDIVID']
# Extinction parameters
self.reddening_law = header['R_LAWOPT']
self.AV_extinction = header['GEXTINCT'] #* u.mag
self.AV_extinction_error = header['GEXTBDEV'] #* u.mag
self.AV_extinction_nebular = header['GNEBULAR'] #* u.mag
self.AV_extinction_nebular_error = header['GNEBBDEV'] #* u.mag
#print(self.AV_extinction, self.AV_extinction_error)
#GNEBULAR= 2.063E+00
#GNEBBDEV= 4.148E-0
# Kinematics
#V0SYSGAL= 4.501E+01
#V0SYSDEV= 7.105E-15
#VTSYSGAL= -2.083E+01
#VTSYSDEV= 8.569E+01
#VDSYSGAL= 1.054E+02
self.systemic_velocity = header['V0SYSGAL']
self.velocity_dispersion = header['VDSYSGAL']
# Call object redshift
# It must have been read before
self.redshift = redshift( self.redshift_aux,self.systemic_velocity )
del self.redshift_aux
#ROWNUM_1= 'Best solution - Light fractions'
#ROWNUM_2= 'Mean solution - Light fractions'
#ROWNUM_3= 'Med. solution - Light fractions'
#ROWNUM_4= 'Standard deviation - Light pop.'
#ROWNUM_5= 'Best solution - Mass corrected'
#ROWNUM_6= 'Mean solution - Mass corrected'
#ROWNUM_7= 'Med. solution - Mass corrected'
#ROWNUM_8= 'Standard deviation - Mass cor.'
#ROWNUM_9= 'Best solution - Mass formed'
#ROWNUM10= 'Mean solution - Mass formed'
#ROWNUM11= 'Med. solution - Mass formed'
#ROWNUM12= 'Standard deviation - Mass for.'
#BASEPOPX= '13 ==> Single solution from one single spectrum'
#BASECHI2= '14 ==> Single solution from one single spectrum'
#BASEADEV= '15 ==> Single solution from one single spectrum'
#BASEGEXT= '16 ==> Single solution from one single spectrum'
#ROWS_LUM= '17-23 ==> Individual PV solutions - Light'
#ROWSMCOR= '24-30 ==> Individual PV solutions - Mass corrected'
#ROWSMFOR= '31-37 ==> Individual PV solutions - Mass formed'
#ROWS_AGE= '38 ==> Age of base elements'
#ROWSLAGE= '39 ==> Log of age--SSP base'
#ROWS_MET= '40 ==> Metallicity of base elements'
#ROWS_ALP= '41 ==> Alpha enhancement of base elements'
self.light_fractions = np.zeros((self.Naxis1_DE))
self.mass_fractions = np.zeros((self.Naxis1_DE))
self.met_stellar_pop = np.zeros((self.Naxis1_DE))
self.log_age_stellar_pop = np.zeros((self.Naxis1_DE))
self.age_stellar_pop = np.zeros((self.Naxis1_DE))
self.light_fractions[:] = data[0] # Best solution - Light fractions
self.mass_fractions[:] = data[4] # Best solution - Mass corrected
self.met_stellar_pop[:] = data[self.Naxis2_DE-4]
self.log_age_stellar_pop[:] = data[self.Naxis2_DE-5]
self.age_stellar_pop[:] = data[self.Naxis2_DE-6]
#print(self.light_fractions)
#print(sum(self.light_fractions[0:self.num_base-1]))
#print(np.size(data[0]))
#print(self.met_stellar_pop)
#print(self.age_stellar_pop)
#print(self.log_age_stellar_pop)
## Evaluate number of ages and metallicities
#minimum_age = np.min( self.log_age_stellar_pop[0:self.num_base] )
#maximum_age = np.max( self.log_age_stellar_pop[0:self.num_base] )
sorted_met = np.sort( self.met_stellar_pop[0:self.num_base] )
minmindif_met = sorted_met[1:self.num_base] - sorted_met[0:self.num_base-1]
index_dif_met = np.arange( 0,len(minmindif_met),1 )
index_dif_met = index_dif_met[ minmindif_met > 0.0 ]
minmindif_met = minmindif_met[ minmindif_met > 0.0 ]
self.N_metallicities = np.size(minmindif_met) + 1
self.N_ages = np.empty(0)
# If it equals zero, either there is a single metallicity or something is wrong (the former is assumed)
if np.size(minmindif_met) > 1:
Z_grid = sorted_met[0]
Z_grid = np.append( Z_grid, sorted_met[ index_dif_met+1 ] )
z = np.empty(0)
for a in enumerate(Z_grid):
x = self.age_stellar_pop[ (self.age_stellar_pop > -999.0) & (self.met_stellar_pop == Z_grid[a[0]]) ]
z = np.concatenate( (x,z) )
self.N_ages = np.append( self.N_ages,np.size(x) ) # Number of ages per metallicity
# Close fits file
fits_file.close()
# Delete
del data
###--------------------------------------------------------------------------------------------------------------------------------
# Reading FADO DE population vectors ##############################################################################################
# Reading FADO DE population vectors ##############################################################################################
def ReadSTFits( self, showheader=False, printstavector=False ):
# open fits file
full_path = self.path + self.name_ST
fits_file = fits.open(full_path)
print('File info from FADO: _ST.fits extension')
print(fits_file.info())
header = fits_file[0].header
#print("Print header of file")
if showheader:
print('')
print('showheader: {}'.format(showheader))
print(repr(header))
#print(repr(header))
#ROWNUM_1= 'Luminosity weighted mean stellar age'
#ROWNUM_2= 'Mass weighted mean stellar age'
#ROWNUM_3= 'Luminosity weighted mean stellar age (LOG)'
#ROWNUM_4= 'Mass weighted mean stellar age (LOG)'
#ROWNUM_5= 'Luminosity weighted mean stellar metallicity'
#ROWNUM_6= 'Mass weighted mean stellar metallicity'
#ROWNUM_7= 'Mass ever formed'
#ROWNUM_8= 'Mass presently available'
#ROWNUM_9= 'Mass ever formed pAGB'
#ROWNUM10= 'Mass presently available pAGB'
#ROWNUM11= 'Mass ever formed < 1 Gyr'
#ROWNUM12= 'Mass presently available < 1 Gyr'
#ROWNUM13= 'Mass ever formed < 5 Gyr'
#ROWNUM14= 'Mass presently available < 5 Gyr'
#ROWNUM15= 'Total Luminosity at lambda0'
#ROWNUM16= 'Total Luminosity from stars < 1 Gyr at l0'
#ROWNUM17= 'Total Luminosity from stars < 5 Gyr at l0'
#ROWNUM18= 'chi2 '
#ROWNUM19= 'Sum pop X'
#ROWNUM20= 'Sum pop M'
#ROWNUM21= 'Stellar extinction'
#ROWNUM22= 'Nebular extinction'
#ROWNUM23= 'Systemic velocity stars'
#ROWNUM24= 'Velocity dispersion stars'
#ROWNUM25= 'Systemic velocity nebular'
#ROWNUM26= 'Velocity dispersion nebular'
#ROWNUM27= 'log Q(H)'
#ROWNUM28= 'log Q(HeI)'
#ROWNUM29= 'log Q(HeII)'
#ROWNUM30= 'log Q(H) pAGB'
#ROWNUM31= 'tau ratio'
#ROWNUM32= 'tau ratio corrected for extinction'
#ROWNUM33= 'tau ratio pAGB'
#ROWNUM34= 'tau ratio pAGB corrected for extinction'
#ROWNUM35= 'Psi '
#ROWNUM36= 'Psi corrected for extinction'
#ROWNUM37= 'zeta '
#ROWNUM38= 'zeta corrected for extinction'
#MASSEVER= 'Mass ever formed -------------'
#LOGMEBST= 5.252E+00
#LOGMEAVE= 5.252E+00
#LOGMEDEV= 5.320E-08
#MASSCORR= 'Mass presently available -----'
#LOGMCBST= 5.248E+00
#LOGMCAVE= 5.248E+00
#LOGMCDEV= 5.320E-08
#self.mean_stellar_age_light = header['BST_LAGE']
#self.mean_stellar_age_mass = header['BST_MAGE']
#self.mean_stellar_age_mass.set(Gyr, self.mean_stellar_age_light / 1.0e9)
#self.mean_stellar_age_light.Gyr = self.mean_stellar_age_light / 1.0e9
#self.mean_stellar_age_mass.Gyr = self.mean_stellar_age_mass / 1.0e9
#self.mean_stellar_log_age_light = header['BSTLLAGE']
#self.mean_stellar_log_age_mass = header['BSTLMAGE']
#self.mean_stellar_metallicity = header['BST_LMET']
# Call object mass
self.log_stellar_mass = mass( header['LOGMEBST'],header['LOGMCBST'] )
# Call object mean_stellar
self.mean_stellar_age = mean_stellar( header['BST_LAGE'],header['BST_MAGE'], oneGyr=True )
self.mean_stellar_log_age = mean_stellar( header['BSTLLAGE'],header['BSTLMAGE'] )
self.mean_stellar_metallicity = mean_stellar( header['BST_LMET'],header['BST_MMET'], solarmet=True )
#print(self.mean_stellar_age.light,self.mean_stellar_age.mass,self.mean_stellar_metallicity.light,self.mean_stellar_metallicity.mass)
#with fits_file as hdul:
# hdul.info()
#for i in header.info():
# print(i)
data = fits_file[0].data
self.Naxis1_ST = fits_file[0].header['NAXIS1']
self.Naxis2_ST = fits_file[0].header['NAXIS2']
print("Naxis1: {} x Naxis2: {} pixels".format(self.Naxis1_ST,self.Naxis2_ST))
self.CRVAL1_ST = header['CRVAL1']
self.CRPIX1_ST = header['CRPIX1']
self.CDELT1_ST = header['CDELT1']
# Close fits file
fits_file.close()
# Delete
del data
###--------------------------------------------------------------------------------------------------------------------------------
# Reading FADO DE population vectors ##############################################################################################
# Simpler Quick Parameters from FADO: Stellar Populations #########################################################################
# Mean stellar age (light and mass-weighted)
# Mean stellar metallicity (light and mass-weighted)
# Stellar mass
# Stellar extinction in the V-band
def SimplerFADOStellarPopulations( self ):
print( " ============================================================ " )
print( "... Compact information regarding the stellar populations:")
print("")
print( "... Mean stellar age")
print( "... Mean stellar age light-weighted : {:8.5g} [yr] or {:8.5f} [Gyr]".format(self.mean_stellar_age.light,self.mean_stellar_age.light_1Gyr) )
print( "... Mean stellar age mass-weighted : {:8.5g} [yr] or {:8.5f} [Gyr]".format(self.mean_stellar_age.mass,self.mean_stellar_age.mass_1Gyr) )
print( "... Mean stellar logarithmic age light-weighted : {:7.5f}".format(self.mean_stellar_log_age.light) )
print( "... Mean stellar logarithmic age mass-weighted : {:7.5f}".format(self.mean_stellar_log_age.mass) )
print( "... Mean stellar metallicity")
print( "... Mean stellar metallicity light-weighted : {:7.5f} or {:8.5f} [Z_solar]".format(self.mean_stellar_metallicity.light,self.mean_stellar_metallicity.light_solar) )
print( "... Mean stellar metallicity mass-weighted : {:7.5f} or {:8.5f} [Z_solar]".format(self.mean_stellar_metallicity.mass,self.mean_stellar_metallicity.mass_solar) )
print("")
print( "... Total stellar masses" )
print( "... Log of total stellar mass presently available (corr.) : {:7.5f} [Solar masses]".format(self.log_stellar_mass.corrected) )
print( "... Log of total stellar mass ever formed (not-corrected) : {:7.5f} [Solar masses]".format(self.log_stellar_mass.ever) )
print("")
print("... Extinction")
print( "... Stellar extinction in the V-band : {:7.5f} +/- {:7.5g}".format(self.AV_extinction,self.AV_extinction_error) )
print( "... Nebular extinction in the V-band : {:7.5f} +/- {:7.5g}".format(self.AV_extinction_nebular,self.AV_extinction_nebular_error) )
print( "... Extinction-law used in FADO object.reddening_law : {}".format(self.reddening_law) )
print( " ============================================================ " )
print("")
# Simpler Quick Parameters from FADO: Stellar Populations #########################################################################
# Simpler Quick Parameters from FADO: Kinematics of Stellar Populations ###########################################################
# Systemic velocity
# Velocity dispersion
# Redshift
def SimplerFADOKinematics( self ):
print( " ============================================================ " )
print( "... Compact information regarding the kinematics of stellar populations:")
print("")
print( "... Systemic velocity")
print( "... Systemic velocity (non-relativistic correction) : {:8.5f} [km/s]".format(self.redshift.velocity) )
print( "... Systemic velocity ( relativistic correction) : {:8.5f} [km/s]".format(self.redshift.relativistic_velocity) )
print( "... Velocity dispersion : {:8.5f} [km/s]".format(self.velocity_dispersion) )
print( "... First redshift estimation : {:8.5e} ".format(self.redshift.firstvalue) )
print( "... Final redshift (correction due to the fitting) : {:8.5e} ".format(self.redshift.value) )
print( " ============================================================ " )
print("")
# Simpler Quick Parameters from FADO: Kinematics of Stellar Populations ###########################################################
# Simpler Quick Parameters from FADO: Fit parameters ##############################################################################
# Systemic velocity
# Velocity dispersion
# Redshift
def SimplerFADOFitParameters( self ):
print( " ============================================================ " )
print( "... Compact information regarding the fit parameters of FADO:")
print("")
print( "... Parameters for the fitting")
print( "... Normalization wavelength used in the FADO fit : {:8.5f} [Å]".format(self.lambda_0) )
print( "... Flux density at the normalization wavelength : {:8.5f}".format(self.galsnorm) )
print( "... Flux density in units of : {:8.5e} [erg/s/cm²/Å]".format(self.fluxunit) )
print( "... Fit was done from {} to {} Å with δλ = {}".format(self.olsynini,self.olsynfin,self.olsyndel) )
print("")
print( "... Number of base elements used in the fit : {}".format(self.num_base) )
print( "... Number of ages {} and number of metallicities {}".format(self.N_ages,self.N_metallicities) )
for i in range(self.N_metallicities):
print( "... Minimum age for metallicity {:8.5f} is {} [yrs]".format(self.met_stellar_pop[i*np.array(self.N_ages[i], dtype=int)],self.age_stellar_pop[0:np.array(self.N_ages[i], dtype=int)-1].min()) )
print( "... Maximum age for metallicity {:8.5f} is {} [yrs]".format(self.met_stellar_pop[i*np.array(self.N_ages[i], dtype=int)],self.age_stellar_pop[0:np.array(self.N_ages[i], dtype=int)-1].max()) )
print( "" )
print( "... The base file used in FADO was: {}".format(self.arq_base) )
print( "... The configuration file used in FADO was: {}".format(self.arq_conf) )
print( " ============================================================ " )
print("")
# Simpler Quick Parameters from FADO: Fit parameters ##############################################################################
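# Hypothetical usage sketch (not part of the original module): after constructing
# a ReadFADOFits object, the compact summaries can be printed directly. The file
# name and path below are placeholders.
# galaxy = ReadFADOFits('galaxy', path='input', showheader=False)
# galaxy.SimplerFADOStellarPopulations()
# galaxy.SimplerFADOKinematics()
# galaxy.SimplerFADOFitParameters()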
# Main ############################################################################################################################
def main():
#name = '0266.51630.100.7xt.FADO'
#path = 'input'
#path ='/run/media/jean/Isaac/FADOMCDS/bin/Release/'
name = 'test'
path = 'input'
#name ='0266.51602.089.23.7xt.FADO'
r_object = ReadFADOFits( name, path=path, showheader=False )
#p_object = PlotFADO( r_object, xmin=3000.0 , xmax=9300.0, ymin=-0.3, ymax=+1.6, ymin_residual=-0.1, ymax_residual=0.1 )
if __name__ == "__main__":
main()
# Main ############################################################################################################################
|
PypiClean
|
/azureml_training_tabular-1.53.0-py3-none-any.whl/azureml/training/tabular/score/scoring.py
|
"""Computation of AutoML model evaluation metrics."""
import logging
from typing import Any, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from azureml._base_sdk_common._docstring_wrapper import experimental
from azureml.automl.core.shared.constants import MetricExtrasConstants
from azureml.automl.core.shared import logging_utilities
from . import _scoring_utilities, _validation, constants, utilities
from ._metric_base import NonScalarMetric
logger = logging.getLogger(__name__)
def aggregate_scores(
scores: List[Dict[str, Any]], metrics: Optional[List[str]] = None
) -> Dict[str, Union[float, Dict[str, Any]]]:
"""
Compute mean scores across validation folds.
:param scores: List of results from scoring functions.
:param metrics: List of metrics to aggregate. If None, autodetect metrics.
:return: Dictionary containing the aggregated scores.
"""
means = {} # type: Dict[str, Union[float, Dict[str, Any]]]
if metrics is None:
all_metrics = set()
for score_dict in scores:
all_metrics.update(set(score_dict.keys()))
metrics = list(all_metrics)
for name in metrics:
if name not in scores[0]:
logger.warning("Tried to aggregate metric {}, but {} was not found in scores".format(name, name))
continue
split_results = [score[name] for score in scores if name in score]
_validation.log_failed_splits(split_results, name)
metric_class = _scoring_utilities.get_metric_class(name)
try:
means[name] = metric_class.aggregate(split_results)
except Exception as e:
safe_name = _scoring_utilities.get_safe_metric_name(name)
logger.error("Score aggregation failed for metric {}".format(safe_name))
logging_utilities.log_traceback(e, logger, is_critical=False)
means[name] = NonScalarMetric.get_error_metric()
try:
name_extras = MetricExtrasConstants.MetricExtrasFormat.format(name)
split_results_extras = [score[name_extras] for score in scores if name_extras in score]
if len(split_results_extras) > 0:
means_name_extras = {} # type: Dict[str, List[float]]
stats = split_results_extras[0].keys()
for stat in stats:
means_name_extras[stat] = metric_class.aggregate([score[stat] for score in split_results_extras])
means[name_extras] = means_name_extras
except Exception as e:
safe_name = _scoring_utilities.get_safe_metric_name(name)
logger.error("Score aggregation failed for metric extras {}".format(safe_name))
logging_utilities.log_traceback(e, logger, is_critical=False)
for train_type in constants.ALL_TIME:
train_times = [res[train_type] for res in scores if train_type in res]
if train_times:
means[train_type] = float(np.mean(train_times))
return means
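# Hypothetical usage sketch (not part of the original module): averaging a single
# scalar metric across two validation folds. The metric name is an assumption and
# must be one supported by this package's metric classes.
# fold_scores = [{"accuracy": 0.91}, {"accuracy": 0.87}]
# aggregated = aggregate_scores(fold_scores, metrics=["accuracy"])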
def score_classification(
y_test: np.ndarray,
y_pred_probs: np.ndarray,
metrics: List[str],
class_labels: np.ndarray,
train_labels: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
y_transformer: Optional[TransformerMixin] = None,
use_binary: bool = False,
multilabel: Optional[bool] = False,
positive_label: Optional[Any] = None,
ensure_contiguous: bool = False,
) -> Dict[str, Union[float, Dict[str, Any]]]:
"""
Compute model evaluation metrics for a classification task.
All class labels for y should be given as seen by the fitted model (i.e. if the
fitted model uses a y transformer, the labels should also be transformed).
All metrics present in `metrics` will be present in the output dictionary with either
the value(s) calculated or `nan` if the calculation failed.
:param y_test: The target values (Transformed if using a y transformer)
:param y_pred_probs: The predicted probabilities for all classes.
:param metrics: Classification metrics to compute
:param class_labels: All classes found in the full dataset (includes train/valid/test sets).
These should be transformed if using a y transformer.
:param train_labels: Classes as seen (trained on) by the trained model. These values
should correspond to the columns of y_pred_probs in the correct order.
:param sample_weight: Weights for the samples (Does not need
to match sample weights on the fitted model)
:param y_transformer: Used to inverse transform labels from `y_test`. Required for non-scalar metrics.
:param use_binary: Compute metrics only on the true class for binary classification.
:param positive_label: class designed as positive class in later binary classification metrics.
:param multilabel: Indicate if it is multilabel classification.
:param ensure_contiguous: Whether to pass contiguous NumPy arrays to the sklearn functions computing metrics.
:return: A dictionary mapping metric name to metric score.
"""
if not multilabel:
y_test = _validation.format_1d(y_test, "y_test")
_validation.validate_classification(
y_test, y_pred_probs, metrics, class_labels, train_labels, sample_weight, y_transformer, multilabel=multilabel
)
_validation.log_classification_debug(
y_test, y_pred_probs, class_labels, train_labels, sample_weight=sample_weight, multilabel=multilabel
)
scoring_dto = _scoring_utilities.ClassificationDataDto(
y_test,
y_pred_probs,
class_labels,
train_labels,
sample_weight,
y_transformer,
multilabel=multilabel,
positive_label=positive_label,
)
positive_label_encoded = scoring_dto.positive_label_encoded
results = {}
for name in metrics:
try:
metric_class = _scoring_utilities.get_metric_class(name)
test_targets, pred_targets, labels, positive_label = scoring_dto.get_targets(
encoded=utilities.is_scalar(name), classwise=utilities.is_classwise(name)
)
metric = metric_class(
test_targets,
scoring_dto.y_pred_probs_padded,
scoring_dto.y_test_bin,
pred_targets,
labels,
sample_weight=sample_weight,
use_binary=use_binary,
positive_label_encoded=positive_label_encoded,
multilabel=multilabel,
y_transformer=y_transformer,
ensure_contiguous=ensure_contiguous,
)
results[name] = metric.compute()
except MemoryError:
raise
except Exception as e:
safe_name = _scoring_utilities.get_safe_metric_name(name)
logger.error("Scoring failed for classification metric {}".format(safe_name))
logging_utilities.log_traceback(e, logger, is_critical=False)
if utilities.is_scalar(name):
results[name] = np.nan
else:
results[name] = NonScalarMetric.get_error_metric()
return results
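# Hypothetical usage sketch (not part of the original module): scoring binary
# predictions. The metric name and label encoding are assumptions.
# y_true = np.array([0, 1, 1, 0])
# probs = np.array([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6], [0.7, 0.3]])
# labels = np.array([0, 1])
# scores = score_classification(y_true, probs, ["accuracy"], labels, labels)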
def score_regression(
y_test: np.ndarray,
y_pred: np.ndarray,
metrics: List[str],
y_max: Optional[float] = None,
y_min: Optional[float] = None,
y_std: Optional[float] = None,
sample_weight: Optional[np.ndarray] = None,
bin_info: Optional[Dict[str, float]] = None,
) -> Dict[str, Union[float, Dict[str, Any]]]:
"""
Compute model evaluation metrics for a regression task.
The optional parameters `y_max`, `y_min`, and `y_std` should be based on the
target column y from the full dataset.
- `y_max` and `y_min` should be used to control the normalization of
normalized metrics. The effect will be division by max - min.
- `y_std` is used to estimate a sensible range for displaying non-scalar
regression metrics.
If the metric is undefined given the input data, the score will show
as nan in the returned dictionary.
:param y_test: The target values.
:param y_pred: The predicted values.
:param metrics: List of metric names for metrics to calculate.
:type metrics: list
:param y_max: The max target value.
:param y_min: The min target value.
:param y_std: The standard deviation of targets value.
:param sample_weight:
The sample weight to be used on metrics calculation. This does not need
to match sample weights on the fitted model.
:param bin_info:
The binning information for true values. This should be calculated from make_dataset_bins. Required for
calculating non-scalar metrics.
:return: A dictionary mapping metric name to metric score.
"""
# Lenient on shape of y_test and y_pred
y_test = _validation.format_1d(y_test, "y_test")
y_test = _validation.convert_decimal_to_float(y_test)
y_pred = _validation.format_1d(y_pred, "y_pred")
_validation.validate_regression(y_test, y_pred, metrics)
_validation.log_regression_debug(y_test, y_pred, y_min, y_max, sample_weight=sample_weight)
y_min = np.nanmin(y_test) if y_min in (None, np.nan) else y_min
y_max = np.nanmax(y_test) if y_max in (None, np.nan) else y_max
y_std = np.nanstd(y_test) if y_std in (None, np.nan) else y_std
results = {}
for name in metrics:
safe_name = _scoring_utilities.get_safe_metric_name(name)
try:
metric_class = _scoring_utilities.get_metric_class(name)
metric = metric_class(
y_test, y_pred, y_min=y_min, y_max=y_max, y_std=y_std, bin_info=bin_info, sample_weight=sample_weight
)
results[name] = metric.compute()
if utilities.is_scalar(name) and np.isinf(results[name]):
logger.error("Found infinite regression score for {}, setting to nan".format(safe_name))
results[name] = np.nan
except MemoryError:
raise
except Exception as e:
logger.error("Scoring failed for regression metric {}".format(safe_name))
logging_utilities.log_traceback(e, logger, is_critical=False)
if utilities.is_scalar(name):
results[name] = np.nan
else:
results[name] = NonScalarMetric.get_error_metric()
return results
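# Hypothetical usage sketch (not part of the original module): scoring regression
# predictions with a single metric. The metric name is an assumption.
# y_true = np.array([1.0, 2.0, 3.0])
# y_hat = np.array([1.1, 1.9, 3.2])
# results = score_regression(y_true, y_hat, metrics=["r2_score"])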
def score_forecasting(
y_test: np.ndarray,
y_pred: np.ndarray,
metrics: List[str],
horizons: np.ndarray,
y_max: Optional[float] = None,
y_min: Optional[float] = None,
y_std: Optional[float] = None,
sample_weight: Optional[np.ndarray] = None,
bin_info: Optional[Dict[str, float]] = None,
X_test: Optional[pd.DataFrame] = None,
X_train: Optional[pd.DataFrame] = None,
y_train: Optional[np.ndarray] = None,
grain_column_names: Optional[List[str]] = None,
time_column_name: Optional[str] = None,
origin_column_name: Optional[str] = None,
) -> Dict[str, Union[float, Dict[str, Any]]]:
"""
Compute model evaluation metrics for a forecasting task.
`y_max`, `y_min`, and `y_std` should be based on `y_test` information unless
you would like to compute multiple metrics for comparison (e.g. cross validation),
in which case you should use a common range and standard deviation. You may
also pass in `y_max`, `y_min`, and `y_std` if you do not want them to be calculated.
All metrics present in `metrics` will be present in the output dictionary with either
the value(s) calculated or `nan` if metric calculation failed.
:param y_test: The target values.
:param y_pred: The predicted values.
:param metrics: List of metric names for metrics to calculate.
:type metrics: list
:param horizons: The horizon of each prediction. If missing or not relevant, pass None.
:param y_max: The max target value.
:param y_min: The min target value.
:param y_std: The standard deviation of targets value.
:param sample_weight:
The sample weight to be used on metrics calculation. This does not need
to match sample weights on the fitted model.
:param bin_info:
The binning information for true values. This should be calculated from make_dataset_bins. Required for
calculating non-scalar metrics.
:param X_test: The inputs which were used to compute the predictions.
:param X_train: The inputs which were used to train the model.
:param y_train: The targets which were used to train the model.
:param grain_column_names: The grain column name.
:param time_column_name: The time column name.
:param origin_column_name: The origin time column name.
:return: A dictionary mapping metric name to metric score.
"""
# Lenient on shape of y_test, y_pred, and horizons
y_test = _validation.format_1d(y_test, "y_test")
y_pred = _validation.format_1d(y_pred, "y_pred")
horizons = _validation.format_1d(horizons, "horizons")
_validation.validate_forecasting(y_test, y_pred, horizons, metrics)
_validation.log_forecasting_debug(y_test, y_pred, horizons, y_min, y_max, sample_weight=sample_weight)
y_std = np.std(y_test) if y_std is None else y_std
results = {}
for name in metrics:
if name in constants.FORECASTING_NONSCALAR_SET:
try:
metric_class = _scoring_utilities.get_metric_class(name)
metric = metric_class(
y_test,
y_pred,
horizons,
y_std=y_std,
bin_info=bin_info,
X_test=X_test,
X_train=X_train,
y_train=y_train,
grain_column_names=grain_column_names,
time_column_name=time_column_name,
origin_column_name=origin_column_name,
)
results[name] = metric.compute()
except MemoryError:
raise
except Exception as e:
safe_name = _scoring_utilities.get_safe_metric_name(name)
logger.error("Scoring failed for forecasting metric {}".format(safe_name))
logging_utilities.log_traceback(e, logger, is_critical=False)
if utilities.is_scalar(name):
results[name] = np.nan
else:
results[name] = NonScalarMetric.get_error_metric()
return results
|
PypiClean
|
/crypto-django-0.1.1.tar.gz/crypto-django-0.1.1/crypto_django/forms/fields/addresses/bitcoin_cash.py
|
from cashaddress.convert import is_valid
from django import forms
from django.core.exceptions import ValidationError
from crypto_django.constants.address import (
BITCOIN_CASH_NEW_PREFIXES,
BITCOIN_CASH_OLD_PREFIXES,
REQUIRED_BITCOIN_CASH_NEW_ADDRESS_LENGTH,
REQUIRED_BITCOIN_CASH_OLD_ADDRESS_LENGTH,
)
class BitcoinCashAddressField(forms.CharField):
"""
Bitcoin Cash address form field implementation.
"""
default_error_messages = {
'invalid': 'Invalid Bitcoin Cash address.',
'length':
'Ensure address has %(required_address_format_length)d characters (it has %(current_address_length)d).',
'prefix': 'Ensure address has \'bitcoincash:p\' or \'bitcoincash:q\' as its first thirteen characters '
'for a new-format address, or \'1\' or \'3\' as its first character for an old-format address.',
}
def to_python(self, value):
"""
Validate Bitcoin Cash address.
References:
- https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/cashaddr.md
- https://github.com/oskyk/cashaddress
- https://github.com/oskyk/cashaddress/blob/master/cashaddress/convert.py#L122
"""
# The parameter must stay named `value` to match the signature of the overridden
# 'to_python' method, so it is reassigned to `address` here instead of being renamed.
address = value
address_length = len(address)
address_first_character = address[:1]
address_first_13_characters = address[:13]
error_message_params = {
'current_address_length': address_length,
}
if address_first_character in BITCOIN_CASH_OLD_PREFIXES:
if address_length != REQUIRED_BITCOIN_CASH_OLD_ADDRESS_LENGTH:
error_message_params.update({
'required_address_format_length': REQUIRED_BITCOIN_CASH_OLD_ADDRESS_LENGTH,
})
raise ValidationError(self.error_messages.get('length'), code='length', params=error_message_params)
if not is_valid(address):
raise ValidationError(self.error_messages.get('invalid'), code='invalid')
return super().to_python(address)
if address_first_13_characters in BITCOIN_CASH_NEW_PREFIXES:
if address_length != REQUIRED_BITCOIN_CASH_NEW_ADDRESS_LENGTH:
error_message_params.update({
'required_address_format_length': REQUIRED_BITCOIN_CASH_NEW_ADDRESS_LENGTH,
})
raise ValidationError(self.error_messages.get('length'), code='length', params=error_message_params)
if not is_valid(address):
raise ValidationError(self.error_messages.get('invalid'), code='invalid')
return super().to_python(address)
raise ValidationError(self.error_messages.get('prefix'), code='prefix', params=error_message_params)
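# Hypothetical usage sketch (not part of the original module): the field can be
# used like any other Django form field. The form name below is a placeholder.
class _ExampleWithdrawalForm(forms.Form):  # pragma: no cover
    address = BitcoinCashAddressField()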
|
PypiClean
|
/fmmax-0.1.1.tar.gz/fmmax-0.1.1/examples/crystal.py
|
import functools
from typing import Tuple
import jax.numpy as jnp
import matplotlib.pyplot as plt # type: ignore[import]
import numpy as onp
from skimage import measure # type: ignore[import]
from fmmax import basis, beams, fields, fmm, layer, scattering, sources
PERMITTIVITY_AMBIENT: complex = (1.0 + 0.0j) ** 2
PERMITTIVITY_SLAB: complex = (1.5 + 0.0j) ** 2
THICKNESS_AMBIENT: float = 2.0
THICKNESS_SLAB: float = 0.8
PITCH: float = 1.0
DIAMETER: float = 0.7
RESOLUTION: float = 0.01
RESOLUTION_FIELDS: float = 0.05
WAVELENGTH: float = 0.63
APPROXIMATE_NUM_TERMS: int = 50
BRILLOUIN_GRID_SHAPE: Tuple[int, int] = (9, 9)
def simulate_crystal_with_internal_source(
permittivity_ambient: complex = PERMITTIVITY_AMBIENT,
permittivity_slab: complex = PERMITTIVITY_SLAB,
thickness_ambient: float = THICKNESS_AMBIENT,
thickness_slab: float = THICKNESS_SLAB,
pitch: float = PITCH,
diameter: float = DIAMETER,
resolution: float = RESOLUTION,
resolution_fields: float = RESOLUTION_FIELDS,
wavelength: float = WAVELENGTH,
approximate_num_terms: int = APPROXIMATE_NUM_TERMS,
brillouin_grid_shape: Tuple[int, int] = BRILLOUIN_GRID_SHAPE,
) -> Tuple[
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (ex, ey, ez)
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (hx, hy, hz)
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (x, y, z)
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (xy, xz, yz) cross sections
]:
"""Simulates a dipole source inside a photonic crystal slab.
The crystal has a square unit cell with circular holes, having cross section
and dipole position as illustrated below. The dipole is located the lower-left
corner of the unit cell centered in the supercell defined by the Brillouin grid
shape. The dipole is x-oriented and centered vertically within the photonic
crystal slab.
________________
| |
|XX XX|
|XXXX XXXX|
|XXXX XXXX|
|XX XX|
x-dipole -> o________________|
Args:
permittivity_ambient: Permittivity of the region above and below the slab, and
of the holes in the slab.
permittivity_slab: Permittivity of the slab.
thickness_ambient: Thickness of the ambient layers above and below the slab.
thickness_slab: Thickness of the photonic crystal slab.
pitch: The unit cell pitch.
diameter: The diameter of the holes in the photonic crystal.
resolution: The size of a pixel in permittivity arrays.
resolution_fields: The size of a pixel in field arrays.
wavelength: The wavelength of the dipole emission.
approximate_num_terms: The number of terms in the Fourier expansion.
brillouin_grid_shape: The shape of the grid used for Brillouin zone integration.
Returns:
The electric and magnetic fields, the grid coordinates, and the structure
cross sections: `((ex, ey, ez), (hx, hy, hz), (x, y, z), (xy, xz, yz))`.
"""
thickness_ambient_ = jnp.asarray(thickness_ambient)
thickness_slab_ = jnp.asarray(thickness_slab)
del thickness_ambient, thickness_slab
primitive_lattice_vectors = basis.LatticeVectors(
u=pitch * basis.X, v=pitch * basis.Y
)
expansion = basis.generate_expansion(
primitive_lattice_vectors=primitive_lattice_vectors,
approximate_num_terms=approximate_num_terms,
truncation=basis.Truncation.CIRCULAR,
)
# Brillouin zone integration creates a batch of in-plane wavevectors which are
# distributed throughout the first Brillouin zone.
in_plane_wavevector = basis.brillouin_zone_in_plane_wavevector(
brillouin_grid_shape, primitive_lattice_vectors
)
assert in_plane_wavevector.shape[-1] == 2
assert in_plane_wavevector.ndim == 3
eigensolve = functools.partial(
layer.eigensolve_isotropic_media,
wavelength=jnp.asarray(wavelength),
in_plane_wavevector=in_plane_wavevector,
primitive_lattice_vectors=primitive_lattice_vectors,
expansion=expansion,
formulation=fmm.Formulation.FFT,
)
mask = unit_cell_pattern(pitch, diameter, resolution)
permittivity_crystal = jnp.where(mask, permittivity_ambient, permittivity_slab)
solve_result_crystal = eigensolve(permittivity=permittivity_crystal)
solve_result_ambient = eigensolve(
permittivity=jnp.asarray(permittivity_ambient)[jnp.newaxis, jnp.newaxis]
)
# First, we model a dipole inside the photonic crystal. For this, we must break
# the stack into two, and compute scattering matrices for the stacks above and
# below the plane containing the dipole. Since we want to visualize fields, we
# also need the interior scattering matrices.
s_matrices_interior_before_source = scattering.stack_s_matrices_interior(
layer_solve_results=[solve_result_ambient, solve_result_crystal],
layer_thicknesses=[thickness_ambient_, thickness_slab_ / 2],
)
s_matrices_interior_after_source = scattering.stack_s_matrices_interior(
layer_solve_results=[solve_result_crystal, solve_result_ambient],
layer_thicknesses=[thickness_slab_ / 2, thickness_ambient_],
)
# Extract the scattering matrices relating fields at the two ends of each substack.
s_matrix_before_source = s_matrices_interior_before_source[-1][0]
s_matrix_after_source = s_matrices_interior_after_source[-1][0]
# Generate the Fourier representation of a point dipole.
dipole_x = pitch * brillouin_grid_shape[0] // 2
dipole_y = pitch * brillouin_grid_shape[1] // 2
dipole = sources.dirac_delta_source(
location=jnp.asarray([[dipole_x, dipole_y]]),
in_plane_wavevector=in_plane_wavevector,
primitive_lattice_vectors=primitive_lattice_vectors,
expansion=expansion,
)
# Compute backward eigenmode amplitudes at the end of the layer before the
# source, and the forward amplitudes at the start of the layer after the source.
(
_,
_,
bwd_amplitude_before_end,
fwd_amplitude_after_start,
_,
_,
) = sources.amplitudes_for_source(
jx=dipole,
jy=jnp.zeros_like(dipole),
jz=jnp.zeros_like(dipole),
s_matrix_before_source=s_matrix_before_source,
s_matrix_after_source=s_matrix_after_source,
)
# Compute the fields inside the structure.
amplitudes_interior = fields.stack_amplitudes_interior_with_source(
s_matrices_interior_before_source=s_matrices_interior_before_source,
s_matrices_interior_after_source=s_matrices_interior_after_source,
backward_amplitude_before_end=bwd_amplitude_before_end,
forward_amplitude_after_start=fwd_amplitude_after_start,
)
(ex, ey, ez), (hx, hy, hz), (x, y, z) = fields.stack_fields_3d_auto_grid(
amplitudes_interior=amplitudes_interior,
layer_solve_results=[
solve_result_ambient,
solve_result_crystal,
solve_result_crystal,
solve_result_ambient,
],
layer_thicknesses=[
thickness_ambient_,
thickness_slab_ / 2,
thickness_slab_ / 2,
thickness_ambient_,
],
resolution=resolution_fields,
num_unit_cells=brillouin_grid_shape,
)
# Perform the Brillouin zone integration by averaging over the Brillouin zone
# grid batch axes.
ex, ey, ez, hx, hy, hz = [
jnp.mean(field, axis=(0, 1)) for field in (ex, ey, ez, hx, hy, hz)
]
# Compute some cross sections for visualizing the structure.
section_xy, section_xz, section_yz = crystal_cross_sections(
thickness_ambient=float(thickness_ambient_),
thickness_slab=float(thickness_slab_),
pitch=pitch,
diameter=diameter,
resolution=resolution,
num_unit_cells=brillouin_grid_shape,
)
return (ex, ey, ez), (hx, hy, hz), (x, y, z), (section_xy, section_xz, section_yz)
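# Illustrative usage sketch (uses the module defaults; not part of the original
# file): the function can be called directly and the returned fields post-processed,
# e.g. to obtain the total electric field intensity.
#
#     (ex, ey, ez), _, (x, y, z), _ = simulate_crystal_with_internal_source()
#     intensity = jnp.abs(ex) ** 2 + jnp.abs(ey) ** 2 + jnp.abs(ez) ** 2
#
# See `plot_dipole_fields` below for the plotting workflow used in this module.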
def simulate_crystal_with_gaussian_beam(
polar_angle: float = 0.15 * jnp.pi,
azimuthal_angle: float = 0.0,
polarization_angle: float = 0.0,
beam_waist: float = 1.0,
beam_focus_offset: float = 0.0,
permittivity_ambient: complex = PERMITTIVITY_AMBIENT,
permittivity_slab: complex = PERMITTIVITY_SLAB,
thickness_ambient: float = THICKNESS_AMBIENT,
thickness_slab: float = THICKNESS_SLAB,
pitch: float = PITCH,
diameter: float = DIAMETER,
resolution: float = RESOLUTION,
resolution_fields: float = RESOLUTION_FIELDS,
wavelength: float = WAVELENGTH,
approximate_num_terms: int = APPROXIMATE_NUM_TERMS,
brillouin_grid_shape: Tuple[int, int] = BRILLOUIN_GRID_SHAPE,
) -> Tuple[
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (ex, ey, ez)
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (hx, hy, hz)
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (x, y, z)
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray], # (xy, xz, yz) cross sections
]:
"""Simulates a Gaussian beam incident on photonic crystal slab.
The crystal has a square unit cell with circular holes as illustrated below.
________________
| |
|XX XX|
|XXXX XXXX|
|XXXX XXXX|
|XX XX|
|________________|
Args:
polar_angle: The polar angle of the incident beam.
azimuthal_angle: The azimuthal angle of the incident beam.
polarization_angle: The angle giving the polarization rotation about the
propagation axis.
beam_waist: The Gaussian beam waist.
beam_focus_offset: The offset of the Gaussian beam focus from the top of the
photonic crystal slab.
permittivity_ambient: Permittivity of the region above and below the slab, and
of the holes in the slab.
permittivity_slab: Permittivity of the slab.
thickness_ambient: Thickness of the ambient layers above and below the slab.
thickness_slab: Thickness of the photonic crystal slab.
pitch: The unit cell pitch.
diameter: The diameter of the holes in the photonic crystal.
resolution: The size of a pixel in permittivity arrays.
resolution_fields: The size of a pixel in field arrays.
wavelength: The wavelength of the incident beam.
approximate_num_terms: The number of terms in the Fourier expansion.
brillouin_grid_shape: The shape of the grid used for Brillouin zone integration.
Returns:
The electric and magnetic fields, the grid coordinates, and structure cross
sections, `((ex, ey, ez), (hx, hy, hz), (x, y, z), (section_xy, section_xz,
section_yz))`.
"""
thickness_ambient_ = jnp.asarray(thickness_ambient)
thickness_slab_ = jnp.asarray(thickness_slab)
del thickness_ambient, thickness_slab
primitive_lattice_vectors = basis.LatticeVectors(
u=pitch * basis.X, v=pitch * basis.Y
)
expansion = basis.generate_expansion(
primitive_lattice_vectors=primitive_lattice_vectors,
approximate_num_terms=approximate_num_terms,
truncation=basis.Truncation.CIRCULAR,
)
# Brillouin zone integration creates a batch of in-plane wavevectors which are
# distributed throughout the first Brillouin zone. We shift the expansion so
# that it is centered on the direction of the incident beam.
in_plane_wavevector = basis.brillouin_zone_in_plane_wavevector(
brillouin_grid_shape, primitive_lattice_vectors
)
in_plane_wavevector += basis.plane_wave_in_plane_wavevector(
wavelength=jnp.asarray(wavelength),
polar_angle=jnp.asarray(polar_angle),
azimuthal_angle=jnp.asarray(azimuthal_angle),
permittivity=jnp.asarray(permittivity_ambient),
)
assert in_plane_wavevector.shape[-1] == 2
assert in_plane_wavevector.ndim == 3
eigensolve = functools.partial(
layer.eigensolve_isotropic_media,
wavelength=jnp.asarray(wavelength),
in_plane_wavevector=in_plane_wavevector,
primitive_lattice_vectors=primitive_lattice_vectors,
expansion=expansion,
formulation=fmm.Formulation.FFT,
)
mask = unit_cell_pattern(pitch, diameter, resolution)
permittivity_crystal = jnp.where(mask, permittivity_ambient, permittivity_slab)
solve_result_crystal = eigensolve(permittivity=permittivity_crystal)
solve_result_ambient = eigensolve(
permittivity=jnp.asarray(permittivity_ambient)[jnp.newaxis, jnp.newaxis]
)
s_matrices_interior = scattering.stack_s_matrices_interior(
layer_solve_results=[
solve_result_ambient,
solve_result_crystal,
solve_result_ambient,
],
layer_thicknesses=[thickness_ambient_, thickness_slab_, thickness_ambient_],
)
# Now compute the eigenmode amplitudes for an incident Gaussian beam.
# This is done by first obtaining the electric and magnetic fields for the
# beam, and then solving for the eigenmodes.
# TODO: replace paraxial Gaussian with a more rigorous expression.
def _paraxial_gaussian_field_fn(x, y, z):
# Returns the fields of a z-propagating, x-polarized Gaussian beam.
# See https://en.wikipedia.org/wiki/Gaussian_beam
k = 2 * jnp.pi / wavelength
z_r = jnp.pi * beam_waist**2 * jnp.sqrt(permittivity_ambient) / wavelength
w_z = beam_waist * jnp.sqrt(1 + (z / z_r) ** 2)
r = jnp.sqrt(x**2 + y**2)
ex = (
beam_waist
/ w_z
* jnp.exp(-(r**2) / w_z**2)
* jnp.exp(
1j
* (
(k * z) # Phase
+ k * r**2 / 2 * z / (z**2 + z_r**2) # Wavefront curvature
- jnp.arctan(z / z_r) # Gouy phase
)
)
)
ey = jnp.zeros_like(ex)
ez = jnp.zeros_like(ex)
hx = jnp.zeros_like(ex)
hy = ex / jnp.sqrt(permittivity_ambient)
hz = jnp.zeros_like(ex)
return (ex, ey, ez), (hx, hy, hz)
# Solve for the fields of the beam with the desired rotation and shift.
x, y = basis.unit_cell_coordinates(
primitive_lattice_vectors=primitive_lattice_vectors,
shape=permittivity_crystal.shape, # type: ignore[arg-type]
num_unit_cells=brillouin_grid_shape,
)
(beam_ex, beam_ey, _), (beam_hx, beam_hy, _) = beams.shifted_rotated_fields(
field_fn=_paraxial_gaussian_field_fn,
x=x,
y=y,
z=jnp.zeros_like(x),
beam_origin_x=jnp.amax(x) / 2,
beam_origin_y=jnp.amax(y) / 2,
beam_origin_z=thickness_ambient_ - beam_focus_offset,
polar_angle=jnp.asarray(polar_angle),
azimuthal_angle=jnp.asarray(azimuthal_angle),
polarization_angle=jnp.asarray(polarization_angle),
)
fwd_amplitude, _ = sources.amplitudes_for_fields(
ex=beam_ex[..., jnp.newaxis],
ey=beam_ey[..., jnp.newaxis],
hx=beam_hx[..., jnp.newaxis],
hy=beam_hy[..., jnp.newaxis],
layer_solve_result=solve_result_ambient,
brillouin_grid_axes=(0, 1),
)
# Compute the fields inside the structure.
amplitudes_interior = fields.stack_amplitudes_interior(
s_matrices_interior=s_matrices_interior,
forward_amplitude_0_start=fwd_amplitude,
backward_amplitude_N_end=jnp.zeros_like(fwd_amplitude),
)
(ex, ey, ez), (hx, hy, hz), (x, y, z) = fields.stack_fields_3d_auto_grid(
amplitudes_interior=amplitudes_interior,
layer_solve_results=[
solve_result_ambient,
solve_result_crystal,
solve_result_ambient,
],
layer_thicknesses=[
thickness_ambient_,
thickness_slab_,
thickness_ambient_,
],
resolution=resolution_fields,
num_unit_cells=brillouin_grid_shape,
)
# Perform the Brillouin zone integration by averaging over the Brillouin zone
# grid batch axes.
ex, ey, ez, hx, hy, hz = [
jnp.mean(field, axis=(0, 1)) for field in (ex, ey, ez, hx, hy, hz)
]
# Compute some cross sections for visualizing the structure.
section_xy, section_xz, section_yz = crystal_cross_sections(
thickness_ambient=float(thickness_ambient_),
thickness_slab=float(thickness_slab_),
pitch=pitch,
diameter=diameter,
resolution=resolution,
num_unit_cells=brillouin_grid_shape,
)
return (ex, ey, ez), (hx, hy, hz), (x, y, z), (section_xy, section_xz, section_yz)
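# Illustrative usage sketch (the angles, waist, and focus offset below are arbitrary
# placeholder values, not module defaults):
#
#     fields_e, fields_h, coords, sections = simulate_crystal_with_gaussian_beam(
#         polar_angle=0.25 * jnp.pi,
#         beam_waist=2.0,
#         beam_focus_offset=0.5,
#     )
#
# See `plot_gaussian_fields` below for the plotting workflow used in this module.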
def unit_cell_pattern(
pitch: float,
diameter: float,
resolution: float,
) -> jnp.ndarray:
"""Defines the pattern of the photonic crystal."""
x, y = jnp.meshgrid(
jnp.arange(0, pitch, resolution),
jnp.arange(0, pitch, resolution),
indexing="ij",
)
return (jnp.sqrt((x - pitch / 2) ** 2 + y**2) < diameter / 2) | (
jnp.sqrt((x - pitch / 2) ** 2 + (y - pitch) ** 2) < diameter / 2
)
def crystal_cross_sections(
thickness_ambient: float,
thickness_slab: float,
pitch: float,
diameter: float,
resolution: float,
num_unit_cells: Tuple[int, int],
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Computes cross sections of the photonic crystal structure."""
mask = unit_cell_pattern(pitch, diameter, resolution)
xy_section = jnp.tile(mask, num_unit_cells)
xz_slab = mask[:, 0]
xz_section = jnp.stack(
(
[jnp.ones_like(xz_slab)] * int(thickness_ambient / resolution)
+ [xz_slab] * int(thickness_slab / resolution)
+ [jnp.ones_like(xz_slab)] * int(thickness_ambient / resolution)
),
axis=-1,
)
xz_section = jnp.tile(xz_section, (num_unit_cells[0], 1))
yz_slab = mask[0, :]
yz_section = jnp.stack(
(
[jnp.ones_like(yz_slab)] * int(thickness_ambient / resolution)
+ [yz_slab] * int(thickness_slab / resolution)
+ [jnp.ones_like(yz_slab)] * int(thickness_ambient / resolution)
),
axis=-1,
)
yz_section = jnp.tile(yz_section, (num_unit_cells[1], 1))
return xy_section, xz_section, yz_section
def plot_dipole_fields(
pitch: float = PITCH,
resolution: float = RESOLUTION,
resolution_fields: float = RESOLUTION_FIELDS,
brillouin_grid_shape: Tuple[int, int] = BRILLOUIN_GRID_SHAPE,
**sim_kwargs,
) -> None:
"""Plots an electric field slice for the crystal with embedded source."""
sim_kwargs.update(
{
"pitch": pitch,
"brillouin_grid_shape": brillouin_grid_shape,
"resolution": resolution,
"resolution_fields": resolution_fields,
}
)
(
(ex, ey, ez),
(hx, hy, hz),
(x, y, z),
(section_xy, section_xz, section_yz),
) = simulate_crystal_with_internal_source(**sim_kwargs)
# Determine the y index at which to take the cross section.
unit_cell_ydim = x.shape[1] // brillouin_grid_shape[1]
y_idx = unit_cell_ydim * (brillouin_grid_shape[1] // 2)
xplot, zplot = jnp.meshgrid(x[:, y_idx], z, indexing="ij")
field_plot = ex[:, y_idx, :, 0].real
plt.figure(figsize=(jnp.amax(xplot), jnp.amax(zplot)), dpi=80)
ax = plt.subplot(111)
im = plt.pcolormesh(xplot, zplot, field_plot, shading="nearest", cmap="bwr")
im.set_clim([-jnp.amax(field_plot), jnp.amax(field_plot)])
contours = measure.find_contours(onp.array(section_xz))
scale_factor = pitch / resolution
for c in contours:
ax.plot(c[:, 0] / scale_factor, c[:, 1] / scale_factor, "k")
ax.axis("equal")
ax.axis("off")
ax.set_ylim(ax.get_ylim()[::-1])
plt.subplots_adjust(left=0, bottom=0, right=1, top=1)
plt.savefig("crystal_dipole.png", bbox_inches="tight")
def plot_gaussian_fields(
pitch: float = PITCH,
resolution: float = RESOLUTION,
resolution_fields: float = RESOLUTION_FIELDS,
brillouin_grid_shape: Tuple[int, int] = BRILLOUIN_GRID_SHAPE,
**sim_kwargs,
) -> None:
"""Plots an electric field slice for the crystal with Gaussian beam."""
sim_kwargs.update(
{
"pitch": pitch,
"brillouin_grid_shape": brillouin_grid_shape,
"resolution": resolution,
"resolution_fields": resolution_fields,
}
)
(
(ex, ey, ez),
(hx, hy, hz),
(x, y, z),
(section_xy, section_xz, section_yz),
) = simulate_crystal_with_gaussian_beam(**sim_kwargs)
# Determine the y index at which to take the cross section.
y_idx = y.shape[1] // 2
xplot, zplot = jnp.meshgrid(x[:, y_idx], z, indexing="ij")
field_plot = ex[:, y_idx, :, 0].real
plt.figure(figsize=(jnp.amax(xplot), jnp.amax(zplot)), dpi=80)
ax = plt.subplot(111)
im = plt.pcolormesh(xplot, zplot, field_plot, shading="nearest", cmap="bwr")
im.set_clim([-jnp.amax(field_plot), jnp.amax(field_plot)])
contours = measure.find_contours(onp.array(section_xz))
scale_factor = pitch / resolution
for c in contours:
ax.plot(c[:, 0] / scale_factor, c[:, 1] / scale_factor, "k")
ax.axis("equal")
ax.axis("off")
ax.set_ylim(ax.get_ylim()[::-1])
plt.subplots_adjust(left=0, bottom=0, right=1, top=1)
plt.savefig("crystal_gaussian.png", bbox_inches="tight")
if __name__ == "__main__":
plot_dipole_fields()
plot_gaussian_fields()
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/users/item/drives/item/list/items/item/get_activities_by_interval_with_start_date_time_with_end_date_time_with_interval/get_activities_by_interval_with_start_date_time_with_end_date_time_with_interval_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from . import get_activities_by_interval_with_start_date_time_with_end_date_time_with_interval_response
from .........models.o_data_errors import o_data_error
class GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilder():
"""
Provides operations to call the getActivitiesByInterval method.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None, end_date_time: Optional[str] = None, interval: Optional[str] = None, start_date_time: Optional[str] = None) -> None:
"""
Instantiates a new GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilder and sets the default values.
Args:
endDateTime: Usage: endDateTime='{endDateTime}'
interval: Usage: interval='{interval}'
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
startDateTime: Usage: startDateTime='{startDateTime}'
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/users/{user%2Did}/drives/{drive%2Did}/list/items/{listItem%2Did}/microsoft.graph.getActivitiesByInterval(startDateTime='{startDateTime}',endDateTime='{endDateTime}',interval='{interval}'){?%24top,%24skip,%24search,%24filter,%24count,%24select,%24orderby}"
url_tpl_params = get_path_parameters(path_parameters)
url_tpl_params[""] = endDateTime
url_tpl_params[""] = interval
url_tpl_params[""] = startDateTime
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_get_request_information(self,request_configuration: Optional[GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
Invoke function getActivitiesByInterval
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = "application/json"
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
async def get(self,request_configuration: Optional[GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[get_activities_by_interval_with_start_date_time_with_end_date_time_with_interval_response.GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalResponse]:
"""
Invoke function getActivitiesByInterval
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: Optional[get_activities_by_interval_with_start_date_time_with_end_date_time_with_interval_response.GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalResponse]
"""
request_info = self.create_get_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_async(request_info, get_activities_by_interval_with_start_date_time_with_end_date_time_with_interval_response.GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalResponse, response_handler, error_mapping)
@dataclass
class GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilderGetQueryParameters():
"""
Invoke function getActivitiesByInterval
"""
# Include count of items
count: Optional[bool] = None
# Filter items by property values
filter: Optional[str] = None
# Order items by property values
orderby: Optional[List[str]] = None
# Search items by search phrases
search: Optional[str] = None
# Select properties to be returned
select: Optional[List[str]] = None
# Skip the first n items
skip: Optional[int] = None
# Show only the first n items
top: Optional[int] = None
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "count":
return "%24count"
if original_name == "filter":
return "%24filter"
if original_name == "orderby":
return "%24orderby"
if original_name == "search":
return "%24search"
if original_name == "select":
return "%24select"
if original_name == "skip":
return "%24skip"
if original_name == "top":
return "%24top"
return original_name
@dataclass
class GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilder.GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilderGetQueryParameters] = None
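# Illustrative usage sketch (not part of the generated file; the adapter, path
# parameters, and date values below are placeholders): given a configured
# RequestAdapter, the function is invoked through the builder's async `get` method.
#
#     builder = GetActivitiesByIntervalWithStartDateTimeWithEndDateTimeWithIntervalRequestBuilder(
#         request_adapter=adapter,
#         path_parameters=raw_url_or_parameters,
#         start_date_time="2023-01-01T00:00:00Z",
#         end_date_time="2023-01-31T00:00:00Z",
#         interval="day",
#     )
#     response = await builder.get()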
|
PypiClean
|
/quickdocs-1.6.3-py3-none-any.whl/quickdocs-1.6.3.dist-info/LICENSE.md
|
MIT License
Copyright (c) 2021 Joel Lefkowitz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
PypiClean
|
/apache-airflow-providers-fastetl-0.0.22.tar.gz/apache-airflow-providers-fastetl-0.0.22/fastetl/hooks/dadosgovbr_hook.py
|
import requests
import json
import logging
from functools import cached_property
from collections import ChainMap
from airflow.hooks.base import BaseHook
from urllib.parse import urljoin
from typing import Union
class DadosGovBrHook(BaseHook):
"""
Provides access to the Dados Abertos Gov.br API and datasets resources
"""
def __init__(self,
conn_id: str,
*args,
**kwargs
):
self.conn_id = conn_id
@cached_property
def api_connection(self) -> tuple:
"""
Retrieve the API connection details from the Airflow connection.
Returns:
tuple: A tuple containing the API URL and token.
"""
conn = BaseHook.get_connection(self.conn_id)
url = getattr(conn, "host", None)
token = getattr(conn, "password", None)
return url, token
def _get_dataset(self, id: str) -> dict:
"""
Retrieve a dataset from the API by its ID.
Endpoint: /dados/api/publico/conjuntos-dados/{id}
Args:
id (str): A string representing the ID of the dataset.
Returns:
dict: A dictionary containing the metadata and resources of
the retrieved dataset.
Raises:
Exception: If an error occurs while making the API request
or processing the response.
"""
slug = f"/dados/api/publico/conjuntos-dados/{id}"
api_url, token = self.api_connection
headers = {
"accept": "application/json",
"chave-api-dados-abertos": token,
}
req_url = urljoin(api_url, slug)
response = requests.request(method="GET",
url=req_url,
headers=headers
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as error:
raise error
except Exception as error:
raise Exception("Erro ao retornar o conjunto de dados na API") \
from error
dataset = json.loads(response.text)
return dataset
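# Illustrative sketch of the payload returned by `_get_dataset` (only the keys this
# hook actually reads are shown; the real API response contains additional metadata):
#
#     {
#         "recursos": [
#             {"link": "<resource-url>", "descricao": "<description>"},
#         ],
#     }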
def _get_if_resource_exists(self,
dataset: dict,
link: str) -> Union[dict, bool]:
""" Check if a resource exists in a dataset by matching its URL.
Args:
dataset (dict): dataset dictionary as returned by the API
link (str): The URL file of the resource
Returns:
dict or bool: If a matching resource is found in the dataset,
return its dictionary representation. Otherwise, return False.
"""
matching_resources = [
resource
for resource in dataset["recursos"]
if resource["link"] == link
]
return (matching_resources[0] if matching_resources else False)
def update_dataset(
self,
dataset_id: str,
**properties
):
""" Update some properties of a given dataset
Endpoint: /dados/api/publico/conjuntos-dados/{id}
Args:
dataset_id (str): The ID of the dataset to be updated.
**properties: Keyword arguments representing the properties to be updated.
Raises:
requests.exceptions.HTTPError: If the API returns an HTTP error status.
Exception: If an error occurs during the dataset update process.
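Example (illustrative only; the dataset id and property below are placeholders):
update_dataset(
dataset_id="00000000-0000-0000-0000-000000000000",
titulo="Updated dataset title",
)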
"""
print("Payload: " +
str(dataset_id) + ": " + str(properties))
slug = f"publico/conjuntos-dados/{dataset_id}"
api_url, token = self.api_connection
headers = {
"accept": "application/json",
"chave-api-dados-abertos": token,
}
req_url = urljoin(api_url, slug)
response = requests.request(method="PATCH",
url=req_url,
headers=headers,
json=properties,
)
try:
response.raise_for_status()
print("Conjunto de Dados atualizado com sucesso")
except requests.exceptions.HTTPError as error:
raise error
except Exception as error:
raise Exception("Erro ao atualizar o dataset") \
from error
def create_or_update_resource(
self,
dataset_id: str,
titulo: str,
link: str,
formato: str,
descricao: str = None,
tipo: str = "DADOS",
):
"""
Create or update a resource for a given dataset.
Example:
create_or_update_resource(
dataset_id="3b8b981c-3e44-4df2-a9f6-2473ee4caf83",
titulo="SIORG - Distribuição de Cargos e Funções para o
mês de março/2023",
link="https://repositorio.dados.gov.br/seges/siorg/distribuicao/distribuicao-orgaos-siorg-2023-03.zip",
formato="ZIP",
descricao="Contém a distribuição dos cargos e funções
ao longo da estrutura organizacional dos órgãos e entidades
que fazem parte do SIORG, para o mês de março/2023",
tipo="DADOS",
)
Args:
dataset_id (str): A string representing the ID of the dataset
to create or update the resource for.
titulo (str): A string representing the title of the resource.
link (str): A string representing the URL link of the resource.
formato (str): A string representing the format of the file.
descricao (str, optional): An optional string representing
the description of the resource. Defaults to None.
tipo (str, optional): An optional string representing the
type of the resource. Defaults to "DADOS". Valid options:
[INVALIDO, DADOS, DOCUMENTACAO, DICIONARIO_DE_DADOS, API, OUTRO]
Returns:
None
Raises:
Exception: If an error occurs while creating or updating the
resource.
"""
dataset = self._get_dataset(id=dataset_id)
existing_resource = self._get_if_resource_exists(dataset=dataset,
link=link)
if existing_resource:
resource = dict(ChainMap(
{
'titulo': titulo,
'link': link,
'descricao': existing_resource['descricao'] \
if descricao is None else descricao,
'formato': formato,
},
existing_resource
))
else: # create resource
resource = {
'idConjuntoDados': dataset_id,
'titulo': titulo,
'link': link,
'descricao': descricao,
'tipo': tipo,
'formato': formato,
}
logging.info("Payload: " + str(resource))
slug = "recurso/salvar"
api_url, token = self.api_connection
headers = {
"accept": "application/json",
"chave-api-dados-abertos": token,
}
req_url = urljoin(api_url, slug)
response = requests.request(method="POST",
url=req_url,
headers=headers,
json=resource,
)
try:
response.raise_for_status()
if existing_resource:
logging.info("Recurso atualizado com sucesso")
else:
logging.info("Novo recurso inserido com sucesso")
except requests.exceptions.HTTPError as error:
raise error
except Exception as error:
raise Exception("Erro ao salvar o recurso") \
from error
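# Illustrative usage sketch (not part of the package; the connection id, dataset id,
# and resource URL below are placeholders):
#
#     hook = DadosGovBrHook(conn_id="dados_gov_br_default")
#     hook.create_or_update_resource(
#         dataset_id="00000000-0000-0000-0000-000000000000",
#         titulo="Example resource",
#         link="https://example.org/data/resource.csv",
#         formato="CSV",
#     )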
|
PypiClean
|
/jupyter_auth-0.0.2.tar.gz/jupyter_auth-0.0.2/jupyter_auth/labextension/static/vendors-node_modules_datalayer_icons_lib_index_js.abc0aca5971d1688239b.js
|
"use strict";
(self["webpackChunk_datalayer_jupyter_auth"] = self["webpackChunk_datalayer_jupyter_auth"] || []).push([["vendors-node_modules_datalayer_icons_lib_index_js"],{
/***/ "./node_modules/@datalayer/icons/lib/Icon.js":
/*!***************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/Icon.js ***!
\***************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
const DEFAULT_SIZE = 16;
const DEFAULT_VIEWBOX = "0 0 24 24";
const Icon = ({ children, className, onClick, theme, viewBox, ...rest }) => {
const size = rest.size ? rest.size + "px" : `${DEFAULT_SIZE}px`;
let fill = "currentColor";
if (rest.color) {
fill = rest.color;
}
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("svg", Object.assign({ xmlns: "http://www.w3.org/2000/svg", viewBox: viewBox ?? DEFAULT_VIEWBOX, width: size, height: size, className: className, fill: fill, onClick: onClick }, rest.outerProps, { style: {
display: "inline-block",
userSelect: "none",
verticalAlign: "text-bottom",
overflow: "visible"
// fill: "currentColor",
// ...rest.outerProps?.style
} }, { children: children }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (Icon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/AcademicCapIcon.js":
/*!********************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/AcademicCapIcon.js ***!
\********************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const AcademicCapIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "4 4 16 16" }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { d: "M17,13.5977511 L17,17 C17,18.6568542 14.7614237,20 12,20 C9.32225576,20 7.13615141,18.7370236 7.00611913,17.1497303 L7,17 L7,13.5977511 L10.0154442,15.3212215 C11.2451749,16.0239247 12.7548251,16.0239247 13.9845558,15.3212215 L17,13.5977511 Z M11.0077221,4.41526482 C11.6225875,4.0639132 12.3774125,4.0639132 12.9922779,4.41526482 L12.9922779,4.41526482 L19.4961389,8.13175686 C20.1679537,8.515651 20.1679537,9.484349 19.4961389,9.86824314 L19.4961389,9.86824314 L12.9922779,13.5847352 C12.3774125,13.9360868 11.6225875,13.9360868 11.0077221,13.5847352 L11.0077221,13.5847352 L4.50386106,9.86824314 C3.83204631,9.484349 3.83204631,8.515651 4.50386106,8.13175686 L4.50386106,8.13175686 Z" }, void 0) }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (AcademicCapIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/CollaborateIcon.js":
/*!********************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/CollaborateIcon.js ***!
\********************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const CollaborateIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 32 32" }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "currentColor", d: "M6 21v-1H4v1a7 7 0 0 0 7 7h3v-2h-3a5 5 0 0 1-5-5zm18-10v1h2v-1a7 7 0 0 0-7-7h-3v2h3a5 5 0 0 1 5 5zm-13 0H5a3 3 0 0 0-3 3v2h2v-2a1 1 0 0 1 1-1h6a1 1 0 0 1 1 1v2h2v-2a3 3 0 0 0-3-3zm-3-1a4 4 0 1 0-4-4a4 4 0 0 0 4 4zm0-6a2 2 0 1 1-2 2a2 2 0 0 1 2-2zm19 21h-6a3 3 0 0 0-3 3v2h2v-2a1 1 0 0 1 1-1h6a1 1 0 0 1 1 1v2h2v-2a3 3 0 0 0-3-3zm-7-5a4 4 0 1 0 4-4a4 4 0 0 0-4 4zm6 0a2 2 0 1 1-2-2a2 2 0 0 1 2 2z" }, void 0) }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (CollaborateIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/DaskLogoIcon.js":
/*!*****************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/DaskLogoIcon.js ***!
\*****************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const DaskLogoIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 512 512" }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsxs)("g", { children: [(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#FFC11E", className: "st0", d: "M143.71,157.61l126.5-72.99c1.25-0.72,2.02-2.05,2.02-3.5l0.01-43.77c0-6.48-2.66-12.9-7.83-16.81\n c-6.69-5.06-15.28-5.56-22.33-1.48L65.13,121.17c-6.22,3.59-10.06,10.23-10.06,17.41L55,369.18c0,6.47,2.65,12.89,7.81,16.81\n c6.68,5.07,15.29,5.57,22.35,1.49l37.48-21.62c1.25-0.72,2.02-2.05,2.02-3.5l0.05-171.85C124.71,176.93,131.95,164.4,143.71,157.61\n z" }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#04255C", className: "st4", d: "M446.95,124.53c-3.15-1.82-6.61-2.73-10.06-2.73c-3.45,0-6.9,0.91-10.05,2.73l-176.96,102.1\n c-6.2,3.58-10.06,10.25-10.06,17.41l-0.07,231.47c0,7.27,3.76,13.78,10.05,17.42c6.3,3.64,13.81,3.64,20.11,0l176.95-102.11\n c6.2-3.58,10.06-10.25,10.06-17.41L457,141.95C457,134.68,453.24,128.16,446.95,124.53z" }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#EF1161", className: "st2", d: "M240.95,211.14l116.78-67.38c1.25-0.72,2.02-2.05,2.02-3.5l0.02-50.98c0-6.48-2.66-12.9-7.83-16.81\n c-6.69-5.06-15.27-5.55-22.33-1.48l-48.43,27.95L152.64,173.1c-6.22,3.59-10.06,10.23-10.06,17.41l-0.05,174.18l-0.02,56.41\n c0,6.48,2.65,12.89,7.81,16.81c6.69,5.07,15.29,5.57,22.35,1.49l47.2-27.24c1.25-0.72,2.02-2.05,2.02-3.5l0.05-164.64\n C221.95,230.46,229.19,217.92,240.95,211.14z" }, void 0)] }, void 0) }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (DaskLogoIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/DatalayerLogoIcon.js":
/*!**********************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/DatalayerLogoIcon.js ***!
\**********************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const DatalayerLogoIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsxs)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 16 16" }, { children: [(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("rect", { width: "16", height: "3.2", x: "0", y: "0" }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("rect", { width: "16", height: "3.2", x: "0", y: "6.4" }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("rect", { width: "16", height: "3.2", x: "0", y: "12.8" }, void 0)] }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (DatalayerLogoIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/DatalayerWhiteLogoIcon.js":
/*!***************************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/DatalayerWhiteLogoIcon.js ***!
\***************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const DatalayerWhiteLogoIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsxs)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 16 16" }, { children: [(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("rect", { width: "16", height: "3.2", x: "0", y: "0", style: { fill: "#ffffff" } }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("rect", { width: "16", height: "3.2", x: "0", y: "6.4", style: { fill: "#ffffff" } }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("rect", { width: "16", height: "3.2", x: "0", y: "12.8", style: { fill: "#ffffff" } }, void 0)] }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (DatalayerWhiteLogoIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/PyTorchLogoIcon.js":
/*!********************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/PyTorchLogoIcon.js ***!
\********************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const PyTorchLogoIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 64 64" }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsxs)("g", Object.assign({ transform: "matrix(2.21262 0 0 2.21262 -39.453867 -1.770085)", fill: "#ee4c2c" }, { children: [(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { d: "M40.8 9.3l-2.1 2.1c3.5 3.5 3.5 9.2 0 12.7s-9.2 3.5-12.7 0-3.5-9.2 0-12.7l5.6-5.6.7-.8V.8l-8.5 8.5a11.89 11.89 0 0 0 0 16.9 11.89 11.89 0 0 0 16.9 0c4.8-4.7 4.8-12.3.1-16.9z" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("circle", { r: "1.6", cy: "7.1", cx: "36.6" }, void 0)] }), void 0) }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (PyTorchLogoIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/ReactJsLogoIcon.js":
/*!********************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/ReactJsLogoIcon.js ***!
\********************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const ReactJsLogoIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsxs)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 -1 23 23" }, { children: [(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { d: "M18.9107,6.63257h0q-.36721-.126-.74042-.2333.06187-.25141.11441-.505c.56045-2.72064.194-4.91237-1.05739-5.63386-1.1998-.692-3.1621.02952-5.14394,1.75414q-.29293.2555-.57267.52554-.18727-.17951-.3811-.352C9.05257.3439,6.97066-.43316,5.72058.29046,4.52191.98436,4.16686,3.04489,4.67144,5.62322q.0753.383.17.76179c-.29458.08367-.57908.17284-.85127.26771C1.55514,7.50165,0,8.83225,0,10.21231c0,1.42546,1.66935,2.8552,4.20575,3.722q.3085.10494.62193.19442-.10179.408-.18068.82114c-.48106,2.53354-.10535,4.54521,1.09017,5.23484,1.23481.712,3.30725-.01985,5.32533-1.78387q.23926-.20917.47994-.44238.3029.29225.62173.56727c1.95477,1.68207,3.88531,2.36132,5.07982,1.66986,1.23369-.71416,1.63454-2.87525,1.114-5.50459q-.05955-.30124-.13792-.61481.21834-.06443.42772-.13355C21.28454,13.06915,23,11.65681,23,10.21232,23,8.82726,21.39478,7.48771,18.9107,6.63257ZM12.7284,2.75581C14.42646,1.278,16.01346.69457,16.73657,1.1116h0c.77014.44421,1.06971,2.2354.5858,4.58441q-.04758.22953-.10342.45724a23.53752,23.53752,0,0,0-3.07527-.48584A23.08128,23.08128,0,0,0,12.1995,3.24094Q12.45788,2.99184,12.7284,2.75581ZM6.79111,11.39124q.312.60265.65207,1.19013.34692.59911.7221,1.18117a20.92168,20.92168,0,0,1-2.11967-.3408C6.24867,12.766,6.49887,12.08443,6.79111,11.39124ZM6.79,9.08041c-.28613-.67863-.53093-1.34586-.73085-1.99019.65624-.14688,1.356-.26689,2.08516-.358q-.36611.571-.7051,1.15877Q7.10076,8.478,6.79,9.08041Zm.52228,1.15552q.45411-.94517.9783-1.8542v.0002q.52369-.90857,1.11521-1.77542c.684-.05171,1.38536-.07879,2.09432-.07879.71212,0,1.41437.02728,2.09819.0794q.58514.86487,1.10818,1.76941.52565.90635.99153,1.84545-.46083.94817-.98828,1.86173h-.0001q-.52261.90786-1.1034,1.7803c-.6824.04876-1.3876.0739-2.10623.0739-.71568,0-1.41193-.02229-2.08241-.06575q-.59555-.86995-1.12406-1.78305Q7.76789,11.18148,7.31227,10.23593Zm8.24853,2.33862q.347-.60182.667-1.21863h0a20.86671,20.86671,0,0,1,.77238,2.02327,20.85164,20.85164,0,0,1-2.14552.36573Q15.21935,13.16682,15.5608,12.57455Zm.65767-3.49343q-.31883-.605-.66163-1.19684h0q-.33727-.58258-.6994-1.15022c.7339.09263,1.437.21579,2.09717.36654A20.95909,20.95909,0,0,1,16.21847,9.08112ZM11.511,3.94359a21.01288,21.01288,0,0,1,1.3535,1.63393q-1.35843-.06419-2.7184-.00061C10.593,4.98765,11.0507,4.44022,11.511,3.94359ZM6.21284,1.14081c.76953-.44543,2.47095.18973,4.26428,1.782.11461.10179.22974.20836.34507.3186A23.54542,23.54542,0,0,0,8.86294,5.66608a24.008,24.008,0,0,0-3.06916.477q-.088-.35228-.15808-.70866v.0001C5.20339,3.22536,5.49044,1.559,6.21284,1.14081ZM5.09132,13.18233q-.286-.08187-.56778-.17773A8.32371,8.32371,0,0,1,1.841,11.57955a2.03072,2.03072,0,0,1-.85849-1.36724c0-.83742,1.24865-1.90571,3.33117-2.63178q.39208-.1361.79162-.24908a23.56455,23.56455,0,0,0,1.121,2.90478A23.92247,23.92247,0,0,0,5.09132,13.18233ZM10.41594,17.661a8.32161,8.32161,0,0,1-2.57467,1.61184h-.0001a2.03042,2.03042,0,0,1-1.61306.06067c-.72556-.41836-1.02706-2.03376-.61573-4.20035q.07337-.38407.168-.76363a23.10444,23.10444,0,0,0,3.0995.44869,23.90954,23.90954,0,0,0,1.97431,2.43929Q10.64,17.46459,10.41594,17.661Zm1.12223-1.11053c-.46569-.50253-.93015-1.05831-1.38383-1.65612q.66051.026,1.34566.02606.70326,0,1.38841-.03084A20.89425,20.89425,0,0,1,11.53817,16.55045Zm5.96651,1.367a2.03039,2.03039,0,0,1-.753,1.4278c-.72485.41958-2.275-.12581-3.94659-1.56431q-.2875-.24735-.57837-.5
2727a23.08914,23.08914,0,0,0,1.9279-2.448,22.93647,22.93647,0,0,0,3.11507-.48014q.07024.284.12449.55638h0A8.32,8.32,0,0,1,17.50468,17.91749Zm.83417-4.90739h-.0001c-.12571.04163-.25478.08184-.38629.12082a23.06121,23.06121,0,0,0-1.16468-2.91373,23.05112,23.05112,0,0,0,1.11938-2.87128c.23524.0682.46365.14.68372.21579,2.12842.73258,3.42665,1.81593,3.42665,2.65061C22.01753,11.10145,20.61538,12.25574,18.33885,13.0101Z", fill: "#61dafb" }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { d: "M11.5,8.1585a2.05386,2.05386,0,1,1-2.05381,2.05381A2.05381,2.05381,0,0,1,11.5,8.1585", fill: "#61dafb" }, void 0)] }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (ReactJsLogoIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/SchoolIcon.js":
/*!***************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/SchoolIcon.js ***!
\***************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const SchoolIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "2 2 44 44" }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsxs)("g", Object.assign({ fill: "none", stroke: "currentColor", "stroke-width": "3" }, { children: [(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { "stroke-linejoin": "round", d: "M4 33a2 2 0 0 1 2-2h6v-7l12-8l12 8v7h6a2 2 0 0 1 2 2v9a2 2 0 0 1-2 2H4V33Z" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { "stroke-linecap": "round", d: "M24 6v10" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { "stroke-linecap": "round", "stroke-linejoin": "round", d: "M36 12V6s-1.5 3-6 0s-6 0-6 0v6s1.5-3 6 0s6 0 6 0Zm-8 32V31h-8v13m-2 0h12" }, void 0)] }), void 0) }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (SchoolIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/StudentIcon.js":
/*!****************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/StudentIcon.js ***!
\****************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const StudentIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 190 190" }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { d: "M 186.92157,31.082422 98.312115,0.36094054 a 7.8456282,8.1603935 0 0 0 -4.615072,0 L 5.1798919,31.082422 H 4.995289 L 4.0722739,31.562445 H 3.979972 l -0.9230151,0.576029 c 0,0.096 -0.092302,0.096 -0.184603,0.192009 l -0.7384121,0.672032 -0.6461105,0.768037 c 0,0.096 -0.092302,0.096 -0.092302,0.192009 l -0.55380912,0.864042 c 0,0.096 0,0.096 -0.0923015,0.192009 l -0.36920604,0.864041 -0.27690452,1.056052 v 0.288014 A 3.4151558,3.5521713 0 0 0 0.0110076,38.378774 v 76.803696 a 7.3841208,7.6803705 0 0 0 14.7682414,0 V 49.035288 l 31.013306,10.752519 a 58.334554,60.674926 0 0 0 -8.860945,32.35356 59.072966,61.442963 0 0 0 27.690453,52.034503 88.70175,92.260449 0 0 0 -42.920201,35.90573 7.4764221,7.7763749 0 0 0 2.215235,10.65652 7.2918191,7.5843657 0 0 0 10.153167,-2.2081 73.841206,76.803703 0 0 1 123.868636,0 7.3841206,7.6803703 0 0 0 6.1842,3.45616 6.9226131,7.2003472 0 0 0 3.96896,-1.24806 7.4764221,7.7763749 0 0 0 2.21524,-10.65652 88.70175,92.260449 0 0 0 -42.9202,-35.90573 59.072966,61.442963 0 0 0 27.69045,-52.034503 58.334554,60.674926 0 0 0 -8.86096,-32.35356 l 40.70498,-14.11268 a 7.3841206,7.6803703 0 0 0 0,-14.592705 z M 140.3093,92.141367 a 44.304728,46.082226 0 0 1 -88.609448,0 44.858533,46.65825 0 0 1 8.584039,-27.36132 l 33.413152,11.616561 a 7.3841206,7.6803703 0 0 0 4.615072,0 L 131.72526,64.780047 a 44.858533,46.65825 0 0 1 8.58404,27.36132 z m -8.58404,-43.490098 h -0.0923 L 96.00458,61.035867 60.376194,48.651269 h -0.0923 L 30.747409,38.378774 96.00458,15.721681 161.26175,38.378774 Z" }, void 0) }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (StudentIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/TensorFlowLogoIcon.js":
/*!***********************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/TensorFlowLogoIcon.js ***!
\***********************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const TensorFlowLogoIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 32 32" }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("g", Object.assign({ transform: "translate(-77.942529,-177.00005)", id: "layer1" }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsxs)("g", Object.assign({ id: "g4550" }, { children: [(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { style: { fill: "#e55b2d", fillOpacity: 1 }, d: "m 360.04883,687.87305 v 18.89843 l 32.73047,18.89844 v -18.89844 z m -65.46289,18.89843 v 18.89844 l 16.36523,9.44727 V 716.2207 Z m 49.0957,9.44922 -16.36523,9.44922 v 56.69141 l 16.36523,9.44922 v -37.79493 l 16.36719,9.44922 v -18.89843 l -16.36719,-9.44922 z", transform: "scale(0.26458333)", id: "path4508" }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { style: { fill: "#ed8e24", fillOpacity: 1 }, d: "m 360.04883,687.87305 -49.09766,28.34765 v 18.89649 l 32.73047,-18.89649 v 18.89649 l 16.36719,-9.44727 z m 49.09765,9.44922 -16.36718,9.44921 v 18.89844 l 16.36718,-9.44922 z m -32.73242,37.79492 -16.36523,9.44922 v 18.89843 l 16.36523,-9.44922 z m -16.36523,28.34765 -16.36719,-9.44922 v 37.79493 l 16.36719,-9.44922 z", transform: "scale(0.26458333)", id: "path4491" }, void 0),
(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { style: { fill: "#f8bf3c", fillOpacity: 1 }, d: "m 360.04883,668.97656 -65.46289,37.79492 16.36523,9.44922 49.09766,-28.34765 32.73047,18.89843 16.36718,-9.44921 z m 0,56.69336 -16.36719,9.44727 16.36719,9.44922 16.36523,-9.44922 z", transform: "scale(0.26458333)", id: "path4506" }, void 0)] }), void 0) }), void 0) }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (TensorFlowLogoIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/TwitterIcon.js":
/*!****************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/TwitterIcon.js ***!
\****************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const TwitterIcon = (props) => {
const color = props.color || "#1DA1F2";
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 24 24", color: color }, { children: (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { d: "M23.953 4.57a10 10 0 01-2.825.775 4.958 4.958 0 002.163-2.723c-.951.555-2.005.959-3.127 1.184a4.92 4.92 0 00-8.384 4.482C7.69 8.095 4.067 6.13 1.64 3.162a4.822 4.822 0 00-.666 2.475c0 1.71.87 3.213 2.188 4.096a4.904 4.904 0 01-2.228-.616v.06a4.923 4.923 0 003.946 4.827 4.996 4.996 0 01-2.212.085 4.936 4.936 0 004.604 3.417 9.867 9.867 0 01-6.102 2.105c-.39 0-.779-.023-1.17-.067a13.995 13.995 0 007.557 2.209c9.053 0 13.998-7.496 13.998-13.985 0-.21 0-.42-.015-.63A9.935 9.935 0 0024 4.59z" }, void 0) }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (TwitterIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/icons/WebRtcIcon.js":
/*!***************************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/icons/WebRtcIcon.js ***!
\***************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react/jsx-runtime */ "./node_modules/react/jsx-runtime.js");
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
const WebRtcIcon = (props) => {
return ((0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsxs)(_Icon__WEBPACK_IMPORTED_MODULE_1__["default"], Object.assign({}, props, { viewBox: "0 0 256 249" }, { children: [(0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#F60", d: "M142.077 191.087c0 31.806-25.782 57.592-57.588 57.592c-31.81 0-57.593-25.786-57.593-57.592c0-31.806 25.782-57.592 57.593-57.592c31.806 0 57.588 25.786 57.588 57.592" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#FC0", d: "M255.98 110.459c0 31.802-25.782 57.592-57.588 57.592c-31.81 0-57.592-25.79-57.592-57.592c0-31.807 25.781-57.597 57.592-57.597c31.806 0 57.588 25.79 57.588 57.597" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#0089CC", d: "M115.2 109.18c0 31.802-25.781 57.593-57.592 57.593c-31.802 0-57.588-25.79-57.588-57.592c0-31.807 25.786-57.597 57.588-57.597c31.81 0 57.592 25.79 57.592 57.597" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#009939", d: "M230.386 191.087c0 31.806-25.782 57.592-57.597 57.592c-31.802 0-57.588-25.786-57.588-57.592c0-31.806 25.786-57.592 57.588-57.592c31.815 0 57.597 25.786 57.597 57.592" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#BF0000", d: "M185.592 57.985c0 31.806-25.786 57.592-57.592 57.592c-31.806 0-57.592-25.786-57.592-57.592C70.408 26.179 96.194.392 128 .392c31.806 0 57.592 25.787 57.592 57.593" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#FC0007", d: "M140.799 110.458c0 1.212.105 2.398.181 3.593c25.546-5.894 44.61-28.733 44.61-56.068c0-1.212-.105-2.402-.18-3.597c-25.546 5.897-44.611 28.737-44.611 56.072" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#1CD306", d: "M148.397 138.975c9.925 17.352 28.576 29.075 49.997 29.075c8.73 0 16.976-2.001 24.393-5.48c-9.92-17.35-28.572-29.074-49.997-29.074c-8.73 0-16.976 2-24.393 5.48" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#0F7504", d: "M115.2 191.087c0 14.071 5.058 26.947 13.442 36.948c8.376-10 13.434-22.877 13.434-36.948c0-14.07-5.058-26.947-13.434-36.948c-8.384 10.001-13.442 22.877-13.442 36.948" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#0C5E87", d: "M34.807 162.057a57.324 57.324 0 0 0 22.801 4.716c21.21 0 39.688-11.496 49.685-28.564a57.336 57.336 0 0 0-22.801-4.711c-21.21 0-39.692 11.495-49.685 28.56" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#6B0001", d: "M70.655 53.126c-.136 1.604-.25 3.217-.25 4.86c0 27.313 19.036 50.132 44.552 56.05c.13-1.604.245-3.217.245-4.855c0-27.314-19.032-50.14-44.547-56.055" }, void 0), (0,react_jsx_runtime__WEBPACK_IMPORTED_MODULE_0__.jsx)("path", { fill: "#FFF", d: "M76.03 183.96h-9.009c-7.953 0-14.42-6.446-14.42-14.379V88.035c0-7.932 6.467-14.383 14.42-14.383H179.99c7.954 0 14.417 6.45 14.417 14.383v81.546c0 7.933-6.463 14.38-14.417 14.38h-38.484L64.29 221.81l11.74-37.85Z" }, void 0)] }), void 0));
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (WebRtcIcon);
/***/ }),
/***/ "./node_modules/@datalayer/icons/lib/index.js":
/*!****************************************************!*\
!*** ./node_modules/@datalayer/icons/lib/index.js ***!
\****************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "AcademicCapIcon": () => (/* reexport safe */ _icons_AcademicCapIcon__WEBPACK_IMPORTED_MODULE_1__["default"]),
/* harmony export */ "CollaborateIcon": () => (/* reexport safe */ _icons_CollaborateIcon__WEBPACK_IMPORTED_MODULE_2__["default"]),
/* harmony export */ "DaskLogoIcon": () => (/* reexport safe */ _icons_DaskLogoIcon__WEBPACK_IMPORTED_MODULE_3__["default"]),
/* harmony export */ "DatalayerLogoIcon": () => (/* reexport safe */ _icons_DatalayerLogoIcon__WEBPACK_IMPORTED_MODULE_4__["default"]),
/* harmony export */ "DatalayerWhiteLogoIcon": () => (/* reexport safe */ _icons_DatalayerWhiteLogoIcon__WEBPACK_IMPORTED_MODULE_5__["default"]),
/* harmony export */ "OrganisationIcon": () => (/* reexport safe */ _primer_octicons_react__WEBPACK_IMPORTED_MODULE_6__.OrganizationIcon),
/* harmony export */ "PyTorchLogoIcon": () => (/* reexport safe */ _icons_PyTorchLogoIcon__WEBPACK_IMPORTED_MODULE_7__["default"]),
/* harmony export */ "ReactJsLogoIcon": () => (/* reexport safe */ _icons_ReactJsLogoIcon__WEBPACK_IMPORTED_MODULE_8__["default"]),
/* harmony export */ "SchoolIcon": () => (/* reexport safe */ _icons_SchoolIcon__WEBPACK_IMPORTED_MODULE_9__["default"]),
/* harmony export */ "StudentIcon": () => (/* reexport safe */ _icons_StudentIcon__WEBPACK_IMPORTED_MODULE_10__["default"]),
/* harmony export */ "TensorFlowLogoIcon": () => (/* reexport safe */ _icons_TensorFlowLogoIcon__WEBPACK_IMPORTED_MODULE_11__["default"]),
/* harmony export */ "TwitterIcon": () => (/* reexport safe */ _icons_TwitterIcon__WEBPACK_IMPORTED_MODULE_12__["default"]),
/* harmony export */ "WebRtcIcon": () => (/* reexport safe */ _icons_WebRtcIcon__WEBPACK_IMPORTED_MODULE_13__["default"]),
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__)
/* harmony export */ });
/* harmony import */ var _Icon__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./Icon */ "./node_modules/@datalayer/icons/lib/Icon.js");
/* harmony import */ var _icons_AcademicCapIcon__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./icons/AcademicCapIcon */ "./node_modules/@datalayer/icons/lib/icons/AcademicCapIcon.js");
/* harmony import */ var _icons_CollaborateIcon__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./icons/CollaborateIcon */ "./node_modules/@datalayer/icons/lib/icons/CollaborateIcon.js");
/* harmony import */ var _icons_DaskLogoIcon__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ./icons/DaskLogoIcon */ "./node_modules/@datalayer/icons/lib/icons/DaskLogoIcon.js");
/* harmony import */ var _icons_DatalayerLogoIcon__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ./icons/DatalayerLogoIcon */ "./node_modules/@datalayer/icons/lib/icons/DatalayerLogoIcon.js");
/* harmony import */ var _icons_DatalayerWhiteLogoIcon__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! ./icons/DatalayerWhiteLogoIcon */ "./node_modules/@datalayer/icons/lib/icons/DatalayerWhiteLogoIcon.js");
/* harmony import */ var _primer_octicons_react__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! @primer/octicons-react */ "webpack/sharing/consume/default/@primer/octicons-react/@primer/octicons-react?60b6");
/* harmony import */ var _primer_octicons_react__WEBPACK_IMPORTED_MODULE_6___default = /*#__PURE__*/__webpack_require__.n(_primer_octicons_react__WEBPACK_IMPORTED_MODULE_6__);
/* harmony import */ var _icons_PyTorchLogoIcon__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ./icons/PyTorchLogoIcon */ "./node_modules/@datalayer/icons/lib/icons/PyTorchLogoIcon.js");
/* harmony import */ var _icons_ReactJsLogoIcon__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ./icons/ReactJsLogoIcon */ "./node_modules/@datalayer/icons/lib/icons/ReactJsLogoIcon.js");
/* harmony import */ var _icons_SchoolIcon__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./icons/SchoolIcon */ "./node_modules/@datalayer/icons/lib/icons/SchoolIcon.js");
/* harmony import */ var _icons_StudentIcon__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! ./icons/StudentIcon */ "./node_modules/@datalayer/icons/lib/icons/StudentIcon.js");
/* harmony import */ var _icons_TensorFlowLogoIcon__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ./icons/TensorFlowLogoIcon */ "./node_modules/@datalayer/icons/lib/icons/TensorFlowLogoIcon.js");
/* harmony import */ var _icons_TwitterIcon__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(/*! ./icons/TwitterIcon */ "./node_modules/@datalayer/icons/lib/icons/TwitterIcon.js");
/* harmony import */ var _icons_WebRtcIcon__WEBPACK_IMPORTED_MODULE_13__ = __webpack_require__(/*! ./icons/WebRtcIcon */ "./node_modules/@datalayer/icons/lib/icons/WebRtcIcon.js");
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (Object.assign(_Icon__WEBPACK_IMPORTED_MODULE_0__["default"], {
AcademicCap: _icons_AcademicCapIcon__WEBPACK_IMPORTED_MODULE_1__["default"],
Collaborate: _icons_CollaborateIcon__WEBPACK_IMPORTED_MODULE_2__["default"],
DaskLogo: _icons_DaskLogoIcon__WEBPACK_IMPORTED_MODULE_3__["default"],
DatalayerLogo: _icons_DatalayerLogoIcon__WEBPACK_IMPORTED_MODULE_4__["default"],
DatalayerWhiteLogo: _icons_DatalayerWhiteLogoIcon__WEBPACK_IMPORTED_MODULE_5__["default"],
Organisation: _primer_octicons_react__WEBPACK_IMPORTED_MODULE_6__.OrganizationIcon,
PyTorchLogo: _icons_PyTorchLogoIcon__WEBPACK_IMPORTED_MODULE_7__["default"],
ReactJs: _icons_ReactJsLogoIcon__WEBPACK_IMPORTED_MODULE_8__["default"],
School: _icons_SchoolIcon__WEBPACK_IMPORTED_MODULE_9__["default"],
Student: _icons_StudentIcon__WEBPACK_IMPORTED_MODULE_10__["default"],
TensorFlowLogo: _icons_TensorFlowLogoIcon__WEBPACK_IMPORTED_MODULE_11__["default"],
Twitter: _icons_TwitterIcon__WEBPACK_IMPORTED_MODULE_12__["default"],
WebRtc: _icons_WebRtcIcon__WEBPACK_IMPORTED_MODULE_13__["default"],
}));
/***/ })
}]);
//# sourceMappingURL=vendors-node_modules_datalayer_icons_lib_index_js.abc0aca5971d1688239b.js.map
|
PypiClean
|
/cohesity-sdk-1.1.0.tar.gz/cohesity-sdk-1.1.0/cohesity_sdk/cluster/model/day_schedule.py
|
import re # noqa: F401
import sys # noqa: F401
from cohesity_sdk.cluster.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from cohesity_sdk.cluster.model.frequency_schedule import FrequencySchedule
globals()['FrequencySchedule'] = FrequencySchedule
class DaySchedule(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('frequency',): {
'inclusive_minimum': 1,
},
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'frequency': (int, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'frequency': 'frequency', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, frequency, *args, **kwargs): # noqa: E501
"""DaySchedule - a model defined in OpenAPI
Args:
frequency (int, none_type): Specifies a factor to multiply the unit by, to determine the backup schedule. <br> Example: If 'frequency' is set to 2 and the unit is 'Hours', then Snapshots are backed up every 2 hours. <br> This field is only applicable if unit is 'Minutes', 'Hours' or 'Days'.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'frequency': frequency,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
FrequencySchedule,
],
'oneOf': [
],
}
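# Usage sketch (illustrative only; it relies solely on the generated model above).
# DaySchedule composes FrequencySchedule via allOf, so `frequency` is the only
# required argument and it must satisfy the inclusive_minimum of 1:
#
#     from cohesity_sdk.cluster.model.day_schedule import DaySchedule
#
#     schedule = DaySchedule(frequency=2)   # e.g. back up every 2 days
#     schedule.frequency                    # -> 2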
|
PypiClean
|
/abqpy-2023.5.6-py3-none-any.whl/abaqus/Job/JobSession.py
|
from typing_extensions import Literal
from abqpy.decorators import abaqus_class_doc, abaqus_method_doc
from ..Session.SessionBase import SessionBase
from ..UtilityAndView.abaqusConstants import ALL, LINUX, OFF, ON, Boolean
from ..UtilityAndView.abaqusConstants import abaqusConstants as C
from .Queue import Queue
@abaqus_class_doc
class JobSession(SessionBase):
@abaqus_method_doc
def Queue(
self,
name: str,
queueName: str,
hostName: str = "",
fileCopy: Boolean = ON,
directory: str = "",
driver: str = "",
remotePlatform: Literal[C.LINUX] = LINUX,
filesToCopy: str = ALL,
deleteAfterCopy: Boolean = OFF,
description: str = "",
) -> Queue:
"""This method creates a Queue object. Note:Remote queues are available only on Linux platforms.
.. note::
This function can be accessed by::
session.Queue
Parameters
----------
name
A String specifying the name of the new Queue object.
queueName
A String specifying the name of the remote analysis queue.
hostName
A String specifying the name of the remote host. The default value is an empty string.
fileCopy
A Boolean specifying if the results files are to be copied from the remote machine to
the local machine. The default value is ON.
directory
A String specifying the remote location for the execution of the simulation. The default
value is an empty string.
driver
A String specifying the designation of the remote driver. The default value is "abaqus".
remotePlatform
A SymbolicConstant specifying the type of operating system on the remote machine. The
default value is Linux.
filesToCopy
A list of Strings specifying the files to be copied from the remote location to the
local machine, or ALL. Strings specified in a list are the extensions of the job files
that will be copied, such as ('log', 'dat', 'msg', 'sta', 'odb'). The default value is
ALL.
deleteAfterCopy
A Boolean specifying whether remote files are to be deleted after they are copied to the
local machine. The default value is OFF.
description
A String specifying a description of the queue. The default value is an empty string.
Returns
-------
Queue
A Queue object.
Raises
------
Exception
Remote queue host name is not set, if **fileCopy** = ON and **hostName** is empty.
Directory in which to run the job on the remote computer is not set, if **fileCopy** = ON and **directory** is empty.
"""
self.queues[name] = queue = Queue(
name,
queueName,
hostName,
fileCopy,
directory,
driver,
remotePlatform,
filesToCopy,
deleteAfterCopy,
description,
)
return queue
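# Usage sketch (illustrative only; assumes an interactive Abaqus `session` object
# and the OFF constant imported from abaqusConstants, per the docstring above):
#
#     remote_queue = session.Queue(
#         name="longJobs",
#         queueName="analysis",
#         hostName="cluster01",          # required when fileCopy is ON
#         directory="/scratch/jobs",     # required when fileCopy is ON
#         deleteAfterCopy=OFF,
#     )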
|
PypiClean
|
/alipay-sdk-python-pycryptodome-3.3.202.tar.gz/alipay-sdk-python-pycryptodome-3.3.202/alipay/aop/api/domain/AlipayEcoEduKtStudentModifyModel.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.UserDetails import UserDetails
class AlipayEcoEduKtStudentModifyModel(object):
def __init__(self):
self._child_name = None
self._isv_pid = None
self._school_no = None
self._school_pid = None
self._status = None
self._student_code = None
self._student_identify = None
self._student_no = None
self._users = None
@property
def child_name(self):
return self._child_name
@child_name.setter
def child_name(self, value):
self._child_name = value
@property
def isv_pid(self):
return self._isv_pid
@isv_pid.setter
def isv_pid(self, value):
self._isv_pid = value
@property
def school_no(self):
return self._school_no
@school_no.setter
def school_no(self, value):
self._school_no = value
@property
def school_pid(self):
return self._school_pid
@school_pid.setter
def school_pid(self, value):
self._school_pid = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def student_code(self):
return self._student_code
@student_code.setter
def student_code(self, value):
self._student_code = value
@property
def student_identify(self):
return self._student_identify
@student_identify.setter
def student_identify(self, value):
self._student_identify = value
@property
def student_no(self):
return self._student_no
@student_no.setter
def student_no(self, value):
self._student_no = value
@property
def users(self):
return self._users
@users.setter
def users(self, value):
if isinstance(value, list):
self._users = list()
for i in value:
if isinstance(i, UserDetails):
self._users.append(i)
else:
self._users.append(UserDetails.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.child_name:
if hasattr(self.child_name, 'to_alipay_dict'):
params['child_name'] = self.child_name.to_alipay_dict()
else:
params['child_name'] = self.child_name
if self.isv_pid:
if hasattr(self.isv_pid, 'to_alipay_dict'):
params['isv_pid'] = self.isv_pid.to_alipay_dict()
else:
params['isv_pid'] = self.isv_pid
if self.school_no:
if hasattr(self.school_no, 'to_alipay_dict'):
params['school_no'] = self.school_no.to_alipay_dict()
else:
params['school_no'] = self.school_no
if self.school_pid:
if hasattr(self.school_pid, 'to_alipay_dict'):
params['school_pid'] = self.school_pid.to_alipay_dict()
else:
params['school_pid'] = self.school_pid
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.student_code:
if hasattr(self.student_code, 'to_alipay_dict'):
params['student_code'] = self.student_code.to_alipay_dict()
else:
params['student_code'] = self.student_code
if self.student_identify:
if hasattr(self.student_identify, 'to_alipay_dict'):
params['student_identify'] = self.student_identify.to_alipay_dict()
else:
params['student_identify'] = self.student_identify
if self.student_no:
if hasattr(self.student_no, 'to_alipay_dict'):
params['student_no'] = self.student_no.to_alipay_dict()
else:
params['student_no'] = self.student_no
if self.users:
if isinstance(self.users, list):
for i in range(0, len(self.users)):
element = self.users[i]
if hasattr(element, 'to_alipay_dict'):
self.users[i] = element.to_alipay_dict()
if hasattr(self.users, 'to_alipay_dict'):
params['users'] = self.users.to_alipay_dict()
else:
params['users'] = self.users
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoEduKtStudentModifyModel()
if 'child_name' in d:
o.child_name = d['child_name']
if 'isv_pid' in d:
o.isv_pid = d['isv_pid']
if 'school_no' in d:
o.school_no = d['school_no']
if 'school_pid' in d:
o.school_pid = d['school_pid']
if 'status' in d:
o.status = d['status']
if 'student_code' in d:
o.student_code = d['student_code']
if 'student_identify' in d:
o.student_identify = d['student_identify']
if 'student_no' in d:
o.student_no = d['student_no']
if 'users' in d:
o.users = d['users']
return o
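# Usage sketch (illustrative only; field values are invented placeholders):
#
#     model = AlipayEcoEduKtStudentModifyModel()
#     model.isv_pid = "2088000000000000"
#     model.student_no = "20230001"
#     payload = model.to_alipay_dict()    # plain dict ready to be serialized
#     clone = AlipayEcoEduKtStudentModifyModel.from_alipay_dict(payload)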
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/AlipayInsSceneProductAgreementQueryModel.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsSceneProductAgreementQueryModel(object):
def __init__(self):
self._agreement_sign_type = None
self._alipay_user_id = None
self._channel = None
self._end_effect_time = None
self._item_id = None
self._sign_user_id = None
self._sign_user_type = None
self._start_effect_time = None
self._status = None
@property
def agreement_sign_type(self):
return self._agreement_sign_type
@agreement_sign_type.setter
def agreement_sign_type(self, value):
self._agreement_sign_type = value
@property
def alipay_user_id(self):
return self._alipay_user_id
@alipay_user_id.setter
def alipay_user_id(self, value):
self._alipay_user_id = value
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def end_effect_time(self):
return self._end_effect_time
@end_effect_time.setter
def end_effect_time(self, value):
self._end_effect_time = value
@property
def item_id(self):
return self._item_id
@item_id.setter
def item_id(self, value):
self._item_id = value
@property
def sign_user_id(self):
return self._sign_user_id
@sign_user_id.setter
def sign_user_id(self, value):
self._sign_user_id = value
@property
def sign_user_type(self):
return self._sign_user_type
@sign_user_type.setter
def sign_user_type(self, value):
self._sign_user_type = value
@property
def start_effect_time(self):
return self._start_effect_time
@start_effect_time.setter
def start_effect_time(self, value):
self._start_effect_time = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.agreement_sign_type:
if hasattr(self.agreement_sign_type, 'to_alipay_dict'):
params['agreement_sign_type'] = self.agreement_sign_type.to_alipay_dict()
else:
params['agreement_sign_type'] = self.agreement_sign_type
if self.alipay_user_id:
if hasattr(self.alipay_user_id, 'to_alipay_dict'):
params['alipay_user_id'] = self.alipay_user_id.to_alipay_dict()
else:
params['alipay_user_id'] = self.alipay_user_id
if self.channel:
if hasattr(self.channel, 'to_alipay_dict'):
params['channel'] = self.channel.to_alipay_dict()
else:
params['channel'] = self.channel
if self.end_effect_time:
if hasattr(self.end_effect_time, 'to_alipay_dict'):
params['end_effect_time'] = self.end_effect_time.to_alipay_dict()
else:
params['end_effect_time'] = self.end_effect_time
if self.item_id:
if hasattr(self.item_id, 'to_alipay_dict'):
params['item_id'] = self.item_id.to_alipay_dict()
else:
params['item_id'] = self.item_id
if self.sign_user_id:
if hasattr(self.sign_user_id, 'to_alipay_dict'):
params['sign_user_id'] = self.sign_user_id.to_alipay_dict()
else:
params['sign_user_id'] = self.sign_user_id
if self.sign_user_type:
if hasattr(self.sign_user_type, 'to_alipay_dict'):
params['sign_user_type'] = self.sign_user_type.to_alipay_dict()
else:
params['sign_user_type'] = self.sign_user_type
if self.start_effect_time:
if hasattr(self.start_effect_time, 'to_alipay_dict'):
params['start_effect_time'] = self.start_effect_time.to_alipay_dict()
else:
params['start_effect_time'] = self.start_effect_time
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsSceneProductAgreementQueryModel()
if 'agreement_sign_type' in d:
o.agreement_sign_type = d['agreement_sign_type']
if 'alipay_user_id' in d:
o.alipay_user_id = d['alipay_user_id']
if 'channel' in d:
o.channel = d['channel']
if 'end_effect_time' in d:
o.end_effect_time = d['end_effect_time']
if 'item_id' in d:
o.item_id = d['item_id']
if 'sign_user_id' in d:
o.sign_user_id = d['sign_user_id']
if 'sign_user_type' in d:
o.sign_user_type = d['sign_user_type']
if 'start_effect_time' in d:
o.start_effect_time = d['start_effect_time']
if 'status' in d:
o.status = d['status']
return o
|
PypiClean
|
/artellapipe-tools-playblastmanager-0.0.7.tar.gz/artellapipe-tools-playblastmanager-0.0.7/artellapipe/tools/playblastmanager/plugins/panzoom.py
|
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "[email protected]"
from tpDcc.libs.qt.widgets import layouts, checkbox
from artellapipe.tools.playblastmanager.core import plugin
class PanZoomWidget(plugin.PlayblastPlugin, object):
"""
Allows user to set playblast display settings
"""
id = 'PanZoom'
label = 'Pan/Zoom'
collapsed = True
def __init__(self, project, config, parent=None):
super(PanZoomWidget, self).__init__(project=project, config=config, parent=parent)
def get_main_layout(self):
main_layout = layouts.HorizontalLayout()
main_layout.setContentsMargins(5, 0, 5, 0)
return main_layout
def ui(self):
super(PanZoomWidget, self).ui()
self.pan_zoom = checkbox.BaseCheckBox('Use pan/zoom from camera')
self.pan_zoom.setChecked(True)
self.main_layout.addWidget(self.pan_zoom)
self.pan_zoom.stateChanged.connect(self.optionsChanged)
def get_inputs(self, as_preset=False):
"""
Overrides base ArtellaPlayblastPlugin get_inputs function
Returns a dict with proper input variables as keys of the dictionary
:return: dict
"""
return {'pan_zoom': self.pan_zoom.isChecked()}
def get_outputs(self):
"""
Overrides base ArtellaPlayblastPlugin get_outputs function
Returns the outputs variables of the Playblast widget as dict
:return: dict
"""
if not self.pan_zoom.isChecked():
return {
'camera_options': {
'panZoomEnabled': 1,
'horizontalPan': 0.0,
'verticalPan': 0.0,
'zoom': 1.0
}
}
else:
return {}
def apply_inputs(self, attrs_dict):
"""
Overrides base ArtellaPlayblastPlugin apply_inputs function
Applies the given attributes dictionary to the pan/zoom checkbox
:param attrs_dict: dict
"""
self.pan_zoom.setChecked(attrs_dict.get('pan_zoom', True))
|
PypiClean
|
/sdksio_juniper_mist_sdk-1.0.0-py3-none-any.whl/mistapi/models/event_12.py
|
from mistapi.api_helper import APIHelper
class Event12(object):
"""Implementation of the 'Event12' model.
TODO: type model description here.
Attributes:
asset_id (uuid|string): uuid of named asset
id (uuid|string): uuid of SDK-client
mac (string): mac address of wifi client or asset
map_id (uuid|string): map id
name (string): name of the client, may be empty
site_id (uuid|string): site id
timestamp (int): timestamp of the event, epoch
trigger (TriggerEnum): enter / exit
mtype (string): TODO: type description here.
zone_id (uuid|string): zone id
"""
# Create a mapping from Model property names to API property names
_names = {
"id": 'id',
"map_id": 'map_id',
"site_id": 'site_id',
"timestamp": 'timestamp',
"trigger": 'trigger',
"mtype": 'type',
"zone_id": 'zone_id',
"asset_id": 'asset_id',
"mac": 'mac',
"name": 'name'
}
_optionals = [
'asset_id',
'mac',
'name',
]
def __init__(self,
id=None,
map_id=None,
site_id=None,
timestamp=None,
trigger=None,
mtype=None,
zone_id=None,
asset_id=APIHelper.SKIP,
mac=APIHelper.SKIP,
name=APIHelper.SKIP):
"""Constructor for the Event12 class"""
# Initialize members of the class
if asset_id is not APIHelper.SKIP:
self.asset_id = asset_id
self.id = id
if mac is not APIHelper.SKIP:
self.mac = mac
self.map_id = map_id
if name is not APIHelper.SKIP:
self.name = name
self.site_id = site_id
self.timestamp = timestamp
self.trigger = trigger
self.mtype = mtype
self.zone_id = zone_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
id = dictionary.get("id") if dictionary.get("id") else None
map_id = dictionary.get("map_id") if dictionary.get("map_id") else None
site_id = dictionary.get("site_id") if dictionary.get("site_id") else None
timestamp = dictionary.get("timestamp") if dictionary.get("timestamp") else None
trigger = dictionary.get("trigger") if dictionary.get("trigger") else None
mtype = dictionary.get("type") if dictionary.get("type") else None
zone_id = dictionary.get("zone_id") if dictionary.get("zone_id") else None
asset_id = dictionary.get("asset_id") if dictionary.get("asset_id") else APIHelper.SKIP
mac = dictionary.get("mac") if dictionary.get("mac") else APIHelper.SKIP
name = dictionary.get("name") if dictionary.get("name") else APIHelper.SKIP
# Return an object of this model
return cls(id,
map_id,
site_id,
timestamp,
trigger,
mtype,
zone_id,
asset_id,
mac,
name)
|
PypiClean
|
/pdf417gen-0.7.1.tar.gz/pdf417gen-0.7.1/README.rst
|
===================================
PDF417 barcode generator for Python
===================================
.. image:: https://img.shields.io/travis/ihabunek/pdf417-py.svg?maxAge=3600&style=flat-square
:target: https://travis-ci.org/ihabunek/pdf417-py
.. image:: https://img.shields.io/badge/author-%40ihabunek-blue.svg?maxAge=3600&style=flat-square
:target: https://twitter.com/ihabunek
.. image:: https://img.shields.io/github/license/ihabunek/pdf417-py.svg?maxAge=3600&style=flat-square
:target: https://opensource.org/licenses/MIT
.. image:: https://img.shields.io/pypi/v/pdf417gen.svg?maxAge=3600&style=flat-square
:target: https://pypi.python.org/pypi/pdf417gen
Easily encode your data into a 2D barcode using the PDF417 format.
.. image:: https://raw.githubusercontent.com/ihabunek/pdf417-py/master/images/1_basic.jpg
Licensed under the MIT License, see `LICENSE <LICENSE>`_.
Installation
------------
Install using pip:
.. code-block::
pip install pdf417gen
CLI
---
The ``pdf417gen`` command can be used to generate a barcode from the command line. It
takes the input either as an argument or from stdin.
.. code-block:: bash
# Show help
pdf417gen encode --help
# Encode given text and display the barcode
pdf417gen encode "Beautiful is better than ugly"
# Encode given text and save barcode to a file (extension determines format)
pdf417gen encode -o barcode.png "Explicit is better than implicit"
# Input from a file
pdf417gen encode < input.txt
# Piped input
python -c "import this" | pdf417gen encode
Usage
-----
Creating bar codes is done in two steps:
* Encode a string to a list of code words using ``encode()``
* Render the barcode using one of the rendering functions: ``render_image()``,
``render_svg()``.
Usage overview:
.. code-block:: python
from pdf417gen import encode, render_image, render_svg
# Some data to encode
text = """Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated."""
# Convert to code words
codes = encode(text)
# Generate barcode as image
image = render_image(codes) # Pillow Image object
image.save('barcode.jpg')
# Generate barcode as SVG
svg = render_svg(codes) # ElementTree object
svg.write("barcode.svg")
Supports strings (unicode in py2) and byte arrays (str in py2):
.. code-block:: python
# These two inputs encode to the same code words
encode(u"love 💔")
encode(b"love \xf0\x9f\x92\x94")
# Default encoding is UTF-8, but you can specify your own
encode(u"love 💔", encoding="utf-8")
Encoding data
-------------
The first step is to encode your data to a list of code words.
.. code-block:: python
encode(data, columns=6, security_level=2)
Columns
~~~~~~~
The bar code size can be customized by defining the number of columns used to
render the data, between 1 and 30; the default value is 6. A bar code can have a
maximum of 90 rows, so for larger data sets you may need to increase the number
of columns to decrease the row count.
.. code-block:: python
codes = encode(text, columns=12)
image = render_image(codes)
image.show()
.. image:: https://raw.githubusercontent.com/ihabunek/pdf417-py/master/images/2_columns.jpg
Security level
~~~~~~~~~~~~~~
Increasing the security level will produce stronger (and more numerous) error
correction codes, making the bar code larger, but less prone to corruption. The
security level can range from 0 to 8, and produces ``2^(level+1)`` error
correction code words, meaning level 0 produces 2 code words and level 8
produces 512. The default security level is 2.
.. code-block:: python
codes = encode(text, columns=12, security_level=6)
image = render_image(codes)
image.show()
.. image:: https://raw.githubusercontent.com/ihabunek/pdf417-py/master/images/3_security_level.jpg
Render image
------------
The ``render_image`` function takes the following options:
* ``scale`` - module width, in pixels (default: 3)
* ``ratio`` - module height to width ratio (default: 3)
* ``padding`` - image padding, in pixels (default: 20)
* ``fg_color`` - foreground color (default: ``#000000``)
* ``bg_color`` - background color (default: ``#FFFFFF``)
.. note::
A module is the smallest element of a barcode, analogous to a pixel. Modules
in a PDF417 bar code are tall and narrow.
The function returns a Pillow Image_ object containing the barcode.
Colors can be specified as hex codes or using HTML color names.
.. code-block:: python
codes = encode(text, columns=3)
image = render_image(codes, scale=5, ratio=2, padding=5, fg_color="Indigo", bg_color="#ddd")
image.show()
.. image:: https://raw.githubusercontent.com/ihabunek/pdf417-py/master/images/4_rendering.jpg
Render SVG
----------
The ``render_svg`` function takes the following options:
* ``scale`` - module width, in pixels (default: 3)
* ``ratio`` - module height to width ratio (default: 3)
* ``padding`` - image padding, in pixels (default: 20)
* ``color`` - foreground color (default: `#000000`)
The function returns an ElementTree_ object containing the barcode in SVG format.
Unlike ``render_image``, this function does not take a background color option.
The background is left transparent.
.. code-block:: python
codes = encode(text, columns=3)
svg = render_svg(codes, scale=5, ratio=2, color="Seaweed")
svg.write('barcode.svg')
See also
--------
* pdf417-php_ - a PHP implementation
* golang-pdf417_ - a Go implementation
.. _pdf417-php: https://github.com/ihabunek/pdf417-php
.. _golang-pdf417: https://github.com/ruudk/golang-pdf417
.. _ElementTree: https://docs.python.org/3.5/library/xml.etree.elementtree.html#elementtree-objects
.. _Image: https://pillow.readthedocs.io/en/3.2.x/reference/Image.html
|
PypiClean
|
/pysigma_backend_elasticsearch-1.0.5-py3-none-any.whl/sigma/pipelines/elasticsearch/windows.py
|
from sigma.pipelines.common import generate_windows_logsource_items
from sigma.processing.transformations import FieldMappingTransformation, AddFieldnamePrefixTransformation
from sigma.processing.conditions import LogsourceCondition, IncludeFieldCondition, FieldNameProcessingItemAppliedCondition
from sigma.processing.pipeline import ProcessingItem, ProcessingPipeline
ecs_windows_variable_mappings = {
"FileVersion": (
("category", "process_creation", "process.pe.file_version"),
("category", "image_load", "file.pe.file_version"),
),
"Description": (
("category", "process_creation", "process.pe.description"),
("category", "image_load", "file.pe.description"),
("category", "sysmon_error", "winlog.event_data.Description"),
),
"Product": (
("category", "process_creation", "process.pe.product"),
("category", "image_load", "file.pe.product"),
),
"Company": (
("category", "process_creation", "process.pe.company"),
("category", "image_load", "file.pe.company"),
),
"OriginalFileName": (
("category", "process_creation", "process.pe.original_file_name"),
("category", "image_load", "file.pe.original_file_name"),
),
"CommandLine": (
("category", "process_creation", "process.command_line"),
("service", "security", "process.command_line"),
("service", "powershell-classic", "powershell.command.value"),
),
"Protocol": (
("category", "network_connection", "network.transport"),
),
"Initiated": (
("category", "network_connection", "network.direction"),
),
"Signature": (
("category", "driver_loaded", "file.code_signature.subject_name"),
("category", "image_loaded", "file.code_signature.subject_name"),
),
"EngineVersion": (
("service", "powershell-classic", "powershell.engine.version"),
),
"HostVersion": (
("service", "powershell-classic", "powershell.process.executable_version"),
),
"SubjectLogonId": (
("service", "security", "winlog.logon.id"),
),
"ServiceName": (
("service", "security", "service.name"),
),
"SubjectDomainName": (
("service", "security", "user.domain"),
),
"SubjectUserName": (
("service", "security", "user.name"),
),
"SubjectUserSid": (
("service", "security", "user.id"),
),
"TargetLogonId": (
("service", "security", "winlog.logon.id"),
),
}
def ecs_windows():
return ProcessingPipeline(
name="Elastic Common Schema (ECS) Windows log mappings from Winlogbeat from version 7",
priority=20,
allowed_backends=("elasticsearch", "opensearch"),
items=generate_windows_logsource_items("winlog.channel", "{source}") + [ # Variable field mappings depending on category/service
ProcessingItem(
identifier=f"elasticsearch_windows-{field}-{logsrc_field}-{logsrc}",
transformation=FieldMappingTransformation({
field: mapped
}),
rule_conditions=[
LogsourceCondition(**{
"product": "windows",
logsrc_field: logsrc,
}),
]
)
for field, mappings in ecs_windows_variable_mappings.items()
for (logsrc_field, logsrc, mapped) in mappings
] + [
ProcessingItem( # Field mappings
identifier="ecs_windows_field_mapping",
transformation=FieldMappingTransformation({
"EventID": "event.code",
"Channel": "winlog.channel",
"Provider_Name": "winlog.provider_name",
"ComputerName": "winlog.computer_name",
"FileName": "file.path",
"ProcessGuid": "process.entity_id",
"ProcessId": "process.pid",
"Image": "process.executable",
"CurrentDirectory": "process.working_directory",
"ParentProcessGuid": "process.parent.entity_id",
"ParentProcessId": "process.parent.pid",
"ParentImage": "process.parent.executable",
"ParentCommandLine": "process.parent.command_line",
"TargetFilename": "file.path",
"SourceIp": "source.ip",
"SourceHostname": "source.domain",
"SourcePort": "source.port",
"DestinationIp": "destination.ip",
"DestinationHostname": "destination.domain",
"DestinationPort": "destination.port",
"DestinationPortName": "network.protocol",
"ImageLoaded": "file.path",
"Signed": "file.code_signature.signed",
"SignatureStatus": "file.code_signature.status",
"SourceProcessGuid": "process.entity_id",
"SourceProcessId": "process.pid",
"SourceImage": "process.executable",
"Device": "file.path",
"SourceThreadId": "process.thread.id",
"TargetObject": "registry.path",
"PipeName": "file.name",
"Destination": "process.executable",
"QueryName": "dns.question.name",
"QueryStatus": "sysmon.dns.status",
"IsExecutable": "sysmon.file.is_executable",
"Archived": "sysmon.file.archived",
"CommandName": "powershell.command.name",
"CommandPath": "powershell.command.path",
"CommandType": "powershell.command.type",
"HostApplication": "process.command_line",
"HostId": "process.entity_id",
"HostName": "process.title",
"NewEngineState": "powershell.engine.new_state",
"PipelineId": "powershell.pipeline_id",
"PreviousEngineState": "powershell.engine.previous_state",
"RunspaceId": "powershell.runspace_id",
"ScriptName": "file.path",
"SequenceNumber": "event.sequence",
"NewProviderState": "powershell.provider.new_state",
"ProviderName": "powershell.provider.name",
"MessageNumber": "powershell.sequence",
"MessageTotal": "powershell.total",
"ScriptBlockText": "powershell.file.script_block_text",
"ScriptBlockId": "powershell.file.script_block_id",
"AccountDomain": "user.domain",
"AccountName": "user.name",
"Application": "process.executable",
"ClientAddress": "source.ip",
"ClientName": "source.domain",
"DestAddress": "destination.ip",
"DestPort": "destination.port",
"IpAddress": "source.ip",
"IpPort": "source.port",
"NewProcessId": "process.pid",
"NewProcessName": "process.executable",
"ParentProcessName": "process.parent.name",
"ProcessName": "process.executable",
"SourceAddress": "source.ip",
"TargetDomainName": "user.domain",
"WorkstationName": "source.domain",
}),
rule_conditions=[
LogsourceCondition(product="windows")
],
),
ProcessingItem( # Prepend each field that was not processed by previous field mapping transformation with "winlog.event_data."
identifier="ecs_windows_winlog_eventdata_prefix",
transformation=AddFieldnamePrefixTransformation(
"winlog.event_data."),
field_name_conditions=[
FieldNameProcessingItemAppliedCondition(
"ecs_windows_field_mapping"),
IncludeFieldCondition(fields=["\\w+\\."], type="re"),
],
field_name_condition_negation=True,
field_name_condition_linking=any,
rule_conditions=[
LogsourceCondition(product="windows")
],
)
],
)
def ecs_windows_old():
return ProcessingPipeline(
name="Elastic Common Schema (ECS) Windows log mappings from Winlogbeat up to version 6",
priority=20,
allowed_backends=("elasticsearch", "opensearch"),
items=generate_windows_logsource_items("winlog.channel", "{source}") + [
ProcessingItem( # Field mappings
identifier="ecs_windows_field_mapping",
transformation=FieldMappingTransformation({
"EventID": "event_id",
"Channel": "winlog.channel",
}),
rule_conditions=[
LogsourceCondition(product="windows")
],
),
ProcessingItem( # Prepend each field that was not processed by previous field mapping transformation with "winlog.event_data."
identifier="ecs_windows_eventdata_prefix",
transformation=AddFieldnamePrefixTransformation("event_data."),
field_name_conditions=[
FieldNameProcessingItemAppliedCondition(
"ecs_windows_field_mapping"),
IncludeFieldCondition(fields=["\\w+\\."], type="re"),
],
field_name_condition_negation=True,
field_name_condition_linking=any,
rule_conditions=[
LogsourceCondition(product="windows")
],
)
],
)
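# Usage sketch (illustrative only; the backend class name and import path are
# assumptions based on pySigma-backend-elasticsearch conventions -- check the
# installed version for the exact names):
#
#     from sigma.collection import SigmaCollection
#     from sigma.backends.elasticsearch import LuceneBackend
#
#     backend = LuceneBackend(processing_pipeline=ecs_windows())
#     queries = backend.convert(SigmaCollection.from_yaml(rule_yaml))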
|
PypiClean
|
/aliyun-python-sdk-vpc-3.0.45.tar.gz/aliyun-python-sdk-vpc-3.0.45/aliyunsdkvpc/request/v20160428/ModifyExpressCloudConnectionAttributeRequest.py
|
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class ModifyExpressCloudConnectionAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ModifyExpressCloudConnectionAttribute','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_EccId(self): # String
return self.get_query_params().get('EccId')
def set_EccId(self, EccId): # String
self.add_query_param('EccId', EccId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_CeIp(self): # String
return self.get_query_params().get('CeIp')
def set_CeIp(self, CeIp): # String
self.add_query_param('CeIp', CeIp)
def get_BgpAs(self): # String
return self.get_query_params().get('BgpAs')
def set_BgpAs(self, BgpAs): # String
self.add_query_param('BgpAs', BgpAs)
def get_PeIp(self): # String
return self.get_query_params().get('PeIp')
def set_PeIp(self, PeIp): # String
self.add_query_param('PeIp', PeIp)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
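# Usage sketch (illustrative only; credentials, region and the ECC id are
# placeholders -- AcsClient comes from aliyunsdkcore.client):
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
#     request = ModifyExpressCloudConnectionAttributeRequest()
#     request.set_EccId("ecc-xxxxxxxx")
#     request.set_Name("my-connection")
#     response = client.do_action_with_exception(request)   # raw JSON bytes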
|
PypiClean
|
/js.angular-1.1.4.tar.gz/js.angular-1.1.4/js/angular/resources/i18n/angular-locale_en-bb.js
|
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": {
"0": "AM",
"1": "PM"
},
"DAY": {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday"
},
"MONTH": {
"0": "January",
"1": "February",
"2": "March",
"3": "April",
"4": "May",
"5": "June",
"6": "July",
"7": "August",
"8": "September",
"9": "October",
"10": "November",
"11": "December"
},
"SHORTDAY": {
"0": "Sun",
"1": "Mon",
"2": "Tue",
"3": "Wed",
"4": "Thu",
"5": "Fri",
"6": "Sat"
},
"SHORTMONTH": {
"0": "Jan",
"1": "Feb",
"2": "Mar",
"3": "Apr",
"4": "May",
"5": "Jun",
"6": "Jul",
"7": "Aug",
"8": "Sep",
"9": "Oct",
"10": "Nov",
"11": "Dec"
},
"fullDate": "EEEE, MMMM d, y",
"longDate": "MMMM d, y",
"medium": "MMM d, y h:mm:ss a",
"mediumDate": "MMM d, y",
"mediumTime": "h:mm:ss a",
"short": "M/d/yy h:mm a",
"shortDate": "M/d/yy",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "$",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": {
"0": {
"gSize": 3,
"lgSize": 3,
"macFrac": 0,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
"1": {
"gSize": 3,
"lgSize": 3,
"macFrac": 0,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "(\u00A4",
"negSuf": ")",
"posPre": "\u00A4",
"posSuf": ""
}
}
},
"id": "en-bb",
"pluralCat": function (n) { if (n == 1) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
|
PypiClean
|
/videosplit-0.4.1.tar.gz/videosplit-0.4.1/README.md
|
Videosplit is a utility to extract frames from a given video.
It can extract:
- n number of frames (equally spaced throughout the video)
- relevant* frames
- frames separated by t seconds
Example usage:
```
from videosplit import VideoSplit
videosplit = VideoSplit()
frames = videosplit.get_relevant('input.mp4') # frames = [input01.jpg, input02.jpg, ...]
frames = videosplit.get_n_frames('input.mp4', 10) # frames = [input01.jpg,..., input10.jpg]
frames = videosplit.get_interval('input.mp4', 10) # one frame every 10 seconds
```
Note:
The filenames of the jpg files are based on the name of the video file. Therefore, if you make more than one call on the same file, the jpg files may be overwritten. To solve this, specify an output name:
```
frames = videosplit.get_relevant("input.mp4", "tmp") # frames = [tmp01.jpg,...]
```
*Currently, relevant frames are extracted by finding the I-frames of the video (https://www.webopedia.com/TERM/I/I_frame.html). Frames could be extracted based on "scene-changes" but testing showed that it did not return relevant results (one frame out of a 9-minute video, for example).
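For reference, I-frame extraction of this kind is typically done by driving ffmpeg with a `select` filter. The sketch below is illustrative only (it is not necessarily the exact command videosplit runs) and assumes ffmpeg is on the PATH:
```python
import subprocess

# Keep only intra-coded (I) frames and write one JPEG per selected frame.
subprocess.run([
    "ffmpeg", "-i", "input.mp4",
    "-vf", "select='eq(pict_type,I)'",  # pass frames whose picture type is I
    "-vsync", "vfr",                    # avoid duplicating frames to fill gaps
    "tmp%02d.jpg",
], check=True)
```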
|
PypiClean
|
/django-adminbrowse-0.1.2.tar.gz/django-adminbrowse-0.1.2/adminbrowse/columns.py
|
from django.utils.text import force_unicode
from django.utils.translation import ugettext as _
from adminbrowse.base import ChangeListModelFieldColumn
class URLColumn(ChangeListModelFieldColumn):
"""Changelist column that links to the URL from the specified field.
`model` is the model class for which a changelist is being rendered,
and `name` is the name of the field containing a URL. If not provided,
`short_description` will be set to the field's `verbose_name`.
If an instance's URL field is empty, the column will display the value
of `default`, which defaults to the empty string.
The rendered link will have class="..." and target="..." attributes
defined by the `target` and `classes` arguments, which default to
'_blank' and 'external', respectively. Include the `adminbrowse`
CSS file in the ModelAdmin's `Media` definition to style this default
class with an "external link" icon.
This class is aliased as `adminbrowse.link_to_url` for better readability
in `ModelAdmin` code.
"""
allow_tags = True
def __init__(self, model, name, short_description=None, default="",
target='_blank', classes='external'):
ChangeListModelFieldColumn.__init__(self, model, name,
short_description, default)
self.target = target
if isinstance(classes, basestring):
classes = classes.split()
self.classes = list(classes)
def __call__(self, obj):
value = getattr(obj, self.field_name)
if value:
title = self.get_title(obj, value)
classes = " ".join(self.classes)
html = '<a href="%s" target="%s" class="%s" title="%s">%s</a>'
return html % (value, self.target, classes, title, value)
else:
return self.default
def get_title(self, obj, value):
if self.target == '_blank':
return _("Open URL in a new window")
else:
return _("Open URL")
class TruncatedFieldColumn(ChangeListModelFieldColumn):
"""
Changelist column that truncates the value of a field to the specified
length.
`model` is the model class for which a changelist is being rendered,
and `name` is the name of the field to render. The string value of the
field will be truncated to the length given by `max_length` (required).
If not provided, `short_description` will be set to the field's
`verbose_name`.
If an instance's field is empty, the column will display the value of
`default`, which defaults to the empty string.
The `tail` argument specifies the final truncation string, and defaults to
an ellipsis.
This class is aliased as `adminbrowse.truncated_field` for better
readability in `ModelAdmin` code.
"""
def __init__(self, model, name, max_length, short_description=None,
default="", tail=u"…"):
ChangeListModelFieldColumn.__init__(self, model, name,
short_description, default)
self.max_length = max_length
self.tail = tail
def __call__(self, obj):
value = getattr(obj, self.field_name)
if value:
text = force_unicode(value)
if len(text) > self.max_length:
text = text[:self.max_length] + self.tail
return text
else:
return self.default
link_to_url = URLColumn
truncated_field = TruncatedFieldColumn
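# Usage sketch (illustrative only; `Article` is a hypothetical model with
# `homepage` and `summary` fields):
#
#     from django.contrib import admin
#     from adminbrowse import link_to_url, truncated_field
#
#     class ArticleAdmin(admin.ModelAdmin):
#         list_display = ['title',
#                         link_to_url(Article, 'homepage'),
#                         truncated_field(Article, 'summary', max_length=80)]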
|
PypiClean
|
/tweetynet-0.9.0.tar.gz/tweetynet-0.9.0/article/src/article/plot/error_curve.py
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# max width in inches is 7.5
# https://journals.plos.org/ploscompbiol/s/figures
FIGSIZE = (7.5, 3.75)
DPI = 300
def across_animals(curve_df,
suptitle=None,
ax1_ylim=(0, 8),
ax2_ylim=(0.1, 0.65)):
TRAIN_DUR_IND_MAP = {
k: v for k, v in zip(
sorted(curve_df['train_set_dur'].unique()),
sorted(curve_df['train_set_dur_ind'].unique())
)
}
fig = plt.figure(constrained_layout=True, figsize=FIGSIZE, dpi=DPI)
gs = fig.add_gridspec(nrows=4, ncols=2, hspace=0.005)
ax_arr = []
ax_arr.append(fig.add_subplot(gs[0, 0]))
ax_arr.append(fig.add_subplot(gs[1:, 0]))
ax_arr.append(fig.add_subplot(gs[:, 1]))
ax_arr = np.asarray(ax_arr)
ax_arr[0].get_shared_x_axes().join(*ax_arr[:2].tolist())
# for col in range(2):
ax_arr[0].spines['bottom'].set_visible(False)
ax_arr[1].spines['top'].set_visible(False)
ax_arr[1].xaxis.tick_bottom()
metric_list = ['avg_error', 'avg_segment_error_rate']
ylabels = ['Frame error (%)', 'Segment error rate\n(edits per segment)']
for col, (metric, ylabel) in enumerate(zip(metric_list, ylabels)):
if col == 0:
ax = ax_arr[1]
else:
ax = ax_arr[2]
if col == 1:
legend = 'full'
else:
legend = False
sns.lineplot(x='train_set_dur_ind',
y=metric,
hue='animal_id',
data=curve_df,
ci='sd',
palette='colorblind',
linewidth=2,
ax=ax,
legend=legend)
sns.lineplot(x='train_set_dur_ind',
y=metric,
linestyle='dashed',
color='k',
linewidth=4,
data=curve_df, ci=None, label='mean', ax=ax, legend=legend)
ax.set_ylabel('')
ax.set_xlabel('Training set duration (s)', fontsize=10)
ax.set_xticks(list(TRAIN_DUR_IND_MAP.values()))
ax.set_xticklabels(sorted(curve_df['train_set_dur'].unique().astype(int)), rotation=45)
ax_arr[0].set_xticklabels([])
ax_arr[0].set_xlabel('')
# zoom-in / limit the view to different portions of the data
ax_arr[0].set_ylim(40, 100)
# ax_arr[1, 0].set_ylim(0, 14)
ax_arr[1].set_ylim(ax1_ylim)
ax_arr[2].set_ylim(ax2_ylim)
bigax_col0 = fig.add_subplot(gs[:, 0], frameon=False)
bigax_col1 = fig.add_subplot(gs[:, 1], frameon=False)
labelpads = (2, 10)
panel_labels = ['A', 'B']
for ylabel, labelpad, panel_label, ax in zip(ylabels,
labelpads,
panel_labels,
[bigax_col0, bigax_col1]):
ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
ax.grid(False)
ax.set_ylabel(ylabel, fontsize=10, labelpad=labelpad)
ax.text(-0.2, 1., panel_label, transform=ax.transAxes,
fontsize=12, fontweight='bold', va='top', ha='right')
# get handles from right axes legend, then remove and re-create outside
handles, _ = ax_arr[2].get_legend_handles_labels()
# [ha.set_linewidth(2) for ha in handles ]
ax_arr[2].get_legend().remove()
bigax_col1.legend(handles=handles, bbox_to_anchor=(1.35, 1))
fig.set_constrained_layout_pads(hspace=-0.05, wspace=0.0)
if suptitle is not None:
fig.suptitle(suptitle)
return fig
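# Usage sketch (illustrative only; assumes `curve_df` is a pandas DataFrame with
# the columns referenced above: 'animal_id', 'train_set_dur', 'train_set_dur_ind',
# 'avg_error' and 'avg_segment_error_rate'):
#
#     fig = across_animals(curve_df, suptitle="Learning curves")
#     fig.savefig("error_curve.png", dpi=DPI)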
|
PypiClean
|
/dbt_core-1.7.0b2-py3-none-any.whl/dbt/deps/git.py
|
import os
from typing import List, Optional
from dbt.clients import git, system
from dbt.config.project import PartialProject, Project
from dbt.config.renderer import PackageRenderer
from dbt.contracts.project import (
ProjectPackageMetadata,
GitPackage,
)
from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path
from dbt.exceptions import ExecutableError, MultipleVersionGitDepsError
from dbt.events.functions import fire_event, warn_or_error
from dbt.events.types import EnsureGitInstalled, DepsUnpinned
from dbt.utils import md5
def md5sum(s: str):
return md5(s, "latin-1")
class GitPackageMixin:
def __init__(self, git: str) -> None:
super().__init__()
self.git = git
@property
def name(self):
return self.git
def source_type(self) -> str:
return "git"
class GitPinnedPackage(GitPackageMixin, PinnedPackage):
def __init__(
self,
git: str,
revision: str,
warn_unpinned: bool = True,
subdirectory: Optional[str] = None,
) -> None:
super().__init__(git)
self.revision = revision
self.warn_unpinned = warn_unpinned
self.subdirectory = subdirectory
self._checkout_name = md5sum(self.git)
def get_version(self):
return self.revision
def get_subdirectory(self):
return self.subdirectory
def nice_version_name(self):
if self.revision == "HEAD":
return "HEAD (default revision)"
else:
return "revision {}".format(self.revision)
def _checkout(self):
"""Performs a shallow clone of the repository into the downloads
directory. This function can be called repeatedly. If the project has
already been checked out at this version, it will be a no-op. Returns
the path to the checked out directory."""
try:
dir_ = git.clone_and_checkout(
self.git,
get_downloads_path(),
revision=self.revision,
dirname=self._checkout_name,
subdirectory=self.subdirectory,
)
except ExecutableError as exc:
if exc.cmd and exc.cmd[0] == "git":
fire_event(EnsureGitInstalled())
raise
return os.path.join(get_downloads_path(), dir_)
def _fetch_metadata(
self, project: Project, renderer: PackageRenderer
) -> ProjectPackageMetadata:
path = self._checkout()
if (self.revision == "HEAD" or self.revision in ("main", "master")) and self.warn_unpinned:
warn_or_error(DepsUnpinned(git=self.git))
partial = PartialProject.from_project_root(path)
return partial.render_package_metadata(renderer)
def install(self, project, renderer):
dest_path = self.get_installation_path(project, renderer)
if os.path.exists(dest_path):
if system.path_is_symlink(dest_path):
system.remove_file(dest_path)
else:
system.rmdir(dest_path)
system.move(self._checkout(), dest_path)
class GitUnpinnedPackage(GitPackageMixin, UnpinnedPackage[GitPinnedPackage]):
def __init__(
self,
git: str,
revisions: List[str],
warn_unpinned: bool = True,
subdirectory: Optional[str] = None,
) -> None:
super().__init__(git)
self.revisions = revisions
self.warn_unpinned = warn_unpinned
self.subdirectory = subdirectory
@classmethod
def from_contract(cls, contract: GitPackage) -> "GitUnpinnedPackage":
revisions = contract.get_revisions()
# we want to map None -> True
warn_unpinned = contract.warn_unpinned is not False
return cls(
git=contract.git,
revisions=revisions,
warn_unpinned=warn_unpinned,
subdirectory=contract.subdirectory,
)
def all_names(self) -> List[str]:
if self.git.endswith(".git"):
other = self.git[:-4]
else:
other = self.git + ".git"
return [self.git, other]
def incorporate(self, other: "GitUnpinnedPackage") -> "GitUnpinnedPackage":
warn_unpinned = self.warn_unpinned and other.warn_unpinned
return GitUnpinnedPackage(
git=self.git,
revisions=self.revisions + other.revisions,
warn_unpinned=warn_unpinned,
subdirectory=self.subdirectory,
)
def resolved(self) -> GitPinnedPackage:
requested = set(self.revisions)
if len(requested) == 0:
requested = {"HEAD"}
elif len(requested) > 1:
raise MultipleVersionGitDepsError(self.git, requested)
return GitPinnedPackage(
git=self.git,
revision=requested.pop(),
warn_unpinned=self.warn_unpinned,
subdirectory=self.subdirectory,
)
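# Usage sketch (illustrative only; mirrors how a single-revision git dependency
# is pinned by the classes above):
#
#     unpinned = GitUnpinnedPackage(
#         git="https://github.com/org/repo.git",
#         revisions=["1.2.0"],
#     )
#     pinned = unpinned.resolved()        # GitPinnedPackage at revision "1.2.0"
#     pinned.nice_version_name()          # -> 'revision 1.2.0'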
|
PypiClean
|
/azure-ai-textanalytics-5.3.0.zip/azure-ai-textanalytics-5.3.0/azure/ai/textanalytics/_generated/v3_0/models/__init__.py
|
from ._models_py3 import DetectedLanguage
from ._models_py3 import DocumentEntities
from ._models_py3 import DocumentError
from ._models_py3 import DocumentKeyPhrases
from ._models_py3 import DocumentLanguage
from ._models_py3 import DocumentLinkedEntities
from ._models_py3 import DocumentSentiment
from ._models_py3 import DocumentStatistics
from ._models_py3 import EntitiesResult
from ._models_py3 import Entity
from ._models_py3 import EntityLinkingResult
from ._models_py3 import ErrorResponse
from ._models_py3 import InnerError
from ._models_py3 import KeyPhraseResult
from ._models_py3 import LanguageBatchInput
from ._models_py3 import LanguageInput
from ._models_py3 import LanguageResult
from ._models_py3 import LinkedEntity
from ._models_py3 import Match
from ._models_py3 import MultiLanguageBatchInput
from ._models_py3 import MultiLanguageInput
from ._models_py3 import RequestStatistics
from ._models_py3 import SentenceSentiment
from ._models_py3 import SentimentConfidenceScorePerLabel
from ._models_py3 import SentimentResponse
from ._models_py3 import TextAnalyticsError
from ._models_py3 import TextAnalyticsWarning
from ._text_analytics_client_enums import DocumentSentimentValue
from ._text_analytics_client_enums import ErrorCodeValue
from ._text_analytics_client_enums import InnerErrorCodeValue
from ._text_analytics_client_enums import SentenceSentimentValue
from ._text_analytics_client_enums import WarningCodeValue
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
'DetectedLanguage',
'DocumentEntities',
'DocumentError',
'DocumentKeyPhrases',
'DocumentLanguage',
'DocumentLinkedEntities',
'DocumentSentiment',
'DocumentStatistics',
'EntitiesResult',
'Entity',
'EntityLinkingResult',
'ErrorResponse',
'InnerError',
'KeyPhraseResult',
'LanguageBatchInput',
'LanguageInput',
'LanguageResult',
'LinkedEntity',
'Match',
'MultiLanguageBatchInput',
'MultiLanguageInput',
'RequestStatistics',
'SentenceSentiment',
'SentimentConfidenceScorePerLabel',
'SentimentResponse',
'TextAnalyticsError',
'TextAnalyticsWarning',
'DocumentSentimentValue',
'ErrorCodeValue',
'InnerErrorCodeValue',
'SentenceSentimentValue',
'WarningCodeValue',
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
PypiClean
|
/sycc-0.8.5.tar.gz/sycc-0.8.5/sycc_/setuptools/_distutils/command/check.py
|
from email.utils import getaddresses
from distutils.core import Command
from distutils.errors import DistutilsSetupError
try:
# docutils is installed
from docutils.utils import Reporter
from docutils.parsers.rst import Parser
from docutils import frontend
from docutils import nodes
class SilentReporter(Reporter):
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
self.messages = []
super().__init__(source, report_level, halt_level, stream,
debug, encoding, error_handler)
def system_message(self, level, message, *children, **kwargs):
self.messages.append((level, message, children, kwargs))
return nodes.system_message(message, level=level,
type=self.levels[level],
*children, **kwargs)
HAS_DOCUTILS = True
except Exception:
# Catch all exceptions because exceptions besides ImportError probably
# indicate that docutils is not ported to Py3k.
HAS_DOCUTILS = False
class check(Command):
"""This command checks the meta-data of the package.
"""
description = ("perform some checks on the package")
user_options = [('metadata', 'm', 'Verify meta-data'),
('restructuredtext', 'r',
('Checks if long string meta-data syntax '
'are reStructuredText-compliant')),
('strict', 's',
'Will exit with an error if a check fails')]
boolean_options = ['metadata', 'restructuredtext', 'strict']
def initialize_options(self):
"""Sets default values for options."""
self.restructuredtext = 0
self.metadata = 1
self.strict = 0
self._warnings = 0
def finalize_options(self):
pass
def warn(self, msg):
"""Counts the number of warnings that occurs."""
self._warnings += 1
return Command.warn(self, msg)
def run(self):
"""Runs the command."""
# perform the various tests
if self.metadata:
self.check_metadata()
if self.restructuredtext:
if HAS_DOCUTILS:
self.check_restructuredtext()
elif self.strict:
raise DistutilsSetupError('The docutils package is needed.')
# let's raise an error in strict mode, if we have at least
# one warning
if self.strict and self._warnings > 0:
raise DistutilsSetupError('Please correct your package.')
def check_metadata(self):
"""Ensures that all required elements of meta-data are supplied.
Required fields:
name, version
Warns if any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in 'name', 'version':
if not getattr(metadata, attr, None):
missing.append(attr)
if missing:
self.warn("missing required meta-data: %s" % ', '.join(missing))
def check_restructuredtext(self):
"""Checks if the long string fields are reST-compliant."""
data = self.distribution.get_long_description()
for warning in self._check_rst_data(data):
line = warning[-1].get('line')
if line is None:
warning = warning[1]
else:
warning = '%s (line %s)' % (warning[1], line)
self.warn(warning)
def _check_rst_data(self, data):
"""Returns warnings when the provided data doesn't compile."""
# the include and csv_table directives need this to be a path
source_path = self.distribution.script_name or 'setup.py'
parser = Parser()
settings = frontend.OptionParser(components=(Parser,)).get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError as e:
reporter.messages.append(
(-1, 'Could not finish the parsing: %s.' % e, '', {}))
return reporter.messages
|
PypiClean
|
/gerbolyze-3.1.5.tar.gz/gerbolyze-3.1.5/README.rst
|
Gerbolyze high-fidelity SVG/PNG/JPG to PCB converter
====================================================
.. note::
The command-line usage and SVG template format of gerbolyze changed between v2.0 and v3.0. You can find details on
the new format below under command_line_usage_
Gerbolyze renders SVG vector and PNG/JPG raster images into existing gerber PCB manufacturing files.
Vector data from SVG files is rendered losslessly *without* an intermediate rasterization/revectorization step.
Still, gerbolyze supports (almost) the full SVG 1.1 spec including complex, self-intersecting paths with holes,
patterns, dashes and transformations.
Raster images can either be vectorized through contour tracing (like gerbolyze v1.0 did) or they can be embedded using
high-resolution grayscale emulation while (mostly) guaranteeing trace/space design rules.
Try gerbolyze online at https://dyna.kokoroyukuma.de/gerboweb
.. figure:: pics/pcbway_sample_02_small.jpg
:width: 800px
Drawing by `トーコ Toko <https://twitter.com/fluffy2038/status/1317231121269104640>`__ converted using Gerbolyze and printed at PCBWay.
Tooling for PCB art is quite limited in both open source and closed source ecosystems. Something as simple as putting a
pretty picture on a PCB can be an extremely tedious task. Depending on the PCB tool used, various arcane incantations
may be necessary and even modestly complex images will slow down most PCB tools to a crawl.
Gerbolyze solves this problem in a toolchain-agnostic way by directly vectorizing SVG vector and PNG or JPG bitmap files
onto existing gerber layers. Gerbolyze processes any spec-compliant SVG and "gerbolyzes" SVG vector data into a Gerber
spec-compliant form. Gerbolyze has been tested against both the leading open-source KiCAD toolchain and the
industry-standard Altium Designer. Gerbolyze is written with performance in mind and will happily vectorize tens of
thousands of primitives, generating tens of megabytes of gerber code without crapping itself. With gerbolyze you can
finally be confident that your PCB fab's toolchain will fall over before yours does if you overdo it with the high-poly
anime silkscreen.
Gerbolyze is based on gerbonara_.
.. image:: pics/process-overview.png
:width: 800px
.. contents::
Tl;dr: Produce high-quality artistic PCBs in three easy steps!
--------------------------------------------------------------
Gerbolyze works in three steps.
1. Generate a scale-accurate template of the finished PCB from your CAD tool's gerber output:
.. code::
$ gerbolyze template --top template_top.svg [--bottom template_bottom.svg] my_gerber_dir
2. Load the resulting template image into Inkscape_ or another SVG editing program. Put your artwork on the appropriate SVG
layer. Dark colors become filled gerber primitives, bright colors become unfilled primitives. You can directly put
raster images (PNG/JPG) into this SVG as well, just position and scale them like everything else. SVG clips work for
images, too. Masks are not supported.
3. Vectorize the edited SVG template image directly into the PCB's gerber files:
.. code::
$ gerbolyze paste --top template_top_edited.svg [--bottom ...] my_gerber_dir output_gerber_dir
Quick Start Installation (Any Platform)
---------------------------------------
.. code-block:: shell
python -m pip install --user gerbolyze
To uninstall, run
.. code-block:: shell
python -m pip uninstall gerbolyze gerbonara resvg-wasi svg-flatten-wasi
To update, run
.. code-block:: shell
python -m pip install --user --upgrade --upgrade-strategy eager gerbolyze
Speeding up gerbolyze using natively-built binaries
---------------------------------------------------
The pip installation above installs gerbolyze's binary dependency resvg and gerbolyze's svg-flatten utility as pre-built cross-platform
WASM binaries. When you first run gerbolyze, it will take some time (~30s) to link these binaries for your system. The
output is cached, so any future run is going to be fast.
WASM is slower than natively-built binaries. To speed up gerbolyze, you can natively build its two binary dependencies:
1. Install resvg natively using rust's cargo package manager: ``cargo install resvg``
2. Install gerbolyze's svg-flatten utility natively. You can get pre-built binaries from gerbolyze's gitlab CI jobs `at
this link <https://gitlab.com/gerbolyze/gerbolyze/-/pipelines?scope=tags&page=1>`__ by clicking the three dots on the
right next to the version you want. These pre-built binaries should work on any x86_64 linux since they are
statically linked. You can also build svg-flatten yourself by running ``make`` inside the ``svg-flatten`` folder from
a gerbolyze checkout.
Gerbolyze will pick up these binaries when installed in your ``$PATH``. resvg is also picked up when it is installed by
cargo in your home's ``~/.cargo``, even if it's not in your ``$PATH``. You can override the resvg, usvg or svg-flatten
binary that gerbolyze uses by giving it the absolute path to a binary in the ``$RESVG``, ``$USVG`` and ``$SVG_FLATTEN``
environment variables.
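For example, to point gerbolyze at locally built binaries for one shell session, something like the following could work. The
paths below are hypothetical placeholders, not part of gerbolyze; adjust them to wherever your binaries actually live.

.. code-block:: shell

    # Hypothetical paths -- adjust to your own build/install locations.
    export RESVG="$HOME/.cargo/bin/resvg"
    export USVG="$HOME/.cargo/bin/usvg"
    export SVG_FLATTEN="$HOME/src/gerbolyze/svg-flatten/build/svg-flatten"
    # Any subsequent gerbolyze invocation in this shell picks up these overrides.
    gerbolyze paste artwork.svg my_gerber_dir output_gerber_dir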
Build from source (any distro)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: shell
git clone --recurse-submodules https://git.jaseg.de/gerbolyze.git
cd gerbolyze
python3 -m venv venv
source venv/bin/activate
python3 setup.py install
Features
--------
Input on the left, output on the right.
.. image:: pics/test_svg_readme_composited.png
:width: 800px
* Almost full SVG 1.1 static spec coverage (!)
* Paths with beziers, self-intersections and holes
* Strokes, even with dashes and markers
* Pattern fills and strokes
* Transformations and nested groups
* Proper text rendering with support for complex text layout (e.g. Arabic)
* <image> elements via either built-in vectorizer or built-in halftone processor
* (some) CSS
* Writes Gerber, SVG or KiCAD S-Expression (``.kicad_mod``) formats
* Can export from top/bottom SVGs to a whole gerber layer stack at once with filename autodetection
* Can export SVGs to ``.kicad_mod`` files like svg2mod (but with full SVG support)
* Beziers flattening with configurable tolerance using actual math!
* Polygon intersection removal
* Polygon hole removal (!)
* Optionally vector-compositing of output: convert black/white/transparent image to black/transparent image
* Renders SVG templates from input gerbers for accurate and easy scaling and positioning of artwork
* layer masking with offset (e.g. all silk within 1mm of soldermask)
* Can read gerbers from zip files
* Limited SVG support for board outline layers (no fill/region support)
* Dashed lines supported on board outline layers
Gerbolyze is the end-to-end "paste this svg into these gerbers" command that handles all layers on both board sides at
once. The heavy-duty computer geometry logic of gerbolyze is handled by the svg-flatten utility (``svg-flatten``
directory). svg-flatten reads an SVG file and renders it into a variety of output formats. svg-flatten can be used like
a variant of the popular svg2mod that supports all of SVG and handles arbitrary input ``<path>`` elements.
Algorithm Overview
------------------
This is the algorithm gerbolyze uses to process a stack of gerbers.
* Map input files to semantic layers by their filenames
* For each layer:
* load input gerber
* Pass mask layers through ``gerbv`` for conversion to SVG
* Pass mask layers SVG through ``svg-flatten --dilate``
* Pass input SVG through ``svg-flatten --only-groups [layer]``
* Overlay input gerber, mask and input svg
* Write result to output gerber
This is the algorithm svg-flatten uses to process an SVG.
* pass input SVG through usvg_
* iterate depth-first through resulting SVG.
* for groups: apply transforms and clip and recurse
* for images: Vectorize using selected vectorizer
* for paths:
* flatten path using Cairo
* remove self-intersections using Clipper
* if stroke is set: process dash, then offset using Clipper
* apply pattern fills
* clip to clip-path
* remove holes using Clipper
* for KiCAD S-Expression export: vector-composite results using CavalierContours: subtract each clear output primitive
from all previous dark output primitives
Web interface
-------------
You can try gerbolyze online at https://dyna.kokoroyukuma.de/gerboweb
The web interface does not expose all of gerbolyze's bells and whistles, but it allows you to simply paste a single SVG
file on a board to try out gerbolyze. Upload your design on the web interface, then download the template for either the
top or bottom side, and put your artwork on the appropriate layer of that template using Inkscape_. Finally, upload the
modified template and let gerbolyze process your design.
Command-line usage
------------------
.. _command_line_usage:
Generate SVG template from Gerber files:
.. code-block:: shell
gerbolyze template [options] [--top|--bottom] input_dir_or.zip output.svg
Render design from an SVG made with the template above into a set of gerber files:
.. code-block:: shell
gerbolyze paste [options] artwork.svg input_dir_or.zip output_dir_or.zip
Use svg-flatten to convert an SVG file into Gerber or flattened SVG:
.. code-block:: shell
svg-flatten [options] --format [gerber|svg] [input_file.svg] [output_file]
Use svg-flatten to convert an SVG file into the given layer of a KiCAD S-Expression (``.kicad_mod``) file:
.. code-block:: shell
svg-flatten [options] --format kicad --sexp-layer F.SilkS --sexp-mod-name My_Module [input_file.svg] [output_file]
Use svg-flatten to convert an SVG file into a ``.kicad_mod`` with SVG layers fed into separate KiCAD layers based on
their IDs like the popular ``svg2mod`` is doing:
Note:
Right now, the input SVG's layers must have *ids* that match up with KiCAD's s-exp layer names. Note that naming
a layer in Inkscape only sets a ``name`` attribute; it does not change the ID. To change the ID in
Inkscape, you have to use Inkscape's "object properties" context menu function.
Also note that svg-flatten expects the layer names KiCAD uses in their S-Expression format. These are *different* to
the layer names KiCAD exposes in the UI (even though most of them match up!).
For your convenience, there is an SVG template with all the right layer names and IDs located next to this README.
.. code-block:: shell
svg-flatten [options] --format kicad --sexp-mod-name My_Module [input_file.svg] [output_file]
``gerbolyze template``
~~~~~~~~~~~~~~~~~~~~~~
Usage: ``gerbolyze template [OPTIONS] INPUT``
Generate SVG template for gerbolyze paste from gerber files.
INPUT may be a gerber file, directory of gerber files or zip file with gerber files. The output file contains a preview
image of the input gerbers to allow you to position your artwork, as well as prepared Inkscape layers corresponding to
each gerber layer. Simply place your artwork in this SVG template using Inkscape. Starting in v3.0, gerbolyze
automatically keeps track of which board side (top or bottom) is contained in an SVG template.
Options:
********
``--top | --bottom``
Output top or bottom side template. This affects both the preview image and the prepared Inkscape layers.
``--vector | --raster``
Embed preview renders into output file as SVG vector graphics instead of rendering them to PNG bitmaps. The
resulting preview may slow down your SVG editor.
``--raster-dpi FLOAT``
DPI for rastering preview
``--bbox TEXT``
Output file bounding box. Format: "w,h" to force [w] mm by [h] mm output canvas OR "x,y,w,h" to force [w] mm by [h]
mm output canvas with its bottom left corner at the given input gerber coördinates.
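For example, to force a 100 mm by 80 mm output canvas with its bottom left corner at the gerber origin, a template call
following the quick-start invocation above could look like this. The numbers are placeholders; use your board's actual
dimensions, and remember to pass the same ``--bbox`` value to ``gerbolyze paste`` later.

.. code-block:: shell

    gerbolyze template --top template_top.svg --bbox "0,0,100,80" my_gerber_dir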
``gerbolyze paste``
~~~~~~~~~~~~~~~~~~~
(see `below <vectorization_>`__)
Usage: ``gerbolyze paste [OPTIONS] INPUT_GERBERS OVERLAY_SVG OUTPUT_GERBERS``
Render vector data and raster images from SVG file into gerbers. The SVG input file can be generated using ``gerbolyze
template`` and contains the name and board side of each layer. Note that for board outline layers, handling slightly
differs from other layers as PCB fabs do not support filled Gerber regions on these layers.
Options:
********
``--bbox TEXT``
Output file bounding box. Format: "w,h" to force [w] mm by [h] mm output canvas OR "x,y,w,h" to force [w] mm by [h]
mm output canvas with its bottom left corner at the given input gerber coördinates. This **must match the ``--bbox`` value given to
template**!
``--subtract TEXT``
Use user subtraction script from argument (see `below <subtraction_script_>`_)
``--no-subtract``
Disable subtraction (see `below <subtraction_script_>`_)
``--dilate FLOAT``
Default dilation for subtraction operations in mm (see `below <subtraction_script_>`_)
``--trace-space FLOAT``
Passed through to svg-flatten, see `below <svg_flatten_>`__.
``--vectorizer TEXT``
Passed through to svg-flatten, see `its description below <svg_flatten_>`__. Also have a look at `the examples below <vectorization_>`_.
``--vectorizer-map TEXT``
Passed through to svg-flatten, see `below <svg_flatten_>`__.
``--exclude-groups TEXT``
Passed through to svg-flatten, see `below <svg_flatten_>`__.
.. _outline_layers:
Outline layers
**************
Outline layers require special handling since PCB fabs do not support filled G36/G37 polygons on these layers. The main
difference between normal layers and outline layers is how strokes are handled. On outline layers, strokes are
translated to normal Gerber draw commands (D01, D02 etc.) with an aperture set to the stroke's width instead of tracing
them to G36/G37 filled regions. This means that on outline layers, SVG end caps and line join types do not work: All
lines are rendered with round joins and end caps.
One exception from this are patterns, which work as expected for both fills and strokes with full support for joins and
end caps.
Dashed strokes are supported on outline layers and can be used to make easy mouse bites.
.. _subtraction_script:
Subtraction scripts
*******************
.. image:: pics/subtract_example.png
:width: 800px
Subtraction scripts tell ``gerbolyze paste`` to remove an area around certain input layers from an overlay layer.
When an input layer is given in the subtraction script, gerbolyze will dilate (extend outwards) everything on this input
layer and remove it from the target overlay layer. By default, Gerbolyze subtracts the mask layer from the silk layer to
make sure there are no silk primitives that overlap bare copper, and subtracts each input layer from its corresponding
overlay to make sure the two do not overlap. In the picture above you can see both at work: The overlay contains
halftone primitives all over the place. The subtraction script has cut out an area around all pads (mask layer) and all
existing silkscreen. You can turn off this behavior by passing ``--no-subtract`` or pass your own "script".
The syntax of these scripts is:
.. code-block::
{target layer} -= {source layer} {dilation} [; ...]
The target layer must be ``out.{layer name}`` and the source layer ``in.{layer name}``. The layer names are gerbolyze's
internal layer names, i.e.: ``paste, silk, mask, copper, outline, drill``
The dilation value is optional, but can be a float with a leading ``+`` or ``-``. If given, before subtraction the
source layer's features will be extended by that many mm. If not given, the dilation defaults to the value given by
``--dilate`` if given or 0.1 mm otherwise. To disable dilation, simply pass ``+0`` here.
Multiple commands can be separated by semicolons ``;`` or line breaks.
The default subtraction script is:
.. code-block::
out.silk -= in.mask
out.silk -= in.silk+0.5
out.mask -= in.mask+0.5
out.copper -= in.copper+0.5
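As a sketch of a custom script (the layer choice and the 0.2 mm dilation are arbitrary illustration values, not a
recommendation), you could additionally keep overlay silk away from existing copper by passing your own script via
``--subtract``. The argument order follows the ``gerbolyze paste`` invocation shown under command-line usage above.

.. code-block:: shell

    gerbolyze paste --subtract "out.silk -= in.mask; out.silk -= in.copper+0.2" artwork.svg my_gerber_dir output_gerber_dir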
.. _svg_flatten:
``svg-flatten``
~~~~~~~~~~~~~~~
Usage: ``svg-flatten [OPTIONS]... [INPUT_FILE] [OUTPUT_FILE]``
Specify ``-`` for stdin/stdout.
Options:
********
``-h, --help``
Print help and exit
``-v, --version``
Print version and exit
``-o, --format``
Output format. Supported: gerber, gerber-outline (for board outline layers), svg, s-exp (KiCAD S-Expression)
``-p, --precision``
Number of decimal places to use for exported coordinates (gerber: 1-9, SVG: >=0). Note that not all gerber viewers are
happy with too many digits. 5 or 6 is a reasonable choice.
``--clear-color``
SVG color to use in SVG output for "clear" areas (default: white)
``--dark-color``
SVG color to use in SVG output for "dark" areas (default: black)
``-f, --flip-gerber-polarity``
Flip polarity of all output gerber primitives for --format gerber.
``-d, --trace-space``
Minimum feature size of elements in vectorized graphics (trace/space) in mm. Default: 0.1mm.
``--no-header``
Do not export output format header/footer, only export the primitives themselves
``--flatten``
Flatten output so it only consists of non-overlapping white polygons. This performs composition at the vector level.
Potentially slow. This defaults to on when using KiCAD S-Exp export because KiCAD does not know polarity or colors.
``--no-flatten``
Disable automatic flattening for KiCAD S-Exp export
``--dilate``
Dilate output gerber primitives by this amount in mm. Used for masking out other layers.
``-g, --only-groups``
Comma-separated list of group IDs to export.
``-b, --vectorizer``
Vectorizer to use for bitmap images. One of poisson-disc (default), hex-grid, square-grid, binary-contours,
dev-null. Have a look at `the examples below <vectorization_>`_.
``--vectorizer-map``
Map from image element id to vectorizer. Overrides --vectorizer. Format: id1=vectorizer,id2=vectorizer,...
You can use this to set a certain vectorizer for specific images, e.g. if you want to use both halftone
vectorization and contour tracing in the same SVG. Note that you can set an ``<image>`` element's SVG ID from within
Inkscape through the context menu's Object Properties tool.
``--force-svg``
Force SVG input irrespective of file name
``--force-png``
Force bitmap graphics input irrespective of file name
``-s, --size``
Bitmap mode only: Physical size of output image in mm. Format: 12.34x56.78
``--sexp-mod-name``
Module name for KiCAD S-Exp output. This is a mandatory argument if using S-Exp output.
``--sexp-layer``
Layer for KiCAD S-Exp output. Defaults to auto-detect layers from SVG layer/top-level group IDs. If given, SVG
groups and layers are completely ignored and everything is simply vectorized into this layer, though you can still
use ``-g`` for group selection.
``-a, --preserve-aspect-ratio``
Bitmap mode only: Preserve aspect ratio of image. Allowed values are meet, slice. Can also parse full SVG
preserveAspectRatio syntax.
``--no-usvg``
Do not preprocess input using usvg (do not use unless you know *exactly* what you're doing)
``--usvg-dpi``
Passed through to usvg's --dpi, in case the input file has different ideas of DPI than usvg has.
``--scale``
Scale input svg lengths by this factor (-o gerber only).
``-e, --exclude-groups``
Comma-separated list of group IDs to exclude from export. Takes precedence over --only-groups.
.. _vectorization:
Gerbolyze image vectorization
-----------------------------
Gerbolyze has two built-in strategies to translate pixel images into vector images. One is its built-in halftone
processor that tries to approximate grayscale. The other is its built-in binary vectorizer that traces contours in
black-and-white images. Below are examples for the four options.
The vectorizers can be used in isolation through ``svg-flatten`` with either an SVG input that contains an image or a
PNG/JPG input.
The vectorizer can be controlled globally using the ``--vectorizer`` flag in both ``gerbolyze`` and ``svg-flatten``. It
can also be set on a per-image basis in both using ``--vectorizer-map [image svg id]=[option]["," ...]``.
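For instance (the image IDs ``logo-img`` and ``photo-img`` are hypothetical and stand for whatever IDs you assigned to
your ``<image>`` elements in Inkscape), you could contour-trace one image while halftoning the other:

.. code-block:: shell

    gerbolyze paste --vectorizer hex-grid --vectorizer-map "logo-img=binary-contours,photo-img=poisson-disc" artwork.svg my_gerber_dir output_gerber_dir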
.. for f in vec_*.png; convert -background white -gravity center $f -resize 500x500 -extent 500x500 (basename -s .png $f)-square.png; end
.. for vec in hexgrid square poisson contours; convert vec_"$vec"_whole-square.png vec_"$vec"_detail-square.png -background transparent -splice 25x0+0+0 +append -chop 25x0+0+0 vec_"$vec"_composited.png; end
``--vectorizer poisson-disc`` (the default)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: pics/vec_poisson_composited.png
:width: 800px
``--vectorizer hex-grid``
~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: pics/vec_hexgrid_composited.png
:width: 800px
``--vectorizer square-grid``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: pics/vec_square_composited.png
:width: 800px
``--vectorizer binary-contours``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: pics/vec_contours_composited.png
:width: 800px
The binary contours vectorizer requires a black-and-white binary input image. As you can see, like every bitmap tracer
it will produce some artifacts. For artistic input this is usually not too bad as long as the input data is
high-resolution. Antialiased edges in the input image are not only OK, they may even help with an accurate
vectorization.
GIMP halftone preprocessing guide
---------------------------------
Gerbolyze has its own built-in halftone processor, but you can also use the high-quality "newsprint" filter built into
GIMP_ instead if you like. This section will guide you through this. The PNG you get out of this can then be fed into
gerbolyze using ``--vectorizer binary-contours``.
1 Import your desired artwork
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Though anime or manga pictures are highly recommended, you can use any image including photographs. Be careful to select
a picture with comparatively low detail that remains recognizable at very low resolution. While working on a screen this
is hard to visualize, but the grain resulting from the low resolution of a PCB's silkscreen is quite coarse.
.. image:: screenshots/02import02.png
:width: 800px
2 Convert the image to grayscale
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. image:: screenshots/06grayscale.png
:width: 800px
3 Fine-tune the image's contrast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To look well on the PCB, contrast is critical. If your source image is in color, you may have lost some contrast during
grayscale conversion. Now is the time to retouch that using the GIMP's color curve tool.
When using the GIMP's newsprint filter, bright grays close to white and dark grays close to black will cause very small
dots that might be beyond your PCB manufacturer's maximum resolution. To control this case, add small steps at the ends
of the grayscale value curve as shown (exaggerated) in the picture below. These steps saturate very bright grays to
white and very dark grays to black while preserving the values in the middle.
.. image:: screenshots/08curve_cut.png
:width: 800px
4 Retouch details
~~~~~~~~~~~~~~~~~
There might be small details that don't look right yet, such as the image's background color or small highlights that
merge into the background now. You can manually change the color of any detail now using the GIMP's flood-fill tool.
If you don't want the image's background to show up on the final PCB at all, just make it black.
Particularly on low-resolution source images it may make sense to apply a blur with a radius similar to the following
newsprint filter's cell size (10px) to smooth out the dot pattern generated by the newsprint filter.
.. image:: screenshots/09retouch.png
:width: 800px
In the following example, I retouched the highlights in the hair of the character in the picture to make them completely
white instead of light-gray, so they still stand out nicely in the finished picture.
.. image:: screenshots/10retouched.png
:width: 800px
5 Run the newsprint filter
~~~~~~~~~~~~~~~~~~~~~~~~~~
Now, run the GIMP's newsprint filter, under filters, distorts, newsprint.
The first important setting is the spot size, which should be larger than your PCB's minimum detail size (about 10px
with ``gerbolyze render`` default settings for good-quality silkscreen). In general, the cheap and fast standard option of Chinese PCB houses will require a larger detail size; specialty options such as large board sizes, four layers or non-green solder mask usually come with a longer turnaround time but also with much better-quality silkscreen.
The second important setting is oversampling, which should be set to four or slightly higher. This improves the result
of the edge reconstruction of ``gerbolyze vectorize``.
.. image:: screenshots/11newsprint.png
:width: 800px
The following are examples on the detail resulting from the newsprint filter.
.. image:: screenshots/12newsprint.png
:width: 800px
6 Export the image for use with ``gerbolyze vectorize``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Simply export the image as a PNG file. Below are some pictures of the output ``gerbolyze vectorize`` produced for this
example.
.. image:: screenshots/14result_cut.png
:width: 800px
.. image:: screenshots/15result_cut.png
:width: 800px
Manufacturing Considerations
----------------------------
The main consideration when designing artwork for PCB processes is the processes' trace/space design rule. The two
things you can do here is one, to be creative with graphical parts of the design and avoid extremely narrow lines,
wedges or other thin features that will not come out well. Number two is to keep detail in raster images several times
larger than the manufacturing processes native capability. For example, to target a trace/space design rule of 100 µm,
the smallest detail in embedded raster graphics should not be much below 1mm.
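As a rough worked example (the 0.15 mm figure is purely illustrative): for a fab with a 150 µm trace/space rule you could
raise the vectorizers' minimum feature size from the 0.1 mm default via ``--trace-space``, and keep the smallest detail in
embedded raster art roughly ten times larger, i.e. around 1.5 mm:

.. code-block:: shell

    gerbolyze paste --trace-space 0.15 artwork.svg my_gerber_dir output_gerber_dir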
Gerbolyze's halftone vectorizers have built-in support for trace/space design rules. While they can still produce small
artifacts that violate these rules, their output should be close enough to satisfy board houses and close enough for the
result to look good. The way gerbolyze does this is to clip the halftone cell's values to zero whenever they get too
small, and to forcefully split or merge two neighboring cells when they get too close. While this process introduces
slight steps at the top and bottom of grayscale response, for most inputs these are not noticeable.
On the other hand, for SVG vector elements as well as for traced raster images, Gerbolyze cannot help with these design
rules. There is no heuristic that would allow Gerbolyze to non-destructively "fix" a design here, so all that's on the
roadmap here is to eventually include a gerber-level design rule checker.
As far as board houses go, I have made good experiences with the popular Chinese board houses. In my experience, JLC
will just produce whatever you send them with little fucks being given about design rule adherence or validity of the
input gerbers. This is great if you just want artistic circuit boards without much of a hassle, and you don't care if
they come out exactly as you imagined. The worst I've had happen was when an older version of gerbolyze generated
polygons with holes assuming standard fill-rule processing. In the board house's online gerber viewer things looked
fine, and they did not complain during file review either. However, the resulting boards looked completely wrong because
all the dark halftones were missing.
PCBWay on the other hand has a much more rigorous file review process. They *will* complain when you throw
illegal garbage gerbers at them, and they will helpfully guide you through your design rule violations. In this way you
get much more of a professional service from them and for designs that have to be functional their higher level of
scrutiny definitely is a good thing. For the design you saw in the first picture in this article, I ended up begging
them to just plot my files as long as it wouldn't physically break their machines, and to their credit, while they
seemed unhappy about it, they did, and the result looks absolutely stunning.
PCBWay is a bit more expensive on their lowest-end offering than JLC, but I found that for anything else (large boards,
multi-layer, gold plating etc.) their prices match. PCBWay offers a much broader range of manufacturing options such as
flexible circuit boards, multi-layer boards, thick or thin substrates and high-temperature substrates.
When in doubt about how your design is going to come out on the board, do not hesitate to contact your board house. Most
of the end customer-facing online PCB services have a number of different factories that do a number of different
fabrication processes for them depending on order parameters. Places like PCBWay have exceptional quality control and
good customer service, but that is mostly focused on the technical aspects of the PCB. If you rely on visual aspects
like silkscreen uniformity or solder mask color that are of little concern to anyone else in the electronics
industry, you may find significant variations between manufacturers or even between orders with the same manufacturer
and you may encounter challenges communicating your requirements.
Limitations
-----------
SVG raster features
~~~~~~~~~~~~~~~~~~~
Currently, SVG masks and filters are not supported. Though SVG is marketed as a "vector graphics format", these two
features are really raster primitives that all SVG viewers perform at the pixel level after rasterization. Since
supporting these would likely not end up looking like what you want, it is not a planned feature. If you need masks or
filters, simply export the relevant parts of the SVG as a PNG then include that in your template.
Gerber pass-through
~~~~~~~~~~~~~~~~~~~
Since gerbolyze has to composite your input gerbers with its own output, it has to fully parse and re-serialize them.
gerbolyze uses gerbonara_ for all its gerber parsing needs. Thus, gerbonara will interpret your gerbers and the output will be in
gerbonara's gerber "dialect". If you find a corner case where this does not work and the output looks wrong, please file
a bug report with an example file on the gerbonara_ bug tracker. *Always* check the output files for errors before
submitting them to production.
Gerbolyze is provided without any warranty, but still please open an issue or `send me an email
<mailto:[email protected]>`__ if you find any errors or inconsistencies.
Trace/Space design rule adherence
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
While the grayscale halftone vectorizers do a reasonable job adhering to a given trace/space design rule, they can still
produce small parts of output that violate it. For the contour vectorizer as well as for all SVG primitives, you are
responsible for adhering to design rules yourself as there is no algorithm that gerbolyze could use to "fix" its input.
A design rule checker is planned as a future addition to gerbolyze, but is not yet part of it. If in doubt, talk to your
fab and consider doing a test run of your design before ordering assembled boards ;)
Gallery
-------
.. image:: pics/sample3.jpg
:width: 400px
For a demonstration of ``gerbolyze convert``, check out the `Gerbolyze Protoboard Index`_, where you can download gerber
files for over 7,000 SMD and THT protoboard layouts.
Licensing
---------
This tool is licensed under the rather radical AGPLv3 license. Briefly, this means that if a webapp uses this tool in
its backend, you have to provide that webapp's users with this tool's source.
I get that some people have issues with the AGPL. In case this license prevents you from using this software, please
send me `an email <mailto:[email protected]>`__ and I can grant you an exception. I want this software to be useful to as
many people as possible and I wouldn't want the license to be a hurdle to anyone. OTOH I see a danger of some cheap
board house just integrating a fork into their webpage without providing their changes back upstream, and I want to
avoid that so the default license is still AGPL.
.. _usvg: https://github.com/RazrFalcon/resvg
.. _Inkscape: https://inkscape.org/
.. _pcb-tools: https://github.com/curtacircuitos/pcb-tools
.. _pcb-tools-extension: https://github.com/opiopan/pcb-tools-extension
.. _GIMP: https://gimp.org/
.. _gerbonara: https://gitlab.com/gerbolyze/gerbonara
.. _`Gerbolyze Protoboard Index`: https://dyna.kokoroyukuma.de/protos/
|
PypiClean
|
/comt-2.6.4.tar.gz/comt-2.6.4/src/cm/media/js/lib/yui/yui3-3.15.0/build/series-histogram-base/series-histogram-base-coverage.js
|
if (typeof __coverage__ === 'undefined') { __coverage__ = {}; }
if (!__coverage__['build/series-histogram-base/series-histogram-base.js']) {
__coverage__['build/series-histogram-base/series-histogram-base.js'] = {"path":"build/series-histogram-base/series-histogram-base.js","s":{"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0},"b":{"1":[0,0],"2":[0,0],"3":[0,0],"4":[0,0],"5":[0,0],"6":[0,0],"7":[0,0],"8":[0,0],"9":[0,0],"10":[0,0],"11":[0,0],"12":[0,0,0],"13":[0,0],"14":[0,0],"15":[0,0],"16":[0,0],"17":[0,0],"18":[0,0],"19":[0,0]},"f":{"1":0,"2":0,"3":0,"4":0},"fnMap":{"1":{"name":"(anonymous_1)","line":1,"loc":{"start":{"line":1,"column":33},"end":{"line":1,"column":52}}},"2":{"name":"Histogram","line":19,"loc":{"start":{"line":19,"column":0},"end":{"line":19,"column":20}}},"3":{"name":"(anonymous_3)","line":28,"loc":{"start":{"line":28,"column":16},"end":{"line":29,"column":4}}},"4":{"name":"(anonymous_4)","line":211,"loc":{"start":{"line":211,"column":22},"end":{"line":212,"column":4}}}},"statementMap":{"1":{"start":{"line":1,"column":0},"end":{"line":245,"column":72}},"2":{"start":{"line":9,"column":0},"end":{"line":9,"column":20}},"3":{"start":{"line":19,"column":0},"end":{"line":19,"column":22}},"4":{"start":{"line":21,"column":0},"end":{"line":240,"column":2}},"5":{"start":{"line":30,"column":8},"end":{"line":33,"column":9}},"6":{"start":{"line":32,"column":12},"end":{"line":32,"column":19}},"7":{"start":{"line":34,"column":8},"end":{"line":71,"column":52}},"8":{"start":{"line":72,"column":8},"end":{"line":75,"column":9}},"9":{"start":{"line":74,"column":12},"end":{"line":74,"column":51}},"10":{"start":{"line":76,"column":8},"end":{"line":79,"column":9}},"11":{"start":{"line":78,"column":12},"end":{"line":78,"column":55}},"12":{"start":{"line":80,"column":8},"end":{"line":89,"column":9}},"13":{"start":{"line":82,"column":12},"end":{"line":82,"column":34}},"14":{"start":{"line":83,"column":12},"end":{"line":83,"column":40}},"15":{"start":{"line":87,"column":12},"end":{"line":87,"column":33}},"16":{"start":{"line":88,"column":12},"end":{"line":88,"column":41}},"17":{"start":{"line":90,"column":8},"end":{"line":90,"column":36}},"18":{"start":{"line":91,"column":8},"end":{"line":91,"column":50}},"19":{"start":{"line":92,"column":8},"end":{"line":92,"column":34}},"20":{"start":{"line":93,"column":8},"end":{"line":93,"column":48}},"21":{"start":{"line":94,"column":8},"end":{"line":125,"column":9}},"22":{"start":{"line":96,"column":12},"end":{"line":104,"column":13}},"23":{"start":{"line":98,"column":16},"end":{"line":98,"column":51}},"24":{"start":{"line":99,"column":16},"end":{"line":99,"column":72}},"25":{"start":{"line":100,"column":16},"end":{"line":103,"column":17}},"26":{"start":{"line":102,"column":20},"end":{"line":102,"column":40}},"27":{"start":{"line":105,"column":12},"end":{"line":105,"column":41}},"28":{"start":{"line":106,"column":12},"end":{"line":114,"column":13}},"29":{"start":{"line":108,"column":16},"end":{"line":108,"column":58}},"30":{"start":{"line":109,"column":16},"end":{"line":109,"column":36}},"31":{"start":{"line":110,"column":16},"end":{"line":110,"column":32}},"32":{"start":{"line":111,"column":16},"end":{
"line":111,"column":33}},"33":{"start":{"line":112,"column":16},"end":{"line":112,"column":47}},"34":{"start":{"line":113,"column":16},"end":{"line":113,"column":40}},"35":{"start":{"line":118,"column":12},"end":{"line":118,"column":43}},"36":{"start":{"line":119,"column":12},"end":{"line":119,"column":41}},"37":{"start":{"line":120,"column":12},"end":{"line":124,"column":13}},"38":{"start":{"line":122,"column":16},"end":{"line":122,"column":47}},"39":{"start":{"line":123,"column":16},"end":{"line":123,"column":43}},"40":{"start":{"line":126,"column":8},"end":{"line":126,"column":31}},"41":{"start":{"line":127,"column":8},"end":{"line":175,"column":9}},"42":{"start":{"line":129,"column":12},"end":{"line":129,"column":57}},"43":{"start":{"line":130,"column":12},"end":{"line":130,"column":62}},"44":{"start":{"line":131,"column":12},"end":{"line":131,"column":56}},"45":{"start":{"line":132,"column":12},"end":{"line":132,"column":62}},"46":{"start":{"line":133,"column":12},"end":{"line":133,"column":81}},"47":{"start":{"line":134,"column":12},"end":{"line":134,"column":81}},"48":{"start":{"line":135,"column":12},"end":{"line":139,"column":13}},"49":{"start":{"line":137,"column":16},"end":{"line":137,"column":41}},"50":{"start":{"line":138,"column":16},"end":{"line":138,"column":25}},"51":{"start":{"line":140,"column":12},"end":{"line":140,"column":95}},"52":{"start":{"line":141,"column":12},"end":{"line":174,"column":13}},"53":{"start":{"line":143,"column":16},"end":{"line":143,"column":33}},"54":{"start":{"line":144,"column":16},"end":{"line":144,"column":35}},"55":{"start":{"line":146,"column":16},"end":{"line":168,"column":17}},"56":{"start":{"line":148,"column":20},"end":{"line":148,"column":56}},"57":{"start":{"line":149,"column":20},"end":{"line":149,"column":77}},"58":{"start":{"line":150,"column":20},"end":{"line":150,"column":39}},"59":{"start":{"line":151,"column":20},"end":{"line":151,"column":38}},"60":{"start":{"line":155,"column":20},"end":{"line":155,"column":48}},"61":{"start":{"line":156,"column":20},"end":{"line":156,"column":69}},"62":{"start":{"line":157,"column":20},"end":{"line":157,"column":35}},"63":{"start":{"line":158,"column":20},"end":{"line":158,"column":34}},"64":{"start":{"line":159,"column":20},"end":{"line":162,"column":21}},"65":{"start":{"line":161,"column":24},"end":{"line":161,"column":77}},"66":{"start":{"line":163,"column":20},"end":{"line":166,"column":21}},"67":{"start":{"line":165,"column":24},"end":{"line":165,"column":83}},"68":{"start":{"line":167,"column":20},"end":{"line":167,"column":66}},"69":{"start":{"line":171,"column":17},"end":{"line":174,"column":13}},"70":{"start":{"line":173,"column":16},"end":{"line":173,"column":41}},"71":{"start":{"line":176,"column":8},"end":{"line":176,"column":47}},"72":{"start":{"line":177,"column":8},"end":{"line":177,"column":47}},"73":{"start":{"line":178,"column":8},"end":{"line":192,"column":9}},"74":{"start":{"line":180,"column":12},"end":{"line":187,"column":15}},"75":{"start":{"line":191,"column":12},"end":{"line":191,"column":37}},"76":{"start":{"line":213,"column":8},"end":{"line":235,"column":10}},"77":{"start":{"line":236,"column":8},"end":{"line":236,"column":80}},"78":{"start":{"line":237,"column":8},"end":{"line":237,"column":84}},"79":{"start":{"line":238,"column":8},"end":{"line":238,"column":20}},"80":{"start":{"line":242,"column":0},"end":{"line":242,"column":24}}},"branchMap":{"1":{"line":30,"type":"if","locations":[{"start":{"line":30,"column":8},"end":{"line":30,"column":8}},{"start":{"line":3
0,"column":8},"end":{"line":30,"column":8}}]},"2":{"line":44,"type":"cond-expr","locations":[{"start":{"line":44,"column":47},"end":{"line":44,"column":74}},{"start":{"line":44,"column":77},"end":{"line":44,"column":78}}]},"3":{"line":72,"type":"if","locations":[{"start":{"line":72,"column":8},"end":{"line":72,"column":8}},{"start":{"line":72,"column":8},"end":{"line":72,"column":8}}]},"4":{"line":76,"type":"if","locations":[{"start":{"line":76,"column":8},"end":{"line":76,"column":8}},{"start":{"line":76,"column":8},"end":{"line":76,"column":8}}]},"5":{"line":80,"type":"if","locations":[{"start":{"line":80,"column":8},"end":{"line":80,"column":8}},{"start":{"line":80,"column":8},"end":{"line":80,"column":8}}]},"6":{"line":94,"type":"if","locations":[{"start":{"line":94,"column":8},"end":{"line":94,"column":8}},{"start":{"line":94,"column":8},"end":{"line":94,"column":8}}]},"7":{"line":94,"type":"binary-expr","locations":[{"start":{"line":94,"column":11},"end":{"line":94,"column":31}},{"start":{"line":94,"column":35},"end":{"line":94,"column":48}}]},"8":{"line":100,"type":"if","locations":[{"start":{"line":100,"column":16},"end":{"line":100,"column":16}},{"start":{"line":100,"column":16},"end":{"line":100,"column":16}}]},"9":{"line":106,"type":"if","locations":[{"start":{"line":106,"column":12},"end":{"line":106,"column":12}},{"start":{"line":106,"column":12},"end":{"line":106,"column":12}}]},"10":{"line":120,"type":"if","locations":[{"start":{"line":120,"column":12},"end":{"line":120,"column":12}},{"start":{"line":120,"column":12},"end":{"line":120,"column":12}}]},"11":{"line":135,"type":"if","locations":[{"start":{"line":135,"column":12},"end":{"line":135,"column":12}},{"start":{"line":135,"column":12},"end":{"line":135,"column":12}}]},"12":{"line":135,"type":"binary-expr","locations":[{"start":{"line":135,"column":15},"end":{"line":135,"column":28}},{"start":{"line":135,"column":33},"end":{"line":135,"column":50}},{"start":{"line":135,"column":54},"end":{"line":135,"column":71}}]},"13":{"line":141,"type":"if","locations":[{"start":{"line":141,"column":12},"end":{"line":141,"column":12}},{"start":{"line":141,"column":12},"end":{"line":141,"column":12}}]},"14":{"line":141,"type":"binary-expr","locations":[{"start":{"line":141,"column":15},"end":{"line":141,"column":44}},{"start":{"line":141,"column":48},"end":{"line":141,"column":73}}]},"15":{"line":146,"type":"if","locations":[{"start":{"line":146,"column":16},"end":{"line":146,"column":16}},{"start":{"line":146,"column":16},"end":{"line":146,"column":16}}]},"16":{"line":159,"type":"if","locations":[{"start":{"line":159,"column":20},"end":{"line":159,"column":20}},{"start":{"line":159,"column":20},"end":{"line":159,"column":20}}]},"17":{"line":163,"type":"if","locations":[{"start":{"line":163,"column":20},"end":{"line":163,"column":20}},{"start":{"line":163,"column":20},"end":{"line":163,"column":20}}]},"18":{"line":171,"type":"if","locations":[{"start":{"line":171,"column":17},"end":{"line":171,"column":17}},{"start":{"line":171,"column":17},"end":{"line":171,"column":17}}]},"19":{"line":178,"type":"if","locations":[{"start":{"line":178,"column":8},"end":{"line":178,"column":8}},{"start":{"line":178,"column":8},"end":{"line":178,"column":8}}]}},"code":["(function () { YUI.add('series-histogram-base', function (Y, NAME) {","","/**"," * Provides core functionality for creating a bar or column series."," *"," * @module charts"," * @submodule series-histogram"," */","var Y_Lang = Y.Lang;","","/**"," * Histogram is the base class for Column 
and Bar series."," *"," * @class Histogram"," * @constructor"," * @param {Object} config (optional) Configuration parameters."," * @submodule series-histogram"," */","function Histogram(){}","","Histogram.prototype = {"," /**"," * Draws the series."," *"," * @method drawSeries"," * @protected"," */"," drawSeries: function()"," {"," if(this.get(\"xcoords\").length < 1)"," {"," return;"," }"," var style = this._copyObject(this.get(\"styles\").marker),"," graphic = this.get(\"graphic\"),"," setSize,"," calculatedSize,"," xcoords = this.get(\"xcoords\"),"," ycoords = this.get(\"ycoords\"),"," i = 0,"," len = xcoords.length,"," top = ycoords[0],"," seriesTypeCollection = this.get(\"seriesTypeCollection\"),"," seriesLen = seriesTypeCollection ? seriesTypeCollection.length : 0,"," seriesSize = 0,"," totalSize = 0,"," offset = 0,"," ratio,"," renderer,"," order = this.get(\"order\"),"," graphOrder = this.get(\"graphOrder\"),"," left,"," marker,"," setSizeKey,"," calculatedSizeKey,"," config,"," fillColors = null,"," borderColors = null,"," xMarkerPlane = [],"," yMarkerPlane = [],"," xMarkerPlaneLeft,"," xMarkerPlaneRight,"," yMarkerPlaneTop,"," yMarkerPlaneBottom,"," dimensions = {"," width: [],"," height: []"," },"," xvalues = [],"," yvalues = [],"," groupMarkers = this.get(\"groupMarkers\");"," if(Y_Lang.isArray(style.fill.color))"," {"," fillColors = style.fill.color.concat();"," }"," if(Y_Lang.isArray(style.border.color))"," {"," borderColors = style.border.color.concat();"," }"," if(this.get(\"direction\") === \"vertical\")"," {"," setSizeKey = \"height\";"," calculatedSizeKey = \"width\";"," }"," else"," {"," setSizeKey = \"width\";"," calculatedSizeKey = \"height\";"," }"," setSize = style[setSizeKey];"," calculatedSize = style[calculatedSizeKey];"," this._createMarkerCache();"," this._maxSize = graphic.get(setSizeKey);"," if(seriesTypeCollection && seriesLen > 1)"," {"," for(; i < seriesLen; ++i)"," {"," renderer = seriesTypeCollection[i];"," seriesSize += renderer.get(\"styles\").marker[setSizeKey];"," if(order > i)"," {"," offset = seriesSize;"," }"," }"," totalSize = len * seriesSize;"," if(totalSize > this._maxSize)"," {"," ratio = graphic.get(setSizeKey)/totalSize;"," seriesSize *= ratio;"," offset *= ratio;"," setSize *= ratio;"," setSize = Math.max(setSize, 1);"," this._maxSize = setSize;"," }"," }"," else"," {"," seriesSize = style[setSizeKey];"," totalSize = len * seriesSize;"," if(totalSize > this._maxSize)"," {"," seriesSize = this._maxSize/len;"," this._maxSize = seriesSize;"," }"," }"," offset -= seriesSize/2;"," for(i = 0; i < len; ++i)"," {"," xMarkerPlaneLeft = xcoords[i] - seriesSize/2;"," xMarkerPlaneRight = xMarkerPlaneLeft + seriesSize;"," yMarkerPlaneTop = ycoords[i] - seriesSize/2;"," yMarkerPlaneBottom = yMarkerPlaneTop + seriesSize;"," xMarkerPlane.push({start: xMarkerPlaneLeft, end: xMarkerPlaneRight});"," yMarkerPlane.push({start: yMarkerPlaneTop, end: yMarkerPlaneBottom});"," if(!groupMarkers && (isNaN(xcoords[i]) || isNaN(ycoords[i])))"," {"," this._markers.push(null);"," continue;"," }"," config = this._getMarkerDimensions(xcoords[i], ycoords[i], calculatedSize, offset);"," if(!isNaN(config.calculatedSize) && config.calculatedSize > 0)"," {"," top = config.top;"," left = config.left;",""," if(groupMarkers)"," {"," dimensions[setSizeKey][i] = setSize;"," dimensions[calculatedSizeKey][i] = config.calculatedSize;"," xvalues.push(left);"," yvalues.push(top);"," }"," else"," {"," style[setSizeKey] = setSize;"," style[calculatedSizeKey] = config.calculatedSize;"," 
style.x = left;"," style.y = top;"," if(fillColors)"," {"," style.fill.color = fillColors[i % fillColors.length];"," }"," if(borderColors)"," {"," style.border.color = borderColors[i % borderColors.length];"," }"," marker = this.getMarker(style, graphOrder, i);"," }",""," }"," else if(!groupMarkers)"," {"," this._markers.push(null);"," }"," }"," this.set(\"xMarkerPlane\", xMarkerPlane);"," this.set(\"yMarkerPlane\", yMarkerPlane);"," if(groupMarkers)"," {"," this._createGroupMarker({"," fill: style.fill,"," border: style.border,"," dimensions: dimensions,"," xvalues: xvalues,"," yvalues: yvalues,"," shape: style.shape"," });"," }"," else"," {"," this._clearMarkerCache();"," }"," },",""," /**"," * Collection of default colors used for marker fills in a series when not specified by user."," *"," * @property _defaultFillColors"," * @type Array"," * @protected"," */"," _defaultFillColors: [\"#66007f\", \"#a86f41\", \"#295454\", \"#996ab2\", \"#e8cdb7\", \"#90bdbd\",\"#000000\",\"#c3b8ca\", \"#968373\", \"#678585\"],",""," /**"," * Gets the default style values for the markers."," *"," * @method _getPlotDefaults"," * @return Object"," * @private"," */"," _getPlotDefaults: function()"," {"," var defs = {"," fill:{"," type: \"solid\","," alpha: 1,"," colors:null,"," alphas: null,"," ratios: null"," },"," border:{"," weight: 0,"," alpha: 1"," },"," width: 12,"," height: 12,"," shape: \"rect\",",""," padding:{"," top: 0,"," left: 0,"," right: 0,"," bottom: 0"," }"," };"," defs.fill.color = this._getDefaultColor(this.get(\"graphOrder\"), \"fill\");"," defs.border.color = this._getDefaultColor(this.get(\"graphOrder\"), \"border\");"," return defs;"," }","};","","Y.Histogram = Histogram;","","","}, '@VERSION@', {\"requires\": [\"series-cartesian\", \"series-plot-util\"]});","","}());"]};
}
var __cov_0tjbcXaJzRKQpBUf_9w8GQ = __coverage__['build/series-histogram-base/series-histogram-base.js'];
YUI.add('series-histogram-base', function(Y, NAME) {
    var Y_Lang = Y.Lang;
    function Histogram() {}
    Histogram.prototype = {
        drawSeries: function() {
            if (this.get('xcoords').length < 1) {
                return;
            }
            var style = this._copyObject(this.get('styles').marker),
                graphic = this.get('graphic'),
                setSize, calculatedSize,
                xcoords = this.get('xcoords'),
                ycoords = this.get('ycoords'),
                i = 0,
                len = xcoords.length,
                top = ycoords[0],
                seriesTypeCollection = this.get('seriesTypeCollection'),
                seriesLen = seriesTypeCollection ? seriesTypeCollection.length : 0,
                seriesSize = 0,
                totalSize = 0,
                offset = 0,
                ratio, renderer,
                order = this.get('order'),
                graphOrder = this.get('graphOrder'),
                left, marker, setSizeKey, calculatedSizeKey, config,
                fillColors = null,
                borderColors = null,
                xMarkerPlane = [],
                yMarkerPlane = [],
                xMarkerPlaneLeft, xMarkerPlaneRight, yMarkerPlaneTop, yMarkerPlaneBottom,
                dimensions = {width: [], height: []},
                xvalues = [],
                yvalues = [],
                groupMarkers = this.get('groupMarkers');
            if (Y_Lang.isArray(style.fill.color)) {
                fillColors = style.fill.color.concat();
            }
            if (Y_Lang.isArray(style.border.color)) {
                borderColors = style.border.color.concat();
            }
            if (this.get('direction') === 'vertical') {
                setSizeKey = 'height';
                calculatedSizeKey = 'width';
            } else {
                setSizeKey = 'width';
                calculatedSizeKey = 'height';
            }
            setSize = style[setSizeKey];
            calculatedSize = style[calculatedSizeKey];
            this._createMarkerCache();
            this._maxSize = graphic.get(setSizeKey);
            if (seriesTypeCollection && seriesLen > 1) {
                for (; i < seriesLen; ++i) {
                    renderer = seriesTypeCollection[i];
                    seriesSize += renderer.get('styles').marker[setSizeKey];
                    if (order > i) {
                        offset = seriesSize;
                    }
                }
                totalSize = len * seriesSize;
                if (totalSize > this._maxSize) {
                    ratio = graphic.get(setSizeKey) / totalSize;
                    seriesSize *= ratio;
                    offset *= ratio;
                    setSize *= ratio;
                    setSize = Math.max(setSize, 1);
                    this._maxSize = setSize;
                }
            } else {
                seriesSize = style[setSizeKey];
                totalSize = len * seriesSize;
                if (totalSize > this._maxSize) {
                    seriesSize = this._maxSize / len;
                    this._maxSize = seriesSize;
                }
            }
            offset -= seriesSize / 2;
            for (i = 0; i < len; ++i) {
                xMarkerPlaneLeft = xcoords[i] - seriesSize / 2;
                xMarkerPlaneRight = xMarkerPlaneLeft + seriesSize;
                yMarkerPlaneTop = ycoords[i] - seriesSize / 2;
                yMarkerPlaneBottom = yMarkerPlaneTop + seriesSize;
                xMarkerPlane.push({start: xMarkerPlaneLeft, end: xMarkerPlaneRight});
                yMarkerPlane.push({start: yMarkerPlaneTop, end: yMarkerPlaneBottom});
                if (!groupMarkers && (isNaN(xcoords[i]) || isNaN(ycoords[i]))) {
                    this._markers.push(null);
                    continue;
                }
                config = this._getMarkerDimensions(xcoords[i], ycoords[i], calculatedSize, offset);
                if (!isNaN(config.calculatedSize) && config.calculatedSize > 0) {
                    top = config.top;
                    left = config.left;
                    if (groupMarkers) {
                        dimensions[setSizeKey][i] = setSize;
                        dimensions[calculatedSizeKey][i] = config.calculatedSize;
                        xvalues.push(left);
                        yvalues.push(top);
                    } else {
                        style[setSizeKey] = setSize;
                        style[calculatedSizeKey] = config.calculatedSize;
                        style.x = left;
                        style.y = top;
                        if (fillColors) {
                            style.fill.color = fillColors[i % fillColors.length];
                        }
                        if (borderColors) {
                            style.border.color = borderColors[i % borderColors.length];
                        }
                        marker = this.getMarker(style, graphOrder, i);
                    }
                } else if (!groupMarkers) {
                    this._markers.push(null);
                }
            }
            this.set('xMarkerPlane', xMarkerPlane);
            this.set('yMarkerPlane', yMarkerPlane);
            if (groupMarkers) {
                this._createGroupMarker({
                    fill: style.fill,
                    border: style.border,
                    dimensions: dimensions,
                    xvalues: xvalues,
                    yvalues: yvalues,
                    shape: style.shape
                });
            } else {
                this._clearMarkerCache();
            }
        },
        _defaultFillColors: ['#66007f', '#a86f41', '#295454', '#996ab2', '#e8cdb7',
                             '#90bdbd', '#000000', '#c3b8ca', '#968373', '#678585'],
        _getPlotDefaults: function() {
            var defs = {
                fill: {type: 'solid', alpha: 1, colors: null, alphas: null, ratios: null},
                border: {weight: 0, alpha: 1},
                width: 12,
                height: 12,
                shape: 'rect',
                padding: {top: 0, left: 0, right: 0, bottom: 0}
            };
            defs.fill.color = this._getDefaultColor(this.get('graphOrder'), 'fill');
            defs.border.color = this._getDefaultColor(this.get('graphOrder'), 'border');
            return defs;
        }
    };
    Y.Histogram = Histogram;
}, '@VERSION@', {'requires': ['series-cartesian', 'series-plot-util']});
|
PypiClean
|
/annotation_tool-0.10.0-py3-none-any.whl/annotation_tool/data_model/single_annotation.py
|
from collections import namedtuple
from copy import deepcopy
from typing import Tuple, Union
import numpy as np
from annotation_tool.utility.decorators import accepts, returns
from .annotation_scheme import AnnotationScheme
@returns(type(True))
@accepts((np.ndarray, dict), AnnotationScheme)
def is_compatible(raw_annotation: Union[np.ndarray, dict], scheme: AnnotationScheme):
if len(scheme) <= 0:
return False
if isinstance(raw_annotation, dict):
for elem in scheme:
            val = raw_annotation.get(elem.group_name, {}).get(elem.element_name)
if val is None:
return False
if not isinstance(val, int):
return False
if val not in [0, 1]:
return False
return True
if isinstance(raw_annotation, np.ndarray):
if len(scheme) != raw_annotation.shape[0]:
return False
if len(raw_annotation.shape) > 1:
return False
return bool(np.all((raw_annotation == 0) | (raw_annotation == 1)))
return False
class SingleAnnotation:
def __init__(
self, scheme: AnnotationScheme, annotation: Union[np.ndarray, dict, None] = None
):
assert isinstance(scheme, AnnotationScheme)
if annotation is None:
annotation = np.zeros(len(scheme), dtype=np.int8)
else:
assert isinstance(annotation, (np.ndarray, dict))
assert is_compatible(
annotation, scheme
), "Annotation is not compatible: \n {} \n {}".format(annotation, scheme)
self._scheme = scheme
self._annotation_dict = self._make_dict(annotation)
self._annotation_vector = self._make_vector(annotation)
self._binary_str = self._make_binary_str(annotation)
def _make_dict(self, a):
if isinstance(a, np.ndarray):
d = {}
row = -1
for idx, scheme_element in enumerate(self.scheme):
group_name = scheme_element.group_name
if row != scheme_element.row:
row = scheme_element.row
d[group_name] = {}
group_element = scheme_element.element_name
val = int(a[idx])
assert 0 <= val <= 1
d[group_name][group_element] = val
return d
if isinstance(a, dict):
return a
else:
raise RuntimeError
def _make_vector(self, a):
if isinstance(a, np.ndarray):
a = np.array(a, copy=True, dtype=np.int8)
return a
if isinstance(a, dict):
ls = []
for scheme_element in self.scheme:
group_name, elem = (
scheme_element.group_name,
scheme_element.element_name,
)
val = a[group_name][elem]
assert 0 <= val <= 1
ls.append(val)
            return np.array(ls, dtype=np.int8)
else:
raise RuntimeError
def _make_binary_str(self, a):
res = [str(x) for x in self._make_vector(a)]
res = "".join(res)
return res
def get_empty_copy(self):
return SingleAnnotation(self.scheme)
@property
@returns(dict)
def annotation_dict(self):
return self._annotation_dict
@property
@returns(np.ndarray)
def annotation_vector(self):
return self._annotation_vector
@property
@returns(str)
def binary_str(self):
return self._binary_str
@property
@returns((dict, np.ndarray))
    def annotation(self) -> Tuple[dict, np.ndarray]:
return self.annotation_dict, self.annotation_vector
@annotation.setter
def annotation(self, annotation: Union[np.ndarray, dict]):
        assert is_compatible(annotation, self.scheme)
self._annotation_dict = self._make_dict(annotation)
self._annotation_vector = self._make_vector(annotation)
self._binary_str = self._make_binary_str(annotation)
@property
def scheme(self):
return self._scheme
@scheme.setter
def scheme(self, x):
raise AttributeError("Cannot change the scheme!")
def is_empty(self):
return np.sum(self.annotation_vector) == 0
def __len__(self):
return self.annotation_vector.shape[0]
def __eq__(self, other):
if isinstance(other, SingleAnnotation):
scheme_equal = self.scheme == other.scheme
vec_equal = np.array_equal(self.annotation_vector, other.annotation_vector)
return scheme_equal and vec_equal
else:
return False
def __copy__(self):
new_anno = create_single_annotation(self.scheme, self.annotation_vector)
assert self == new_anno and new_anno is not self
return new_anno
def __deepcopy__(self, memo):
new_anno = create_single_annotation(
deepcopy(self.scheme), deepcopy(self.annotation_vector)
)
assert self == new_anno
assert new_anno is not self
return new_anno
def __iter__(self):
annotation_element = namedtuple(
"annotation_attribute",
["group_name", "element_name", "value", "row", "column"],
)
for scheme_element in self.scheme:
group_name = scheme_element.group_name
element_name = scheme_element.element_name
row, col = scheme_element.row, scheme_element.column
value = self.annotation_dict[group_name][element_name]
yield annotation_element(group_name, element_name, value, row, col)
def __hash__(self):
# logging.warning("Hash of annotation is deprecated")
return hash((self.scheme, self.binary_str))
@returns(SingleAnnotation)
@accepts(AnnotationScheme, (np.ndarray, dict, type(None)))
def create_single_annotation(
scheme: AnnotationScheme, annotation: Union[np.ndarray, dict, None] = None
) -> SingleAnnotation:
"""
Create an annotation from a scheme and a vector or a dict.
Args:
scheme: The scheme of the annotation.
annotation: The annotation as a vector or a dict. If None, an empty annotation is created.
Returns:
The annotation.
Raises:
ValueError: If the parameters are not valid.
"""
try:
return SingleAnnotation(scheme, annotation)
except AssertionError as e:
print(str(e))
raise ValueError(
"Cannot create annotation from {} and {}".format(scheme, annotation)
        ) from e
@returns(SingleAnnotation)
def empty_annotation(scheme: AnnotationScheme):
return SingleAnnotation(scheme)
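# Illustrative sketch (not part of the original module): typical use of the public surface
# defined above. The AnnotationScheme instance is assumed to be built elsewhere in
# annotation_tool; only behaviour from this file is exercised, and the helper name is hypothetical.
def _example_roundtrip(scheme: AnnotationScheme) -> None:
    anno = empty_annotation(scheme)
    assert anno.is_empty()
    # Assigning a 0/1 vector through the setter keeps the dict, vector and
    # binary-string views in sync.
    anno.annotation = np.ones(len(scheme), dtype=np.int8)
    print(anno.binary_str)
    print(anno.annotation_dict)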
|
PypiClean
|
/ansys_dpf_core-0.9.0.tar.gz/ansys_dpf_core-0.9.0/src/ansys/dpf/core/operators/geo/integrate_over_elements.py
|
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class integrate_over_elements(Operator):
"""Integration of an input field over mesh.
Parameters
----------
field : Field
scoping : Scoping, optional
Integrate the input field over a specific
scoping.
mesh : MeshedRegion, optional
        Mesh to integrate on; if not provided, the
        mesh of the input field is used.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.geo.integrate_over_elements()
>>> # Make input connections
>>> my_field = dpf.Field()
>>> op.inputs.field.connect(my_field)
>>> my_scoping = dpf.Scoping()
>>> op.inputs.scoping.connect(my_scoping)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.geo.integrate_over_elements(
... field=my_field,
... scoping=my_scoping,
... mesh=my_mesh,
... )
>>> # Get output data
>>> result_field = op.outputs.field()
"""
def __init__(self, field=None, scoping=None, mesh=None, config=None, server=None):
super().__init__(name="element::integrate", config=config, server=server)
self._inputs = InputsIntegrateOverElements(self)
self._outputs = OutputsIntegrateOverElements(self)
if field is not None:
self.inputs.field.connect(field)
if scoping is not None:
self.inputs.scoping.connect(scoping)
if mesh is not None:
self.inputs.mesh.connect(mesh)
@staticmethod
def _spec():
description = """Integration of an input field over mesh."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="field",
type_names=["field"],
optional=False,
document="""""",
),
1: PinSpecification(
name="scoping",
type_names=["scoping"],
optional=True,
document="""Integrate the input field over a specific
scoping.""",
),
2: PinSpecification(
name="mesh",
type_names=["abstract_meshed_region"],
optional=True,
document="""Mesh to integrate on, if not provided the one
from input field is provided.""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="field",
type_names=["field"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="element::integrate", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsIntegrateOverElements
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsIntegrateOverElements
"""
return super().outputs
class InputsIntegrateOverElements(_Inputs):
"""Intermediate class used to connect user inputs to
integrate_over_elements operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.integrate_over_elements()
>>> my_field = dpf.Field()
>>> op.inputs.field.connect(my_field)
>>> my_scoping = dpf.Scoping()
>>> op.inputs.scoping.connect(my_scoping)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
"""
def __init__(self, op: Operator):
super().__init__(integrate_over_elements._spec().inputs, op)
self._field = Input(integrate_over_elements._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._field)
self._scoping = Input(integrate_over_elements._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._scoping)
self._mesh = Input(integrate_over_elements._spec().input_pin(2), 2, op, -1)
self._inputs.append(self._mesh)
@property
def field(self):
"""Allows to connect field input to the operator.
Parameters
----------
my_field : Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.integrate_over_elements()
>>> op.inputs.field.connect(my_field)
>>> # or
>>> op.inputs.field(my_field)
"""
return self._field
@property
def scoping(self):
"""Allows to connect scoping input to the operator.
Integrate the input field over a specific
scoping.
Parameters
----------
my_scoping : Scoping
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.integrate_over_elements()
>>> op.inputs.scoping.connect(my_scoping)
>>> # or
>>> op.inputs.scoping(my_scoping)
"""
return self._scoping
@property
def mesh(self):
"""Allows to connect mesh input to the operator.
        Mesh to integrate on; if not provided, the
        mesh of the input field is used.
Parameters
----------
my_mesh : MeshedRegion
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.integrate_over_elements()
>>> op.inputs.mesh.connect(my_mesh)
>>> # or
>>> op.inputs.mesh(my_mesh)
"""
return self._mesh
class OutputsIntegrateOverElements(_Outputs):
"""Intermediate class used to get outputs from
integrate_over_elements operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.integrate_over_elements()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
"""
def __init__(self, op: Operator):
super().__init__(integrate_over_elements._spec().outputs, op)
self._field = Output(integrate_over_elements._spec().output_pin(0), 0, op)
self._outputs.append(self._field)
@property
def field(self):
"""Allows to get field output of the operator
Returns
----------
my_field : Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.geo.integrate_over_elements()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
""" # noqa: E501
return self._field
|
PypiClean
|
/bpy_optix-2.82-cp37-cp37m-win_amd64.whl/bpy_optix-2.82.data/scripts/2.82/scripts/addons/object_collection_manager/internals.py
|
# Copyright 2011, Ryan Inch
import bpy
from bpy.types import (
PropertyGroup,
Operator,
)
from bpy.props import StringProperty
layer_collections = {}
collection_tree = []
expanded = []
max_lvl = 0
row_index = 0
def get_max_lvl():
return max_lvl
def update_col_name(self, context):
if self.name != self.last_name:
if self.name == '':
self.name = self.last_name
return
if self.last_name != '':
layer_collections[self.last_name]["ptr"].collection.name = self.name
update_property_group(context)
self.last_name = self.name
class CMListCollection(PropertyGroup):
name: StringProperty(update=update_col_name)
last_name: StringProperty()
def update_collection_tree(context):
global max_lvl
global row_index
collection_tree.clear()
layer_collections.clear()
max_lvl = 0
row_index = 0
init_laycol_list = context.view_layer.layer_collection.children
master_laycol = {"id": 0,
"name": context.view_layer.layer_collection.name,
"lvl": -1,
"row_index": -1,
"visible": True,
"has_children": True,
"expanded": True,
"parent": None,
"children": [],
"ptr": context.view_layer.layer_collection
}
get_all_collections(context, init_laycol_list, master_laycol, collection_tree, visible=True)
def get_all_collections(context, collections, parent, tree, level=0, visible=False):
global row_index
global max_lvl
if level > max_lvl:
max_lvl = level
for item in collections:
laycol = {"id": len(layer_collections) +1,
"name": item.name,
"lvl": level,
"row_index": row_index,
"visible": visible,
"has_children": False,
"expanded": False,
"parent": parent,
"children": [],
"ptr": item
}
row_index += 1
layer_collections[item.name] = laycol
tree.append(laycol)
if len(item.children) > 0:
laycol["has_children"] = True
if item.name in expanded and laycol["visible"]:
laycol["expanded"] = True
get_all_collections(context, item.children, laycol, laycol["children"], level+1, visible=True)
else:
get_all_collections(context, item.children, laycol, laycol["children"], level+1)
def update_property_group(context):
update_collection_tree(context)
context.scene.CMListCollection.clear()
create_property_group(context, collection_tree)
def create_property_group(context, tree):
for laycol in tree:
new_cm_listitem = context.scene.CMListCollection.add()
new_cm_listitem.name = laycol["name"]
if laycol["has_children"]:
create_property_group(context, laycol["children"])
class CMSendReport(Operator):
bl_label = "Send Report"
bl_idname = "view3d.cm_send_report"
message: StringProperty()
def draw(self, context):
layout = self.layout
first = True
string = ""
for num, char in enumerate(self.message):
if char == "\n":
if first:
layout.row().label(text=string, icon='ERROR')
first = False
else:
layout.row().label(text=string, icon='BLANK1')
string = ""
continue
string = string + char
if first:
layout.row().label(text=string, icon='ERROR')
else:
layout.row().label(text=string, icon='BLANK1')
def invoke(self, context, event):
wm = context.window_manager
max_len = 0
length = 0
for char in self.message:
if char == "\n":
if length > max_len:
max_len = length
length = 0
else:
length += 1
if length > max_len:
max_len = length
return wm.invoke_popup(self, width=(30 + (max_len*5.5)))
def execute(self, context):
self.report({'INFO'}, self.message)
print(self.message)
return {'FINISHED'}
def send_report(message):
def report():
window = bpy.context.window_manager.windows[0]
ctx = {'window': window, 'screen': window.screen, }
bpy.ops.view3d.cm_send_report(ctx, 'INVOKE_DEFAULT', message=message)
bpy.app.timers.register(report)
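# Illustrative sketch (assumption, not part of this file): the add-on's __init__ normally
# registers the classes above and exposes the scene-level ``CMListCollection`` collection that
# update_property_group() writes to. The wiring below shows that assumed registration.
def register():
    bpy.utils.register_class(CMListCollection)
    bpy.utils.register_class(CMSendReport)
    bpy.types.Scene.CMListCollection = bpy.props.CollectionProperty(type=CMListCollection)
def unregister():
    del bpy.types.Scene.CMListCollection
    bpy.utils.unregister_class(CMSendReport)
    bpy.utils.unregister_class(CMListCollection)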
|
PypiClean
|
/slurm_script-0.1.8-py3-none-any.whl/slurm_script/main.py
|
import argparse
import tempfile
import subprocess
from slurm_script import __version__
SLURM_ARGS = argparse.ArgumentParser(
prog="slurm_script/sjob",
description="python interface to generate and run slurm command.",
)
SLURM_ARGS.add_argument(
"-n", "--nproc", help="Number of processors to run the job.", type=int
)
SLURM_ARGS.add_argument("-j", "--job_name", help="Name of the job.", type=str)
SLURM_ARGS.add_argument(
"-t", "--time", help="Maximum runtime [hours:minutes:second].", type=str
)
SLURM_ARGS.add_argument("-m", "--mem_per_cpu", help="Memory per core [MB].", type=str)
SLURM_ARGS.add_argument("-c", "--command", help="Program command.", nargs="+", type=str)
SLURM_ARGS.add_argument(
"-mail",
"--mail_type",
help="Email notification at either BEGIN, END, or FAIL.",
type=str,
)
SLURM_ARGS.add_argument("-nt", "--ntasks", help="Number of tasks.", type=int)
SLURM_ARGS.add_argument(
"-cnt", "--cpus_per_task", help="Number of cpus per task.", type=str
)
SLURM_ARGS.add_argument(
"-a", "--additional_cmd", help="Additional commands.", nargs="+", type=str
)
args_to_slurm_flag = {
"nproc": " -n ",
"job_name": " --job-name=",
"time": " --time=",
"mem_per_cpu": " --mem-per-cpu=",
"output": " --output=",
"command": " ",
"mail_type": " --mail-type=",
"ntasks": " --ntasks=",
"cpus_per_task": " --cpus-per-task=",
}
bash_header = "#!/bin/bash\n\n"
prefix = "#SBATCH"
def main() -> None:
print(f"slurm_script version: {__version__}")
argv = SLURM_ARGS.parse_args()
template = bash_header
for ar in vars(argv):
val = getattr(argv, ar)
if val is not None and ar != "command" and ar != "additional_cmd":
template += prefix + args_to_slurm_flag[ar] + str(val) + "\n"
template += "\n"
    # Place additional commands between the SBATCH parameters and the mpirun command
if argv.additional_cmd is not None:
template += " ".join(argv.additional_cmd) + "\n"
if argv.command is not None:
run_command = " ".join(argv.command)
template += "\nmpirun" + args_to_slurm_flag["command"] + run_command
else:
raise ValueError("No command provided.")
# Report
print("")
print("Preview of the generated script:")
print("--------------------------------")
print(template)
print("--------------------------------")
run_flag = input("Do you want to run the script? [y/n] ")
if run_flag.lower() == "y" or run_flag == "":
with tempfile.NamedTemporaryFile(mode="r+", suffix=".sh") as temp_file:
temp_file.write(template)
temp_file.flush()
subprocess.run(["sbatch", temp_file.name])
else:
print("Finished without running the script.")
|
PypiClean
|
/torchrec_cpu-0.1.0-py39-none-any.whl/fbgemm_gpu/split_embedding_inference_converter.py
|
# pyre-unsafe
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from typing import Optional, Tuple
import fbgemm_gpu.split_table_batched_embeddings_ops as split_table_batched_embeddings_ops
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from torch import Tensor, nn
# TODO: add per-feature based converter option (based on embedding_specs during inference)
# TODO: optimize embedding pruning and quantization latency.
class SplitEmbInferenceConverter:
def __init__(
self,
quantize_type: SparseType,
pruning_ratio: Optional[float],
use_array_for_index_remapping: bool = True,
):
self.quantize_type = quantize_type
# TODO(yingz): Change the pruning ratio to per-table settings.
self.pruning_ratio = pruning_ratio
self.use_array_for_index_remapping = use_array_for_index_remapping
def convert_model(self, model: torch.nn.Module) -> nn.Module:
self._process_split_embs(model)
return model
def _prune_by_weights_l2_norm(self, new_num_rows, weights) -> Tuple[Tensor, float]:
assert new_num_rows > 0
from numpy.linalg import norm
indicators = []
for row in weights:
indicators.append(norm(row.cpu().numpy(), ord=2))
sorted_indicators = sorted(indicators, reverse=True)
threshold = None
for i in range(new_num_rows, len(sorted_indicators)):
if sorted_indicators[i] < sorted_indicators[new_num_rows - 1]:
threshold = sorted_indicators[i]
break
if threshold is None:
threshold = sorted_indicators[-1] - 1
return (torch.tensor(indicators), threshold)
def _prune_embs(
self,
idx: int,
num_rows: int,
module: split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen,
) -> Tuple[Tensor, Optional[Tensor]]:
# TODO(yingz): Avoid DtoH / HtoD overhead.
weights = module.split_embedding_weights()[idx].cpu()
if self.pruning_ratio is None:
return (weights, None)
new_num_rows = int(math.ceil(num_rows * (1.0 - self.pruning_ratio))) # type: ignore
if new_num_rows == num_rows:
return (weights, None)
(indicators, threshold) = self._prune_by_weights_l2_norm(new_num_rows, weights)
return torch.ops.fbgemm.embedding_bag_rowwise_prune(
weights, indicators, threshold, torch.int32
)
def _quantize_embs(
self, weight: Tensor, weight_ty: SparseType
) -> Tuple[Tensor, Optional[Tensor]]:
if weight_ty == SparseType.FP32:
q_weight = weight.float()
# FIXME: How to view the PyTorch Tensor as a different type (e.g., uint8)
# Here it uses numpy and it will introduce DtoH/HtoD overhead.
res_weight = torch.tensor(
q_weight.cpu().numpy().view(np.uint8)
).contiguous()
return (res_weight, None)
elif weight_ty == SparseType.FP16:
q_weight = weight.half()
res_weight = torch.tensor(
q_weight.cpu().numpy().view(np.uint8)
).contiguous()
return (res_weight, None)
elif weight_ty == SparseType.INT8:
q_weight = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(weight)
res_weight = torch.tensor(q_weight[:, :-8].cpu().numpy().view(np.uint8))
res_scale_shift = torch.tensor(
q_weight[:, -8:]
.contiguous()
.cpu()
.numpy()
.view(np.float32)
.astype(np.float16)
.view(np.uint8)
            ) # bytes [-4:-2]: scale; bytes [-2:]: bias
return (res_weight, res_scale_shift)
elif weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2:
q_weight = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
weight,
bit_rate=weight_ty.bit_rate(),
)
res_weight = torch.tensor(q_weight[:, :-4].cpu().numpy().view(np.uint8))
res_scale_shift = torch.tensor(
q_weight[:, -4:].contiguous().cpu().numpy().view(np.uint8)
            ) # bytes [-4:-2]: scale; bytes [-2:]: bias
return (res_weight, res_scale_shift)
else:
raise RuntimeError("Unsupported SparseType: {}".format(weight_ty))
def _process_split_embs(self, model: nn.Module) -> None:
for name, child in model.named_children():
if isinstance(
child,
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen,
):
embedding_specs = []
use_cpu = (
child.embedding_specs[0][3]
== split_table_batched_embeddings_ops.ComputeDevice.CPU
)
for (E, D, _, _) in child.embedding_specs:
weights_ty = self.quantize_type
if D % weights_ty.align_size() != 0:
logging.warning(
f"Embedding dim {D} couldn't be divided by align size {weights_ty.align_size()}!"
)
assert D % 4 == 0
weights_ty = (
SparseType.FP16
) # fall back to FP16 if dimension couldn't be aligned with the required size
embedding_specs.append(("", E, D, weights_ty))
weight_lists = []
new_embedding_specs = []
index_remapping_list = []
for t, (_, E, D, weight_ty) in enumerate(embedding_specs):
# Try to prune embeddings.
(pruned_weight, index_remapping) = self._prune_embs(t, E, child)
new_embedding_specs.append(
(
"",
pruned_weight.size()[0],
D,
weight_ty,
split_table_batched_embeddings_ops.EmbeddingLocation.HOST
if use_cpu
else split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
)
)
index_remapping_list.append(index_remapping)
# Try to quantize embeddings.
weight_lists.append(self._quantize_embs(pruned_weight, weight_ty))
q_child = split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=new_embedding_specs,
index_remapping=index_remapping_list
if self.pruning_ratio is not None
else None,
pooling_mode=child.pooling_mode,
device="cpu" if use_cpu else torch.cuda.current_device(),
weight_lists=weight_lists,
use_array_for_index_remapping=self.use_array_for_index_remapping,
)
setattr(model, name, q_child)
else:
self._process_split_embs(child)
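# Illustrative sketch (assumption, not part of this file): converting a trained model whose
# children include SplitTableBatchedEmbeddingBagsCodegen tables into its pruned/quantized
# inference form. `trained_model` is a hypothetical nn.Module supplied by the caller.
def convert_for_inference(trained_model: nn.Module) -> nn.Module:
    converter = SplitEmbInferenceConverter(
        quantize_type=SparseType.INT8,  # store each row as fused 8-bit values plus scale/bias
        pruning_ratio=0.5,  # keep roughly half of the rows per table, ranked by L2 norm
    )
    # Each SplitTableBatchedEmbeddingBagsCodegen child is replaced in place by an
    # IntNBitTableBatchedEmbeddingBagsCodegen holding the pruned, quantized weights.
    return converter.convert_model(trained_model)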
|
PypiClean
|
/azure-mgmt-containerregistry-10.1.0.zip/azure-mgmt-containerregistry-10.1.0/azure/mgmt/containerregistry/v2021_12_01_preview/aio/operations/_replications_operations.py
|
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._replications_operations import (
build_create_request,
build_delete_request,
build_get_request,
build_list_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReplicationsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerregistry.v2021_12_01_preview.aio.ContainerRegistryManagementClient`'s
:attr:`replications` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, resource_group_name: str, registry_name: str, **kwargs: Any) -> AsyncIterable["_models.Replication"]:
"""Lists all the replications for the specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Replication or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
:raises ~azure.core.exceptions.HttpResponseError:
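        Example (illustrative sketch, not generated code): assumes an authenticated
        ``ContainerRegistryManagementClient`` named ``client`` and an enclosing coroutine.
        >>> replications = client.replications.list(
        ...     resource_group_name="my-resource-group", registry_name="myregistry"
        ... )
        >>> async for replication in replications:
        ...     print(replication.name)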
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-12-01-preview")
)
cls: ClsType[_models.ReplicationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReplicationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, registry_name: str, replication_name: str, **kwargs: Any
) -> _models.Replication:
"""Gets the properties of the specified replication.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:param replication_name: The name of the replication. Required.
:type replication_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Replication or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-12-01-preview")
)
cls: ClsType[_models.Replication] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Replication", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}"
}
async def _create_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: Union[_models.Replication, IO],
**kwargs: Any
) -> _models.Replication:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-12-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Replication] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(replication, (IO, bytes)):
_content = replication
else:
_json = self._serialize.body(replication, "Replication")
request = build_create_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Replication", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("Replication", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}"
}
@overload
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: _models.Replication,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Replication]:
"""Creates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:param replication_name: The name of the replication. Required.
:type replication_name: str
:param replication: The parameters for creating a replication. Required.
:type replication: ~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Replication]:
"""Creates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:param replication_name: The name of the replication. Required.
:type replication_name: str
:param replication: The parameters for creating a replication. Required.
:type replication: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: Union[_models.Replication, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.Replication]:
"""Creates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:param replication_name: The name of the replication. Required.
:type replication_name: str
        :param replication: The parameters for creating a replication. Is either a model type or an IO
type. Required.
:type replication: ~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-12-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Replication] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
replication=replication,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("Replication", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}"
}
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, registry_name: str, replication_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-12-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}"
}
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, registry_name: str, replication_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a replication from a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:param replication_name: The name of the replication. Required.
:type replication_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-12-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}"
}
async def _update_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication_update_parameters: Union[_models.ReplicationUpdateParameters, IO],
**kwargs: Any
) -> _models.Replication:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-12-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Replication] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(replication_update_parameters, (IO, bytes)):
_content = replication_update_parameters
else:
_json = self._serialize.body(replication_update_parameters, "ReplicationUpdateParameters")
request = build_update_request(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Replication", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("Replication", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}"
}
@overload
async def begin_update(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication_update_parameters: _models.ReplicationUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Replication]:
"""Updates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:param replication_name: The name of the replication. Required.
:type replication_name: str
:param replication_update_parameters: The parameters for updating a replication. Required.
:type replication_update_parameters:
~azure.mgmt.containerregistry.v2021_12_01_preview.models.ReplicationUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication_update_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Replication]:
"""Updates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:param replication_name: The name of the replication. Required.
:type replication_name: str
:param replication_update_parameters: The parameters for updating a replication. Required.
:type replication_update_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication_update_parameters: Union[_models.ReplicationUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.Replication]:
"""Updates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs. Required.
:type resource_group_name: str
:param registry_name: The name of the container registry. Required.
:type registry_name: str
:param replication_name: The name of the replication. Required.
:type replication_name: str
:param replication_update_parameters: The parameters for updating a replication. Is either a
model type or an IO type. Required.
:type replication_update_parameters:
~azure.mgmt.containerregistry.v2021_12_01_preview.models.ReplicationUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-12-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-12-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Replication] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
replication_update_parameters=replication_update_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("Replication", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}"
}
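# Hedged usage sketch (not part of the generated client above): how a caller might
# drive begin_update with the versioned async client. The subscription, resource
# group, registry, and replication names below are illustrative placeholders.
#
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.containerregistry.v2021_12_01_preview.aio import ContainerRegistryManagementClient
# from azure.mgmt.containerregistry.v2021_12_01_preview.models import ReplicationUpdateParameters
#
# async def update_replication_tags() -> None:
#     async with DefaultAzureCredential() as credential:
#         async with ContainerRegistryManagementClient(credential, "<subscription-id>") as client:
#             poller = await client.replications.begin_update(
#                 resource_group_name="my-rg",
#                 registry_name="myregistry",
#                 replication_name="westus",
#                 replication_update_parameters=ReplicationUpdateParameters(tags={"env": "prod"}),
#             )
#             # Awaiting result() polls the LRO until the replication reaches a terminal state.
#             replication = await poller.result()
#             print(replication.provisioning_state)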
/jingan-0.0.3-py3-none-any.whl/torch_utils/custom_ops.py
import os
import glob
import torch
import torch.utils.cpp_extension
import importlib
import hashlib
import shutil
from pathlib import Path
from torch.utils.file_baton import FileBaton
#----------------------------------------------------------------------------
# Global options.
verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
patterns = [
'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
]
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if len(matches):
return matches[-1]
return None
#----------------------------------------------------------------------------
# Main entry point for compiling and loading C++/CUDA plugins.
_cached_plugins = dict()
def get_plugin(module_name, sources, **build_kwargs):
assert verbosity in ['none', 'brief', 'full']
# Already cached?
if module_name in _cached_plugins:
return _cached_plugins[module_name]
# Print status.
if verbosity == 'full':
print(f'Setting up PyTorch plugin "{module_name}"...')
elif verbosity == 'brief':
print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
try: # pylint: disable=too-many-nested-blocks
# Make sure we can find the necessary compiler binaries.
if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
compiler_bindir = _find_compiler_bindir()
if compiler_bindir is None:
raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
os.environ['PATH'] += ';' + compiler_bindir
# Compile and load.
verbose_build = (verbosity == 'full')
# Incremental build md5sum trickery. Copies all the input source files
# into a cached build directory under a combined md5 digest of the input
# source files. Copying is done only if the combined digest has changed.
# This keeps input file timestamps and filenames the same as in previous
# extension builds, allowing for fast incremental rebuilds.
#
# This optimization is done only in case all the source files reside in
# a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
# environment variable is set (we take this as a signal that the user
# actually cares about this.)
source_dirs_set = set(os.path.dirname(source) for source in sources)
if len(source_dirs_set) == 1 and ('TORCH_EXTENSIONS_DIR' in os.environ):
all_source_files = sorted(list(x for x in Path(list(source_dirs_set)[0]).iterdir() if x.is_file()))
# Compute a combined hash digest for all source files in the same
# custom op directory (usually .cu, .cpp, .py and .h files).
hash_md5 = hashlib.md5()
for src in all_source_files:
with open(src, 'rb') as f:
hash_md5.update(f.read())
build_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest())
if not os.path.isdir(digest_build_dir):
os.makedirs(digest_build_dir, exist_ok=True)
baton = FileBaton(os.path.join(digest_build_dir, 'lock'))
if baton.try_acquire():
try:
for src in all_source_files:
shutil.copyfile(src, os.path.join(digest_build_dir, os.path.basename(src)))
finally:
baton.release()
else:
# Someone else is copying source files under the digest dir,
# wait until done and continue.
baton.wait()
digest_sources = [os.path.join(digest_build_dir, os.path.basename(x)) for x in sources]
torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir,
verbose=verbose_build, sources=digest_sources, **build_kwargs)
else:
torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
module = importlib.import_module(module_name)
except:
if verbosity == 'brief':
print('Failed!')
raise
# Print status and add to cache.
if verbosity == 'full':
print(f'Done setting up PyTorch plugin "{module_name}".')
elif verbosity == 'brief':
print('Done.')
_cached_plugins[module_name] = module
return module
#----------------------------------------------------------------------------
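# Hedged usage sketch: how a caller might build and load a custom CUDA op via
# get_plugin(). The module name and source file names are illustrative, the
# .cpp/.cu files are assumed to live next to the calling module, and any extra
# build kwargs are forwarded verbatim to torch.utils.cpp_extension.load().
#
# import os
#
# def _load_example_plugin():
#     source_dir = os.path.dirname(__file__)
#     sources = [os.path.join(source_dir, name) for name in ('example_op.cpp', 'example_op.cu')]
#     return get_plugin(module_name='example_op_plugin', sources=sources,
#                       extra_cuda_cflags=['--use_fast_math'])
#
# The returned object is the imported extension module whose functions are the
# pybind11 bindings defined in the C++/CUDA sources.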
/cdk_extensions-0.0.93-py3-none-any.whl/cdk_extensions/rds/__init__.py
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import aws_cdk as _aws_cdk_ceddda9d
import aws_cdk.aws_ec2 as _aws_cdk_aws_ec2_ceddda9d
import aws_cdk.aws_rds as _aws_cdk_aws_rds_ceddda9d
import constructs as _constructs_77d1e7e8
@jsii.implements(_aws_cdk_aws_ec2_ceddda9d.IConnectable)
class DatabaseProxyEndpoint(
_aws_cdk_ceddda9d.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="cdk-extensions.rds.DatabaseProxyEndpoint",
):
def __init__(
self,
scope: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
database_proxy: _aws_cdk_aws_rds_ceddda9d.IDatabaseProxy,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
access: typing.Optional["DatabaseProxyEndpointAccess"] = None,
name: typing.Optional[builtins.str] = None,
security_groups: typing.Optional[typing.Sequence[_aws_cdk_aws_ec2_ceddda9d.ISecurityGroup]] = None,
vpc_subnets: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
account: typing.Optional[builtins.str] = None,
environment_from_arn: typing.Optional[builtins.str] = None,
physical_name: typing.Optional[builtins.str] = None,
region: typing.Optional[builtins.str] = None,
) -> None:
'''
:param scope: -
:param id: -
:param database_proxy:
:param vpc:
:param access:
:param name:
:param security_groups:
:param vpc_subnets:
:param account: The AWS account ID this resource belongs to. Default: - the resource is in the same account as the stack it belongs to
:param environment_from_arn: ARN to deduce region and account from. The ARN is parsed and the account and region are taken from the ARN. This should be used for imported resources. Cannot be supplied together with either ``account`` or ``region``. Default: - take environment from ``account``, ``region`` parameters, or use Stack environment.
:param physical_name: The value passed in by users to the physical name prop of the resource. - ``undefined`` implies that a physical name will be allocated by CloudFormation during deployment. - a concrete value implies a specific physical name - ``PhysicalName.GENERATE_IF_NEEDED`` is a marker that indicates that a physical name will only be generated by the CDK if it is needed for cross-environment references. Otherwise, it will be allocated by CloudFormation. Default: - The physical name will be allocated by CloudFormation at deployment time
:param region: The AWS region this resource belongs to. Default: - the resource is in the same region as the stack it belongs to
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__195ada8b5a01eb7b807a1d48932aa3008d7c3928d8f75490e4c8fc1910ef3d43)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = DatabaseProxyEndpointProps(
database_proxy=database_proxy,
vpc=vpc,
access=access,
name=name,
security_groups=security_groups,
vpc_subnets=vpc_subnets,
account=account,
environment_from_arn=environment_from_arn,
physical_name=physical_name,
region=region,
)
jsii.create(self.__class__, self, [scope, id, props])
@builtins.property
@jsii.member(jsii_name="connections")
def connections(self) -> _aws_cdk_aws_ec2_ceddda9d.Connections:
'''The network connections associated with this resource.'''
return typing.cast(_aws_cdk_aws_ec2_ceddda9d.Connections, jsii.get(self, "connections"))
@builtins.property
@jsii.member(jsii_name="databaseProxy")
def database_proxy(self) -> _aws_cdk_aws_rds_ceddda9d.IDatabaseProxy:
return typing.cast(_aws_cdk_aws_rds_ceddda9d.IDatabaseProxy, jsii.get(self, "databaseProxy"))
@builtins.property
@jsii.member(jsii_name="databaseProxyEndpointArn")
def database_proxy_endpoint_arn(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "databaseProxyEndpointArn"))
@builtins.property
@jsii.member(jsii_name="databaseProxyEndpointHost")
def database_proxy_endpoint_host(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "databaseProxyEndpointHost"))
@builtins.property
@jsii.member(jsii_name="databaseProxyEndpointIsDefault")
def database_proxy_endpoint_is_default(self) -> _aws_cdk_ceddda9d.IResolvable:
return typing.cast(_aws_cdk_ceddda9d.IResolvable, jsii.get(self, "databaseProxyEndpointIsDefault"))
@builtins.property
@jsii.member(jsii_name="databaseProxyEndpointVpcId")
def database_proxy_endpoint_vpc_id(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "databaseProxyEndpointVpcId"))
@builtins.property
@jsii.member(jsii_name="resource")
def resource(self) -> _aws_cdk_aws_rds_ceddda9d.CfnDBProxyEndpoint:
return typing.cast(_aws_cdk_aws_rds_ceddda9d.CfnDBProxyEndpoint, jsii.get(self, "resource"))
@builtins.property
@jsii.member(jsii_name="vpc")
def vpc(self) -> _aws_cdk_aws_ec2_ceddda9d.IVpc:
return typing.cast(_aws_cdk_aws_ec2_ceddda9d.IVpc, jsii.get(self, "vpc"))
@builtins.property
@jsii.member(jsii_name="vpcSubnets")
def vpc_subnets(self) -> _aws_cdk_aws_ec2_ceddda9d.SubnetSelection:
return typing.cast(_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, jsii.get(self, "vpcSubnets"))
@builtins.property
@jsii.member(jsii_name="access")
def access(self) -> typing.Optional["DatabaseProxyEndpointAccess"]:
return typing.cast(typing.Optional["DatabaseProxyEndpointAccess"], jsii.get(self, "access"))
@builtins.property
@jsii.member(jsii_name="name")
def name(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "name"))
class DatabaseProxyEndpointAccess(
metaclass=jsii.JSIIMeta,
jsii_type="cdk-extensions.rds.DatabaseProxyEndpointAccess",
):
@jsii.member(jsii_name="of")
@builtins.classmethod
def of(cls, role: builtins.str) -> "DatabaseProxyEndpointAccess":
'''
:param role: -
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__ec1e97f3090817e05279b08d6d3d4ec756c2bb8d03642b7788404c4d2d9a13dc)
check_type(argname="argument role", value=role, expected_type=type_hints["role"])
return typing.cast("DatabaseProxyEndpointAccess", jsii.sinvoke(cls, "of", [role]))
@jsii.python.classproperty
@jsii.member(jsii_name="READ_ONLY")
def READ_ONLY(cls) -> "DatabaseProxyEndpointAccess":
return typing.cast("DatabaseProxyEndpointAccess", jsii.sget(cls, "READ_ONLY"))
@jsii.python.classproperty
@jsii.member(jsii_name="READ_WRITE")
def READ_WRITE(cls) -> "DatabaseProxyEndpointAccess":
return typing.cast("DatabaseProxyEndpointAccess", jsii.sget(cls, "READ_WRITE"))
@builtins.property
@jsii.member(jsii_name="role")
def role(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "role"))
@jsii.data_type(
jsii_type="cdk-extensions.rds.DatabaseProxyEndpointProps",
jsii_struct_bases=[_aws_cdk_ceddda9d.ResourceProps],
name_mapping={
"account": "account",
"environment_from_arn": "environmentFromArn",
"physical_name": "physicalName",
"region": "region",
"database_proxy": "databaseProxy",
"vpc": "vpc",
"access": "access",
"name": "name",
"security_groups": "securityGroups",
"vpc_subnets": "vpcSubnets",
},
)
class DatabaseProxyEndpointProps(_aws_cdk_ceddda9d.ResourceProps):
def __init__(
self,
*,
account: typing.Optional[builtins.str] = None,
environment_from_arn: typing.Optional[builtins.str] = None,
physical_name: typing.Optional[builtins.str] = None,
region: typing.Optional[builtins.str] = None,
database_proxy: _aws_cdk_aws_rds_ceddda9d.IDatabaseProxy,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
access: typing.Optional[DatabaseProxyEndpointAccess] = None,
name: typing.Optional[builtins.str] = None,
security_groups: typing.Optional[typing.Sequence[_aws_cdk_aws_ec2_ceddda9d.ISecurityGroup]] = None,
vpc_subnets: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
'''
:param account: The AWS account ID this resource belongs to. Default: - the resource is in the same account as the stack it belongs to
:param environment_from_arn: ARN to deduce region and account from. The ARN is parsed and the account and region are taken from the ARN. This should be used for imported resources. Cannot be supplied together with either ``account`` or ``region``. Default: - take environment from ``account``, ``region`` parameters, or use Stack environment.
:param physical_name: The value passed in by users to the physical name prop of the resource. - ``undefined`` implies that a physical name will be allocated by CloudFormation during deployment. - a concrete value implies a specific physical name - ``PhysicalName.GENERATE_IF_NEEDED`` is a marker that indicates that a physical name will only be generated by the CDK if it is needed for cross-environment references. Otherwise, it will be allocated by CloudFormation. Default: - The physical name will be allocated by CloudFormation at deployment time
:param region: The AWS region this resource belongs to. Default: - the resource is in the same region as the stack it belongs to
:param database_proxy:
:param vpc:
:param access:
:param name:
:param security_groups:
:param vpc_subnets:
'''
if isinstance(vpc_subnets, dict):
vpc_subnets = _aws_cdk_aws_ec2_ceddda9d.SubnetSelection(**vpc_subnets)
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__19c3f0008b43c9b8508101d52f7ea2c02aa18d521c93f21caf39b1303afa2fda)
check_type(argname="argument account", value=account, expected_type=type_hints["account"])
check_type(argname="argument environment_from_arn", value=environment_from_arn, expected_type=type_hints["environment_from_arn"])
check_type(argname="argument physical_name", value=physical_name, expected_type=type_hints["physical_name"])
check_type(argname="argument region", value=region, expected_type=type_hints["region"])
check_type(argname="argument database_proxy", value=database_proxy, expected_type=type_hints["database_proxy"])
check_type(argname="argument vpc", value=vpc, expected_type=type_hints["vpc"])
check_type(argname="argument access", value=access, expected_type=type_hints["access"])
check_type(argname="argument name", value=name, expected_type=type_hints["name"])
check_type(argname="argument security_groups", value=security_groups, expected_type=type_hints["security_groups"])
check_type(argname="argument vpc_subnets", value=vpc_subnets, expected_type=type_hints["vpc_subnets"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"database_proxy": database_proxy,
"vpc": vpc,
}
if account is not None:
self._values["account"] = account
if environment_from_arn is not None:
self._values["environment_from_arn"] = environment_from_arn
if physical_name is not None:
self._values["physical_name"] = physical_name
if region is not None:
self._values["region"] = region
if access is not None:
self._values["access"] = access
if name is not None:
self._values["name"] = name
if security_groups is not None:
self._values["security_groups"] = security_groups
if vpc_subnets is not None:
self._values["vpc_subnets"] = vpc_subnets
@builtins.property
def account(self) -> typing.Optional[builtins.str]:
'''The AWS account ID this resource belongs to.
:default: - the resource is in the same account as the stack it belongs to
'''
result = self._values.get("account")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def environment_from_arn(self) -> typing.Optional[builtins.str]:
'''ARN to deduce region and account from.
The ARN is parsed and the account and region are taken from the ARN.
This should be used for imported resources.
Cannot be supplied together with either ``account`` or ``region``.
:default: - take environment from ``account``, ``region`` parameters, or use Stack environment.
'''
result = self._values.get("environment_from_arn")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def physical_name(self) -> typing.Optional[builtins.str]:
'''The value passed in by users to the physical name prop of the resource.
- ``undefined`` implies that a physical name will be allocated by
CloudFormation during deployment.
- a concrete value implies a specific physical name
- ``PhysicalName.GENERATE_IF_NEEDED`` is a marker that indicates that a physical name will only be generated
by the CDK if it is needed for cross-environment references. Otherwise, it will be allocated by CloudFormation.
:default: - The physical name will be allocated by CloudFormation at deployment time
'''
result = self._values.get("physical_name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def region(self) -> typing.Optional[builtins.str]:
'''The AWS region this resource belongs to.
:default: - the resource is in the same region as the stack it belongs to
'''
result = self._values.get("region")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def database_proxy(self) -> _aws_cdk_aws_rds_ceddda9d.IDatabaseProxy:
result = self._values.get("database_proxy")
assert result is not None, "Required property 'database_proxy' is missing"
return typing.cast(_aws_cdk_aws_rds_ceddda9d.IDatabaseProxy, result)
@builtins.property
def vpc(self) -> _aws_cdk_aws_ec2_ceddda9d.IVpc:
result = self._values.get("vpc")
assert result is not None, "Required property 'vpc' is missing"
return typing.cast(_aws_cdk_aws_ec2_ceddda9d.IVpc, result)
@builtins.property
def access(self) -> typing.Optional[DatabaseProxyEndpointAccess]:
result = self._values.get("access")
return typing.cast(typing.Optional[DatabaseProxyEndpointAccess], result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def security_groups(
self,
) -> typing.Optional[typing.List[_aws_cdk_aws_ec2_ceddda9d.ISecurityGroup]]:
result = self._values.get("security_groups")
return typing.cast(typing.Optional[typing.List[_aws_cdk_aws_ec2_ceddda9d.ISecurityGroup]], result)
@builtins.property
def vpc_subnets(self) -> typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection]:
result = self._values.get("vpc_subnets")
return typing.cast(typing.Optional[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DatabaseProxyEndpointProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"DatabaseProxyEndpoint",
"DatabaseProxyEndpointAccess",
"DatabaseProxyEndpointProps",
]
publication.publish()
def _typecheckingstub__195ada8b5a01eb7b807a1d48932aa3008d7c3928d8f75490e4c8fc1910ef3d43(
scope: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
database_proxy: _aws_cdk_aws_rds_ceddda9d.IDatabaseProxy,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
access: typing.Optional[DatabaseProxyEndpointAccess] = None,
name: typing.Optional[builtins.str] = None,
security_groups: typing.Optional[typing.Sequence[_aws_cdk_aws_ec2_ceddda9d.ISecurityGroup]] = None,
vpc_subnets: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
account: typing.Optional[builtins.str] = None,
environment_from_arn: typing.Optional[builtins.str] = None,
physical_name: typing.Optional[builtins.str] = None,
region: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__ec1e97f3090817e05279b08d6d3d4ec756c2bb8d03642b7788404c4d2d9a13dc(
role: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__19c3f0008b43c9b8508101d52f7ea2c02aa18d521c93f21caf39b1303afa2fda(
*,
account: typing.Optional[builtins.str] = None,
environment_from_arn: typing.Optional[builtins.str] = None,
physical_name: typing.Optional[builtins.str] = None,
region: typing.Optional[builtins.str] = None,
database_proxy: _aws_cdk_aws_rds_ceddda9d.IDatabaseProxy,
vpc: _aws_cdk_aws_ec2_ceddda9d.IVpc,
access: typing.Optional[DatabaseProxyEndpointAccess] = None,
name: typing.Optional[builtins.str] = None,
security_groups: typing.Optional[typing.Sequence[_aws_cdk_aws_ec2_ceddda9d.ISecurityGroup]] = None,
vpc_subnets: typing.Optional[typing.Union[_aws_cdk_aws_ec2_ceddda9d.SubnetSelection, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
"""Type checking stubs"""
pass
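# Hedged usage sketch (not part of the generated bindings above): constructing a
# read-only proxy endpoint inside a CDK stack. The stack name, the imported
# proxy's attributes, and the VPC are illustrative placeholders.
#
# import aws_cdk as cdk
# import aws_cdk.aws_ec2 as ec2
# import aws_cdk.aws_rds as rds
# from constructs import Construct
# from cdk_extensions.rds import DatabaseProxyEndpoint, DatabaseProxyEndpointAccess
#
# class ExampleStack(cdk.Stack):
#     def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
#         super().__init__(scope, construct_id, **kwargs)
#         vpc = ec2.Vpc(self, "Vpc")
#         proxy = rds.DatabaseProxy.from_db_proxy_attributes(
#             self, "Proxy",
#             db_proxy_arn="arn:aws:rds:us-east-1:111111111111:db-proxy:prx-example",
#             db_proxy_name="example-proxy",
#             endpoint="example-proxy.proxy-abcdefghijkl.us-east-1.rds.amazonaws.com",
#             security_groups=[],
#         )
#         DatabaseProxyEndpoint(
#             self, "ReadOnlyEndpoint",
#             database_proxy=proxy,
#             vpc=vpc,
#             access=DatabaseProxyEndpointAccess.READ_ONLY,
#         )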
/berk-0.1.tar.gz/berk-0.1/versioneer.py
# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error
# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with
# pylint:disable=attribute-defined-outside-init,too-many-arguments
import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
my_path = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(my_path)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(my_path), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise OSError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
with open(setup_cfg, "r") as cfg_file:
parser.read_file(cfg_file)
VCS = parser.get("versioneer", "VCS") # mandatory
# Dict-like interface for non-mandatory entries
section = parser["versioneer"]
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = section.get("style", "")
cfg.versionfile_source = section.get("versionfile_source")
cfg.versionfile_build = section.get("versionfile_build")
cfg.tag_prefix = section.get("tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = section.get("parentdir_prefix")
cfg.verbose = section.get("verbose")
return cfg
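# Hedged example of the [versioneer] section this function expects in setup.cfg;
# the concrete values are illustrative for a project laid out like this one:
#
#     [versioneer]
#     VCS = git
#     style = pep440
#     versionfile_source = berk/_version.py
#     versionfile_build = berk/_version.py
#     tag_prefix = v
#     parentdir_prefix = berk-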
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
HANDLERS.setdefault(vcs, {})[method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
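# Hedged call sketch: run_command() tries each candidate binary in turn and
# returns (stdout, returncode), or (None, None)/(None, rc) on failure, e.g.:
#
#     stdout, rc = run_command(["git"], ["rev-parse", "--short", "HEAD"], cwd=".")
#     if rc == 0:
#         print("current commit:", stdout)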
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.21 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory.
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
TAG_PREFIX_REGEX = "*"
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
TAG_PREFIX_REGEX = r"\*"
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match",
"%%s%%s" %% (tag_prefix, TAG_PREFIX_REGEX)],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
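# Hedged illustration of how the template above is instantiated: versioneer's
# build/install hooks fill the %(...)s placeholders from setup.cfg values, and
# the %% escapes collapse back to single % signs in the generated _version.py.
# The concrete values below are illustrative.
#
# _generated_version_py = LONG_VERSION_PY["git"] % {
#     "DOLLAR": "$",
#     "STYLE": "pep440",
#     "TAG_PREFIX": "v",
#     "PARENTDIR_PREFIX": "berk-",
#     "VERSIONFILE_SOURCE": "berk/_version.py",
# }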
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
TAG_PREFIX_REGEX = "*"
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
TAG_PREFIX_REGEX = r"\*"
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match",
"%s%s" % (tag_prefix, TAG_PREFIX_REGEX)],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
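# Hedged example of the dict returned above for a typical tagged checkout; the
# hash, tag, branch, and date are illustrative placeholders.
#
# pieces = {
#     "long": "1076c978a8d3cfc70f2f9d2d9f32bbae6af65c4e",
#     "short": "1076c97",
#     "branch": "master",
#     "closest-tag": "0.1",   # tag_prefix already stripped
#     "distance": 3,          # commits since the tag
#     "dirty": False,
#     "error": None,
#     "date": "2021-01-01T12:00:00+0000",
# }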
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
my_path = __file__
if my_path.endswith(".pyc") or my_path.endswith(".pyo"):
my_path = os.path.splitext(my_path)[0] + ".py"
versioneer_file = os.path.relpath(my_path)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
with open(".gitattributes", "r") as fobj:
for line in fobj:
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
break
except OSError:
pass
if not present:
with open(".gitattributes", "a+") as fobj:
fobj.write(f"{versionfile_source} export-subst\n")
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.21) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except OSError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
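# Illustrative examples (assumed inputs, not from the original source):
#   {"closest-tag": "1.2", "distance": 3, "short": "abc1234", "dirty": False} -> "1.2+3.gabc1234"
#   {"closest-tag": "1.2", "distance": 0, "short": "abc1234", "dirty": True}  -> "1.2+0.gabc1234.dirty"
#   {"closest-tag": None, "distance": 7, "short": "abc1234", "dirty": False}  -> "0+untagged.7.gabc1234"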
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or None if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version+1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
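# Illustrative examples (assumed inputs, not from the original source):
#   {"closest-tag": "1.2", "distance": 3}       -> "1.2.post0.dev3"
#   {"closest-tag": "1.2.post4", "distance": 3} -> "1.2.post5.dev3"
#   {"closest-tag": None, "distance": 7}        -> "0.post0.dev7"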
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyway.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass=None):
"""Get the custom setuptools/distutils subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provided as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if 'build_py' in cmds:
_build_py = cmds['build_py']
elif "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if 'build_ext' in cmds:
_build_ext = cmds['build_ext']
elif "setuptools" in sys.modules:
from setuptools.command.build_ext import build_ext as _build_ext
else:
from distutils.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
from py2exe.distutils_buildexe import py2exe as _py2exe
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if 'sdist' in cmds:
_sdist = cmds['sdist']
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
OLD_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
INIT_PY_SNIPPET = """
from . import {0}
__version__ = {0}.get_versions()['version']
"""
def do_setup():
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (OSError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (OSError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except OSError:
old = ""
module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
snippet = INIT_PY_SNIPPET.format(module)
if OLD_SNIPPET in old:
print(" replacing boilerplate in %s" % ipy)
with open(ipy, "w") as f:
f.write(old.replace(OLD_SNIPPET, snippet))
elif snippet not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(snippet)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except OSError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
PypiClean
|
/easy-menu-1.3.1.tar.gz/easy-menu-1.3.1/README.rst
|
=========
Easy Menu
=========
Super Simple Terminal Command Launcher Generator
.. image:: https://badge.fury.io/py/easy-menu.svg
:target: http://badge.fury.io/py/easy-menu
:alt: PyPI version
.. image:: https://travis-ci.org/mogproject/easy-menu.svg?branch=master
:target: https://travis-ci.org/mogproject/easy-menu
:alt: Build Status
.. image:: https://ci.appveyor.com/api/projects/status/tcw4sabh96fl7u2y/branch/master?svg=true
:target: https://ci.appveyor.com/project/mogproject/easy-menu/branch/master
:alt: Build Status
.. image:: https://coveralls.io/repos/mogproject/easy-menu/badge.svg?branch=master&service=github
:target: https://coveralls.io/github/mogproject/easy-menu?branch=master
:alt: Coverage Status
.. image:: https://img.shields.io/badge/license-Apache%202.0-blue.svg
:target: http://choosealicense.com/licenses/apache-2.0/
:alt: License
.. image:: https://badge.waffle.io/mogproject/easy-menu.svg?label=ready&title=Ready
:target: https://waffle.io/mogproject/easy-menu
:alt: 'Stories in Ready'
--------
Features
--------
*Simplify your daily terminal operations!*
Do you have routine tasks such as logging in to your servers, deploying, or troubleshooting?
This small tool helps speed up your operations and prevent human error.
It should be helpful not only for seasoned engineers but also for less experienced operators.
.. image:: https://raw.githubusercontent.com/wiki/mogproject/easy-menu/img/demo.gif
------------
Dependencies
------------
* Python: 2.6 / 2.7 / 3.2 / 3.3 / 3.4 / 3.5
* pyyaml
* six
* jinja2 == 2.6
* `mog-commons <https://github.com/mogproject/mog-commons-python>`_
----------
Quickstart
----------
You can try Easy Menu with just two command lines.
::
pip install easy-menu
easy-menu http://git.io/vGWla
Note: ``http://git.io/vGWla`` will redirect to ``https://raw.githubusercontent.com/mogproject/easy-menu/master/easy-menu.example.yml``
------------
Installation
------------
* ``pip`` command may need ``sudo``
+-------------------------+---------------------------------------+
| Operation | Command |
+=========================+=======================================+
| Install |``pip install easy-menu`` |
+-------------------------+---------------------------------------+
| Upgrade |``pip install --upgrade easy-menu`` |
+-------------------------+---------------------------------------+
| Uninstall |``pip uninstall easy-menu`` |
+-------------------------+---------------------------------------+
| Check installed version |``easy-menu --version`` |
+-------------------------+---------------------------------------+
| Help |``easy-menu -h`` |
+-------------------------+---------------------------------------+
* Then, write your configuration to the file ``easy-menu.yml``.
See an example below.
---------------------
Configuration Example
---------------------
``easy-menu.example.yml``::
Main Menu:
- Service health check: "echo Condition all green!"
- Check hardware resources: "echo Hardware resources OK."
- Server Login Menu:
- Login to server-1: "echo logging into server-1"
- Login to server-2: "echo logging into server-2"
- Login to server-3: "echo logging into server-3"
- Web Service Management Menu:
- Check the status of web service: "echo Check web service status"
- Start web service: "echo Start web service"
- Stop web service: "echo Stop web service"
- Reboot this server: "echo Reboot OS"
Each menu (i.e. the root menu and any sub menu) and each item is represented as a *Mapping* that contains just one key-value pair.
If its value is a *Sequence*, a sub menu is generated.
The general syntax is as follows.
::
meta: # Some meta variables are available
META_KEY: META_VALUE
ROOT_MENU_TITLE:
- ITEM_DESCRIPTION: COMMAND
- ITEM_DESCRIPTION: COMMAND
- SUB_MENU_TITLE: # You can create sub menu if you need.
- ITEM_DESCRIPTION: COMMAND
- ITEM_DESCRIPTION: COMMAND
- SUB_MENU_TITLE: # More nested menu
- ITEM_DESCRIPTION: COMMAND
- ITEM_DESCRIPTION: COMMAND
- ITEM_DESCRIPTION: # You can write a list of command lines for one item.
- COMMAND1
- COMMAND2
- COMMAND3
- include: INCLUDE_FILE_PATH # The "include" keyword loads
# another configuration file.
- eval: COMMAND # The "eval" keyword executes a command line
# and uses its output as the configuration YAML string.
Remember that, by default, these commands are executed after changing the current directory to the directory that holds the configuration file.
You can find more examples in `this directory <https://github.com/mogproject/easy-menu/tree/master/tests/resources>`_.
-----------
Lookup Path
-----------
Similar to `Vagrant <https://docs.vagrantup.com/v2/vagrantfile/>`_, when you run any ``easy-menu`` command, Easy Menu climbs up the directory tree looking for the first ``easy-menu.yml`` it can find, starting first in the current directory.
So if you run ``easy-menu`` in ``/home/mogproject/projects/foo``, it will search the following paths in order for an ``easy-menu.yml``, until it finds one:
::
/home/mogproject/projects/foo/easy-menu.yml
/home/mogproject/projects/easy-menu.yml
/home/mogproject/easy-menu.yml
/home/easy-menu.yml
/easy-menu.yml
This feature lets you run ``easy-menu`` from any directory in your project.
You can change the default name of the configuration file by setting the ``EASY_MENU_CONFIG`` environment variable to some other name.
-------------
Audit Logging
-------------
Any time you execute a command, the result (return code) is recorded to your operating system's syslog facility.
This feature is not available on Windows.
Example::
Aug 31 00:09:59 ullr.local easy-menu[28802]: [INFO] Command started: echo Condition all green!
Aug 31 00:09:59 ullr.local easy-menu[28802]: [INFO] Command ended with return code: 0
----------
Go further
----------
Need more help?
* `Advanced Configuration <https://github.com/mogproject/easy-menu/wiki/AdvancedConfiguration>`_
* `Troubleshooting <https://github.com/mogproject/easy-menu/wiki/Troubleshooting>`_
* `Add issue <https://waffle.io/mogproject/easy-menu>`_
* Looking for legacy version? Please refer to `v0.0 <https://github.com/mogproject/easy-menu/tree/v0.0>`_.
|
PypiClean
|
/simono-especificaciones-0.1.8.tar.gz/simono-especificaciones-0.1.8/simono_especificaciones/sensor.py
|
import logging
import json
from datetime import datetime
from apps.especificaciones import especificaciones as EspecificacionesApp
from apps.funciones import funciones as FuncionesApp
from apps.json.json import JsonManager
from configuraciones.general_texto_spa import *
from modelos.models import db, Registro, RegistroInterno
log = logging.getLogger(__name__)
class Sensor(object):
"""
Sensor class, the basic structure for all sensors.
"""
@classmethod
def get_codigo_sensor(cls):
return "GENERICO"
@classmethod
def get_codigo_especificacion(cls):
"""Code assigned in Especificaciones."""
return -1
@classmethod
def get_especificacion(cls):
"""
Returns the specific Sensor code defined in Especificaciones.
"""
for codigo in EspecificacionesApp.CODES_SENSORES:
print(EspecificacionesApp.CODES_SENSORES[codigo]["clase"], cls.get_codigo_especificacion())
if EspecificacionesApp.CODES_SENSORES[codigo]["clase"] == cls.get_codigo_especificacion():
return EspecificacionesApp.CODES_SENSORES[codigo]
return None
@classmethod
def esActuador(cls):
return False
def __init__(self):
self.id_sensor = 0
self.guid_sensor = ""
self.id_modulo = 0
self.pin = 0
self.nombre = "Nombre Sensor"
self.estado = False # the sensor is disabled by default
self.tipo_actualizacion = ""
self.tiempo = None
self.variacion = None
self.fe_registrado = None
self.valor_max = None
self.valor_min = None
self.codigo = ""
self.descripcion_serie = ""
self.ti_conexion = 1
self.topico_suscripto = ""
def __str__(self):
# s = super(Sensor, self).__str__()
return " Sensor_" + "%%%" + "_" + str(self.id_sensor) + "_" + str(self.id_modulo) + ": " + str(self.nombre)
def set_id_sensor(self, valor):
self.id_sensor = valor
def get_id_sensor(self):
return self.id_sensor
def set_pin(self, valor):
self.pin = valor
def get_pin(self):
return self.pin
def get_nombre(self):
return self.nombre
def activar(self):
self.estado = True
def desactivar(self):
self.estado = False
def get_estado(self):
return self.estado
def get_tipo_act(self):
return self.tipo_actualizacion
def get_tiempo(self):
return self.tiempo
def get_variacion(self):
return self.variacion
def get_fecha_registrado(self):
return self.fe_registrado
def get_valor_max(self):
return self.valor_max
def get_valor_min(self):
return self.valor_min
def set_values(self, dict): # TODO: handle this via Especificaciones
"""
Sets the values of a Sensor from the data in a dictionary.
"""
if dict:
self.id_sensor = dict["id_sensor"]
self.guid_sensor = dict["guid_sensor"]
self.id_modulo = dict["id_modulo"]
self.nombre = dict["nombre_sensor"]
self.pin = dict["pin_sensor"]
self.estado = dict["estado_sensor"]
self.tipo_actualizacion = dict["tipo_actualizacion"]
self.tiempo = dict["tiempo"]
self.variacion = dict["variacion"]
self.valor_max = dict["val_max"]
self.valor_min = dict["val_min"]
self.descripcion_serie = dict["descripcion_serie"]
self.ti_conexion = dict["ti_conexion"]
self.topico_suscripto = dict["topico_suscripto"]
def mostrar(self):
log.info(TXT_ID_SEN + str(self.id_sensor))
log.info(TXT_NOM + self.nombre)
log.info(TXT_PIN_SEN + str(self.pin))
if self.estado:
log.info(TXT_EST + TXT_EST_VAL)
else:
log.info(TXT_EST + TXT_EST_INV)
def mostrar_con_validacion(self, condicion):
log.info(TXT_ID_SEN + str(self.id_sensor))
log.info(TXT_NOM + self.nombre)
log.info(TXT_PIN_SEN + str(self.pin))
if condicion:
log.info(TXT_EST + TXT_EST_V)
else:
log.info(TXT_EST + TXT_EST_I)
def get_json(self, sensorEspecifico, get_data=True, data_mqtt=None):
"""
Returns a JSON object built from the attribute data of a specific Sensor instance.
Parameters:
_sensorEspecifico: Instance of a specific Sensor
_get_data: *bool* indicating whether to read data from the specific sensor
Returns:
_json_instance: *json* instance with the Registro data for a specific Sensor.
"""
if get_data:
if data_mqtt:
sensorEspecifico.valor = float(data_mqtt)
else:
resultado = sensorEspecifico.obtener_datos()
if resultado is None: return None
sensorEspecifico.fe_registrado = str(datetime.now())
json_instance = JsonManager.generateJsonRegistro(self=None, sensorEspecifico=sensorEspecifico)
return json_instance
def desactivar_sensor(self):
result = FuncionesApp.desactivar_sensor(self.id_sensor)
return result
def get_dict_values(self, sensorEspecifico):
"""
Returns a dictionary with the data of all fields for a specific Sensor.
Example:
{'humedad': 10.0, 'modulo_0': None, 'modulo_1': None, 'temperatura': 23.0, 'grados': 'C',
'id_sensor': 3, 'pin': 0, 'nombre': 'Nombre Sensor', 'estado': False, 'tipo_actualizacion': '',
'tiempo': None, 'variacion': None, 'fe_registrado': '2018-02-26 11:44:02.319040', 'valor_max': None,
'valor_min': None, 'codigo': ''}
"""
return sensorEspecifico.__dict__
def registrar(self, sensorEspecifico):
"""
Creates a Registro instance (according to the Model) from a specific Sensor with the help
of Especificaciones.
Parameters:
_sensorEspecifico: Specific Sensor instance from which the Registro will be generated.
"""
registro = Registro()
msj_ok = "A Registro was inserted successfully for " + str(sensorEspecifico)
msj_error = 'A problem occurred while inserting the Registro: '
self.generar_registro(registro=registro, sensorEspecifico=sensorEspecifico, msj_ok=msj_ok, msj_error=msj_error)
def registrar_estado_interno(self, sensorEspecifico, registro):
"""
Updates a RegistroInterno instance (according to the Model) from a specific Sensor with the help
of Especificaciones.
Parameters:
_sensorEspecifico: Specific Sensor instance from which the Registro will be generated.
"""
msj_ok = "The RegistroInterno was updated successfully for: " + str(sensorEspecifico)
msj_error = 'A problem occurred while updating the RegistroInterno for: ' + str(sensorEspecifico)
self.generar_registro(registro=registro, sensorEspecifico=sensorEspecifico, msj_ok=msj_ok, msj_error=msj_error)
def generar_registro(self, sensorEspecifico, registro, msj_ok, msj_error):
"""
Generates a *Registro or RegistroInterno* from a specific Sensor.
Parameters:
_sensorEspecifico:
_registro: Registro or RegistroInterno instance whose values will be set.
_msj_ok: Message for a successful result of the operation.
_msj_error: Message for a failed result of the operation.
Returns:
Nothing; it only logs a message with the result of the operation.
"""
list_fields_sensor = EspecificacionesApp.CODES_SENSORES[sensorEspecifico.get_especificacion()]['fields']
for field_sensor in list_fields_sensor:
try:
field_registro = EspecificacionesApp.PARSEO_REGISTRO[field_sensor]
value = EspecificacionesApp.sensor_gestion_value(sensorEspecifico=sensorEspecifico, field=field_sensor)
except Exception as e:
log.critical('CODE ERROR: k123k - ' + str(e))
continue  # skip this field: field_registro/value are undefined after a failure
if value:
EspecificacionesApp.registro_gestion_value(field=field_registro, value=value, registro=registro,
modo="SET")
db.session.add(registro)
try:
db.session.commit()
log.info(msj_ok)
except Exception as e:
db.session.rollback()
log.error(msj_error + str(e))
def stopThread(self):
""" Encargado de detener hilos que se creen para un sensor en aprticular.
Se redefine en cada sensor especifico"""
#print("stopThread!")
return None
|
PypiClean
|
/inline-importer-0.0.4.tar.gz/inline-importer-0.0.4/README.md
|
# InlineImporter
InlineImporter is a library for python projects that uses the PEP 302 import protocol to inline libraries into a script.
## Why?
Because we can.
But in all seriousness, this came out from spending days managing adhoc scripts that shared a lot of functionality.
For ease of development, it would have been nice to extract the common pieces to a common library, but that would have meant distributing a whole directory and managing import paths on the destination systems versus a single self-contained file.
## How it works
PEP 302 defined a protocol for managing module imports.
The protocol defines two components: `Finder`s and `Loader`s.
The Finder is responsible for, unsurprisingly, finding modules.
If a Finder finds a module, i.e. _knows_ which loader can load a module, it returns a `ModuleSpec`.
This ModuleSpec gives details on some parameters of the module, such as filename and package, and states which Loader can load the module.
The Loader is, as you've guessed, responsible for loading modules into the environment.
It does so by first creating a module object, which the python machinery places into the `sys.modules` dictionary, then executing the module code.
An object that can both Find and Load is called an `Importer`.
InlineImporter works by placing the source code of modules in a dictionary, keyed by module name.
The finder searches the dictionary for a key matching the given module name.
If found, it returns a ModuleSpec with itself listed as the loader.
The loader simply compiles the inlined source code to Python bytecode and executes it, as the normal Python loader does.
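The snippet below is a minimal sketch of that dict-backed import mechanism; the `DictImporter` class and the `greetings` module are illustrative names for this sketch, not part of inline-importer's actual API.

```python
# Minimal sketch of a dict-backed importer (illustrative, not InlineImporter itself).
import importlib.abc
import importlib.util
import sys

INLINED_MODULES = {
    "greetings": "def hello(name):\n    return f'Hello, {name}!'\n",
}

class DictImporter(importlib.abc.MetaPathFinder, importlib.abc.Loader):
    def __init__(self, sources):
        self.sources = sources

    def find_spec(self, fullname, path=None, target=None):
        # Only claim modules whose source we have inlined.
        if fullname not in self.sources:
            return None
        return importlib.util.spec_from_loader(fullname, self, origin="inlined")

    def create_module(self, spec):
        return None  # defer to the default module creation machinery

    def exec_module(self, module):
        # Compile the inlined source and execute it in the new module's namespace.
        code = compile(self.sources[module.__name__], f"<inlined {module.__name__}>", "exec")
        exec(code, module.__dict__)

sys.meta_path.insert(0, DictImporter(INLINED_MODULES))

import greetings
print(greetings.hello("world"))  # -> Hello, world!
```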
## Usage
Include `inline-importer` in your development dependencies.
**`inline-importer` is not a runtime dependency, but a build-time dependency instead.**
Build your final script using `inline-python` or `python -m inline_importer` and distribute the output of that instead.
Your users will not require `inline-importer`.
However, if you have dependencies on other modules, your users will have to install those.
## What's next
While the importer is built, the rest of the machinery isn't.
* [x] Importer with PoC.
* [x] Script to collect all the modules to be inlined and build the dictionary.
* [x] Script that can combine the importer and the modules.
* [ ] Support for inlining distributed python libraries.
* [ ] Support for pre-compiled bytecode.
|
PypiClean
|
/dsin100daysv34-6.0.1.tar.gz/dsin100daysv34-6.0.1/notebook/static/components/codemirror/addon/mode/overlay.js
|
// Utility function that allows modes to be combined. The mode given
// as the base argument takes care of most of the normal mode
// functionality, but a second (typically simple) mode is used, which
// can override the style of text. Both modes get to parse all of the
// text, but when both assign a non-null style to a piece of code, the
// overlay wins, unless the combine argument was true and not overridden,
// or state.overlay.combineTokens was true, in which case the styles are
// combined.
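// Example usage (illustrative sketch only; mode name and overlay are assumptions):
//   CodeMirror.defineMode("html-with-notes", function(config) {
//     var noteOverlay = {token: function(stream) {
//       if (stream.match("TODO")) return "note";
//       while (stream.next() != null && !stream.match("TODO", false)) {}
//       return null;
//     }};
//     return CodeMirror.overlayMode(CodeMirror.getMode(config, "text/html"), noteOverlay);
//   });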
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.overlayMode = function(base, overlay, combine) {
return {
startState: function() {
return {
base: CodeMirror.startState(base),
overlay: CodeMirror.startState(overlay),
basePos: 0, baseCur: null,
overlayPos: 0, overlayCur: null,
streamSeen: null
};
},
copyState: function(state) {
return {
base: CodeMirror.copyState(base, state.base),
overlay: CodeMirror.copyState(overlay, state.overlay),
basePos: state.basePos, baseCur: null,
overlayPos: state.overlayPos, overlayCur: null
};
},
token: function(stream, state) {
if (stream != state.streamSeen ||
Math.min(state.basePos, state.overlayPos) < stream.start) {
state.streamSeen = stream;
state.basePos = state.overlayPos = stream.start;
}
if (stream.start == state.basePos) {
state.baseCur = base.token(stream, state.base);
state.basePos = stream.pos;
}
if (stream.start == state.overlayPos) {
stream.pos = stream.start;
state.overlayCur = overlay.token(stream, state.overlay);
state.overlayPos = stream.pos;
}
stream.pos = Math.min(state.basePos, state.overlayPos);
// state.overlay.combineTokens always takes precedence over combine,
// unless set to null
if (state.overlayCur == null) return state.baseCur;
else if (state.baseCur != null &&
state.overlay.combineTokens ||
combine && state.overlay.combineTokens == null)
return state.baseCur + " " + state.overlayCur;
else return state.overlayCur;
},
indent: base.indent && function(state, textAfter) {
return base.indent(state.base, textAfter);
},
electricChars: base.electricChars,
innerMode: function(state) { return {state: state.base, mode: base}; },
blankLine: function(state) {
var baseToken, overlayToken;
if (base.blankLine) baseToken = base.blankLine(state.base);
if (overlay.blankLine) overlayToken = overlay.blankLine(state.overlay);
return overlayToken == null ?
baseToken :
(combine && baseToken != null ? baseToken + " " + overlayToken : overlayToken);
}
};
};
});
|
PypiClean
|
/onshape-test-client-1.0.0.tar.gz/onshape-test-client-1.0.0/onshape_client/oas/model/bt_ellipse_description866.py
|
import re # noqa: F401
import sys # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from onshape_client.oas.exceptions import ApiAttributeError
def lazy_import():
from onshape_client.oas.model.bt_circle_description1145 import BTCircleDescription1145
from onshape_client.oas.model.bt_curve_description1583 import BTCurveDescription1583
from onshape_client.oas.model.bt_ellipse_description866 import BTEllipseDescription866
from onshape_client.oas.model.bt_ellipse_description866_all_of import BTEllipseDescription866AllOf
from onshape_client.oas.model.bt_line_description1559 import BTLineDescription1559
from onshape_client.oas.model.bt_spline_description2118 import BTSplineDescription2118
from onshape_client.oas.model.bt_vector3d389 import BTVector3d389
globals()['BTCircleDescription1145'] = BTCircleDescription1145
globals()['BTCurveDescription1583'] = BTCurveDescription1583
globals()['BTEllipseDescription866'] = BTEllipseDescription866
globals()['BTEllipseDescription866AllOf'] = BTEllipseDescription866AllOf
globals()['BTLineDescription1559'] = BTLineDescription1559
globals()['BTSplineDescription2118'] = BTSplineDescription2118
globals()['BTVector3d389'] = BTVector3d389
class BTEllipseDescription866(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('type',): {
'OTHER': "OTHER",
'LINE': "LINE",
'CIRCLE': "CIRCLE",
'ELLIPSE': "ELLIPSE",
'BCURVE': "BCURVE",
'ICURVE': "ICURVE",
'UNKNOWN': "UNKNOWN",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'bt_type': (str,), # noqa: E501
'major_axis': (BTVector3d389,), # noqa: E501
'major_radius': (float,), # noqa: E501
'minor_radius': (float,), # noqa: E501
'normal': (BTVector3d389,), # noqa: E501
'direction': (BTVector3d389,), # noqa: E501
'direction_oriented_with_face': (BTVector3d389,), # noqa: E501
'origin': (BTVector3d389,), # noqa: E501
'type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'BTCircleDescription-1145': BTCircleDescription1145,
'BTEllipseDescription-866': BTEllipseDescription866,
'BTLineDescription-1559': BTLineDescription1559,
'BTSplineDescription-2118': BTSplineDescription2118,
}
if not val:
return None
return {'bt_type': val}
attribute_map = {
'bt_type': 'btType', # noqa: E501
'major_axis': 'majorAxis', # noqa: E501
'major_radius': 'majorRadius', # noqa: E501
'minor_radius': 'minorRadius', # noqa: E501
'normal': 'normal', # noqa: E501
'direction': 'direction', # noqa: E501
'direction_oriented_with_face': 'directionOrientedWithFace', # noqa: E501
'origin': 'origin', # noqa: E501
'type': 'type', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BTEllipseDescription866 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bt_type (str): [optional] # noqa: E501
major_axis (BTVector3d389): [optional] # noqa: E501
major_radius (float): [optional] # noqa: E501
minor_radius (float): [optional] # noqa: E501
normal (BTVector3d389): [optional] # noqa: E501
direction (BTVector3d389): [optional] # noqa: E501
direction_oriented_with_face (BTVector3d389): [optional] # noqa: E501
origin (BTVector3d389): [optional] # noqa: E501
type (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BTEllipseDescription866 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bt_type (str): [optional] # noqa: E501
major_axis (BTVector3d389): [optional] # noqa: E501
major_radius (float): [optional] # noqa: E501
minor_radius (float): [optional] # noqa: E501
normal (BTVector3d389): [optional] # noqa: E501
direction (BTVector3d389): [optional] # noqa: E501
direction_oriented_with_face (BTVector3d389): [optional] # noqa: E501
origin (BTVector3d389): [optional] # noqa: E501
type (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
BTCurveDescription1583,
BTEllipseDescription866AllOf,
],
'oneOf': [
],
}
|
PypiClean
|
/python-sources-3.10.5.tar.gz/python-sources-3.10.5/Python-3.10.5/Doc/library/curses.ascii.rst
|
:mod:`curses.ascii` --- Utilities for ASCII characters
======================================================
.. module:: curses.ascii
:synopsis: Constants and set-membership functions for ASCII characters.
.. moduleauthor:: Eric S. Raymond <[email protected]>
.. sectionauthor:: Eric S. Raymond <[email protected]>
--------------
The :mod:`curses.ascii` module supplies name constants for ASCII characters and
functions to test membership in various ASCII character classes. The constants
supplied are names for control characters as follows:
+--------------+----------------------------------------------+
| Name | Meaning |
+==============+==============================================+
| :const:`NUL` | |
+--------------+----------------------------------------------+
| :const:`SOH` | Start of heading, console interrupt |
+--------------+----------------------------------------------+
| :const:`STX` | Start of text |
+--------------+----------------------------------------------+
| :const:`ETX` | End of text |
+--------------+----------------------------------------------+
| :const:`EOT` | End of transmission |
+--------------+----------------------------------------------+
| :const:`ENQ` | Enquiry, goes with :const:`ACK` flow control |
+--------------+----------------------------------------------+
| :const:`ACK` | Acknowledgement |
+--------------+----------------------------------------------+
| :const:`BEL` | Bell |
+--------------+----------------------------------------------+
| :const:`BS` | Backspace |
+--------------+----------------------------------------------+
| :const:`TAB` | Tab |
+--------------+----------------------------------------------+
| :const:`HT` | Alias for :const:`TAB`: "Horizontal tab" |
+--------------+----------------------------------------------+
| :const:`LF` | Line feed |
+--------------+----------------------------------------------+
| :const:`NL` | Alias for :const:`LF`: "New line" |
+--------------+----------------------------------------------+
| :const:`VT` | Vertical tab |
+--------------+----------------------------------------------+
| :const:`FF` | Form feed |
+--------------+----------------------------------------------+
| :const:`CR` | Carriage return |
+--------------+----------------------------------------------+
| :const:`SO` | Shift-out, begin alternate character set |
+--------------+----------------------------------------------+
| :const:`SI` | Shift-in, resume default character set |
+--------------+----------------------------------------------+
| :const:`DLE` | Data-link escape |
+--------------+----------------------------------------------+
| :const:`DC1` | XON, for flow control |
+--------------+----------------------------------------------+
| :const:`DC2` | Device control 2, block-mode flow control |
+--------------+----------------------------------------------+
| :const:`DC3` | XOFF, for flow control |
+--------------+----------------------------------------------+
| :const:`DC4` | Device control 4 |
+--------------+----------------------------------------------+
| :const:`NAK` | Negative acknowledgement |
+--------------+----------------------------------------------+
| :const:`SYN` | Synchronous idle |
+--------------+----------------------------------------------+
| :const:`ETB` | End transmission block |
+--------------+----------------------------------------------+
| :const:`CAN` | Cancel |
+--------------+----------------------------------------------+
| :const:`EM` | End of medium |
+--------------+----------------------------------------------+
| :const:`SUB` | Substitute |
+--------------+----------------------------------------------+
| :const:`ESC` | Escape |
+--------------+----------------------------------------------+
| :const:`FS` | File separator |
+--------------+----------------------------------------------+
| :const:`GS` | Group separator |
+--------------+----------------------------------------------+
| :const:`RS` | Record separator, block-mode terminator |
+--------------+----------------------------------------------+
| :const:`US` | Unit separator |
+--------------+----------------------------------------------+
| :const:`SP` | Space |
+--------------+----------------------------------------------+
| :const:`DEL` | Delete |
+--------------+----------------------------------------------+
Note that many of these have little practical significance in modern usage. The
mnemonics derive from teleprinter conventions that predate digital computers.
The module supplies the following functions, patterned on those in the standard
C library:
.. function:: isalnum(c)
Checks for an ASCII alphanumeric character; it is equivalent to ``isalpha(c) or
isdigit(c)``.
.. function:: isalpha(c)
Checks for an ASCII alphabetic character; it is equivalent to ``isupper(c) or
islower(c)``.
.. function:: isascii(c)
Checks for a character value that fits in the 7-bit ASCII set.
.. function:: isblank(c)
Checks for an ASCII whitespace character; space or horizontal tab.
.. function:: iscntrl(c)
Checks for an ASCII control character (in the range 0x00 to 0x1f or 0x7f).
.. function:: isdigit(c)
Checks for an ASCII decimal digit, ``'0'`` through ``'9'``. This is equivalent
to ``c in string.digits``.
.. function:: isgraph(c)
Checks for any printable ASCII character except space.
.. function:: islower(c)
Checks for an ASCII lower-case character.
.. function:: isprint(c)
Checks for any ASCII printable character including space.
.. function:: ispunct(c)
Checks for any printable ASCII character which is not a space or an alphanumeric
character.
.. function:: isspace(c)
Checks for ASCII white-space characters; space, line feed, carriage return, form
feed, horizontal tab, vertical tab.
.. function:: isupper(c)
Checks for an ASCII uppercase letter.
.. function:: isxdigit(c)
Checks for an ASCII hexadecimal digit. This is equivalent to ``c in
string.hexdigits``.
.. function:: isctrl(c)
Checks for an ASCII control character (ordinal values 0 to 31).
.. function:: ismeta(c)
Checks for a non-ASCII character (ordinal values 0x80 and above).
These functions accept either integers or single-character strings; when the argument is a
string, it is first converted using the built-in function :func:`ord`.
Note that all these functions check ordinal bit values derived from the
character of the string you pass in; they do not actually know anything about
the host machine's character encoding.
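As a quick illustration, either argument form may be used::

   >>> from curses import ascii
   >>> ascii.isdigit('7'), ascii.isdigit(ord('7'))
   (True, True)
   >>> ascii.isspace('\t'), ascii.isspace(ord('\t'))
   (True, True)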
The following three functions take either a single-character string or an integer
byte value; they return a value of the same type.
.. function:: ascii(c)
Return the ASCII value corresponding to the low 7 bits of *c*.
.. function:: ctrl(c)
Return the control character corresponding to the given character (the character
bit value is bitwise-anded with 0x1f).
.. function:: alt(c)
Return the 8-bit character corresponding to the given ASCII character (the
character bit value is bitwise-ored with 0x80).
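As a quick illustration of these conversions::

   >>> from curses import ascii
   >>> ascii.ctrl('a')
   '\x01'
   >>> ascii.alt('a') == chr(0xe1)
   True
   >>> ascii.ascii(chr(0xe1))
   'a'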
The following function takes either a single-character string or integer value;
it returns a string.
.. index::
single: ^ (caret); in curses module
single: ! (exclamation); in curses module
.. function:: unctrl(c)
Return a string representation of the ASCII character *c*. If *c* is printable,
this string is the character itself. If the character is a control character
(0x00--0x1f) the string consists of a caret (``'^'``) followed by the
corresponding uppercase letter. If the character is an ASCII delete (0x7f) the
string is ``'^?'``. If the character has its meta bit (0x80) set, the meta bit
is stripped, the preceding rules applied, and ``'!'`` prepended to the result.
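For example::

   >>> from curses import ascii
   >>> ascii.unctrl('A'), ascii.unctrl('\x01'), ascii.unctrl('\x7f'), ascii.unctrl('\x81')
   ('A', '^A', '^?', '!^A')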
.. data:: controlnames
A 33-element string array that contains the ASCII mnemonics for the thirty-two
ASCII control characters from 0 (NUL) to 0x1f (US), in order, plus the mnemonic
``SP`` for the space character.
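For example::

   >>> from curses import ascii
   >>> ascii.controlnames[0], ascii.controlnames[0x1b], ascii.controlnames[32]
   ('NUL', 'ESC', 'SP')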
|
PypiClean
|
/chaty-0.8.11.tar.gz/chaty-0.8.11/README.md
|
<!-- markdownlint-disable MD033 -->
# python-wechaty [](https://pypi.org/project/wechaty/) [](https://github.com/wechaty/python-wechaty/actions?query=workflow%3APyPI)

[](https://github.com/wechaty/python-wechaty-getting-started)
[](https://www.python.org/downloads/release/python-370/)
[](https://pepy.tech/project/wechaty)
[](https://github.com/wechaty/python-wechaty)
<!--

-->
## Connecting Chatbots
[](https://github.com/Wechaty/wechaty)
Wechaty is a Conversational SDK for Chatbot Makers that can help you create a bot in 9 lines of Python.
## Voice of the Developers
> "Wechaty is a great solution, I believe there would be much more users recognize it." [link](https://github.com/Wechaty/wechaty/pull/310#issuecomment-285574472)
> — <cite>@Gcaufy, Tencent Engineer, Author of [WePY](https://github.com/Tencent/wepy)</cite>
>
> "太好用,好用的想哭"
> — <cite>@xinbenlv, Google Engineer, Founder of HaoShiYou.org</cite>
>
> "最好的微信开发库" [link](http://weibo.com/3296245513/Ec4iNp9Ld?type=comment)
> — <cite>@Jarvis, Baidu Engineer</cite>
>
> "Wechaty让运营人员更多的时间思考如何进行活动策划、留存用户,商业变现" [link](http://mp.weixin.qq.com/s/dWHAj8XtiKG-1fIS5Og79g)
> — <cite>@lijiarui, Founder & CEO of Juzi.BOT.</cite>
>
> "If you know js ... try Wechaty, it's easy to use."
> — <cite>@Urinx Uri Lee, Author of [WeixinBot(Python)](https://github.com/Urinx/WeixinBot)</cite>
See more at [Wiki:Voice Of Developer](https://github.com/Wechaty/wechaty/wiki/Voice%20Of%20Developer)
## Join Us
Wechaty is used in many chatbot projects by thousands of developers. If you want to talk with other developers, scan the following QR Code in WeChat with the secret code _python wechaty_ to join our **Wechaty Python Developers' Home**.

Scan now, because other Wechaty Python developers want to talk with you too! (secret code: _python wechaty_)
## The World's Shortest Python ChatBot: 9 lines of Code
```python
from wechaty import Wechaty
import asyncio
async def main():
bot = Wechaty()
bot.on('scan', lambda status, qrcode, data: print('Scan QR Code to login: {}\nhttps://wechaty.js.org/qrcode/{}'.format(status, qrcode)))
bot.on('login', lambda user: print('User {} logged in'.format(user)))
bot.on('message', lambda message: print('Message: {}'.format(message)))
await bot.start()
asyncio.run(main())
```
## Python Wechaty Developing Plan
We already have Wechaty in TypeScript, and it will not be too hard to translate the TypeScript (TS) to Python (PY) because [wechaty](https://github.com/wechaty/wechaty) has only 3,000 lines of TS code, which are well designed and decoupled by the [wechaty-puppet](https://github.com/wechaty/wechaty-puppet/) abstraction. So after we have translated those 3,000 lines of TypeScript code, we will almost be done.
As we already have an ecosystem of Wechaty in TypeScript, we will not have to implement everything in Python; in particular, in February 2020 we finished the [wechaty-grpc](https://github.com/wechaty/grpc) service abstraction module with the [wechaty-puppet-service](https://github.com/wechaty/wechaty-puppet-service) implementation.
The following diagram shows that we can reuse almost everything in TypeScript; what we need to build is only the block located at the top right of the diagram: `Wechaty (Python)`.
```ascii
+--------------------------+ +--------------------------+
| | | |
| Wechaty (TypeScript) | | Wechaty (Python) |
| | | |
+--------------------------+ +--------------------------+
+-------------------------------------------------------+
| Wechaty Puppet Service |
| |
| (wechaty-puppet-service) |
+-------------------------------------------------------+
+--------------------- wechaty-grpc ----------------------+
+-------------------------------------------------------+
| Wechaty Puppet Abstract |
| |
| (wechaty-puppet) |
+-------------------------------------------------------+
+--------------------------+ +--------------------------+
| Pad Protocol | | Web Protocol |
| | | |
| wechaty-puppet-padplus | |(wechaty-puppet-puppeteer)|
+--------------------------+ +--------------------------+
+--------------------------+ +--------------------------+
| Windows Protocol | | Mac Protocol |
| | | |
| (wechaty-puppet-windows) | | (wechaty-puppet-macpro) |
+--------------------------+ +--------------------------+
```
## Example: How to Translate TypeScript to Python
There's a roughly 100-line class named `Image` in charge of downloading a WeChat image at different sizes.
It is a great example for demonstrating how we translate the TypeScript to Python the Wechaty way:
### Image Class Source Code
- TypeScript: <https://github.com/wechaty/wechaty/blob/master/src/user/image.ts>
- Python: <https://github.com/wechaty/python-wechaty/blob/master/src/wechaty/user/image.py>
If you are interested in the translation and want to see how it works, a good starting point is to read and compare those two `Image` class files in TypeScript and Python side by side; a simplified sketch of the pattern follows.
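To give a flavor of the pattern (a deliberately simplified, hypothetical sketch: `FakePuppet` and `message_image` are illustrative stand-ins, not the real python-wechaty API), a TypeScript `async method(): Promise<T>` typically becomes a Python `async def`:

```python
import asyncio
from dataclasses import dataclass


@dataclass
class FakePuppet:
    """Stand-in for the real wechaty-puppet service client."""

    async def message_image(self, image_id: str, size: str) -> str:
        return f"<image {image_id} @ {size}>"


@dataclass
class Image:
    image_id: str
    puppet: FakePuppet

    # TypeScript `async thumbnail(): Promise<FileBox>` becomes `async def thumbnail(...)`
    async def thumbnail(self) -> str:
        return await self.puppet.message_image(self.image_id, "thumbnail")

    async def artwork(self) -> str:
        return await self.puppet.message_image(self.image_id, "artwork")


async def main() -> None:
    image = Image("img-1", FakePuppet())
    print(await image.thumbnail())  # <image img-1 @ thumbnail>


asyncio.run(main())
```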
## To-do List
- TS: TypeScript
- SLOC: Source Lines Of Code
### Wechaty Internal Modules
1. [ ] Class Wechaty @wj-mCat
- TS SLOC(1160): <https://github.com/wechaty/wechaty/blob/master/src/wechaty.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class Contact
- TS SLOC(804): <https://github.com/wechaty/wechaty/blob/master/src/user/contact.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class ContactSelf
- TS SLOC(199): <https://github.com/wechaty/wechaty/blob/master/src/user/contact-self.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class Message
- TS SLOC(1054): <https://github.com/wechaty/wechaty/blob/master/src/user/message.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class Room
- TS SLOC(1194): <https://github.com/wechaty/wechaty/blob/master/src/user/room.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class Image @wj-mCat
- TS SLOC(60): <https://github.com/wechaty/wechaty/blob/master/src/user/image.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [x] Class Accessory @huan
- TS SLOC(179): <https://github.com/wechaty/wechaty/blob/master/src/accessory.ts>
- [x] Code
- [x] Unit Tests
- [ ] Documentation
1. [ ] Class Config @wj-mCat
- TS SLOC(187): <https://github.com/wechaty/wechaty/blob/master/src/config.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class Favorite
- TS SLOC(52): <https://github.com/wechaty/wechaty/blob/master/src/user/favorite.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class Friendship
- TS SLOC(417): <https://github.com/wechaty/wechaty/blob/master/src/user/friendship.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class MiniProgram
- TS SLOC(70): <https://github.com/wechaty/wechaty/blob/master/src/user/mini-program.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class RoomInvitation
- TS SLOC(317): <https://github.com/wechaty/wechaty/blob/master/src/user/room-invitation.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class Tag
- TS SLOC(190): <https://github.com/wechaty/wechaty/blob/master/src/user/tag.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class UrlLink
- TS SLOC(107): <https://github.com/wechaty/wechaty/blob/master/src/user/url-link.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
### Wechaty External Modules
1. [ ] Class FileBox
- TS SLOC(638): <https://github.com/huan/file-box/blob/master/src/file-box.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class MemoryCard
- TS SLOC(376): <https://github.com/huan/memory-card/blob/master/src/memory-card.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class WechatyPuppet
- TS SLOC(1115): <https://github.com/wechaty/wechaty-puppet/blob/master/src/puppet.ts>
- [ ] Code
- [ ] Unit Tests
- [ ] Documentation
1. [ ] Class WechatyPuppetHostie
- TS SLOC(909): <https://github.com/wechaty/wechaty-puppet-service/blob/master/src/client/puppet-service.ts>
## Usage
WIP...
## Requirements
1. Python 3.7+
## Install
```shell
pip3 install wechaty
```
## See Also
- [Packaging Python Projects](https://packaging.python.org/tutorials/packaging-projects/)
### Static & Instance of Class
- [Static variables and methods in Python](https://radek.io/2011/07/21/static-variables-and-methods-in-python/)
### Typings
- [PEP 526 -- Syntax for Variable Annotations - Class and instance variable annotations](https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations)
- [Python Type Checking (Guide)](https://realpython.com/python-type-checking/) by [Geir Arne Hjelle](https://realpython.com/team/gahjelle/)
## History
### v0.6 (Jun 19, 2020)
Python Wechaty and Scala Wechaty **BETA** Released!
Read more from our Multi-language Wechaty Beta Release event from our blog:
- [Multi Language Wechaty Beta Release Announcement!](https://wechaty.js.org/2020/06/19/multi-language-wechaty-beta-release/)
### v0.4 (Mar 15, 2020) master
Welcome [@huangaszaq](https://github.com/huangaszaq) for joining the project! [#42](https://github.com/wechaty/python-wechaty/pull/42)
1. Add a friendly exception message for PyPI users. [#24](https://github.com/wechaty/python-wechaty/issues/24)
### v0.1 (Mar 8, 2020)
Welcome [@wj-Mcat](https://github.com/wj-Mcat) for joining the project! [#4](https://github.com/wechaty/python-wechaty/pull/4)
1. Started translating the TypeScript of Wechaty to Python
1. DevOps Setup
1. Type Checking: mypy & pytype
1. Unit Testing: pytest
1. Linting: pylint, pycodestyle, and flake8
1. CI/CD: GitHub Actions
1. Publish to PyPI automatically after the tests passed.
### v0.0.1 (Aug 25, 2018)
Project created; published an empty module `wechaty` on PyPI.
## Related Projects
- [Wechaty](https://github.com/wechaty/wechaty) - Conversational AI Chatbot SDK for WeChat Individual Accounts (TypeScript)
- [Python Wechaty](https://github.com/wechaty/python-wechaty) - Python Wechaty Conversational AI Chatbot SDK for WeChat Individual Accounts (Python)
- [Go Wechaty](https://github.com/wechaty/go-wechaty) - Go Wechaty Conversational AI Chatbot SDK for WeChat Individual Accounts (Go)
- [Java Wechaty](https://github.com/wechaty/java-wechaty) - Java Wechaty Conversational AI Chatbot SDK for WeChat Individual Accounts (Java)
- [Scala Wechaty](https://github.com/wechaty/scala-wechaty) - Scala Wechaty Conversational AI Chatbot SDK for WeChat Individual Accounts (Scala)
## Badge
[](https://github.com/wechaty/python-wechaty)
```md
[](https://github.com/wechaty/python-wechaty)
```
## Stargazers over time
[](https://starchart.cc/wechaty/python-wechaty)
## Contributors
[](https://sourcerer.io/fame/huan/wechaty/python-wechaty/links/0)
[](https://sourcerer.io/fame/huan/wechaty/python-wechaty/links/1)
[](https://sourcerer.io/fame/huan/wechaty/python-wechaty/links/2)
[](https://sourcerer.io/fame/huan/wechaty/python-wechaty/links/3)
[](https://sourcerer.io/fame/huan/wechaty/python-wechaty/links/4)
[](https://sourcerer.io/fame/huan/wechaty/python-wechaty/links/5)
[](https://sourcerer.io/fame/huan/wechaty/python-wechaty/links/6)
[](https://sourcerer.io/fame/huan/wechaty/python-wechaty/links/7)
## Committers
1. [@huangaszaq](https://github.com/huangaszaq) - Chunhong HUANG (黄纯洪)
## Creators
- [@wj-Mcat](https://github.com/wj-Mcat) - Jingjing WU (吴京京)
- [@huan](https://github.com/huan) - ([李卓桓](http://linkedin.com/in/zixia)) [email protected]
## Copyright & License
- Code & Docs © 2018 Wechaty Contributors <https://github.com/wechaty>
- Code released under the Apache-2.0 License
- Docs released under Creative Commons
|
PypiClean
|
/optimization_algorithms_tools-1.0.1-py3-none-any.whl/optimization_algorithms_tools/algorithms/graph_search.py
|
import heapq
import math
from collections import deque
from ..structures import Solution
from time import process_time
from sys import getsizeof
from copy import deepcopy
import networkx as nx
"""
Requirements:
The node class must have the following public object methods:
- path(): generates a path from origin to this node by tracing node parents
- expand(): generates a list of all the children (reachable nodes) from this node
"""
def BFS(origin, destination):
time_start = process_time() # Time tracking
max_frontier = 0 # Space tracking
route = []
frontier = deque([origin])
explored = set() # Explored tracking
found = False
while frontier and not found:
node = frontier.popleft()
explored.add(node)
for child in node.expand():
if child not in explored and child not in frontier:
if child == destination:
route = child.path()
found = True
frontier.append(child)
if getsizeof(frontier) > max_frontier:
max_frontier = getsizeof(frontier)
time_end = process_time() # Time tracking
return Solution(route, time_end - time_start, max_frontier, len(explored))
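# Illustrative only: a minimal node type satisfying the path()/expand()
# interface expected by BFS/DFS in this module. It is not used by this module,
# and the names below (state, parent, the 4-connected grid) are made up.
class _ExampleGridNode:
    """Example 4-connected grid cell with parent links for path reconstruction."""

    def __init__(self, state, parent=None):
        self.state = state          # e.g. an (x, y) coordinate
        self.parent = parent

    def __eq__(self, other):
        return isinstance(other, _ExampleGridNode) and self.state == other.state

    def __hash__(self):
        return hash(self.state)

    def path(self):
        # walk parent links back to the origin, then reverse into origin -> here order
        node, route = self, []
        while node is not None:
            route.append(node)
            node = node.parent
        return route[::-1]

    def expand(self):
        x, y = self.state
        return [_ExampleGridNode((x + dx, y + dy), parent=self)
                for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1))]
# e.g. BFS(_ExampleGridNode((0, 0)), _ExampleGridNode((2, 3))) returns a Solution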
"""
Requirements:
The node class must have the following public object methods:
- path(): generates a path from origin to this node by tracing node parents
- expand(): generates a list of all the children (reachable nodes) from this node
"""
def DFS(origin, destination):
time_start = process_time() # Time tracking
max_frontier = 0 # Space tracking
route = []
frontier = deque([origin])
explored = set() # Explored tracking
found = False
while frontier and not found:
node = frontier.pop()
explored.add(node)
for child in node.expand():
if child not in explored and child not in frontier:
if child == destination:
route = child.path()
found = True
frontier.append(child)
if getsizeof(frontier) > max_frontier:
max_frontier = getsizeof(frontier)
time_end = process_time() # Time tracking
return Solution(route, time_end - time_start, max_frontier, len(explored))
"""
Requirements:
The node class must have the following public object methods:
- path(): generates a path from origin to this node by tracing node parents
- expand(): generates a list of all the children (reachable nodes) from this node
- get_id(): returns a unique identifier for the node
- set_parent(node): sets the parent node of this node (this value will be used for the path method)
- get_distance(): returns the edge distance between this node and its parent
- set_distance(distance): sets the edge distance between this node and its parent
"""
def Dijkstra(origin, destination, unrelaxed_nodes):
time_start = process_time() # Time tracking
space = getsizeof(deepcopy(unrelaxed_nodes)) # Space tracking
# Using a set here avoids the problem with self loops
seen = set() # explored tracking
shortest_dist = {node.get_id(): math.inf for node in unrelaxed_nodes}
shortest_dist[origin.get_id()] = 0
found = False
route = None
while len(unrelaxed_nodes) > 0 and not found:
node = min(unrelaxed_nodes, key=lambda node: shortest_dist[node.get_id()])
        # relax the node: its entry in shortest_dist is now the shortest distance from the origin to this node
unrelaxed_nodes.remove(node)
seen.add(node.get_id())
# if the destination node has been relaxed then that is the route we want
if node == destination:
route = node.path()
found = True
continue
# otherwise, let's relax edges of its neighbours
for child in node.expand():
# skip self-loops
if child.get_id() in seen:
continue
child_obj = next(
(node for node in unrelaxed_nodes if node.get_id() == child.get_id()),
None,
)
child_obj.set_distance(child.get_distance())
distance = shortest_dist[node.get_id()] + child.get_distance()
if distance < shortest_dist[child_obj.get_id()]:
shortest_dist[child_obj.get_id()] = distance
child_obj.set_parent(node)
time_end = process_time() # Time tracking
return Solution(route, time_end - time_start, space, len(seen))
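# Illustrative only: the weighted searches (Dijkstra, UCS, Bidirectional_Dijkstra,
# A_Star) additionally need get_id(), set_parent(), and the
# get_distance()/set_distance() pair for the edge cost back to the parent;
# e.g. _ExampleGridNode above could be extended along these lines:
#
#     def get_id(self): return self.state
#     def set_parent(self, node): self.parent = node
#     def get_distance(self): return getattr(self, "distance", 1)
#     def set_distance(self, distance): self.distance = distance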
# This implementation uses a heap of tuples (cost, entry_count, node);
# entry_count is a tie-breaker so the heap never has to compare nodes directly.
def UCS(origin, destination):
time_start = process_time() # Time tracking
    max_priority = 0  # Space tracking
    entry_count = 1
    route = []  # ensure route is defined even when no path to the destination is found
    priority_queue = [(0, 0, origin)]
found = False
visited = [] # Explored tracking
while priority_queue and not found:
node = heapq.heappop(priority_queue)
node_cost = node[0]
node = node[2]
if node in visited:
continue
visited.append(node)
# We found the destination
if node == destination:
route = node.path()
found = True
continue
for child in node.expand():
total_cost = child.get_distance() + node_cost
matches = [item for item in priority_queue if item[2] == child]
if matches:
# Update the entry if the new priority is better
if total_cost < matches[0][0]:
priority_queue[priority_queue.index(matches[0])] = (
total_cost,
entry_count,
child,
)
entry_count += 1
heapq.heapify(priority_queue)
else:
heapq.heappush(priority_queue, (total_cost, entry_count, child))
if getsizeof(priority_queue) > max_priority:
max_priority = getsizeof(priority_queue)
entry_count += 1
time_end = process_time() # Time tracking
return Solution(route, time_end - time_start, max_priority, len(visited))
def Bidirectional_Dijkstra(origin, destination, unrelaxed_nodes, expand_kwargs = {}):
time_start = process_time() # Time tracking
frontier = deepcopy(unrelaxed_nodes)
space_required = getsizeof(frontier)
explored_f = set()
explored_b = set()
shortest_dist_f = {node.get_id(): math.inf for node in frontier}
shortest_dist_b = {node.get_id(): math.inf for node in frontier}
    shortest_dist_f[origin.get_id()] = 0
    shortest_dist_b[destination.get_id()] = 0
found = False
route = []
altr_expand = True # to alternate between front and back
while frontier and not found:
if altr_expand: # Forward
node = min(frontier, key=lambda node: shortest_dist_f[node.get_id()])
            # relax the node: its entry in shortest_dist_f is now the shortest known distance from the origin
frontier.remove(node)
explored_f.add(node)
# if the destination node has been relaxed then that is the route we want
if node == destination:
route = node.path()
found = True
continue
# otherwise, let's relax edges of its neighbours
for child in node.expand(**expand_kwargs):
# skip self-loops
                if child in explored_f:
continue
# Check the child is collided
if child in explored_b:
overlapped = next((node for node in explored_b if node == child))
# we don't take the overlapped node twice
route = child.path()[:-1] + overlapped.path()[::-1]
found = True
break
child_obj = next(
(node for node in frontier if node.get_id() == child.get_id()), None
)
child_obj.set_distance(child.get_distance())
distance = shortest_dist_f[node.get_id()] + child.get_distance()
if distance < shortest_dist_f[child_obj.get_id()]:
shortest_dist_f[child_obj.get_id()] = distance
child_obj.set_parent(node)
altr_expand = False
        if not altr_expand and not found:  # Backward
node = min(frontier, key=lambda node: shortest_dist_b[node.get_id()])
            # relax the node: its entry in shortest_dist_b is now the shortest known distance from the destination
frontier.remove(node)
explored_b.add(node)
# if the destination node has been relaxed then that is the route we want
if node == origin:
route = node.path()[::-1]
found = True
continue
# otherwise, let's relax edges of its neighbours
for child in node.expand(reverse=True, **expand_kwargs):
# skip self-loops
                if child in explored_b:
continue
# Check the child is collided
if child in explored_f:
overlapped = next(
(node for node in explored_f if node == child), None
)
route = overlapped.path()[:-1] + child.path()[::-1]
found = True
break
child_obj = next(
(node for node in frontier if node.get_id() == child.get_id()), None
)
child_obj.set_distance(child.get_distance())
distance = shortest_dist_b[node.get_id()] + child.get_distance()
if distance < shortest_dist_b[child_obj.get_id()]:
shortest_dist_b[child_obj.get_id()] = distance
child_obj.set_parent(node)
altr_expand = True
time_end = process_time() # Time tracking
return Solution(
route, time_end - time_start, space_required, len(explored_f) + len(explored_b)
)
def Kruskal(G, attr="weight", sorted_edges=False, edges=None, graph_type=nx.Graph):
if not sorted_edges:
edges = sorted(G.edges(data=True), key=lambda t: t[2].get(attr, 1))
mst = graph_type()
mst.add_nodes_from(G)
for e in edges:
mst.add_edges_from([e])
        try:
            nx.find_cycle(mst)
            # the new edge closed a cycle, so it cannot be part of the tree
            mst.remove_edge(e[0], e[1])
        except nx.NetworkXNoCycle:
            # no cycle: keep the edge and stop once the tree spans the graph
            try:
                if nx.is_connected(mst):
                    break
            except nx.NetworkXNotImplemented:
                # is_connected() is undefined for directed graphs
                if nx.is_strongly_connected(mst):
                    break
return mst
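# Example (illustrative): minimum spanning tree of a small weighted graph.
#
#     >>> import networkx as nx
#     >>> G = nx.Graph()
#     >>> G.add_weighted_edges_from([(0, 1, 4), (1, 2, 1), (0, 2, 2), (2, 3, 7)])
#     >>> sorted(Kruskal(G, attr="weight").edges())
#     [(0, 2), (1, 2), (2, 3)]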
"""
Requirements:
The node class must have the following public object methods:
- path(): generates a path from origin to this node by tracing node parents
- expand(): generates a list of all the children (reachable nodes) from this node
"""
def A_Star(
G, origin, destination, heuristic_fn, heuristic_kwargs={}, expand_kwargs={}
):
start_time = process_time()
toDestination, toOrigin = heuristic_fn(G, origin, destination, **heuristic_kwargs)
route = []
frontier = list()
frontier.append(origin)
explored = set()
found = False
while frontier and not found:
# choose a node based on its heuristic value
node = min(frontier, key=lambda node: toOrigin[node.get_id()] + toDestination[node.get_id()])
frontier.remove(node)
explored.add(node)
# expand its children
for child in node.expand(**expand_kwargs):
if child not in explored and child not in frontier:
if child == destination:
route = child.path()
found = True
continue
frontier.append(child)
space = getsizeof(explored)
end_time = process_time()
return Solution(route, end_time-start_time, space, len(explored))
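# Illustrative only: A_Star expects heuristic_fn(G, origin, destination) to
# return two dicts keyed by node id: estimated cost to the destination and
# cost from the origin. A straight-line sketch, assuming nodes carry "x"/"y"
# attributes (an assumption for illustration, not a requirement of this module):
#
#     def euclidean_heuristic(G, origin, destination):
#         def dist(a, b):
#             return math.dist((G.nodes[a]["x"], G.nodes[a]["y"]),
#                              (G.nodes[b]["x"], G.nodes[b]["y"]))
#         to_destination = {n: dist(n, destination.get_id()) for n in G.nodes}
#         to_origin = {n: dist(n, origin.get_id()) for n in G.nodes}
#         return to_destination, to_origin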
|
PypiClean
|
/cdktf-cdktf-provider-azurerm-10.0.1.tar.gz/cdktf-cdktf-provider-azurerm-10.0.1/src/cdktf_cdktf_provider_azurerm/bot_channel_slack/__init__.py
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import cdktf as _cdktf_9a9027ec
import constructs as _constructs_77d1e7e8
class BotChannelSlack(
_cdktf_9a9027ec.TerraformResource,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-azurerm.botChannelSlack.BotChannelSlack",
):
'''Represents a {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack azurerm_bot_channel_slack}.'''
def __init__(
self,
scope: _constructs_77d1e7e8.Construct,
id_: builtins.str,
*,
bot_name: builtins.str,
client_id: builtins.str,
client_secret: builtins.str,
location: builtins.str,
resource_group_name: builtins.str,
verification_token: builtins.str,
id: typing.Optional[builtins.str] = None,
landing_page_url: typing.Optional[builtins.str] = None,
signing_secret: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union["BotChannelSlackTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
'''Create a new {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack azurerm_bot_channel_slack} Resource.
:param scope: The scope in which to define this construct.
:param id_: The scoped construct ID. Must be unique amongst siblings in the same scope
:param bot_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#bot_name BotChannelSlack#bot_name}.
:param client_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#client_id BotChannelSlack#client_id}.
:param client_secret: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#client_secret BotChannelSlack#client_secret}.
:param location: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#location BotChannelSlack#location}.
:param resource_group_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#resource_group_name BotChannelSlack#resource_group_name}.
:param verification_token: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#verification_token BotChannelSlack#verification_token}.
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#id BotChannelSlack#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param landing_page_url: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#landing_page_url BotChannelSlack#landing_page_url}.
:param signing_secret: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#signing_secret BotChannelSlack#signing_secret}.
:param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#timeouts BotChannelSlack#timeouts}
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__becd65c29bf3130980f77d011d11e322e260486431b89f12b9e2147f5affa203)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"])
config = BotChannelSlackConfig(
bot_name=bot_name,
client_id=client_id,
client_secret=client_secret,
location=location,
resource_group_name=resource_group_name,
verification_token=verification_token,
id=id,
landing_page_url=landing_page_url,
signing_secret=signing_secret,
timeouts=timeouts,
connection=connection,
count=count,
depends_on=depends_on,
for_each=for_each,
lifecycle=lifecycle,
provider=provider,
provisioners=provisioners,
)
jsii.create(self.__class__, self, [scope, id_, config])
@jsii.member(jsii_name="putTimeouts")
def put_timeouts(
self,
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
read: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
'''
:param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#create BotChannelSlack#create}.
:param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#delete BotChannelSlack#delete}.
:param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#read BotChannelSlack#read}.
:param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#update BotChannelSlack#update}.
'''
value = BotChannelSlackTimeouts(
create=create, delete=delete, read=read, update=update
)
return typing.cast(None, jsii.invoke(self, "putTimeouts", [value]))
@jsii.member(jsii_name="resetId")
def reset_id(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetId", []))
@jsii.member(jsii_name="resetLandingPageUrl")
def reset_landing_page_url(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetLandingPageUrl", []))
@jsii.member(jsii_name="resetSigningSecret")
def reset_signing_secret(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetSigningSecret", []))
@jsii.member(jsii_name="resetTimeouts")
def reset_timeouts(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetTimeouts", []))
@jsii.member(jsii_name="synthesizeAttributes")
def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))
@jsii.python.classproperty
@jsii.member(jsii_name="tfResourceType")
def TF_RESOURCE_TYPE(cls) -> builtins.str:
return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))
@builtins.property
@jsii.member(jsii_name="timeouts")
def timeouts(self) -> "BotChannelSlackTimeoutsOutputReference":
return typing.cast("BotChannelSlackTimeoutsOutputReference", jsii.get(self, "timeouts"))
@builtins.property
@jsii.member(jsii_name="botNameInput")
def bot_name_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "botNameInput"))
@builtins.property
@jsii.member(jsii_name="clientIdInput")
def client_id_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "clientIdInput"))
@builtins.property
@jsii.member(jsii_name="clientSecretInput")
def client_secret_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "clientSecretInput"))
@builtins.property
@jsii.member(jsii_name="idInput")
def id_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))
@builtins.property
@jsii.member(jsii_name="landingPageUrlInput")
def landing_page_url_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "landingPageUrlInput"))
@builtins.property
@jsii.member(jsii_name="locationInput")
def location_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "locationInput"))
@builtins.property
@jsii.member(jsii_name="resourceGroupNameInput")
def resource_group_name_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "resourceGroupNameInput"))
@builtins.property
@jsii.member(jsii_name="signingSecretInput")
def signing_secret_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "signingSecretInput"))
@builtins.property
@jsii.member(jsii_name="timeoutsInput")
def timeouts_input(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "BotChannelSlackTimeouts"]]:
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "BotChannelSlackTimeouts"]], jsii.get(self, "timeoutsInput"))
@builtins.property
@jsii.member(jsii_name="verificationTokenInput")
def verification_token_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "verificationTokenInput"))
@builtins.property
@jsii.member(jsii_name="botName")
def bot_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "botName"))
@bot_name.setter
def bot_name(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__4ba1f65b5243b77ad9fc93e88788378006afc1cec9dd92c3381bee3bbfde8fcb)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "botName", value)
@builtins.property
@jsii.member(jsii_name="clientId")
def client_id(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "clientId"))
@client_id.setter
def client_id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__b714d531d0f10f2ce6f4396ed15f231fb480812778a0796ff8062a10d111e0b0)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "clientId", value)
@builtins.property
@jsii.member(jsii_name="clientSecret")
def client_secret(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "clientSecret"))
@client_secret.setter
def client_secret(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__6eca956aa1bdefdeddd2339c93fab4a2aa6dc519d941bd192f5ac0758ab1d825)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "clientSecret", value)
@builtins.property
@jsii.member(jsii_name="id")
def id(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "id"))
@id.setter
def id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__a316db77e7466b0ae889ad83ebd935d85f4a9561ad9432e3f5f454479e31c528)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "id", value)
@builtins.property
@jsii.member(jsii_name="landingPageUrl")
def landing_page_url(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "landingPageUrl"))
@landing_page_url.setter
def landing_page_url(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__d1917f004ce985f973477e2b0d085e8fda53f742e54719508f97c30c0ad808c1)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "landingPageUrl", value)
@builtins.property
@jsii.member(jsii_name="location")
def location(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "location"))
@location.setter
def location(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__5d1fab5353865f03f88c2e843af3580a568767f95a1bce04fa5d8e20d6be64cb)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "location", value)
@builtins.property
@jsii.member(jsii_name="resourceGroupName")
def resource_group_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "resourceGroupName"))
@resource_group_name.setter
def resource_group_name(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__c18bfb570f205344dd60d0e557d8fa38e0e3f6da1940a573328ac10bd6d98ff2)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "resourceGroupName", value)
@builtins.property
@jsii.member(jsii_name="signingSecret")
def signing_secret(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "signingSecret"))
@signing_secret.setter
def signing_secret(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__85d14679ebf43b65d157ebb896219541a6c464bfa5ee870a979100f060fb7088)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "signingSecret", value)
@builtins.property
@jsii.member(jsii_name="verificationToken")
def verification_token(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "verificationToken"))
@verification_token.setter
def verification_token(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__7171018b7b8572d64cb1ae346178fd753e4411cee550e92a71ba17b8e44e3428)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "verificationToken", value)
@jsii.data_type(
jsii_type="@cdktf/provider-azurerm.botChannelSlack.BotChannelSlackConfig",
jsii_struct_bases=[_cdktf_9a9027ec.TerraformMetaArguments],
name_mapping={
"connection": "connection",
"count": "count",
"depends_on": "dependsOn",
"for_each": "forEach",
"lifecycle": "lifecycle",
"provider": "provider",
"provisioners": "provisioners",
"bot_name": "botName",
"client_id": "clientId",
"client_secret": "clientSecret",
"location": "location",
"resource_group_name": "resourceGroupName",
"verification_token": "verificationToken",
"id": "id",
"landing_page_url": "landingPageUrl",
"signing_secret": "signingSecret",
"timeouts": "timeouts",
},
)
class BotChannelSlackConfig(_cdktf_9a9027ec.TerraformMetaArguments):
def __init__(
self,
*,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
bot_name: builtins.str,
client_id: builtins.str,
client_secret: builtins.str,
location: builtins.str,
resource_group_name: builtins.str,
verification_token: builtins.str,
id: typing.Optional[builtins.str] = None,
landing_page_url: typing.Optional[builtins.str] = None,
signing_secret: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union["BotChannelSlackTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
'''
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
:param bot_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#bot_name BotChannelSlack#bot_name}.
:param client_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#client_id BotChannelSlack#client_id}.
:param client_secret: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#client_secret BotChannelSlack#client_secret}.
:param location: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#location BotChannelSlack#location}.
:param resource_group_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#resource_group_name BotChannelSlack#resource_group_name}.
:param verification_token: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#verification_token BotChannelSlack#verification_token}.
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#id BotChannelSlack#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param landing_page_url: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#landing_page_url BotChannelSlack#landing_page_url}.
:param signing_secret: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#signing_secret BotChannelSlack#signing_secret}.
:param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#timeouts BotChannelSlack#timeouts}
'''
if isinstance(lifecycle, dict):
lifecycle = _cdktf_9a9027ec.TerraformResourceLifecycle(**lifecycle)
if isinstance(timeouts, dict):
timeouts = BotChannelSlackTimeouts(**timeouts)
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__abce3b4bf17166327244c68fe5bcfdfb0ac54775f29b18b27c78b129248a4aa8)
check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"])
check_type(argname="argument count", value=count, expected_type=type_hints["count"])
check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"])
check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"])
check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"])
check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"])
check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"])
check_type(argname="argument bot_name", value=bot_name, expected_type=type_hints["bot_name"])
check_type(argname="argument client_id", value=client_id, expected_type=type_hints["client_id"])
check_type(argname="argument client_secret", value=client_secret, expected_type=type_hints["client_secret"])
check_type(argname="argument location", value=location, expected_type=type_hints["location"])
check_type(argname="argument resource_group_name", value=resource_group_name, expected_type=type_hints["resource_group_name"])
check_type(argname="argument verification_token", value=verification_token, expected_type=type_hints["verification_token"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument landing_page_url", value=landing_page_url, expected_type=type_hints["landing_page_url"])
check_type(argname="argument signing_secret", value=signing_secret, expected_type=type_hints["signing_secret"])
check_type(argname="argument timeouts", value=timeouts, expected_type=type_hints["timeouts"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"bot_name": bot_name,
"client_id": client_id,
"client_secret": client_secret,
"location": location,
"resource_group_name": resource_group_name,
"verification_token": verification_token,
}
if connection is not None:
self._values["connection"] = connection
if count is not None:
self._values["count"] = count
if depends_on is not None:
self._values["depends_on"] = depends_on
if for_each is not None:
self._values["for_each"] = for_each
if lifecycle is not None:
self._values["lifecycle"] = lifecycle
if provider is not None:
self._values["provider"] = provider
if provisioners is not None:
self._values["provisioners"] = provisioners
if id is not None:
self._values["id"] = id
if landing_page_url is not None:
self._values["landing_page_url"] = landing_page_url
if signing_secret is not None:
self._values["signing_secret"] = signing_secret
if timeouts is not None:
self._values["timeouts"] = timeouts
@builtins.property
def connection(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]]:
'''
:stability: experimental
'''
result = self._values.get("connection")
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]], result)
@builtins.property
def count(
self,
) -> typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]]:
'''
:stability: experimental
'''
result = self._values.get("count")
return typing.cast(typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]], result)
@builtins.property
def depends_on(
self,
) -> typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]]:
'''
:stability: experimental
'''
result = self._values.get("depends_on")
return typing.cast(typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]], result)
@builtins.property
def for_each(self) -> typing.Optional[_cdktf_9a9027ec.ITerraformIterator]:
'''
:stability: experimental
'''
result = self._values.get("for_each")
return typing.cast(typing.Optional[_cdktf_9a9027ec.ITerraformIterator], result)
@builtins.property
def lifecycle(self) -> typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle]:
'''
:stability: experimental
'''
result = self._values.get("lifecycle")
return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle], result)
@builtins.property
def provider(self) -> typing.Optional[_cdktf_9a9027ec.TerraformProvider]:
'''
:stability: experimental
'''
result = self._values.get("provider")
return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformProvider], result)
@builtins.property
def provisioners(
self,
) -> typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]]:
'''
:stability: experimental
'''
result = self._values.get("provisioners")
return typing.cast(typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]], result)
@builtins.property
def bot_name(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#bot_name BotChannelSlack#bot_name}.'''
result = self._values.get("bot_name")
assert result is not None, "Required property 'bot_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def client_id(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#client_id BotChannelSlack#client_id}.'''
result = self._values.get("client_id")
assert result is not None, "Required property 'client_id' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def client_secret(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#client_secret BotChannelSlack#client_secret}.'''
result = self._values.get("client_secret")
assert result is not None, "Required property 'client_secret' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def location(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#location BotChannelSlack#location}.'''
result = self._values.get("location")
assert result is not None, "Required property 'location' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def resource_group_name(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#resource_group_name BotChannelSlack#resource_group_name}.'''
result = self._values.get("resource_group_name")
assert result is not None, "Required property 'resource_group_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def verification_token(self) -> builtins.str:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#verification_token BotChannelSlack#verification_token}.'''
result = self._values.get("verification_token")
assert result is not None, "Required property 'verification_token' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def id(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#id BotChannelSlack#id}.
Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2.
If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
'''
result = self._values.get("id")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def landing_page_url(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#landing_page_url BotChannelSlack#landing_page_url}.'''
result = self._values.get("landing_page_url")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def signing_secret(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#signing_secret BotChannelSlack#signing_secret}.'''
result = self._values.get("signing_secret")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def timeouts(self) -> typing.Optional["BotChannelSlackTimeouts"]:
'''timeouts block.
Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#timeouts BotChannelSlack#timeouts}
'''
result = self._values.get("timeouts")
return typing.cast(typing.Optional["BotChannelSlackTimeouts"], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "BotChannelSlackConfig(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@cdktf/provider-azurerm.botChannelSlack.BotChannelSlackTimeouts",
jsii_struct_bases=[],
name_mapping={
"create": "create",
"delete": "delete",
"read": "read",
"update": "update",
},
)
class BotChannelSlackTimeouts:
def __init__(
self,
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
read: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
'''
:param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#create BotChannelSlack#create}.
:param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#delete BotChannelSlack#delete}.
:param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#read BotChannelSlack#read}.
:param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#update BotChannelSlack#update}.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__9a68171e0a0432b6d3f2fdffc5edfd0e2513f2cac73d21d3689e42d6dfe1d59c)
check_type(argname="argument create", value=create, expected_type=type_hints["create"])
check_type(argname="argument delete", value=delete, expected_type=type_hints["delete"])
check_type(argname="argument read", value=read, expected_type=type_hints["read"])
check_type(argname="argument update", value=update, expected_type=type_hints["update"])
self._values: typing.Dict[builtins.str, typing.Any] = {}
if create is not None:
self._values["create"] = create
if delete is not None:
self._values["delete"] = delete
if read is not None:
self._values["read"] = read
if update is not None:
self._values["update"] = update
@builtins.property
def create(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#create BotChannelSlack#create}.'''
result = self._values.get("create")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def delete(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#delete BotChannelSlack#delete}.'''
result = self._values.get("delete")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def read(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#read BotChannelSlack#read}.'''
result = self._values.get("read")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def update(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/bot_channel_slack#update BotChannelSlack#update}.'''
result = self._values.get("update")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "BotChannelSlackTimeouts(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class BotChannelSlackTimeoutsOutputReference(
_cdktf_9a9027ec.ComplexObject,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-azurerm.botChannelSlack.BotChannelSlackTimeoutsOutputReference",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__f58ad1728911040e14fccf88aed7748aa4dd6aad250434132b5ac0abf0a69b50)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute])
@jsii.member(jsii_name="resetCreate")
def reset_create(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetCreate", []))
@jsii.member(jsii_name="resetDelete")
def reset_delete(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetDelete", []))
@jsii.member(jsii_name="resetRead")
def reset_read(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetRead", []))
@jsii.member(jsii_name="resetUpdate")
def reset_update(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetUpdate", []))
@builtins.property
@jsii.member(jsii_name="createInput")
def create_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "createInput"))
@builtins.property
@jsii.member(jsii_name="deleteInput")
def delete_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "deleteInput"))
@builtins.property
@jsii.member(jsii_name="readInput")
def read_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "readInput"))
@builtins.property
@jsii.member(jsii_name="updateInput")
def update_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "updateInput"))
@builtins.property
@jsii.member(jsii_name="create")
def create(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "create"))
@create.setter
def create(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__d20d3df108105f28d961f4037f98524613a10f2adf19b0968428e668602884d8)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "create", value)
@builtins.property
@jsii.member(jsii_name="delete")
def delete(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "delete"))
@delete.setter
def delete(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__f4eacd23d135c270b4f8aadbc07469a0402f5779949420e0b7aec7785818ce16)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "delete", value)
@builtins.property
@jsii.member(jsii_name="read")
def read(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "read"))
@read.setter
def read(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__e69d3e07fb5e6566d2b4d6b61e34f5d8f190b5f6a9b015f52d3a8ca353594638)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "read", value)
@builtins.property
@jsii.member(jsii_name="update")
def update(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "update"))
@update.setter
def update(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__d64f3730dcfd23cc8a01468e70ed8574421891381fbbaaa5854e23a942b5f743)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "update", value)
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, BotChannelSlackTimeouts]]:
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, BotChannelSlackTimeouts]], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, BotChannelSlackTimeouts]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__4f4d669426a2cb0f0c81d17497731866584e3da182e6553aa4ff9fc19a94399e)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
__all__ = [
"BotChannelSlack",
"BotChannelSlackConfig",
"BotChannelSlackTimeouts",
"BotChannelSlackTimeoutsOutputReference",
]
publication.publish()
def _typecheckingstub__becd65c29bf3130980f77d011d11e322e260486431b89f12b9e2147f5affa203(
scope: _constructs_77d1e7e8.Construct,
id_: builtins.str,
*,
bot_name: builtins.str,
client_id: builtins.str,
client_secret: builtins.str,
location: builtins.str,
resource_group_name: builtins.str,
verification_token: builtins.str,
id: typing.Optional[builtins.str] = None,
landing_page_url: typing.Optional[builtins.str] = None,
signing_secret: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union[BotChannelSlackTimeouts, typing.Dict[builtins.str, typing.Any]]] = None,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__4ba1f65b5243b77ad9fc93e88788378006afc1cec9dd92c3381bee3bbfde8fcb(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__b714d531d0f10f2ce6f4396ed15f231fb480812778a0796ff8062a10d111e0b0(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__6eca956aa1bdefdeddd2339c93fab4a2aa6dc519d941bd192f5ac0758ab1d825(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__a316db77e7466b0ae889ad83ebd935d85f4a9561ad9432e3f5f454479e31c528(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__d1917f004ce985f973477e2b0d085e8fda53f742e54719508f97c30c0ad808c1(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__5d1fab5353865f03f88c2e843af3580a568767f95a1bce04fa5d8e20d6be64cb(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__c18bfb570f205344dd60d0e557d8fa38e0e3f6da1940a573328ac10bd6d98ff2(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__85d14679ebf43b65d157ebb896219541a6c464bfa5ee870a979100f060fb7088(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__7171018b7b8572d64cb1ae346178fd753e4411cee550e92a71ba17b8e44e3428(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__abce3b4bf17166327244c68fe5bcfdfb0ac54775f29b18b27c78b129248a4aa8(
*,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
bot_name: builtins.str,
client_id: builtins.str,
client_secret: builtins.str,
location: builtins.str,
resource_group_name: builtins.str,
verification_token: builtins.str,
id: typing.Optional[builtins.str] = None,
landing_page_url: typing.Optional[builtins.str] = None,
signing_secret: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union[BotChannelSlackTimeouts, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__9a68171e0a0432b6d3f2fdffc5edfd0e2513f2cac73d21d3689e42d6dfe1d59c(
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
read: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__f58ad1728911040e14fccf88aed7748aa4dd6aad250434132b5ac0abf0a69b50(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__d20d3df108105f28d961f4037f98524613a10f2adf19b0968428e668602884d8(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__f4eacd23d135c270b4f8aadbc07469a0402f5779949420e0b7aec7785818ce16(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__e69d3e07fb5e6566d2b4d6b61e34f5d8f190b5f6a9b015f52d3a8ca353594638(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__d64f3730dcfd23cc8a01468e70ed8574421891381fbbaaa5854e23a942b5f743(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__4f4d669426a2cb0f0c81d17497731866584e3da182e6553aa4ff9fc19a94399e(
value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, BotChannelSlackTimeouts]],
) -> None:
"""Type checking stubs"""
pass
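# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated bindings). It shows how
# the BotChannelSlack construct and its timeouts block might be wired into a
# CDKTF stack; the stack name, credentials and resource names are placeholders,
# and a configured AzurermProvider (omitted here) would also be required.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from cdktf import App, TerraformStack
    from constructs import Construct

    class ExampleStack(TerraformStack):
        def __init__(self, scope: Construct, id: str) -> None:
            super().__init__(scope, id)
            # NOTE: a real stack must also register an AzurermProvider here.
            BotChannelSlack(
                self,
                "slack_channel",
                bot_name="example-bot",
                client_id="<client-id>",
                client_secret="<client-secret>",
                location="global",
                resource_group_name="example-rg",
                verification_token="<verification-token>",
                timeouts=BotChannelSlackTimeouts(create="30m", delete="30m"),
            )

    app = App()
    ExampleStack(app, "bot-channel-slack-example")
    app.synth()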
/ansible-8.3.0-py3-none-any.whl/ansible_collections/purestorage/flasharray/plugins/modules/purefa_ra.py
# (c) 2018, Simon Dodsley ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: purefa_ra
version_added: '1.0.0'
short_description: Enable or Disable Pure Storage FlashArray Remote Assist
description:
- Enable or Disable Remote Assist for a Pure Storage FlashArray.
author:
- Pure Storage Ansible Team (@sdodsley) <[email protected]>
options:
state:
description:
- Define state of remote assist
- When set to I(enable) the RA port can be exposed using the
I(debug) module.
type: str
default: enable
choices: [ enable, disable ]
extends_documentation_fragment:
- purestorage.flasharray.purestorage.fa
"""
EXAMPLES = r"""
- name: Enable Remote Assist port
  purestorage.flasharray.purefa_ra:
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
  register: result

- name: Show the Remote Assist connection details
  debug:
    msg: "Remote Assist: {{ result['ra_info'] }}"

- name: Disable Remote Assist port
  purestorage.flasharray.purefa_ra:
    state: disable
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
"""
RETURN = r"""
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.purestorage.flasharray.plugins.module_utils.purefa import (
get_system,
purefa_argument_spec,
)
def enable_ra(module, array):
"""Enable Remote Assist"""
changed = False
ra_facts = {}
    if array.get_remote_assist_status()["status"] not in ["connected", "enabled"]:
changed = True
if not module.check_mode:
try:
ra_data = array.enable_remote_assist()
ra_facts["fa_ra"] = {"name": ra_data["name"], "port": ra_data["port"]}
except Exception:
module.fail_json(msg="Enabling Remote Assist failed")
else:
if not module.check_mode:
try:
ra_data = array.get_remote_assist_status()
ra_facts["fa_ra"] = {"name": ra_data["name"], "port": ra_data["port"]}
except Exception:
module.fail_json(msg="Getting Remote Assist failed")
module.exit_json(changed=changed, ra_info=ra_facts)
def disable_ra(module, array):
"""Disable Remote Assist"""
changed = False
if array.get_remote_assist_status()["status"] in ["connected", "enabled"]:
changed = True
if not module.check_mode:
try:
array.disable_remote_assist()
except Exception:
module.fail_json(msg="Disabling Remote Assist failed")
module.exit_json(changed=changed)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(
dict(
state=dict(type="str", default="enable", choices=["enable", "disable"]),
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
array = get_system(module)
if module.params["state"] == "enable":
enable_ra(module, array)
else:
disable_ra(module, array)
module.exit_json(changed=False)
if __name__ == "__main__":
main()
/openhub-api-0.0.352.tar.gz/openhub-api-0.0.352/OpenHubAPI/data/serializers/serializers.py
from rest_framework import serializers
from data.models.models import Channel, Accessory, HardwareConfig, PiPico, \
    DHT22, MCP3008, ModProbe, VEML7700, Hardware, Hub, SPIIo, SerialIo, PwmIo, I2cIo, DeviceFileIo, MCPAnalogIo, \
    PiPicoAnalogIo, PiPicoACAnalogIo, PiGpio, StepperMotor, DataTransformer, DataTransformerConstants, \
    DataTransformerTypes, ChannelStats, ChannelStatDataPoint
class RecursiveField(serializers.Serializer):
def to_representation(self, value):
serializer = self.parent.parent.__class__(value, context=self.context)
return serializer.data
class HubSerializer(serializers.ModelSerializer):
class Meta:
model = Hub
fields = '__all__'
class ChannelStatsSerializer(serializers.ModelSerializer):
channel = serializers.PrimaryKeyRelatedField(read_only=True)
model = serializers.SerializerMethodField()
def get_model(self, instance):
try:
return instance.__class__.__name__
except:
return None
class Meta:
model = ChannelStats
fields = ['id', 'channel', 'type', 'value','model']
class ChannelStatDataPointSerializer(serializers.ModelSerializer):
channel = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = ChannelStatDataPoint
fields = ['channel','value']
class ChannelSerializer(serializers.ModelSerializer):
channelstats_set = ChannelStatsSerializer(read_only=True,many=True, required=False)
model = serializers.SerializerMethodField()
def get_model(self, instance):
try:
return instance.__class__.__name__
except:
return None
class Meta:
model = Channel
fields = ['id', 'type', 'channel_index', 'hardware','channelstats_set','model','keep_statistics']
class DataTransformerSerializer(serializers.ModelSerializer):
children = RecursiveField(many=True)
model = serializers.SerializerMethodField()
def get_model(self, instance):
try:
return instance.__class__.__name__
except:
return None
class Meta:
model = DataTransformer
fields = '__all__'
class HardwareConfigSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = HardwareConfig
fields = ['id', 'type', 'value']
class PiPicoSerializer(serializers.ModelSerializer):
class Meta:
model = PiPico
fields = '__all__'
class DHT22Serializer(serializers.ModelSerializer):
class Meta:
model = DHT22
fields = '__all__'
class MCP3008Serializer(serializers.ModelSerializer):
class Meta:
model = MCP3008
fields = '__all__'
class ModProbeSerializer(serializers.ModelSerializer):
class Meta:
model = ModProbe
fields = '__all__'
class VEML7700Serializer(serializers.ModelSerializer):
class Meta:
model = VEML7700
fields = '__all__'
class HardwareSerializer(serializers.ModelSerializer):
model = serializers.SerializerMethodField()
def get_model(self, instance):
try:
return instance.__class__.__name__
except:
return None
    def to_json_array(self, obj_list):
        serialized_hardware = []
        for obj in obj_list:
            serialized_hardware.append(self.to_representation(obj))
        return serialized_hardware
def to_representation(self, obj):
"""
Because GalleryItem is Polymorphic
"""
if isinstance(obj, PiPico):
return PiPicoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, DHT22):
return DHT22Serializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, MCP3008):
return MCP3008Serializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, ModProbe):
return ModProbeSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, VEML7700):
return VEML7700Serializer(obj, context=self.context).to_representation(obj)
return super(HardwareSerializer, self).to_representation(obj)
class Meta:
model = Hardware
fields = '__all__'
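# Hypothetical usage sketch (not part of this module): because Hardware rows are
# polymorphic, serializing a mixed queryset dispatches each instance to the
# serializer of its concrete subclass.
#
#   hardware = Hardware.objects.all()
#   payload = HardwareSerializer().to_json_array(hardware)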
class SPIIoSerializer(serializers.ModelSerializer):
class Meta:
model = SPIIo
fields = '__all__'
class SerialIoSerializer(serializers.ModelSerializer):
class Meta:
model = SerialIo
fields = '__all__'
class PwmIoSerializer(serializers.ModelSerializer):
class Meta:
model = PwmIo
fields = '__all__'
class I2cIoSerializer(serializers.ModelSerializer):
class Meta:
model = I2cIo
fields = '__all__'
class DeviceFileIoSerializer(serializers.ModelSerializer):
class Meta:
model = DeviceFileIo
fields = '__all__'
class MCPAnalogIoSerializer(serializers.ModelSerializer):
class Meta:
model = MCPAnalogIo
fields = '__all__'
class PiPicoAnalogIoSerializer(serializers.ModelSerializer):
class Meta:
model = PiPicoAnalogIo
fields = '__all__'
class PiPicoACAnalogIoSerializer(serializers.ModelSerializer):
class Meta:
model = PiPicoACAnalogIo
fields = '__all__'
class PiGpioSerializer(serializers.ModelSerializer):
class Meta:
model = PiGpio
fields = '__all__'
class StepperMotorSerializer(serializers.ModelSerializer):
class Meta:
model = StepperMotor
fields = '__all__'
class HardwareIOSerializer(serializers.ModelSerializer):
    def to_json_array(self, obj_list):
        serialized_hardware = []
        for obj in obj_list:
            serialized_hardware.append(self.to_representation(obj))
        return serialized_hardware
def to_representation(self, obj):
"""
Because GalleryItem is Polymorphic
"""
if isinstance(obj, SPIIo):
return SPIIoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, SerialIo):
return SerialIoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, PwmIo):
return PwmIoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, I2cIo):
return I2cIoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, DeviceFileIo):
return DeviceFileIoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, MCPAnalogIo):
return MCPAnalogIoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, PiPicoAnalogIo):
return PiPicoAnalogIoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, PiPicoACAnalogIo):
return PiPicoACAnalogIoSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, PiGpio):
return PiGpioSerializer(obj, context=self.context).to_representation(obj)
elif isinstance(obj, StepperMotor):
return StepperMotorSerializer(obj, context=self.context).to_representation(obj)
return super(HardwareIOSerializer, self).to_representation(obj)
class Meta:
model = Hardware
fields = '__all__'
class DataTransformerConstantsSerializer(serializers.ModelSerializer):
data_transformer = serializers.PrimaryKeyRelatedField(read_only=True)
model = serializers.SerializerMethodField()
def get_model(self, instance):
try:
return instance.__class__.__name__
except:
return None
class Meta:
model = DataTransformerConstants
fields = '__all__'
class DataTransformerTypeSerializer(serializers.ModelSerializer):
class Meta:
model=DataTransformerTypes
fields = ['type'] # add here rest of the fields from model
class DataTransformerTreeSerializer(serializers.ModelSerializer):
data_transformer_constants = DataTransformerConstantsSerializer(read_only=True,many=True)
channels = serializers.PrimaryKeyRelatedField(read_only=True,many=True)
channel_stats = serializers.PrimaryKeyRelatedField(read_only=True,many=True)
accessory = serializers.PrimaryKeyRelatedField(read_only=True)
type = serializers.StringRelatedField(many=False)
children = serializers.SerializerMethodField(source='get_children')
model = serializers.SerializerMethodField()
def get_model(self, instance):
try:
return instance.__class__.__name__
except:
return None
class Meta:
model=DataTransformer
fields = '__all__' # add here rest of the fields from model
def get_children(self, obj):
print(str(self.context))
if 'children' in self.context.keys():
children = self.context['children'].get(obj.id, [])
serializer = DataTransformerTreeSerializer(children, many=True, context=self.context)
return serializer.data
else:
return None
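# Hypothetical usage sketch (not part of this module): DataTransformerTreeSerializer
# expects a pre-built {parent_id: [child, ...]} map under the "children" context key,
# so nested nodes can be rendered without per-node queries. The parent field name
# used below is an assumption about the DataTransformer model.
#
#   from collections import defaultdict
#
#   children_map = defaultdict(list)
#   for node in DataTransformer.objects.all():
#       if node.parent_id is not None:
#           children_map[node.parent_id].append(node)
#   data = DataTransformerTreeSerializer(root, context={"children": children_map}).data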
class AccessorySerializer(serializers.ModelSerializer):
# channels = ChannelSerializer(source='channel_set', many=True)
datatransformer = DataTransformerTreeSerializer(read_only=True)
model = serializers.SerializerMethodField()
def get_model(self, instance):
try:
return instance.__class__.__name__
except:
return None
class Meta:
model = Accessory
fields = ['id', 'category', 'type', 'display_name', 'aid',
'channels','datatransformer','model']
/hana_automl-0.0.4.tar.gz/hana_automl-0.0.4/docs/source/algorithms.rst
Ensembles
*********

Ensemble is just a boolean flag in our fit function, but there is a fair amount of code hidden behind it.
How do ensembles actually work? At the moment, the library supports only the *blending* technique.
Blending is an ensemble machine learning technique that trains a model to combine the predictions of several contributing member models.

.. image:: images/ensemble.jpg

Blending takes the top 3 algorithms with the highest accuracy on the validation data from the model list.
To enable the ensemble, simply pass *ensemble=True* to :meth:`hana_automl.automl.AutoML.fit` when fitting the AutoML model.
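
A minimal sketch of enabling the ensemble is shown below; the connection details, table name and target column are illustrative assumptions about your environment, not fixed API values.

.. code-block:: python

    from hana_ml.dataframe import ConnectionContext
    from hana_automl.automl import AutoML

    # Assumed connection to a SAP HANA instance; replace host and credentials with your own.
    cc = ConnectionContext(address="hana-host", port=39015, user="USER", password="PASS")

    model = AutoML(cc)
    model.fit(
        table_name="TRAIN_DATA",   # assumed training table in the connected schema
        target="label",            # assumed target column
        ensemble=True,             # blend the top 3 models found during optimization
    )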
/django-limbo-0.7.7.tar.gz/django-limbo-0.7.7/limbo/diff_match_patch.py
"""Functions for diff, match and patch.
Computes the difference between two texts to create a patch.
Applies the patch onto another text, allowing for errors.
"""
__author__ = '[email protected] (Neil Fraser)'
import math
import time
import urllib
import re
import sys
class diff_match_patch:
"""Class containing the diff, match and patch methods.
Also contains the behaviour settings.
"""
def __init__(self):
"""Inits a diff_match_patch object with default settings.
Redefine these in your program to override the defaults.
"""
# Number of seconds to map a diff before giving up (0 for infinity).
self.Diff_Timeout = 1.0
# Cost of an empty edit operation in terms of edit characters.
self.Diff_EditCost = 4
# At what point is no match declared (0.0 = perfection, 1.0 = very loose).
self.Match_Threshold = 0.5
# How far to search for a match (0 = exact location, 1000+ = broad match).
# A match this many characters away from the expected location will add
# 1.0 to the score (0.0 is a perfect match).
self.Match_Distance = 1000
# When deleting a large block of text (over ~64 characters), how close does
# the contents have to match the expected contents. (0.0 = perfection,
# 1.0 = very loose). Note that Match_Threshold controls how closely the
# end points of a delete need to match.
self.Patch_DeleteThreshold = 0.5
# Chunk size for context length.
self.Patch_Margin = 4
# The number of bits in an int.
# Python has no maximum, thus to disable patch splitting set to 0.
# However to avoid long patches in certain pathological cases, use 32.
# Multiple short patches (using native ints) are much faster than long ones.
self.Match_MaxBits = 32
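  # Illustrative tuning (not part of the original source): callers typically set
  # these attributes on an instance before diffing or matching, e.g.
  #
  #   dmp = diff_match_patch()
  #   dmp.Diff_Timeout = 0        # 0 means never give up early on a diff
  #   dmp.Match_Threshold = 0.8   # accept looser fuzzy matches in match_main()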
# DIFF FUNCTIONS
# The data structure representing a diff is an array of tuples:
# [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
# which means: delete "Hello", add "Goodbye" and keep " world."
DIFF_DELETE = -1
DIFF_INSERT = 1
DIFF_EQUAL = 0
def diff_main(self, text1, text2, checklines=True, deadline=None):
"""Find the differences between two texts. Simplifies the problem by
stripping any common prefix or suffix off the texts before diffing.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Optional speedup flag. If present and false, then don't run
a line-level diff first to identify the changed areas.
Defaults to true, which does a faster, slightly less optimal diff.
deadline: Optional time when the diff should be complete by. Used
internally for recursive calls. Users should set DiffTimeout instead.
Returns:
Array of changes.
"""
# Set a deadline by which time the diff must be complete.
if deadline == None:
# Unlike in most languages, Python counts time in seconds.
if self.Diff_Timeout <= 0:
deadline = sys.maxint
else:
deadline = time.time() + self.Diff_Timeout
# Check for null inputs.
if text1 == None or text2 == None:
raise ValueError("Null inputs. (diff_main)")
# Check for equality (speedup).
if text1 == text2:
if text1:
return [(self.DIFF_EQUAL, text1)]
return []
# Trim off common prefix (speedup).
commonlength = self.diff_commonPrefix(text1, text2)
commonprefix = text1[:commonlength]
text1 = text1[commonlength:]
text2 = text2[commonlength:]
# Trim off common suffix (speedup).
commonlength = self.diff_commonSuffix(text1, text2)
if commonlength == 0:
commonsuffix = ''
else:
commonsuffix = text1[-commonlength:]
text1 = text1[:-commonlength]
text2 = text2[:-commonlength]
# Compute the diff on the middle block.
diffs = self.diff_compute(text1, text2, checklines, deadline)
# Restore the prefix and suffix.
if commonprefix:
diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
if commonsuffix:
diffs.append((self.DIFF_EQUAL, commonsuffix))
self.diff_cleanupMerge(diffs)
return diffs
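  # Illustrative example (not in the original source): diffing two short strings
  # yields a list of (operation, text) tuples.
  #
  #   dmp = diff_match_patch()
  #   dmp.diff_main("The cat sat", "The dog sat")
  #   # -> [(0, 'The '), (-1, 'cat'), (1, 'dog'), (0, ' sat')]
  #
  # The exact segmentation may vary with Diff_Timeout, but concatenating the
  # equalities and deletions always reproduces text1.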
def diff_compute(self, text1, text2, checklines, deadline):
"""Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
if not text1:
# Just add some text (speedup).
return [(self.DIFF_INSERT, text2)]
if not text2:
# Just delete some text (speedup).
return [(self.DIFF_DELETE, text1)]
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
i = longtext.find(shorttext)
if i != -1:
# Shorter text is inside the longer text (speedup).
diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
(self.DIFF_INSERT, longtext[i + len(shorttext):])]
# Swap insertions for deletions if diff is reversed.
if len(text1) > len(text2):
diffs[0] = (self.DIFF_DELETE, diffs[0][1])
diffs[2] = (self.DIFF_DELETE, diffs[2][1])
return diffs
if len(shorttext) == 1:
# Single character string.
# After the previous speedup, the character can't be an equality.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
longtext = shorttext = None # Garbage collect.
# Check to see if the problem can be split in two.
hm = self.diff_halfMatch(text1, text2)
if hm:
# A half-match was found, sort out the return data.
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
# Send both pairs off for separate processing.
diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
# Merge the results.
return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
if checklines and len(text1) > 100 and len(text2) > 100:
return self.diff_lineMode(text1, text2, deadline)
return self.diff_bisect(text1, text2, deadline)
def diff_lineMode(self, text1, text2, deadline):
"""Do a quick line-level diff on both strings, then rediff the parts for
greater accuracy.
This speedup can produce non-minimal diffs.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
# Scan the text on a line-by-line basis first.
(text1, text2, linearray) = self.diff_linesToChars(text1, text2)
diffs = self.diff_main(text1, text2, False, deadline)
# Convert the diff back to original text.
self.diff_charsToLines(diffs, linearray)
# Eliminate freak matches (e.g. blank lines)
self.diff_cleanupSemantic(diffs)
# Rediff any replacement blocks, this time character-by-character.
# Add a dummy entry at the end.
diffs.append((self.DIFF_EQUAL, ''))
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
elif diffs[pointer][0] == self.DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
elif diffs[pointer][0] == self.DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete >= 1 and count_insert >= 1:
# Delete the offending records and add the merged ones.
a = self.diff_main(text_delete, text_insert, False, deadline)
diffs[pointer - count_delete - count_insert : pointer] = a
pointer = pointer - count_delete - count_insert + len(a)
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
pointer += 1
diffs.pop() # Remove the dummy entry at the end.
return diffs
def diff_bisect(self, text1, text2, deadline):
"""Find the 'middle snake' of a diff, split the problem in two
and return the recursively constructed diff.
See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
# Cache the text lengths to prevent multiple calls.
text1_length = len(text1)
text2_length = len(text2)
max_d = (text1_length + text2_length + 1) / 2
v_offset = max_d
v_length = 2 * max_d
v1 = [-1] * v_length
v1[v_offset + 1] = 0
v2 = v1[:]
delta = text1_length - text2_length
# If the total number of characters is odd, then the front path will
# collide with the reverse path.
front = (delta % 2 != 0)
# Offsets for start and end of k loop.
# Prevents mapping of space beyond the grid.
k1start = 0
k1end = 0
k2start = 0
k2end = 0
for d in xrange(max_d):
# Bail out if deadline is reached.
if time.time() > deadline:
break
# Walk the front path one step.
for k1 in xrange(-d + k1start, d + 1 - k1end, 2):
k1_offset = v_offset + k1
if (k1 == -d or k1 != d and
v1[k1_offset - 1] < v1[k1_offset + 1]):
x1 = v1[k1_offset + 1]
else:
x1 = v1[k1_offset - 1] + 1
y1 = x1 - k1
while (x1 < text1_length and y1 < text2_length and
text1[x1] == text2[y1]):
x1 += 1
y1 += 1
v1[k1_offset] = x1
if x1 > text1_length:
# Ran off the right of the graph.
k1end += 2
elif y1 > text2_length:
# Ran off the bottom of the graph.
k1start += 2
elif front:
k2_offset = v_offset + delta - k1
if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
# Mirror x2 onto top-left coordinate system.
x2 = text1_length - v2[k2_offset]
if x1 >= x2:
# Overlap detected.
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
# Walk the reverse path one step.
for k2 in xrange(-d + k2start, d + 1 - k2end, 2):
k2_offset = v_offset + k2
if (k2 == -d or k2 != d and
v2[k2_offset - 1] < v2[k2_offset + 1]):
x2 = v2[k2_offset + 1]
else:
x2 = v2[k2_offset - 1] + 1
y2 = x2 - k2
while (x2 < text1_length and y2 < text2_length and
text1[-x2 - 1] == text2[-y2 - 1]):
x2 += 1
y2 += 1
v2[k2_offset] = x2
if x2 > text1_length:
# Ran off the left of the graph.
k2end += 2
elif y2 > text2_length:
# Ran off the top of the graph.
k2start += 2
elif not front:
k1_offset = v_offset + delta - k2
if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
x1 = v1[k1_offset]
y1 = v_offset + x1 - k1_offset
# Mirror x2 onto top-left coordinate system.
x2 = text1_length - x2
if x1 >= x2:
# Overlap detected.
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
# Diff took too long and hit the deadline or
# number of diffs equals number of characters, no commonality at all.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
def diff_bisectSplit(self, text1, text2, x, y, deadline):
"""Given the location of the 'middle snake', split the diff in two parts
and recurse.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
x: Index of split point in text1.
y: Index of split point in text2.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]
# Compute both diffs serially.
diffs = self.diff_main(text1a, text2a, False, deadline)
diffsb = self.diff_main(text1b, text2b, False, deadline)
return diffs + diffsb
def diff_linesToChars(self, text1, text2):
"""Split two texts into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Args:
text1: First string.
text2: Second string.
Returns:
Three element tuple, containing the encoded text1, the encoded text2 and
the array of unique strings. The zeroth element of the array of unique
strings is intentionally blank.
"""
lineArray = [] # e.g. lineArray[4] == "Hello\n"
lineHash = {} # e.g. lineHash["Hello\n"] == 4
# "\x00" is a valid character, but various debuggers don't like it.
# So we'll insert a junk entry to avoid generating a null character.
lineArray.append('')
def diff_linesToCharsMunge(text):
"""Split a text into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Modifies linearray and linehash through being a closure.
Args:
text: String to encode.
Returns:
Encoded string.
"""
chars = []
# Walk the text, pulling out a substring for each line.
      # text.split('\n') would temporarily double our memory footprint.
# Modifying text would create many large strings to garbage collect.
lineStart = 0
lineEnd = -1
while lineEnd < len(text) - 1:
lineEnd = text.find('\n', lineStart)
if lineEnd == -1:
lineEnd = len(text) - 1
line = text[lineStart:lineEnd + 1]
lineStart = lineEnd + 1
if line in lineHash:
chars.append(unichr(lineHash[line]))
else:
lineArray.append(line)
lineHash[line] = len(lineArray) - 1
chars.append(unichr(len(lineArray) - 1))
return "".join(chars)
chars1 = diff_linesToCharsMunge(text1)
chars2 = diff_linesToCharsMunge(text2)
return (chars1, chars2, lineArray)
def diff_charsToLines(self, diffs, lineArray):
"""Rehydrate the text in a diff from a string of line hashes to real lines
of text.
Args:
diffs: Array of diff tuples.
lineArray: Array of unique strings.
"""
for x in xrange(len(diffs)):
text = []
for char in diffs[x][1]:
text.append(lineArray[ord(char)])
diffs[x] = (diffs[x][0], "".join(text))
def diff_commonPrefix(self, text1, text2):
"""Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
return 0
# Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = int((pointermax - pointermin) / 2 + pointermin)
return pointermid
def diff_commonSuffix(self, text1, text2):
"""Determine the common suffix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the end of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[-1] != text2[-1]:
return 0
# Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
if (text1[-pointermid:len(text1) - pointerend] ==
text2[-pointermid:len(text2) - pointerend]):
pointermin = pointermid
pointerend = pointermin
else:
pointermax = pointermid
pointermid = int((pointermax - pointermin) / 2 + pointermin)
return pointermid
def diff_commonOverlap(self, text1, text2):
"""Determine if the suffix of one string is the prefix of another.
Args:
text1 First string.
text2 Second string.
Returns:
The number of characters common to the end of the first
string and the start of the second string.
"""
# Cache the text lengths to prevent multiple calls.
text1_length = len(text1)
text2_length = len(text2)
# Eliminate the null case.
if text1_length == 0 or text2_length == 0:
return 0
# Truncate the longer string.
if text1_length > text2_length:
text1 = text1[-text2_length:]
elif text1_length < text2_length:
text2 = text2[:text1_length]
text_length = min(text1_length, text2_length)
# Quick check for the worst case.
if text1 == text2:
return text_length
# Start by looking for a single character match
# and increase length until no match is found.
# Performance analysis: http://neil.fraser.name/news/2010/11/04/
best = 0
length = 1
while True:
pattern = text1[-length:]
found = text2.find(pattern)
if found == -1:
return best
length += found
if found == 0 or text1[-length:] == text2[:length]:
best = length
length += 1
def diff_halfMatch(self, text1, text2):
"""Do the two texts share a substring which is at least half the length of
the longer text?
This speedup can produce non-minimal diffs.
Args:
text1: First string.
text2: Second string.
Returns:
Five element Array, containing the prefix of text1, the suffix of text1,
the prefix of text2, the suffix of text2 and the common middle. Or None
if there was no match.
"""
if self.Diff_Timeout <= 0:
# Don't risk returning a non-optimal diff if we have unlimited time.
return None
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
return None # Pointless.
def diff_halfMatchI(longtext, shorttext, i):
"""Does a substring of shorttext exist within longtext such that the
substring is at least half the length of longtext?
Closure, but does not reference any external variables.
Args:
longtext: Longer string.
shorttext: Shorter string.
i: Start index of quarter length substring within longtext.
Returns:
Five element Array, containing the prefix of longtext, the suffix of
longtext, the prefix of shorttext, the suffix of shorttext and the
common middle. Or None if there was no match.
"""
seed = longtext[i:i + len(longtext) / 4]
best_common = ''
j = shorttext.find(seed)
while j != -1:
prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
if len(best_common) < suffixLength + prefixLength:
best_common = (shorttext[j - suffixLength:j] +
shorttext[j:j + prefixLength])
best_longtext_a = longtext[:i - suffixLength]
best_longtext_b = longtext[i + prefixLength:]
best_shorttext_a = shorttext[:j - suffixLength]
best_shorttext_b = shorttext[j + prefixLength:]
j = shorttext.find(seed, j + 1)
if len(best_common) * 2 >= len(longtext):
return (best_longtext_a, best_longtext_b,
best_shorttext_a, best_shorttext_b, best_common)
else:
return None
# First check if the second quarter is the seed for a half-match.
hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) / 4)
# Check again based on the third quarter.
hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) / 2)
if not hm1 and not hm2:
return None
elif not hm2:
hm = hm1
elif not hm1:
hm = hm2
else:
# Both matched. Select the longest.
if len(hm1[4]) > len(hm2[4]):
hm = hm1
else:
hm = hm2
# A half-match was found, sort out the return data.
if len(text1) > len(text2):
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
else:
(text2_a, text2_b, text1_a, text1_b, mid_common) = hm
return (text1_a, text1_b, text2_a, text2_b, mid_common)
def diff_cleanupSemantic(self, diffs):
"""Reduce the number of edits by eliminating semantically trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = None # Always equal to equalities[-1][1]
pointer = 0 # Index of current position.
# Number of chars that changed prior to the equality.
length_insertions1, length_deletions1 = 0, 0
# Number of chars that changed after the equality.
length_insertions2, length_deletions2 = 0, 0
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
equalities.append(pointer)
length_insertions1, length_insertions2 = length_insertions2, 0
length_deletions1, length_deletions2 = length_deletions2, 0
lastequality = diffs[pointer][1]
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_INSERT:
length_insertions2 += len(diffs[pointer][1])
else:
length_deletions2 += len(diffs[pointer][1])
# Eliminate an equality that is smaller or equal to the edits on both
# sides of it.
if (lastequality != None and (len(lastequality) <=
max(length_insertions1, length_deletions1)) and
(len(lastequality) <= max(length_insertions2, length_deletions2))):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
# Throw away the equality we just deleted.
equalities.pop()
# Throw away the previous equality (it needs to be reevaluated).
if len(equalities):
equalities.pop()
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
# Reset the counters.
length_insertions1, length_deletions1 = 0, 0
length_insertions2, length_deletions2 = 0, 0
lastequality = None
changes = True
pointer += 1
# Normalize the diff.
if changes:
self.diff_cleanupMerge(diffs)
self.diff_cleanupSemanticLossless(diffs)
# Find any overlaps between deletions and insertions.
# e.g: <del>abcxxx</del><ins>xxxdef</ins>
# -> <del>abc</del>xxx<ins>def</ins>
# Only extract an overlap if it is as big as the edit ahead or behind it.
pointer = 1
while pointer < len(diffs):
if (diffs[pointer - 1][0] == self.DIFF_DELETE and
diffs[pointer][0] == self.DIFF_INSERT):
deletion = diffs[pointer - 1][1]
insertion = diffs[pointer][1]
overlap_length = self.diff_commonOverlap(deletion, insertion)
if (overlap_length >= len(deletion) / 2.0 or
overlap_length >= len(insertion) / 2.0):
# Overlap found. Insert an equality and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL, insertion[:overlap_length]))
diffs[pointer - 1] = (self.DIFF_DELETE,
deletion[:len(deletion) - overlap_length])
diffs[pointer + 1] = (self.DIFF_INSERT, insertion[overlap_length:])
pointer += 1
pointer += 1
pointer += 1
def diff_cleanupSemanticLossless(self, diffs):
"""Look for single edits surrounded on both sides by equalities
which can be shifted sideways to align the edit to a word boundary.
e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
Args:
diffs: Array of diff tuples.
"""
def diff_cleanupSemanticScore(one, two):
"""Given two strings, compute a score representing whether the
internal boundary falls on logical boundaries.
Scores range from 5 (best) to 0 (worst).
Closure, but does not reference any external variables.
Args:
one: First string.
two: Second string.
Returns:
The score.
"""
if not one or not two:
# Edges are the best.
return 5
# Each port of this function behaves slightly differently due to
# subtle differences in each language's definition of things like
# 'whitespace'. Since this function's purpose is largely cosmetic,
# the choice has been made to use each language's native features
# rather than force total conformity.
score = 0
# One point for non-alphanumeric.
if not one[-1].isalnum() or not two[0].isalnum():
score += 1
# Two points for whitespace.
if one[-1].isspace() or two[0].isspace():
score += 1
# Three points for line breaks.
if (one[-1] == "\r" or one[-1] == "\n" or
two[0] == "\r" or two[0] == "\n"):
score += 1
# Four points for blank lines.
if (re.search("\\n\\r?\\n$", one) or
re.match("^\\r?\\n\\r?\\n", two)):
score += 1
return score
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
equality1 = diffs[pointer - 1][1]
edit = diffs[pointer][1]
equality2 = diffs[pointer + 1][1]
# First, shift the edit as far left as possible.
commonOffset = self.diff_commonSuffix(equality1, edit)
if commonOffset:
commonString = edit[-commonOffset:]
equality1 = equality1[:-commonOffset]
edit = commonString + edit[:-commonOffset]
equality2 = commonString + equality2
# Second, step character by character right, looking for the best fit.
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
bestScore = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
while edit and equality2 and edit[0] == equality2[0]:
equality1 += edit[0]
edit = edit[1:] + equality2[0]
equality2 = equality2[1:]
score = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
# The >= encourages trailing rather than leading whitespace on edits.
if score >= bestScore:
bestScore = score
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
if diffs[pointer - 1][1] != bestEquality1:
# We have an improvement, save it back to the diff.
if bestEquality1:
diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
else:
del diffs[pointer - 1]
pointer -= 1
diffs[pointer] = (diffs[pointer][0], bestEdit)
if bestEquality2:
diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
else:
del diffs[pointer + 1]
pointer -= 1
pointer += 1
def diff_cleanupEfficiency(self, diffs):
"""Reduce the number of edits by eliminating operationally trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = '' # Always equal to equalities[-1][1]
pointer = 0 # Index of current position.
pre_ins = False # Is there an insertion operation before the last equality.
pre_del = False # Is there a deletion operation before the last equality.
post_ins = False # Is there an insertion operation after the last equality.
post_del = False # Is there a deletion operation after the last equality.
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
if (len(diffs[pointer][1]) < self.Diff_EditCost and
(post_ins or post_del)):
# Candidate found.
equalities.append(pointer)
pre_ins = post_ins
pre_del = post_del
lastequality = diffs[pointer][1]
else:
# Not a candidate, and can never become one.
equalities = []
lastequality = ''
post_ins = post_del = False
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_DELETE:
post_del = True
else:
post_ins = True
# Five types to be split:
# <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
# <ins>A</ins>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<ins>C</ins>
        # <del>A</del>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<del>C</del>
if lastequality and ((pre_ins and pre_del and post_ins and post_del) or
((len(lastequality) < self.Diff_EditCost / 2) and
(pre_ins + pre_del + post_ins + post_del) == 3)):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
equalities.pop() # Throw away the equality we just deleted.
lastequality = ''
if pre_ins and pre_del:
# No changes made which could affect previous entry, keep going.
post_ins = post_del = True
equalities = []
else:
if len(equalities):
equalities.pop() # Throw away the previous equality.
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
post_ins = post_del = False
changes = True
pointer += 1
if changes:
self.diff_cleanupMerge(diffs)
def diff_cleanupMerge(self, diffs):
"""Reorder and merge like edit sections. Merge equalities.
Any edit section can move as long as it doesn't cross an equality.
Args:
diffs: Array of diff tuples.
"""
diffs.append((self.DIFF_EQUAL, '')) # Add a dummy entry at the end.
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == self.DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == self.DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete + count_insert > 1:
if count_delete != 0 and count_insert != 0:
# Factor out any common prefixies.
commonlength = self.diff_commonPrefix(text_insert, text_delete)
if commonlength != 0:
x = pointer - count_delete - count_insert - 1
if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
diffs[x] = (diffs[x][0], diffs[x][1] +
text_insert[:commonlength])
else:
diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
pointer += 1
text_insert = text_insert[commonlength:]
text_delete = text_delete[commonlength:]
# Factor out any common suffixies.
commonlength = self.diff_commonSuffix(text_insert, text_delete)
if commonlength != 0:
diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] +
diffs[pointer][1])
text_insert = text_insert[:-commonlength]
text_delete = text_delete[:-commonlength]
# Delete the offending records and add the merged ones.
if count_delete == 0:
diffs[pointer - count_insert : pointer] = [
(self.DIFF_INSERT, text_insert)]
elif count_insert == 0:
diffs[pointer - count_delete : pointer] = [
(self.DIFF_DELETE, text_delete)]
else:
diffs[pointer - count_delete - count_insert : pointer] = [
(self.DIFF_DELETE, text_delete),
(self.DIFF_INSERT, text_insert)]
pointer = pointer - count_delete - count_insert + 1
if count_delete != 0:
pointer += 1
if count_insert != 0:
pointer += 1
elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
# Merge this equality with the previous one.
diffs[pointer - 1] = (diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer][1])
del diffs[pointer]
else:
pointer += 1
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
if diffs[-1][1] == '':
diffs.pop() # Remove the dummy entry at the end.
# Second pass: look for single edits surrounded on both sides by equalities
# which can be shifted sideways to eliminate an equality.
# e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
changes = False
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
# Shift the edit over the previous equality.
diffs[pointer] = (diffs[pointer][0],
diffs[pointer - 1][1] +
diffs[pointer][1][:-len(diffs[pointer - 1][1])])
diffs[pointer + 1] = (diffs[pointer + 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
del diffs[pointer - 1]
changes = True
elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
# Shift the edit over the next equality.
diffs[pointer - 1] = (diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
diffs[pointer] = (diffs[pointer][0],
diffs[pointer][1][len(diffs[pointer + 1][1]):] +
diffs[pointer + 1][1])
del diffs[pointer + 1]
changes = True
pointer += 1
# If shifts were made, the diff needs reordering and another shift sweep.
if changes:
self.diff_cleanupMerge(diffs)
def diff_xIndex(self, diffs, loc):
"""loc is a location in text1, compute and return the equivalent location
in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
Args:
diffs: Array of diff tuples.
loc: Location within text1.
Returns:
Location within text2.
"""
chars1 = 0
chars2 = 0
last_chars1 = 0
last_chars2 = 0
for x in xrange(len(diffs)):
(op, text) = diffs[x]
if op != self.DIFF_INSERT: # Equality or deletion.
chars1 += len(text)
if op != self.DIFF_DELETE: # Equality or insertion.
chars2 += len(text)
if chars1 > loc: # Overshot the location.
break
last_chars1 = chars1
last_chars2 = chars2
if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
# The location was deleted.
return last_chars2
# Add the remaining len(character).
return last_chars2 + (loc - last_chars1)
def diff_prettyHtml(self, diffs):
"""Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
"""
html = []
i = 0
for (op, data) in diffs:
      text = (data.replace("&", "&amp;").replace("<", "&lt;")
              .replace(">", "&gt;").replace("\n", "&para;<br>"))
if op == self.DIFF_INSERT:
html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
elif op == self.DIFF_DELETE:
html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
elif op == self.DIFF_EQUAL:
html.append("<span>%s</span>" % text)
if op != self.DIFF_DELETE:
i += len(data)
return "".join(html)
def diff_text1(self, diffs):
"""Compute and return the source text (all equalities and deletions).
Args:
diffs: Array of diff tuples.
Returns:
Source text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_INSERT:
text.append(data)
return "".join(text)
def diff_text2(self, diffs):
"""Compute and return the destination text (all equalities and insertions).
Args:
diffs: Array of diff tuples.
Returns:
Destination text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_DELETE:
text.append(data)
return "".join(text)
def diff_levenshtein(self, diffs):
"""Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes.
"""
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == self.DIFF_INSERT:
insertions += len(data)
elif op == self.DIFF_DELETE:
deletions += len(data)
elif op == self.DIFF_EQUAL:
# A deletion and an insertion is one substitution.
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
def diff_toDelta(self, diffs):
"""Crush the diff into an encoded string which describes the operations
required to transform text1 into text2.
E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
Operations are tab-separated. Inserted text is escaped using %xx notation.
Args:
diffs: Array of diff tuples.
Returns:
Delta text.
"""
text = []
for (op, data) in diffs:
if op == self.DIFF_INSERT:
# High ascii will raise UnicodeDecodeError. Use Unicode instead.
data = data.encode("utf-8")
text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,# "))
elif op == self.DIFF_DELETE:
text.append("-%d" % len(data))
elif op == self.DIFF_EQUAL:
text.append("=%d" % len(data))
return "\t".join(text)
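  # Example of the delta encoding produced above (a sketch following the docstring
  # format; values are illustrative):
  #   diff_toDelta([(DIFF_EQUAL, "jump"), (DIFF_DELETE, "s"), (DIFF_INSERT, "ed")])
  #   returns "=4\t-1\t+ed": keep 4 chars, delete 1 char, insert "ed".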
def diff_fromDelta(self, text1, delta):
"""Given the original text1, and an encoded string which describes the
operations required to transform text1 into text2, compute the full diff.
Args:
text1: Source string for the diff.
delta: Delta text.
Returns:
Array of diff tuples.
Raises:
ValueError: If invalid input.
"""
if type(delta) == unicode:
# Deltas should be composed of a subset of ascii chars, Unicode not
# required. If this encode raises UnicodeEncodeError, delta is invalid.
delta = delta.encode("ascii")
diffs = []
pointer = 0 # Cursor in text1
tokens = delta.split("\t")
for token in tokens:
if token == "":
# Blank tokens are ok (from a trailing \t).
continue
# Each token begins with a one character parameter which specifies the
# operation of this token (delete, insert, equality).
param = token[1:]
if token[0] == "+":
param = urllib.unquote(param).decode("utf-8")
diffs.append((self.DIFF_INSERT, param))
elif token[0] == "-" or token[0] == "=":
try:
n = int(param)
except ValueError:
raise ValueError("Invalid number in diff_fromDelta: " + param)
if n < 0:
raise ValueError("Negative number in diff_fromDelta: " + param)
text = text1[pointer : pointer + n]
pointer += n
if token[0] == "=":
diffs.append((self.DIFF_EQUAL, text))
else:
diffs.append((self.DIFF_DELETE, text))
else:
# Anything else is an error.
raise ValueError("Invalid diff operation in diff_fromDelta: " +
token[0])
if pointer != len(text1):
raise ValueError(
"Delta length (%d) does not equal source text length (%d)." %
(pointer, len(text1)))
return diffs
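  # Round-trip sketch for the two delta helpers (illustrative only):
  #   diff_fromDelta("jumps", "=4\t-1\t+ed") walks the source text and rebuilds
  #   [(DIFF_EQUAL, "jump"), (DIFF_DELETE, "s"), (DIFF_INSERT, "ed")], i.e. the
  #   same diff that diff_toDelta would encode as that delta.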
# MATCH FUNCTIONS
def match_main(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc'.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Check for null inputs.
if text == None or pattern == None:
raise ValueError("Null inputs. (match_main)")
loc = max(0, min(loc, len(text)))
if text == pattern:
# Shortcut (potentially not guaranteed by the algorithm)
return 0
elif not text:
# Nothing to match.
return -1
elif text[loc:loc + len(pattern)] == pattern:
# Perfect match at the perfect spot! (Includes case of null pattern)
return loc
else:
# Do a fuzzy compare.
match = self.match_bitap(text, pattern, loc)
return match
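  # Minimal usage sketch (hypothetical values):
  #   match_main("abcdef", "de", 3) returns 3, since the pattern sits exactly at
  #   the requested location; otherwise the fuzzy match_bitap search below is used.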
def match_bitap(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc' using the
Bitap algorithm.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Python doesn't have a maxint limit, so ignore this check.
#if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
# raise ValueError("Pattern too long for this application.")
# Initialise the alphabet.
s = self.match_alphabet(pattern)
def match_bitapScore(e, x):
"""Compute and return the score for a match with e errors and x location.
Accesses loc and pattern through being a closure.
Args:
e: Number of errors in match.
x: Location of match.
Returns:
Overall score for match (0.0 = good, 1.0 = bad).
"""
accuracy = float(e) / len(pattern)
proximity = abs(loc - x)
if not self.Match_Distance:
# Dodge divide by zero error.
return proximity and 1.0 or accuracy
return accuracy + (proximity / float(self.Match_Distance))
# Highest score beyond which we give up.
score_threshold = self.Match_Threshold
# Is there a nearby exact match? (speedup)
best_loc = text.find(pattern, loc)
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# What about in the other direction? (speedup)
best_loc = text.rfind(pattern, loc + len(pattern))
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# Initialise the bit arrays.
matchmask = 1 << (len(pattern) - 1)
best_loc = -1
bin_max = len(pattern) + len(text)
# Empty initialization added to appease pychecker.
last_rd = None
for d in xrange(len(pattern)):
# Scan for the best match each iteration allows for one more error.
# Run a binary search to determine how far from 'loc' we can stray at
# this error level.
bin_min = 0
bin_mid = bin_max
while bin_min < bin_mid:
if match_bitapScore(d, loc + bin_mid) <= score_threshold:
bin_min = bin_mid
else:
bin_max = bin_mid
bin_mid = (bin_max - bin_min) / 2 + bin_min
# Use the result from this iteration as the maximum for the next.
bin_max = bin_mid
start = max(1, loc - bin_mid + 1)
finish = min(loc + bin_mid, len(text)) + len(pattern)
rd = range(finish + 1)
rd.append((1 << d) - 1)
for j in xrange(finish, start - 1, -1):
if len(text) <= j - 1:
# Out of range.
charMatch = 0
else:
charMatch = s.get(text[j - 1], 0)
if d == 0: # First pass: exact match.
rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
else: # Subsequent passes: fuzzy match.
rd[j] = ((rd[j + 1] << 1) | 1) & charMatch | (
((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
if rd[j] & matchmask:
score = match_bitapScore(d, j - 1)
# This match will almost certainly be better than any existing match.
# But check anyway.
if score <= score_threshold:
# Told you so.
score_threshold = score
best_loc = j - 1
if best_loc > loc:
# When passing loc, don't exceed our current distance from loc.
start = max(1, 2 * loc - best_loc)
else:
# Already passed loc, downhill from here on in.
break
# No hope for a (better) match at greater error levels.
if match_bitapScore(d + 1, loc) > score_threshold:
break
last_rd = rd
return best_loc
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in xrange(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
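  # Example of the Bitap alphabet built above (a small sketch):
  #   match_alphabet("abc") returns {'a': 0b100, 'b': 0b010, 'c': 0b001}, i.e. each
  #   character maps to a bitmask marking its position(s) in the pattern.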
# PATCH FUNCTIONS
def patch_addContext(self, patch, text):
"""Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text.
"""
if len(text) == 0:
return
pattern = text[patch.start2 : patch.start2 + patch.length1]
padding = 0
# Look for the first and last matches of pattern in text. If two different
# matches are found, increase the pattern length.
while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==
0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -
self.Patch_Margin)):
padding += self.Patch_Margin
pattern = text[max(0, patch.start2 - padding) :
patch.start2 + patch.length1 + padding]
# Add one chunk for good luck.
padding += self.Patch_Margin
# Add the prefix.
prefix = text[max(0, patch.start2 - padding) : patch.start2]
if prefix:
patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
# Add the suffix.
suffix = text[patch.start2 + patch.length1 :
patch.start2 + patch.length1 + padding]
if suffix:
patch.diffs.append((self.DIFF_EQUAL, suffix))
# Roll back the start points.
patch.start1 -= len(prefix)
patch.start2 -= len(prefix)
# Extend lengths.
patch.length1 += len(prefix) + len(suffix)
patch.length2 += len(prefix) + len(suffix)
def patch_make(self, a, b=None, c=None):
"""Compute a list of patches to turn text1 into text2.
Use diffs if provided, otherwise compute it ourselves.
There are four ways to call this function, depending on what data is
available to the caller:
Method 1:
a = text1, b = text2
Method 2:
a = diffs
Method 3 (optimal):
a = text1, b = diffs
Method 4 (deprecated, use method 3):
a = text1, b = text2, c = diffs
Args:
a: text1 (methods 1,3,4) or Array of diff tuples for text1 to
text2 (method 2).
b: text2 (methods 1,4) or Array of diff tuples for text1 to
text2 (method 3) or undefined (method 2).
c: Array of diff tuples for text1 to text2 (method 4) or
undefined (methods 1,2,3).
Returns:
Array of patch objects.
"""
text1 = None
diffs = None
# Note that texts may arrive as 'str' or 'unicode'.
if isinstance(a, basestring) and isinstance(b, basestring) and c is None:
# Method 1: text1, text2
# Compute diffs from text1 and text2.
text1 = a
diffs = self.diff_main(text1, b, True)
if len(diffs) > 2:
self.diff_cleanupSemantic(diffs)
self.diff_cleanupEfficiency(diffs)
elif isinstance(a, list) and b is None and c is None:
# Method 2: diffs
# Compute text1 from diffs.
diffs = a
text1 = self.diff_text1(diffs)
elif isinstance(a, basestring) and isinstance(b, list) and c is None:
# Method 3: text1, diffs
text1 = a
diffs = b
elif (isinstance(a, basestring) and isinstance(b, basestring) and
isinstance(c, list)):
# Method 4: text1, text2, diffs
# text2 is not used.
text1 = a
diffs = c
else:
raise ValueError("Unknown call format to patch_make.")
if not diffs:
return [] # Get rid of the None case.
patches = []
patch = patch_obj()
char_count1 = 0 # Number of characters into the text1 string.
char_count2 = 0 # Number of characters into the text2 string.
prepatch_text = text1 # Recreate the patches to determine context info.
postpatch_text = text1
for x in xrange(len(diffs)):
(diff_type, diff_text) = diffs[x]
if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
# A new patch starts here.
patch.start1 = char_count1
patch.start2 = char_count2
if diff_type == self.DIFF_INSERT:
# Insertion
patch.diffs.append(diffs[x])
patch.length2 += len(diff_text)
postpatch_text = (postpatch_text[:char_count2] + diff_text +
postpatch_text[char_count2:])
elif diff_type == self.DIFF_DELETE:
# Deletion.
patch.length1 += len(diff_text)
patch.diffs.append(diffs[x])
postpatch_text = (postpatch_text[:char_count2] +
postpatch_text[char_count2 + len(diff_text):])
elif (diff_type == self.DIFF_EQUAL and
len(diff_text) <= 2 * self.Patch_Margin and
len(patch.diffs) != 0 and len(diffs) != x + 1):
# Small equality inside a patch.
patch.diffs.append(diffs[x])
patch.length1 += len(diff_text)
patch.length2 += len(diff_text)
if (diff_type == self.DIFF_EQUAL and
len(diff_text) >= 2 * self.Patch_Margin):
# Time for a new patch.
if len(patch.diffs) != 0:
self.patch_addContext(patch, prepatch_text)
patches.append(patch)
patch = patch_obj()
# Unlike Unidiff, our patch lists have a rolling context.
# http://code.google.com/p/google-diff-match-patch/wiki/Unidiff
# Update prepatch text & pos to reflect the application of the
# just completed patch.
prepatch_text = postpatch_text
char_count1 = char_count2
# Update the current character count.
if diff_type != self.DIFF_INSERT:
char_count1 += len(diff_text)
if diff_type != self.DIFF_DELETE:
char_count2 += len(diff_text)
# Pick up the leftover patch if not empty.
if len(patch.diffs) != 0:
self.patch_addContext(patch, prepatch_text)
patches.append(patch)
return patches
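  # Typical end-to-end usage of the patch functions defined in this class (a
  # minimal sketch; variable names are illustrative):
  #   dmp = diff_match_patch()
  #   patches = dmp.patch_make(old_text, new_text)
  #   serialized = dmp.patch_toText(patches)       # textual form, e.g. for storage
  #   restored = dmp.patch_fromText(serialized)    # back to patch objects
  #   patched_text, applied_flags = dmp.patch_apply(restored, old_text)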
def patch_deepCopy(self, patches):
"""Given an array of patches, return another array that is identical.
Args:
patches: Array of patch objects.
Returns:
Array of patch objects.
"""
patchesCopy = []
for patch in patches:
patchCopy = patch_obj()
# No need to deep copy the tuples since they are immutable.
patchCopy.diffs = patch.diffs[:]
patchCopy.start1 = patch.start1
patchCopy.start2 = patch.start2
patchCopy.length1 = patch.length1
patchCopy.length2 = patch.length2
patchesCopy.append(patchCopy)
return patchesCopy
def patch_apply(self, patches, text):
"""Merge a set of patches onto the text. Return a patched text, as well
as a list of true/false values indicating which patches were applied.
Args:
patches: Array of patch objects.
text: Old text.
Returns:
Two element Array, containing the new text and an array of boolean values.
"""
if not patches:
return (text, [])
# Deep copy the patches so that no changes are made to originals.
patches = self.patch_deepCopy(patches)
nullPadding = self.patch_addPadding(patches)
text = nullPadding + text + nullPadding
self.patch_splitMax(patches)
# delta keeps track of the offset between the expected and actual location
# of the previous patch. If there are patches expected at positions 10 and
# 20, but the first patch was found at 12, delta is 2 and the second patch
# has an effective expected position of 22.
delta = 0
results = []
for patch in patches:
expected_loc = patch.start2 + delta
text1 = self.diff_text1(patch.diffs)
end_loc = -1
if len(text1) > self.Match_MaxBits:
# patch_splitMax will only provide an oversized pattern in the case of
# a monster delete.
start_loc = self.match_main(text, text1[:self.Match_MaxBits],
expected_loc)
if start_loc != -1:
end_loc = self.match_main(text, text1[-self.Match_MaxBits:],
expected_loc + len(text1) - self.Match_MaxBits)
if end_loc == -1 or start_loc >= end_loc:
# Can't find valid trailing context. Drop this patch.
start_loc = -1
else:
start_loc = self.match_main(text, text1, expected_loc)
if start_loc == -1:
# No match found. :(
results.append(False)
# Subtract the delta for this failed patch from subsequent patches.
delta -= patch.length2 - patch.length1
else:
# Found a match. :)
results.append(True)
delta = start_loc - expected_loc
if end_loc == -1:
text2 = text[start_loc : start_loc + len(text1)]
else:
text2 = text[start_loc : end_loc + self.Match_MaxBits]
if text1 == text2:
# Perfect match, just shove the replacement text in.
text = (text[:start_loc] + self.diff_text2(patch.diffs) +
text[start_loc + len(text1):])
else:
# Imperfect match.
# Run a diff to get a framework of equivalent indices.
diffs = self.diff_main(text1, text2, False)
if (len(text1) > self.Match_MaxBits and
self.diff_levenshtein(diffs) / float(len(text1)) >
self.Patch_DeleteThreshold):
# The end points match, but the content is unacceptably bad.
results[-1] = False
else:
self.diff_cleanupSemanticLossless(diffs)
index1 = 0
for (op, data) in patch.diffs:
if op != self.DIFF_EQUAL:
index2 = self.diff_xIndex(diffs, index1)
if op == self.DIFF_INSERT: # Insertion
text = text[:start_loc + index2] + data + text[start_loc +
index2:]
elif op == self.DIFF_DELETE: # Deletion
text = text[:start_loc + index2] + text[start_loc +
self.diff_xIndex(diffs, index1 + len(data)):]
if op != self.DIFF_DELETE:
index1 += len(data)
# Strip the padding off.
text = text[len(nullPadding):-len(nullPadding)]
return (text, results)
def patch_addPadding(self, patches):
"""Add some padding on text start and end so that edges can match
something. Intended to be called only from within patch_apply.
Args:
patches: Array of patch objects.
Returns:
The padding string added to each side.
"""
paddingLength = self.Patch_Margin
nullPadding = ""
for x in xrange(1, paddingLength + 1):
nullPadding += chr(x)
# Bump all the patches forward.
for patch in patches:
patch.start1 += paddingLength
patch.start2 += paddingLength
# Add some padding on start of first diff.
patch = patches[0]
diffs = patch.diffs
if not diffs or diffs[0][0] != self.DIFF_EQUAL:
# Add nullPadding equality.
diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
patch.start1 -= paddingLength # Should be 0.
patch.start2 -= paddingLength # Should be 0.
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[0][1]):
# Grow first equality.
extraLength = paddingLength - len(diffs[0][1])
newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]
diffs[0] = (diffs[0][0], newText)
patch.start1 -= extraLength
patch.start2 -= extraLength
patch.length1 += extraLength
patch.length2 += extraLength
# Add some padding on end of last diff.
patch = patches[-1]
diffs = patch.diffs
if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
# Add nullPadding equality.
diffs.append((self.DIFF_EQUAL, nullPadding))
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[-1][1]):
# Grow last equality.
extraLength = paddingLength - len(diffs[-1][1])
newText = diffs[-1][1] + nullPadding[:extraLength]
diffs[-1] = (diffs[-1][0], newText)
patch.length1 += extraLength
patch.length2 += extraLength
return nullPadding
def patch_splitMax(self, patches):
"""Look through the patches and break up any which are longer than the
maximum limit of the match algorithm.
Intended to be called only from within patch_apply.
Args:
patches: Array of patch objects.
"""
patch_size = self.Match_MaxBits
if patch_size == 0:
# Python has the option of not splitting strings due to its ability
# to handle integers of arbitrary precision.
return
for x in xrange(len(patches)):
if patches[x].length1 > patch_size:
bigpatch = patches[x]
# Remove the big old patch.
del patches[x]
x -= 1
start1 = bigpatch.start1
start2 = bigpatch.start2
precontext = ''
while len(bigpatch.diffs) != 0:
# Create one of several smaller patches.
patch = patch_obj()
empty = True
patch.start1 = start1 - len(precontext)
patch.start2 = start2 - len(precontext)
if precontext:
patch.length1 = patch.length2 = len(precontext)
patch.diffs.append((self.DIFF_EQUAL, precontext))
while (len(bigpatch.diffs) != 0 and
patch.length1 < patch_size - self.Patch_Margin):
(diff_type, diff_text) = bigpatch.diffs[0]
if diff_type == self.DIFF_INSERT:
# Insertions are harmless.
patch.length2 += len(diff_text)
start2 += len(diff_text)
patch.diffs.append(bigpatch.diffs.pop(0))
empty = False
elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and
patch.diffs[0][0] == self.DIFF_EQUAL and
len(diff_text) > 2 * patch_size):
# This is a large deletion. Let it pass in one chunk.
patch.length1 += len(diff_text)
start1 += len(diff_text)
empty = False
patch.diffs.append((diff_type, diff_text))
del bigpatch.diffs[0]
else:
# Deletion or equality. Only take as much as we can stomach.
diff_text = diff_text[:patch_size - patch.length1 -
self.Patch_Margin]
patch.length1 += len(diff_text)
start1 += len(diff_text)
if diff_type == self.DIFF_EQUAL:
patch.length2 += len(diff_text)
start2 += len(diff_text)
else:
empty = False
patch.diffs.append((diff_type, diff_text))
if diff_text == bigpatch.diffs[0][1]:
del bigpatch.diffs[0]
else:
bigpatch.diffs[0] = (bigpatch.diffs[0][0],
bigpatch.diffs[0][1][len(diff_text):])
# Compute the head context for the next patch.
precontext = self.diff_text2(patch.diffs)
precontext = precontext[-self.Patch_Margin:]
# Append the end context for this patch.
postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
if postcontext:
patch.length1 += len(postcontext)
patch.length2 += len(postcontext)
if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +
postcontext)
else:
patch.diffs.append((self.DIFF_EQUAL, postcontext))
if not empty:
x += 1
patches.insert(x, patch)
def patch_toText(self, patches):
"""Take a list of patches and return a textual representation.
Args:
patches: Array of patch objects.
Returns:
Text representation of patches.
"""
text = []
for patch in patches:
text.append(str(patch))
return "".join(text)
def patch_fromText(self, textline):
"""Parse a textual representation of patches and return a list of patch
objects.
Args:
textline: Text representation of patches.
Returns:
Array of patch objects.
Raises:
ValueError: If invalid input.
"""
if type(textline) == unicode:
# Patches should be composed of a subset of ascii chars, Unicode not
# required. If this encode raises UnicodeEncodeError, patch is invalid.
textline = textline.encode("ascii")
patches = []
if not textline:
return patches
text = textline.split('\n')
while len(text) != 0:
m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
if not m:
raise ValueError("Invalid patch string: " + text[0])
patch = patch_obj()
patches.append(patch)
patch.start1 = int(m.group(1))
if m.group(2) == '':
patch.start1 -= 1
patch.length1 = 1
elif m.group(2) == '0':
patch.length1 = 0
else:
patch.start1 -= 1
patch.length1 = int(m.group(2))
patch.start2 = int(m.group(3))
if m.group(4) == '':
patch.start2 -= 1
patch.length2 = 1
elif m.group(4) == '0':
patch.length2 = 0
else:
patch.start2 -= 1
patch.length2 = int(m.group(4))
del text[0]
while len(text) != 0:
if text[0]:
sign = text[0][0]
else:
sign = ''
line = urllib.unquote(text[0][1:])
line = line.decode("utf-8")
if sign == '+':
# Insertion.
patch.diffs.append((self.DIFF_INSERT, line))
elif sign == '-':
# Deletion.
patch.diffs.append((self.DIFF_DELETE, line))
elif sign == ' ':
# Minor equality.
patch.diffs.append((self.DIFF_EQUAL, line))
elif sign == '@':
# Start of next patch.
break
elif sign == '':
# Blank line? Whatever.
pass
else:
# WTF?
raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
del text[0]
return patches
class patch_obj:
"""Class representing one patch operation.
"""
def __init__(self):
"""Initializes with an empty list of diffs.
"""
self.diffs = []
self.start1 = None
self.start2 = None
self.length1 = 0
self.length2 = 0
  def __str__(self):
    """Emulate GNU diff's format.
    Header: @@ -382,8 +481,9 @@
    Indices are printed as 1-based, not 0-based.
Returns:
The GNU diff string.
"""
if self.length1 == 0:
coords1 = str(self.start1) + ",0"
elif self.length1 == 1:
coords1 = str(self.start1 + 1)
else:
coords1 = str(self.start1 + 1) + "," + str(self.length1)
if self.length2 == 0:
coords2 = str(self.start2) + ",0"
elif self.length2 == 1:
coords2 = str(self.start2 + 1)
else:
coords2 = str(self.start2 + 1) + "," + str(self.length2)
text = ["@@ -", coords1, " +", coords2, " @@\n"]
# Escape the body of the patch with %xx notation.
for (op, data) in self.diffs:
if op == diff_match_patch.DIFF_INSERT:
text.append("+")
elif op == diff_match_patch.DIFF_DELETE:
text.append("-")
elif op == diff_match_patch.DIFF_EQUAL:
text.append(" ")
# High ascii will raise UnicodeDecodeError. Use Unicode instead.
data = data.encode("utf-8")
text.append(urllib.quote(data, "!~*'();/?:@&=+$,# ") + "\n")
return "".join(text)
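# Example of the header produced by patch_obj.__str__ (illustrative values): a patch
# with start1=381, length1=8, start2=480, length2=9 prints the 1-based header
# "@@ -382,8 +481,9 @@", matching the docstring above, followed by one %xx-escaped
# line per diff tuple prefixed with '+', '-' or ' '.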
|
PypiClean
|
/opal-azure-cli-hdinsight-0.3.8.tar.gz/opal-azure-cli-hdinsight-0.3.8/azure/cli/command_modules/hdinsight/util.py
|
def get_key_for_storage_account(cmd, storage_account): # pylint: disable=unused-argument
from ._client_factory import cf_storage
from msrestazure.tools import parse_resource_id, is_valid_resource_id
from knack.util import CLIError
storage_account_key = None
if is_valid_resource_id(storage_account):
parsed_storage_account = parse_resource_id(storage_account)
resource_group_name = parsed_storage_account['resource_group']
storage_account_name = parsed_storage_account['resource_name']
storage_client = cf_storage(cmd.cli_ctx)
keys = storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
storage_account_key = keys.keys[0].value # pylint: disable=no-member
elif storage_account:
raise CLIError('Failed to get access key for storage account: {}'.format(storage_account))
return storage_account_key
def get_storage_account_endpoint(cmd, storage_account, is_wasb):
from ._client_factory import cf_storage
from msrestazure.tools import parse_resource_id, is_valid_resource_id
host = None
if is_valid_resource_id(storage_account):
parsed_storage_account = parse_resource_id(storage_account)
resource_group_name = parsed_storage_account['resource_group']
storage_account_name = parsed_storage_account['resource_name']
storage_client = cf_storage(cmd.cli_ctx)
storage_account = storage_client.storage_accounts.get_properties(
resource_group_name=resource_group_name,
account_name=storage_account_name)
def extract_endpoint(storage_account, is_wasb):
if not storage_account:
return None
return storage_account.primary_endpoints.dfs if not is_wasb else storage_account.primary_endpoints.blob
def extract_host(uri):
import re
return uri and re.search('//(.*)/', uri).groups()[0]
host = extract_host(extract_endpoint(storage_account, is_wasb))
return host
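# Illustration of the endpoint/host extraction above (hypothetical account name):
#   extract_endpoint() yields e.g. "https://mystorageacct.dfs.core.windows.net/"
#   (or the .blob endpoint when is_wasb is True) and extract_host() reduces it to
#   "mystorageacct.dfs.core.windows.net".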
def build_identities_info(identities):
from azure.mgmt.hdinsight.models import ClusterIdentity, ResourceIdentityType
identity = None
if identities:
identity_type = ResourceIdentityType.user_assigned
identity = ClusterIdentity(type=identity_type)
identity.user_assigned_identities = {e: {} for e in identities}
return identity
def build_virtual_network_profile(subnet):
from msrestazure.tools import resource_id, parse_resource_id, is_valid_resource_id
from azure.mgmt.hdinsight.models import VirtualNetworkProfile
from knack.util import CLIError
vnet_profile = None
if is_valid_resource_id(subnet):
parsed_subnet_id = parse_resource_id(subnet)
subscription_name = parsed_subnet_id['subscription']
resource_group_name = parsed_subnet_id['resource_group']
vnet_namespace = parsed_subnet_id['namespace']
vnet_type = parsed_subnet_id['type']
vnet_name = parsed_subnet_id['name']
vnet_id = resource_id(
subscription=subscription_name,
resource_group=resource_group_name,
namespace=vnet_namespace,
type=vnet_type,
name=vnet_name)
vnet_profile = VirtualNetworkProfile(id=vnet_id, subnet=subnet)
elif subnet:
raise CLIError('Invalid subnet: {}'.format(subnet))
return vnet_profile
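# A valid `subnet` value here is a full ARM resource ID (hypothetical example):
#   /subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet-name>/subnets/<subnet-name>
# from which the code above rebuilds the parent virtual network ID for the profile.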
def parse_domain_name(domain):
from msrestazure.tools import parse_resource_id, is_valid_resource_id
domain_name = None
if is_valid_resource_id(domain):
parsed_domain_id = parse_resource_id(domain)
domain_name = parsed_domain_id['resource_name']
return domain_name
# Validate ESP cluster creation required parameters
def validate_esp_cluster_create_params(esp,
cluster_name,
resource_group_name,
cluster_type,
subnet,
domain,
cluster_admin_account,
assign_identity,
ldaps_urls,
cluster_admin_password,
cluster_users_group_dns):
from knack.util import CLIError
if esp:
missing_params = []
if not cluster_name:
missing_params.append("--name/-n")
if not resource_group_name:
missing_params.append("--resource-group/-g")
if not cluster_type:
missing_params.append("--type/-t")
if not subnet:
missing_params.append("--subnet")
if not domain:
missing_params.append("--domain")
if not cluster_admin_account:
missing_params.append("--cluster-admin-account")
if not assign_identity:
missing_params.append("--assign-identity")
if missing_params:
raise CLIError('the following params are required '
'when --esp is specified: {}'.format(', '.join(missing_params)))
else:
esp_params = []
if domain:
esp_params.append("--domain")
if cluster_admin_account:
            esp_params.append("--cluster-admin-account")
if ldaps_urls:
esp_params.append("--ldaps-urls")
if cluster_admin_password:
esp_params.append("--cluster-admin-password")
if cluster_users_group_dns:
esp_params.append("--cluster-users-group-dns")
if esp_params:
raise CLIError('the following params are required only '
'when --esp is specified: {}'.format(', '.join(esp_params)))
|
PypiClean
|
/neural_lifetimes-0.1.0-py3-none-any.whl/neural_lifetimes/utils/data/encoder_with_unknown.py
|
from typing import Dict, Union
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
import torch
# TODO What does this function actually do? it doesn't normalize the data
def normalize(x):
"""
    Coerce the input into a 2-D array with a single column. Only 1 encoding is handled at a time.
    Args:
        x: The data to be converted (array-like or ``torch.Tensor``).
    Returns:
        np.array: The input as an array of shape ``(n, 1)``.
Note:
Since we are using np.array, it may lead to errors with GPUs.
"""
try:
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
except Exception:
pass
x = np.array(x) # TODO Why copy the data?
if len(x.shape) == 1:
x = x[:, None] # TODO is this the same as np.expand_dims() ?
assert x.shape[1] == 1 # only handle one encoding at a time
return x
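# Shape behaviour of the helper above (a small sketch):
#   normalize([1, 2, 3]) -> np.ndarray of shape (3, 1)
#   normalize(torch.tensor([1, 2, 3])) -> the same, after detaching and moving to CPU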
# TODO The encoder truncates the "<Unknown>" token when the original dtype is too short. This could be handled better.
class OrdinalEncoderWithUnknown(OrdinalEncoder):
"""An ordinal encoder that encodes unknown values as 0.
The OrdinalEncoderWithUnknown works with unknown values. If an unknown value is passed into ``transform()``,
it will be encoded as ``0``. The ``inverse_transform`` maps ``0`` to ``<Unknown>``.
The encoder acts as a surjective mapping.
Attributes:
levels (np.ndarray): The raw levels that can be decoded to. Includes the ``<Unknown>`` token.
Basis:
``sklearn.preprocessing.OrdinalEncoder``
"""
# uses 0 to encode unknown values
def transform(self, x: np.ndarray) -> np.ndarray:
"""Transforms the data into encoded format.
Args:
x (np.ndarray): The raw data.
Returns:
np.ndarray: The encoded data with dtype ``int64``.
"""
x = normalize(x)
out = np.zeros(x.shape).astype(int)
# The below was the old implementation
# known = np.array([xx[0] in self.categories_[0] for xx in x])
# this should give identical results but faster
known = np.isin(x, self.categories_[0]).reshape(-1)
if any(known):
out[known] = super(OrdinalEncoderWithUnknown, self).transform(np.array(x)[known]) + 1
return out
    def fit(self, x: np.ndarray) -> "OrdinalEncoderWithUnknown":
        """Fits the encoder.
        Args:
            x (np.ndarray): The raw data array.
        Returns:
            OrdinalEncoderWithUnknown: The fitted encoder instance.
        """
x = normalize(x)
return super().fit(x)
def __len__(self) -> int:
return len(self.categories_[0]) + 1
def inverse_transform(self, x: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
"""Transforms the data into the decoded format.
Unknown values will be decoded as "<Unknown>".
Args:
x (Union[np.ndarray, torch.Tensor]): The encoded data.
Returns:
            np.ndarray: The decoded data. The dtype will match the dtype of the array passed into the ``fit`` method.
        Note:
            If the string dtype passed into ``fit`` is too short for "<Unknown>", this token will be truncated.
"""
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
out = np.full_like(x, "<Unknown>", dtype=self.categories_[0].dtype)
known = x > 0
if any(known):
out[known] = (
super(OrdinalEncoderWithUnknown, self)
.inverse_transform(np.expand_dims(x[known], axis=-1) - 1)
.reshape(-1)
)
return out
@property
def levels(self):
return np.concatenate((np.array(["<Unknown>"]).astype(self.categories_[0].dtype), self.categories_[0]))
def to_dict(self) -> Dict[str, int]:
"""Converts the encoder into a dictionary structure mapping raw to encoded values. Includes unknown token.
Returns:
Dict[str, int]: Dictionary of form ``raw: encoded``.
"""
return {level: self.transform(np.array([level])).item() for level in self.levels}
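# Minimal usage sketch of the encoder (illustrative values only):
#   enc = OrdinalEncoderWithUnknown()
#   enc.fit(np.array(["cat", "dog"]))
#   enc.transform(np.array(["cat", "bird"]))   # -> [[1], [0]]  ("bird" is unknown -> 0)
#   enc.levels                                 # "<Unknown>" plus the fitted categories
# Note that with short string dtypes the "<Unknown>" token is truncated, as per the
# TODO above.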
|
PypiClean
|
/Products.PyConBrasil-2.4.1.tar.gz/Products.PyConBrasil-2.4.1/Products/PyConBrasil/Treinamento.py
|
__author__ = """Jean Rodrigo Ferri / Dorneles Treméa / Fabiano Weimar / Rodrigo Senra /
Érico Andrei <[email protected]>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from Products.PyConBrasil.Trabalho import Trabalho
from Products.PyConBrasil.config import *
##code-section module-header #fill in your manual code here
##/code-section module-header
schema = Schema((
),
)
##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema
Treinamento_schema = BaseSchema.copy() + \
getattr(Trabalho, 'schema', Schema(())).copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
Treinamento_schema['title'].required = 0
Treinamento_schema['title'].widget.visible = {'view':'invisible', 'edit':'invisible'}
##/code-section after-schema
class Treinamento(Trabalho, BaseContent):
    """Registration of a training session (course, mini-course, tutorial,
    etc.). This training is subject to approval by the event
    committee.
"""
security = ClassSecurityInfo()
__implements__ = (getattr(Trabalho,'__implements__',()),) + (getattr(BaseContent,'__implements__',()),)
# This name appears in the 'add' box
archetype_name = 'Treinamento'
meta_type = 'Treinamento'
portal_type = 'Treinamento'
allowed_content_types = [] + list(getattr(Trabalho, 'allowed_content_types', []))
filter_content_types = 0
global_allow = 0
content_icon = 'treinamento_icon.gif'
immediate_view = 'base_view'
default_view = 'base_view'
suppl_views = ()
    typeDescription = "Inscrição de um treinamento no evento."
typeDescMsgId = 'description_edit_treinamento'
_at_rename_after_creation = True
schema = Treinamento_schema
for schemata in ['settings','categorization','metadata','dates','ownership']:
for field in schema.getSchemataFields(schemata):
field.widget.visible={'edit':'invisible','view':'invisible'}
##code-section class-header #fill in your manual code here
##/code-section class-header
# Methods
security.declarePrivate('getVocTempo')
def getVocTempo(self):
"""
"""
vocTempo = ['Indiferente',
'2 horas',
'4 horas',
'8 horas',
'16 horas',]
return tuple(vocTempo)
def modify_fti(fti):
# Hide unnecessary tabs (usability enhancement)
for a in fti['actions']:
if a['id'] in ['metadata', 'sharing']:
a['visible'] = 0
return fti
registerType(Treinamento, PROJECTNAME)
# end of class Treinamento
##code-section module-footer #fill in your manual code here
##/code-section module-footer
|
PypiClean
|
/robust_json-1.2.7-py3-none-any.whl/robust_json/__internal_utils.py
|
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################
# * This file contains all internal utils
# * used by main package
###############################
import jsonpath_ng.ext as jsonpath
import os.path
from pathlib2 import Path
import json as JSON
from robust_json.errors import (
JSONFileError,
JSONPathError,
IncorrectFunctionParameterTypeError,
)
class service:
"""
Internal 'robust_json' package utils
"""
def __init__(self):
pass
def check_file(self, path: str, file_formats: list[str]) -> bool:
"""
Check if source file is ready for processing.
This function will check if file exists, has a correct extension and
contains valid JSON (JSON that can be parsed).
        Parameters: `path : str` specifies the path to the file that needs to be checked.
`file_formats : list of str` contains all supported file extensions. If source
file extension is not supported, this method will raise an exception.
        This function returns a boolean: `True` - file is supported and is ready for processing,
`False` - Something is wrong with the file. If file is empty, this method will add an empty object ({}) there.
In this case, function will return `True`.
This function will raise a `JSONFileError` if file extension is not supported.
This function will raise a `FileNotFoundError` if specified file doesn't exist or cannot be accessed.
"""
# Checking parameters type
if type(path) != str:
raise IncorrectFunctionParameterTypeError(
"path", "str", type(path).__name__
)
        if type(file_formats) != list:
            raise IncorrectFunctionParameterTypeError(
                "file_formats", "list", type(file_formats).__name__
            )
# Checking type of each array element
for i in enumerate(file_formats):
if type(i[1]) != str:
raise TypeError(
f"Array `file_formats` must contain only strings; got {type(i[1]).__name__} instead (Array index: [{i[0]}])."
)
# Verifying file extension and path
if Path(path).suffix not in file_formats:
raise JSONFileError(
f'Supported file extensions are {", ".join(file_formats)}; got {Path(path).suffix} instead.'
)
# If file does not exist, raise an exception
if not os.path.exists(path):
raise FileNotFoundError(f"File `{path}` is not found.")
file = open(path, "r")
cont = file.read()
file.close()
if cont == None or cont == "":
# If file is empty, write empty dictionary there and close it
file = open(path, "w")
file.write(JSON.dumps({}))
file.close()
# Read this file again
file = open(path, "r")
cont = file.read()
file.close()
try:
            # Try to deserialize JSON from the file
JSON.loads(cont)
return True
except:
return False
def check_json_path(self, path: str, json: dict) -> bool:
"""
Check if JSON path exists
This function will check if given JSON path exists in specified JSON object.
Parameters: `path : str` specifies property path that needs to be checked.
`json : dict` specifies Python dictionary (JSON object), where this JSON path
needs to be present.
This function returns `True` if path is found and `False` if path cannot be
accessed (does not exist).
        This function raises an `IncorrectFunctionParameterTypeError` exception if one or more of its parameters have incorrect types.
        This function raises a `JSONPathError` exception if the JSON path is an empty string.
"""
# Checking types of functions' parameters
if type(path) != str:
raise IncorrectFunctionParameterTypeError(
"path", "str", type(path).__name__
)
if path == "":
raise JSONPathError("JSON path is empty.")
if type(json) != dict:
raise IncorrectFunctionParameterTypeError(
"json", "dict", type(json).__name__
)
js_expr = jsonpath.parse(path) # Parsing JSON using JSON path
# If path is valid, return True. Otherwise return False
if js_expr.find(json):
return True
else:
return False
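    # Small usage sketch (hypothetical data):
    #   service().check_json_path("user.name", {"user": {"name": "Alice"}})  # -> True
    #   service().check_json_path("user.age", {"user": {"name": "Alice"}})   # -> False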
def check_file_path(self, path: str) -> bool:
"""
Check if file exists.
Parameters: `path : str` specifies the path to the file.
This function returns `True` if file exists,
otherwise it returns `False`
This function raises an `IncorrectFunctionParameterTypeError` if `path` parameter
has an incorrect type.
This function raises a `ValueError` exception if `path` parameter is
equal to an empty string.
"""
if type(path) != str:
raise IncorrectFunctionParameterTypeError(
"path", "str", type(path).__name__
)
if path == "":
raise ValueError("Parameter `path` is an empty string.")
if os.path.exists(path):
return True
else:
return False
|
PypiClean
|
/dmri-amico-1.5.4.tar.gz/dmri-amico-1.5.4/amico/models.py
|
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy
from os.path import join as pjoin
import amico.lut
from tqdm import tqdm
import abc
from amico.util import PRINT, ERROR, get_verbose
from amico.synthesis import Stick, Zeppelin, Ball, CylinderGPD, SphereGPD, Astrosticks, NODDIIntraCellular, NODDIExtraCellular, NODDIIsotropic
import warnings
warnings.filterwarnings("ignore") # needed for a problem with spams
warnings.formatwarning = lambda message, category, filename, lineno, line=None: \
"[WARNING] %s " % message
# import the spams module, which is used only to fit the models in AMICO.
# But, on the other hand, using the models from COMMIT does not require that!
try :
import spams
except ImportError:
    warnings.warn('Module "spams" does not seem to be installed; perhaps you will not be able to call the fit() functions of some models.')
class BaseModel( object ) :
#class BaseModel( object, metaclass=abc.ABCMeta ) :
"""Basic class to build a model; new models should inherit from this class.
All the methods need to be overloaded to account for the specific needs of the model.
Each method will then be called by a dispatcher when needed.
NB: this model also serves the purpose of illustrating the creation of new models.
Attributes
----------
id : string
Identification code for the model
name : string
A more human-readable description for the model (can be equal to id)
scheme: Scheme class
Acquisition scheme to be used for resampling
maps_name : list of strings
Names of the maps computed/returned by the model (suffix to saved filenames)
maps_descr : list of strings
Description of each map (will be saved in the description of the NIFTI header)
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__( self ) :
"""To define the parameters of the model, e.g. id and name, returned maps,
model-specific parameters etc.
"""
self.id = 'BaseModel'
self.name = 'Base Model'
self.maps_name = []
self.maps_descr = []
self.scheme = None
return
@abc.abstractmethod
def set( self, *args, **kwargs ) :
"""For setting all the parameters specific to the model.
NB: the parameters are model-dependent.
"""
return
@abc.abstractmethod
def get_params( self ) :
"""For getting the actual values of all the parameters specific to the model.
NB: the parameters are model-dependent.
"""
return
@abc.abstractmethod
def set_solver( self, *args, **kwargs ) :
"""For setting the parameters required by the solver to fit the model.
NB: the parameters are model-dependent.
Returns
-------
params : dictionary
All the parameters that the solver will need to fit the model
"""
return
@abc.abstractmethod
    def generate( self, out_path, aux, idx_in, idx_out, ndirs ) :
        """For generating the signal response-functions and creating the LUT.
NB: do not change the signature!
Parameters
----------
out_path : string
Path where the response function have to be saved
aux : structure
Auxiliary structures to perform SH fitting and rotations
idx_in : array
Indices of the samples belonging to each shell
idx_out : array
Indices of the SH coefficients corresponding to each shell
ndirs : int
Number of directions on the half of the sphere representing the possible orientations of the response functions
"""
return
@abc.abstractmethod
def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ) :
"""For projecting the LUT to the subject space.
NB: do not change the signature!
Parameters
----------
in_path : Scheme class
Acquisition scheme of the acquired signal
idx_out : array
Indices of the samples belonging to each shell
Ylm_out : array
SH bases to project back each shell to signal space
doMergeB0: bool
Merge b0-volumes into a single volume if True
ndirs : int
Number of directions on the half of the sphere representing the possible orientations of the response functions
Returns
-------
KERNELS : dictionary
Contains the LUT and all corresponding details. In particular, it is
required to have a field 'model' set to "self.id".
"""
return
@abc.abstractmethod
def fit( self, y, dirs, KERNELS, params ) :
"""For fitting the model to the data.
NB: do not change the signature!
Parameters
----------
y : array
Diffusion signal at this voxel
dirs : list of arrays
Directions fitted in the voxel
KERNELS : dictionary
Contains the LUT and all corresponding details
params : dictionary
Parameters to be used by the solver
Returns
-------
MAPs : list of floats
            Scalar values estimated in each voxel
dirs_mod : list of arrays
Updated directions (if applicable), otherwise just return dirs
x : array
Coefficients of the fitting
A : array
Actual dictionary used in the fitting
"""
return
class StickZeppelinBall( BaseModel ) :
"""Implements the Stick-Zeppelin-Ball model [1].
The intra-cellular contributions from within the axons are modeled as "sticks", i.e.
tensors with a given axial diffusivity (d_par) but null perpendicular diffusivity (d_perp=0);
if d_perp>0, then a Zeppelin is used instead of a Stick.
Extra-cellular contributions are modeled as "Zeppelins", i.e. tensors with a given axial
    diffusivity (d_par_zep) and, possibly, a series of perpendicular diffusivities (d_perps_zep).
If the axial diffusivity of the Zeppelins is not specified, then it is assumed equal to that
of the Stick. Isotropic contributions are modeled as "Balls", i.e. tensors with isotropic
diffusivities (d_isos).
References
----------
.. [1] Panagiotaki et al. (2012) Compartment models of the diffusion MR signal
in brain white matter: A taxonomy and comparison. NeuroImage, 59: 2241-54
"""
def __init__( self ) :
self.id = 'StickZeppelinBall'
self.name = 'Stick-Zeppelin-Ball'
self.maps_name = [ ]
self.maps_descr = [ ]
self.d_par = 1.7E-3 # Parallel diffusivity for the Stick [mm^2/s]
self.d_perp = 0 # Perpendicular diffusivity for the Stick [mm^2/s]
self.d_par_zep = 1.7E-3 # Parallel diffusivity for the Zeppelins [mm^2/s]
self.d_perps_zep = np.array([ 1.19E-3, 0.85E-3, 0.51E-3, 0.17E-3]) # Perpendicular diffusivitie(s) [mm^2/s]
self.d_isos = np.array([ 3.0E-3 ]) # Isotropic diffusivitie(s) [mm^2/s]
def set( self, d_par, d_perps_zep, d_isos, d_par_zep=None, d_perp=0 ) :
self.d_par = d_par
self.d_perp = d_perp
if d_par_zep is None:
self.d_par_zep = d_par
else:
self.d_par_zep = d_par_zep
self.d_perps_zep = np.array( d_perps_zep )
self.d_isos = np.array( d_isos )
def get_params( self ) :
params = {}
params['id'] = self.id
params['name'] = self.name
params['d_par'] = self.d_par
params['d_perp'] = self.d_perp
params['d_par_zep'] = self.d_par_zep
params['d_perps_zep'] = self.d_perps_zep
params['d_isos'] = self.d_isos
return params
def set_solver( self ) :
ERROR( 'Not implemented' )
def generate( self, out_path, aux, idx_in, idx_out, ndirs ) :
scheme_high = amico.lut.create_high_resolution_scheme( self.scheme )
stick = Stick(scheme_high)
zeppelin = Zeppelin(scheme_high)
ball = Ball(scheme_high)
nATOMS = 1 + len(self.d_perps_zep) + len(self.d_isos)
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Stick
signal = stick.get_signal(self.d_par)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
# Zeppelin(s)
for d in self.d_perps_zep :
signal = zeppelin.get_signal(self.d_par_zep, d)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
# Ball(s)
for d in self.d_isos :
signal = ball.get_signal(d)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ) :
KERNELS = {}
KERNELS['model'] = self.id
if doMergeB0:
nS = 1+self.scheme.dwi_count
merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
else:
nS = self.scheme.nS
merge_idx = np.arange(nS)
KERNELS['wmr'] = np.zeros( (1,ndirs,nS), dtype=np.float32 )
KERNELS['wmh'] = np.zeros( (len(self.d_perps_zep),ndirs,nS), dtype=np.float32 )
KERNELS['iso'] = np.zeros( (len(self.d_isos),nS), dtype=np.float32 )
nATOMS = 1 + len(self.d_perps_zep) + len(self.d_isos)
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Stick
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
if lm.shape[0] != ndirs:
ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
KERNELS['wmr'][0,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
idx += 1
progress.update()
# Zeppelin(s)
for i in range(len(self.d_perps_zep)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
if lm.shape[0] != ndirs:
ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
KERNELS['wmh'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
idx += 1
progress.update()
# Ball(s)
for i in range(len(self.d_isos)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
KERNELS['iso'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx]
idx += 1
progress.update()
return KERNELS
def fit( self, y, dirs, KERNELS, params ) :
ERROR( 'Not implemented' )
class CylinderZeppelinBall( BaseModel ) :
"""Implements the Cylinder-Zeppelin-Ball model [1].
The intra-cellular contributions from within the axons are modeled as "cylinders"
with specific radii (Rs) and a given axial diffusivity (d_par).
Extra-cellular contributions are modeled as tensors with the same axial diffusivity
    as the cylinders (d_par) and, possibly, a series of perpendicular diffusivities (d_perps).
Isotropic contributions are modeled as tensors with isotropic diffusivities (d_isos).
NB: this model works only with schemes containing the full specification of
the diffusion gradients (eg gradient strength, small delta etc).
References
----------
.. [1] Panagiotaki et al. (2012) Compartment models of the diffusion MR signal
in brain white matter: A taxonomy and comparison. NeuroImage, 59: 2241-54
"""
def __init__( self ) :
self.id = 'CylinderZeppelinBall'
self.name = 'Cylinder-Zeppelin-Ball'
self.maps_name = [ 'v', 'a', 'd' ]
self.maps_descr = [ 'Intra-cellular volume fraction', 'Mean axonal diameter', 'Axonal density' ]
self.d_par = 0.6E-3 # Parallel diffusivity [mm^2/s]
self.Rs = np.concatenate( ([0.01],np.linspace(0.5,8.0,20)) ) * 1E-6 # Radii of the axons [meters]
self.d_perps = np.array([ 1.19E-3, 0.85E-3, 0.51E-3, 0.17E-3]) # Perpendicular diffusivitie(s) [mm^2/s]
self.d_isos = np.array( [ 2.0E-3 ] ) # Isotropic diffusivitie(s) [mm^2/s]
self.isExvivo = False # Add dot compartment to dictionary (exvivo data)
def set( self, d_par, Rs, d_perps, d_isos ) :
self.d_par = d_par
self.Rs = np.array(Rs)
self.d_perps = np.array(d_perps)
self.d_isos = np.array(d_isos)
def get_params( self ) :
params = {}
params['id'] = self.id
params['name'] = self.name
params['d_par'] = self.d_par
params['Rs'] = self.Rs
params['d_perps'] = self.d_perps
params['d_isos'] = self.d_isos
params['isExvivo'] = self.isExvivo
return params
def set_solver( self, lambda1 = 0.0, lambda2 = 4.0 ) :
params = {}
params['mode'] = 2
params['pos'] = True
params['lambda1'] = lambda1
params['lambda2'] = lambda2
return params
def generate( self, out_path, aux, idx_in, idx_out, ndirs ) :
if self.scheme.version != 1 :
ERROR( 'This model requires a "VERSION: STEJSKALTANNER" scheme' )
scheme_high = amico.lut.create_high_resolution_scheme( self.scheme )
cylinder = CylinderGPD(scheme_high)
zeppelin = Zeppelin(scheme_high)
ball = Ball(scheme_high)
nATOMS = len(self.Rs) + len(self.d_perps) + len(self.d_isos)
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Cylinder(s)
for R in self.Rs :
signal = cylinder.get_signal(self.d_par, R)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
# Zeppelin(s)
for d in self.d_perps :
signal = zeppelin.get_signal(self.d_par, d)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
# Ball(s)
for d in self.d_isos :
signal = ball.get_signal(d)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ) :
if doMergeB0:
nS = 1+self.scheme.dwi_count
merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
else:
nS = self.scheme.nS
merge_idx = np.arange(nS)
KERNELS = {}
KERNELS['model'] = self.id
KERNELS['wmr'] = np.zeros( (len(self.Rs),ndirs,nS,), dtype=np.float32 )
KERNELS['wmh'] = np.zeros( (len(self.d_perps),ndirs,nS,), dtype=np.float32 )
KERNELS['iso'] = np.zeros( (len(self.d_isos),nS,), dtype=np.float32 )
nATOMS = len(self.Rs) + len(self.d_perps) + len(self.d_isos)
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Cylinder(s)
for i in range(len(self.Rs)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
if lm.shape[0] != ndirs:
ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
KERNELS['wmr'][i,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
idx += 1
progress.update()
# Zeppelin(s)
for i in range(len(self.d_perps)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
if lm.shape[0] != ndirs:
ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
KERNELS['wmh'][i,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
idx += 1
progress.update()
# Ball(s)
for i in range(len(self.d_isos)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
KERNELS['iso'][i,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx]
idx += 1
progress.update()
return KERNELS
def fit( self, y, dirs, KERNELS, params, htable ) :
nD = dirs.shape[0]
n1 = len(self.Rs)
n2 = len(self.d_perps)
n3 = len(self.d_isos)
if self.isExvivo:
nATOMS = nD*(n1+n2)+n3+1
else:
nATOMS = nD*(n1+n2)+n3
# prepare DICTIONARY from dirs and lookup tables
A = np.ones( (len(y), nATOMS ), dtype=np.float64, order='F' )
o = 0
for i in range(nD) :
lut_idx = amico.lut.dir_TO_lut_idx( dirs[i], htable )
A[:,o:(o+n1)] = KERNELS['wmr'][:,lut_idx,:].T
o += n1
for i in range(nD) :
lut_idx = amico.lut.dir_TO_lut_idx( dirs[i], htable )
A[:,o:(o+n2)] = KERNELS['wmh'][:,lut_idx,:].T
o += n2
A[:,o:] = KERNELS['iso'].T
# empty dictionary
if A.shape[1] == 0 :
return [0, 0, 0], None, None, None
# fit
x = spams.lasso( np.asfortranarray( y.reshape(-1,1) ), D=A, numThreads=1, **params ).todense().A1
# return estimates
f1 = x[ :(nD*n1) ].sum()
f2 = x[ (nD*n1):(nD*(n1+n2)) ].sum()
v = f1 / ( f1 + f2 + 1e-16 )
xIC = x[:nD*n1].reshape(-1,n1).sum(axis=0)
a = 1E6 * 2.0 * np.dot(self.Rs,xIC) / ( f1 + 1e-16 )
d = (4.0*v) / ( np.pi*a**2 + 1e-16 )
return [v, a, d], dirs, x, A
class NODDI( BaseModel ) :
"""Implements the NODDI model [2].
    NB: this model does not require the "NODDI MATLAB toolbox" to be installed;
all the necessary functions have been ported to Python.
References
----------
.. [2] Zhang et al. (2012) NODDI: Practical in vivo neurite orientation
dispersion and density imaging of the human brain. NeuroImage, 61: 1000-16
"""
def __init__( self ):
self.id = "NODDI"
self.name = "NODDI"
self.maps_name = [ 'ICVF', 'OD', 'ISOVF' ]
self.maps_descr = [ 'Intra-cellular volume fraction', 'Orientation dispersion', 'Isotropic volume fraction' ]
self.dPar = 1.7E-3
self.dIso = 3.0E-3
self.IC_VFs = np.linspace(0.1,0.99,12)
self.IC_ODs = np.hstack((np.array([0.03, 0.06]),np.linspace(0.09,0.99,10)))
self.isExvivo = False
def set( self, dPar, dIso, IC_VFs, IC_ODs, isExvivo ):
self.dPar = dPar
self.dIso = dIso
self.IC_VFs = np.array( IC_VFs )
self.IC_ODs = np.array( IC_ODs )
self.isExvivo = isExvivo
if isExvivo:
self.maps_name = [ 'ICVF', 'OD', 'ISOVF', 'dot' ]
self.maps_descr = [ 'Intra-cellular volume fraction', 'Orientation dispersion', 'Isotropic volume fraction', 'Dot volume fraction' ]
else:
self.maps_name = [ 'ICVF', 'OD', 'ISOVF']
self.maps_descr = [ 'Intra-cellular volume fraction', 'Orientation dispersion', 'Isotropic volume fraction']
def get_params( self ) :
params = {}
params['id'] = self.id
params['name'] = self.name
params['dPar'] = self.dPar
params['dIso'] = self.dIso
params['IC_VFs'] = self.IC_VFs
params['IC_ODs'] = self.IC_ODs
params['isExvivo'] = self.isExvivo
return params
def set_solver( self, lambda1 = 5e-1, lambda2 = 1e-3 ):
params = {}
params['mode'] = 2
params['pos'] = True
params['lambda1'] = lambda1
params['lambda2'] = lambda2
return params
def generate( self, out_path, aux, idx_in, idx_out, ndirs ):
scheme_high = amico.lut.create_high_resolution_scheme( self.scheme )
noddi_ic = NODDIIntraCellular(scheme_high)
noddi_ec = NODDIExtraCellular(scheme_high)
noddi_iso = NODDIIsotropic(scheme_high)
nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Coupled contributions
IC_KAPPAs = 1 / np.tan(self.IC_ODs*np.pi/2)
for kappa in IC_KAPPAs:
signal_ic = noddi_ic.get_signal(self.dPar, kappa)
for v_ic in self.IC_VFs:
signal_ec = noddi_ec.get_signal(self.dPar, kappa, v_ic)
signal = v_ic*signal_ic + (1-v_ic)*signal_ec
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy') , lm )
idx += 1
progress.update()
# Isotropic
signal = noddi_iso.get_signal(self.dIso)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
np.save( pjoin( out_path, f'A_{nATOMS:03d}.npy') , lm )
progress.update()
def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ):
nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1
if doMergeB0:
nS = 1+self.scheme.dwi_count
merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
else:
nS = self.scheme.nS
merge_idx = np.arange(nS)
KERNELS = {}
KERNELS['model'] = self.id
KERNELS['wm'] = np.zeros( (nATOMS-1,ndirs,nS), dtype=np.float32 )
KERNELS['iso'] = np.zeros( nS, dtype=np.float32 )
KERNELS['kappa'] = np.zeros( nATOMS-1, dtype=np.float32 )
KERNELS['icvf'] = np.zeros( nATOMS-1, dtype=np.float32 )
KERNELS['norms'] = np.zeros( (self.scheme.dwi_count, nATOMS-1) )
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Coupled contributions
for i in range( len(self.IC_ODs) ):
for j in range( len(self.IC_VFs) ):
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
if lm.shape[0] != ndirs:
ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
KERNELS['wm'][idx,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
KERNELS['kappa'][idx] = 1.0 / np.tan( self.IC_ODs[i]*np.pi/2.0 )
KERNELS['icvf'][idx] = self.IC_VFs[j]
if doMergeB0:
KERNELS['norms'][:,idx] = 1 / np.linalg.norm( KERNELS['wm'][idx,0,1:] ) # norm of coupled atoms (for l1 minimization)
else:
KERNELS['norms'][:,idx] = 1 / np.linalg.norm( KERNELS['wm'][idx,0,self.scheme.dwi_idx] ) # norm of coupled atoms (for l1 minimization)
idx += 1
progress.update()
# Isotropic
lm = np.load( pjoin( in_path, f'A_{nATOMS:03d}.npy' ) )
KERNELS['iso'] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx]
progress.update()
return KERNELS
def fit( self, y, dirs, KERNELS, params, htable ) :
singleb0 = True if len(y) == (1+self.scheme.dwi_count) else False
nD = dirs.shape[0]
if nD != 1 :
ERROR( '"%s" model requires exactly 1 orientation' % self.name )
# prepare DICTIONARY from dir and lookup tables
nWM = len(self.IC_ODs)*len(self.IC_VFs)
nATOMS = nWM + 1
if self.isExvivo == True :
nATOMS += 1
lut_idx = amico.lut.dir_TO_lut_idx( dirs[0], htable )
A = np.ones( (len(y), nATOMS), dtype=np.float64, order='F' )
A[:,:nWM] = KERNELS['wm'][:,lut_idx,:].T
A[:,-1] = KERNELS['iso']
# estimate CSF partial volume (and isotropic restriction, if exvivo) and remove from signal
x, _ = scipy.optimize.nnls( A, y )
yy = y - x[-1]*A[:,-1]
if self.isExvivo == True :
yy = yy - x[-2]*A[:,-2]
yy[ yy<0 ] = 0
# estimate IC and EC compartments and promote sparsity
if singleb0:
An = A[1:, :nWM] * KERNELS['norms']
yy = yy[1:].reshape(-1,1)
else:
An = A[ self.scheme.dwi_idx, :nWM ] * KERNELS['norms']
yy = yy[ self.scheme.dwi_idx ].reshape(-1,1)
x = spams.lasso( np.asfortranarray(yy), D=np.asfortranarray(An), numThreads=1, **params ).todense().A1
# debias coefficients
x = np.append( x, 1 )
if self.isExvivo == True :
x = np.append( x, 1 )
idx = x>0
x[idx], _ = scipy.optimize.nnls( A[:,idx], y )
# return estimates
xx = x / ( x.sum() + 1e-16 )
xWM = xx[:nWM]
fISO = xx[-1]
xWM = xWM / ( xWM.sum() + 1e-16 )
f1 = np.dot( KERNELS['icvf'], xWM )
f2 = np.dot( (1.0-KERNELS['icvf']), xWM )
v = f1 / ( f1 + f2 + 1e-16 )
k = np.dot( KERNELS['kappa'], xWM )
od = 2.0/np.pi * np.arctan2(1.0,k)
if self.isExvivo:
return [v, od, fISO, xx[-2]], dirs, x, A
else:
return [v, od, fISO], dirs, x, A
class FreeWater( BaseModel ) :
"""Implements the Free-Water model.
"""
def __init__( self ) :
self.id = 'FreeWater'
self.name = 'Free-Water'
self.type = 'Human'
if self.type == 'Mouse' :
self.maps_name = [ 'FiberVolume', 'FW', 'FW_blood', 'FW_csf' ]
self.maps_descr = [ 'fiber volume fraction',
'Isotropic free-water volume fraction',
'FW blood', 'FW csf' ]
# for mouse imaging
self.d_par = 1.0E-3
self.d_perps = np.linspace(0.15,0.55,10)*1E-3
self.d_isos = [1.5e-3, 3e-3]
else :
self.maps_name = [ 'FiberVolume', 'FW' ]
self.maps_descr = [ 'fiber volume fraction',
'Isotropic free-water volume fraction']
self.d_par = 1.0E-3 # Parallel diffusivity [mm^2/s]
self.d_perps = np.linspace(0.1,1.0,10)*1E-3               # Perpendicular diffusivities [mm^2/s]
self.d_isos = [ 2.5E-3 ] # Isotropic diffusivities [mm^2/s]
def set( self, d_par, d_perps, d_isos, type ) :
self.d_par = d_par
self.d_perps = d_perps
self.d_isos = d_isos
self.type = type
if self.type == 'Mouse' :
self.maps_name = [ 'FiberVolume', 'FW', 'FW_blood', 'FW_csf' ]
self.maps_descr = [ 'fiber volume fraction',
'Isotropic free-water volume fraction',
'FW blood', 'FW csf' ]
else :
self.maps_name = [ 'FiberVolume', 'FW' ]
self.maps_descr = [ 'fiber volume fraction',
'Isotropic free-water volume fraction']
PRINT(' %s settings for Freewater elimination... ' % self.type)
PRINT(' -iso compartments: ', self.d_isos)
PRINT(' -perp compartments: ', self.d_perps)
PRINT(' -para compartments: ', self.d_par)
def get_params( self ) :
params = {}
params['id'] = self.id
params['name'] = self.name
params['d_par'] = self.d_par
params['d_perps'] = self.d_perps
params['d_isos'] = self.d_isos
params['type'] = self.type
return params
def set_solver( self, lambda1 = 0.0, lambda2 = 1e-3 ):
params = {}
params['mode'] = 2
params['pos'] = True
params['lambda1'] = lambda1
params['lambda2'] = lambda2
# need more regularization for mouse data
if self.type == 'Mouse' :
    params['lambda2'] = 0.25
return params
def generate( self, out_path, aux, idx_in, idx_out, ndirs ) :
scheme_high = amico.lut.create_high_resolution_scheme( self.scheme )
zeppelin = Zeppelin(scheme_high)
ball = Ball(scheme_high)
nATOMS = len(self.d_perps) + len(self.d_isos)
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Tensor compartment(s)
for d in self.d_perps :
signal = zeppelin.get_signal(self.d_par, d)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, False, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
# Isotropic compartment(s)
for d in self.d_isos :
signal = ball.get_signal(d)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ) :
if doMergeB0:
nS = 1+self.scheme.dwi_count
merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
else:
nS = self.scheme.nS
merge_idx = np.arange(nS)
KERNELS = {}
KERNELS['model'] = self.id
KERNELS['D'] = np.zeros( (len(self.d_perps),ndirs,nS), dtype=np.float32 )
KERNELS['CSF'] = np.zeros( (len(self.d_isos),nS), dtype=np.float32 )
nATOMS = len(self.d_perps) + len(self.d_isos)
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Tensor compartment(s)
for i in range(len(self.d_perps)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
if lm.shape[0] != ndirs:
ERROR( 'Outdated LUT. Call "generate_kernels( regenerate=True )" to update the LUT' )
KERNELS['D'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False, ndirs )[:,merge_idx]
idx += 1
progress.update()
# Isotropic compartment(s)
for i in range(len(self.d_isos)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
KERNELS['CSF'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx]
idx += 1
progress.update()
return KERNELS
def fit( self, y, dirs, KERNELS, params, htable ) :
nD = dirs.shape[0]
if nD > 1 : # model works only with one direction
ERROR( '"%s" model requires exactly 1 orientation' % self.name )
n1 = len(self.d_perps)
n2 = len(self.d_isos)
nATOMS = n1+n2
if nATOMS == 0 : # empty dictionary
return [0, 0], None, None, None
# prepare DICTIONARY from dir and lookup tables
lut_idx = amico.lut.dir_TO_lut_idx( dirs[0], htable )
A = np.zeros( (len(y), nATOMS), dtype=np.float64, order='F' )
A[:,:(nD*n1)] = KERNELS['D'][:,lut_idx,:].T
A[:,(nD*n1):] = KERNELS['CSF'].T
# fit
x = spams.lasso( np.asfortranarray( y.reshape(-1,1) ), D=A, numThreads=1, **params ).todense().A1
# return estimates
v = x[ :n1 ].sum() / ( x.sum() + 1e-16 )
# checking that there is more than 1 isotropic compartment
if self.type == 'Mouse' :
v_blood = x[ n1 ] / ( x.sum() + 1e-16 )
v_csf = x[ n1+1 ] / ( x.sum() + 1e-16 )
return [ v, 1-v, v_blood, v_csf ], dirs, x, A
else :
return [ v, 1-v ], dirs, x, A
class VolumeFractions( BaseModel ) :
"""Implements a simple model where each compartment contributes only with
its own volume fraction. This model has been created to test the
ability to remove false positive fibers with COMMIT.
"""
def __init__( self ) :
self.id = 'VolumeFractions'
self.name = 'Volume fractions'
self.maps_name = [ ]
self.maps_descr = [ ]
def get_params( self ) :
params = {}
params['id'] = self.id
params['name'] = self.name
return params
def set_solver( self ) :
ERROR( 'Not implemented' )
def generate( self, out_path, aux, idx_in, idx_out, ndirs ) :
return
def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ) :
if doMergeB0:
nS = 1+self.scheme.dwi_count
merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
else:
nS = self.scheme.nS
merge_idx = np.arange(nS)
KERNELS = {}
KERNELS['model'] = self.id
KERNELS['wmr'] = np.ones( (1,ndirs,nS), dtype=np.float32 )
KERNELS['wmh'] = np.ones( (0,ndirs,nS), dtype=np.float32 )
KERNELS['iso'] = np.ones( (0,nS), dtype=np.float32 )
return KERNELS
def fit( self, y, dirs, KERNELS, params ) :
ERROR( 'Not implemented' )
class SANDI( BaseModel ) :
"""Implements the SANDI model [1].
The intra-cellular contributions from within the neural cells are modeled as intra-soma + intra-neurite,
with the soma modelled as "sphere" of radius (Rs) and fixed intra-soma diffusivity (d_is) to 3 micron^2/ms;
the neurites are modelled as randomly oriented sticks with axial intra-neurite diffusivity (d_in).
Extra-cellular contributions are modeled as isotropic gaussian diffusion, i.e. "ball", with the mean diffusivity (d_iso)
NB: this model works only with direction-averaged signal and schemes containing the full specification of
the diffusion gradients (eg gradient strength, small delta etc).
References
----------
.. [1] Palombo, Marco, et al. "SANDI: a compartment-based model for non-invasive apparent soma and neurite imaging by diffusion MRI." Neuroimage 215 (2020): 116835.
"""
def __init__( self ) :
self.id = 'SANDI'
self.name = 'SANDI'
self.maps_name = [ 'fsoma', 'fneurite', 'fextra', 'Rsoma', 'Din', 'De' ]
self.maps_descr = [ 'Intra-soma volume fraction', 'Intra-neurite volume fraction', 'Extra-cellular volume fraction', 'Apparent soma radius', 'Neurite axial diffusivity', 'Extra-cellular mean diffusivity' ]
self.d_is = 3.0E-3 # Intra-soma diffusivity [mm^2/s]
self.Rs = np.linspace(1.0,12.0,5) * 1E-6 # Radii of the soma [meters]
self.d_in = np.linspace(0.25,3.0,5) * 1E-3 # Intra-neurite diffusivitie(s) [mm^2/s]
self.d_isos = np.linspace(0.25,3.0,5) * 1E-3 # Extra-cellular isotropic mean diffusivitie(s) [mm^2/s]
def set( self, d_is, Rs, d_in, d_isos ) :
self.d_is = d_is
self.Rs = np.array(Rs)
self.d_in = np.array(d_in)
self.d_isos = np.array(d_isos)
def get_params( self ) :
params = {}
params['id'] = self.id
params['name'] = self.name
params['d_is'] = self.d_is
params['Rs'] = self.Rs
params['d_in'] = self.d_in
params['d_isos'] = self.d_isos
return params
def set_solver( self, lambda1 = 0.0, lambda2 = 5.0E-3 ) :
params = {}
params['mode'] = 2
params['pos'] = True
params['lambda1'] = lambda1
params['lambda2'] = lambda2
return params
def generate( self, out_path, aux, idx_in, idx_out, ndirs ) :
if self.scheme.version != 1 :
ERROR( 'This model requires a "VERSION: STEJSKALTANNER" scheme' )
scheme_high = amico.lut.create_high_resolution_scheme( self.scheme )
sphere = SphereGPD(scheme_high)
astrosticks = Astrosticks(scheme_high)
ball = Ball(scheme_high)
nATOMS = len(self.Rs) + len(self.d_in) + len(self.d_isos)
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Soma = SPHERE
for R in self.Rs :
signal = sphere.get_signal(self.d_is, R)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
# Neurites = ASTRO STICKS
for d in self.d_in :
signal = astrosticks.get_signal(d)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
# Extra-cellular = BALL
for d in self.d_isos :
signal = ball.get_signal(d)
lm = amico.lut.rotate_kernel( signal, aux, idx_in, idx_out, True, ndirs )
np.save( pjoin( out_path, f'A_{idx+1:03d}.npy' ), lm )
idx += 1
progress.update()
def resample( self, in_path, idx_out, Ylm_out, doMergeB0, ndirs ) :
nATOMS = len(self.Rs) + len(self.d_in) + len(self.d_isos)
if doMergeB0:
nS = 1+self.scheme.dwi_count
merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
else:
nS = self.scheme.nS
merge_idx = np.arange(nS)
KERNELS = {}
KERNELS['model'] = self.id
KERNELS['signal'] = np.zeros( (nS,nATOMS), dtype=np.float64, order='F' )
KERNELS['norms'] = np.zeros( nATOMS, dtype=np.float64 )
idx = 0
with tqdm(total=nATOMS, ncols=70, bar_format=' |{bar}| {percentage:4.1f}%', disable=(get_verbose()<3)) as progress:
# Soma = SPHERE
for i in range(len(self.Rs)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
signal = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx].T
KERNELS['norms'][idx] = 1.0 / np.linalg.norm( signal )
KERNELS['signal'][:,idx] = signal * KERNELS['norms'][idx]
idx += 1
progress.update()
# Neurites = STICKS
for i in range(len(self.d_in)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
signal = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx].T
KERNELS['norms'][idx] = 1.0 / np.linalg.norm( signal )
KERNELS['signal'][:,idx] = signal * KERNELS['norms'][idx]
idx += 1
progress.update()
# Extra-cellular = BALL
for i in range(len(self.d_isos)) :
lm = np.load( pjoin( in_path, f'A_{idx+1:03d}.npy' ) )
signal = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True, ndirs )[merge_idx].T
KERNELS['norms'][idx] = 1.0 / np.linalg.norm( signal )
KERNELS['signal'][:,idx] = signal * KERNELS['norms'][idx]
idx += 1
progress.update()
return KERNELS
def fit( self, y, dirs, KERNELS, params, htable ) :
# if dictionary is empty
if KERNELS['signal'].shape[1] == 0 :
return [0, 0, 0, 0, 0, 0], None, None, None
# fit
x = spams.lasso( np.asfortranarray( y.reshape(-1,1) ), D=KERNELS['signal'], numThreads=1, **params ).todense().A1
x = x*KERNELS['norms']
# return estimates
n1 = len(self.Rs)
n2 = len(self.d_in)
xsph = x[:n1]
xstk = x[n1:n1+n2]
xiso = x[n1+n2:]
fsoma = xsph.sum()/(x.sum()+1e-16)
fneurite = xstk.sum()/(x.sum()+1e-16)
fextra = xiso.sum()/(x.sum()+1e-16)
Rsoma = 1E6*np.dot(self.Rs,xsph)/(xsph.sum()+1e-16 ) # Sphere radius [micron]
Din = 1E3*np.dot(self.d_in,xstk)/(xstk.sum()+1e-16 ) # Intra-stick diffusivity [micron^2/ms]
De = 1E3*np.dot(self.d_isos,xiso)/(xiso.sum()+1e-16 ) # Extra-cellular diffusivity [micron^2/ms]
return [fsoma, fneurite, fextra, Rsoma, Din, De], dirs, x, KERNELS['signal']
|
PypiClean
|
/carml-22.7.1.tar.gz/carml-22.7.1/doc/command-cmd.rst
|
.. _cmd:
``cmd``
=======
The command named ``cmd`` takes the rest of the command-line and sends
it straight to Tor as a control-protocol request (see `the torspec
repository <https://gitweb.torproject.org/torspec.git>`_ for full
details). It then prints out the reply from Tor. (This isn't really
suitable for events; see the ``events`` command).
If you pass a single dash as the command-line (that is, ``carml cmd
-``) then commands are read one line at a time from stdin and executed
sequentially.
Examples
--------
.. sourcecode:: console
$ carml -q cmd getinfo info/names | tail -5
status/version/recommended -- List of currently recommended versions.
stream-status -- List of current streams.
traffic/read -- Bytes read since the process was started.
traffic/written -- Bytes written since the process was started.
version -- The current version of Tor.
$ carml -q cmd SIGNAL NEWNYM
OK
$ echo "getinfo net/listeners/socks" > commands
$ echo "getinfo traffic/read" >> commands
$ echo "getinfo traffic/written" >> commands
$ cat commands | carml -q cmd -
Keep entering keys to run CMD on. Control-d to exit.
net/listeners/socks="127.0.0.1:9050"
traffic/read=6667674
traffic/written=391959
$ carml -q cmd getinfo net/listeners/socks traffic/read traffic/written
net/listeners/socks="127.0.0.1:9050"
traffic/read=10012841
traffic/written=516428
|
PypiClean
|
/intel_tensorflow-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/tensorflow/python/ops/cond.py
|
"""Cond function for Control Flow Operations."""
from tensorflow.python.eager import context
from tensorflow.python.eager.polymorphic_function import eager_function_run
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util as util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.types import core
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# TODO(b/269483538): below lazy loads
# needed for references while refactors are in progress
control_flow_ops = LazyLoader(
"control_flow_ops", globals(),
"tensorflow.python.ops.control_flow_ops")
# This is to avoid a circular dependency:
# cond_v2 -> gradients_util -> control_flow_ops
cond_v2 = LazyLoader("cond_v2", globals(),
"tensorflow.python.ops.cond_v2")
# pylint: disable=redefined-outer-name
# pylint: disable=g-doc-args
@tf_export(v1=["cond"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(
None, "fn1/fn2 are deprecated in favor of the true_fn/false_fn arguments.",
"fn1", "fn2")
def cond(pred,
true_fn=None,
false_fn=None,
strict=False,
name=None,
fn1=None,
fn2=None):
"""Return `true_fn()` if the predicate `pred` is true else `false_fn()`.
`true_fn` and `false_fn` both return lists of output tensors. `true_fn` and
`false_fn` must have the same non-zero number and type of outputs.
**WARNING**: Any Tensors or Operations created outside of `true_fn` and
`false_fn` will be executed regardless of which branch is selected at runtime.
Although this behavior is consistent with the dataflow model of TensorFlow,
it has frequently surprised users who expected a lazier semantics.
Consider the following simple program:
```python
z = tf.multiply(a, b)
result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))
```
If `x < y`, the `tf.add` operation will be executed and `tf.square`
operation will not be executed. Since `z` is needed for at least one
branch of the `cond`, the `tf.multiply` operation is always executed,
unconditionally.
Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the
call to `cond`, and not at all during `Session.run()`). `cond`
stitches together the graph fragments created during the `true_fn` and
`false_fn` calls with some additional graph nodes to ensure that the right
branch gets executed depending on the value of `pred`.
`tf.cond` supports nested structures as implemented in
`tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the
same (possibly nested) value structure of lists, tuples, and/or named tuples.
Singleton lists and tuples form the only exceptions to this: when returned by
`true_fn` and/or `false_fn`, they are implicitly unpacked to single values.
This behavior is disabled by passing `strict=True`.
Args:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
strict: A boolean that enables/disables 'strict' mode; see above.
name: Optional name prefix for the returned tensors.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`. If the
callables return a singleton list, the element is extracted from the list.
Raises:
TypeError: if `true_fn` or `false_fn` is not callable.
ValueError: if `true_fn` and `false_fn` do not return the same number of
tensors, or return tensors of different types.
Example:
```python
x = tf.constant(2)
y = tf.constant(5)
def f1(): return tf.multiply(x, 17)
def f2(): return tf.add(y, 23)
r = tf.cond(tf.less(x, y), f1, f2)
# r is set to f1().
# Operations in f2 (e.g., tf.add) are not executed.
```
"""
# We needed to make true_fn/false_fn keyword arguments for
# backwards-compatibility. This check exists so that we can convert back to
# having them be positional arguments.
# TODO(josh11b): Make `true_fn` and `false_fn` positional arguments after
# `fn1` and `fn2` are deleted.
if fn1 is not None:
if true_fn is not None:
raise TypeError(
"cond(): 'true_fn' and 'fn1' may not be set simultaneously.")
true_fn = fn1
elif true_fn is None:
raise TypeError("cond(): 'true_fn' argument required")
if fn2 is not None:
if false_fn is not None:
raise TypeError(
"cond(): 'false_fn' and 'fn2' may not be set simultaneously.")
false_fn = fn2
elif false_fn is None:
raise TypeError("cond(): 'false_fn' argument required")
if not callable(true_fn):
raise TypeError("'true_fn' must be callable.")
if not callable(false_fn):
raise TypeError("'false_fn' must be callable.")
if context.executing_eagerly():
return _eager_cond_implementation(pred, true_fn, false_fn, strict, name)
# Always enable control flow v2 if building a function, regardless of toggle.
if util.EnableControlFlowV2(ops.get_default_graph()):
return cond_v2.cond_v2(pred, true_fn, false_fn, name)
with ops.name_scope(name, "cond", [pred]):
# Add the Switch to the graph.
if isinstance(pred, bool):
raise TypeError("'pred' must not be a Python bool.")
p_2, p_1 = control_flow_ops.switch(pred, pred)
pivot_1 = array_ops.identity(p_1, name="switch_t")
pivot_2 = array_ops.identity(p_2, name="switch_f")
pred = array_ops.identity(pred, name="pred_id")
# Disable the fetching of tensors that are only on one branch of cond.
for tensor in [p_1, p_2, pivot_1, pivot_2, pred]:
tensor.op.graph.prevent_fetching(tensor.op)
# Build the graph for the true branch in a new context.
context_t = control_flow_ops.CondContext(pred, pivot_1, branch=1)
try:
context_t.Enter()
orig_res_t, res_t = context_t.BuildCondBranch(true_fn)
if orig_res_t is None:
raise ValueError("'true_fn' must have a return value.")
context_t.ExitResult(res_t)
finally:
context_t.Exit()
# Build the graph for the false branch in a new context.
context_f = control_flow_ops.CondContext(pred, pivot_2, branch=0)
try:
context_f.Enter()
orig_res_f, res_f = context_f.BuildCondBranch(false_fn)
if orig_res_f is None:
raise ValueError("'false_fn' must have a return value.")
context_f.ExitResult(res_f)
finally:
context_f.Exit()
if not strict:
orig_res_t = _UnpackIfSingleton(orig_res_t)
orig_res_f = _UnpackIfSingleton(orig_res_f)
# Check that the return values of the two branches have the same structure.
try:
nest.assert_same_structure(orig_res_t, orig_res_f, expand_composites=True)
except (TypeError, ValueError):
nest.map_structure(_cast_indexed_slice_indices, orig_res_t, orig_res_f)
nest.map_structure(_cast_indexed_slice_indices, res_t, res_f)
try:
nest.assert_same_structure(orig_res_t, orig_res_f,
expand_composites=True)
except TypeError as e:
raise TypeError(
f"Incompatible return types of 'true_fn' and 'false_fn': {e}")
except ValueError as e:
raise ValueError(
f"Incompatible return values of 'true_fn' and 'false_fn': {e}")
# Add the final merge to the graph.
if not res_t:
raise ValueError(
"'true_fn' and 'false_fn' must return at least one result.")
res_t_flat = nest.flatten(res_t, expand_composites=True)
res_f_flat = nest.flatten(res_f, expand_composites=True)
for (x, y) in zip(res_t_flat, res_f_flat):
assert isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)
if x.dtype.base_dtype != y.dtype.base_dtype:
raise ValueError(
"Outputs of 'true_fn' and 'false_fn' must have the same type(s). "
f"Received {x.dtype.name} from 'true_fn' "
f"and {y.dtype.name} from 'false_fn'.")
merges = [
control_flow_ops.merge(pair)[0] for pair in zip(res_f_flat, res_t_flat)]
merges = nest.map_structure(
control_flow_ops._convert_flow_to_tensorarray, # pylint: disable=protected-access
nest.flatten(orig_res_t, expand_composites=True),
merges)
# Only add non-nested conds to the collection. Any nested control flow will
# be encapsulated in the root context.
assert context_t.outer_context == context_f.outer_context
if context_t.outer_context is None:
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_t)
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_f)
merges = nest.pack_sequence_as(
structure=orig_res_t, flat_sequence=merges, expand_composites=True)
# Singleton lists and tuples are automatically unpacked if strict == False.
if not strict:
merges = _UnpackIfSingleton(merges)
return merges
@tf_export("cond", v1=[])
@dispatch.add_dispatch_support
def cond_for_tf_v2(pred, true_fn=None, false_fn=None, name=None):
"""Return `true_fn()` if the predicate `pred` is true else `false_fn()`.
Note: This op is automatically used in a `tf.function` to convert Python
if-statements when the predicate is a `tf.Tensor`, unless `autograph=False` is
explicitly specified in `tf.function` args. For example, the following are
equivalent:
>>> @tf.function
... def fun1(x,y):
... if x > 0: # AutoGraph converts if-statement to tf.cond().
... z = y+1
... else:
... z = y-1
... return z
>>> fun1(tf.constant(7), tf.constant(3)).numpy()
4
>>> @tf.function
... def fun2(x,y):
... pred = x > 0
... true_fn = lambda: y+1
... false_fn = lambda: y-1
... return tf.cond(pred, true_fn, false_fn) # Use tf.cond() explicitly.
>>> fun2(tf.constant(7), tf.constant(3)).numpy()
4
For more information, see [tf.function and AutoGraph guide](
https://www.tensorflow.org/guide/function#autograph_transformations).
`true_fn` and `false_fn` both return lists of output tensors. `true_fn` and
`false_fn` must have the same non-zero number and type of outputs.
**WARNING**: Any Tensors or Operations created outside of `true_fn` and
`false_fn` will be executed regardless of which branch is selected at runtime.
Although this behavior is consistent with the dataflow model of TensorFlow,
it has frequently surprised users who expected a lazier semantics.
Consider the following simple program:
>>> x, y = tf.constant(2, dtype=tf.int32), tf.constant(4, dtype=tf.int32)
>>> z = tf.multiply(x, y)
>>> r = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))
>>> r.numpy()
10
If `x < y`, the `tf.add` operation will be executed and `tf.square`
operation will not be executed. Since `z` is needed for at least one
branch of the `cond`, the `tf.multiply` operation is always executed,
unconditionally.
Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the
call to `cond`, and not at all during `Session.run()`). `cond`
stitches together the graph fragments created during the `true_fn` and
`false_fn` calls with some additional graph nodes to ensure that the right
branch gets executed depending on the value of `pred`.
`tf.cond` supports nested structures as implemented in
`tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the
same (possibly nested) value structure of lists, tuples, and/or named tuples.
Singleton lists and tuples form the only exceptions to this: when returned by
`true_fn` and/or `false_fn`, they are implicitly unpacked to single values.
Note: It is illegal to "directly" use tensors created inside a cond branch
outside it, e.g. by storing a reference to a branch tensor in the python
state. If you need to use a tensor created in a branch function you should
return it as an output of the branch function and use the output from
`tf.cond` instead.
Args:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix for the returned tensors.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`. If the
callables return a singleton list, the element is extracted from the list.
Raises:
TypeError: if `true_fn` or `false_fn` is not callable.
ValueError: if `true_fn` and `false_fn` do not return the same number of
tensors, or return tensors of different types.
Example:
>>> x = tf.constant(2)
>>> y = tf.constant(5)
>>> def f1(): return tf.multiply(x, 7)
>>> def f2(): return tf.add(y, 3)
>>> r = tf.cond(tf.less(x, y), f1, f2)
>>> # r is set to f1().
>>> # Operations in f2 (e.g., tf.add) are not executed.
>>> r.numpy()
14
"""
return cond(pred, true_fn=true_fn, false_fn=false_fn, strict=True, name=name)
def _UnpackIfSingleton(res):
if isinstance(res, (list, tuple)) and len(res) == 1:
return res[0]
else:
return res
def _eager_cond_implementation(pred, true_fn, false_fn, strict, name):
"""Special cases for `cond` when executing eagerly."""
pred = ops.convert_to_tensor(pred)
pred_constant_value = tensor_util.constant_value(pred)
if pred_constant_value is None:
# Eager tensors from a parallel device may not have a constant
# value. Running the cond op itself would work, but we don't have logic to
# build cond ops without wrapping in a function first.
if (not isinstance(true_fn, core.GenericFunction)
or not isinstance(false_fn, core.GenericFunction)):
raise TypeError("When running tf.cond on a parallel device, 'true_fn' "
"and 'false_fn' must be decorated with `tf.function`.")
functions_run_eagerly = eager_function_run.functions_run_eagerly()
if functions_run_eagerly:
# We need to use tf.function to deal with variable creation inside the
# cond, and skipping it because of run_functions_eagerly would just
# crash immediately.
logging.warning(
"It looks like tf.function behavior was disabled, perhaps using "
"tf.config.run_functions_eagerly. Parallelized tf.cond requires "
"tf.function to work. This primitive will override the disable.")
eager_function_run.run_functions_eagerly(False)
try:
return cond_v2.cond_v2(pred, true_fn, false_fn, name)
finally:
if functions_run_eagerly is not None:
eager_function_run.run_functions_eagerly(functions_run_eagerly)
else:
# For conditions which are eager tensors with a constant value (most of
# them), we only call the relevant branch function and execute it eagerly.
with ops.name_scope(name, "cond", [pred]):
if pred_constant_value:
result = true_fn()
else:
result = false_fn()
if not strict:
result = _UnpackIfSingleton(result)
return result
def _cast_indexed_slice_indices(a, b):
"""Cast IndexedSlice.indices from int32 to int64 where necessary.
If `a` and `b` are both IndexedSlices, and their indices have different
dtypes, then cast both their dtypes to `int64` (modifies `a` and `b`
in-place). Otherwise, does nothing.
Args:
a: A value, which may be an IndexedSlices.
b: A value, which may be an IndexedSlices.
"""
if (isinstance(a, indexed_slices.IndexedSlices) and
isinstance(b, indexed_slices.IndexedSlices) and
a.indices.dtype != b.indices.dtype):
# pylint: disable=protected-access
a._indices = math_ops.cast(a.indices, dtypes.int64)
b._indices = math_ops.cast(b.indices, dtypes.int64)
|
PypiClean
|
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/an-xiao-yao/13-安小妖《好男人,你也要学会撩妹》:209.进挪就是这么简单,教你如何轻松和妹子靠得越来越近.md
|
# 13 - An Xiaoyao, "Good Man, You Too Should Learn to Flirt": 209. Kino is this simple: how to easily get closer and closer to a girl
Hello everyone, and welcome to this episode of "Good Man, You Too Should Learn to Flirt". If you like the show, remember to tap the subscribe button below to follow the album, so you won't miss anything and can quickly find my latest audio lessons.
We often see students who chat perfectly well with a girl on WeChat and have a good time on dates, yet simply cannot escalate the relationship and stay stuck in the friend zone. Why? Because they fail to do kino, physical escalation, in time. At this point a student will ask: teacher, what exactly is kino? Traditional dating coaches define kino as gradually expanding physical contact with the girl, touching more and more often and more and more deeply. But when we later studied why people stay stuck in the friend zone, we found that this is not the only cause. There is another very important reason: failing to escalate emotionally in time, that is, failing to flirt and build a playful, ambiguous mood. So to get out of the friend zone you have to work on two things: kino in the traditional sense, which we call physical escalation, and what we derived from social psychology and relationship research, emotional escalation. Today I will cover physical escalation; a later lesson will cover emotional escalation.
The explanation of physical escalation has three parts: first, its importance and meaning; second, the order it should follow; third, the concrete moves.
First, importance and meaning. Some students never escalate physically because they have not realized how important it is. From the viewpoint of social psychology, how close two people feel is reflected in the physical distance between them. In public, strangers generally keep about 3.7 to 7.6 meters apart, the so-called public distance. At work or at a social gathering, colleagues and ordinary friends keep roughly 1.2 to 2.1 meters apart, the social distance. Within about 46 to 76 centimeters you are in personal distance, where the two people will occasionally touch. Shorten it further to 15 to 44 centimeters and the two are clearly very good friends who can talk intimately. Go one step closer, within 15 centimeters, and you enter the zone reserved for lovers, shoulder to shoulder and whispering; that is intimate distance. The point is that shortening the spatial distance between two people changes the relationship: the closer you get, the more intimate it becomes. Otherwise, even if the conversation goes well, failing to escalate in time means every later interaction keeps spending the good impression you built earlier, the score keeps dropping, the relationship cools, and in the end nothing comes of it. So you must learn to strike while the iron is hot and use your body language to show the girl that you like her.
Next question: we know kino matters, but how exactly do we do it? Many students don't know where to start; grabbing the girl's waist right away will only make her think you are sleazy and push her away. Hence the order principle. Start with non-sensitive areas: hands, shoulders, the top of the head. You can take her hand, touch her shoulder at a suitable moment, or shield her head as she gets into the car. Then come the semi-sensitive areas, such as the face and waist: wiping something off her face, or putting an arm around her waist. Last are the sensitive areas, such as the chest, legs and hips; we won't go into detail here, you can use your imagination. This sequence is the safest escalation order we have found so far: in practice you can advance or pull back at any point, without offending the girl by touching her inappropriately and making her angry enough to block you.
Third, the concrete moves. Here is an extremely simple method: natural escalation. Some people make physical contact in a way that feels completely natural, so the other person also feels comfortable and at ease. Someone friendly who is in the habit of politely touching others, a handshake here, a touch on the shoulder there, makes this kind of non-sensitive contact feel natural. And once a girl is used to your touch, the later moves come easily. So the simplest method is to show goodwill and courtesy more often in everyday life, in words and in gestures alike. It shows you are well-mannered and of good character, which helps you a great deal both in dating and at work. The other methods are just as simple and mostly use everyday situations. For example, if the two of you are already at a fairly flirty stage, you can deliberately drift closer and closer while walking; as long as she does not move away, the moment your hands brush you can bravely take her hand. But remember not to look at her reaction right away. Students who lack confidence want to check her reaction to judge whether she accepts them, but that very glance puts pressure on her, and under that pressure she may pull her hand back, which only makes the next attempt harder. Instead, act as if nothing happened and look elsewhere, as though taking her hand were the most natural thing in the world. If she does not pull away, that is tacit consent; some things do not need to be spelled out. The first step of physical escalation is in fact the hardest part: once a girl lets you hold her hand, she has opened her body to you, and all you need from there is to proceed step by step, with a little courage. Practice these methods in daily life.
That's all for today's lesson; I hope you got something out of it. Finally, the usual small bonus: if you want to raise your dating EQ and learn to chat girls up, check out our latest premium album, the "High-EQ Flirty Chat Handbook". Follow our WeChat official account and reply with the number 101 in the backend to see the detailed course introduction. Thanks for subscribing, and see you next time.
|
PypiClean
|
/fileflowio-0.0.0.tar.gz/fileflowio-0.0.0/file_flow/data_io.py
|
from typing import (
TypeVar, Generic, ClassVar,
Optional, Callable, Type, Any, Dict
)
from attrs import define
__all__ = [
"IO",
"TextIO",
"BytesIO",
"IOContainer"
]
_D = TypeVar("_D")
@define
class IO(Generic[_D]):
"""A class to represent a generic io operation handler."""
loader: Optional[Callable[[str], _D]] = None
saver: Optional[Callable[[_D, str], None]] = None
name: ClassVar[str] = None
silent: Optional[bool] = None
load_kwargs: Optional[Dict[str, Any]] = None
save_kwargs: Optional[Dict[str, Any]] = None
base: ClassVar[Type] = _D
def load(self, path: str, **kwargs: Any) -> _D:
"""
Loads the data from the file.
:param path: The path to the source file.
:return: The loaded file data.
"""
if self.loader is not None:
return self.loader(path)
# end if
# end load
def save(self, data: _D, path: str, **kwargs: Any) -> None:
"""
Saves the data to the file.
:param path: The path to save the data.
:param data: The data to save in the file.
"""
if self.saver is not None:
self.saver(data, path)
# end if
# end load
# end ID
@define
class TextIO(IO[str]):
"""A class to represent a text io operation handler."""
name: ClassVar[str] = "txt"
base: ClassVar[Type[str]] = str
def load(self, path: str, **kwargs: Any) -> str:
"""
Loads the data from the file.
:param path: The path to the source file.
:return: The loaded file data.
"""
with open(path, "r") as file:
return file.read()
# end open
# end load
def save(self, data: str, path: str, **kwargs: Any) -> None:
"""
Saves the data to the file.
:param path: The path to save the data.
:param data: The data to save in the file.
"""
with open(path, "w") as file:
file.write(data)
# end open
# end load
# end TextIO
@define
class BytesIO(IO[bytes]):
"""A class to represent a bytes io operation handler."""
name: ClassVar[str] = "bytes"
base: ClassVar[Type[bytes]] = bytes
def load(self, path: str, **kwargs: Any) -> bytes:
"""
Loads the data from the file.
:param path: The path to the source file.
:return: The loaded file data.
"""
with open(path, "rb") as file:
return file.read()
# end open
# end load
def save(self, data: bytes, path: str, **kwargs: Any) -> None:
"""
Saves the data to the file.
:param path: The path to save the data.
:param data: The data to save in the file.
"""
with open(path, "wb") as file:
file.write(data)
# end open
# end load
# end BytesIO
_O = TypeVar("_O")
@define
class IOContainer(Generic[_D, _O]):
"""A class to contain io objects."""
input: Optional[IO[_D]] = None
output: Optional[IO[_O]] = None
# end IOContainer
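# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the concrete handlers above can be combined through IOContainer;
# the file name "example.txt" is hypothetical.
if __name__ == "__main__":
    container = IOContainer(input=TextIO(), output=BytesIO())
    container.input.save("hello, world", "example.txt")  # write text to disk
    raw = container.output.load("example.txt")           # read the same file back as bytes
    assert raw == b"hello, world"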
|
PypiClean
|
/logpie-4.0.0.tar.gz/logpie-4.0.0/README.md
|
# logpie
A versatile logging framework.
Simple, efficient, and configurable logging framework to manage and streamline your application logs.
### Installation:
```commandline
python -m pip install [--upgrade] logpie
```
#### Key Features
* Supports both file-based and console logging.
* Allows chronological organization of log files by year and month.
* Automatic log file cycling based on file size.
* Thread-safe operations for reliable logging.
* Customize your logs with configurable formatters.
* Can prefix log files with dates for easy tracking.
### Usage:
```python
# -*- coding: UTF-8 -*-
from logpie import Logger
log = Logger("my_logger")
if __name__ == '__main__':
log.debug("Testing debug messages...")
log.info("Testing info messages...")
log.warning("Testing warning messages...")
log.error("Testing error messages...")
log.critical("Testing critical messages...")
```
#### _class_ logpie.Logger
###### Parameters:
* `name` (optional): str - The name of the logger (defaults to `logpie`).
The name of the logger cannot be changed after instantiation!
* `level` (optional): LEVEL - Logging level of the logger (defaults to `LEVEL.NOTSET`).
* `state` (optional): STATE - State of the logger (defaults to `STATE.ON`).
* `handlers` (optional): Handler - A handler or a list/tuple of handlers for the logger (defaults to `None`).
If no handlers are given, the default `StdStream` handler is used.
###### Methods:
* `name`
A property that returns the name of the logger.
> NOTE: Cannot be changed after instantiation!
* `set_level(value: LEVEL)`
Sets the attribute `level` of the logger to _value_.
All other messages with severity level less than this _value_ will be ignored.
By default, the logger is instantiated with severity level `LEVEL.NOTSET` (0) and therefore all messages are logged.
Available levels:
* `DEBUG`
* `INFO`
* `WARNING`
* `ERROR`
* `CRITICAL`
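Example (a sketch; it assumes `LEVEL` is importable from `logpie`, which the other examples do not show):
```python
from logpie import Logger, LEVEL

log = Logger("my_logger")
log.set_level(LEVEL.WARNING)

log.info("This message is ignored.")      # below WARNING
log.warning("This message is recorded.")  # WARNING and above are logged
```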
* `set_state(value: STATE)`
Sets the attribute `state` of the logger to _value_.
If set to `STATE.OFF` (disabled) no message will be recorded.
By default, the logger is instantiated with state `STATE.ON` (enabled) and therefore all messages are logged.
* `set_handlers(value: Union[Handler, List[Handler], Tuple[Handler]])`
Sets the stream handlers for the logger to __value__.
Available handlers:
* `StdStream`
* `FileStream`
* `del_handlers()`
Delete the handlers of the logger.
* `add_handler(value: Handler)`
Add a handler to the logger.
* `remove_handler(value: Handler)`
Remove a handler from the logger.
* `is_enabled()`
Return `True` if the logger is enabled and `False` otherwise.
* `log(level: LEVEL, msg: str, *args, **kwargs)`
Log a message with `msg % args` with level `level`.
To add exception info to the message use the `exc_info` keyword argument with a `True` value.
Example:
```python
log(LEVEL.ERROR, "Testing '%s' messages!", "ERROR", exc_info=True)
```
or
```python
log(LEVEL.ERROR, "Testing '%(level)s' messages!", {"level": "ERROR"}, exc_info=True)
```
To get the correct frame from the stack, either set the `depth` param to 8 when calling `log()` directly or call it from inside a logging helper function.
Example:
```python
log(LEVEL.DEBUG, "Testing 'DEBUG' messages!", depth=8)
```
or
```python
def debug(self, msg: str, *args, **kwargs):
self.log(LEVEL.DEBUG, msg, *args, **kwargs)
```
* `close()`
Close the handlers of the logger and release the resources.
* `debug(msg: str, *args, **kwargs)`
Log a message with `msg % args` with level `DEBUG`.
To add exception info to the message use the `exc_info` keyword argument with a `True` value.
Example:
```python
log.debug("Testing 'DEBUG' messages!")
```
* `info(msg: str, *args, **kwargs)`
Log a message with `msg % args` with level `INFO`
The arguments and keyword arguments are interpreted as for `debug()`
Example:
```python
log.info("Testing 'INFO' messages!")
```
* `warning(msg: str, *args, **kwargs)`
Log a message with `msg % args` with level `WARNING`
The arguments and keyword arguments are interpreted as for `debug()`
Example:
```python
log.warning("Testing 'WARNING' messages!")
```
* `error(msg: str, *args, **kwargs)`
Log a message with `msg % args` with level `ERROR`
The arguments and keyword arguments are interpreted as for `debug()`
Example:
```python
try:
raise TypeError("Type error occurred!")
except TypeError:
log.error("Action failed!", exc_info=True)
```
or
```python
try:
raise TypeError("Type error occurred!")
except TypeError as type_error:
log.error("Action failed!", exc_info=type_error)
```
* `exception(msg: str, *args, **kwargs)`
Just a more convenient way of logging an `ERROR` message with `exc_info=True`.
Example:
```python
try:
raise TypeError("Type error occurred!")
except TypeError:
log.exception("Action failed!")
```
* `critical(msg: str, *args, **kwargs)`
Log a message with `msg % args` with level `CRITICAL`
The arguments and keyword arguments are interpreted as for `debug()`
Example:
```python
try:
raise TypeError("Critical error occurred!")
except TypeError as critical_error:
log.critical("Action failed!", exc_info=critical_error)
```
### StreamHandlers:
By default, the logger streams all messages to the console output `sys.stdout` and `sys.stderr` using the `StdStream` handler.
To log messages into a file we must use the `FileStream` handler.
To use a different handler or more:
```python
from logpie import Logger, StdStream, FileStream
console_hdlr = StdStream()
file_hdlr = FileStream("my_log_file.log")
log = Logger("my_logger", handlers=[console_hdlr, file_hdlr])
if __name__ == '__main__':
log.debug("Logging debug messages!")
```
#### _class_ logpie.StdStream
###### Parameters:
* `formatter`: Formatter - Formatter object to format the logs (defaults to `None`).
###### Methods:
* `emit(row: Row)`
Emit a log row.
This method acquires the thread lock and passes the log row formatted as
a string along with the handle associated with the logging level to the `write()` method.
* `write(handle: TextIO, message: str)`
Write the log `message` using the given `handle`.
#### _class_ logpie.FileStream
###### Parameters:
* `filename` (required): str - Name of the file to write logs into.
* `mode` (optional): str - Mode of file opening (defaults to `a`).
* `a` for appending to file.
* `w` for truncating the file.
* `encoding` (optional): str - Encoding of the file (defaults to `UTF-8`).
* `folder`: str - Folder to write logs in (defaults to `None`).
If omitted and filename is not a full file path it defaults to
the `logs` folder at the root of your project.
* `max_size`: int - Maximum size of the log file (defaults to `(1024 ** 2) * 4`, 4 MB).
If `cycle` is enabled, when the file reaches the maximum size,
the handler will switch to another file by incrementing the index with `1`.
* `cycle`: bool - Whether to cycle files when maximum size is reached (defaults to `False`).
When the file reaches the maximum size,
the handler will switch to another file by incrementing the index with `1`
* `chronological`: bool - Whether to sort files chronologically (defaults to `False`).
When enabled, the folder tree is structured as follows:
```markdown
.
└─logs
└─year (ex: 2022)
└─month (ex: january)
├─2022-08-01_logpie.1.log
├─2022-08-01_logpie.2.log
└─2022-08-01_logpie.3.log
```
* `date_prefix`: bool - Whether to add date prefix to filename (defaults to `False`).
* `date_aware`: bool - Whether to use date awareness to the log file (defaults to `False`).
If `date_prefix` is enabled this will enforce the current date to be
used rather than the date when the handler was created.
* `formatter`: Formatter - Formatter object to format the logs (defaults to `None`).
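For example, a handler that cycles log files and organizes them by year and month could be configured as below (a sketch; the file name and size are illustrative):
```python
from logpie import Logger, FileStream

file_hdlr = FileStream(
    "app.log",                 # hypothetical file name
    folder="logs",             # write under the "logs" folder
    max_size=(1024 ** 2) * 8,  # switch files after roughly 8 MB...
    cycle=True,                # ...by incrementing the file index
    chronological=True,        # sort files into year/month folders
    date_prefix=True,          # prefix each file name with its date
)
log = Logger("my_logger", handlers=file_hdlr)
```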
###### Methods:
* `emit(row: Row)`
Emit a log row.
This method acquires the thread lock and passes the log `row` formatted as
a string to the `write()` method.
* `write(message: str)`
Write a log `message` into the file.
The log rows are formatted with the help of the `Formatter` class.
To change the formatting:
```python
from logpie import Logger, FileStream, Formatter
# here we're also adding a new field (e.g. 'ip') used by the 'extra' keyword arguments.
my_formatter = Formatter(
row="${timestamp} - ${ip} - ${level} - ${source}: ${message}",
timestamp="[%Y-%m-%d %H:%M:%S.%f]",
stack="<${file}, ${line}, ${code}>",
)
my_handler = FileStream("my_log_file.log", formatter=my_formatter)
log = Logger("my_logger", handlers=my_handler)
if __name__ == '__main__':
# here we are passing the 'ip' keyword argument for the 'extra' field
log.debug("Testing 'DEBUG' messages!", ip="192.168.1.100")
```
#### _class_ logpie.Formatter
###### Parameters:
* `row`: str - The row formatting template.
This template uses the `string.Template` style with placeholders (e.g. `${field}`).
* `timestamp`: str - The timestamp formatting template.
This template uses the `datetime.strftime()` style (e.g. `%Y-%m-%d %H:%M:%S.%f`).
* `stack`: str - The stack info formatting template.
This template uses the `string.Template` style with placeholders (e.g. `${field}`).
###### Methods:
* `as_string(row: Row)`
Format a given row into a string based on predefined templates.
|
PypiClean
|
/datarobot_early_access-3.3.0.2023.8.28.tar.gz/datarobot_early_access-3.3.0.2023.8.28/datarobot/_experimental/models/recipe_operations.py
|
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
import trafaret as t
from typing_extensions import TypedDict
from datarobot._experimental.models.enums import (
CategoricalStatsMethods,
DatetimeSamplingStrategy,
DownsamplingOperations,
FilterOperationFunctions,
NumericStatsMethods,
SamplingOperations,
WranglingOperations,
)
from datarobot.enums import enum_to_list
from datarobot.models.api_object import APIObject
class BaseOperation(APIObject):
"""Single base transformation unit in Data Wrangler recipe."""
def __init__(self, directive: str, arguments: Any):
self.directive = directive
self.arguments = arguments
class WranglingOperation(BaseOperation):
_converter = t.Dict(
{
t.Key("directive"): t.Enum(*enum_to_list(WranglingOperations)),
t.Key("arguments"): t.Mapping(t.String(), t.Any()),
}
).allow_extra("*")
class DownsamplingOperation(BaseOperation):
_converter = t.Dict(
{
t.Key("directive"): t.Enum(*enum_to_list(DownsamplingOperations)),
t.Key("arguments"): t.Mapping(t.String(), t.Any()),
}
).allow_extra("*")
class SamplingOperation(BaseOperation):
_converter = t.Dict(
{
t.Key("directive"): t.Enum(*enum_to_list(SamplingOperations)),
t.Key("arguments"): t.Mapping(t.String(), t.Any()),
}
).allow_extra("*")
class BaseTimeAwareTask(APIObject):
def __init__(self, name: str, arguments: Dict[str, Any]):
self.name = name
self.arguments = arguments
class CategoricalStats(BaseTimeAwareTask):
def __init__(self, methods: List[CategoricalStatsMethods], window_size: int):
super().__init__("categorical-stats", {"window_size": window_size, "methods": methods})
class NumericStats(BaseTimeAwareTask):
def __init__(self, methods: List[NumericStatsMethods], window_size: int):
super().__init__("numeric-stats", {"window_size": window_size, "methods": methods})
class Lags(BaseTimeAwareTask):
def __init__(self, orders: List[int]):
super().__init__("lags", {"orders": orders})
class LagsOperation(WranglingOperation):
def __init__(
self,
column: str,
orders: List[int],
datetime_partition_column: str,
multiseries_id_column: Optional[str] = None,
):
super().__init__(
directive=WranglingOperations.LAGS,
arguments={
"column": column,
"orders": orders,
"datetime_partition_column": datetime_partition_column,
"multiseries_id_column": multiseries_id_column,
},
)
class WindowNumericStatsOperation(WranglingOperation):
"""Generate various rolling numeric statistics in a window. Output could be a several columns."""
def __init__(
self,
column: str,
window_size: int,
methods: List[NumericStatsMethods],
datetime_partition_column: str,
multiseries_id_column: Optional[str] = None,
rolling_median_snowflake_udf: Optional[str] = None,
):
super().__init__(
directive=WranglingOperations.WINDOW_NUMERIC_STATS,
arguments={
"column": column,
"window_size": window_size,
"methods": methods,
"datetime_partition_column": datetime_partition_column,
"multiseries_id_column": multiseries_id_column,
"rolling_median_snowflake_user_defined_function": rolling_median_snowflake_udf,
},
)
class TimeSeriesOperation(WranglingOperation):
"""Operation to generate a dataset ready for time series modeling: with forecast point, forecast distances,
known in advance columns, etc.
"""
def __init__(
self,
target_column: str,
datetime_partition_column: str,
forecast_distances: List[int],
task_plan: Dict[str, List[BaseTimeAwareTask]],
baseline_periods: Optional[List[int]] = None,
known_in_advance_columns: Optional[List[str]] = None,
multiseries_id_column: Optional[str] = None,
rolling_median_snowflake_udf: Optional[str] = None,
rolling_most_frequent_snowflake_udf: Optional[str] = None,
forecast_point: Optional[datetime] = None,
):
"""
Parameters
----------
target_column
datetime_partition_column
forecast_distances
task_plan:
contains a task list for each column
baseline_periods:
generates naive features from the target. For example: baseline period = 1 corresponds to the naive
latest baseline.
known_in_advance_columns
multiseries_id_column
rolling_median_snowflake_udf:
Fully qualified path to a rolling median user-defined function. Used to optimize SQL execution with Snowflake.
rolling_most_frequent_snowflake_udf:
Fully qualified path to a rolling most-frequent user-defined function.
forecast_point:
To use at prediction time.
"""
arguments = {
"target_column": target_column,
"datetime_partition_column": datetime_partition_column,
"forecast_distances": forecast_distances,
"task_plan": [
{"column": column, "task_list": task_list}
for column, task_list in task_plan.items()
],
"multiseries_id_column": multiseries_id_column,
"known_in_advance_columns": known_in_advance_columns,
"baseline_periods": baseline_periods,
"rolling_median_snowflake_user_defined_function": rolling_median_snowflake_udf,
"rolling_most_frequent_snowflake_user_defined_function": rolling_most_frequent_snowflake_udf,
"forecast_point": forecast_point,
}
super().__init__(directive=WranglingOperations.TIME_SERIES, arguments=arguments)
class ComputeNewOperation(WranglingOperation):
def __init__(self, expression: str, new_feature_name: str):
super().__init__(
directive=WranglingOperations.COMPUTE_NEW,
arguments={"expression": expression, "new_feature_name": new_feature_name},
)
class RenameColumnsOperation(WranglingOperation):
def __init__(self, column_mappings: Dict[str, str]):
"""
column_mapping: dict, where
key: str
Original name
value: str
New name
"""
super().__init__(
directive=WranglingOperations.RENAME_COLUMNS,
arguments={
"column_mappings": [
{"original_name": k, "new_name": v} for k, v in column_mappings.items()
]
},
)
class FilterCondition(TypedDict):
column: str
function: FilterOperationFunctions
function_arguments: List[Union[str, int, float]]
class FilterOperation(WranglingOperation):
"""Filter rows."""
def __init__(
self,
conditions: List[FilterCondition],
keep_rows: Optional[bool] = True,
operator: Optional[str] = "and",
):
"""
keep_rows: bool
If matching rows should be kept or dropped
operator: str
"and" or "or"
conditions: list of FilterCondition
"""
super().__init__(
directive=WranglingOperations.FILTER,
arguments={"keep_rows": keep_rows, "operator": operator, "conditions": conditions},
)
class DropColumnsOperation(WranglingOperation):
def __init__(self, columns: List[str]):
"""
columns:
Columns to delete
"""
super().__init__(
directive=WranglingOperations.DROP_COLUMNS,
arguments={"columns": columns},
)
class RandomSamplingOperation(SamplingOperation):
def __init__(self, rows: int, seed: Optional[int] = None):
super().__init__(
directive=SamplingOperations.RANDOM_SAMPLE,
arguments={"rows": rows, "seed": seed},
)
class DatetimeSamplingOperation(SamplingOperation):
def __init__(
self,
datetime_partition_column: str,
rows: int,
strategy: Optional[DatetimeSamplingStrategy] = None,
multiseries_id_column: Optional[str] = None,
selected_series: Optional[List[str]] = None,
):
super().__init__(
directive=SamplingOperations.DATETIME_SAMPLE,
arguments={
"rows": rows,
"strategy": strategy,
"datetime_partition_column": datetime_partition_column,
"multiseries_id_column": multiseries_id_column,
"selected_series": selected_series,
},
)
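# --- Illustrative sketch (not part of the original module) ---
# Composes a time series wrangling operation from the classes above.
# Column names, forecast distances and lag orders are hypothetical.
if __name__ == "__main__":
    task_plan = {
        "sales": [Lags(orders=[1, 7])],  # derive lag-1 and lag-7 features from "sales"
    }
    ts_op = TimeSeriesOperation(
        target_column="sales",
        datetime_partition_column="date",
        forecast_distances=[1, 2, 3],
        task_plan=task_plan,
        baseline_periods=[1],
    )
    print(ts_op.directive, ts_op.arguments["forecast_distances"])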
|
PypiClean
|
/tencentcloud_iac_pulumi-0.1.5.tar.gz/tencentcloud_iac_pulumi-0.1.5/tencentcloud_iac_pulumi/vpc/ipv6_eni_address.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['Ipv6EniAddressArgs', 'Ipv6EniAddress']
@pulumi.input_type
class Ipv6EniAddressArgs:
def __init__(__self__, *,
network_interface_id: pulumi.Input[str],
vpc_id: pulumi.Input[str],
ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['Ipv6EniAddressIpv6AddressArgs']]]] = None):
"""
The set of arguments for constructing a Ipv6EniAddress resource.
:param pulumi.Input[str] network_interface_id: ENI instance `ID`, in the form of `eni-m6dyj72l`.
:param pulumi.Input[str] vpc_id: VPC `ID`, in the form of `vpc-m6dyj72l`.
:param pulumi.Input[Sequence[pulumi.Input['Ipv6EniAddressIpv6AddressArgs']]] ipv6_addresses: The specified `IPv6` address list, up to 10 can be specified at a time. Combined with the input parameter `Ipv6AddressCount` to calculate the quota. Mandatory one with Ipv6AddressCount.
"""
pulumi.set(__self__, "network_interface_id", network_interface_id)
pulumi.set(__self__, "vpc_id", vpc_id)
if ipv6_addresses is not None:
pulumi.set(__self__, "ipv6_addresses", ipv6_addresses)
@property
@pulumi.getter(name="networkInterfaceId")
def network_interface_id(self) -> pulumi.Input[str]:
"""
ENI instance `ID`, in the form of `eni-m6dyj72l`.
"""
return pulumi.get(self, "network_interface_id")
@network_interface_id.setter
def network_interface_id(self, value: pulumi.Input[str]):
pulumi.set(self, "network_interface_id", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
"""
VPC `ID`, in the form of `vpc-m6dyj72l`.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="ipv6Addresses")
def ipv6_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['Ipv6EniAddressIpv6AddressArgs']]]]:
"""
The specified `IPv6` address list, up to 10 can be specified at a time. Combined with the input parameter `Ipv6AddressCount` to calculate the quota. Mandatory one with Ipv6AddressCount.
"""
return pulumi.get(self, "ipv6_addresses")
@ipv6_addresses.setter
def ipv6_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['Ipv6EniAddressIpv6AddressArgs']]]]):
pulumi.set(self, "ipv6_addresses", value)
@pulumi.input_type
class _Ipv6EniAddressState:
def __init__(__self__, *,
ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['Ipv6EniAddressIpv6AddressArgs']]]] = None,
network_interface_id: Optional[pulumi.Input[str]] = None,
vpc_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Ipv6EniAddress resources.
:param pulumi.Input[Sequence[pulumi.Input['Ipv6EniAddressIpv6AddressArgs']]] ipv6_addresses: The specified `IPv6` address list, up to 10 can be specified at a time. Combined with the input parameter `Ipv6AddressCount` to calculate the quota. Mandatory one with Ipv6AddressCount.
:param pulumi.Input[str] network_interface_id: ENI instance `ID`, in the form of `eni-m6dyj72l`.
:param pulumi.Input[str] vpc_id: VPC `ID`, in the form of `vpc-m6dyj72l`.
"""
if ipv6_addresses is not None:
pulumi.set(__self__, "ipv6_addresses", ipv6_addresses)
if network_interface_id is not None:
pulumi.set(__self__, "network_interface_id", network_interface_id)
if vpc_id is not None:
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter(name="ipv6Addresses")
def ipv6_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['Ipv6EniAddressIpv6AddressArgs']]]]:
"""
The specified `IPv6` address list, up to 10 can be specified at a time. Combined with the input parameter `Ipv6AddressCount` to calculate the quota. Mandatory one with Ipv6AddressCount.
"""
return pulumi.get(self, "ipv6_addresses")
@ipv6_addresses.setter
def ipv6_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['Ipv6EniAddressIpv6AddressArgs']]]]):
pulumi.set(self, "ipv6_addresses", value)
@property
@pulumi.getter(name="networkInterfaceId")
def network_interface_id(self) -> Optional[pulumi.Input[str]]:
"""
ENI instance `ID`, in the form of `eni-m6dyj72l`.
"""
return pulumi.get(self, "network_interface_id")
@network_interface_id.setter
def network_interface_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_interface_id", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> Optional[pulumi.Input[str]]:
"""
VPC `ID`, in the form of `vpc-m6dyj72l`.
"""
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_id", value)
class Ipv6EniAddress(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['Ipv6EniAddressIpv6AddressArgs']]]]] = None,
network_interface_id: Optional[pulumi.Input[str]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource to create a vpc ipv6_eni_address
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['Ipv6EniAddressIpv6AddressArgs']]]] ipv6_addresses: The specified `IPv6` address list, up to 10 can be specified at a time. Combined with the input parameter `Ipv6AddressCount` to calculate the quota. Mandatory one with Ipv6AddressCount.
:param pulumi.Input[str] network_interface_id: ENI instance `ID`, in the form of `eni-m6dyj72l`.
:param pulumi.Input[str] vpc_id: VPC `ID`, in the form of `vpc-m6dyj72l`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Ipv6EniAddressArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a resource to create a vpc ipv6_eni_address
:param str resource_name: The name of the resource.
:param Ipv6EniAddressArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(Ipv6EniAddressArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['Ipv6EniAddressIpv6AddressArgs']]]]] = None,
network_interface_id: Optional[pulumi.Input[str]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = Ipv6EniAddressArgs.__new__(Ipv6EniAddressArgs)
__props__.__dict__["ipv6_addresses"] = ipv6_addresses
if network_interface_id is None and not opts.urn:
raise TypeError("Missing required property 'network_interface_id'")
__props__.__dict__["network_interface_id"] = network_interface_id
if vpc_id is None and not opts.urn:
raise TypeError("Missing required property 'vpc_id'")
__props__.__dict__["vpc_id"] = vpc_id
super(Ipv6EniAddress, __self__).__init__(
'tencentcloud:Vpc/ipv6EniAddress:Ipv6EniAddress',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['Ipv6EniAddressIpv6AddressArgs']]]]] = None,
network_interface_id: Optional[pulumi.Input[str]] = None,
vpc_id: Optional[pulumi.Input[str]] = None) -> 'Ipv6EniAddress':
"""
Get an existing Ipv6EniAddress resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['Ipv6EniAddressIpv6AddressArgs']]]] ipv6_addresses: The list of `IPv6` addresses; up to 10 can be specified at a time. Combined with the input parameter `Ipv6AddressCount` to calculate the quota. Exactly one of `ipv6_addresses` and `Ipv6AddressCount` must be specified.
:param pulumi.Input[str] network_interface_id: ENI instance `ID`, in the form of `eni-m6dyj72l`.
:param pulumi.Input[str] vpc_id: VPC `ID`, in the form of `vpc-m6dyj72l`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _Ipv6EniAddressState.__new__(_Ipv6EniAddressState)
__props__.__dict__["ipv6_addresses"] = ipv6_addresses
__props__.__dict__["network_interface_id"] = network_interface_id
__props__.__dict__["vpc_id"] = vpc_id
return Ipv6EniAddress(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="ipv6Addresses")
def ipv6_addresses(self) -> pulumi.Output[Optional[Sequence['outputs.Ipv6EniAddressIpv6Address']]]:
"""
The list of `IPv6` addresses; up to 10 can be specified at a time. Combined with the input parameter `Ipv6AddressCount` to calculate the quota. Exactly one of `ipv6_addresses` and `Ipv6AddressCount` must be specified.
"""
return pulumi.get(self, "ipv6_addresses")
@property
@pulumi.getter(name="networkInterfaceId")
def network_interface_id(self) -> pulumi.Output[str]:
"""
ENI instance `ID`, in the form of `eni-m6dyj72l`.
"""
return pulumi.get(self, "network_interface_id")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Output[str]:
"""
VPC `ID`, in the form of `vpc-m6dyj72l`.
"""
return pulumi.get(self, "vpc_id")
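# --- Hedged usage sketch (illustrative, not part of the generated module) ---
# A minimal Pulumi program declaring this resource. The import name and every
# value below (the addresses, the IDs, and the `address` field of
# Ipv6EniAddressIpv6AddressArgs) are assumptions, not taken from this file.
#
#   import tencentcloud_iac_pulumi as tencentcloud   # hypothetical package name
#
#   example = tencentcloud.vpc.Ipv6EniAddress(
#       "example",
#       vpc_id="vpc-m6dyj72l",                        # placeholder, format from the docs above
#       network_interface_id="eni-m6dyj72l",          # placeholder, format from the docs above
#       ipv6_addresses=[
#           tencentcloud.vpc.Ipv6EniAddressIpv6AddressArgs(address="2402:4e00::1"),  # hypothetical
#       ],
#   )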
/app_scraper-0.0.5-py3-none-any.whl/app_scraper/constants.py
GL_COUNTRY_CODES = {
"ad": "Andorra",
"ae": "United Arab Emirates",
"af": "Afghanistan",
"ag": "Antigua and Barbuda",
"ai": "Anguilla",
"al": "Albania",
"am": "Armenia",
"an": "Netherlands Antilles",
"ao": "Angola",
"aq": "Antarctica",
"ar": "Argentina",
"as": "American Samoa",
"at": "Austria",
"au": "Australia",
"aw": "Aruba",
"az": "Azerbaijan",
"ba": "Bosnia and Herzegovina",
"bb": "Barbados",
"bd": "Bangladesh",
"be": "Belgium",
"bf": "Burkina Faso",
"bg": "Bulgaria",
"bh": "Bahrain",
"bi": "Burundi",
"bj": "Benin",
"bm": "Bermuda",
"bn": "Brunei",
"bo": "Bolivia",
"br": "Brazil",
"bs": "Bahamas",
"bt": "Bhutan",
"bv": "Bouvet Island",
"bw": "Botswana",
"by": "Belarus",
"bz": "Belize",
"ca": "Canada",
"cc": "Cocos [Keeling] Islands",
"cd": "Congo [DRC]",
"cf": "Central African Republic",
"cg": "Congo [Republic]",
"ch": "Switzerland",
"ci": "Côte d'Ivoire",
"ck": "Cook Islands",
"cl": "Chile",
"cm": "Cameroon",
"cn": "China",
"co": "Colombia",
"cr": "Costa Rica",
"cu": "Cuba",
"cv": "Cape Verde",
"cx": "Christmas Island",
"cy": "Cyprus",
"cz": "Czech Republic",
"de": "Germany",
"dj": "Djibouti",
"dk": "Denmark",
"dm": "Dominica",
"do": "Dominican Republic",
"dz": "Algeria",
"ec": "Ecuador",
"ee": "Estonia",
"eg": "Egypt",
"eh": "Western Sahara",
"er": "Eritrea",
"es": "Spain",
"et": "Ethiopia",
"fi": "Finland",
"fj": "Fiji",
"fk": "Falkland Islands [Islas Malvinas]",
"fm": "Micronesia",
"fo": "Faroe Islands",
"fr": "France",
"ga": "Gabon",
"gb": "United Kingdom",
"gd": "Grenada",
"ge": "Georgia",
"gf": "French Guiana",
"gg": "Guernsey",
"gh": "Ghana",
"gi": "Gibraltar",
"gl": "Greenland",
"gm": "Gambia",
"gn": "Guinea",
"gp": "Guadeloupe",
"gq": "Equatorial Guinea",
"gr": "Greece",
"gs": "South Georgia and the South Sandwich Islands",
"gt": "Guatemala",
"gu": "Guam",
"gw": "Guinea-Bissau",
"gy": "Guyana",
"gz": "Gaza Strip",
"hk": "Hong Kong",
"hm": "Heard Island and McDonald Islands",
"hn": "Honduras",
"hr": "Croatia",
"ht": "Haiti",
"hu": "Hungary",
"id": "Indonesia",
"ie": "Ireland",
"il": "Israel",
"im": "Isle of Man",
"in": "India",
"io": "British Indian Ocean Territory",
"iq": "Iraq",
"ir": "Iran",
"is": "Iceland",
"it": "Italy",
"je": "Jersey",
"jm": "Jamaica",
"jo": "Jordan",
"jp": "Japan",
"ke": "Kenya",
"kg": "Kyrgyzstan",
"kh": "Cambodia",
"ki": "Kiribati",
"km": "Comoros",
"kn": "Saint Kitts and Nevis",
"kp": "North Korea",
"kr": "South Korea",
"kw": "Kuwait",
"ky": "Cayman Islands",
"kz": "Kazakhstan",
"la": "Laos",
"lb": "Lebanon",
"lc": "Saint Lucia",
"li": "Liechtenstein",
"lk": "Sri Lanka",
"lr": "Liberia",
"ls": "Lesotho",
"lt": "Lithuania",
"lu": "Luxembourg",
"lv": "Latvia",
"ly": "Libya",
"ma": "Morocco",
"mc": "Monaco",
"md": "Moldova",
"me": "Montenegro",
"mg": "Madagascar",
"mh": "Marshall Islands",
"mk": "Macedonia [FYROM]",
"ml": "Mali",
"mm": "Myanmar [Burma]",
"mn": "Mongolia",
"mo": "Macau",
"mp": "Northern Mariana Islands",
"mq": "Martinique",
"mr": "Mauritania",
"ms": "Montserrat",
"mt": "Malta",
"mu": "Mauritius",
"mv": "Maldives",
"mw": "Malawi",
"mx": "Mexico",
"my": "Malaysia",
"mz": "Mozambique",
"na": "Namibia",
"nc": "New Caledonia",
"ne": "Niger",
"nf": "Norfolk Island",
"ng": "Nigeria",
"ni": "Nicaragua",
"nl": "Netherlands",
"no": "Norway",
"np": "Nepal",
"nr": "Nauru",
"nu": "Niue",
"nz": "New Zealand",
"om": "Oman",
"pa": "Panama",
"pe": "Peru",
"pf": "French Polynesia",
"pg": "Papua New Guinea",
"ph": "Philippines",
"pk": "Pakistan",
"pl": "Poland",
"pm": "Saint Pierre and Miquelon",
"pn": "Pitcairn Islands",
"pr": "Puerto Rico",
"ps": "Palestinian Territories",
"pt": "Portugal",
"pw": "Palau",
"py": "Paraguay",
"qa": "Qatar",
"re": "Réunion",
"ro": "Romania",
"rs": "Serbia",
"ru": "Russia",
"rw": "Rwanda",
"sa": "Saudi Arabia",
"sb": "Solomon Islands",
"sc": "Seychelles",
"sd": "Sudan",
"se": "Sweden",
"sg": "Singapore",
"sh": "Saint Helena",
"si": "Slovenia",
"sj": "Svalbard and Jan Mayen",
"sk": "Slovakia",
"sl": "Sierra Leone",
"sm": "San Marino",
"sn": "Senegal",
"so": "Somalia",
"sr": "Suriname",
"st": "São Tomé and Príncipe",
"sv": "El Salvador",
"sy": "Syria",
"sz": "Swaziland",
"tc": "Turks and Caicos Islands",
"td": "Chad",
"tf": "French Southern Territories",
"tg": "Togo",
"th": "Thailand",
"tj": "Tajikistan",
"tk": "Tokelau",
"tl": "Timor-Leste",
"tm": "Turkmenistan",
"tn": "Tunisia",
"to": "Tonga",
"tr": "Turkey",
"tt": "Trinidad and Tobago",
"tv": "Tuvalu",
"tw": "Taiwan",
"tz": "Tanzania",
"ua": "Ukraine",
"ug": "Uganda",
"um": "U.S. Minor Outlying Islands",
"us": "United States",
"uy": "Uruguay",
"uz": "Uzbekistan",
"va": "Vatican City",
"vc": "Saint Vincent and the Grenadines",
"ve": "Venezuela",
"vg": "British Virgin Islands",
"vi": "U.S. Virgin Islands",
"vn": "Vietnam",
"vu": "Vanuatu",
"wf": "Wallis and Futuna",
"ws": "Samoa",
"xk": "Kosovo",
"ye": "Yemen",
"yt": "Mayotte",
"za": "South Africa",
"zm": "Zambia",
"zw": "Zimbabwe",
}
HL_LANGUAGE_CODES = {
"af": "Afrikaans",
"sq": "Albanian",
"sm": "Amharic",
"ar": "Arabic",
"az": "Azerbaijani",
"eu": "Basque",
"be": "Belarusian",
"bn": "Bengali",
"bh": "Bihari",
"bs": "Bosnian",
"bg": "Bulgarian",
"ca": "Catalan",
"zh-CN": "Chinese (Simplified)",
"zh-TW": "Chinese (Traditional)",
"hr": "Croatian",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"eo": "Esperanto",
"et": "Estonian",
"fo": "Faroese",
"fi": "Finnish",
"fr": "French",
"fy": "Frisian",
"gl": "Galician",
"ka": "Georgian",
"de": "German",
"el": "Greek",
"gu": "Gujarati",
"iw": "Hebrew",
"hi": "Hindi",
"hu": "Hungarian",
"is": "Icelandic",
"id": "Indonesian",
"ia": "Interlingua",
"ga": "Irish",
"it": "Italian",
"ja": "Japanese",
"jw": "Javanese",
"kn": "Kannada",
"ko": "Korean",
"la": "Latin",
"lv": "Latvian",
"lt": "Lithuanian",
"mk": "Macedonian",
"ms": "Malay",
"ml": "Malayam",
"mt": "Maltese",
"mr": "Marathi",
"ne": "Nepali",
"no": "Norwegian",
"nn": "Norwegian (Nynorsk)",
"oc": "Occitan",
"fa": "Persian",
"pl": "Polish",
"pt-BR": "Portuguese (Brazil)",
"pt-PT": "Portuguese (Portugal)",
"pa": "Punjabi",
"ro": "Romanian",
"ru": "Russian",
"gd": "Scots Gaelic",
"sr": "Serbian",
"si": "Sinhalese",
"sk": "Slovak",
"sl": "Slovenian",
"es": "Spanish",
"su": "Sudanese",
"sw": "Swahili",
"sv": "Swedish",
"tl": "Tagalog",
"ta": "Tamil",
"te": "Telugu",
"th": "Thai",
"ti": "Tigrinya",
"tr": "Turkish",
"uk": "Ukrainian",
"ur": "Urdu",
"uz": "Uzbek",
"vi": "Vietnamese",
"cy": "Welsh",
"xh": "Xhosa",
"zu": "Zulu",
}
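# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Minimal helpers showing how the two lookup tables above might be used to
# validate the `gl` (country) and `hl` (language) parameters of a scraper
# request; the function names are hypothetical.
def is_supported_country(gl: str) -> bool:
    """Return True if `gl` is one of the country codes defined above."""
    return gl.lower() in GL_COUNTRY_CODES
def is_supported_language(hl: str) -> bool:
    """Return True if `hl` is one of the language codes defined above."""
    # Compound codes such as "zh-CN" or "pt-BR" are stored case sensitively,
    # so check the raw value first and fall back to the lowercased form.
    return hl in HL_LANGUAGE_CODES or hl.lower() in HL_LANGUAGE_CODES
if __name__ == "__main__":
    print(is_supported_country("US"), is_supported_language("zh-CN"))  # True True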
/stor-blockchain-1.0.4.tar.gz/stor-blockchain-1.0.4/stor/pools/pool_wallet.py
import logging
import time
from typing import Any, Optional, Set, Tuple, List, Dict
from blspy import PrivateKey, G2Element, G1Element
from stor.consensus.block_record import BlockRecord
from stor.pools.pool_config import PoolWalletConfig, load_pool_config, update_pool_config
from stor.pools.pool_wallet_info import (
PoolWalletInfo,
PoolSingletonState,
PoolState,
FARMING_TO_POOL,
SELF_POOLING,
LEAVING_POOL,
create_pool_state,
)
from stor.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from stor.types.announcement import Announcement
from stor.types.blockchain_format.coin import Coin
from stor.types.blockchain_format.sized_bytes import bytes32
from stor.types.blockchain_format.program import Program, SerializedProgram
from stor.types.coin_record import CoinRecord
from stor.types.coin_spend import CoinSpend
from stor.types.spend_bundle import SpendBundle
from stor.pools.pool_puzzles import (
create_waiting_room_inner_puzzle,
create_full_puzzle,
SINGLETON_LAUNCHER,
create_pooling_inner_puzzle,
solution_to_pool_state,
pool_state_to_inner_puzzle,
get_most_recent_singleton_coin_from_coin_spend,
launcher_id_to_p2_puzzle_hash,
create_travel_spend,
uncurry_pool_member_inner_puzzle,
create_absorb_spend,
is_pool_member_inner_puzzle,
is_pool_waitingroom_inner_puzzle,
uncurry_pool_waitingroom_inner_puzzle,
get_delayed_puz_info_from_launcher_spend,
)
from stor.util.ints import uint8, uint32, uint64
from stor.wallet.derive_keys import (
master_sk_to_pooling_authentication_sk,
find_owner_sk,
)
from stor.wallet.sign_coin_spends import sign_coin_spends
from stor.wallet.transaction_record import TransactionRecord
from stor.wallet.util.wallet_types import WalletType
from stor.wallet.wallet import Wallet
from stor.wallet.wallet_coin_record import WalletCoinRecord
from stor.wallet.wallet_info import WalletInfo
from stor.wallet.util.transaction_type import TransactionType
class PoolWallet:
MINIMUM_INITIAL_BALANCE = 1
MINIMUM_RELATIVE_LOCK_HEIGHT = 5
MAXIMUM_RELATIVE_LOCK_HEIGHT = 1000
wallet_state_manager: Any
log: logging.Logger
wallet_info: WalletInfo
target_state: Optional[PoolState]
next_transaction_fee: uint64
standard_wallet: Wallet
wallet_id: int
singleton_list: List[Coin]
"""
From the user's perspective, this is not a wallet at all, but a way to control
whether their pooling-enabled plots are being self-farmed, or farmed by a pool,
and by which pool. Self-pooling and joint pooling rewards are swept into the
users' regular wallet.
If this wallet is in SELF_POOLING state, the coin ID associated with the current
pool wallet contains the rewards gained while self-farming, so care must be taken
to disallow joining a new pool while we still have money on the pooling singleton UTXO.
Pools can be joined anonymously, without an account or prior signup.
The ability to change the farm-to target prevents abuse from pools
by giving the user the ability to quickly change pools, or self-farm.
The pool is also protected, by not allowing members to cheat by quickly leaving a pool,
and claiming a block that was pledged to the pool.
The pooling protocol and smart coin prevents a user from quickly leaving a pool
by enforcing a wait time when leaving the pool. A minimum number of blocks must pass
after the user declares that they are leaving the pool, and before they can start to
self-claim rewards again.
Control of switching states is granted to the owner public key.
We reveal the inner_puzzle to the pool during setup of the pooling protocol.
The pool can prove to itself that the inner puzzle pays to the pooling address,
and it can follow state changes in the pooling puzzle by tracing destruction and
creation of coins associated with this pooling singleton (the singleton controlling
this pool group).
The user trusts the pool to send mining rewards to the <XXX address XXX>
TODO: We should mark which address is receiving funds for our current state.
If the pool misbehaves, it is the user's responsibility to leave the pool
It is the Pool's responsibility to claim the rewards sent to the pool_puzzlehash.
The timeout for leaving the pool is expressed in number of blocks from the time
the user expresses their intent to leave.
"""
@classmethod
def type(cls) -> uint8:
return uint8(WalletType.POOLING_WALLET)
def id(self):
return self.wallet_info.id
@classmethod
def _verify_self_pooled(cls, state) -> Optional[str]:
err = ""
if state.pool_url != "":
err += " Unneeded pool_url for self-pooling"
if state.relative_lock_height != 0:
err += " Incorrect relative_lock_height for self-pooling"
return None if err == "" else err
@classmethod
def _verify_pooling_state(cls, state) -> Optional[str]:
err = ""
if state.relative_lock_height < cls.MINIMUM_RELATIVE_LOCK_HEIGHT:
err += (
f" Pool relative_lock_height ({state.relative_lock_height})"
f"is less than recommended minimum ({cls.MINIMUM_RELATIVE_LOCK_HEIGHT})"
)
elif state.relative_lock_height > cls.MAXIMUM_RELATIVE_LOCK_HEIGHT:
err += (
f" Pool relative_lock_height ({state.relative_lock_height})"
f"is greater than recommended maximum ({cls.MAXIMUM_RELATIVE_LOCK_HEIGHT})"
)
if state.pool_url in [None, ""]:
err += " Empty pool url in pooling state"
return err
@classmethod
def _verify_pool_state(cls, state: PoolState) -> Optional[str]:
if state.target_puzzle_hash is None:
return "Invalid puzzle_hash"
if state.version > POOL_PROTOCOL_VERSION:
return (
f"Detected pool protocol version {state.version}, which is "
f"newer than this wallet's version ({POOL_PROTOCOL_VERSION}). Please upgrade "
f"to use this pooling wallet"
)
if state.state == PoolSingletonState.SELF_POOLING:
return cls._verify_self_pooled(state)
elif state.state == PoolSingletonState.FARMING_TO_POOL or state.state == PoolSingletonState.LEAVING_POOL:
return cls._verify_pooling_state(state)
else:
return "Internal Error"
@classmethod
def _verify_initial_target_state(cls, initial_target_state):
err = cls._verify_pool_state(initial_target_state)
if err:
raise ValueError(f"Invalid internal Pool State: {err}: {initial_target_state}")
async def get_spend_history(self) -> List[Tuple[uint32, CoinSpend]]:
return self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)
async def get_current_state(self) -> PoolWalletInfo:
history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
all_spends: List[CoinSpend] = [cs for _, cs in history]
# We must have at least the launcher spend
assert len(all_spends) >= 1
launcher_coin: Coin = all_spends[0].coin
delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(all_spends[0])
tip_singleton_coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(all_spends[-1])
launcher_id: bytes32 = launcher_coin.name()
p2_singleton_puzzle_hash = launcher_id_to_p2_puzzle_hash(launcher_id, delayed_seconds, delayed_puzhash)
assert tip_singleton_coin is not None
curr_spend_i = len(all_spends) - 1
pool_state: Optional[PoolState] = None
last_singleton_spend_height = uint32(0)
while pool_state is None:
full_spend: CoinSpend = all_spends[curr_spend_i]
pool_state = solution_to_pool_state(full_spend)
last_singleton_spend_height = uint32(history[curr_spend_i][0])
curr_spend_i -= 1
assert pool_state is not None
current_inner = pool_state_to_inner_puzzle(
pool_state,
launcher_coin.name(),
self.wallet_state_manager.constants.GENESIS_CHALLENGE,
delayed_seconds,
delayed_puzhash,
)
return PoolWalletInfo(
pool_state,
self.target_state,
launcher_coin,
launcher_id,
p2_singleton_puzzle_hash,
current_inner,
tip_singleton_coin.name(),
last_singleton_spend_height,
)
async def get_unconfirmed_transactions(self) -> List[TransactionRecord]:
return await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.wallet_id)
async def get_tip(self) -> Tuple[uint32, CoinSpend]:
return self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)[-1]
async def update_pool_config(self, make_new_authentication_key: bool):
current_state: PoolWalletInfo = await self.get_current_state()
pool_config_list: List[PoolWalletConfig] = load_pool_config(self.wallet_state_manager.root_path)
pool_config_dict: Dict[bytes32, PoolWalletConfig] = {c.launcher_id: c for c in pool_config_list}
existing_config: Optional[PoolWalletConfig] = pool_config_dict.get(current_state.launcher_id, None)
if make_new_authentication_key or existing_config is None:
new_auth_sk: PrivateKey = master_sk_to_pooling_authentication_sk(
self.wallet_state_manager.private_key, uint32(self.wallet_id), uint32(0)
)
auth_pk: G1Element = new_auth_sk.get_g1()
payout_instructions: str = (await self.standard_wallet.get_new_puzzlehash(in_transaction=True)).hex()
else:
auth_pk = existing_config.authentication_public_key
payout_instructions = existing_config.payout_instructions
new_config: PoolWalletConfig = PoolWalletConfig(
current_state.launcher_id,
current_state.current.pool_url if current_state.current.pool_url else "",
payout_instructions,
current_state.current.target_puzzle_hash,
current_state.p2_singleton_puzzle_hash,
current_state.current.owner_pubkey,
auth_pk,
)
pool_config_dict[new_config.launcher_id] = new_config
await update_pool_config(self.wallet_state_manager.root_path, list(pool_config_dict.values()))
@staticmethod
def get_next_interesting_coin_ids(spend: CoinSpend) -> List[bytes32]:
# CoinSpend of one of the coins that we cared about. This coin was spent in a block, but might be in a reorg
# If we return a value, it is a coin ID that we are also interested in (to support two transitions per block)
coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(spend)
if coin is not None:
return [coin.name()]
return []
async def apply_state_transitions(self, block_spends: List[CoinSpend], block_height: uint32):
"""
Updates the Pool state (including DB) with new singleton spends. The block spends can contain many spends
that we are not interested in, and can contain many ephemeral spends. They must all be in the same block.
The DB must be committed after calling this method. All validation should be done here.
"""
coin_name_to_spend: Dict[bytes32, CoinSpend] = {cs.coin.name(): cs for cs in block_spends}
tip: Tuple[uint32, CoinSpend] = await self.get_tip()
tip_height = tip[0]
tip_spend = tip[1]
assert block_height >= tip_height # We should not have a spend with a lesser block height
while True:
tip_coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(tip_spend)
assert tip_coin is not None
spent_coin_name: bytes32 = tip_coin.name()
if spent_coin_name not in coin_name_to_spend:
break
spend: CoinSpend = coin_name_to_spend[spent_coin_name]
await self.wallet_state_manager.pool_store.add_spend(self.wallet_id, spend, block_height)
tip_spend = (await self.get_tip())[1]
self.log.info(f"New PoolWallet singleton tip_coin: {tip_spend}")
coin_name_to_spend.pop(spent_coin_name)
# If we have reached the target state, resets it to None. Loops back to get current state
for _, added_spend in reversed(self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)):
latest_state: Optional[PoolState] = solution_to_pool_state(added_spend)
if latest_state is not None:
if self.target_state == latest_state:
self.target_state = None
self.next_transaction_fee = uint64(0)
break
await self.update_pool_config(False)
async def rewind(self, block_height: int) -> bool:
"""
Rolls back all transactions after block_height, and if creation was after block_height, deletes the wallet.
Returns True if the wallet should be removed.
"""
try:
history: List[Tuple[uint32, CoinSpend]] = self.wallet_state_manager.pool_store.get_spends_for_wallet(
self.wallet_id
).copy()
prev_state: PoolWalletInfo = await self.get_current_state()
await self.wallet_state_manager.pool_store.rollback(block_height, self.wallet_id)
if len(history) > 0 and history[0][0] > block_height:
# If we have no entries in the DB, we have no singleton, so we should not have a wallet either
# The PoolWallet object becomes invalid after this.
await self.wallet_state_manager.interested_store.remove_interested_puzzle_hash(
prev_state.p2_singleton_puzzle_hash, in_transaction=True
)
return True
else:
if await self.get_current_state() != prev_state:
await self.update_pool_config(False)
return False
except Exception as e:
self.log.error(f"Exception rewinding: {e}")
return False
@staticmethod
async def create(
wallet_state_manager: Any,
wallet: Wallet,
launcher_coin_id: bytes32,
block_spends: List[CoinSpend],
block_height: uint32,
in_transaction: bool,
name: str = None,
):
"""
This creates a new PoolWallet with only one spend: the launcher spend. The DB MUST be committed after calling
this method.
"""
self = PoolWallet()
self.wallet_state_manager = wallet_state_manager
self.wallet_info = await wallet_state_manager.user_store.create_wallet(
"Pool wallet", WalletType.POOLING_WALLET.value, "", in_transaction=in_transaction
)
self.wallet_id = self.wallet_info.id
self.standard_wallet = wallet
self.target_state = None
self.next_transaction_fee = uint64(0)
self.log = logging.getLogger(name if name else __name__)
launcher_spend: Optional[CoinSpend] = None
for spend in block_spends:
if spend.coin.name() == launcher_coin_id:
launcher_spend = spend
assert launcher_spend is not None
await self.wallet_state_manager.pool_store.add_spend(self.wallet_id, launcher_spend, block_height)
await self.update_pool_config(True)
p2_puzzle_hash: bytes32 = (await self.get_current_state()).p2_singleton_puzzle_hash
await self.wallet_state_manager.interested_store.add_interested_puzzle_hash(
p2_puzzle_hash, self.wallet_id, True
)
await self.wallet_state_manager.add_new_wallet(self, self.wallet_info.id, create_puzzle_hashes=False)
self.wallet_state_manager.set_new_peak_callback(self.wallet_id, self.new_peak)
return self
@staticmethod
async def create_from_db(
wallet_state_manager: Any,
wallet: Wallet,
wallet_info: WalletInfo,
name: str = None,
):
"""
This creates a PoolWallet from DB. However, all data is already handled by WalletPoolStore, so we don't need
to do anything here.
"""
self = PoolWallet()
self.wallet_state_manager = wallet_state_manager
self.wallet_id = wallet_info.id
self.standard_wallet = wallet
self.wallet_info = wallet_info
self.target_state = None
self.log = logging.getLogger(name if name else __name__)
self.wallet_state_manager.set_new_peak_callback(self.wallet_id, self.new_peak)
return self
@staticmethod
async def create_new_pool_wallet_transaction(
wallet_state_manager: Any,
main_wallet: Wallet,
initial_target_state: PoolState,
fee: uint64 = uint64(0),
p2_singleton_delay_time: Optional[uint64] = None,
p2_singleton_delayed_ph: Optional[bytes32] = None,
) -> Tuple[TransactionRecord, bytes32, bytes32]:
"""
A "plot NFT", or pool wallet, represents the idea of a set of plots that all pay to
the same pooling puzzle. This puzzle is a `stor singleton` that is
parameterized with a public key controlled by the user's wallet
(a `smart coin`). It contains an inner puzzle that can switch between
paying block rewards to a pool, or to a user's own wallet.
Call under the wallet state manager lock
"""
amount = 1
standard_wallet = main_wallet
if p2_singleton_delayed_ph is None:
p2_singleton_delayed_ph = await main_wallet.get_new_puzzlehash()
if p2_singleton_delay_time is None:
p2_singleton_delay_time = uint64(604800)
unspent_records = await wallet_state_manager.coin_store.get_unspent_coins_for_wallet(standard_wallet.wallet_id)
balance = await standard_wallet.get_confirmed_balance(unspent_records)
if balance < PoolWallet.MINIMUM_INITIAL_BALANCE:
raise ValueError("Not enough balance in main wallet to create a managed plotting pool.")
if balance < fee:
raise ValueError("Not enough balance in main wallet to create a managed plotting pool with fee {fee}.")
# Verify Parameters - raise if invalid
PoolWallet._verify_initial_target_state(initial_target_state)
spend_bundle, singleton_puzzle_hash, launcher_coin_id = await PoolWallet.generate_launcher_spend(
standard_wallet,
uint64(1),
initial_target_state,
wallet_state_manager.constants.GENESIS_CHALLENGE,
p2_singleton_delay_time,
p2_singleton_delayed_ph,
)
if spend_bundle is None:
raise ValueError("failed to generate ID for wallet")
standard_wallet_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=singleton_puzzle_hash,
amount=uint64(amount),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet_state_manager.main_wallet.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
await standard_wallet.push_transaction(standard_wallet_record)
p2_singleton_puzzle_hash: bytes32 = launcher_id_to_p2_puzzle_hash(
launcher_coin_id, p2_singleton_delay_time, p2_singleton_delayed_ph
)
return standard_wallet_record, p2_singleton_puzzle_hash, launcher_coin_id
async def sign(self, coin_spend: CoinSpend) -> SpendBundle:
async def pk_to_sk(pk: G1Element) -> PrivateKey:
owner_sk: Optional[PrivateKey] = await find_owner_sk([self.wallet_state_manager.private_key], pk)
assert owner_sk is not None
return owner_sk
return await sign_coin_spends(
[coin_spend],
pk_to_sk,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
async def generate_travel_transaction(self, fee: uint64) -> TransactionRecord:
# target_state is contained within pool_wallet_state
pool_wallet_info: PoolWalletInfo = await self.get_current_state()
spend_history = await self.get_spend_history()
last_coin_spend: CoinSpend = spend_history[-1][1]
delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(spend_history[0][1])
assert pool_wallet_info.target is not None
next_state = pool_wallet_info.target
if pool_wallet_info.current.state in [FARMING_TO_POOL]:
next_state = create_pool_state(
LEAVING_POOL,
pool_wallet_info.current.target_puzzle_hash,
pool_wallet_info.current.owner_pubkey,
pool_wallet_info.current.pool_url,
pool_wallet_info.current.relative_lock_height,
)
new_inner_puzzle = pool_state_to_inner_puzzle(
next_state,
pool_wallet_info.launcher_coin.name(),
self.wallet_state_manager.constants.GENESIS_CHALLENGE,
delayed_seconds,
delayed_puzhash,
)
new_full_puzzle: SerializedProgram = SerializedProgram.from_program(
create_full_puzzle(new_inner_puzzle, pool_wallet_info.launcher_coin.name())
)
outgoing_coin_spend, inner_puzzle = create_travel_spend(
last_coin_spend,
pool_wallet_info.launcher_coin,
pool_wallet_info.current,
next_state,
self.wallet_state_manager.constants.GENESIS_CHALLENGE,
delayed_seconds,
delayed_puzhash,
)
tip = (await self.get_tip())[1]
tip_coin = tip.coin
singleton = tip.additions()[0]
singleton_id = singleton.name()
assert outgoing_coin_spend.coin.parent_coin_info == tip_coin.name()
assert outgoing_coin_spend.coin.name() == singleton_id
assert new_inner_puzzle != inner_puzzle
if is_pool_member_inner_puzzle(inner_puzzle):
(
inner_f,
target_puzzle_hash,
p2_singleton_hash,
pubkey_as_program,
pool_reward_prefix,
escape_puzzle_hash,
) = uncurry_pool_member_inner_puzzle(inner_puzzle)
pk_bytes: bytes = bytes(pubkey_as_program.as_atom())
assert len(pk_bytes) == 48
owner_pubkey = G1Element.from_bytes(pk_bytes)
assert owner_pubkey == pool_wallet_info.current.owner_pubkey
elif is_pool_waitingroom_inner_puzzle(inner_puzzle):
(
target_puzzle_hash, # payout_puzzle_hash
relative_lock_height,
owner_pubkey,
p2_singleton_hash,
) = uncurry_pool_waitingroom_inner_puzzle(inner_puzzle)
pk_bytes = bytes(owner_pubkey.as_atom())
assert len(pk_bytes) == 48
assert owner_pubkey == pool_wallet_info.current.owner_pubkey
else:
raise RuntimeError("Invalid state")
signed_spend_bundle = await self.sign(outgoing_coin_spend)
assert signed_spend_bundle.removals()[0].puzzle_hash == singleton.puzzle_hash
assert signed_spend_bundle.removals()[0].name() == singleton.name()
assert signed_spend_bundle is not None
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=new_full_puzzle.get_tree_hash(),
amount=uint64(1),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=signed_spend_bundle,
additions=signed_spend_bundle.additions(),
removals=signed_spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=signed_spend_bundle.name(),
)
return tx_record
@staticmethod
async def generate_launcher_spend(
standard_wallet: Wallet,
amount: uint64,
initial_target_state: PoolState,
genesis_challenge: bytes32,
delay_time: uint64,
delay_ph: bytes32,
) -> Tuple[SpendBundle, bytes32, bytes32]:
"""
Creates the initial singleton, which includes spending an origin coin, the launcher, and creating a singleton
with the "pooling" inner state, which can be either self pooling or using a pool
"""
coins: Set[Coin] = await standard_wallet.select_coins(amount)
if coins is None:
raise ValueError("Not enough coins to create pool wallet")
assert len(coins) == 1
launcher_parent: Coin = coins.copy().pop()
genesis_launcher_puz: Program = SINGLETON_LAUNCHER
launcher_coin: Coin = Coin(launcher_parent.name(), genesis_launcher_puz.get_tree_hash(), amount)
escaping_inner_puzzle: Program = create_waiting_room_inner_puzzle(
initial_target_state.target_puzzle_hash,
initial_target_state.relative_lock_height,
initial_target_state.owner_pubkey,
launcher_coin.name(),
genesis_challenge,
delay_time,
delay_ph,
)
escaping_inner_puzzle_hash = escaping_inner_puzzle.get_tree_hash()
self_pooling_inner_puzzle: Program = create_pooling_inner_puzzle(
initial_target_state.target_puzzle_hash,
escaping_inner_puzzle_hash,
initial_target_state.owner_pubkey,
launcher_coin.name(),
genesis_challenge,
delay_time,
delay_ph,
)
if initial_target_state.state == SELF_POOLING:
puzzle = escaping_inner_puzzle
elif initial_target_state.state == FARMING_TO_POOL:
puzzle = self_pooling_inner_puzzle
else:
raise ValueError("Invalid initial state")
full_pooling_puzzle: Program = create_full_puzzle(puzzle, launcher_id=launcher_coin.name())
puzzle_hash: bytes32 = full_pooling_puzzle.get_tree_hash()
pool_state_bytes = Program.to([("p", bytes(initial_target_state)), ("t", delay_time), ("h", delay_ph)])
announcement_set: Set[bytes32] = set()
announcement_message = Program.to([puzzle_hash, amount, pool_state_bytes]).get_tree_hash()
announcement_set.add(Announcement(launcher_coin.name(), announcement_message).name())
create_launcher_tx_record: Optional[TransactionRecord] = await standard_wallet.generate_signed_transaction(
amount,
genesis_launcher_puz.get_tree_hash(),
uint64(0),
None,
coins,
None,
False,
announcement_set,
)
assert create_launcher_tx_record is not None and create_launcher_tx_record.spend_bundle is not None
genesis_launcher_solution: Program = Program.to([puzzle_hash, amount, pool_state_bytes])
launcher_cs: CoinSpend = CoinSpend(
launcher_coin,
SerializedProgram.from_program(genesis_launcher_puz),
SerializedProgram.from_program(genesis_launcher_solution),
)
launcher_sb: SpendBundle = SpendBundle([launcher_cs], G2Element())
# Current inner will be updated when state is verified on the blockchain
full_spend: SpendBundle = SpendBundle.aggregate([create_launcher_tx_record.spend_bundle, launcher_sb])
return full_spend, puzzle_hash, launcher_coin.name()
async def join_pool(self, target_state: PoolState, fee: uint64) -> Tuple[uint64, TransactionRecord]:
if target_state.state != FARMING_TO_POOL:
raise ValueError(f"join_pool must be called with target_state={FARMING_TO_POOL} (FARMING_TO_POOL)")
if self.target_state is not None:
raise ValueError(f"Cannot join a pool while waiting for target state: {self.target_state}")
if await self.have_unconfirmed_transaction():
raise ValueError(
"Cannot join pool due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
)
current_state: PoolWalletInfo = await self.get_current_state()
total_fee = fee
if current_state.current == target_state:
self.target_state = None
msg = f"Asked to change to current state. Target = {target_state}"
self.log.info(msg)
raise ValueError(msg)
elif current_state.current.state in [SELF_POOLING, LEAVING_POOL]:
total_fee = fee
elif current_state.current.state == FARMING_TO_POOL:
total_fee = uint64(fee * 2)
if self.target_state is not None:
raise ValueError(
f"Cannot change to state {target_state} when already having target state: {self.target_state}"
)
PoolWallet._verify_initial_target_state(target_state)
if current_state.current.state == LEAVING_POOL:
history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
last_height: uint32 = history[-1][0]
if self.wallet_state_manager.get_peak().height <= last_height + current_state.current.relative_lock_height:
raise ValueError(
f"Cannot join a pool until height {last_height + current_state.current.relative_lock_height}"
)
self.target_state = target_state
self.next_transaction_fee = fee
tx_record: TransactionRecord = await self.generate_travel_transaction(fee)
await self.wallet_state_manager.add_pending_transaction(tx_record)
return total_fee, tx_record
async def self_pool(self, fee: uint64) -> Tuple[uint64, TransactionRecord]:
if await self.have_unconfirmed_transaction():
raise ValueError(
"Cannot self pool due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
)
pool_wallet_info: PoolWalletInfo = await self.get_current_state()
if pool_wallet_info.current.state == SELF_POOLING:
raise ValueError("Attempted to self pool when already self pooling")
if self.target_state is not None:
raise ValueError(f"Cannot self pool when already having target state: {self.target_state}")
# Note the implications of getting owner_puzzlehash from our local wallet right now
# vs. having pre-arranged the target self-pooling address
owner_puzzlehash = await self.standard_wallet.get_new_puzzlehash()
owner_pubkey = pool_wallet_info.current.owner_pubkey
current_state: PoolWalletInfo = await self.get_current_state()
total_fee = uint64(fee * 2)
if current_state.current.state == LEAVING_POOL:
total_fee = fee
history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
last_height: uint32 = history[-1][0]
if self.wallet_state_manager.get_peak().height <= last_height + current_state.current.relative_lock_height:
raise ValueError(
f"Cannot self pool until height {last_height + current_state.current.relative_lock_height}"
)
self.target_state = create_pool_state(
SELF_POOLING, owner_puzzlehash, owner_pubkey, pool_url=None, relative_lock_height=uint32(0)
)
self.next_transaction_fee = fee
tx_record = await self.generate_travel_transaction(fee)
await self.wallet_state_manager.add_pending_transaction(tx_record)
return total_fee, tx_record
async def claim_pool_rewards(self, fee: uint64) -> TransactionRecord:
# Search for p2_puzzle_hash coins, and spend them with the singleton
if await self.have_unconfirmed_transaction():
raise ValueError(
"Cannot claim due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
)
unspent_coin_records: List[CoinRecord] = list(
await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.wallet_id)
)
if len(unspent_coin_records) == 0:
raise ValueError("Nothing to claim, no transactions to p2_singleton_puzzle_hash")
farming_rewards: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_farming_rewards()
coin_to_height_farmed: Dict[Coin, uint32] = {}
for tx_record in farming_rewards:
height_farmed: Optional[uint32] = tx_record.height_farmed(
self.wallet_state_manager.constants.GENESIS_CHALLENGE
)
assert height_farmed is not None
coin_to_height_farmed[tx_record.additions[0]] = height_farmed
history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
assert len(history) > 0
delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(history[0][1])
current_state: PoolWalletInfo = await self.get_current_state()
last_solution: CoinSpend = history[-1][1]
all_spends: List[CoinSpend] = []
total_amount = 0
for coin_record in unspent_coin_records:
if coin_record.coin not in coin_to_height_farmed:
continue
if len(all_spends) >= 100:
# Limit the total number of spends, so it fits into the block
break
absorb_spend: List[CoinSpend] = create_absorb_spend(
last_solution,
current_state.current,
current_state.launcher_coin,
coin_to_height_farmed[coin_record.coin],
self.wallet_state_manager.constants.GENESIS_CHALLENGE,
delayed_seconds,
delayed_puzhash,
)
last_solution = absorb_spend[0]
all_spends += absorb_spend
total_amount += coin_record.coin.amount
self.log.info(
f"Farmer coin: {coin_record.coin} {coin_record.coin.name()} {coin_to_height_farmed[coin_record.coin]}"
)
if len(all_spends) == 0:
raise ValueError("Nothing to claim, no unspent coinbase rewards")
# No signatures are required to absorb
spend_bundle: SpendBundle = SpendBundle(all_spends, G2Element())
absorb_transaction: TransactionRecord = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=current_state.current.target_puzzle_hash,
amount=uint64(total_amount),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=uint32(self.wallet_id),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
await self.wallet_state_manager.add_pending_transaction(absorb_transaction)
return absorb_transaction
async def new_peak(self, peak: BlockRecord) -> None:
# This gets called from the WalletStateManager whenever there is a new peak
pool_wallet_info: PoolWalletInfo = await self.get_current_state()
tip_height, tip_spend = await self.get_tip()
if self.target_state is None:
return
if self.target_state == pool_wallet_info.current.state:
self.target_state = None
raise ValueError("Internal error")
if (
self.target_state.state in [FARMING_TO_POOL, SELF_POOLING]
and pool_wallet_info.current.state == LEAVING_POOL
):
leave_height = tip_height + pool_wallet_info.current.relative_lock_height
curr: BlockRecord = peak
while not curr.is_transaction_block:
curr = self.wallet_state_manager.blockchain.block_record(curr.prev_hash)
self.log.info(f"Last transaction block height: {curr.height} OK to leave at height {leave_height}")
# Add some buffer (+2) to reduce chances of a reorg
if curr.height > leave_height + 2:
unconfirmed: List[
TransactionRecord
] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.wallet_id)
next_tip: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(tip_spend)
assert next_tip is not None
if any([rem.name() == next_tip.name() for tx_rec in unconfirmed for rem in tx_rec.removals]):
self.log.info("Already submitted second transaction, will not resubmit.")
return
self.log.info(f"Attempting to leave from\n{pool_wallet_info.current}\nto\n{self.target_state}")
assert self.target_state.version == POOL_PROTOCOL_VERSION
assert pool_wallet_info.current.state == LEAVING_POOL
assert self.target_state.target_puzzle_hash is not None
if self.target_state.state == SELF_POOLING:
assert self.target_state.relative_lock_height == 0
assert self.target_state.pool_url is None
elif self.target_state.state == FARMING_TO_POOL:
assert self.target_state.relative_lock_height >= self.MINIMUM_RELATIVE_LOCK_HEIGHT
assert self.target_state.pool_url is not None
tx_record = await self.generate_travel_transaction(self.next_transaction_fee)
await self.wallet_state_manager.add_pending_transaction(tx_record)
async def have_unconfirmed_transaction(self) -> bool:
unconfirmed: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
self.wallet_id
)
return len(unconfirmed) > 0
async def get_confirmed_balance(self, _=None) -> uint64:
amount: uint64 = uint64(0)
if (await self.get_current_state()).current.state == SELF_POOLING:
unspent_coin_records: List[WalletCoinRecord] = list(
await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.wallet_id)
)
for record in unspent_coin_records:
if record.coinbase:
amount = uint64(amount + record.coin.amount)
return amount
async def get_unconfirmed_balance(self, record_list=None) -> uint64:
return await self.get_confirmed_balance(record_list)
async def get_spendable_balance(self, record_list=None) -> uint64:
return await self.get_confirmed_balance(record_list)
async def get_pending_change_balance(self) -> uint64:
return uint64(0)
async def get_max_send_amount(self, record_list=None) -> uint64:
return uint64(0)
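# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Validating a target pooling state with the class-level helpers above; the
# puzzle hash and owner key below are placeholders, and create_pool_state is
# called with the same positional argument order used elsewhere in this file.
#
#   owner_pk: G1Element = ...                        # hypothetical owner public key
#   state = create_pool_state(
#       SELF_POOLING,
#       bytes32(bytes(32)),                          # placeholder target puzzle hash
#       owner_pk,
#       None,                                        # pool_url must be empty when self pooling
#       uint32(0),                                   # relative_lock_height must be 0
#   )
#   PoolWallet._verify_initial_target_state(state)   # raises ValueError if the state is invalid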
/exo_predict-1.0.27-py3-none-any.whl/exo_predict/exo_model_predict.py
import pandas as pd
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, recall_score, precision_score, precision_recall_curve
class ExoTrainer:
"""
Class that creates a model predicting whether a system has more than one planet, given a dataset that contains the following features: stellar mass, stellar temperature, stellar radius, and the discovery method of one planet.
Args:
df (pd.DataFrame): dataframe containing all relevant columns
mass_col (str): name of the column containing stellar mass
temp_col (str): name of the column containing stellar temperature
rad_col (str): name of the column containing stellar radius
discmethod (str): name of the column containing the exoplanet discovery method
pl_pnum (str): name of the column containing the number of planets in a system
test_size (float): the proportion of the data to be held out for testing
Returns (from make_exomodel):
XGBClassifier: model to be used for predicting
pd.DataFrame: held-out dataset for the model to be evaluated on
"""
def __init__(self, df, mass_col, temp_col, rad_col, discmethod, pl_pnum, test_size):
self.df = df
self.mass_col = mass_col
self.temp_col = temp_col
self.rad_col = rad_col
self.discmethod = discmethod
self.pl_pnum = pl_pnum
self.test_size = test_size
def make_exomodel(self):
"""
Trains a model that predicts whether a system hosts more than one planet, based on the following features: stellar mass, stellar temperature, stellar radius, and the (numerically encoded) discovery method.
"""
# Turn the categorical discovery method variable into a numerical variable
representation_map = {}
for category in self.df[self.discmethod].unique():
representation_map[category] = len(self.df[(self.df[self.discmethod] == category)]) / len(self.df)
self.df["pct_discmethod"] = self.df[self.discmethod].map(representation_map)
# Turn the number of planets column into 0's for 1 planet and 1 for > 1 planet
def multiple_planet_check(row):
return 1 if row[self.pl_pnum] > 1 else 0
y = self.df.apply(multiple_planet_check, axis=1)
# Select only the relevant columns to train on
x = self.df[[self.mass_col, self.temp_col, self.rad_col, "pct_discmethod"]]
# Split the dataset into training, and testing set
X_train, X_test_eval, y_train, y_test_eval = train_test_split(x, y, test_size=self.test_size)
# Further splitting the test set into a test and validation set
X_eval, X_test, y_eval, y_test = train_test_split(X_test_eval, y_test_eval, test_size=0.5)
# Create the model, use the validation set to help estimate performance
eval_set = [(X_eval, y_eval)]
model = XGBClassifier()
model.fit(X_train, y_train, eval_metric = "error", eval_set = eval_set, early_stopping_rounds = 50, verbose = False)
self.model = model
# Use the testing set to evaluate final model fit
y_pred = self.model.predict(X_test)
# Calculate metrics
accuracy = accuracy_score(y_test, y_pred)
confusion = confusion_matrix(y_test, y_pred)
recall = recall_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
precision_recall = precision_recall_curve(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
print("Confusion:" + str(confusion * 100))
print("Recall: %.2f%%" % (recall * 100.0))
print("Precision: %.2f%%" % (precision* 100.0))
return X_test_eval, y_test_eval, model
def predict_exoplanets(self, data, mass_col, temp_col, rad_col, discmethod):
"""
This function uses a model to predict the number of exoplanets that are within a given system on a test dataset.
Args:
data (pd.dataframe): the data to run the model on in order to predict exoplanets
mass_col (str): name of column containing stellar mass
temp_col (str): name of column containing stellar temperature
rad_col (str): name of column containing stellar radius
discmethod (str): name of the column containing the exoplanet discovery method
Returns:
pd.DataFrame: the dataframe with a column containing the prediction
"""
# Turn the categorical discovery method variable into a numerical variable
representation_map = {}
for category in data[discmethod].unique():
representation_map[category] = len(data[(data[discmethod] == category)]) / len(data)
data["pct_discmethod"] = data[discmethod].map(representation_map)
# Select only the columns that matter
X_data = data[[mass_col, temp_col, rad_col, "pct_discmethod"]]
# Use the model to predict the number of planets
y_pred = self.model.predict(X_data)
# Add the predictions as a column
y_pred_labeled = ["multiple planets" if pred == 1 else "single planet" for pred in y_pred]
y_pred_df = pd.DataFrame({"predictions":y_pred_labeled,"original_index":X_data.index.to_list()})
y_pred_df.set_index("original_index", inplace=True)
X_data["predictions"] = y_pred_df["predictions"]
return X_data
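# --- Hedged usage sketch (illustrative, not part of the original module) ---
# An end-to-end example assuming `exoplanets.csv` is a catalogue export whose
# columns match the strings below; the file name and every column name are
# assumptions about the caller's data, not something defined in this module.
if __name__ == "__main__":
    df = pd.read_csv("exoplanets.csv")                       # hypothetical input file
    trainer = ExoTrainer(
        df,
        mass_col="st_mass",                                   # stellar mass (assumed column name)
        temp_col="st_teff",                                   # stellar temperature (assumed column name)
        rad_col="st_rad",                                     # stellar radius (assumed column name)
        discmethod="discoverymethod",                         # discovery method (assumed column name)
        pl_pnum="sy_pnum",                                    # planets per system (assumed column name)
        test_size=0.2,
    )
    X_test_eval, y_test_eval, model = trainer.make_exomodel()
    # Predict on a fresh slice of the raw catalogue; it must still contain the
    # original discovery-method column, which make_exomodel's X_test_eval does not.
    new_systems = df.sample(10, random_state=0).copy()
    predictions = trainer.predict_exoplanets(new_systems, "st_mass", "st_teff", "st_rad", "discoverymethod")
    print(predictions["predictions"].value_counts())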
/protocol_helper-0.2.0.tar.gz/protocol_helper-0.2.0/protocol_helper/services/WeiBoH5collectionService.py
import json
from abc import ABC
from protocol_helper.exceptions import WeiBoH5SideRestrictions, DefaultException
from protocol_helper.utils import request
from protocol_helper.services import WeiBoBaseService
class WeiBoH5collectionService(WeiBoBaseService, ABC):
def __init__(self):
super(WeiBoH5collectionService, self).__init__()
def wb_h5_get_latest_news(self, uid, proxies = None,**kwargs):
"""
Fetch a user's latest posts via the Weibo H5 API.
:param uid:
:return:
"""
params = {
"uid": uid,
"luicode": 10000011,
"lfid": f"230413{uid}_-_WEIBO_SECOND_PROFILE_WEIBO",
"type": "uid",
"value": uid,
"containerid": f"107603{uid}"
}
resp = request.get('https://m.weibo.cn/api/container/getIndex', params = params, proxies = proxies,**kwargs)
dates = json.loads(resp.text)
if dates.get('ok', None) != 1:
raise WeiBoH5SideRestrictions(f'Failed to fetch data: {dates}')
return dates
def get_comments(self, mid, cookie, max_id = 0, proxies = None,**kwargs):
header = {
'authority': 'm.weibo.cn',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'sec-ch-ua': '"Chromium";v="92", " Not A;Brand";v="99", "Google Chrome";v="92"',
'accept': 'application/json, text/plain, */*',
'mweibo-pwa': '1',
'x-xsrf-token': '3914b9',
'x-requested-with': 'XMLHttpRequest',
'sec-ch-ua-mobile': '?1',
'user-agent': 'Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 ('
'KHTML, like Gecko) Chrome/92.0.4515.159 Mobile Safari/537.36',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'accept-language': 'zh-CN,zh;q=0.9',
'Cookie': cookie
}
params = (
('id', mid),
('mid', mid),
('max_id', max_id),
('max_id_type', '0'),
)
return request.get('https://m.weibo.cn/comments/hotflow', headers = header, params = params,
proxies = proxies,**kwargs).json()
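# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Instantiating the service and fetching a user's latest posts; the uid below
# is a placeholder, and this assumes WeiBoBaseService declares no abstract
# methods (this class inherits from ABC but defines none itself).
if __name__ == "__main__":
    service = WeiBoH5collectionService()
    latest = service.wb_h5_get_latest_news(uid="1234567890")   # hypothetical uid
    print(latest.get("data", {}).keys())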
/pyoxigraph-0.3.0_rc.1.tar.gz/pyoxigraph-0.3.0_rc.1/local_dependencies/oxrocksdb-sys/rocksdb/tools/advisor/advisor/db_options_parser.py
import copy
from advisor.db_log_parser import DataSource, NO_COL_FAMILY
from advisor.ini_parser import IniParser
import os
class OptionsSpecParser(IniParser):
@staticmethod
def is_new_option(line):
return '=' in line
@staticmethod
def get_section_type(line):
'''
Example section header: [TableOptions/BlockBasedTable "default"]
Here the section type returned would be
'TableOptions.BlockBasedTable'
'''
section_path = line.strip()[1:-1].split()[0]
section_type = '.'.join(section_path.split('/'))
return section_type
@staticmethod
def get_section_name(line):
# example: get_section_name('[CFOptions "default"]')
token_list = line.strip()[1:-1].split('"')
# token_list = ['CFOptions', 'default', '']
if len(token_list) < 3:
return None
return token_list[1] # return 'default'
@staticmethod
def get_section_str(section_type, section_name):
# Example:
# Case 1: get_section_str('DBOptions', NO_COL_FAMILY)
# Case 2: get_section_str('TableOptions.BlockBasedTable', 'default')
section_type = '/'.join(section_type.strip().split('.'))
# Case 1: section_type = 'DBOptions'
# Case 2: section_type = 'TableOptions/BlockBasedTable'
section_str = '[' + section_type
if section_name == NO_COL_FAMILY:
# Case 1: '[DBOptions]'
return (section_str + ']')
else:
# Case 2: '[TableOptions/BlockBasedTable "default"]'
return section_str + ' "' + section_name + '"]'
@staticmethod
def get_option_str(key, values):
option_str = key + '='
# get_option_str('db_log_dir', None), returns 'db_log_dir='
if values:
# example:
# get_option_str('max_bytes_for_level_multiplier_additional',
# [1,1,1,1,1,1,1]), returned string:
# 'max_bytes_for_level_multiplier_additional=1:1:1:1:1:1:1'
if isinstance(values, list):
for value in values:
option_str += (str(value) + ':')
option_str = option_str[:-1]
else:
# example: get_option_str('write_buffer_size', 1048576)
# returned string: 'write_buffer_size=1048576'
option_str += str(values)
return option_str
class DatabaseOptions(DataSource):
@staticmethod
def is_misc_option(option_name):
# these are miscellaneous options that are not yet supported by the
# Rocksdb options file, hence they are not prefixed with any section
# name
return '.' not in option_name
@staticmethod
def get_options_diff(opt_old, opt_new):
# type: Dict[option, Dict[col_fam, value]] X 2 ->
# Dict[option, Dict[col_fam, Tuple(old_value, new_value)]]
# note: diff should contain a tuple of values only if they are
# different from each other
options_union = set(opt_old.keys()).union(set(opt_new.keys()))
diff = {}
for opt in options_union:
diff[opt] = {}
# if option in options_union, then it must be in one of the configs
if opt not in opt_old:
for col_fam in opt_new[opt]:
diff[opt][col_fam] = (None, opt_new[opt][col_fam])
elif opt not in opt_new:
for col_fam in opt_old[opt]:
diff[opt][col_fam] = (opt_old[opt][col_fam], None)
else:
for col_fam in opt_old[opt]:
if col_fam in opt_new[opt]:
if opt_old[opt][col_fam] != opt_new[opt][col_fam]:
diff[opt][col_fam] = (
opt_old[opt][col_fam],
opt_new[opt][col_fam]
)
else:
diff[opt][col_fam] = (opt_old[opt][col_fam], None)
for col_fam in opt_new[opt]:
if col_fam in opt_old[opt]:
if opt_old[opt][col_fam] != opt_new[opt][col_fam]:
diff[opt][col_fam] = (
opt_old[opt][col_fam],
opt_new[opt][col_fam]
)
else:
diff[opt][col_fam] = (None, opt_new[opt][col_fam])
if not diff[opt]:
diff.pop(opt)
return diff
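# Illustrative example (not part of the original file) of the diff shape
# produced by get_options_diff above, using hypothetical option values:
#   opt_old = {'CFOptions.write_buffer_size': {'default': '1048576'}}
#   opt_new = {'CFOptions.write_buffer_size': {'default': '2097152'},
#              'DBOptions.max_background_jobs': {NO_COL_FAMILY: '2'}}
#   get_options_diff(opt_old, opt_new) ==
#   {'CFOptions.write_buffer_size': {'default': ('1048576', '2097152')},
#    'DBOptions.max_background_jobs': {NO_COL_FAMILY: (None, '2')}}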
def __init__(self, rocksdb_options, misc_options=None):
super().__init__(DataSource.Type.DB_OPTIONS)
# The options are stored in the following data structure:
# Dict[section_type, Dict[section_name, Dict[option_name, value]]]
self.options_dict = None
self.column_families = None
# Load the options from the given file to a dictionary.
self.load_from_source(rocksdb_options)
# Setup the miscellaneous options expected to be List[str], where each
# element in the List has the format "<option_name>=<option_value>"
# These options are the ones that are not yet supported by the Rocksdb
# OPTIONS file, so they are provided separately
self.setup_misc_options(misc_options)
def setup_misc_options(self, misc_options):
self.misc_options = {}
if misc_options:
for option_pair_str in misc_options:
option_name = option_pair_str.split('=')[0].strip()
option_value = option_pair_str.split('=')[1].strip()
self.misc_options[option_name] = option_value
def load_from_source(self, options_path):
self.options_dict = {}
with open(options_path, 'r') as db_options:
for line in db_options:
line = OptionsSpecParser.remove_trailing_comment(line)
if not line:
continue
if OptionsSpecParser.is_section_header(line):
curr_sec_type = (
OptionsSpecParser.get_section_type(line)
)
curr_sec_name = OptionsSpecParser.get_section_name(line)
if curr_sec_type not in self.options_dict:
self.options_dict[curr_sec_type] = {}
if not curr_sec_name:
curr_sec_name = NO_COL_FAMILY
self.options_dict[curr_sec_type][curr_sec_name] = {}
# example: if the line read from the Rocksdb OPTIONS file
# is [CFOptions "default"], then the section type is
# CFOptions and 'default' is the name of a column family
# that for this database, so it's added to the list of
# column families stored in this object
if curr_sec_type == 'CFOptions':
if not self.column_families:
self.column_families = []
self.column_families.append(curr_sec_name)
elif OptionsSpecParser.is_new_option(line):
key, value = OptionsSpecParser.get_key_value_pair(line)
self.options_dict[curr_sec_type][curr_sec_name][key] = (
value
)
else:
error = 'Not able to parse line in Options file.'
OptionsSpecParser.exit_with_parse_error(line, error)
def get_misc_options(self):
# these are options that are not yet supported by the Rocksdb OPTIONS
# file, hence they are provided and stored separately
return self.misc_options
def get_column_families(self):
return self.column_families
def get_all_options(self):
# This method returns all the options that are stored in this object as
# a: Dict[<sec_type>.<option_name>: Dict[col_fam, option_value]]
all_options = []
# Example: in the section header '[CFOptions "default"]' read from the
# OPTIONS file, sec_type='CFOptions'
for sec_type in self.options_dict:
for col_fam in self.options_dict[sec_type]:
for opt_name in self.options_dict[sec_type][col_fam]:
option = sec_type + '.' + opt_name
all_options.append(option)
all_options.extend(list(self.misc_options.keys()))
return self.get_options(all_options)
def get_options(self, reqd_options):
# type: List[str] -> Dict[str, Dict[str, Any]]
# List[option] -> Dict[option, Dict[col_fam, value]]
reqd_options_dict = {}
for option in reqd_options:
if DatabaseOptions.is_misc_option(option):
# the option is not prefixed by '<section_type>.' because it is
# not yet supported by the Rocksdb OPTIONS file; so it has to
# be fetched from the misc_options dictionary
if option not in self.misc_options:
continue
if option not in reqd_options_dict:
reqd_options_dict[option] = {}
reqd_options_dict[option][NO_COL_FAMILY] = (
self.misc_options[option]
)
else:
# Example: option = 'TableOptions.BlockBasedTable.block_align'
# then, sec_type = 'TableOptions.BlockBasedTable'
sec_type = '.'.join(option.split('.')[:-1])
# opt_name = 'block_align'
opt_name = option.split('.')[-1]
if sec_type not in self.options_dict:
continue
for col_fam in self.options_dict[sec_type]:
if opt_name in self.options_dict[sec_type][col_fam]:
if option not in reqd_options_dict:
reqd_options_dict[option] = {}
reqd_options_dict[option][col_fam] = (
self.options_dict[sec_type][col_fam][opt_name]
)
return reqd_options_dict
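    # Illustrative sketch (hypothetical values): given options like those loaded above,
    #   db_options.get_options(['CFOptions.write_buffer_size', 'bloom_bits'])
    # could return {'CFOptions.write_buffer_size': {'default': '1048576'},
    #               'bloom_bits': {NO_COL_FAMILY: '4'}}.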
def update_options(self, options):
# An example 'options' object looks like:
# {'DBOptions.max_background_jobs': {NO_COL_FAMILY: 2},
# 'CFOptions.write_buffer_size': {'default': 1048576, 'cf_A': 128000},
# 'bloom_bits': {NO_COL_FAMILY: 4}}
for option in options:
if DatabaseOptions.is_misc_option(option):
# this is a misc_option i.e. an option that is not yet
# supported by the Rocksdb OPTIONS file, so it is not prefixed
# by '<section_type>.' and must be stored in the separate
# misc_options dictionary
if NO_COL_FAMILY not in options[option]:
print(
'WARNING(DatabaseOptions.update_options): not ' +
'updating option ' + option + ' because it is in ' +
'misc_option format but its scope is not ' +
NO_COL_FAMILY + '. Check format of option.'
)
continue
self.misc_options[option] = options[option][NO_COL_FAMILY]
else:
sec_name = '.'.join(option.split('.')[:-1])
opt_name = option.split('.')[-1]
if sec_name not in self.options_dict:
self.options_dict[sec_name] = {}
for col_fam in options[option]:
# if the option is not already present in the dictionary,
# it will be inserted, else it will be updated to the new
# value
if col_fam not in self.options_dict[sec_name]:
self.options_dict[sec_name][col_fam] = {}
self.options_dict[sec_name][col_fam][opt_name] = (
copy.deepcopy(options[option][col_fam])
)
def generate_options_config(self, nonce):
# this method generates a Rocksdb OPTIONS file in the INI format from
# the options stored in self.options_dict
this_path = os.path.abspath(os.path.dirname(__file__))
file_name = '../temp/OPTIONS_' + str(nonce) + '.tmp'
file_path = os.path.join(this_path, file_name)
with open(file_path, 'w') as fp:
for section in self.options_dict:
for col_fam in self.options_dict[section]:
fp.write(
OptionsSpecParser.get_section_str(section, col_fam) +
'\n'
)
for option in self.options_dict[section][col_fam]:
values = self.options_dict[section][col_fam][option]
fp.write(
OptionsSpecParser.get_option_str(option, values) +
'\n'
)
fp.write('\n')
return file_path
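    # Illustrative sketch (hypothetical nonce): generate_options_config(12345) writes
    # ../temp/OPTIONS_12345.tmp (relative to this file), with one section header per
    # (section, column family) pair followed by its options, e.g.
    #   [CFOptions "default"]
    #     write_buffer_size=1048576
    # Note that misc_options are not written, since they are not part of the OPTIONS
    # file format; the exact option formatting is delegated to OptionsSpecParser.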
def check_and_trigger_conditions(self, conditions):
for cond in conditions:
reqd_options_dict = self.get_options(cond.options)
# This contains the indices of options that are specific to some
# column family and are not database-wide options.
incomplete_option_ix = []
options = []
missing_reqd_option = False
for ix, option in enumerate(cond.options):
if option not in reqd_options_dict:
print(
'WARNING(DatabaseOptions.check_and_trigger): ' +
'skipping condition ' + cond.name + ' because it '
'requires option ' + option + ' but this option is' +
' not available'
)
missing_reqd_option = True
break # required option is absent
if NO_COL_FAMILY in reqd_options_dict[option]:
options.append(reqd_options_dict[option][NO_COL_FAMILY])
else:
options.append(None)
incomplete_option_ix.append(ix)
if missing_reqd_option:
continue
# if all the options are database-wide options
if not incomplete_option_ix:
try:
if eval(cond.eval_expr):
cond.set_trigger({NO_COL_FAMILY: options})
except Exception as e:
print(
'WARNING(DatabaseOptions) check_and_trigger:' + str(e)
)
continue
# for all the options that are not database-wide, we look for their
# values specific to column families
col_fam_options_dict = {}
for col_fam in self.column_families:
present = True
for ix in incomplete_option_ix:
option = cond.options[ix]
if col_fam not in reqd_options_dict[option]:
present = False
break
options[ix] = reqd_options_dict[option][col_fam]
if present:
try:
if eval(cond.eval_expr):
col_fam_options_dict[col_fam] = (
copy.deepcopy(options)
)
except Exception as e:
print(
'WARNING(DatabaseOptions) check_and_trigger: ' +
str(e)
)
# Trigger for an OptionCondition object is of the form:
# Dict[col_fam_name: List[option_value]]
# where col_fam_name is the name of a column family for which
# 'eval_expr' evaluated to True and List[option_value] is the list
# of values of the options specified in the condition's 'options'
# field
if col_fam_options_dict:
cond.set_trigger(col_fam_options_dict)
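    # Illustrative sketch (hypothetical condition): for a condition with
    # options=['CFOptions.write_buffer_size'] and eval_expr='int(options[0]) > 1000000',
    # the trigger set on the condition could be {'default': ['2097152'], 'cf_A': ['4194304']},
    # i.e. one entry per column family for which eval_expr evaluated to True.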
|
PypiClean
|
/monk_keras_cuda92-0.0.1-py3-none-any.whl/monk/pytorch/finetune/level_4_evaluation_base.py
|
from monk.pytorch.finetune.imports import *
from monk.system.imports import *
from monk.pytorch.finetune.level_3_training_base import finetune_training
class finetune_evaluation(finetune_training):
'''
    Base class for external validation and inferencing
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
@accepts("self", verbose=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def set_evaluation_final(self):
'''
Main function for external validation post training
Args:
None
Returns:
float: Accuracy in percentage
dict: Class based accuracy in percentage
'''
self.custom_print("Testing");
self.system_dict["testing"]["status"] = False;
if(self.system_dict["training"]["settings"]["display_progress_realtime"] and self.system_dict["verbose"]):
pbar=tqdm(total=len(self.system_dict["local"]["data_loaders"]["test"]));
running_corrects = 0
class_dict = {};
for i in range(len(self.system_dict["dataset"]["params"]["classes"])):
class_dict[self.system_dict["dataset"]["params"]["classes"][i]] = {};
class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_images"] = 0;
class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_correct"] = 0;
for inputs, labels in self.system_dict["local"]["data_loaders"]["test"]:
if(self.system_dict["training"]["settings"]["display_progress_realtime"] and self.system_dict["verbose"]):
pbar.update();
inputs = inputs.to(self.system_dict["local"]["device"]);
labels = labels.to(self.system_dict["local"]["device"]);
outputs = self.system_dict["local"]["model"](inputs)
_, preds = torch.max(outputs, 1)
label = int(labels.data.cpu().numpy())
pred = int(preds.data.cpu().numpy())
class_dict[self.system_dict["dataset"]["params"]["classes"][label]]["num_images"] += 1;
if(label == pred):
class_dict[self.system_dict["dataset"]["params"]["classes"][label]]["num_correct"] += 1;
running_corrects += torch.sum(preds == labels.data)
accuracy = running_corrects.double() / len(self.system_dict["local"]["data_loaders"]["test"].dataset)
self.custom_print("");
self.custom_print(" Result");
self.custom_print(" class based accuracies");
for i in range(len(self.system_dict["dataset"]["params"]["classes"])):
self.custom_print(" {}. {} - {} %".format(i, self.system_dict["dataset"]["params"]["classes"][i],
class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_correct"]/class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_images"]*100));
class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["accuracy(%)"] = class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_correct"]/class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_images"]*100;
self.custom_print(" total images: {}".format(len(self.system_dict["local"]["data_loaders"]["test"])));
self.custom_print(" num correct predictions: {}".format(int(running_corrects.cpu().numpy())));
self.custom_print(" Average accuracy (%): {}".format(accuracy.cpu().numpy()*100));
self.system_dict["testing"]["num_images"] = len(self.system_dict["local"]["data_loaders"]["test"]);
self.system_dict["testing"]["num_correct_predictions"] = int(running_corrects.cpu().numpy());
self.system_dict["testing"]["percentage_accuracy"] = accuracy.cpu().numpy()*100;
self.system_dict["testing"]["class_accuracy"] = class_dict
self.system_dict["testing"]["status"] = True;
self.custom_print("");
return accuracy.cpu().numpy()*100, class_dict;
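    # Illustrative usage sketch (hypothetical experiment state): on a trained
    # finetune_evaluation object `gtf` with a test loader attached,
    #   accuracy, class_dict = gtf.set_evaluation_final();
    # returns the overall accuracy in percent and a per-class dict such as
    #   {'cats': {'num_images': 50, 'num_correct': 47, 'accuracy(%)': 94.0}, ...}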
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def set_evaluation_final_multiple(self):
'''
Main function for external validation post training for multi-label data
Args:
None
Returns:
float: Accuracy in percentage
dict: Class based accuracy in percentage
'''
self.custom_print("Testing");
self.system_dict["testing"]["status"] = False;
if(self.system_dict["training"]["settings"]["display_progress_realtime"] and self.system_dict["verbose"]):
pbar=tqdm(total=len(self.system_dict["local"]["data_loaders"]["test"]));
running_corrects = 0
total_labels = 0
class_dict = {};
for i in range(len(self.system_dict["dataset"]["params"]["classes"])):
class_dict[self.system_dict["dataset"]["params"]["classes"][i]] = {};
class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_labels"] = 0;
class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_correct"] = 0;
for inputs, labels in self.system_dict["local"]["data_loaders"]["test"]:
if(self.system_dict["training"]["settings"]["display_progress_realtime"] and self.system_dict["verbose"]):
pbar.update();
inputs = inputs.to(self.system_dict["local"]["device"]);
labels = labels.to(self.system_dict["local"]["device"]);
labels = labels.cpu().detach().numpy()[0]
outputs = self.system_dict["local"]["model"](inputs);
list_classes = [];
list_labels = [];
raw_scores = outputs.cpu().detach().numpy()[0];
for i in range(len(raw_scores)):
prob = logistic.cdf(raw_scores[i])
if(prob > 0.5):
list_classes.append(self.system_dict["dataset"]["params"]["classes"][i])
for i in range(len(labels)):
if(labels[i]):
list_labels.append(self.system_dict["dataset"]["params"]["classes"][i])
for i in range(len(list_labels)):
actual = list_labels[i];
if actual in list_classes:
correct = True;
else:
correct = False;
index = self.system_dict["dataset"]["params"]["classes"].index(actual);
class_dict[self.system_dict["dataset"]["params"]["classes"][index]]["num_labels"] += 1;
total_labels += 1;
if(correct):
class_dict[self.system_dict["dataset"]["params"]["classes"][index]]["num_correct"] += 1;
running_corrects += 1;
accuracy = running_corrects/total_labels;
self.custom_print("");
self.custom_print(" Result");
self.custom_print(" class based accuracies");
for i in range(len(self.system_dict["dataset"]["params"]["classes"])):
self.custom_print(" {}. {} - {} %".format(i, self.system_dict["dataset"]["params"]["classes"][i],
class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_correct"]/class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_labels"]*100));
class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["accuracy(%)"] = class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_correct"]/class_dict[self.system_dict["dataset"]["params"]["classes"][i]]["num_labels"]*100;
self.custom_print(" total labels: {}".format(total_labels));
self.custom_print(" num correct predictions: {}".format(running_corrects));
self.custom_print(" Average accuracy (%): {}".format(accuracy*100));
self.system_dict["testing"]["class_accuracy"] = class_dict
self.system_dict["testing"]["status"] = True;
self.custom_print("");
return accuracy*100, class_dict;
###############################################################################################################################################
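    # Illustrative sketch of the multi-label scoring above (hypothetical values): each raw
    # output score is squashed with logistic.cdf (the sigmoid), e.g. a raw score of 1.2
    # gives a probability of about 0.77 > 0.5, so that class counts as predicted;
    # accuracy is then the fraction of ground-truth labels that also appear among the
    # predicted classes.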
###############################################################################################################################################
@accepts("self", img_name=[str, bool], img_dir=[str, bool], return_raw=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def set_prediction_final(self, img_name=False, img_dir=False, return_raw=False):
'''
Main function for external inferencing on single image or folder of images post training
Args:
img_name (str): path to image
            img_dir (str): path to a folder containing images.
(Optional)
return_raw (bool): If True, then output dictionary contains image probability for every class in the set.
                                Else, only the most probable class and its score are returned.
Returns:
            dict (for a single image) or list of dict (for a folder): Inference output with
1) Image name
2) Predicted class
3) Predicted score
'''
self.custom_print("Prediction");
if(not self.system_dict["dataset"]["params"]["input_size"]):
msg = "Input Size not set for experiment.\n";
msg += "Tip: Use update_input_size";
raise ConstraintError(msg);
self.system_dict = set_transform_test(self.system_dict);
if(not self.system_dict["dataset"]["params"]["classes"]):
msg = "Class information unavailabe.\n";
msg += "Labels returned - Indexes instead of classes";
ConstraintWarning(msg);
if(img_name):
self.custom_print(" Image name: {}".format(img_name));
label, score, raw_output = process_single(img_name, return_raw, self.system_dict);
self.custom_print(" Predicted class: {}".format(label));
self.custom_print(" Predicted score: {}".format(score));
tmp = {};
tmp["img_name"] = img_name;
tmp["predicted_class"] = label;
tmp["score"] = score;
if(return_raw):
tmp["raw"] = raw_output;
self.custom_print("");
return tmp;
if(img_dir):
output = [];
self.custom_print(" Dir path: {}".format(img_dir));
img_list = os.listdir(img_dir);
self.custom_print(" Total Images: {}".format(len(img_list)));
self.custom_print("Processing Images");
if(self.system_dict["verbose"]):
pbar = tqdm(total=len(img_list));
for i in range(len(img_list)):
if(self.system_dict["verbose"]):
pbar.update();
img_name = img_dir + "/" + img_list[i];
label, score, raw_output = process_single(img_name, return_raw, self.system_dict);
tmp = {};
tmp["img_name"] = img_list[i];
tmp["predicted_class"] = label;
tmp["score"] = score;
if(return_raw):
tmp["raw"] = raw_output;
output.append(tmp);
self.custom_print("");
return output
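    # Illustrative usage sketch (hypothetical paths and values): after training,
    #   out = gtf.set_prediction_final(img_name="test/sample.jpg");
    # returns a dict like {'img_name': 'test/sample.jpg', 'predicted_class': 'cats', 'score': 0.91},
    # while passing img_dir instead returns a list with one such dict per image.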
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", img_name=[str, bool], img_dir=[str, bool], return_raw=bool, img_thresh=float, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def set_prediction_final_multiple(self, img_name=False, img_dir=False, return_raw=False, img_thresh=0.5):
'''
Main function for external inferencing on single image or folder of images post training
- For multi-label image classification
Args:
img_name (str): path to image
            img_dir (str): path to a folder containing images.
(Optional)
return_raw (bool): If True, then output dictionary contains image probability for every class in the set.
                                Else, only the most probable class and its score are returned.
            img_thresh (float): Probability threshold above which a class is predicted in multi-label classification.
Returns:
            dict (for a single image) or list of dict (for a folder): Inference output with
1) Image name
2) Predicted classes list
3) Predicted score
'''
self.custom_print("Prediction");
if(not self.system_dict["dataset"]["params"]["input_size"]):
msg = "Input Size not set for experiment.\n";
msg += "Tip: Use update_input_size";
raise ConstraintError(msg);
self.system_dict = set_transform_test(self.system_dict);
if(not self.system_dict["dataset"]["params"]["classes"]):
msg = "Class information unavailabe.\n";
msg += "Labels returned - Indexes instead of classes";
ConstraintWarning(msg);
if(img_name):
self.custom_print(" Image name: {}".format(img_name));
labels, scores, raw_output = process_multi(img_name, return_raw, img_thresh, self.system_dict);
self.custom_print(" Predicted classes: {}".format(labels));
self.custom_print(" Predicted scorees: {}".format(scores));
tmp = {};
tmp["img_name"] = img_name;
tmp["predicted_classes"] = labels;
tmp["scores"] = scores;
if(return_raw):
tmp["raw"] = raw_output;
self.custom_print("");
return tmp;
if(img_dir):
output = [];
self.custom_print(" Dir path: {}".format(img_dir));
img_list = os.listdir(img_dir);
self.custom_print(" Total Images: {}".format(len(img_list)));
self.custom_print("Processing Images");
if(self.system_dict["verbose"]):
pbar = tqdm(total=len(img_list));
for i in range(len(img_list)):
if(self.system_dict["verbose"]):
pbar.update();
img_name = img_dir + "/" + img_list[i];
labels, scores, raw_output = process_multi(img_name, return_raw, img_thresh, self.system_dict);
tmp = {};
tmp["img_name"] = img_list[i];
tmp["predicted_classes"] = labels;
tmp["scores"] = scores;
if(return_raw):
tmp["raw"] = raw_output;
output.append(tmp);
self.custom_print("");
return output
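    # Illustrative usage sketch (hypothetical threshold and values):
    #   out = gtf.set_prediction_final_multiple(img_name="test/sample.jpg", img_thresh=0.5);
    # returns every class whose sigmoid probability exceeds img_thresh, e.g.
    #   {'img_name': 'test/sample.jpg', 'predicted_classes': ['cats', 'outdoor'], 'scores': [0.91, 0.64]}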
###############################################################################################################################################
|
PypiClean
|
/django_iportfolio_ds-0.0.6-py3-none-any.whl/iportfolio/static/iportfolio/vendor/glightbox/js/glightbox.js
|
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
(global = global || self, global.GLightbox = factory());
}(this, (function () { 'use strict';
function _typeof(obj) {
"@babel/helpers - typeof";
if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
_typeof = function (obj) {
return typeof obj;
};
} else {
_typeof = function (obj) {
return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
};
}
return _typeof(obj);
}
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
function _defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
function _createClass(Constructor, protoProps, staticProps) {
if (protoProps) _defineProperties(Constructor.prototype, protoProps);
if (staticProps) _defineProperties(Constructor, staticProps);
return Constructor;
}
var uid = Date.now();
function extend() {
var extended = {};
var deep = true;
var i = 0;
var length = arguments.length;
if (Object.prototype.toString.call(arguments[0]) === '[object Boolean]') {
deep = arguments[0];
i++;
}
var merge = function merge(obj) {
for (var prop in obj) {
if (Object.prototype.hasOwnProperty.call(obj, prop)) {
if (deep && Object.prototype.toString.call(obj[prop]) === '[object Object]') {
extended[prop] = extend(true, extended[prop], obj[prop]);
} else {
extended[prop] = obj[prop];
}
}
}
};
for (; i < length; i++) {
var obj = arguments[i];
merge(obj);
}
return extended;
}
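    // Illustrative sketch (hypothetical values): extend({a: 1, nested: {x: 1}}, {nested: {y: 2}})
    // returns {a: 1, nested: {x: 1, y: 2}} because plain nested objects are merged recursively;
    // passing `false` as the first argument switches to a shallow merge.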
function each(collection, callback) {
if (isNode(collection) || collection === window || collection === document) {
collection = [collection];
}
if (!isArrayLike(collection) && !isObject(collection)) {
collection = [collection];
}
if (size(collection) == 0) {
return;
}
if (isArrayLike(collection) && !isObject(collection)) {
var l = collection.length,
i = 0;
for (; i < l; i++) {
if (callback.call(collection[i], collection[i], i, collection) === false) {
break;
}
}
} else if (isObject(collection)) {
for (var key in collection) {
if (has(collection, key)) {
if (callback.call(collection[key], collection[key], key, collection) === false) {
break;
}
}
}
}
}
function getNodeEvents(node) {
var name = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null;
var fn = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : null;
var cache = node[uid] = node[uid] || [];
var data = {
all: cache,
evt: null,
found: null
};
if (name && fn && size(cache) > 0) {
each(cache, function (cl, i) {
if (cl.eventName == name && cl.fn.toString() == fn.toString()) {
data.found = true;
data.evt = i;
return false;
}
});
}
return data;
}
function addEvent(eventName) {
var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {},
onElement = _ref.onElement,
withCallback = _ref.withCallback,
_ref$avoidDuplicate = _ref.avoidDuplicate,
avoidDuplicate = _ref$avoidDuplicate === void 0 ? true : _ref$avoidDuplicate,
_ref$once = _ref.once,
once = _ref$once === void 0 ? false : _ref$once,
_ref$useCapture = _ref.useCapture,
useCapture = _ref$useCapture === void 0 ? false : _ref$useCapture;
var thisArg = arguments.length > 2 ? arguments[2] : undefined;
var element = onElement || [];
if (isString(element)) {
element = document.querySelectorAll(element);
}
function handler(event) {
if (isFunction(withCallback)) {
withCallback.call(thisArg, event, this);
}
if (once) {
handler.destroy();
}
}
handler.destroy = function () {
each(element, function (el) {
var events = getNodeEvents(el, eventName, handler);
if (events.found) {
events.all.splice(events.evt, 1);
}
if (el.removeEventListener) {
el.removeEventListener(eventName, handler, useCapture);
}
});
};
each(element, function (el) {
var events = getNodeEvents(el, eventName, handler);
if (el.addEventListener && avoidDuplicate && !events.found || !avoidDuplicate) {
el.addEventListener(eventName, handler, useCapture);
events.all.push({
eventName: eventName,
fn: handler
});
}
});
return handler;
}
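    // Illustrative usage sketch (hypothetical selector and handler):
    //   var h = addEvent('click', { onElement: '.gbtn', withCallback: function (e, el) { console.log(el); } });
    //   h.destroy(); // later removes the listener from every matched element
    // avoidDuplicate (default true) skips elements that already have an identical handler registered.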
function addClass(node, name) {
each(name.split(' '), function (cl) {
return node.classList.add(cl);
});
}
function removeClass(node, name) {
each(name.split(' '), function (cl) {
return node.classList.remove(cl);
});
}
function hasClass(node, name) {
return node.classList.contains(name);
}
function closest(elem, selector) {
while (elem !== document.body) {
elem = elem.parentElement;
if (!elem) {
return false;
}
var matches = typeof elem.matches == 'function' ? elem.matches(selector) : elem.msMatchesSelector(selector);
if (matches) {
return elem;
}
}
}
function animateElement(element) {
var animation = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : '';
var callback = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
if (!element || animation === '') {
return false;
}
if (animation === 'none') {
if (isFunction(callback)) {
callback();
}
return false;
}
var animationEnd = whichAnimationEvent();
var animationNames = animation.split(' ');
each(animationNames, function (name) {
addClass(element, 'g' + name);
});
addEvent(animationEnd, {
onElement: element,
avoidDuplicate: false,
once: true,
withCallback: function withCallback(event, target) {
each(animationNames, function (name) {
removeClass(target, 'g' + name);
});
if (isFunction(callback)) {
callback();
}
}
});
}
function cssTransform(node) {
var translate = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : '';
if (translate === '') {
node.style.webkitTransform = '';
node.style.MozTransform = '';
node.style.msTransform = '';
node.style.OTransform = '';
node.style.transform = '';
return false;
}
node.style.webkitTransform = translate;
node.style.MozTransform = translate;
node.style.msTransform = translate;
node.style.OTransform = translate;
node.style.transform = translate;
}
function show(element) {
element.style.display = 'block';
}
function hide(element) {
element.style.display = 'none';
}
function createHTML(htmlStr) {
var frag = document.createDocumentFragment(),
temp = document.createElement('div');
temp.innerHTML = htmlStr;
while (temp.firstChild) {
frag.appendChild(temp.firstChild);
}
return frag;
}
function windowSize() {
return {
width: window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth,
height: window.innerHeight || document.documentElement.clientHeight || document.body.clientHeight
};
}
function whichAnimationEvent() {
var t,
el = document.createElement('fakeelement');
var animations = {
animation: 'animationend',
OAnimation: 'oAnimationEnd',
MozAnimation: 'animationend',
WebkitAnimation: 'webkitAnimationEnd'
};
for (t in animations) {
if (el.style[t] !== undefined) {
return animations[t];
}
}
}
function whichTransitionEvent() {
var t,
el = document.createElement('fakeelement');
var transitions = {
transition: 'transitionend',
OTransition: 'oTransitionEnd',
MozTransition: 'transitionend',
WebkitTransition: 'webkitTransitionEnd'
};
for (t in transitions) {
if (el.style[t] !== undefined) {
return transitions[t];
}
}
}
function createIframe(config) {
var url = config.url,
allow = config.allow,
callback = config.callback,
appendTo = config.appendTo;
var iframe = document.createElement('iframe');
iframe.className = 'vimeo-video gvideo';
iframe.src = url;
iframe.style.width = '100%';
iframe.style.height = '100%';
if (allow) {
iframe.setAttribute('allow', allow);
}
iframe.onload = function () {
iframe.onload = null;
addClass(iframe, 'node-ready');
if (isFunction(callback)) {
callback();
}
};
if (appendTo) {
appendTo.appendChild(iframe);
}
return iframe;
}
function waitUntil(check, onComplete, delay, timeout) {
if (check()) {
onComplete();
return;
}
if (!delay) {
delay = 100;
}
var timeoutPointer;
var intervalPointer = setInterval(function () {
if (!check()) {
return;
}
clearInterval(intervalPointer);
if (timeoutPointer) {
clearTimeout(timeoutPointer);
}
onComplete();
}, delay);
if (timeout) {
timeoutPointer = setTimeout(function () {
clearInterval(intervalPointer);
}, timeout);
}
}
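    // Illustrative usage sketch (polling for a global that another script defines):
    //   waitUntil(
    //     function () { return typeof window.Plyr !== 'undefined'; },
    //     function () { console.log('Plyr is ready'); },
    //     100,  // poll every 100ms (the default when omitted)
    //     5000  // stop polling after 5s even if the check never passes
    //   );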
function injectAssets(url, waitFor, callback) {
if (isNil(url)) {
console.error('Inject assets error');
return;
}
if (isFunction(waitFor)) {
callback = waitFor;
waitFor = false;
}
if (isString(waitFor) && waitFor in window) {
if (isFunction(callback)) {
callback();
}
return;
}
var found;
if (url.indexOf('.css') !== -1) {
found = document.querySelectorAll('link[href="' + url + '"]');
if (found && found.length > 0) {
if (isFunction(callback)) {
callback();
}
return;
}
var head = document.getElementsByTagName('head')[0];
var headStyles = head.querySelectorAll('link[rel="stylesheet"]');
var link = document.createElement('link');
link.rel = 'stylesheet';
link.type = 'text/css';
link.href = url;
link.media = 'all';
if (headStyles) {
head.insertBefore(link, headStyles[0]);
} else {
head.appendChild(link);
}
if (isFunction(callback)) {
callback();
}
return;
}
found = document.querySelectorAll('script[src="' + url + '"]');
if (found && found.length > 0) {
if (isFunction(callback)) {
if (isString(waitFor)) {
waitUntil(function () {
return typeof window[waitFor] !== 'undefined';
}, function () {
callback();
});
return false;
}
callback();
}
return;
}
var script = document.createElement('script');
script.type = 'text/javascript';
script.src = url;
script.onload = function () {
if (isFunction(callback)) {
if (isString(waitFor)) {
waitUntil(function () {
return typeof window[waitFor] !== 'undefined';
}, function () {
callback();
});
return false;
}
callback();
}
};
document.body.appendChild(script);
}
function isMobile() {
return 'navigator' in window && window.navigator.userAgent.match(/(iPad)|(iPhone)|(iPod)|(Android)|(PlayBook)|(BB10)|(BlackBerry)|(Opera Mini)|(IEMobile)|(webOS)|(MeeGo)/i);
}
function isTouch() {
return isMobile() !== null || document.createTouch !== undefined || 'ontouchstart' in window || 'onmsgesturechange' in window || navigator.msMaxTouchPoints;
}
function isFunction(f) {
return typeof f === 'function';
}
function isString(s) {
return typeof s === 'string';
}
function isNode(el) {
return !!(el && el.nodeType && el.nodeType == 1);
}
function isArray(ar) {
return Array.isArray(ar);
}
function isArrayLike(ar) {
return ar && ar.length && isFinite(ar.length);
}
function isObject(o) {
var type = _typeof(o);
return type === 'object' && o != null && !isFunction(o) && !isArray(o);
}
function isNil(o) {
return o == null;
}
function has(obj, key) {
return obj !== null && hasOwnProperty.call(obj, key);
}
function size(o) {
if (isObject(o)) {
if (o.keys) {
return o.keys().length;
}
var l = 0;
for (var k in o) {
if (has(o, k)) {
l++;
}
}
return l;
} else {
return o.length;
}
}
function isNumber(n) {
return !isNaN(parseFloat(n)) && isFinite(n);
}
function getNextFocusElement() {
var current = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : -1;
var btns = document.querySelectorAll('.gbtn[data-taborder]:not(.disabled)');
if (!btns.length) {
return false;
}
if (btns.length == 1) {
return btns[0];
}
if (typeof current == 'string') {
current = parseInt(current);
}
var orders = [];
each(btns, function (btn) {
orders.push(btn.getAttribute('data-taborder'));
});
var highestOrder = Math.max.apply(Math, orders.map(function (order) {
return parseInt(order);
}));
var newIndex = current < 0 ? 1 : current + 1;
if (newIndex > highestOrder) {
newIndex = '1';
}
var nextOrders = orders.filter(function (el) {
return el >= parseInt(newIndex);
});
var nextFocus = nextOrders.sort()[0];
return document.querySelector(".gbtn[data-taborder=\"".concat(nextFocus, "\"]"));
}
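    // Illustrative sketch (hypothetical markup): with three enabled buttons carrying
    // data-taborder="1", "2" and "3", getNextFocusElement(2) returns the button with
    // data-taborder="3", and getNextFocusElement(3) wraps around to data-taborder="1";
    // keyboardNavigation() below uses this to cycle focus with the Tab key.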
function keyboardNavigation(instance) {
if (instance.events.hasOwnProperty('keyboard')) {
return false;
}
instance.events['keyboard'] = addEvent('keydown', {
onElement: window,
withCallback: function withCallback(event, target) {
event = event || window.event;
var key = event.keyCode;
if (key == 9) {
var focusedButton = document.querySelector('.gbtn.focused');
if (!focusedButton) {
var activeElement = document.activeElement && document.activeElement.nodeName ? document.activeElement.nodeName.toLocaleLowerCase() : false;
if (activeElement == 'input' || activeElement == 'textarea' || activeElement == 'button') {
return;
}
}
event.preventDefault();
var btns = document.querySelectorAll('.gbtn[data-taborder]');
if (!btns || btns.length <= 0) {
return;
}
if (!focusedButton) {
var first = getNextFocusElement();
if (first) {
first.focus();
addClass(first, 'focused');
}
return;
}
var currentFocusOrder = focusedButton.getAttribute('data-taborder');
var nextFocus = getNextFocusElement(currentFocusOrder);
removeClass(focusedButton, 'focused');
if (nextFocus) {
nextFocus.focus();
addClass(nextFocus, 'focused');
}
}
if (key == 39) {
instance.nextSlide();
}
if (key == 37) {
instance.prevSlide();
}
if (key == 27) {
instance.close();
}
}
});
}
function getLen(v) {
return Math.sqrt(v.x * v.x + v.y * v.y);
}
function dot(v1, v2) {
return v1.x * v2.x + v1.y * v2.y;
}
function getAngle(v1, v2) {
var mr = getLen(v1) * getLen(v2);
if (mr === 0) {
return 0;
}
var r = dot(v1, v2) / mr;
if (r > 1) {
r = 1;
}
return Math.acos(r);
}
function cross(v1, v2) {
return v1.x * v2.y - v2.x * v1.y;
}
function getRotateAngle(v1, v2) {
var angle = getAngle(v1, v2);
if (cross(v1, v2) > 0) {
angle *= -1;
}
return angle * 180 / Math.PI;
}
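    // Illustrative worked example: for v1 = {x: 1, y: 0} and v2 = {x: 0, y: 1},
    // getAngle returns acos(0) = PI/2; cross(v1, v2) = 1 > 0, so the sign flips and
    // getRotateAngle returns -90 (degrees). TouchEvents.move below uses this to set
    // evt.angle for the rotate handler during two-finger gestures.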
var EventsHandlerAdmin = function () {
function EventsHandlerAdmin(el) {
_classCallCheck(this, EventsHandlerAdmin);
this.handlers = [];
this.el = el;
}
_createClass(EventsHandlerAdmin, [{
key: "add",
value: function add(handler) {
this.handlers.push(handler);
}
}, {
key: "del",
value: function del(handler) {
if (!handler) {
this.handlers = [];
}
for (var i = this.handlers.length; i >= 0; i--) {
if (this.handlers[i] === handler) {
this.handlers.splice(i, 1);
}
}
}
}, {
key: "dispatch",
value: function dispatch() {
for (var i = 0, len = this.handlers.length; i < len; i++) {
var handler = this.handlers[i];
if (typeof handler === 'function') {
handler.apply(this.el, arguments);
}
}
}
}]);
return EventsHandlerAdmin;
}();
function wrapFunc(el, handler) {
var EventshandlerAdmin = new EventsHandlerAdmin(el);
EventshandlerAdmin.add(handler);
return EventshandlerAdmin;
}
var TouchEvents = function () {
function TouchEvents(el, option) {
_classCallCheck(this, TouchEvents);
this.element = typeof el == 'string' ? document.querySelector(el) : el;
this.start = this.start.bind(this);
this.move = this.move.bind(this);
this.end = this.end.bind(this);
this.cancel = this.cancel.bind(this);
this.element.addEventListener('touchstart', this.start, false);
this.element.addEventListener('touchmove', this.move, false);
this.element.addEventListener('touchend', this.end, false);
this.element.addEventListener('touchcancel', this.cancel, false);
this.preV = {
x: null,
y: null
};
this.pinchStartLen = null;
this.zoom = 1;
this.isDoubleTap = false;
var noop = function noop() {};
this.rotate = wrapFunc(this.element, option.rotate || noop);
this.touchStart = wrapFunc(this.element, option.touchStart || noop);
this.multipointStart = wrapFunc(this.element, option.multipointStart || noop);
this.multipointEnd = wrapFunc(this.element, option.multipointEnd || noop);
this.pinch = wrapFunc(this.element, option.pinch || noop);
this.swipe = wrapFunc(this.element, option.swipe || noop);
this.tap = wrapFunc(this.element, option.tap || noop);
this.doubleTap = wrapFunc(this.element, option.doubleTap || noop);
this.longTap = wrapFunc(this.element, option.longTap || noop);
this.singleTap = wrapFunc(this.element, option.singleTap || noop);
this.pressMove = wrapFunc(this.element, option.pressMove || noop);
this.twoFingerPressMove = wrapFunc(this.element, option.twoFingerPressMove || noop);
this.touchMove = wrapFunc(this.element, option.touchMove || noop);
this.touchEnd = wrapFunc(this.element, option.touchEnd || noop);
this.touchCancel = wrapFunc(this.element, option.touchCancel || noop);
this.translateContainer = this.element;
this._cancelAllHandler = this.cancelAll.bind(this);
window.addEventListener('scroll', this._cancelAllHandler);
this.delta = null;
this.last = null;
this.now = null;
this.tapTimeout = null;
this.singleTapTimeout = null;
this.longTapTimeout = null;
this.swipeTimeout = null;
this.x1 = this.x2 = this.y1 = this.y2 = null;
this.preTapPosition = {
x: null,
y: null
};
}
_createClass(TouchEvents, [{
key: "start",
value: function start(evt) {
if (!evt.touches) {
return;
}
var ignoreDragFor = ['a', 'button', 'input'];
if (evt.target && evt.target.nodeName && ignoreDragFor.indexOf(evt.target.nodeName.toLowerCase()) >= 0) {
console.log('ignore drag for this touched element', evt.target.nodeName.toLowerCase());
return;
}
this.now = Date.now();
this.x1 = evt.touches[0].pageX;
this.y1 = evt.touches[0].pageY;
this.delta = this.now - (this.last || this.now);
this.touchStart.dispatch(evt, this.element);
if (this.preTapPosition.x !== null) {
this.isDoubleTap = this.delta > 0 && this.delta <= 250 && Math.abs(this.preTapPosition.x - this.x1) < 30 && Math.abs(this.preTapPosition.y - this.y1) < 30;
if (this.isDoubleTap) {
clearTimeout(this.singleTapTimeout);
}
}
this.preTapPosition.x = this.x1;
this.preTapPosition.y = this.y1;
this.last = this.now;
var preV = this.preV,
len = evt.touches.length;
if (len > 1) {
this._cancelLongTap();
this._cancelSingleTap();
var v = {
x: evt.touches[1].pageX - this.x1,
y: evt.touches[1].pageY - this.y1
};
preV.x = v.x;
preV.y = v.y;
this.pinchStartLen = getLen(preV);
this.multipointStart.dispatch(evt, this.element);
}
this._preventTap = false;
this.longTapTimeout = setTimeout(function () {
this.longTap.dispatch(evt, this.element);
this._preventTap = true;
}.bind(this), 750);
}
}, {
key: "move",
value: function move(evt) {
if (!evt.touches) {
return;
}
var preV = this.preV,
len = evt.touches.length,
currentX = evt.touches[0].pageX,
currentY = evt.touches[0].pageY;
this.isDoubleTap = false;
if (len > 1) {
var sCurrentX = evt.touches[1].pageX,
sCurrentY = evt.touches[1].pageY;
var v = {
x: evt.touches[1].pageX - currentX,
y: evt.touches[1].pageY - currentY
};
if (preV.x !== null) {
if (this.pinchStartLen > 0) {
evt.zoom = getLen(v) / this.pinchStartLen;
this.pinch.dispatch(evt, this.element);
}
evt.angle = getRotateAngle(v, preV);
this.rotate.dispatch(evt, this.element);
}
preV.x = v.x;
preV.y = v.y;
if (this.x2 !== null && this.sx2 !== null) {
evt.deltaX = (currentX - this.x2 + sCurrentX - this.sx2) / 2;
evt.deltaY = (currentY - this.y2 + sCurrentY - this.sy2) / 2;
} else {
evt.deltaX = 0;
evt.deltaY = 0;
}
this.twoFingerPressMove.dispatch(evt, this.element);
this.sx2 = sCurrentX;
this.sy2 = sCurrentY;
} else {
if (this.x2 !== null) {
evt.deltaX = currentX - this.x2;
evt.deltaY = currentY - this.y2;
var movedX = Math.abs(this.x1 - this.x2),
movedY = Math.abs(this.y1 - this.y2);
if (movedX > 10 || movedY > 10) {
this._preventTap = true;
}
} else {
evt.deltaX = 0;
evt.deltaY = 0;
}
this.pressMove.dispatch(evt, this.element);
}
this.touchMove.dispatch(evt, this.element);
this._cancelLongTap();
this.x2 = currentX;
this.y2 = currentY;
if (len > 1) {
evt.preventDefault();
}
}
}, {
key: "end",
value: function end(evt) {
if (!evt.changedTouches) {
return;
}
this._cancelLongTap();
var self = this;
if (evt.touches.length < 2) {
this.multipointEnd.dispatch(evt, this.element);
this.sx2 = this.sy2 = null;
}
if (this.x2 && Math.abs(this.x1 - this.x2) > 30 || this.y2 && Math.abs(this.y1 - this.y2) > 30) {
evt.direction = this._swipeDirection(this.x1, this.x2, this.y1, this.y2);
this.swipeTimeout = setTimeout(function () {
self.swipe.dispatch(evt, self.element);
}, 0);
} else {
this.tapTimeout = setTimeout(function () {
if (!self._preventTap) {
self.tap.dispatch(evt, self.element);
}
if (self.isDoubleTap) {
self.doubleTap.dispatch(evt, self.element);
self.isDoubleTap = false;
}
}, 0);
if (!self.isDoubleTap) {
self.singleTapTimeout = setTimeout(function () {
self.singleTap.dispatch(evt, self.element);
}, 250);
}
}
this.touchEnd.dispatch(evt, this.element);
this.preV.x = 0;
this.preV.y = 0;
this.zoom = 1;
this.pinchStartLen = null;
this.x1 = this.x2 = this.y1 = this.y2 = null;
}
}, {
key: "cancelAll",
value: function cancelAll() {
this._preventTap = true;
clearTimeout(this.singleTapTimeout);
clearTimeout(this.tapTimeout);
clearTimeout(this.longTapTimeout);
clearTimeout(this.swipeTimeout);
}
}, {
key: "cancel",
value: function cancel(evt) {
this.cancelAll();
this.touchCancel.dispatch(evt, this.element);
}
}, {
key: "_cancelLongTap",
value: function _cancelLongTap() {
clearTimeout(this.longTapTimeout);
}
}, {
key: "_cancelSingleTap",
value: function _cancelSingleTap() {
clearTimeout(this.singleTapTimeout);
}
}, {
key: "_swipeDirection",
value: function _swipeDirection(x1, x2, y1, y2) {
return Math.abs(x1 - x2) >= Math.abs(y1 - y2) ? x1 - x2 > 0 ? 'Left' : 'Right' : y1 - y2 > 0 ? 'Up' : 'Down';
}
}, {
key: "on",
value: function on(evt, handler) {
if (this[evt]) {
this[evt].add(handler);
}
}
}, {
key: "off",
value: function off(evt, handler) {
if (this[evt]) {
this[evt].del(handler);
}
}
}, {
key: "destroy",
value: function destroy() {
if (this.singleTapTimeout) {
clearTimeout(this.singleTapTimeout);
}
if (this.tapTimeout) {
clearTimeout(this.tapTimeout);
}
if (this.longTapTimeout) {
clearTimeout(this.longTapTimeout);
}
if (this.swipeTimeout) {
clearTimeout(this.swipeTimeout);
}
this.element.removeEventListener('touchstart', this.start);
this.element.removeEventListener('touchmove', this.move);
this.element.removeEventListener('touchend', this.end);
this.element.removeEventListener('touchcancel', this.cancel);
this.rotate.del();
this.touchStart.del();
this.multipointStart.del();
this.multipointEnd.del();
this.pinch.del();
this.swipe.del();
this.tap.del();
this.doubleTap.del();
this.longTap.del();
this.singleTap.del();
this.pressMove.del();
this.twoFingerPressMove.del();
this.touchMove.del();
this.touchEnd.del();
this.touchCancel.del();
this.preV = this.pinchStartLen = this.zoom = this.isDoubleTap = this.delta = this.last = this.now = this.tapTimeout = this.singleTapTimeout = this.longTapTimeout = this.swipeTimeout = this.x1 = this.x2 = this.y1 = this.y2 = this.preTapPosition = this.rotate = this.touchStart = this.multipointStart = this.multipointEnd = this.pinch = this.swipe = this.tap = this.doubleTap = this.longTap = this.singleTap = this.pressMove = this.touchMove = this.touchEnd = this.touchCancel = this.twoFingerPressMove = null;
window.removeEventListener('scroll', this._cancelAllHandler);
return null;
}
}]);
return TouchEvents;
}();
function resetSlideMove(slide) {
var transitionEnd = whichTransitionEvent();
var windowWidth = window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth;
var media = hasClass(slide, 'gslide-media') ? slide : slide.querySelector('.gslide-media');
var container = closest(media, '.ginner-container');
var desc = slide.querySelector('.gslide-description');
if (windowWidth > 769) {
media = container;
}
addClass(media, 'greset');
cssTransform(media, 'translate3d(0, 0, 0)');
addEvent(transitionEnd, {
onElement: media,
once: true,
withCallback: function withCallback(event, target) {
removeClass(media, 'greset');
}
});
media.style.opacity = '';
if (desc) {
desc.style.opacity = '';
}
}
function touchNavigation(instance) {
if (instance.events.hasOwnProperty('touch')) {
return false;
}
var winSize = windowSize();
var winWidth = winSize.width;
var winHeight = winSize.height;
var process = false;
var currentSlide = null;
var media = null;
var mediaImage = null;
var doingMove = false;
var initScale = 1;
var maxScale = 4.5;
var currentScale = 1;
var doingZoom = false;
var imageZoomed = false;
var zoomedPosX = null;
var zoomedPosY = null;
var lastZoomedPosX = null;
var lastZoomedPosY = null;
var hDistance;
var vDistance;
var hDistancePercent = 0;
var vDistancePercent = 0;
var vSwipe = false;
var hSwipe = false;
var startCoords = {};
var endCoords = {};
var xDown = 0;
var yDown = 0;
var isInlined;
var sliderWrapper = document.getElementById('glightbox-slider');
var overlay = document.querySelector('.goverlay');
var touchInstance = new TouchEvents(sliderWrapper, {
touchStart: function touchStart(e) {
process = true;
if (hasClass(e.targetTouches[0].target, 'ginner-container') || closest(e.targetTouches[0].target, '.gslide-desc') || e.targetTouches[0].target.nodeName.toLowerCase() == 'a') {
process = false;
}
if (closest(e.targetTouches[0].target, '.gslide-inline') && !hasClass(e.targetTouches[0].target.parentNode, 'gslide-inline')) {
process = false;
}
if (process) {
endCoords = e.targetTouches[0];
startCoords.pageX = e.targetTouches[0].pageX;
startCoords.pageY = e.targetTouches[0].pageY;
xDown = e.targetTouches[0].clientX;
yDown = e.targetTouches[0].clientY;
currentSlide = instance.activeSlide;
media = currentSlide.querySelector('.gslide-media');
isInlined = currentSlide.querySelector('.gslide-inline');
mediaImage = null;
if (hasClass(media, 'gslide-image')) {
mediaImage = media.querySelector('img');
}
var windowWidth = window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth;
if (windowWidth > 769) {
media = currentSlide.querySelector('.ginner-container');
}
removeClass(overlay, 'greset');
if (e.pageX > 20 && e.pageX < window.innerWidth - 20) {
return;
}
e.preventDefault();
}
},
touchMove: function touchMove(e) {
if (!process) {
return;
}
endCoords = e.targetTouches[0];
if (doingZoom || imageZoomed) {
return;
}
if (isInlined && isInlined.offsetHeight > winHeight) {
var moved = startCoords.pageX - endCoords.pageX;
if (Math.abs(moved) <= 13) {
return false;
}
}
doingMove = true;
var xUp = e.targetTouches[0].clientX;
var yUp = e.targetTouches[0].clientY;
var xDiff = xDown - xUp;
var yDiff = yDown - yUp;
if (Math.abs(xDiff) > Math.abs(yDiff)) {
vSwipe = false;
hSwipe = true;
} else {
hSwipe = false;
vSwipe = true;
}
hDistance = endCoords.pageX - startCoords.pageX;
hDistancePercent = hDistance * 100 / winWidth;
vDistance = endCoords.pageY - startCoords.pageY;
vDistancePercent = vDistance * 100 / winHeight;
var opacity;
if (vSwipe && mediaImage) {
opacity = 1 - Math.abs(vDistance) / winHeight;
overlay.style.opacity = opacity;
if (instance.settings.touchFollowAxis) {
hDistancePercent = 0;
}
}
if (hSwipe) {
opacity = 1 - Math.abs(hDistance) / winWidth;
media.style.opacity = opacity;
if (instance.settings.touchFollowAxis) {
vDistancePercent = 0;
}
}
if (!mediaImage) {
return cssTransform(media, "translate3d(".concat(hDistancePercent, "%, 0, 0)"));
}
cssTransform(media, "translate3d(".concat(hDistancePercent, "%, ").concat(vDistancePercent, "%, 0)"));
},
touchEnd: function touchEnd() {
if (!process) {
return;
}
doingMove = false;
if (imageZoomed || doingZoom) {
lastZoomedPosX = zoomedPosX;
lastZoomedPosY = zoomedPosY;
return;
}
var v = Math.abs(parseInt(vDistancePercent));
var h = Math.abs(parseInt(hDistancePercent));
if (v > 29 && mediaImage) {
instance.close();
return;
}
if (v < 29 && h < 25) {
addClass(overlay, 'greset');
overlay.style.opacity = 1;
return resetSlideMove(media);
}
},
multipointEnd: function multipointEnd() {
setTimeout(function () {
doingZoom = false;
}, 50);
},
multipointStart: function multipointStart() {
doingZoom = true;
initScale = currentScale ? currentScale : 1;
},
pinch: function pinch(evt) {
if (!mediaImage || doingMove) {
return false;
}
doingZoom = true;
mediaImage.scaleX = mediaImage.scaleY = initScale * evt.zoom;
var scale = initScale * evt.zoom;
imageZoomed = true;
if (scale <= 1) {
imageZoomed = false;
scale = 1;
lastZoomedPosY = null;
lastZoomedPosX = null;
zoomedPosX = null;
zoomedPosY = null;
mediaImage.setAttribute('style', '');
return;
}
if (scale > maxScale) {
scale = maxScale;
}
mediaImage.style.transform = "scale3d(".concat(scale, ", ").concat(scale, ", 1)");
currentScale = scale;
},
pressMove: function pressMove(e) {
if (imageZoomed && !doingZoom) {
var mhDistance = endCoords.pageX - startCoords.pageX;
var mvDistance = endCoords.pageY - startCoords.pageY;
if (lastZoomedPosX) {
mhDistance = mhDistance + lastZoomedPosX;
}
if (lastZoomedPosY) {
mvDistance = mvDistance + lastZoomedPosY;
}
zoomedPosX = mhDistance;
zoomedPosY = mvDistance;
var style = "translate3d(".concat(mhDistance, "px, ").concat(mvDistance, "px, 0)");
if (currentScale) {
style += " scale3d(".concat(currentScale, ", ").concat(currentScale, ", 1)");
}
cssTransform(mediaImage, style);
}
},
swipe: function swipe(evt) {
if (imageZoomed) {
return;
}
if (doingZoom) {
doingZoom = false;
return;
}
if (evt.direction == 'Left') {
if (instance.index == instance.elements.length - 1) {
return resetSlideMove(media);
}
instance.nextSlide();
}
if (evt.direction == 'Right') {
if (instance.index == 0) {
return resetSlideMove(media);
}
instance.prevSlide();
}
}
});
instance.events['touch'] = touchInstance;
}
var ZoomImages = function () {
function ZoomImages(el, slide) {
var _this = this;
var onclose = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : null;
_classCallCheck(this, ZoomImages);
this.img = el;
this.slide = slide;
this.onclose = onclose;
if (this.img.setZoomEvents) {
return false;
}
this.active = false;
this.zoomedIn = false;
this.dragging = false;
this.currentX = null;
this.currentY = null;
this.initialX = null;
this.initialY = null;
this.xOffset = 0;
this.yOffset = 0;
this.img.addEventListener('mousedown', function (e) {
return _this.dragStart(e);
}, false);
this.img.addEventListener('mouseup', function (e) {
return _this.dragEnd(e);
}, false);
this.img.addEventListener('mousemove', function (e) {
return _this.drag(e);
}, false);
this.img.addEventListener('click', function (e) {
if (_this.slide.classList.contains('dragging-nav')) {
_this.zoomOut();
return false;
}
if (!_this.zoomedIn) {
return _this.zoomIn();
}
if (_this.zoomedIn && !_this.dragging) {
_this.zoomOut();
}
}, false);
this.img.setZoomEvents = true;
}
_createClass(ZoomImages, [{
key: "zoomIn",
value: function zoomIn() {
var winWidth = this.widowWidth();
if (this.zoomedIn || winWidth <= 768) {
return;
}
var img = this.img;
img.setAttribute('data-style', img.getAttribute('style'));
img.style.maxWidth = img.naturalWidth + 'px';
img.style.maxHeight = img.naturalHeight + 'px';
if (img.naturalWidth > winWidth) {
var centerX = winWidth / 2 - img.naturalWidth / 2;
this.setTranslate(this.img.parentNode, centerX, 0);
}
this.slide.classList.add('zoomed');
this.zoomedIn = true;
}
}, {
key: "zoomOut",
value: function zoomOut() {
this.img.parentNode.setAttribute('style', '');
this.img.setAttribute('style', this.img.getAttribute('data-style'));
this.slide.classList.remove('zoomed');
this.zoomedIn = false;
this.currentX = null;
this.currentY = null;
this.initialX = null;
this.initialY = null;
this.xOffset = 0;
this.yOffset = 0;
if (this.onclose && typeof this.onclose == 'function') {
this.onclose();
}
}
}, {
key: "dragStart",
value: function dragStart(e) {
e.preventDefault();
if (!this.zoomedIn) {
this.active = false;
return;
}
if (e.type === 'touchstart') {
this.initialX = e.touches[0].clientX - this.xOffset;
this.initialY = e.touches[0].clientY - this.yOffset;
} else {
this.initialX = e.clientX - this.xOffset;
this.initialY = e.clientY - this.yOffset;
}
if (e.target === this.img) {
this.active = true;
this.img.classList.add('dragging');
}
}
}, {
key: "dragEnd",
value: function dragEnd(e) {
var _this2 = this;
e.preventDefault();
this.initialX = this.currentX;
this.initialY = this.currentY;
this.active = false;
setTimeout(function () {
_this2.dragging = false;
_this2.img.isDragging = false;
_this2.img.classList.remove('dragging');
}, 100);
}
}, {
key: "drag",
value: function drag(e) {
if (this.active) {
e.preventDefault();
if (e.type === 'touchmove') {
this.currentX = e.touches[0].clientX - this.initialX;
this.currentY = e.touches[0].clientY - this.initialY;
} else {
this.currentX = e.clientX - this.initialX;
this.currentY = e.clientY - this.initialY;
}
this.xOffset = this.currentX;
this.yOffset = this.currentY;
this.img.isDragging = true;
this.dragging = true;
this.setTranslate(this.img, this.currentX, this.currentY);
}
}
}, {
key: "onMove",
value: function onMove(e) {
if (!this.zoomedIn) {
return;
}
var xOffset = e.clientX - this.img.naturalWidth / 2;
var yOffset = e.clientY - this.img.naturalHeight / 2;
this.setTranslate(this.img, xOffset, yOffset);
}
}, {
key: "setTranslate",
value: function setTranslate(node, xPos, yPos) {
node.style.transform = 'translate3d(' + xPos + 'px, ' + yPos + 'px, 0)';
}
}, {
key: "widowWidth",
value: function widowWidth() {
return window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth;
}
}]);
return ZoomImages;
}();
var DragSlides = function () {
function DragSlides() {
var _this = this;
var config = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
_classCallCheck(this, DragSlides);
var dragEl = config.dragEl,
_config$toleranceX = config.toleranceX,
toleranceX = _config$toleranceX === void 0 ? 40 : _config$toleranceX,
_config$toleranceY = config.toleranceY,
toleranceY = _config$toleranceY === void 0 ? 65 : _config$toleranceY,
_config$slide = config.slide,
slide = _config$slide === void 0 ? null : _config$slide,
_config$instance = config.instance,
instance = _config$instance === void 0 ? null : _config$instance;
this.el = dragEl;
this.active = false;
this.dragging = false;
this.currentX = null;
this.currentY = null;
this.initialX = null;
this.initialY = null;
this.xOffset = 0;
this.yOffset = 0;
this.direction = null;
this.lastDirection = null;
this.toleranceX = toleranceX;
this.toleranceY = toleranceY;
this.toleranceReached = false;
this.dragContainer = this.el;
this.slide = slide;
this.instance = instance;
this.el.addEventListener('mousedown', function (e) {
return _this.dragStart(e);
}, false);
this.el.addEventListener('mouseup', function (e) {
return _this.dragEnd(e);
}, false);
this.el.addEventListener('mousemove', function (e) {
return _this.drag(e);
}, false);
}
_createClass(DragSlides, [{
key: "dragStart",
value: function dragStart(e) {
if (this.slide.classList.contains('zoomed')) {
this.active = false;
return;
}
if (e.type === 'touchstart') {
this.initialX = e.touches[0].clientX - this.xOffset;
this.initialY = e.touches[0].clientY - this.yOffset;
} else {
this.initialX = e.clientX - this.xOffset;
this.initialY = e.clientY - this.yOffset;
}
var clicked = e.target.nodeName.toLowerCase();
var exludeClicks = ['input', 'select', 'textarea', 'button', 'a'];
if (e.target.classList.contains('nodrag') || closest(e.target, '.nodrag') || exludeClicks.indexOf(clicked) !== -1) {
this.active = false;
return;
}
e.preventDefault();
if (e.target === this.el || clicked !== 'img' && closest(e.target, '.gslide-inline')) {
this.active = true;
this.el.classList.add('dragging');
this.dragContainer = closest(e.target, '.ginner-container');
}
}
}, {
key: "dragEnd",
value: function dragEnd(e) {
var _this2 = this;
e && e.preventDefault();
this.initialX = 0;
this.initialY = 0;
this.currentX = null;
this.currentY = null;
this.initialX = null;
this.initialY = null;
this.xOffset = 0;
this.yOffset = 0;
this.active = false;
if (this.doSlideChange) {
this.instance.preventOutsideClick = true;
this.doSlideChange == 'right' && this.instance.prevSlide();
this.doSlideChange == 'left' && this.instance.nextSlide();
}
if (this.doSlideClose) {
this.instance.close();
}
if (!this.toleranceReached) {
this.setTranslate(this.dragContainer, 0, 0, true);
}
setTimeout(function () {
_this2.instance.preventOutsideClick = false;
_this2.toleranceReached = false;
_this2.lastDirection = null;
_this2.dragging = false;
_this2.el.isDragging = false;
_this2.el.classList.remove('dragging');
_this2.slide.classList.remove('dragging-nav');
_this2.dragContainer.style.transform = '';
_this2.dragContainer.style.transition = '';
}, 100);
}
}, {
key: "drag",
value: function drag(e) {
if (this.active) {
e.preventDefault();
this.slide.classList.add('dragging-nav');
if (e.type === 'touchmove') {
this.currentX = e.touches[0].clientX - this.initialX;
this.currentY = e.touches[0].clientY - this.initialY;
} else {
this.currentX = e.clientX - this.initialX;
this.currentY = e.clientY - this.initialY;
}
this.xOffset = this.currentX;
this.yOffset = this.currentY;
this.el.isDragging = true;
this.dragging = true;
this.doSlideChange = false;
this.doSlideClose = false;
var currentXInt = Math.abs(this.currentX);
var currentYInt = Math.abs(this.currentY);
if (currentXInt > 0 && currentXInt >= Math.abs(this.currentY) && (!this.lastDirection || this.lastDirection == 'x')) {
this.yOffset = 0;
this.lastDirection = 'x';
this.setTranslate(this.dragContainer, this.currentX, 0);
var doChange = this.shouldChange();
if (!this.instance.settings.dragAutoSnap && doChange) {
this.doSlideChange = doChange;
}
if (this.instance.settings.dragAutoSnap && doChange) {
this.instance.preventOutsideClick = true;
this.toleranceReached = true;
this.active = false;
this.instance.preventOutsideClick = true;
this.dragEnd(null);
doChange == 'right' && this.instance.prevSlide();
doChange == 'left' && this.instance.nextSlide();
return;
}
}
if (this.toleranceY > 0 && currentYInt > 0 && currentYInt >= currentXInt && (!this.lastDirection || this.lastDirection == 'y')) {
this.xOffset = 0;
this.lastDirection = 'y';
this.setTranslate(this.dragContainer, 0, this.currentY);
var doClose = this.shouldClose();
if (!this.instance.settings.dragAutoSnap && doClose) {
this.doSlideClose = true;
}
if (this.instance.settings.dragAutoSnap && doClose) {
this.instance.close();
}
return;
}
}
}
}, {
key: "shouldChange",
value: function shouldChange() {
var doChange = false;
var currentXInt = Math.abs(this.currentX);
if (currentXInt >= this.toleranceX) {
var dragDir = this.currentX > 0 ? 'right' : 'left';
if (dragDir == 'left' && this.slide !== this.slide.parentNode.lastChild || dragDir == 'right' && this.slide !== this.slide.parentNode.firstChild) {
doChange = dragDir;
}
}
return doChange;
}
}, {
key: "shouldClose",
value: function shouldClose() {
var doClose = false;
var currentYInt = Math.abs(this.currentY);
if (currentYInt >= this.toleranceY) {
doClose = true;
}
return doClose;
}
}, {
key: "setTranslate",
value: function setTranslate(node, xPos, yPos) {
var animated = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : false;
if (animated) {
node.style.transition = 'all .2s ease';
} else {
node.style.transition = '';
}
node.style.transform = "translate3d(".concat(xPos, "px, ").concat(yPos, "px, 0)");
}
}]);
return DragSlides;
}();
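    // Illustrative wiring sketch (hypothetical call site; the real one lives elsewhere in this file):
    //   new DragSlides({ dragEl: slideNode, toleranceX: 40, toleranceY: 65, slide: slideNode, instance: lightbox });
    // A horizontal drag beyond toleranceX pixels changes slides, a vertical drag beyond
    // toleranceY pixels closes the lightbox (immediately when settings.dragAutoSnap is true).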
function slideImage(slide, data, index, callback) {
var slideMedia = slide.querySelector('.gslide-media');
var img = new Image();
var titleID = 'gSlideTitle_' + index;
var textID = 'gSlideDesc_' + index;
img.addEventListener('load', function () {
if (isFunction(callback)) {
callback();
}
}, false);
img.src = data.href;
if (data.sizes != '' && data.srcset != '') {
img.sizes = data.sizes;
img.srcset = data.srcset;
}
img.alt = '';
if (!isNil(data.alt) && data.alt !== '') {
img.alt = data.alt;
}
if (data.title !== '') {
img.setAttribute('aria-labelledby', titleID);
}
if (data.description !== '') {
img.setAttribute('aria-describedby', textID);
}
if (data.hasOwnProperty('_hasCustomWidth') && data._hasCustomWidth) {
img.style.width = data.width;
}
if (data.hasOwnProperty('_hasCustomHeight') && data._hasCustomHeight) {
img.style.height = data.height;
}
slideMedia.insertBefore(img, slideMedia.firstChild);
return;
}
function slideVideo(slide, data, index, callback) {
var _this = this;
var slideContainer = slide.querySelector('.ginner-container');
var videoID = 'gvideo' + index;
var slideMedia = slide.querySelector('.gslide-media');
var videoPlayers = this.getAllPlayers();
addClass(slideContainer, 'gvideo-container');
slideMedia.insertBefore(createHTML('<div class="gvideo-wrapper"></div>'), slideMedia.firstChild);
var videoWrapper = slide.querySelector('.gvideo-wrapper');
injectAssets(this.settings.plyr.css, 'Plyr');
var url = data.href;
var provider = data === null || data === void 0 ? void 0 : data.videoProvider;
var customPlaceholder = false;
slideMedia.style.maxWidth = data.width;
injectAssets(this.settings.plyr.js, 'Plyr', function () {
if (!provider && url.match(/vimeo\.com\/([0-9]*)/)) {
provider = 'vimeo';
}
if (!provider && (url.match(/(youtube\.com|youtube-nocookie\.com)\/watch\?v=([a-zA-Z0-9\-_]+)/) || url.match(/youtu\.be\/([a-zA-Z0-9\-_]+)/) || url.match(/(youtube\.com|youtube-nocookie\.com)\/embed\/([a-zA-Z0-9\-_]+)/))) {
provider = 'youtube';
}
if (provider === 'local' || !provider) {
provider = 'local';
var html = '<video id="' + videoID + '" ';
html += "style=\"background:#000; max-width: ".concat(data.width, ";\" ");
html += 'preload="metadata" ';
html += 'x-webkit-airplay="allow" ';
html += 'playsinline ';
html += 'controls ';
html += 'class="gvideo-local">';
html += "<source src=\"".concat(url, "\">");
html += '</video>';
customPlaceholder = createHTML(html);
}
var placeholder = customPlaceholder ? customPlaceholder : createHTML("<div id=\"".concat(videoID, "\" data-plyr-provider=\"").concat(provider, "\" data-plyr-embed-id=\"").concat(url, "\"></div>"));
addClass(videoWrapper, "".concat(provider, "-video gvideo"));
videoWrapper.appendChild(placeholder);
videoWrapper.setAttribute('data-id', videoID);
videoWrapper.setAttribute('data-index', index);
var playerConfig = has(_this.settings.plyr, 'config') ? _this.settings.plyr.config : {};
var player = new Plyr('#' + videoID, playerConfig);
player.on('ready', function (event) {
videoPlayers[videoID] = event.detail.plyr;
if (isFunction(callback)) {
callback();
}
});
waitUntil(function () {
return slide.querySelector('iframe') && slide.querySelector('iframe').dataset.ready == 'true';
}, function () {
_this.resize(slide);
});
player.on('enterfullscreen', handleMediaFullScreen);
player.on('exitfullscreen', handleMediaFullScreen);
});
}
function handleMediaFullScreen(event) {
var media = closest(event.target, '.gslide-media');
if (event.type === 'enterfullscreen') {
addClass(media, 'fullscreen');
}
if (event.type === 'exitfullscreen') {
removeClass(media, 'fullscreen');
}
}
function slideInline(slide, data, index, callback) {
var _this = this;
var slideMedia = slide.querySelector('.gslide-media');
var hash = has(data, 'href') && data.href ? data.href.split('#').pop().trim() : false;
var content = has(data, 'content') && data.content ? data.content : false;
var innerContent;
if (content) {
if (isString(content)) {
innerContent = createHTML("<div class=\"ginlined-content\">".concat(content, "</div>"));
}
if (isNode(content)) {
if (content.style.display == 'none') {
content.style.display = 'block';
}
var container = document.createElement('div');
container.className = 'ginlined-content';
container.appendChild(content);
innerContent = container;
}
}
if (hash) {
var div = document.getElementById(hash);
if (!div) {
return false;
}
var cloned = div.cloneNode(true);
cloned.style.height = data.height;
cloned.style.maxWidth = data.width;
addClass(cloned, 'ginlined-content');
innerContent = cloned;
}
if (!innerContent) {
console.error('Unable to append inline slide content', data);
return false;
}
slideMedia.style.height = data.height;
slideMedia.style.width = data.width;
slideMedia.appendChild(innerContent);
this.events['inlineclose' + hash] = addEvent('click', {
onElement: slideMedia.querySelectorAll('.gtrigger-close'),
withCallback: function withCallback(e) {
e.preventDefault();
_this.close();
}
});
if (isFunction(callback)) {
callback();
}
return;
}
function slideIframe(slide, data, index, callback) {
var slideMedia = slide.querySelector('.gslide-media');
var iframe = createIframe({
url: data.href,
callback: callback
});
slideMedia.parentNode.style.maxWidth = data.width;
slideMedia.parentNode.style.height = data.height;
slideMedia.appendChild(iframe);
return;
}
var SlideConfigParser = function () {
function SlideConfigParser() {
var slideParamas = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
_classCallCheck(this, SlideConfigParser);
this.defaults = {
href: '',
sizes: '',
srcset: '',
title: '',
type: '',
videoProvider: '',
description: '',
alt: '',
descPosition: 'bottom',
effect: '',
width: '',
height: '',
content: false,
zoomable: true,
draggable: true
};
if (isObject(slideParamas)) {
this.defaults = extend(this.defaults, slideParamas);
}
}
_createClass(SlideConfigParser, [{
key: "sourceType",
value: function sourceType(url) {
var origin = url;
url = url.toLowerCase();
if (url.match(/\.(jpeg|jpg|jpe|gif|png|apn|webp|avif|svg)/) !== null) {
return 'image';
}
if (url.match(/(youtube\.com|youtube-nocookie\.com)\/watch\?v=([a-zA-Z0-9\-_]+)/) || url.match(/youtu\.be\/([a-zA-Z0-9\-_]+)/) || url.match(/(youtube\.com|youtube-nocookie\.com)\/embed\/([a-zA-Z0-9\-_]+)/)) {
return 'video';
}
if (url.match(/vimeo\.com\/([0-9]*)/)) {
return 'video';
}
if (url.match(/\.(mp4|ogg|webm|mov)/) !== null) {
return 'video';
}
if (url.match(/\.(mp3|wav|wma|aac|ogg)/) !== null) {
return 'audio';
}
if (url.indexOf('#') > -1) {
var hash = origin.split('#').pop();
if (hash.trim() !== '') {
return 'inline';
}
}
if (url.indexOf('goajax=true') > -1) {
return 'ajax';
}
return 'external';
}
}, {
key: "parseConfig",
value: function parseConfig(element, settings) {
var _this = this;
var data = extend({
descPosition: settings.descPosition
}, this.defaults);
if (isObject(element) && !isNode(element)) {
if (!has(element, 'type')) {
if (has(element, 'content') && element.content) {
element.type = 'inline';
} else if (has(element, 'href')) {
element.type = this.sourceType(element.href);
}
}
var objectData = extend(data, element);
this.setSize(objectData, settings);
return objectData;
}
var url = '';
var config = element.getAttribute('data-glightbox');
var nodeType = element.nodeName.toLowerCase();
if (nodeType === 'a') {
url = element.href;
}
if (nodeType === 'img') {
url = element.src;
data.alt = element.alt;
}
data.href = url;
each(data, function (val, key) {
if (has(settings, key) && key !== 'width') {
data[key] = settings[key];
}
var nodeData = element.dataset[key];
if (!isNil(nodeData)) {
data[key] = _this.sanitizeValue(nodeData);
}
});
if (data.content) {
data.type = 'inline';
}
if (!data.type && url) {
data.type = this.sourceType(url);
}
if (!isNil(config)) {
var cleanKeys = [];
each(data, function (v, k) {
cleanKeys.push(';\\s?' + k);
});
cleanKeys = cleanKeys.join('\\s?:|');
if (config.trim() !== '') {
each(data, function (val, key) {
var str = config;
var match = 's?' + key + 's?:s?(.*?)(' + cleanKeys + 's?:|$)';
var regex = new RegExp(match);
var matches = str.match(regex);
if (matches && matches.length && matches[1]) {
var value = matches[1].trim().replace(/;\s*$/, '');
data[key] = _this.sanitizeValue(value);
}
});
}
} else {
if (!data.title && nodeType == 'a') {
var title = element.title;
if (!isNil(title) && title !== '') {
data.title = title;
}
}
if (!data.title && nodeType == 'img') {
var alt = element.alt;
if (!isNil(alt) && alt !== '') {
data.title = alt;
}
}
}
if (data.description && data.description.substring(0, 1) === '.') {
var description;
try {
description = document.querySelector(data.description).innerHTML;
} catch (error) {
if (!(error instanceof DOMException)) {
throw error;
}
}
if (description) {
data.description = description;
}
}
if (!data.description) {
var nodeDesc = element.querySelector('.glightbox-desc');
if (nodeDesc) {
data.description = nodeDesc.innerHTML;
}
}
this.setSize(data, settings, element);
this.slideConfig = data;
return data;
}
}, {
key: "setSize",
value: function setSize(data, settings) {
var element = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : null;
var defaultWith = data.type == 'video' ? this.checkSize(settings.videosWidth) : this.checkSize(settings.width);
var defaultHeight = this.checkSize(settings.height);
data.width = has(data, 'width') && data.width !== '' ? this.checkSize(data.width) : defaultWith;
data.height = has(data, 'height') && data.height !== '' ? this.checkSize(data.height) : defaultHeight;
if (element && data.type == 'image') {
data._hasCustomWidth = element.dataset.width ? true : false;
data._hasCustomHeight = element.dataset.height ? true : false;
}
return data;
}
}, {
key: "checkSize",
value: function checkSize(size) {
return isNumber(size) ? "".concat(size, "px") : size;
}
}, {
key: "sanitizeValue",
value: function sanitizeValue(val) {
if (val !== 'true' && val !== 'false') {
return val;
}
return val === 'true';
}
}]);
return SlideConfigParser;
}();
var Slide = function () {
function Slide(el, instance, index) {
_classCallCheck(this, Slide);
this.element = el;
this.instance = instance;
this.index = index;
}
_createClass(Slide, [{
key: "setContent",
value: function setContent() {
var _this = this;
var slide = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null;
var callback = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
if (hasClass(slide, 'loaded')) {
return false;
}
var settings = this.instance.settings;
var slideConfig = this.slideConfig;
var isMobileDevice = isMobile();
if (isFunction(settings.beforeSlideLoad)) {
settings.beforeSlideLoad({
index: this.index,
slide: slide,
player: false
});
}
var type = slideConfig.type;
var position = slideConfig.descPosition;
var slideMedia = slide.querySelector('.gslide-media');
var slideTitle = slide.querySelector('.gslide-title');
var slideText = slide.querySelector('.gslide-desc');
var slideDesc = slide.querySelector('.gdesc-inner');
var finalCallback = callback;
var titleID = 'gSlideTitle_' + this.index;
var textID = 'gSlideDesc_' + this.index;
if (isFunction(settings.afterSlideLoad)) {
finalCallback = function finalCallback() {
if (isFunction(callback)) {
callback();
}
settings.afterSlideLoad({
index: _this.index,
slide: slide,
player: _this.instance.getSlidePlayerInstance(_this.index)
});
};
}
if (slideConfig.title == '' && slideConfig.description == '') {
if (slideDesc) {
slideDesc.parentNode.parentNode.removeChild(slideDesc.parentNode);
}
} else {
if (slideTitle && slideConfig.title !== '') {
slideTitle.id = titleID;
slideTitle.innerHTML = slideConfig.title;
} else {
slideTitle.parentNode.removeChild(slideTitle);
}
if (slideText && slideConfig.description !== '') {
slideText.id = textID;
if (isMobileDevice && settings.moreLength > 0) {
slideConfig.smallDescription = this.slideShortDesc(slideConfig.description, settings.moreLength, settings.moreText);
slideText.innerHTML = slideConfig.smallDescription;
this.descriptionEvents(slideText, slideConfig);
} else {
slideText.innerHTML = slideConfig.description;
}
} else {
slideText.parentNode.removeChild(slideText);
}
addClass(slideMedia.parentNode, "desc-".concat(position));
addClass(slideDesc.parentNode, "description-".concat(position));
}
addClass(slideMedia, "gslide-".concat(type));
addClass(slide, 'loaded');
if (type === 'video') {
slideVideo.apply(this.instance, [slide, slideConfig, this.index, finalCallback]);
return;
}
if (type === 'external') {
slideIframe.apply(this, [slide, slideConfig, this.index, finalCallback]);
return;
}
if (type === 'inline') {
slideInline.apply(this.instance, [slide, slideConfig, this.index, finalCallback]);
if (slideConfig.draggable) {
new DragSlides({
dragEl: slide.querySelector('.gslide-inline'),
toleranceX: settings.dragToleranceX,
toleranceY: settings.dragToleranceY,
slide: slide,
instance: this.instance
});
}
return;
}
if (type === 'image') {
slideImage(slide, slideConfig, this.index, function () {
var img = slide.querySelector('img');
if (slideConfig.draggable) {
new DragSlides({
dragEl: img,
toleranceX: settings.dragToleranceX,
toleranceY: settings.dragToleranceY,
slide: slide,
instance: _this.instance
});
}
if (slideConfig.zoomable && img.naturalWidth > img.offsetWidth) {
addClass(img, 'zoomable');
new ZoomImages(img, slide, function () {
_this.instance.resize();
});
}
if (isFunction(finalCallback)) {
finalCallback();
}
});
return;
}
if (isFunction(finalCallback)) {
finalCallback();
}
}
}, {
key: "slideShortDesc",
value: function slideShortDesc(string) {
var n = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 50;
var wordBoundary = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
var div = document.createElement('div');
div.innerHTML = string;
var cleanedString = div.innerText;
var useWordBoundary = wordBoundary;
string = cleanedString.trim();
if (string.length <= n) {
return string;
}
var subString = string.substr(0, n - 1);
if (!useWordBoundary) {
return subString;
}
div = null;
return subString + '... <a href="#" class="desc-more">' + wordBoundary + '</a>';
}
}, {
key: "descriptionEvents",
value: function descriptionEvents(desc, data) {
var _this2 = this;
var moreLink = desc.querySelector('.desc-more');
if (!moreLink) {
return false;
}
addEvent('click', {
onElement: moreLink,
withCallback: function withCallback(event, target) {
event.preventDefault();
var body = document.body;
var desc = closest(target, '.gslide-desc');
if (!desc) {
return false;
}
desc.innerHTML = data.description;
addClass(body, 'gdesc-open');
var shortEvent = addEvent('click', {
onElement: [body, closest(desc, '.gslide-description')],
withCallback: function withCallback(event, target) {
if (event.target.nodeName.toLowerCase() !== 'a') {
removeClass(body, 'gdesc-open');
addClass(body, 'gdesc-closed');
desc.innerHTML = data.smallDescription;
_this2.descriptionEvents(desc, data);
setTimeout(function () {
removeClass(body, 'gdesc-closed');
}, 400);
shortEvent.destroy();
}
}
});
}
});
}
}, {
key: "create",
value: function create() {
return createHTML(this.instance.settings.slideHTML);
}
}, {
key: "getConfig",
value: function getConfig() {
if (!isNode(this.element) && !this.element.hasOwnProperty('draggable')) {
this.element.draggable = this.instance.settings.draggable;
}
var parser = new SlideConfigParser(this.instance.settings.slideExtraAttributes);
this.slideConfig = parser.parseConfig(this.element, this.instance.settings);
return this.slideConfig;
}
}]);
return Slide;
}();
var _version = '3.1.0';
var isMobile$1 = isMobile();
var isTouch$1 = isTouch();
var html = document.getElementsByTagName('html')[0];
var defaults = {
selector: '.glightbox',
elements: null,
skin: 'clean',
theme: 'clean',
closeButton: true,
startAt: null,
autoplayVideos: true,
autofocusVideos: true,
descPosition: 'bottom',
width: '900px',
height: '506px',
videosWidth: '960px',
beforeSlideChange: null,
afterSlideChange: null,
beforeSlideLoad: null,
afterSlideLoad: null,
slideInserted: null,
slideRemoved: null,
slideExtraAttributes: null,
onOpen: null,
onClose: null,
loop: false,
zoomable: true,
draggable: true,
dragAutoSnap: false,
dragToleranceX: 40,
dragToleranceY: 65,
preload: true,
oneSlidePerOpen: false,
touchNavigation: true,
touchFollowAxis: true,
keyboardNavigation: true,
closeOnOutsideClick: true,
plugins: false,
plyr: {
css: 'https://cdn.plyr.io/3.6.12/plyr.css',
js: 'https://cdn.plyr.io/3.6.12/plyr.js',
config: {
ratio: '16:9',
fullscreen: {
enabled: true,
iosNative: true
},
youtube: {
noCookie: true,
rel: 0,
showinfo: 0,
iv_load_policy: 3
},
vimeo: {
byline: false,
portrait: false,
title: false,
transparent: false
}
}
},
openEffect: 'zoom',
closeEffect: 'zoom',
slideEffect: 'slide',
moreText: 'See more',
moreLength: 60,
cssEfects: {
fade: {
"in": 'fadeIn',
out: 'fadeOut'
},
zoom: {
"in": 'zoomIn',
out: 'zoomOut'
},
slide: {
"in": 'slideInRight',
out: 'slideOutLeft'
},
slideBack: {
"in": 'slideInLeft',
out: 'slideOutRight'
},
none: {
"in": 'none',
out: 'none'
}
},
svg: {
close: '<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 512 512" xml:space="preserve"><g><g><path d="M505.943,6.058c-8.077-8.077-21.172-8.077-29.249,0L6.058,476.693c-8.077,8.077-8.077,21.172,0,29.249C10.096,509.982,15.39,512,20.683,512c5.293,0,10.586-2.019,14.625-6.059L505.943,35.306C514.019,27.23,514.019,14.135,505.943,6.058z"/></g></g><g><g><path d="M505.942,476.694L35.306,6.059c-8.076-8.077-21.172-8.077-29.248,0c-8.077,8.076-8.077,21.171,0,29.248l470.636,470.636c4.038,4.039,9.332,6.058,14.625,6.058c5.293,0,10.587-2.019,14.624-6.057C514.018,497.866,514.018,484.771,505.942,476.694z"/></g></g></svg>',
next: '<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 477.175 477.175" xml:space="preserve"> <g><path d="M360.731,229.075l-225.1-225.1c-5.3-5.3-13.8-5.3-19.1,0s-5.3,13.8,0,19.1l215.5,215.5l-215.5,215.5c-5.3,5.3-5.3,13.8,0,19.1c2.6,2.6,6.1,4,9.5,4c3.4,0,6.9-1.3,9.5-4l225.1-225.1C365.931,242.875,365.931,234.275,360.731,229.075z"/></g></svg>',
prev: '<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 477.175 477.175" xml:space="preserve"><g><path d="M145.188,238.575l215.5-215.5c5.3-5.3,5.3-13.8,0-19.1s-13.8-5.3-19.1,0l-225.1,225.1c-5.3,5.3-5.3,13.8,0,19.1l225.1,225c2.6,2.6,6.1,4,9.5,4s6.9-1.3,9.5-4c5.3-5.3,5.3-13.8,0-19.1L145.188,238.575z"/></g></svg>'
}
};
defaults.slideHTML = "<div class=\"gslide\">\n <div class=\"gslide-inner-content\">\n <div class=\"ginner-container\">\n <div class=\"gslide-media\">\n </div>\n <div class=\"gslide-description\">\n <div class=\"gdesc-inner\">\n <h4 class=\"gslide-title\"></h4>\n <div class=\"gslide-desc\"></div>\n </div>\n </div>\n </div>\n </div>\n</div>";
defaults.lightboxHTML = "<div id=\"glightbox-body\" class=\"glightbox-container\" tabindex=\"-1\" role=\"dialog\" aria-hidden=\"false\">\n <div class=\"gloader visible\"></div>\n <div class=\"goverlay\"></div>\n <div class=\"gcontainer\">\n <div id=\"glightbox-slider\" class=\"gslider\"></div>\n <button class=\"gclose gbtn\" aria-label=\"Close\" data-taborder=\"3\">{closeSVG}</button>\n <button class=\"gprev gbtn\" aria-label=\"Previous\" data-taborder=\"2\">{prevSVG}</button>\n <button class=\"gnext gbtn\" aria-label=\"Next\" data-taborder=\"1\">{nextSVG}</button>\n</div>\n</div>";
var GlightboxInit = function () {
function GlightboxInit() {
var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
_classCallCheck(this, GlightboxInit);
this.customOptions = options;
this.settings = extend(defaults, options);
this.effectsClasses = this.getAnimationClasses();
this.videoPlayers = {};
this.apiEvents = [];
this.fullElementsList = false;
}
_createClass(GlightboxInit, [{
key: "init",
value: function init() {
var _this = this;
var selector = this.getSelector();
if (selector) {
this.baseEvents = addEvent('click', {
onElement: selector,
withCallback: function withCallback(e, target) {
e.preventDefault();
_this.open(target);
}
});
}
this.elements = this.getElements();
}
}, {
key: "open",
value: function open() {
var element = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null;
var startAt = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null;
if (this.elements.length === 0) {
return false;
}
this.activeSlide = null;
this.prevActiveSlideIndex = null;
this.prevActiveSlide = null;
var index = isNumber(startAt) ? startAt : this.settings.startAt;
if (isNode(element)) {
var gallery = element.getAttribute('data-gallery');
if (gallery) {
this.fullElementsList = this.elements;
this.elements = this.getGalleryElements(this.elements, gallery);
}
if (isNil(index)) {
index = this.getElementIndex(element);
if (index < 0) {
index = 0;
}
}
}
if (!isNumber(index)) {
index = 0;
}
this.build();
animateElement(this.overlay, this.settings.openEffect === 'none' ? 'none' : this.settings.cssEfects.fade["in"]);
var body = document.body;
var scrollBar = window.innerWidth - document.documentElement.clientWidth;
if (scrollBar > 0) {
var styleSheet = document.createElement('style');
styleSheet.type = 'text/css';
styleSheet.className = 'gcss-styles';
styleSheet.innerText = ".gscrollbar-fixer {margin-right: ".concat(scrollBar, "px}");
document.head.appendChild(styleSheet);
addClass(body, 'gscrollbar-fixer');
}
addClass(body, 'glightbox-open');
addClass(html, 'glightbox-open');
if (isMobile$1) {
addClass(document.body, 'glightbox-mobile');
this.settings.slideEffect = 'slide';
}
this.showSlide(index, true);
if (this.elements.length === 1) {
addClass(this.prevButton, 'glightbox-button-hidden');
addClass(this.nextButton, 'glightbox-button-hidden');
} else {
removeClass(this.prevButton, 'glightbox-button-hidden');
removeClass(this.nextButton, 'glightbox-button-hidden');
}
this.lightboxOpen = true;
this.trigger('open');
if (isFunction(this.settings.onOpen)) {
this.settings.onOpen();
}
if (isTouch$1 && this.settings.touchNavigation) {
touchNavigation(this);
}
if (this.settings.keyboardNavigation) {
keyboardNavigation(this);
}
}
}, {
key: "openAt",
value: function openAt() {
var index = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0;
this.open(null, index);
}
}, {
key: "showSlide",
value: function showSlide() {
var _this2 = this;
var index = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0;
var first = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false;
show(this.loader);
this.index = parseInt(index);
var current = this.slidesContainer.querySelector('.current');
if (current) {
removeClass(current, 'current');
}
this.slideAnimateOut();
var slideNode = this.slidesContainer.querySelectorAll('.gslide')[index];
if (hasClass(slideNode, 'loaded')) {
this.slideAnimateIn(slideNode, first);
hide(this.loader);
} else {
show(this.loader);
var slide = this.elements[index];
var slideData = {
index: this.index,
slide: slideNode,
slideNode: slideNode,
slideConfig: slide.slideConfig,
slideIndex: this.index,
trigger: slide.node,
player: null
};
this.trigger('slide_before_load', slideData);
slide.instance.setContent(slideNode, function () {
hide(_this2.loader);
_this2.resize();
_this2.slideAnimateIn(slideNode, first);
_this2.trigger('slide_after_load', slideData);
});
}
this.slideDescription = slideNode.querySelector('.gslide-description');
this.slideDescriptionContained = this.slideDescription && hasClass(this.slideDescription.parentNode, 'gslide-media');
if (this.settings.preload) {
this.preloadSlide(index + 1);
this.preloadSlide(index - 1);
}
this.updateNavigationClasses();
this.activeSlide = slideNode;
}
}, {
key: "preloadSlide",
value: function preloadSlide(index) {
var _this3 = this;
if (index < 0 || index > this.elements.length - 1) {
return false;
}
if (isNil(this.elements[index])) {
return false;
}
var slideNode = this.slidesContainer.querySelectorAll('.gslide')[index];
if (hasClass(slideNode, 'loaded')) {
return false;
}
var slide = this.elements[index];
var type = slide.type;
var slideData = {
index: index,
slide: slideNode,
slideNode: slideNode,
slideConfig: slide.slideConfig,
slideIndex: index,
trigger: slide.node,
player: null
};
this.trigger('slide_before_load', slideData);
if (type === 'video' || type === 'external') {
setTimeout(function () {
slide.instance.setContent(slideNode, function () {
_this3.trigger('slide_after_load', slideData);
});
}, 200);
} else {
slide.instance.setContent(slideNode, function () {
_this3.trigger('slide_after_load', slideData);
});
}
}
}, {
key: "prevSlide",
value: function prevSlide() {
this.goToSlide(this.index - 1);
}
}, {
key: "nextSlide",
value: function nextSlide() {
this.goToSlide(this.index + 1);
}
}, {
key: "goToSlide",
value: function goToSlide() {
var index = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : false;
this.prevActiveSlide = this.activeSlide;
this.prevActiveSlideIndex = this.index;
if (!this.loop() && (index < 0 || index > this.elements.length - 1)) {
return false;
}
if (index < 0) {
index = this.elements.length - 1;
} else if (index >= this.elements.length) {
index = 0;
}
this.showSlide(index);
}
}, {
key: "insertSlide",
value: function insertSlide() {
var config = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
var index = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : -1;
if (index < 0) {
index = this.elements.length;
}
var slide = new Slide(config, this, index);
var data = slide.getConfig();
var slideInfo = extend({}, data);
var newSlide = slide.create();
var totalSlides = this.elements.length - 1;
slideInfo.index = index;
slideInfo.node = false;
slideInfo.instance = slide;
slideInfo.slideConfig = data;
this.elements.splice(index, 0, slideInfo);
var addedSlideNode = null;
var addedSlidePlayer = null;
if (this.slidesContainer) {
if (index > totalSlides) {
this.slidesContainer.appendChild(newSlide);
} else {
var existingSlide = this.slidesContainer.querySelectorAll('.gslide')[index];
this.slidesContainer.insertBefore(newSlide, existingSlide);
}
if (this.settings.preload && this.index == 0 && index == 0 || this.index - 1 == index || this.index + 1 == index) {
this.preloadSlide(index);
}
if (this.index === 0 && index === 0) {
this.index = 1;
}
this.updateNavigationClasses();
addedSlideNode = this.slidesContainer.querySelectorAll('.gslide')[index];
addedSlidePlayer = this.getSlidePlayerInstance(index);
slideInfo.slideNode = addedSlideNode;
}
this.trigger('slide_inserted', {
index: index,
slide: addedSlideNode,
slideNode: addedSlideNode,
slideConfig: data,
slideIndex: index,
trigger: null,
player: addedSlidePlayer
});
if (isFunction(this.settings.slideInserted)) {
this.settings.slideInserted({
index: index,
slide: addedSlideNode,
player: addedSlidePlayer
});
}
}
}, {
key: "removeSlide",
value: function removeSlide() {
var index = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : -1;
if (index < 0 || index > this.elements.length - 1) {
return false;
}
var slide = this.slidesContainer && this.slidesContainer.querySelectorAll('.gslide')[index];
if (slide) {
if (this.getActiveSlideIndex() == index) {
if (index == this.elements.length - 1) {
this.prevSlide();
} else {
this.nextSlide();
}
}
slide.parentNode.removeChild(slide);
}
this.elements.splice(index, 1);
this.trigger('slide_removed', index);
if (isFunction(this.settings.slideRemoved)) {
this.settings.slideRemoved(index);
}
}
}, {
key: "slideAnimateIn",
value: function slideAnimateIn(slide, first) {
var _this4 = this;
var slideMedia = slide.querySelector('.gslide-media');
var slideDesc = slide.querySelector('.gslide-description');
var prevData = {
index: this.prevActiveSlideIndex,
slide: this.prevActiveSlide,
slideNode: this.prevActiveSlide,
slideIndex: this.prevActiveSlide,
slideConfig: isNil(this.prevActiveSlideIndex) ? null : this.elements[this.prevActiveSlideIndex].slideConfig,
trigger: isNil(this.prevActiveSlideIndex) ? null : this.elements[this.prevActiveSlideIndex].node,
player: this.getSlidePlayerInstance(this.prevActiveSlideIndex)
};
var nextData = {
index: this.index,
slide: this.activeSlide,
slideNode: this.activeSlide,
slideConfig: this.elements[this.index].slideConfig,
slideIndex: this.index,
trigger: this.elements[this.index].node,
player: this.getSlidePlayerInstance(this.index)
};
if (slideMedia.offsetWidth > 0 && slideDesc) {
hide(slideDesc);
slideDesc.style.display = '';
}
removeClass(slide, this.effectsClasses);
if (first) {
animateElement(slide, this.settings.cssEfects[this.settings.openEffect]["in"], function () {
if (_this4.settings.autoplayVideos) {
_this4.slidePlayerPlay(slide);
}
_this4.trigger('slide_changed', {
prev: prevData,
current: nextData
});
if (isFunction(_this4.settings.afterSlideChange)) {
_this4.settings.afterSlideChange.apply(_this4, [prevData, nextData]);
}
});
} else {
var effectName = this.settings.slideEffect;
var animIn = effectName !== 'none' ? this.settings.cssEfects[effectName]["in"] : effectName;
if (this.prevActiveSlideIndex > this.index) {
if (this.settings.slideEffect == 'slide') {
animIn = this.settings.cssEfects.slideBack["in"];
}
}
animateElement(slide, animIn, function () {
if (_this4.settings.autoplayVideos) {
_this4.slidePlayerPlay(slide);
}
_this4.trigger('slide_changed', {
prev: prevData,
current: nextData
});
if (isFunction(_this4.settings.afterSlideChange)) {
_this4.settings.afterSlideChange.apply(_this4, [prevData, nextData]);
}
});
}
setTimeout(function () {
_this4.resize(slide);
}, 100);
addClass(slide, 'current');
}
}, {
key: "slideAnimateOut",
value: function slideAnimateOut() {
if (!this.prevActiveSlide) {
return false;
}
var prevSlide = this.prevActiveSlide;
removeClass(prevSlide, this.effectsClasses);
addClass(prevSlide, 'prev');
var animation = this.settings.slideEffect;
var animOut = animation !== 'none' ? this.settings.cssEfects[animation].out : animation;
this.slidePlayerPause(prevSlide);
this.trigger('slide_before_change', {
prev: {
index: this.prevActiveSlideIndex,
slide: this.prevActiveSlide,
slideNode: this.prevActiveSlide,
slideIndex: this.prevActiveSlideIndex,
slideConfig: isNil(this.prevActiveSlideIndex) ? null : this.elements[this.prevActiveSlideIndex].slideConfig,
trigger: isNil(this.prevActiveSlideIndex) ? null : this.elements[this.prevActiveSlideIndex].node,
player: this.getSlidePlayerInstance(this.prevActiveSlideIndex)
},
current: {
index: this.index,
slide: this.activeSlide,
slideNode: this.activeSlide,
slideIndex: this.index,
slideConfig: this.elements[this.index].slideConfig,
trigger: this.elements[this.index].node,
player: this.getSlidePlayerInstance(this.index)
}
});
if (isFunction(this.settings.beforeSlideChange)) {
this.settings.beforeSlideChange.apply(this, [{
index: this.prevActiveSlideIndex,
slide: this.prevActiveSlide,
player: this.getSlidePlayerInstance(this.prevActiveSlideIndex)
}, {
index: this.index,
slide: this.activeSlide,
player: this.getSlidePlayerInstance(this.index)
}]);
}
if (this.prevActiveSlideIndex > this.index && this.settings.slideEffect == 'slide') {
animOut = this.settings.cssEfects.slideBack.out;
}
animateElement(prevSlide, animOut, function () {
var container = prevSlide.querySelector('.ginner-container');
var media = prevSlide.querySelector('.gslide-media');
var desc = prevSlide.querySelector('.gslide-description');
container.style.transform = '';
media.style.transform = '';
removeClass(media, 'greset');
media.style.opacity = '';
if (desc) {
desc.style.opacity = '';
}
removeClass(prevSlide, 'prev');
});
}
}, {
key: "getAllPlayers",
value: function getAllPlayers() {
return this.videoPlayers;
}
}, {
key: "getSlidePlayerInstance",
value: function getSlidePlayerInstance(index) {
var id = 'gvideo' + index;
var videoPlayers = this.getAllPlayers();
if (has(videoPlayers, id) && videoPlayers[id]) {
return videoPlayers[id];
}
return false;
}
}, {
key: "stopSlideVideo",
value: function stopSlideVideo(slide) {
if (isNode(slide)) {
var node = slide.querySelector('.gvideo-wrapper');
if (node) {
slide = node.getAttribute('data-index');
}
}
console.log('stopSlideVideo is deprecated, use slidePlayerPause');
var player = this.getSlidePlayerInstance(slide);
if (player && player.playing) {
player.pause();
}
}
}, {
key: "slidePlayerPause",
value: function slidePlayerPause(slide) {
if (isNode(slide)) {
var node = slide.querySelector('.gvideo-wrapper');
if (node) {
slide = node.getAttribute('data-index');
}
}
var player = this.getSlidePlayerInstance(slide);
if (player && player.playing) {
player.pause();
}
}
}, {
key: "playSlideVideo",
value: function playSlideVideo(slide) {
if (isNode(slide)) {
var node = slide.querySelector('.gvideo-wrapper');
if (node) {
slide = node.getAttribute('data-index');
}
}
console.log('playSlideVideo is deprecated, use slidePlayerPlay');
var player = this.getSlidePlayerInstance(slide);
if (player && !player.playing) {
player.play();
}
}
}, {
key: "slidePlayerPlay",
value: function slidePlayerPlay(slide) {
var _this$settings$plyr$c;
if (isMobile$1 && !((_this$settings$plyr$c = this.settings.plyr.config) !== null && _this$settings$plyr$c !== void 0 && _this$settings$plyr$c.muted)) {
return;
}
if (isNode(slide)) {
var node = slide.querySelector('.gvideo-wrapper');
if (node) {
slide = node.getAttribute('data-index');
}
}
var player = this.getSlidePlayerInstance(slide);
if (player && !player.playing) {
player.play();
if (this.settings.autofocusVideos) {
player.elements.container.focus();
}
}
}
}, {
key: "setElements",
value: function setElements(elements) {
var _this5 = this;
this.settings.elements = false;
var newElements = [];
if (elements && elements.length) {
each(elements, function (el, i) {
var slide = new Slide(el, _this5, i);
var data = slide.getConfig();
var slideInfo = extend({}, data);
slideInfo.slideConfig = data;
slideInfo.instance = slide;
slideInfo.index = i;
newElements.push(slideInfo);
});
}
this.elements = newElements;
if (this.lightboxOpen) {
this.slidesContainer.innerHTML = '';
if (this.elements.length) {
each(this.elements, function () {
var slide = createHTML(_this5.settings.slideHTML);
_this5.slidesContainer.appendChild(slide);
});
this.showSlide(0, true);
}
}
}
}, {
key: "getElementIndex",
value: function getElementIndex(node) {
var index = false;
each(this.elements, function (el, i) {
if (has(el, 'node') && el.node == node) {
index = i;
return true;
}
});
return index;
}
}, {
key: "getElements",
value: function getElements() {
var _this6 = this;
var list = [];
this.elements = this.elements ? this.elements : [];
if (!isNil(this.settings.elements) && isArray(this.settings.elements) && this.settings.elements.length) {
each(this.settings.elements, function (el, i) {
var slide = new Slide(el, _this6, i);
var elData = slide.getConfig();
var slideInfo = extend({}, elData);
slideInfo.node = false;
slideInfo.index = i;
slideInfo.instance = slide;
slideInfo.slideConfig = elData;
list.push(slideInfo);
});
}
var nodes = false;
var selector = this.getSelector();
if (selector) {
nodes = document.querySelectorAll(this.getSelector());
}
if (!nodes) {
return list;
}
each(nodes, function (el, i) {
var slide = new Slide(el, _this6, i);
var elData = slide.getConfig();
var slideInfo = extend({}, elData);
slideInfo.node = el;
slideInfo.index = i;
slideInfo.instance = slide;
slideInfo.slideConfig = elData;
slideInfo.gallery = el.getAttribute('data-gallery');
list.push(slideInfo);
});
return list;
}
}, {
key: "getGalleryElements",
value: function getGalleryElements(list, gallery) {
return list.filter(function (el) {
return el.gallery == gallery;
});
}
}, {
key: "getSelector",
value: function getSelector() {
if (this.settings.elements) {
return false;
}
if (this.settings.selector && this.settings.selector.substring(0, 5) == 'data-') {
return "*[".concat(this.settings.selector, "]");
}
return this.settings.selector;
}
}, {
key: "getActiveSlide",
value: function getActiveSlide() {
return this.slidesContainer.querySelectorAll('.gslide')[this.index];
}
}, {
key: "getActiveSlideIndex",
value: function getActiveSlideIndex() {
return this.index;
}
}, {
key: "getAnimationClasses",
value: function getAnimationClasses() {
var effects = [];
for (var key in this.settings.cssEfects) {
if (this.settings.cssEfects.hasOwnProperty(key)) {
var effect = this.settings.cssEfects[key];
effects.push("g".concat(effect["in"]));
effects.push("g".concat(effect.out));
}
}
return effects.join(' ');
}
}, {
key: "build",
value: function build() {
var _this7 = this;
if (this.built) {
return false;
}
var children = document.body.childNodes;
var bodyChildElms = [];
each(children, function (el) {
if (el.parentNode == document.body && el.nodeName.charAt(0) !== '#' && el.hasAttribute && !el.hasAttribute('aria-hidden')) {
bodyChildElms.push(el);
el.setAttribute('aria-hidden', 'true');
}
});
var nextSVG = has(this.settings.svg, 'next') ? this.settings.svg.next : '';
var prevSVG = has(this.settings.svg, 'prev') ? this.settings.svg.prev : '';
var closeSVG = has(this.settings.svg, 'close') ? this.settings.svg.close : '';
var lightboxHTML = this.settings.lightboxHTML;
lightboxHTML = lightboxHTML.replace(/{nextSVG}/g, nextSVG);
lightboxHTML = lightboxHTML.replace(/{prevSVG}/g, prevSVG);
lightboxHTML = lightboxHTML.replace(/{closeSVG}/g, closeSVG);
lightboxHTML = createHTML(lightboxHTML);
document.body.appendChild(lightboxHTML);
var modal = document.getElementById('glightbox-body');
this.modal = modal;
var closeButton = modal.querySelector('.gclose');
this.prevButton = modal.querySelector('.gprev');
this.nextButton = modal.querySelector('.gnext');
this.overlay = modal.querySelector('.goverlay');
this.loader = modal.querySelector('.gloader');
this.slidesContainer = document.getElementById('glightbox-slider');
this.bodyHiddenChildElms = bodyChildElms;
this.events = {};
addClass(this.modal, 'glightbox-' + this.settings.skin);
if (this.settings.closeButton && closeButton) {
this.events['close'] = addEvent('click', {
onElement: closeButton,
withCallback: function withCallback(e, target) {
e.preventDefault();
_this7.close();
}
});
}
if (closeButton && !this.settings.closeButton) {
closeButton.parentNode.removeChild(closeButton);
}
if (this.nextButton) {
this.events['next'] = addEvent('click', {
onElement: this.nextButton,
withCallback: function withCallback(e, target) {
e.preventDefault();
_this7.nextSlide();
}
});
}
if (this.prevButton) {
this.events['prev'] = addEvent('click', {
onElement: this.prevButton,
withCallback: function withCallback(e, target) {
e.preventDefault();
_this7.prevSlide();
}
});
}
if (this.settings.closeOnOutsideClick) {
this.events['outClose'] = addEvent('click', {
onElement: modal,
withCallback: function withCallback(e, target) {
if (!_this7.preventOutsideClick && !hasClass(document.body, 'glightbox-mobile') && !closest(e.target, '.ginner-container')) {
if (!closest(e.target, '.gbtn') && !hasClass(e.target, 'gnext') && !hasClass(e.target, 'gprev')) {
_this7.close();
}
}
}
});
}
each(this.elements, function (slide, i) {
_this7.slidesContainer.appendChild(slide.instance.create());
slide.slideNode = _this7.slidesContainer.querySelectorAll('.gslide')[i];
});
if (isTouch$1) {
addClass(document.body, 'glightbox-touch');
}
this.events['resize'] = addEvent('resize', {
onElement: window,
withCallback: function withCallback() {
_this7.resize();
}
});
this.built = true;
}
}, {
key: "resize",
value: function resize() {
var slide = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null;
slide = !slide ? this.activeSlide : slide;
if (!slide || hasClass(slide, 'zoomed')) {
return;
}
var winSize = windowSize();
var video = slide.querySelector('.gvideo-wrapper');
var image = slide.querySelector('.gslide-image');
var description = this.slideDescription;
var winWidth = winSize.width;
var winHeight = winSize.height;
if (winWidth <= 768) {
addClass(document.body, 'glightbox-mobile');
} else {
removeClass(document.body, 'glightbox-mobile');
}
if (!video && !image) {
return;
}
var descriptionResize = false;
if (description && (hasClass(description, 'description-bottom') || hasClass(description, 'description-top')) && !hasClass(description, 'gabsolute')) {
descriptionResize = true;
}
if (image) {
if (winWidth <= 768) {
var imgNode = image.querySelector('img');
} else if (descriptionResize) {
var descHeight = description.offsetHeight;
var _imgNode = image.querySelector('img');
_imgNode.setAttribute('style', "max-height: calc(100vh - ".concat(descHeight, "px)"));
description.setAttribute('style', "max-width: ".concat(_imgNode.offsetWidth, "px;"));
}
}
if (video) {
var ratio = has(this.settings.plyr.config, 'ratio') ? this.settings.plyr.config.ratio : '';
if (!ratio) {
var containerWidth = video.clientWidth;
var containerHeight = video.clientHeight;
var divisor = containerWidth / containerHeight;
ratio = "".concat(containerWidth / divisor, ":").concat(containerHeight / divisor);
}
var videoRatio = ratio.split(':');
var videoWidth = this.settings.videosWidth;
var maxWidth = this.settings.videosWidth;
if (isNumber(videoWidth) || videoWidth.indexOf('px') !== -1) {
maxWidth = parseInt(videoWidth);
} else {
if (videoWidth.indexOf('vw') !== -1) {
maxWidth = winWidth * parseInt(videoWidth) / 100;
} else if (videoWidth.indexOf('vh') !== -1) {
maxWidth = winHeight * parseInt(videoWidth) / 100;
} else if (videoWidth.indexOf('%') !== -1) {
maxWidth = winWidth * parseInt(videoWidth) / 100;
} else {
maxWidth = parseInt(video.clientWidth);
}
}
var maxHeight = maxWidth / (parseInt(videoRatio[0]) / parseInt(videoRatio[1]));
maxHeight = Math.floor(maxHeight);
if (descriptionResize) {
winHeight = winHeight - description.offsetHeight;
}
if (maxWidth > winWidth || maxHeight > winHeight || winHeight < maxHeight && winWidth > maxWidth) {
var vwidth = video.offsetWidth;
var vheight = video.offsetHeight;
var _ratio = winHeight / vheight;
var vsize = {
width: vwidth * _ratio,
height: vheight * _ratio
};
video.parentNode.setAttribute('style', "max-width: ".concat(vsize.width, "px"));
if (descriptionResize) {
description.setAttribute('style', "max-width: ".concat(vsize.width, "px;"));
}
} else {
video.parentNode.style.maxWidth = "".concat(videoWidth);
if (descriptionResize) {
description.setAttribute('style', "max-width: ".concat(videoWidth, ";"));
}
}
}
}
}, {
key: "reload",
value: function reload() {
this.init();
}
}, {
key: "updateNavigationClasses",
value: function updateNavigationClasses() {
var loop = this.loop();
removeClass(this.nextButton, 'disabled');
removeClass(this.prevButton, 'disabled');
if (this.index == 0 && this.elements.length - 1 == 0) {
addClass(this.prevButton, 'disabled');
addClass(this.nextButton, 'disabled');
} else if (this.index === 0 && !loop) {
addClass(this.prevButton, 'disabled');
} else if (this.index === this.elements.length - 1 && !loop) {
addClass(this.nextButton, 'disabled');
}
}
}, {
key: "loop",
value: function loop() {
var loop = has(this.settings, 'loopAtEnd') ? this.settings.loopAtEnd : null;
loop = has(this.settings, 'loop') ? this.settings.loop : loop;
return loop;
}
}, {
key: "close",
value: function close() {
var _this8 = this;
if (!this.lightboxOpen) {
if (this.events) {
for (var key in this.events) {
if (this.events.hasOwnProperty(key)) {
this.events[key].destroy();
}
}
this.events = null;
}
return false;
}
if (this.closing) {
return false;
}
this.closing = true;
this.slidePlayerPause(this.activeSlide);
if (this.fullElementsList) {
this.elements = this.fullElementsList;
}
if (this.bodyHiddenChildElms.length) {
each(this.bodyHiddenChildElms, function (el) {
el.removeAttribute('aria-hidden');
});
}
addClass(this.modal, 'glightbox-closing');
animateElement(this.overlay, this.settings.openEffect == 'none' ? 'none' : this.settings.cssEfects.fade.out);
animateElement(this.activeSlide, this.settings.cssEfects[this.settings.closeEffect].out, function () {
_this8.activeSlide = null;
_this8.prevActiveSlideIndex = null;
_this8.prevActiveSlide = null;
_this8.built = false;
if (_this8.events) {
for (var _key in _this8.events) {
if (_this8.events.hasOwnProperty(_key)) {
_this8.events[_key].destroy();
}
}
_this8.events = null;
}
var body = document.body;
removeClass(html, 'glightbox-open');
removeClass(body, 'glightbox-open touching gdesc-open glightbox-touch glightbox-mobile gscrollbar-fixer');
_this8.modal.parentNode.removeChild(_this8.modal);
_this8.trigger('close');
if (isFunction(_this8.settings.onClose)) {
_this8.settings.onClose();
}
var styles = document.querySelector('.gcss-styles');
if (styles) {
styles.parentNode.removeChild(styles);
}
_this8.lightboxOpen = false;
_this8.closing = null;
});
}
}, {
key: "destroy",
value: function destroy() {
this.close();
this.clearAllEvents();
if (this.baseEvents) {
this.baseEvents.destroy();
}
}
}, {
key: "on",
value: function on(evt, callback) {
var once = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;
if (!evt || !isFunction(callback)) {
throw new TypeError('Event name and callback must be defined');
}
this.apiEvents.push({
evt: evt,
once: once,
callback: callback
});
}
}, {
key: "once",
value: function once(evt, callback) {
this.on(evt, callback, true);
}
}, {
key: "trigger",
value: function trigger(eventName) {
var _this9 = this;
var data = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : null;
var onceTriggered = [];
each(this.apiEvents, function (event, i) {
var evt = event.evt,
once = event.once,
callback = event.callback;
if (evt == eventName) {
callback(data);
if (once) {
onceTriggered.push(i);
}
}
});
if (onceTriggered.length) {
each(onceTriggered, function (i) {
return _this9.apiEvents.splice(i, 1);
});
}
}
}, {
key: "clearAllEvents",
value: function clearAllEvents() {
this.apiEvents.splice(0, this.apiEvents.length);
}
}, {
key: "version",
value: function version() {
return _version;
}
}]);
return GlightboxInit;
}();
function glightbox () {
var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
var instance = new GlightboxInit(options);
instance.init();
return instance;
}
return glightbox;
})));
/moto-improved-cognitoidentity-1.3.tar.gz/moto-improved-cognitoidentity-1.3/README.md
# Moto - Mock AWS Services
[](https://gitter.im/awsmoto/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://github.com/getmoto/moto/actions)
[](https://codecov.io/gh/getmoto/moto)
[](http://docs.getmoto.org)
[](https://pypi.org/project/moto/)
[](#)
[](https://pypistats.org/packages/moto)
[](https://github.com/psf/black)
## Install
```console
$ pip install moto-improved-cognitoidentity
```
## In a nutshell
Moto is a library that allows your tests to easily mock out AWS Services.
Imagine you have the following python code that you want to test:
```python
import boto3
class MyModel:
def __init__(self, name, value):
self.name = name
self.value = value
def save(self):
s3 = boto3.client("s3", region_name="us-east-1")
s3.put_object(Bucket="mybucket", Key=self.name, Body=self.value)
```
Take a minute to think how you would have tested that in the past.
Now see how you could test it with Moto:
```python
import boto3
from moto import mock_s3
from mymodule import MyModel
@mock_s3
def test_my_model_save():
conn = boto3.resource("s3", region_name="us-east-1")
# We need to create the bucket since this is all in Moto's 'virtual' AWS account
conn.create_bucket(Bucket="mybucket")
model_instance = MyModel("steve", "is awesome")
model_instance.save()
body = conn.Object("mybucket", "steve").get()["Body"].read().decode("utf-8")
assert body == "is awesome"
```
With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys.
For a full list of which services and features are covered, please see our [implementation coverage](https://github.com/getmoto/moto/blob/master/IMPLEMENTATION_COVERAGE.md).
### Documentation
The full documentation can be found here:
[http://docs.getmoto.org/en/latest/](http://docs.getmoto.org/en/latest/)
### Fork Feature
In upstream Moto, the Cognito Identity backend's `get_id()` creates a new identifier on every call: each request returns a freshly generated value. When an `identity_id` is used to associate data with a user, that behaviour makes the code impossible to test deterministically. This fork derives the identifier from the user's data instead, so the same user receives the same value on every request, as sketched in the test below.
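A minimal test sketch of that behaviour (assuming the fork keeps upstream Moto's `mock_cognitoidentity` decorator; the pool name and login token below are purely illustrative):
```python
import boto3
from moto import mock_cognitoidentity


@mock_cognitoidentity
def test_get_id_is_deterministic():
    conn = boto3.client("cognito-identity", region_name="us-east-1")
    pool = conn.create_identity_pool(
        IdentityPoolName="testpool",
        AllowUnauthenticatedIdentities=True,
    )
    logins = {"myprovider.example.com": "user-token"}
    # The same user data should map to the same IdentityId on every call.
    first = conn.get_id(IdentityPoolId=pool["IdentityPoolId"], Logins=logins)
    second = conn.get_id(IdentityPoolId=pool["IdentityPoolId"], Logins=logins)
    assert first["IdentityId"] == second["IdentityId"]
```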
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/network/get_vpn_site.py
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVpnSiteResult',
'AwaitableGetVpnSiteResult',
'get_vpn_site',
'get_vpn_site_output',
]
@pulumi.output_type
class GetVpnSiteResult:
"""
VpnSite Resource.
"""
def __init__(__self__, address_space=None, bgp_properties=None, device_properties=None, etag=None, id=None, ip_address=None, is_security_site=None, location=None, name=None, o365_policy=None, provisioning_state=None, site_key=None, tags=None, type=None, virtual_wan=None, vpn_site_links=None):
if address_space and not isinstance(address_space, dict):
raise TypeError("Expected argument 'address_space' to be a dict")
pulumi.set(__self__, "address_space", address_space)
if bgp_properties and not isinstance(bgp_properties, dict):
raise TypeError("Expected argument 'bgp_properties' to be a dict")
pulumi.set(__self__, "bgp_properties", bgp_properties)
if device_properties and not isinstance(device_properties, dict):
raise TypeError("Expected argument 'device_properties' to be a dict")
pulumi.set(__self__, "device_properties", device_properties)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if is_security_site and not isinstance(is_security_site, bool):
raise TypeError("Expected argument 'is_security_site' to be a bool")
pulumi.set(__self__, "is_security_site", is_security_site)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if o365_policy and not isinstance(o365_policy, dict):
raise TypeError("Expected argument 'o365_policy' to be a dict")
pulumi.set(__self__, "o365_policy", o365_policy)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if site_key and not isinstance(site_key, str):
raise TypeError("Expected argument 'site_key' to be a str")
pulumi.set(__self__, "site_key", site_key)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_wan and not isinstance(virtual_wan, dict):
raise TypeError("Expected argument 'virtual_wan' to be a dict")
pulumi.set(__self__, "virtual_wan", virtual_wan)
if vpn_site_links and not isinstance(vpn_site_links, list):
raise TypeError("Expected argument 'vpn_site_links' to be a list")
pulumi.set(__self__, "vpn_site_links", vpn_site_links)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> Optional['outputs.AddressSpaceResponse']:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> Optional['outputs.BgpSettingsResponse']:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> Optional['outputs.DevicePropertiesResponse']:
"""
The device properties.
"""
return pulumi.get(self, "device_properties")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> Optional[bool]:
"""
IsSecuritySite flag.
"""
return pulumi.get(self, "is_security_site")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="o365Policy")
def o365_policy(self) -> Optional['outputs.O365PolicyPropertiesResponse']:
"""
Office365 Policy.
"""
return pulumi.get(self, "o365_policy")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the VPN site resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> Optional[str]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> Optional['outputs.SubResourceResponse']:
"""
The VirtualWAN to which the vpnSite belongs.
"""
return pulumi.get(self, "virtual_wan")
@property
@pulumi.getter(name="vpnSiteLinks")
def vpn_site_links(self) -> Optional[Sequence['outputs.VpnSiteLinkResponse']]:
"""
List of all vpn site links.
"""
return pulumi.get(self, "vpn_site_links")
class AwaitableGetVpnSiteResult(GetVpnSiteResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVpnSiteResult(
address_space=self.address_space,
bgp_properties=self.bgp_properties,
device_properties=self.device_properties,
etag=self.etag,
id=self.id,
ip_address=self.ip_address,
is_security_site=self.is_security_site,
location=self.location,
name=self.name,
o365_policy=self.o365_policy,
provisioning_state=self.provisioning_state,
site_key=self.site_key,
tags=self.tags,
type=self.type,
virtual_wan=self.virtual_wan,
vpn_site_links=self.vpn_site_links)
def get_vpn_site(resource_group_name: Optional[str] = None,
vpn_site_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVpnSiteResult:
"""
Retrieves the details of a VPN site.
Azure REST API version: 2023-02-01.
:param str resource_group_name: The resource group name of the VpnSite.
:param str vpn_site_name: The name of the VpnSite being retrieved.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['vpnSiteName'] = vpn_site_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network:getVpnSite', __args__, opts=opts, typ=GetVpnSiteResult).value
return AwaitableGetVpnSiteResult(
address_space=pulumi.get(__ret__, 'address_space'),
bgp_properties=pulumi.get(__ret__, 'bgp_properties'),
device_properties=pulumi.get(__ret__, 'device_properties'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
ip_address=pulumi.get(__ret__, 'ip_address'),
is_security_site=pulumi.get(__ret__, 'is_security_site'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
o365_policy=pulumi.get(__ret__, 'o365_policy'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
site_key=pulumi.get(__ret__, 'site_key'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'),
virtual_wan=pulumi.get(__ret__, 'virtual_wan'),
vpn_site_links=pulumi.get(__ret__, 'vpn_site_links'))
@_utilities.lift_output_func(get_vpn_site)
def get_vpn_site_output(resource_group_name: Optional[pulumi.Input[str]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVpnSiteResult]:
"""
Retrieves the details of a VPN site.
Azure REST API version: 2023-02-01.
:param str resource_group_name: The resource group name of the VpnSite.
:param str vpn_site_name: The name of the VpnSite being retrieved.
"""
...
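# Usage sketch (illustrative only, not part of the generated module): the
# resource group and site names below are hypothetical, and a configured
# Azure Native provider is assumed.
#
#   import pulumi
#   import pulumi_azure_native as azure_native
#
#   site = azure_native.network.get_vpn_site_output(
#       resource_group_name="example-rg",
#       vpn_site_name="example-vpn-site",
#   )
#   pulumi.export("vpn_site_ip", site.ip_address)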
/dgl_cu100-0.5.3-cp38-cp38-win_amd64.whl/dgl_cu100-0.5.3.data/purelib/dgl/ops/spmm.py
import sys
from ..backend import gspmm as gspmm_internal
from .. import backend as F
__all__ = ['gspmm']
def gspmm(g, op, reduce_op, lhs_data, rhs_data):
r""" Generalized Sparse Matrix Multiplication interface.
It fuses two steps into one kernel.
1. Computes messages by applying :attr:`op` to source node and edge features.
2. Aggregates the messages by :attr:`reduce_op` as the features on destination nodes.
.. math::
x_v = \psi_{(u, v, e)\in \mathcal{G}}(\rho(x_u, x_e))
where :math:`x_v` is the returned feature on destination nodes, and :math:`x_u`,
:math:`x_e` refer to the source node and edge features respectively. :math:`\rho` means binary
operator :attr:`op` and :math:`\psi` means reduce operator :attr:`reduce_op`,
:math:`\mathcal{G}` is the graph we apply gspmm on: :attr:`g`.
Note that this function does not handle gradients.
Parameters
----------
g : DGLGraph
The input graph.
op : str
The binary op's name, could be ``add``, ``sub``, ``mul``, ``div``,
``copy_lhs``, ``copy_rhs``.
reduce_op : str
Reduce operator, could be ``sum``, ``max``, ``min``, ``mean``.
lhs_data : tensor or None
The left operand, could be None if it's not required by the op.
rhs_data : tensor or None
The right operand, could be None if it's not required by the op.
Returns
-------
tensor
The result tensor.
"""
if op not in ['copy_lhs', 'copy_rhs']:
# Expand dims so that there will be no broadcasting issues with different
# number of dimensions. For example, given two shapes (N, 3, 1), (E, 5, 3, 4)
# that are valid broadcastable shapes, change them to (N, 1, 3, 1) and
# (E, 5, 3, 4)
lhs_shape = F.shape(lhs_data)
rhs_shape = F.shape(rhs_data)
if len(lhs_shape) != len(rhs_shape):
max_ndims = max(len(lhs_shape), len(rhs_shape))
lhs_pad_ndims = max_ndims - len(lhs_shape)
rhs_pad_ndims = max_ndims - len(rhs_shape)
new_lhs_shape = (lhs_shape[0],) + (1,) * lhs_pad_ndims + lhs_shape[1:]
new_rhs_shape = (rhs_shape[0],) + (1,) * rhs_pad_ndims + rhs_shape[1:]
lhs_data = F.reshape(lhs_data, new_lhs_shape)
rhs_data = F.reshape(rhs_data, new_rhs_shape)
# With max and min reducers, infinity will be returned for zero-degree nodes.
ret = gspmm_internal(g._graph, op,
'sum' if reduce_op == 'mean' else reduce_op,
lhs_data, rhs_data)
# Replace infinity with zero for isolated nodes when reducer is min/max
if reduce_op in ['min', 'max']:
ret = F.replace_inf_with_zero(ret)
# divide in degrees for mean reducer.
if reduce_op == 'mean':
ret_shape = F.shape(ret)
deg = g.in_degrees()
deg = F.astype(F.clamp(deg, 1, g.number_of_edges()), F.dtype(ret))
deg_shape = (ret_shape[0],) + (1,) * (len(ret_shape) - 1)
return ret / F.reshape(deg, deg_shape)
else:
return ret
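# Usage sketch for ``gspmm`` (hedged: assumes a PyTorch backend is active and
# ``dgl`` is importable; the graph and feature shapes are illustrative only):
#
#     import dgl
#     import torch
#
#     g = dgl.graph(([0, 1, 2], [1, 2, 3]))  # 4 nodes, 3 edges
#     x = torch.randn(4, 8)                  # source-node features
#     w = torch.randn(3, 8)                  # edge features
#     h = gspmm(g, 'mul', 'sum', x, w)       # destination-node features, shape (4, 8)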
def _attach_zerodeg_note(docstring, reducer):
note1 = """
The {} function will return zero for nodes with no incoming messages.""".format(reducer)
note2 = """
This is implemented by replacing all {} values with zero.
""".format("infinity" if reducer == "min" else "negative infinity")
docstring = docstring + note1
if reducer in ('min', 'max'):
docstring = docstring + note2
return docstring
def _gen_spmm_func(binary_op, reduce_op):
name = "u_{}_e_{}".format(binary_op, reduce_op)
docstring = """Generalized SpMM function.
It fuses two steps into one kernel.
1. Computes messages by applying {} to source node and edge features.
2. Aggregates the messages by {} as the features on destination nodes.
Parameters
----------
g : DGLHeteroGraph
The input graph
x : tensor
The source node features.
y : tensor
The edge features.
Returns
-------
tensor
The result tensor.
Notes
-----
This function supports autograd (computing input gradients given the output gradient). If the
feature shapes of the two input operands do not match, we first broadcast the features to a unified
shape (note that the memory usage will not increase accordingly) and then perform the operation.
Broadcasting follows NumPy semantics. Please see
https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
for more details about the NumPy broadcasting semantics.
""".format(binary_op, reduce_op)
docstring = _attach_zerodeg_note(docstring, reduce_op)
def func(g, x, y):
return gspmm(g, binary_op, reduce_op, x, y)
func.__name__ = name
func.__doc__ = docstring
return func
def _gen_copy_reduce_func(binary_op, reduce_op):
name = "{}_{}".format(binary_op, reduce_op)
binary_str = {
"copy_u": "It copies node feature to edge as the message.",
'copy_e': "It regards edge feature as message."
}
x_str = {
"copy_u": "source node",
"copy_e": "edge"
}
docstring = lambda binary_op: _attach_zerodeg_note("""Generalized SpMM function. {}
Then aggregates the message by {} on destination nodes.
Parameters
----------
g : DGLHeteroGraph
The input graph
x : tensor
The {} features.
Returns
-------
tensor
The result tensor.
Notes
-----
This function supports autograd (computing input gradients given the output gradient).
""".format(
binary_str[binary_op],
reduce_op,
x_str[binary_op]), reduce_op)
def func(g, x):
if binary_op == 'copy_u':
return gspmm(g, 'copy_lhs', reduce_op, x, None)
else:
return gspmm(g, 'copy_rhs', reduce_op, None, x)
func.__name__ = name
func.__doc__ = docstring(binary_op)
return func
def _register_spmm_func():
"""Register spmm functions
- Binary operation plus reduction between u and e: u_[]_e_[]
- Copy u plus reduction: copy_u_[]
- Copy e plus reduction: copy_e_[]
"""
for binary_op in ["add", "sub", "mul", "div", "copy_u", "copy_e"]:
for reduce_op in ["sum", "max", "min", "mean"]:
if binary_op.startswith("copy"):
func = _gen_copy_reduce_func(binary_op, reduce_op)
else:
func = _gen_spmm_func(binary_op, reduce_op)
setattr(sys.modules[__name__], func.__name__, func)
__all__.append(func.__name__)
_register_spmm_func()
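# After ``_register_spmm_func()`` runs, this module exposes one function per
# combination, e.g. ``u_mul_e_sum`` and ``copy_u_max``. A hedged usage sketch
# (assumes a PyTorch backend; ``g``, ``x`` and ``w`` as in the ``gspmm``
# example above):
#
#     from dgl.ops import u_mul_e_sum, copy_u_max
#
#     h1 = u_mul_e_sum(g, x, w)  # equivalent to gspmm(g, 'mul', 'sum', x, w)
#     h2 = copy_u_max(g, x)      # equivalent to gspmm(g, 'copy_lhs', 'max', x, None)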
/silvio-0.1.9.tar.gz/silvio-0.1.9/CONTRIBUTING.rst
.. highlight:: shell
============
Contributing
============
TODO: These contributing notes still follow the generic GitHub template and need to be adapted to this project.
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/ulf.liebal/silvio/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Silvio could always use more documentation, whether as part of the
official Silvio docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/ulf.liebal/silvio/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `silvio` for local development.
1. Fork the `silvio` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/silvio.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv silvio
$ cd silvio/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 silvio tests
    $ python setup.py test  # or simply: pytest
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.9, and for PyPy. Check
https://travis-ci.com/ulf.liebal/silvio/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ pytest tests.test_basic
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bump2version patch # possible: major / minor / patch
$ git push
$ git push --tags
Travis will then deploy to PyPI if tests pass.
/gcloud_aio_storage-8.3.0-py3-none-any.whl/gcloud/aio/storage/storage.py
import binascii
import enum
import io
import json
import logging
import mimetypes
import os
from typing import Any
from typing import AnyStr
from typing import Dict
from typing import IO
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from urllib.parse import quote
from gcloud.aio.auth import AioSession # pylint: disable=no-name-in-module
from gcloud.aio.auth import BUILD_GCLOUD_REST # pylint: disable=no-name-in-module
from gcloud.aio.auth import Token # pylint: disable=no-name-in-module
from gcloud.aio.storage.bucket import Bucket
from gcloud.aio.storage.constants import DEFAULT_TIMEOUT
# Selectively load libraries based on the package
if BUILD_GCLOUD_REST:
from time import sleep
from requests import HTTPError as ResponseError
from requests import Session
from builtins import open as file_open
else:
from aiofiles import open as file_open # type: ignore[no-redef]
from asyncio import sleep # type: ignore[assignment]
from aiohttp import ( # type: ignore[assignment]
ClientResponseError as ResponseError,
)
from aiohttp import ClientSession as Session # type: ignore[assignment]
MAX_CONTENT_LENGTH_SIMPLE_UPLOAD = 5 * 1024 * 1024 # 5 MB
SCOPES = [
'https://www.googleapis.com/auth/devstorage.read_write',
]
log = logging.getLogger(__name__)
def init_api_root(api_root: Optional[str]) -> Tuple[bool, str]:
if api_root:
return True, api_root
host = os.environ.get('STORAGE_EMULATOR_HOST')
if host:
return True, f'http://{host}'
return False, 'https://www.googleapis.com'
def choose_boundary() -> str:
"""Stolen from urllib3.filepost.choose_boundary() as of v1.26.2."""
return binascii.hexlify(os.urandom(16)).decode('ascii')
def encode_multipart_formdata(
fields: List[Tuple[Dict[str, str], bytes]],
boundary: str,
) -> Tuple[bytes, str]:
"""
Stolen from urllib3.filepost.encode_multipart_formdata() as of v1.26.2.
Very heavily modified to be compatible with our gcloud-rest converter and
to avoid unnecessary urllib3 dependencies (since that's only included with
requests, not aiohttp).
"""
body: List[bytes] = []
for headers, data in fields:
body.append(f'--{boundary}\r\n'.encode())
# The below is from RequestFields.render_headers()
# Since we only use Content-Type, we could simplify the below to a
# single line... but probably best to be safe for future modifications.
for field in [
'Content-Disposition', 'Content-Type',
'Content-Location',
]:
value = headers.pop(field, None)
if value:
body.append(f'{field}: {value}\r\n'.encode())
for field, value in headers.items():
# N.B. potential bug copied from urllib3 code; zero values should
# be sent! Keeping it for now, since Google libs use urllib3 for
# their examples.
if value:
body.append(f'{field}: {value}\r\n'.encode())
body.append(b'\r\n')
body.append(data)
body.append(b'\r\n')
body.append(f'--{boundary}--\r\n'.encode())
# N.B. 'multipart/form-data' in upstream, but Google wants 'related'
content_type = f'multipart/related; boundary={boundary}'
return b''.join(body), content_type
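# Illustrative sketch of the payload produced above (hedged: the headers and
# data are placeholders):
#
#     body, content_type = encode_multipart_formdata(
#         [({'Content-Type': 'application/json'}, b'{"name": "obj"}'),
#          ({'Content-Type': 'text/plain'}, b'hello')],
#         choose_boundary(),
#     )
#     # content_type == 'multipart/related; boundary=<boundary>'
#     # body == b'--<boundary>\r\nContent-Type: application/json\r\n\r\n'
#     #         b'{"name": "obj"}\r\n--<boundary>\r\nContent-Type: text/plain\r\n'
#     #         b'\r\nhello\r\n--<boundary>--\r\n'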
class UploadType(enum.Enum):
SIMPLE = 1
RESUMABLE = 2
MULTIPART = 3 # unused: SIMPLE upgrades to MULTIPART when metadata exists
class StreamResponse:
"""This class provides an abstraction between the slightly different
recommended streaming implementations between requests and aiohttp.
"""
def __init__(self, response: Any) -> None:
self._response = response
self._iter: Optional[Iterator[bytes]] = None
@property
def content_length(self) -> int:
return int(self._response.headers.get('content-length', 0))
async def read(self, size: int = -1) -> bytes:
chunk: bytes
if BUILD_GCLOUD_REST:
if self._iter is None:
self._iter = self._response.iter_content(chunk_size=size)
chunk = next(self._iter, b'')
else:
chunk = await self._response.content.read(size)
return chunk
async def __aenter__(self) -> Any:
# strictly speaking, since this method can't be called via gcloud-rest,
# we know the return type is aiohttp.ClientResponse
return await self._response.__aenter__()
async def __aexit__(self, *exc_info: Any) -> None:
await self._response.__aexit__(*exc_info)
class Storage:
_api_root: str
_api_is_dev: bool
_api_root_read: str
_api_root_write: str
def __init__(
self, *, service_file: Optional[Union[str, IO[AnyStr]]] = None,
token: Optional[Token] = None, session: Optional[Session] = None,
api_root: Optional[str] = None,
) -> None:
self._api_is_dev, self._api_root = init_api_root(api_root)
self._api_root_read = f'{self._api_root}/storage/v1/b'
self._api_root_write = f'{self._api_root}/upload/storage/v1/b'
self.session = AioSession(session, verify_ssl=not self._api_is_dev)
self.token = token or Token(
service_file=service_file, scopes=SCOPES,
session=self.session.session, # type: ignore[arg-type]
)
async def _headers(self) -> Dict[str, str]:
if self._api_is_dev:
return {}
token = await self.token.get()
return {
'Authorization': f'Bearer {token}',
}
# This method makes the following API call:
# https://cloud.google.com/storage/docs/json_api/v1/buckets/list
async def list_buckets(
self, project: str, *,
params: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, Any]] = None,
session: Optional[Session] = None,
timeout: int = DEFAULT_TIMEOUT,
) -> List[Bucket]:
url = f'{self._api_root_read}?project={project}'
headers = headers or {}
headers.update(await self._headers())
params = params or {}
if not params.get('pageToken'):
params['pageToken'] = ''
s = AioSession(session) if session else self.session
buckets = []
while True:
resp = await s.get(url, headers=headers,
params=params or {},
timeout=timeout)
content: Dict[str, Any] = await resp.json(content_type=None)
for item in content.get('items', []):
buckets.append(Bucket(self, item['id']))
params['pageToken'] = content.get('nextPageToken', '')
if not params['pageToken']:
break
return buckets
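# Usage sketch (hedged: the project id is a placeholder; valid credentials or
# a STORAGE_EMULATOR_HOST environment variable are assumed):
#
#     async with Storage() as client:
#         buckets = await client.list_buckets('my-project')
#         names = [bucket.name for bucket in buckets]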
def get_bucket(self, bucket_name: str) -> Bucket:
return Bucket(self, bucket_name)
async def copy(
self, bucket: str, object_name: str,
destination_bucket: str, *, new_name: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
params: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
timeout: int = DEFAULT_TIMEOUT,
session: Optional[Session] = None,
) -> Dict[str, Any]:
"""
When files are too large, multiple calls to `rewriteTo` are made. We
refer to the same copy job by using the `rewriteToken` from the
previous return payload in subsequent `rewriteTo` calls.
Using the `rewriteTo` GCS API is preferred in part because it is able
to make multiple calls to fully copy an object whereas the `copyTo` GCS
API only calls `rewriteTo` once under the hood, and thus may fail if
files are large.
In the rare case you need to resume a copy operation, include the
`rewriteToken` in the `params` dictionary. Once you begin a multi-part
copy operation, you then have 1 week to complete the copy job.
https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite
"""
# pylint: disable=too-many-locals
if not new_name:
new_name = object_name
url = (
f'{self._api_root_read}/{bucket}/o/'
f'{quote(object_name, safe="")}/rewriteTo/b/'
f'{destination_bucket}/o/{quote(new_name, safe="")}'
)
# We may optionally supply metadata* to apply to the rewritten
# object, which explains why `rewriteTo` is a POST endpoint; when no
# metadata is given, we have to send an empty body.
# * https://cloud.google.com/storage/docs/json_api/v1/objects#resource
metadict = (metadata or {}).copy()
metadict = {
self._format_metadata_key(k): v
for k, v in metadict.items()
}
if 'metadata' in metadict:
metadict['metadata'] = {
str(k): str(v) if v is not None else None
for k, v in metadict['metadata'].items()
}
metadata_ = json.dumps(metadict)
headers = headers or {}
headers.update(await self._headers())
headers.update({
'Content-Length': str(len(metadata_)),
'Content-Type': 'application/json; charset=UTF-8',
})
params = params or {}
s = AioSession(session) if session else self.session
resp = await s.post(
url, headers=headers, params=params, timeout=timeout,
data=metadata_,
)
data: Dict[str, Any] = await resp.json(content_type=None)
while not data.get('done') and data.get('rewriteToken'):
params['rewriteToken'] = data['rewriteToken']
resp = await s.post(
url, headers=headers, params=params,
timeout=timeout,
)
data = await resp.json(content_type=None)
return data
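# Usage sketch (hedged: bucket and object names are placeholders). To resume an
# interrupted multi-call copy, pass the previously returned ``rewriteToken``
# via ``params``:
#
#     async with Storage() as client:
#         resp = await client.copy(
#             'src-bucket', 'big-object', 'dst-bucket',
#             new_name='big-object-copy',
#             params={'rewriteToken': saved_token})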
async def delete(
self, bucket: str, object_name: str, *,
timeout: int = DEFAULT_TIMEOUT,
params: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
session: Optional[Session] = None,
) -> str:
# https://cloud.google.com/storage/docs/request-endpoints#encoding
encoded_object_name = quote(object_name, safe='')
url = f'{self._api_root_read}/{bucket}/o/{encoded_object_name}'
headers = headers or {}
headers.update(await self._headers())
s = AioSession(session) if session else self.session
resp = await s.delete(
url, headers=headers, params=params or {},
timeout=timeout,
)
try:
data: str = await resp.text()
except (AttributeError, TypeError):
data = str(resp.text)
return data
async def download(
self, bucket: str, object_name: str, *,
headers: Optional[Dict[str, Any]] = None,
timeout: int = DEFAULT_TIMEOUT,
session: Optional[Session] = None,
) -> bytes:
return await self._download(
bucket, object_name, headers=headers,
timeout=timeout, params={'alt': 'media'},
session=session,
)
async def download_to_filename(
self, bucket: str, object_name: str,
filename: str, **kwargs: Any,
) -> None:
async with file_open( # type: ignore[attr-defined]
filename,
mode='wb+',
) as file_object:
await file_object.write(
await self.download(bucket, object_name, **kwargs),
)
async def download_metadata(
self, bucket: str, object_name: str, *,
headers: Optional[Dict[str, Any]] = None,
session: Optional[Session] = None,
timeout: int = DEFAULT_TIMEOUT,
) -> Dict[str, Any]:
data = await self._download(
bucket, object_name, headers=headers,
timeout=timeout, session=session,
)
metadata: Dict[str, Any] = json.loads(data.decode())
return metadata
async def download_stream(
self, bucket: str, object_name: str, *,
headers: Optional[Dict[str, Any]] = None,
timeout: int = DEFAULT_TIMEOUT,
session: Optional[Session] = None,
) -> StreamResponse:
"""Download a GCS object in a buffered stream.
Args:
bucket (str): The bucket from which to download.
object_name (str): The object within the bucket to download.
headers (Optional[Dict[str, Any]], optional): Custom header values
for the request, such as range. Defaults to None.
timeout (int, optional): Timeout, in seconds, for the request. Note
that with this function, this is the time to the beginning of
the response data (TTFB). Defaults to 10.
session (Optional[Session], optional): A specific session to
(re)use. Defaults to None.
Returns:
StreamResponse: An object encapsulating the stream, similar to
io.BufferedIOBase, but it only supports the read() function.
"""
return await self._download_stream(
bucket, object_name,
headers=headers, timeout=timeout,
params={'alt': 'media'},
session=session,
)
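# Usage sketch (hedged: names and the chunk size are placeholders):
#
#     async with Storage() as client:
#         stream = await client.download_stream('my-bucket', 'big-object')
#         with open('/tmp/big-object', 'wb') as f:
#             while True:
#                 chunk = await stream.read(64 * 1024)
#                 if not chunk:
#                     break
#                 f.write(chunk)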
async def list_objects(
self, bucket: str, *,
params: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, Any]] = None,
session: Optional[Session] = None,
timeout: int = DEFAULT_TIMEOUT,
) -> Dict[str, Any]:
url = f'{self._api_root_read}/{bucket}/o'
headers = headers or {}
headers.update(await self._headers())
s = AioSession(session) if session else self.session
resp = await s.get(
url, headers=headers, params=params or {},
timeout=timeout,
)
data: Dict[str, Any] = await resp.json(content_type=None)
return data
# https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload
# pylint: disable=too-many-locals
async def upload(
self, bucket: str, object_name: str, file_data: Any,
*, content_type: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
metadata: Optional[Dict[str, Any]] = None,
session: Optional[Session] = None,
force_resumable_upload: Optional[bool] = None,
timeout: int = 30,
) -> Dict[str, Any]:
url = f'{self._api_root_write}/{bucket}/o'
stream = self._preprocess_data(file_data)
if BUILD_GCLOUD_REST and isinstance(stream, io.StringIO):
# HACK: `requests` library does not accept `str` as `data` in `put`
# HTTP request.
stream = io.BytesIO(stream.getvalue().encode('utf-8'))
content_length = self._get_stream_len(stream)
# mime detection method same as in aiohttp 3.4.4
content_type = content_type or mimetypes.guess_type(object_name)[0]
parameters = parameters or {}
headers = headers or {}
headers.update(await self._headers())
headers.update({
'Content-Length': str(content_length),
'Content-Type': content_type or '',
})
upload_type = self._decide_upload_type(
force_resumable_upload,
content_length,
)
log.debug('using %r gcloud storage upload method', upload_type)
if upload_type == UploadType.RESUMABLE:
return await self._upload_resumable(
url, object_name, stream, parameters, headers,
metadata=metadata, session=session, timeout=timeout,
)
if upload_type == UploadType.SIMPLE:
if metadata:
return await self._upload_multipart(
url, object_name, stream, parameters, headers, metadata,
session=session, timeout=timeout,
)
return await self._upload_simple(
url, object_name, stream, parameters, headers, session=session,
timeout=timeout,
)
raise TypeError(f'upload type {upload_type} not supported')
async def upload_from_filename(
self, bucket: str, object_name: str,
filename: str,
**kwargs: Any,
) -> Dict[str, Any]:
async with file_open( # type: ignore[attr-defined]
filename,
mode='rb',
) as file_object:
contents = await file_object.read()
return await self.upload(bucket, object_name, contents, **kwargs)
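# Usage sketch (hedged: bucket, object and file names are placeholders).
# Payloads above MAX_CONTENT_LENGTH_SIMPLE_UPLOAD automatically switch to the
# resumable flow, and supplying ``metadata`` upgrades a simple upload to a
# multipart one:
#
#     async with Storage() as client:
#         await client.upload('my-bucket', 'hello.txt', b'hello world',
#                             content_type='text/plain')
#         await client.upload_from_filename('my-bucket', 'backup.tar.gz',
#                                           '/tmp/backup.tar.gz')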
@staticmethod
def _get_stream_len(stream: IO[AnyStr]) -> int:
current = stream.tell()
try:
return stream.seek(0, os.SEEK_END)
finally:
stream.seek(current)
@staticmethod
def _preprocess_data(data: Any) -> IO[Any]:
if data is None:
return io.StringIO('')
if isinstance(data, bytes):
return io.BytesIO(data)
if isinstance(data, str):
return io.StringIO(data)
if isinstance(data, io.IOBase):
return data # type: ignore[return-value]
raise TypeError(f'unsupported upload type: "{type(data)}"')
@staticmethod
def _decide_upload_type(
force_resumable_upload: Optional[bool],
content_length: int,
) -> UploadType:
# force resumable
if force_resumable_upload is True:
return UploadType.RESUMABLE
# force simple
if force_resumable_upload is False:
return UploadType.SIMPLE
# decide based on Content-Length
if content_length > MAX_CONTENT_LENGTH_SIMPLE_UPLOAD:
return UploadType.RESUMABLE
return UploadType.SIMPLE
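# Worked examples of the decision above (the sizes are illustrative):
#
#     Storage._decide_upload_type(None, 3 * 1024 * 1024)  # -> UploadType.SIMPLE
#     Storage._decide_upload_type(None, 6 * 1024 * 1024)  # -> UploadType.RESUMABLE
#     Storage._decide_upload_type(True, 1024)             # -> UploadType.RESUMABLE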
@staticmethod
def _split_content_type(content_type: str) -> Tuple[str, Optional[str]]:
content_type_and_encoding_split = content_type.split(';')
content_type = content_type_and_encoding_split[0].lower().strip()
encoding = None
if len(content_type_and_encoding_split) > 1:
encoding_str = content_type_and_encoding_split[1].lower().strip()
encoding = encoding_str.split('=')[-1]
return content_type, encoding
@staticmethod
def _format_metadata_key(key: str) -> str:
"""
Formats the fixed-key metadata keys as wanted by the multipart API.
Ex: Content-Disposition --> contentDisposition
"""
parts = key.split('-')
parts = [parts[0].lower()] + [p.capitalize() for p in parts[1:]]
return ''.join(parts)
async def _download(
self, bucket: str, object_name: str, *,
params: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
timeout: int = DEFAULT_TIMEOUT,
session: Optional[Session] = None,
) -> bytes:
# https://cloud.google.com/storage/docs/request-endpoints#encoding
encoded_object_name = quote(object_name, safe='')
url = f'{self._api_root_read}/{bucket}/o/{encoded_object_name}'
headers = headers or {}
headers.update(await self._headers())
s = AioSession(session) if session else self.session
response = await s.get(
url, headers=headers, params=params or {},
timeout=timeout,
)
# N.B. the GCS API sometimes returns 'application/octet-stream' when a
# string was uploaded. To avoid potential weirdness, always return a
# bytes object.
try:
data: bytes = await response.read()
except (AttributeError, TypeError):
data = response.content # type: ignore[assignment]
return data
async def _download_stream(
self, bucket: str, object_name: str, *,
params: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
timeout: int = DEFAULT_TIMEOUT,
session: Optional[Session] = None,
) -> StreamResponse:
# https://cloud.google.com/storage/docs/request-endpoints#encoding
encoded_object_name = quote(object_name, safe='')
url = f'{self._api_root_read}/{bucket}/o/{encoded_object_name}'
headers = headers or {}
headers.update(await self._headers())
s = AioSession(session) if session else self.session
if BUILD_GCLOUD_REST:
# stream argument is only expected by requests.Session.
# pylint: disable=unexpected-keyword-arg
return StreamResponse(
s.get(
url, headers=headers, params=params or {},
timeout=timeout, stream=True,
),
)
return StreamResponse(
await s.get(
url, headers=headers, params=params or {},
timeout=timeout,
),
)
async def _upload_simple(
self, url: str, object_name: str,
stream: IO[AnyStr], params: Dict[str, str],
headers: Dict[str, str], *,
session: Optional[Session] = None,
timeout: int = 30,
) -> Dict[str, Any]:
# https://cloud.google.com/storage/docs/json_api/v1/how-tos/simple-upload
params['name'] = object_name
params['uploadType'] = 'media'
s = AioSession(session) if session else self.session
resp = await s.post(
url, data=stream, headers=headers, params=params,
timeout=timeout,
)
data: Dict[str, Any] = await resp.json(content_type=None)
return data
async def _upload_multipart(
self, url: str, object_name: str,
stream: IO[AnyStr], params: Dict[str, str],
headers: Dict[str, str],
metadata: Dict[str, Any], *,
session: Optional[Session] = None,
timeout: int = 30,
) -> Dict[str, Any]:
# https://cloud.google.com/storage/docs/json_api/v1/how-tos/multipart-upload
params['uploadType'] = 'multipart'
metadata_headers = {'Content-Type': 'application/json; charset=UTF-8'}
metadata = {
self._format_metadata_key(k): v
for k, v in metadata.items()
}
if 'metadata' in metadata:
metadata['metadata'] = {
str(k): str(v) if v is not None else None
for k, v in metadata['metadata'].items()
}
metadata['name'] = object_name
raw_body: AnyStr = stream.read()
if isinstance(raw_body, str):
bytes_body: bytes = raw_body.encode('utf-8')
else:
bytes_body = raw_body
parts = [
(metadata_headers, json.dumps(metadata).encode('utf-8')),
({'Content-Type': headers['Content-Type']}, bytes_body),
]
boundary = choose_boundary()
body, content_type = encode_multipart_formdata(parts, boundary)
headers.update({
'Content-Type': content_type,
'Content-Length': str(len(body)),
'Accept': 'application/json',
})
s = AioSession(session) if session else self.session
if not BUILD_GCLOUD_REST:
# Wrap data in BytesIO to ensure aiohttp does not emit warning
# when payload size > 1MB
body = io.BytesIO(body) # type: ignore[assignment]
resp = await s.post(
url, data=body, headers=headers, params=params,
timeout=timeout,
)
data: Dict[str, Any] = await resp.json(content_type=None)
return data
async def _upload_resumable(
self, url: str, object_name: str,
stream: IO[AnyStr], params: Dict[str, str],
headers: Dict[str, str], *,
metadata: Optional[Dict[str, Any]] = None,
session: Optional[Session] = None,
timeout: int = 30,
) -> Dict[str, Any]:
# https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload
session_uri = await self._initiate_upload(
url, object_name, params,
headers, metadata=metadata,
session=session,
)
return await self._do_upload(
session_uri, stream, headers=headers,
session=session, timeout=timeout,
)
async def _initiate_upload(
self, url: str, object_name: str,
params: Dict[str, str], headers: Dict[str, str],
*, metadata: Optional[Dict[str, Any]] = None,
timeout: int = DEFAULT_TIMEOUT,
session: Optional[Session] = None,
) -> str:
params['uploadType'] = 'resumable'
metadict = (metadata or {}).copy()
metadict = {
self._format_metadata_key(k): v
for k, v in metadict.items()
}
if 'metadata' in metadict:
metadict['metadata'] = {
str(k): str(v) if v is not None else None
for k, v in metadict['metadata'].items()
}
metadict.update({'name': object_name})
metadata_ = json.dumps(metadict)
post_headers = headers.copy()
post_headers.update({
'Content-Length': str(len(metadata_)),
'Content-Type': 'application/json; charset=UTF-8',
'X-Upload-Content-Type': headers['Content-Type'],
'X-Upload-Content-Length': headers['Content-Length'],
})
s = AioSession(session) if session else self.session
resp = await s.post(
url, headers=post_headers, params=params,
data=metadata_, timeout=timeout,
)
session_uri: str = resp.headers['Location']
return session_uri
async def _do_upload(
self, session_uri: str, stream: IO[AnyStr],
headers: Dict[str, str], *, retries: int = 5,
session: Optional[Session] = None,
timeout: int = 30,
) -> Dict[str, Any]:
s = AioSession(session) if session else self.session
original_close = stream.close
original_position = stream.tell()
# Prevent the stream from being closed if the put operation fails
stream.close = lambda: None # type: ignore[method-assign]
try:
for tries in range(retries):
try:
resp = await s.put(
session_uri, headers=headers,
data=stream, timeout=timeout,
)
except ResponseError:
headers.update({'Content-Range': '*/*'})
stream.seek(original_position)
await sleep( # type: ignore[func-returns-value]
2. ** tries,
)
else:
break
finally:
original_close()
data: Dict[str, Any] = await resp.json(content_type=None)
return data
async def patch_metadata(
self, bucket: str, object_name: str, metadata: Dict[str, Any],
*, params: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
session: Optional[Session] = None,
timeout: int = DEFAULT_TIMEOUT,
) -> Dict[str, Any]:
# https://cloud.google.com/storage/docs/json_api/v1/objects/patch
encoded_object_name = quote(object_name, safe='')
url = f'{self._api_root_read}/{bucket}/o/{encoded_object_name}'
params = params or {}
headers = headers or {}
headers.update(await self._headers())
headers['Content-Type'] = 'application/json'
body = json.dumps(metadata).encode('utf-8')
s = AioSession(session) if session else self.session
resp = await s.patch(
url, data=body, headers=headers, params=params,
timeout=timeout,
)
data: Dict[str, Any] = await resp.json(content_type=None)
return data
async def get_bucket_metadata(
self, bucket: str, *,
params: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
session: Optional[Session] = None,
timeout: int = DEFAULT_TIMEOUT,
) -> Dict[str, Any]:
url = f'{self._api_root_read}/{bucket}'
headers = headers or {}
headers.update(await self._headers())
s = AioSession(session) if session else self.session
resp = await s.get(
url, headers=headers, params=params or {},
timeout=timeout,
)
data: Dict[str, Any] = await resp.json(content_type=None)
return data
async def close(self) -> None:
await self.session.close()
async def __aenter__(self) -> 'Storage':
return self
async def __aexit__(self, *args: Any) -> None:
await self.close()
/aas-core3.0rc02-testgen-0.0.1.tar.gz/aas-core3.0rc02-testgen-0.0.1/aas_core3_0_rc02_testgen/generate_json.py
import base64
import collections
import collections.abc
import copy
import itertools
import json
import os
import pathlib
from typing import (
Union,
Final,
Mapping,
Tuple,
OrderedDict,
Sequence,
List,
MutableMapping,
Any,
Optional,
Callable,
)
import aas_core_codegen.common
import aas_core_meta.v3rc2
import networkx
from aas_core_codegen import intermediate, infer_for_schema, naming
from aas_core_codegen.common import Identifier
from aas_core3_0_rc02_testgen import common
from aas_core3_0_rc02_testgen.frozen_examples import (
pattern as frozen_examples_pattern,
xs_value as frozen_examples_xs_value,
)
class PropertyRelationship:
"""
Model a relationship between two classes as a property.
Namely, instances of the target class appear as a property of the source class.
"""
def __init__(self, property_name: Identifier) -> None:
"""Initialize with the given values."""
self.property_name = property_name
class ListPropertyRelationship:
"""
Model a relationship between two classes as an item of a list property.
Namely, instances of the target class appear as items in a list property of
the source class.
"""
def __init__(self, property_name: Identifier) -> None:
"""Initialize with the given values."""
self.property_name = property_name
RelationshipUnion = Union[PropertyRelationship, ListPropertyRelationship]
RelationshipMap = Mapping[Tuple[Identifier, Identifier], RelationshipUnion]
class Segment:
"""Represent a segment from the class ``Environment`` to a concrete class."""
def __init__(
self,
source: intermediate.ConcreteClass,
target: intermediate.ConcreteClass,
relationship: RelationshipUnion,
) -> None:
"""Initialize with the given values."""
self.source = source
self.target = target
self.relationship = relationship
ShortestPathMap = Mapping[Identifier, Sequence[Segment]]
class ClassGraph:
"""Model how classes of the meta-model are related to each other."""
relationship_map: Final[RelationshipMap]
shortest_paths: Final[ShortestPathMap]
def __init__(
self, relationship_map: RelationshipMap, shortest_paths: ShortestPathMap
) -> None:
"""Initialize with the given values."""
self.relationship_map = relationship_map
self.shortest_paths = shortest_paths
def compute_relationship_map(symbol_table: intermediate.SymbolTable) -> RelationshipMap:
"""Compute the relationships between the classes as edges in the class graph."""
rel_map: OrderedDict[
Tuple[Identifier, Identifier], RelationshipUnion
] = collections.OrderedDict()
for symbol in symbol_table.symbols:
if isinstance(symbol, intermediate.ConcreteClass):
for prop in symbol.properties:
type_anno = intermediate.beneath_optional(prop.type_annotation)
if isinstance(type_anno, intermediate.ListTypeAnnotation):
assert isinstance(
type_anno.items, intermediate.OurTypeAnnotation
), (
"Expected only lists of enums or classes in the meta-model, "
f"but got: {type_anno}"
)
if isinstance(type_anno.items.symbol, intermediate.AbstractClass):
for concrete_cls in type_anno.items.symbol.concrete_descendants:
source_target = (symbol.name, concrete_cls.name)
rel = rel_map.get(source_target, None)
if rel is None:
rel_map[source_target] = ListPropertyRelationship(
property_name=prop.name
)
elif isinstance(type_anno.items.symbol, intermediate.ConcreteClass):
for concrete_cls in itertools.chain(
[type_anno.items.symbol],
type_anno.items.symbol.concrete_descendants,
):
source_target = (symbol.name, concrete_cls.name)
rel = rel_map.get(source_target, None)
if rel is None:
rel_map[source_target] = ListPropertyRelationship(
property_name=prop.name
)
else:
pass
elif isinstance(type_anno, intermediate.OurTypeAnnotation):
if isinstance(type_anno.symbol, intermediate.AbstractClass):
for concrete_cls in type_anno.symbol.concrete_descendants:
source_target = (symbol.name, concrete_cls.name)
rel = rel_map.get(source_target, None)
# NOTE (mristin, 2022-05-07):
# Property edges have smaller distance than list-property
# edges. Hence, we keep only the shortest edges.
#
# See: https://groups.google.com/g/networkx-discuss/c/87uC9F0ug8Y
if rel is None or isinstance(rel, ListPropertyRelationship):
rel_map[source_target] = PropertyRelationship(
property_name=prop.name
)
elif isinstance(type_anno.symbol, intermediate.ConcreteClass):
for concrete_cls in itertools.chain(
[type_anno.symbol], type_anno.symbol.concrete_descendants
):
source_target = (symbol.name, concrete_cls.name)
rel = rel_map.get(source_target, None)
# NOTE (mristin, 2022-05-07):
# See the note above re property edge *versus* list-property
# edge.
if rel is None or isinstance(rel, ListPropertyRelationship):
rel_map[source_target] = PropertyRelationship(
property_name=prop.name
)
else:
pass
return rel_map
def compute_shortest_paths_from_environment(
symbol_table: intermediate.SymbolTable, relationship_map: RelationshipMap
) -> ShortestPathMap:
"""Compute the shortest path from the environment to the concrete classes."""
graph = networkx.DiGraph()
for symbol in symbol_table.symbols:
if isinstance(symbol, intermediate.ConcreteClass):
graph.add_node(symbol.name)
for (source, target), relationship in relationship_map.items():
if isinstance(relationship, PropertyRelationship):
graph.add_edge(source, target, weight=1)
elif isinstance(relationship, ListPropertyRelationship):
# NOTE (mristin, 2022-05-07):
# Creating a list and adding an item is more work than creating an instance.
# Thus, we pay two coins for the list-property creation.
graph.add_edge(source, target, weight=2)
else:
aas_core_codegen.common.assert_never(relationship)
_, raw_path_map = networkx.single_source_dijkstra(G=graph, source="Environment")
path_map: OrderedDict[Identifier, Sequence[Segment]] = collections.OrderedDict()
for symbol in symbol_table.symbols:
if symbol.name == "Environment":
continue
raw_path = raw_path_map.get(symbol.name, None)
if raw_path is None:
continue
assert len(raw_path) >= 2
cursor = iter(raw_path)
current = next(cursor, None)
assert current is None or isinstance(current, str)
path: List[Segment] = []
while True:
prev = current
current = next(cursor, None)
assert current is None or isinstance(current, str)
if current is None:
break
assert prev is not None
source_symbol = symbol_table.must_find(Identifier(prev))
assert isinstance(
source_symbol, intermediate.ConcreteClass
), "Only edges between concrete classes expected in the graph"
assert current is not None
target_symbol = symbol_table.must_find(Identifier(current))
assert isinstance(
target_symbol, intermediate.ConcreteClass
), "Only edges between concrete classes expected in the graph"
relationship = relationship_map[(source_symbol.name, target_symbol.name)]
path.append(
Segment(
source=source_symbol,
target=target_symbol,
relationship=relationship,
)
)
path_map[symbol.name] = path
return path_map
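# Illustrative sketch of the weighting above (hedged: the toy graph below is
# not the actual meta-model): a plain property edge costs 1 and a
# list-property edge costs 2, so Dijkstra prefers paths through plain
# properties when both exist.
#
#     import networkx
#
#     g = networkx.DiGraph()
#     g.add_edge("Environment", "Submodel", weight=2)  # list property
#     g.add_edge("Submodel", "Property", weight=2)     # list property
#     g.add_edge("Property", "Reference", weight=1)    # plain property
#     dist, paths = networkx.single_source_dijkstra(G=g, source="Environment")
#     # dist["Reference"] == 5
#     # paths["Reference"] == ["Environment", "Submodel", "Property", "Reference"]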
def generate_value(
type_annotation: intermediate.TypeAnnotationUnion,
path_segments: List[Union[str, int]],
len_constraint: Optional[infer_for_schema.LenConstraint],
pattern_constraints: Optional[Sequence[infer_for_schema.PatternConstraint]],
generate_instance: Callable[
[intermediate.ConcreteClass, List[Union[str, int]]], OrderedDict[str, Any]
],
) -> Any:
"""
Generate the value without side effects based on the ``path_segments``.
The callable ``generate_instance`` instructs how to generate the instances
recursively.
"""
def implementation() -> Any:
"""Wrap the body in the separate function so that we can ensure the result."""
type_anno = intermediate.beneath_optional(type_annotation)
# noinspection PyUnusedLocal
primitive_type = None # type: Optional[intermediate.PrimitiveType]
if isinstance(type_anno, intermediate.PrimitiveTypeAnnotation):
primitive_type = type_anno.a_type
elif isinstance(type_anno, intermediate.OurTypeAnnotation) and isinstance(
type_anno.symbol, intermediate.ConstrainedPrimitive
):
primitive_type = type_anno.symbol.constrainee
else:
# It is not a primitive type.
primitive_type = None
hsh = common.hash_path(path_segments=path_segments)
# region Handle the special case of a single pattern constraint first
if pattern_constraints is not None:
if len(pattern_constraints) > 1:
patterns = [
pattern_constraint.pattern
for pattern_constraint in pattern_constraints
]
raise NotImplementedError(
"We did not implement the generation of a value based on two or "
"more pattern constraints, which is the case "
f"for the value {common.posix_path(path_segments)}: {patterns}"
)
if (
primitive_type is None
or primitive_type is not intermediate.PrimitiveType.STR
):
raise NotImplementedError(
"We did not implement the generation of a non-string value with "
"the pattern constraint, which is the case "
f"for the value {common.posix_path(path_segments)}"
)
else:
assert primitive_type is intermediate.PrimitiveType.STR
assert len(pattern_constraints) > 0, "Unexpected empty pattern constraints"
pattern = pattern_constraints[0].pattern
pattern_examples = frozen_examples_pattern.BY_PATTERN.get(pattern, None)
if pattern_examples is None:
raise NotImplementedError(
f"The entry is missing "
f"in the {frozen_examples_pattern.__name__!r} "
f"for the pattern {pattern!r} "
f"when generating the value at {common.posix_path(path_segments)}"
)
if len(pattern_examples.positives) == 0:
raise NotImplementedError(
f"Unexpected an empty list of positive examples "
f"in the {frozen_examples_pattern.__name__!r} "
f"for the pattern {pattern!r} "
f"when generating the value at {common.posix_path(path_segments)}"
)
for value in pattern_examples.positives.values():
return value
raise AssertionError("Expected to check for at least one positive example")
# endregion
if isinstance(type_anno, intermediate.PrimitiveTypeAnnotation) or (
isinstance(type_anno, intermediate.OurTypeAnnotation)
and isinstance(type_anno.symbol, intermediate.ConstrainedPrimitive)
):
assert primitive_type is not None
hsh_as_int = int(hsh, base=16)
if primitive_type is intermediate.PrimitiveType.BOOL:
return hsh_as_int % 2 == 0
elif primitive_type is intermediate.PrimitiveType.INT:
return hsh_as_int
elif primitive_type is intermediate.PrimitiveType.FLOAT:
return float(hsh_as_int) / 100
elif primitive_type is intermediate.PrimitiveType.STR:
return f"something_random_{hsh}"
elif primitive_type is intermediate.PrimitiveType.BYTEARRAY:
return base64.b64encode(bytearray.fromhex(hsh)).decode(encoding="ascii")
else:
aas_core_codegen.common.assert_never(primitive_type)
elif isinstance(type_anno, intermediate.OurTypeAnnotation):
if isinstance(type_anno.symbol, intermediate.Enumeration):
hsh_as_int = int(hsh, base=16)
return type_anno.symbol.literals[
hsh_as_int % len(type_anno.symbol.literals)
].value
elif isinstance(type_anno.symbol, intermediate.ConstrainedPrimitive):
raise AssertionError(
f"Should have been handled before: {type_anno.symbol}"
)
elif isinstance(
type_anno.symbol,
(intermediate.AbstractClass, intermediate.ConcreteClass),
):
if type_anno.symbol.interface is not None:
concrete_classes = type_anno.symbol.interface.implementers
hsh_as_int = int(hsh, base=16)
concrete_cls = concrete_classes[hsh_as_int % len(concrete_classes)]
return generate_instance(concrete_cls, path_segments)
else:
assert isinstance(type_anno.symbol, intermediate.ConcreteClass)
return generate_instance(type_anno.symbol, path_segments)
else:
aas_core_codegen.common.assert_never(type_anno.symbol)
elif isinstance(type_anno, intermediate.ListTypeAnnotation):
assert isinstance(
type_anno.items, intermediate.OurTypeAnnotation
) and isinstance(
type_anno.items.symbol,
(intermediate.AbstractClass, intermediate.ConcreteClass),
), f"Expected all lists to be lists of classes, but got: {type_anno}"
path_segments.append(1)
try:
if type_anno.items.symbol.interface is not None:
concrete_classes = type_anno.items.symbol.interface.implementers
hsh_as_int = int(hsh, base=16)
concrete_cls = concrete_classes[hsh_as_int % len(concrete_classes)]
return [generate_instance(concrete_cls, path_segments)]
else:
assert isinstance(
type_anno.items.symbol, intermediate.ConcreteClass
)
return [generate_instance(type_anno.items.symbol, path_segments)]
finally:
path_segments.pop()
else:
aas_core_codegen.common.assert_never(type_anno)
# NOTE (mristin, 2022-05-11):
# We ensure here that the constraint on ``len(.)`` of the result is satisfied.
# This covers some potential errors, but mind that this still does not check
# all the other constraints. Hence, you have to manually inspect the generated data and
# decide yourself whether you need to write a generator by hand.
result = implementation()
if len_constraint is not None and isinstance(result, (str, list)):
if (
len_constraint.min_value is not None
and len(result) < len_constraint.min_value
) or (
len_constraint.max_value is not None
and len(result) > len_constraint.max_value
):
raise ValueError(
f"Expected the value {common.posix_path(path_segments)} "
f"to satisfy the length constraint "
f"[{len_constraint.min_value!r}, {len_constraint.max_value!r}], "
f"but got the length {len(result)}. You have to write the generator "
f"for this property or instance yourself"
)
return result
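# Hedged sketch of the deterministic generation above: every value is derived
# from a hash of the JSON path (``common.hash_path``), so re-generating with
# the same path yields the same value (the hash digits below are made up for
# illustration):
#
#     hsh = common.hash_path(path_segments=["submodels", 0, "idShort"])  # e.g. "a3f9..."
#     int(hsh, base=16) % 2 == 0   # -> the generated BOOL
#     f"something_random_{hsh}"    # -> the generated STR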
def generate_property(
prop: intermediate.Property,
constraints_by_prop: infer_for_schema.ConstraintsByProperty,
path_segments: List[Union[str, int]],
generate_instance: Callable[
[intermediate.ConcreteClass, List[Union[str, int]]], OrderedDict[str, Any]
],
) -> Any:
"""
Generate the property ``prop`` of an instance of ``cls``.
The ``path_segments`` points to the property.
The ``generate_instance`` callable is used to recursively generate further
instances (not necessarily of the class ``cls``, but of any class in
the meta-model).
"""
return generate_value(
type_annotation=prop.type_annotation,
path_segments=path_segments,
len_constraint=constraints_by_prop.len_constraints_by_property.get(prop, None),
pattern_constraints=constraints_by_prop.patterns_by_property.get(prop, None),
generate_instance=generate_instance,
)
def generate_minimal_instance(
cls: intermediate.ConcreteClass,
path_segments: List[Union[str, int]],
constraints_by_class: MutableMapping[
intermediate.ClassUnion, infer_for_schema.ConstraintsByProperty
],
symbol_table: intermediate.SymbolTable,
) -> OrderedDict[str, Any]:
"""
Generate an instance with only required properties.
The ``path_segments`` refer to the JSON path leading to the instance of the ``cls``.
We recursively generate minimal instances for all the nested classes.
We will re-use the ``path`` in the subsequent recursive calls to avoid
the quadratic time complexity, so beware that this function is *NOT* thread-safe.
The generation should not be random, *i.e.*, re-generating with the same input
should yield the same output.
"""
reference_cls = symbol_table.must_find(Identifier("Reference"))
if cls is reference_cls:
# NOTE (mristin, 2022-06-19):
# We generate a global reference by default, since this makes for much better
# examples with less confusion for the reader. If you need something else, fix
# it afterwards.
return generate_global_reference(path_segments=path_segments)
constraints_by_prop = constraints_by_class[cls]
instance: OrderedDict[str, Any] = collections.OrderedDict()
for prop in cls.properties:
if isinstance(prop.type_annotation, intermediate.OptionalTypeAnnotation):
continue
path_segments.append(prop.name)
try:
# fmt: off
instance[naming.json_property(prop.name)] = generate_property(
prop=prop,
path_segments=path_segments,
constraints_by_prop=constraints_by_prop,
generate_instance=(
lambda a_cls, a_path_segments:
generate_minimal_instance(
cls=a_cls, path_segments=a_path_segments,
constraints_by_class=constraints_by_class,
symbol_table=symbol_table
)
)
)
# fmt: on
finally:
path_segments.pop()
# region Fix for specific class
basic_event_element_cls = symbol_table.must_find(Identifier("Basic_event_element"))
if cls == basic_event_element_cls:
# Fix that the observed is a proper model reference
instance["observed"] = generate_model_reference(
expected_type=aas_core_meta.v3rc2.Key_types.Referable,
path_segments=path_segments + ["observed"],
)
# Override that the direction is output so that we can always set
# the max interval
instance["direction"] = "OUTPUT"
# endregion
# region Set modelType
if cls.serialization is not None and cls.serialization.with_model_type:
instance["modelType"] = naming.json_model_type(cls.name)
# endregion
return instance
class InstanceWithPath:
"""Represent a JSON-able instance with the path to it from the environment."""
def __init__(
self,
instance: MutableMapping[str, Any],
path_segments: Sequence[Union[str, int]],
) -> None:
self.instance = instance
self.path_segments = path_segments
def generate_minimal_instance_in_minimal_environment(
cls: intermediate.ConcreteClass,
class_graph: ClassGraph,
constraints_by_class: MutableMapping[
intermediate.ClassUnion, infer_for_schema.ConstraintsByProperty
],
symbol_table: intermediate.SymbolTable,
) -> Tuple[OrderedDict[str, Any], List[Union[str, int]]]:
"""
Generate the minimal instance of ``cls`` in a minimal environment instance.
Return the environment and the path to the instance.
"""
shortest_path_in_class_graph_from_environment = class_graph.shortest_paths[cls.name]
environment_instance: Optional[OrderedDict[str, Any]] = None
path_segments: List[Union[str, int]] = []
source_instance: Optional[OrderedDict[str, Any]] = None
# NOTE (mristin, 2022-05-13):
# We have to keep track of submodels so that we can set the idShorts on their
# elements.
submodels = [] # type: List[InstanceWithPath]
submodel_cls = symbol_table.must_find(Identifier("Submodel"))
assert isinstance(submodel_cls, intermediate.ConcreteClass)
# NOTE (mristin, 2022-05-14):
# We need to track asset administration shells so that we set
# the references derivedFrom and submodels correctly.
asset_administration_shells = [] # type: List[InstanceWithPath]
asset_administration_shell_cls = symbol_table.must_find(
Identifier("Asset_administration_shell")
)
assert isinstance(asset_administration_shell_cls, intermediate.ConcreteClass)
instance_path = None # type: Optional[List[Union[int, str]]]
for i, segment in enumerate(shortest_path_in_class_graph_from_environment):
if source_instance is None:
assert segment.source.name == "Environment", (
"Expected the generation to start from an instance "
"of the class 'Environment'"
)
source_instance = generate_minimal_instance(
cls=segment.source,
path_segments=[],
constraints_by_class=constraints_by_class,
symbol_table=symbol_table,
)
environment_instance = source_instance
target_instance: Optional[OrderedDict[str, Any]] = None
if isinstance(segment.relationship, PropertyRelationship):
prop_name = segment.relationship.property_name
path_segments.append(naming.json_property(prop_name))
target_instance = generate_minimal_instance(
cls=segment.target,
path_segments=path_segments,
constraints_by_class=constraints_by_class,
symbol_table=symbol_table,
)
source_instance[naming.json_property(prop_name)] = target_instance
elif isinstance(segment.relationship, ListPropertyRelationship):
prop_name = segment.relationship.property_name
path_segments.append(naming.json_property(prop_name))
path_segments.append(0)
target_instance = generate_minimal_instance(
cls=segment.target,
path_segments=path_segments,
constraints_by_class=constraints_by_class,
symbol_table=symbol_table,
)
source_instance[naming.json_property(prop_name)] = [target_instance]
else:
aas_core_codegen.common.assert_never(segment.relationship)
if i == len(shortest_path_in_class_graph_from_environment) - 1:
instance_path = list(path_segments)
if segment.target.is_subclass_of(submodel_cls):
submodels.append(
InstanceWithPath(
instance=target_instance, path_segments=list(path_segments)
)
)
elif segment.target.is_subclass_of(asset_administration_shell_cls):
asset_administration_shells.append(
InstanceWithPath(
instance=target_instance, path_segments=list(path_segments)
)
)
else:
pass
assert target_instance is not None
source_instance = target_instance
# NOTE (mristin, 2022-05-12):
# The name ``source_instance`` is a bit of a misnomer here. We actually refer to
# the last generated instance which should be our desired final instance.
assert source_instance is not None
assert environment_instance is not None
# region Fix the invariant that all the submodel elements have the IdShort set
for submodel in submodels:
submodel_elements = submodel.instance.get("submodelElements", None)
if submodel_elements is not None:
for submodel_element in submodel.instance["submodelElements"]:
submodel_element[
"idShort"
] = f"some_id_short_{common.hash_path(submodel.path_segments)}"
# endregion
# region Fix the invariant that the derivedFrom is of correct type
for asset_administration_shell in asset_administration_shells:
derived_from = asset_administration_shell.instance.get("derivedFrom", None)
if derived_from is not None:
# len(reference.keys) != 0 or reference.keys[-1].type == expected_type
assert "keys" in derived_from and len(derived_from["keys"]) > 0, (
f"Unexpected derivedFrom with empty keys: "
f"{json.dumps(asset_administration_shell.instance)}"
)
# Fix
derived_from["keys"][-1]["type"] = "AssetAdministrationShell"
# endregion
assert instance_path is not None
return environment_instance, instance_path
def dereference(
environment: MutableMapping[str, Any], path_segments: Sequence[Union[int, str]]
) -> MutableMapping[str, Any]:
"""Dereference the path to an object starting from an environment."""
result = environment # type: Any
for i, segment in enumerate(path_segments):
if (isinstance(segment, str) and segment not in result) or (
isinstance(segment, int) and segment >= len(result)
):
raise AssertionError(
f"Expected the path {path_segments} in the environment: "
f"{json.dumps(environment, indent=2)}; "
f"the segment {i + 1}, {segment!r}, was not there"
)
result = result[segment]
if not isinstance(result, collections.abc.MutableMapping):
raise AssertionError(
f"Unexpected non-mapping after " f"dereferencing {path_segments=}: {result}"
)
for key in result:
if not isinstance(key, str):
raise AssertionError(
f"Unexpected non-string key {key} after "
f"dereferencing {path_segments=}: {result}"
)
return result
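# Usage sketch (hedged: arguments elided for brevity):
#
#     environment, path = generate_minimal_instance_in_minimal_environment(...)
#     instance = dereference(environment=environment, path_segments=path)
#     # ``instance`` is the mapping generated for the requested class inside
#     # ``environment``.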
def make_minimal_instance_complete(
instance: MutableMapping[str, Any],
path_segments: List[Union[int, str]],
cls: intermediate.ConcreteClass,
constraints_by_class: MutableMapping[
intermediate.ClassUnion, infer_for_schema.ConstraintsByProperty
],
symbol_table: intermediate.SymbolTable,
) -> None:
"""Set all the optional properties in the ``instance``."""
constraints_by_prop = constraints_by_class[cls]
data_element_cls = symbol_table.must_find(Identifier("Data_element"))
assert isinstance(data_element_cls, intermediate.AbstractClass)
asset_administration_shell_cls = symbol_table.must_find(
Identifier("Asset_administration_shell")
)
assert isinstance(asset_administration_shell_cls, intermediate.ConcreteClass)
concept_description_cls = symbol_table.must_find(Identifier("Concept_description"))
assert isinstance(concept_description_cls, intermediate.ConcreteClass)
entity_cls = symbol_table.must_find(Identifier("Entity"))
assert isinstance(entity_cls, intermediate.ConcreteClass)
property_cls = symbol_table.must_find(Identifier("Property"))
assert isinstance(property_cls, intermediate.ConcreteClass)
qualifier_cls = symbol_table.must_find(Identifier("Qualifier"))
assert isinstance(qualifier_cls, intermediate.ConcreteClass)
range_cls = symbol_table.must_find(Identifier("Range"))
assert isinstance(range_cls, intermediate.ConcreteClass)
submodel_cls = symbol_table.must_find(Identifier("Submodel"))
assert isinstance(submodel_cls, intermediate.ConcreteClass)
submodel_element_collection_cls = symbol_table.must_find(
Identifier("Submodel_element_collection")
)
assert isinstance(submodel_element_collection_cls, intermediate.ConcreteClass)
basic_event_element_cls = symbol_table.must_find(Identifier("Basic_event_element"))
assert isinstance(basic_event_element_cls, intermediate.ConcreteClass)
for prop in cls.properties:
if isinstance(prop.type_annotation, intermediate.OptionalTypeAnnotation):
path_segments.append(prop.name)
try:
# fmt: off
instance[naming.json_property(prop.name)] = generate_property(
prop=prop,
path_segments=path_segments,
constraints_by_prop=constraints_by_prop,
generate_instance=(
lambda a_cls, a_path_segments:
generate_minimal_instance(
cls=a_cls, path_segments=a_path_segments,
constraints_by_class=constraints_by_class,
symbol_table=symbol_table
)
)
)
# fmt: on
finally:
path_segments.pop()
# region Fix for the ancestor classes
if cls.is_subclass_of(data_element_cls):
if instance["category"] not in ["CONSTANT", "PARAMETER", "VARIABLE"]:
instance["category"] = "CONSTANT"
# endregion
# region Fix for the concrete class
if cls == asset_administration_shell_cls:
# Fix the derivedFrom to be a proper model reference
instance["derivedFrom"] = generate_model_reference(
expected_type=aas_core_meta.v3rc2.Key_types.Asset_administration_shell,
path_segments=path_segments + ["derivedFrom"],
)
# Fix the submodels to be proper model references
instance["submodels"] = [
generate_model_reference(
expected_type=aas_core_meta.v3rc2.Key_types.Submodel,
path_segments=path_segments + ["submodels"],
)
]
elif cls == concept_description_cls:
instance["category"] = "VALUE"
elif cls == entity_cls:
if instance["entityType"] == "SelfManagedEntity":
del instance["specificAssetId"]
else:
del instance["specificAssetId"]
del instance["globalAssetId"]
elif cls == property_cls:
# NOTE (mristin, 2022-05-15):
# We hard-code the type and the value since it would otherwise be
# unmaintainable.
instance["value"] = "true"
instance["valueType"] = "xs:boolean"
elif cls == qualifier_cls:
# NOTE (mristin, 2022-05-15):
# We hard-code the type and the value since it would otherwise be
# unmaintainable.
instance["value"] = "1234"
instance["valueType"] = "xs:int"
elif cls == range_cls:
# NOTE (mristin, 2022-05-15):
# We hard-code the type and the value since it would otherwise be
# unmaintainable.
instance["min"] = "1234"
instance["max"] = "4321"
instance["valueType"] = "xs:int"
elif cls == submodel_cls:
try:
path_segments.append("submodelElements")
for i, submodel_element in enumerate(instance["submodelElements"]):
path_segments.append(i)
try:
submodel_element[
"idShort"
] = f"some_id_short_{common.hash_path(path_segments)}"
finally:
path_segments.pop()
finally:
path_segments.pop()
# region Fix qualifiers
if (
instance.get("kind", None)
== aas_core_meta.v3rc2.Modeling_kind.Template.value
):
qualifiers = instance.get("qualifiers", None)
if qualifiers is not None:
for qualifier in qualifiers:
qualifier[
"kind"
] = aas_core_meta.v3rc2.Qualifier_kind.Template_qualifier.value
# endregion
elif cls == submodel_element_collection_cls:
for i, value in enumerate(instance["value"]):
path_segments.append(i)
try:
value["idShort"] = f"some_id_short_{common.hash_path(path_segments)}"
finally:
path_segments.pop()
elif cls == basic_event_element_cls:
# Fix the messageBroker to be a proper model reference
instance["messageBroker"] = generate_model_reference(
expected_type=aas_core_meta.v3rc2.Key_types.Referable,
path_segments=path_segments + ["messageBroker"],
)
else:
# No fix is necessary.
pass
# endregion
def generate_model_reference(
expected_type: aas_core_meta.v3rc2.Key_types,
path_segments: List[Union[str, int]],
) -> OrderedDict[str, Any]:
"""Generate a model Reference pointing to an instance of ``expected_type``."""
instance = collections.OrderedDict() # type: OrderedDict[str, Any]
instance["type"] = aas_core_meta.v3rc2.Reference_types.Model_reference.value
if expected_type in (
aas_core_meta.v3rc2.Key_types.Asset_administration_shell,
aas_core_meta.v3rc2.Key_types.Concept_description,
aas_core_meta.v3rc2.Key_types.Submodel,
):
instance["keys"] = [
collections.OrderedDict(
[
("type", expected_type.value),
("value", common.hash_path(path_segments + ["keys", 0, "value"])),
]
)
]
elif expected_type is aas_core_meta.v3rc2.Key_types.Referable:
instance["keys"] = [
collections.OrderedDict(
[
("type", aas_core_meta.v3rc2.Key_types.Submodel.value),
(
"value",
"something_random_"
+ common.hash_path(path_segments + ["keys", 0, "value"]),
),
]
),
collections.OrderedDict(
[
("type", aas_core_meta.v3rc2.Key_types.Referable.value),
(
"value",
"something_random_"
+ common.hash_path(path_segments + ["keys", 1, "value"]),
),
]
),
]
else:
raise NotImplementedError(
f"Unhandled {expected_type=}; when we developed this script there were "
f"no other key types expected in the meta-model as a reference, "
f"but this has obvious changed. Please contact the developers."
)
return instance
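# For illustration, and only as a sketch of the shape: for
# ``expected_type=Key_types.Submodel`` the returned mapping looks roughly like
# {"type": <model-reference marker>,
#  "keys": [{"type": "Submodel", "value": <deterministic path hash>}]},
# while for ``Key_types.Referable`` two keys are generated (a Submodel key
# followed by a Referable key). The concrete "type" strings come from
# ``aas_core_meta.v3rc2`` and are not spelled out here.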
def generate_global_reference(
path_segments: List[Union[str, int]],
) -> OrderedDict[str, Any]:
"""Generate an instance of a global Reference."""
instance = collections.OrderedDict() # type: OrderedDict[str, Any]
instance["type"] = aas_core_meta.v3rc2.Reference_types.Global_reference.value
instance["keys"] = [
collections.OrderedDict(
[
("type", "GlobalReference"),
(
"value",
"something_random_"
+ common.hash_path(path_segments + ["keys", 0, "value"]),
),
]
)
]
return instance
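# For illustration: the global reference above differs from a model reference
# only in its "type" marker and in having a single key of type
# "GlobalReference" whose value is derived from the path hash.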
def _copy_minimal_environment_and_instance(
minimal_environment: MutableMapping[str, Any],
path_to_instance_from_environment: Sequence[Union[str, int]],
) -> Tuple[MutableMapping[str, Any], MutableMapping[str, Any]]:
"""
Make a deep copy of the minimal environment.
Return the copied environment as well as the instance.
"""
environment_copy = copy.deepcopy(minimal_environment)
instance_copy = dereference(
environment=environment_copy,
path_segments=path_to_instance_from_environment,
)
return environment_copy, instance_copy
def _copy_complete_environment_and_instance(
complete_environment: MutableMapping[str, Any],
path_to_instance_from_environment: Sequence[Union[int, str]],
) -> Tuple[MutableMapping[str, Any], MutableMapping[str, Any]]:
"""
Make a deep copy of the environment containing a complete instance.
Return the copied environment as well as the instance.
"""
environment_copy = copy.deepcopy(complete_environment)
instance_copy = dereference(
environment=environment_copy,
path_segments=path_to_instance_from_environment,
)
return environment_copy, instance_copy
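# Both copy helpers above return the *copied* environment together with
# a reference into that copy (obtained via ``dereference``), so any mutation
# of the returned instance is reflected in the environment that is later
# serialized to JSON.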
def generate(test_data_dir: pathlib.Path) -> None:
"""Generate the JSON files."""
(
symbol_table,
constraints_by_class,
) = common.load_symbol_table_and_infer_constraints_for_schema()
rel_map = compute_relationship_map(symbol_table=symbol_table)
class_graph = ClassGraph(
relationship_map=rel_map,
shortest_paths=compute_shortest_paths_from_environment(
symbol_table=symbol_table,
relationship_map=rel_map,
),
)
# noinspection PyUnusedLocal
environment = None # type: Optional[MutableMapping[str, Any]]
for symbol in symbol_table.symbols:
if isinstance(symbol, intermediate.ConcreteClass):
if symbol.name not in class_graph.shortest_paths:
# NOTE (mristin, 2022-05-12):
# Skip the classes that are not reachable from the environment
continue
# region Minimal example
(
minimal_environment,
path_to_instance_from_environment,
) = generate_minimal_instance_in_minimal_environment(
cls=symbol,
class_graph=class_graph,
constraints_by_class=constraints_by_class,
symbol_table=symbol_table,
)
pth = (
test_data_dir
/ "Json"
/ "Expected"
/ naming.json_model_type(symbol.name)
/ "minimal.json"
)
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(minimal_environment, fid, indent=2, sort_keys=True)
# endregion
# BEFORE-RELEASE (mristin, 2022-06-19):
# Remove this ``if`` and implement a proper function once we have tested
# the SDK with XML.
if symbol.name != "Submodel_element_list":
# region Complete example
environment, instance = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=path_to_instance_from_environment,
)
make_minimal_instance_complete(
instance=instance,
path_segments=path_to_instance_from_environment,
cls=symbol,
constraints_by_class=constraints_by_class,
symbol_table=symbol_table,
)
pth = (
test_data_dir
/ "Json"
/ "Expected"
/ naming.json_model_type(symbol.name)
/ "complete.json"
)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
complete_environment = environment
# endregion
# region Type violation
for prop in symbol.properties:
# fmt: off
environment, instance = _copy_complete_environment_and_instance(
complete_environment=complete_environment,
path_to_instance_from_environment=
path_to_instance_from_environment
)
# fmt: on
type_anno = intermediate.beneath_optional(prop.type_annotation)
# noinspection PyUnusedLocal
unexpected_type = None # type: Optional[str]
if isinstance(type_anno, intermediate.ListTypeAnnotation):
instance[
naming.json_property(prop.name)
] = "Expected an array, but we put a string here"
unexpected_type = "String"
else:
instance[naming.json_property(prop.name)] = [
"Unexpected array here"
]
unexpected_type = "Array"
assert unexpected_type is not None
pth = (
test_data_dir
/ "Json"
/ "Unexpected"
/ "TypeViolation"
/ naming.json_model_type(symbol.name)
/ f"{naming.json_property(prop.name)}As{unexpected_type}.json"
)
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
# endregion
# region Positive and negative pattern frozen_examples
constraints_by_prop = constraints_by_class[symbol]
for prop in symbol.properties:
pattern_constraints = constraints_by_prop.patterns_by_property.get(
prop, None
)
if pattern_constraints is not None and len(pattern_constraints) == 1:
pattern_examples = frozen_examples_pattern.BY_PATTERN[
pattern_constraints[0].pattern
]
for name, text in pattern_examples.positives.items():
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[naming.json_property(prop.name)] = text
pth = (
test_data_dir
/ "Json"
/ "Expected"
/ naming.json_model_type(symbol.name)
/ f"{naming.json_property(prop.name)}OverPatternExamples"
/ f"{name}.json"
)
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
for name, text in pattern_examples.negatives.items():
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[naming.json_property(prop.name)] = text
pth = (
test_data_dir
/ "Json"
/ "Unexpected"
/ "PatternViolation"
/ naming.json_model_type(symbol.name)
/ f"{naming.json_property(prop.name)}"
/ f"{name}.json"
)
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
# endregion
# region Required violation
for prop in symbol.properties:
if isinstance(
prop.type_annotation, intermediate.OptionalTypeAnnotation
):
continue
environment, instance = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=path_to_instance_from_environment,
)
del instance[naming.json_property(prop.name)]
pth = (
test_data_dir
/ "Json"
/ "Unexpected"
/ "RequiredViolation"
/ naming.json_model_type(symbol.name)
/ f"{naming.json_property(prop.name)}.json"
)
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
# endregion
# region Length violation
for prop in symbol.properties:
len_constraint = constraints_by_prop.len_constraints_by_property.get(
prop, None
)
if len_constraint is not None:
type_anno = intermediate.beneath_optional(prop.type_annotation)
if (
len_constraint.min_value is not None
and len_constraint.min_value > 0
):
environment = None
# NOTE (mristin, 2022-05-15):
# We handle only a subset of cases here automatically since
# otherwise it would be too difficult to implement. The
# remainder of the cases needs to be implemented manually.
if isinstance(type_anno, intermediate.PrimitiveTypeAnnotation):
if type_anno.a_type is intermediate.PrimitiveType.STR:
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[naming.json_property(prop.name)] = ""
elif (
isinstance(type_anno, intermediate.OurTypeAnnotation)
and isinstance(
type_anno.symbol, intermediate.ConstrainedPrimitive
)
and (
type_anno.symbol.constrainee
is intermediate.PrimitiveType.STR
)
):
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[naming.json_property(prop.name)] = ""
elif isinstance(type_anno, intermediate.ListTypeAnnotation):
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[naming.json_property(prop.name)] = []
if environment is not None:
pth = (
test_data_dir
/ "Json"
/ "Unexpected"
/ "MinLengthViolation"
/ naming.json_model_type(symbol.name)
/ f"{naming.json_property(prop.name)}.json"
)
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
if len_constraint.max_value is not None:
environment = None
# NOTE (mristin, 2022-05-15):
# We handle only a subset of cases here automatically since
# otherwise it would be too difficult to implement. The
# remainder of the cases needs to be implemented manually.
#
# We also optimistically assume we do not break any patterns,
# invariants *etc.* If we do break one, you have to write
# the generation code manually.
prop_name = naming.json_property(prop.name)
too_long_text = common.generate_long_string(
length=len_constraint.max_value + 1,
path_segments=(
path_to_instance_from_environment + [prop_name]
),
)
if isinstance(type_anno, intermediate.PrimitiveTypeAnnotation):
if type_anno.a_type is intermediate.PrimitiveType.STR:
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[prop_name] = too_long_text
elif (
isinstance(type_anno, intermediate.OurTypeAnnotation)
and isinstance(
type_anno.symbol, intermediate.ConstrainedPrimitive
)
and (
type_anno.symbol.constrainee
is intermediate.PrimitiveType.STR
)
):
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[prop_name] = too_long_text
if environment is not None:
pth = (
test_data_dir
/ "Json"
/ "Unexpected"
/ "MaxLengthViolation"
/ naming.json_model_type(symbol.name)
/ f"{naming.json_property(prop.name)}.json"
)
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
# endregion
# region Break Date_time_stamp_UTC values with February 29th of a non-leap year
date_time_stamp_utc_symbol = symbol_table.must_find(
Identifier("Date_time_stamp_UTC")
)
assert isinstance(
date_time_stamp_utc_symbol, intermediate.ConstrainedPrimitive
)
for prop in symbol.properties:
type_anno = intermediate.beneath_optional(prop.type_annotation)
if (
isinstance(type_anno, intermediate.OurTypeAnnotation)
and type_anno.symbol is date_time_stamp_utc_symbol
):
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
time_of_day = common.generate_time_of_day(
path_segments=(path_to_instance_from_environment + [prop.name])
)
instance[
naming.json_property(prop.name)
] = f"2022-02-29T{time_of_day}Z"
pth = (
test_data_dir
/ "Json"
/ "Unexpected"
/ "DateTimeStampUtcViolationOnFebruary29th"
/ naming.json_model_type(symbol.name)
/ f"{naming.json_property(prop.name)}.json"
)
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
# endregion
# region Generate positive and negative frozen_examples of Property values
if symbol.name in ["Property", "Range"]:
data_type_def_xsd_symbol = symbol_table.must_find(
Identifier("Data_type_def_XSD")
)
assert isinstance(data_type_def_xsd_symbol, intermediate.Enumeration)
for literal in data_type_def_xsd_symbol.literals:
examples = frozen_examples_xs_value.BY_VALUE_TYPE.get(
literal.value, None
)
if examples is None:
raise NotImplementedError(
f"The entry is missing "
f"in the {frozen_examples_xs_value.__name__!r} "
f"for the value type {literal.value!r}"
)
if symbol.name == "Property":
paths_values = [
(
(
test_data_dir
/ "Json/Expected"
/ naming.json_model_type(symbol.name)
/ "OverValueExamples"
/ literal.name
/ f"{name}.json"
),
value,
)
for name, value in examples.positives.items()
] + [
(
(
test_data_dir
/ "Json/Unexpected/"
/ naming.json_model_type(symbol.name)
/ "OverInvalidValueExamples"
/ literal.name
/ f"{name}.json"
),
value,
)
for name, value in examples.negatives.items()
]
for pth, value in paths_values:
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[
naming.json_property(Identifier("value_type"))
] = literal.value
instance[naming.json_property(Identifier("value"))] = value
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
elif symbol.name == "Range":
paths_values = [
(
(
test_data_dir
/ "Json/Expected"
/ naming.json_model_type(symbol.name)
/ "OverMinMaxExamples"
/ literal.name
/ f"{name}.json"
),
value,
)
for name, value in examples.positives.items()
] + [
(
(
test_data_dir
/ "Json/Unexpected/"
/ naming.json_model_type(symbol.name)
/ "OverInvalidMinMaxExamples"
/ literal.name
/ f"{name}.json"
),
value,
)
for name, value in examples.negatives.items()
]
for pth, value in paths_values:
# fmt: off
(
environment,
instance,
) = _copy_minimal_environment_and_instance(
minimal_environment=minimal_environment,
path_to_instance_from_environment=
path_to_instance_from_environment,
)
# fmt: on
instance[
naming.json_property(Identifier("value_type"))
] = literal.value
instance[naming.json_property(Identifier("min"))] = value
instance[naming.json_property(Identifier("max"))] = value
pth.parent.mkdir(exist_ok=True, parents=True)
with pth.open("wt", encoding="utf-8") as fid:
json.dump(environment, fid, indent=2, sort_keys=True)
else:
raise AssertionError(f"Unexpected {symbol.name=}")
# endregion
# BEFORE-RELEASE (mristin, 2022-06-19):
# Manually write Json/Unexpected/ConstraintViolation/{class name}/
# {describe how we break it somehow}.json
def main() -> None:
"""Execute the main routine."""
this_path = pathlib.Path(os.path.realpath(__file__))
test_data_dir = this_path.parent.parent / "test_data"
generate(test_data_dir=test_data_dir)
if __name__ == "__main__":
main()
/sos_notebook-0.24.2-py3-none-any.whl/sos_notebook/install.py
import argparse
import json
import os
import shutil
import sys
from IPython.utils.tempdir import TemporaryDirectory
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import (ENV_CONFIG_PATH, SYSTEM_CONFIG_PATH,
jupyter_config_dir)
from traitlets.config.manager import BaseJSONConfigManager
_py_ver = sys.version_info
if _py_ver.major == 2 or (_py_ver.major == 3 and (_py_ver.minor, _py_ver.micro) < (6, 0)):
raise SystemError('sos requires Python 3.6 or higher. Please upgrade your Python {}.{}.{}.'.format(
_py_ver.major, _py_ver.minor, _py_ver.micro))
kernel_json = {
"argv": [sys.executable, "-m", "sos_notebook.kernel", "-f", "{connection_file}"],
"display_name": "SoS",
"language": "sos",
}
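# This spec ends up as ``kernel.json`` in the installed kernel spec directory
# (typically ``.../kernels/sos/kernel.json``); Jupyter then starts the SoS
# kernel by running ``python -m sos_notebook.kernel -f <connection file>``.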
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms
def get_install_sos_kernel_spec_parser():
parser = argparse.ArgumentParser(description='Install KernelSpec for sos Kernel')
prefix_locations = parser.add_mutually_exclusive_group()
prefix_locations.add_argument('--user', help='Install KernelSpec in the user home directory', action='store_true')
prefix_locations.add_argument(
'--sys-prefix',
help='Install KernelSpec in sys.prefix. Useful in conda / virtualenv',
action='store_true',
dest='sys_prefix')
prefix_locations.add_argument('--prefix', help='Install KernelSpec in this prefix', default=None)
return parser
def install_sos_kernel_spec(user, prefix):
with TemporaryDirectory() as td:
os.chmod(td, 0o755)  # Starts off as 700, not readable by other users
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(kernel_json, f, sort_keys=True)
# Copy resources once they're specified
shutil.copy(os.path.join(os.path.split(__file__)[0], 'kernel.js'), os.path.join(td, 'kernel.js'))
shutil.copy(os.path.join(os.path.split(__file__)[0], 'logo-64x64.png'), os.path.join(td, 'logo-64x64.png'))
KS = KernelSpecManager()
KS.install_kernel_spec(td, 'sos', user=user, prefix=prefix)
destination = KS._get_destination_dir('sos', user=user, prefix=prefix)
print(f'sos jupyter kernel spec is installed to {destination}')
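# Illustrative usage: ``python -m sos_notebook.install --sys-prefix`` installs
# the spec into the current virtualenv/conda environment, while running the
# module without arguments as a non-root user installs into the user's home
# directory (see ``main`` below).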
def _get_config_dir(user=False, sys_prefix=False):
"""Get the location of config files for the current context."""
user = False if sys_prefix else user
if user:
nbext = jupyter_config_dir()
elif sys_prefix:
nbext = ENV_CONFIG_PATH[0]
else:
nbext = SYSTEM_CONFIG_PATH[0]
return nbext
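# Precedence in the helper above: ``sys_prefix`` wins over ``user`` (the
# latter is forced to False), and when neither is set the system-wide
# configuration path is used.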
def install_config(user, prefix):
config_dir = _get_config_dir(user=user, sys_prefix=prefix)
# Set extra template path
cm = BaseJSONConfigManager(config_dir=os.path.join(config_dir, 'nbconfig'))
default_config = {
'notebook_console_panel': 'auto',
'kernel_codemirror_mode': {
'python': {
'name': "python",
'version': 3
},
'python2': {
'name': "python",
'version': 2
},
'python3': {
'name': "python",
'version': 3
},
'r': "r",
'report': "report",
'pandoc': "markdown",
'download': "markdown",
'markdown': "markdown",
'ruby': "ruby",
'sas': "sas",
'bash': "shell",
'sh': "shell",
'julia': "julia",
'run': "shell",
'javascript': "javascript",
'typescript': {
'name': "javascript",
'typescript': True
},
'octave': "octave",
'matlab': "octave",
'mllike': "mllike",
'clike': "clike",
'html': "htmlembedded",
'xml': "xml",
'yaml': "yaml",
'json': {
'name': "javascript",
'jsonMode': True
},
'stex': "stex",
}
}
config = cm.get('notebook')
if 'sos' not in config:
config['sos'] = default_config
else:
sos_config = config['sos']
if 'notebook_console_panel' not in sos_config:
sos_config['notebook_console_panel'] = default_config['notebook_console_panel']
if 'kernel_codemirror_mode' not in sos_config:
sos_config['kernel_codemirror_mode'] = default_config['kernel_codemirror_mode']
else:
for key in default_config['kernel_codemirror_mode']:
if key not in sos_config['kernel_codemirror_mode']:
sos_config['kernel_codemirror_mode'][key] = default_config['kernel_codemirror_mode'][key]
config['sos'] = sos_config
# avoid warnings about unset version
cm.set('notebook', config)
print(f'Settings added or updated in {config_dir}/nbconfig/notebook.json')
print('If you notice problems with the kernel, you will need to use AsyncMappingKernelManager as the kernel manager')
print('Please see https://github.com/jupyter/notebook/issues/6164 for details.')
def main():
parser = get_install_sos_kernel_spec_parser()
args = parser.parse_args()
user = False
prefix = None
if args.sys_prefix:
prefix = sys.prefix
elif args.prefix:
prefix = args.prefix
elif args.user or not _is_root():
user = True
install_sos_kernel_spec(user, prefix)
install_config(user, prefix)
if __name__ == '__main__':
main()
/dbpedia_ent-0.1.9-py3-none-any.whl/dbpedia_ent/dto/ent/n1/a/trie_ar.py
d_trie_ar = {'_': ['ar.wikipedia.org',
'ar-ruh-ul-qudus',
'ar-rahhayliyah',
'ar.lakshmanan',
'ar-rahaliyah',
'ar-pi-uck-i',
"ar-rubay'i",
'ar-231,453',
'ar-a000002',
"ar-ra'inah",
'ar-rahman',
'ar-ruways',
'ar-riyadh',
'ar-rutbah',
"ar-ru'ays",
'ar-rastan',
'ar-raniri',
'ar-rustaq',
'ar-riyyad',
'ar-ramtha',
'ar-raqqah',
'ar-rashid',
'ar-ramadi',
'ar-rihiya',
'ar-ramlah',
'ar-r17779',
'ar15.com',
'ar-raman',
'ar-ramla',
'ar-riyad',
'ar-namys',
'ar-rundi',
'ar-fffp',
'ar-razi',
'ar-rawi',
'ar-rams',
"ar-ra'd",
'ar-radi',
'ar-rass',
'ar-afff',
'ar-cmt2',
'ar-196',
'ar-231',
'ar-rum',
'ar-232',
'ar-180',
'ar-234',
'ar-nut',
"ar'ara",
'ar-wrf',
'ar-jun',
'ar-30',
'ar-m1',
'ar-10',
'ar-15',
'ar-18',
'ar-16',
'ar-96',
'ar-24',
'ar-57',
'ar-fp',
'ar-ab',
'ar-50',
'ar-64',
'ar-70',
'ar70',
'ar-5',
'ar15',
'ar2d',
'ar-7',
'ar18',
'ar10',
'ar-1',
'ar-b',
'ar5',
'ar-',
'ar2',
'ar.',
'ar4',
'ar1',
'ar7'],
'a': ['arabinopyranosyl-n-methyl-n-nitrosourea',
"arachidonyl-2'-chloroethylamide",
'arachidonylcyclopropylamide',
'arachidonoylethanolamine',
'arabinofuranosylcytosine',
'arachidonoylethanolamide',
'arabinosyltransferase',
'arameiska/syrianska',
'arabhavi-dhavalatti',
'arasanagaripattinam',
'arachaeopsittacus',
'arachneosomatidia',
'arachnopezizaceae',
'araches-la-frasse',
'arangattuparambu',
'araguina-sennola',
'arasalamkarambai',
'aragon-catalonia',
'aramaic-speakers',
'araripelepidotes',
'arambourgisuchus',
'arachnodactylia',
'arabi-malayalam',
'araripedactylus',
'arabic-speaking',
'arachnogyaritus',
'arachnopusiidae',
'arabinogalactan',
'arabbabirkhanly',
'aramean-syriacs',
'araucnephioides',
'arandon-passins',
'arabic-language',
'aramaean-syriac',
'arab-alli-mamed',
'aramean/syriacs',
'arakandanallur',
'arachnodactyly',
'arab-canadians',
'arandaspididae',
'araucanisation',
'aravantigepura',
'aramotaskaupid',
'araxoceratidae',
'aramepinchieue',
'araucarioxylon',
'arab-filipinos',
'araucanization',
'arachidicoccus',
'araucanoraptor',
'arabiastranden',
'araucanizacion',
'aralobatrachus',
'arab-americans',
'aral-paygambar',
'arachnophobiac',
'aramean-syriac',
'arasavanangadu',
'arambourgiania',
'aratli-curuglu',
'arasilankumari',
'arabised-arabs',
'arabshamshevia',
'araripenymphes',
'arasvikfjorden',
'araripemydidae',
'aramean/syriac',
'arachnidiidae',
'aralazhdarcho',
'arabiaweather',
'araripichthys',
'araripesuchus',
'arangottukara',
'arachnopeziza',
'arachnactidae',
'arab-canadian',
'arachnomorpha',
'arachnephobia',
'arachnophilia',
'arachnophobic',
'aram-naharaim',
'arabicization',
'arachosinella',
'aranyaprathet',
'aram-damascus',
'arachnoididae',
'arabawshchyna',
'arachnaphobia',
'arab-filipino',
'arachnolophus',
'arachanamakhi',
'araneagryllus',
'arachnoiditis',
'araucariaceae',
'araeolaimidae',
'arajukaljukua',
'arachnophobia',
'arachnologist',
'arachniphobia',
'arachnotermes',
'arachnaphilia',
'arachnoserver',
'arabo-persian',
'araneomorphae',
'araopan-tepui',
'arandaspidida',
'araeoscelidia',
'araeopterella',
'arab-persians',
'arakatavemula',
'arab-american',
'arabdzhabirli',
'aranganayagam',
'arakkuparamba',
'aranyosgadany',
'araripesaurus',
'aranya-kanda',
'aralvaimozhi',
'aranattukara',
'arachnothera',
'arachidonate',
'arachno-claw',
'aracariguama',
'arablinskoye',
'aralotherium',
'arachnomorph',
'aralvaymozhi',
'arasinakunte',
'ara-altsagat',
'aratitiyopea',
'arachovitika',
'arasvikfjord',
'arachnomyces',
'aratidecthes',
'araucarivora',
'arachnospila',
'arabianranta',
'arabianjoker',
'aralvaimozhy',
'aranbizkarra',
'aranyosapati',
'arachnomancy',
'araksfjorden',
'arakimentari',
'arachnanthus',
'araeodelphis',
'aravakurichi',
'aracniphobia',
'arachnotheca',
'arachnocampa',
'arakkalkettu',
'arabinoxylan',
'arab-israeli',
'araeoscelida',
'araappaloosa',
'arachnoquake',
'arakhovitika',
'arachnoobius',
'arashio-beya',
'arachnocoris',
'arabellapark',
'arachnothryx',
'araeopaschia',
'aralidiaceae',
'arationality',
'aradelloides',
'arachnophagy',
'araeolaimida',
'aratasaurus',
'arabkishlak',
'arastradero',
'araneotanna',
'aramatle-qo',
'aramidopsis',
'araucariana',
'aranycsapat',
'aramichthys',
'araeothrips',
'arabicodium',
'aradophagus',
'arasiramani',
'aralosaurus',
'araeococcus',
'arangastakh',
'araucnephia',
'arandaspida',
'arappaloosa',
'arachnomura',
'araichimani',
'arachnoides',
'aranobroter',
'arabidopsis',
'aragosaurus',
'arachnomyia',
'aragatsavan',
'aratlakatta',
'arabbazarly',
'araeopteron',
'aralasurali',
'arahitogami',
'araeolaimus',
'arachnoidea',
'araiochelys',
'araka-kalai',
'arabization',
'arachniodes',
'arabisation',
'arachnology',
'arab-berber',
'aram-nahrin',
'arab-norman',
'arabibarbus',
'aranidipine',
'aram-geshur',
'aragvispiri',
'arabicnemis',
'arapatiella',
'aracynthias',
'araeomorpha',
'araneomorph',
'aravampatti',
'araboketose',
'arattupuzha',
'arachosians',
'araeodontia',
'arabinoside',
'araeoscelis',
'araeophalla',
'araucanians',
'aratchadzor',
'arappalayam',
'arandelovic',
'arapaiminae',
'arachnyssus',
'arachnoidal',
'araucarites',
'arambourgia',
'arachniotus',
'arachnicide',
'arachno-bot',
'arahovitika',
'arandelovac',
'arabinulose',
'araucoxenia',
'araeopidius',
'arachnacris',
'aralvaymoli',
'arabitragus',
'arabophobia',
'arachidamia',
'arany-album',
'araknofobia',
'araucanized',
'aranimermis',
'aranyakanda',
'arabikkatha',
'araeocoryne',
'aralioideae',
'arachidonic',
'araucogonia',
'arabikkadal',
'araeophylla',
'araquistain',
'araslanovo',
'aratupuzha',
'araeovalva',
'arakkunnam',
'araeomerus',
'araliaceae',
'arabinitol',
'araucanaid',
'araladinni',
'araputanga',
'araxoceras',
'aragadzotn',
'aragominas',
'araeomolis',
'arabsat-1c',
'arachadzor',
'aracamunia',
'aracynthus',
'araiobelus',
'arasakulam',
'araethyrea',
'aranthangi',
'araguatins',
'aral-pokal',
'arangannal',
'arashiyama',
'arab-estan',
'arahmaiani',
'arabicized',
'arag-tower',
'arabakonak',
'araianwali',
'arambegama',
'arabsat-6a',
'aranyvirag',
'arabophone',
'aralikatti',
'aranjaanam',
'araabmuzik',
'araklispor',
'aragatsotn',
'aragoscope',
'arabsat-5a',
'araripemys',
'arambakkam',
'arablinski',
'arabikatha',
'araeosteus',
'aravindhan',
'aranoethra',
'aravinthan',
'aramelevka',
'aragatzotn',
'arakkillam',
'araguaiana',
'aravonitsa',
'aram-zobah',
'aram-sobah',
'arawakanas',
'araeoderes',
'arachnitis',
'araucarias',
'aradhippou',
'aragonaise',
'arangetram',
'araneoidea',
'aralihatti',
'aracanthus',
'aranucidae',
'arauchemus',
'araeostoma',
'araiofusus',
'araucanian',
'aranarache',
'arambagama',
'arasangudy',
'araianwala',
'aramayoite',
'arachosian',
'araeostyle',
'arabineura',
'arakynthos',
'arabacilar',
'arasavalli',
'arabellata',
'aralimatti',
'aralbayevo',
'arabsat-1a',
'aracanidae',
'aranzueque',
'arasadzykh',
'arawwawala',
'araracuara',
'aragonitic',
'aranpitiya',
'araguainha',
'arabesques',
'araraquara',
'arandaspis',
'arambuiyeh',
'aranshahik',
'aragoiania',
'arasaktchi',
'aralihonda',
'arandelovo',
'arachnobas',
'arambepola',
'arakhsamna',
'arachnodes',
'araguacema',
'arayankavu',
'arabsat-1b',
'arale-chan',
'arabidella',
'aramayona',
'araw-araw',
'araqsavan',
'aravissos',
'arabiatta',
'aranguren',
'araniella',
'aracoiaba',
'araskonay',
'arathwada',
'aracataca',
'arambarri',
'arai-juku',
'arauquita',
'aracadzor',
'araguaina',
'araliales',
'arakcheev',
'aranghata',
'araschgen',
'arabsalim',
'aragarcas',
'arachnida',
'aramecina',
'arabinase',
'araca-boi',
'arambhada',
'araratyan',
'arabraces',
'arantzazu',
'aragonais',
'arac/trum',
'aranfaybo',
'aragokome',
'araltogay',
'arahuacan',
'arakapady',
'aravindar',
'arachania',
'arautauta',
'aravatagi',
'arajamugh',
'arandilla',
'arakhchin',
'arahuetes',
'aragnouet',
'arambilet',
'arakshaka',
'arainabad',
'araguapaz',
'arachnoid',
'arabistan',
'aranciata',
'arasikere',
'aranyakas',
'arabestan',
'arachosia',
'aravelian',
'araschnia',
'araguaney',
'arachalur',
'arachotia',
'araujuzon',
'arabinose',
'aranjanam',
'arachnids',
'arasatchi',
'aravathur',
'arabissus',
'arapiraca',
'aranyelet',
'aran-orin',
'arakanese',
'aranganur',
'araeoncus',
'aralidium',
'aranaputa',
'arameaens',
'aragnomus',
'araucania',
'aranaiyur',
'araeocyon',
'ara-kiret',
'arakhthos',
'aracatuba',
'arasalaru',
'arapongas',
'arapovice',
'arakamani',
'aradoidea',
'aratiatia',
'araushnee',
'arazayeva',
'araidanga',
'arasbaran',
'aragonese',
'aramaeans',
'araucanos',
'arabesque',
'arasapura',
'araneidae',
'araiophos',
'aranayaka',
'araecerus',
'arachthos',
'arappavan',
'araksavan',
'araucaria',
'arakelyan',
'arabbiata',
'arajadzor',
'arambasic',
'aragvadha',
'aragualna',
'araki-ryu',
'arasangam',
'aranyakam',
'araeosoma',
'aravindan',
'arancibia',
'aradippou',
'aracillum',
'arabizing',
'aragonian',
'aralgoria',
'aracitaba',
'araneidan',
'araneites',
'arampampa',
'arapahoes',
'arakkonam',
'aratarann',
'arasparlu',
'arachnura',
'arabianus',
'arachnion',
'arapacana',
'arayannam',
'ararangua',
'aranmanai',
'aratashen',
'arapovaca',
'aragonite',
'arakkulam',
'arachnula',
'aravansay',
'arariboia',
'araiteuru',
'aracinovo',
'araripina',
'aranthodu',
'aradalen',
'araveedu',
'araguana',
'arazbary',
'arachnis',
'arabella',
'aranyani',
'araquiel',
'aramidae',
'aramboor',
'arabitol',
'araukome',
'araneosa',
'arandiga',
'aracaris',
'araphura',
'arancedo',
'aradhiou',
'aramango',
'aracamby',
'aramoana',
'arabygdi',
'araquari',
'aramites',
'aranmula',
'arabelia',
'aranella',
'arabinda',
'aramides',
'aratiles',
'arancini',
'arahuano',
'arantina',
'arabemys',
'arachote',
'aragyugh',
'araltobe',
'araneids',
'arabilla',
'arasiyal',
'arachnus',
'arabised',
'aratuipe',
'arascach',
'arabstan',
'arabnews',
'arakhley',
'arabcheh',
'arapaima',
'araqiyeh',
'arahuana',
'arambula',
'arasvika',
'araloosa',
'araguaia',
'arafundi',
'arabists',
'aragonez',
'araguaya',
'arabiola',
'arabanoo',
'aradanga',
'arabsiyo',
'aramchol',
'arachnid',
'arakiite',
'araponga',
'arascues',
'arachchi',
'arandzar',
'arambare',
'araeotis',
'aramengo',
'araeotic',
'arakuzha',
'arancina',
'arabicus',
'araminda',
'arangadi',
'araruama',
'arachnia',
'arabized',
'araminta',
'araegeus',
'araidron',
'arancino',
'aranjuez',
'aradinae',
'arambham',
'arabians',
'aragalur',
'arapohue',
'arachova',
'arankula',
'aravirta',
'arawalli',
'araibeki',
'araespor',
'ararucip',
'arashiro',
'araneida',
'aranitas',
'aranrhod',
'arabriga',
'aradhona',
'arabhavi',
'arapahos',
'arangodb',
'arazbari',
'arachnea',
'arabicum',
'arameans',
'aravaipa',
'aravalli',
'aragyukh',
'arayatli',
'arabeski',
'aramassa',
'aradhana',
'arabidze',
'arakaldo',
'aranmore',
'aranyaka',
'aramisha',
'arabshah',
'araioses',
'araguita',
'arapahoe',
'arachide',
'araguari',
'arawakan',
'aradgaon',
'aradului',
'aravichy',
'arandisa',
'arahatti',
'arajuuri',
'aragonda',
'aramburo',
'aranatha',
'arangina',
'araldite',
'araghchi',
'arakonam',
'arabeyes',
'arakapas',
'araucano',
'araimudi',
'aratsani',
'arapiles',
'aramaean',
'arapoema',
'arablish',
'arabuyeh',
'arawacus',
'aratores',
'araweelo',
'arambagh',
'arattana',
'aramburu',
'arababad',
'arakelov',
'araouane',
'arambram',
'araucana',
'aratinga',
'aratk@nd',
'arasairi',
'arachana',
'arawakia',
'araguacu',
'arancine',
'aradidae',
'arawwala',
'aratilis',
'arapovic',
'arabbuna',
'araranca',
'ararenda',
'arambala',
'aramist',
'aravere',
'araboid',
'arariki',
'arangiz',
'arashio',
'aramane',
'arateus',
'aranuka',
'arakhne',
'arakune',
'arabisc',
'arasibo',
'aranesp',
'aragues',
'aramits',
'aracely',
'arazede',
'arameic',
'araujos',
'arafura',
'ara-kol',
'arapgir',
'araqiel',
'arachna',
'arachne',
'arabluy',
'arachis',
'aranuel',
'arabela',
'arapaho',
'arakhin',
'arahuay',
'araniti',
'araceli',
'arabici',
'aragats',
'araghju',
'arackal',
'aramunt',
'aramiac',
'arashan',
'aranchi',
'arapkoy',
'aratula',
'arabsat',
'arancou',
'aragaki',
'arashic',
'arabism',
'aracana',
'arabesh',
'aranaes',
'aramith',
'aradune',
'arakkal',
'arawala',
'araspar',
'araluen',
'aramshe',
'araruna',
'arabian',
'aratuba',
'arathia',
'araneta',
'arakain',
'araguas',
'arabber',
'arakaka',
'araunia',
'arapuni',
'aratama',
'arandel',
'aracruz',
'arancon',
'araneus',
'arapkir',
'arattai',
'arantxa',
'arapata',
'arambol',
'arabist',
'aracara',
'aratiba',
'aratani',
'araripe',
'arataki',
'aramina',
'araunah',
'aravenu',
'arasuka',
'arabari',
'araarso',
'aracati',
'araceae',
'arabtex',
'aravena',
'aramark',
'aralica',
'aramide',
'arachan',
'araujia',
'arariel',
'aramari',
'aratrum',
'araplus',
'aragona',
'aramaio',
'arahura',
'arasaxa',
'arawelo',
'aramayo',
'araphen',
'aracuai',
'aracaju',
'arancio',
'arambag',
'aragorn',
'aragami',
'arahiwi',
'ararimu',
'aranean',
'arabosi',
'aramire',
'aracoix',
'arabkjr',
'araneae',
'arareco',
'ararica',
'arakeri',
'arangeh',
'aradhna',
'aranjak',
'arabesk',
'arandan',
'arakiel',
'arapian',
'aranica',
'arapito',
'arapoti',
'arapaha',
'arabica',
'aramids',
'aramaki',
'aradite',
'aramsha',
'arawete',
'araragi',
'arabkir',
'aramoho',
'arandur',
'aramotu',
'aracatu',
'arattas',
'arawaks',
'arabise',
'aracena',
'arapora',
'aralbay',
'araguay',
'arasada',
'arangur',
'arawana',
'arayoor',
'arachin',
'araksbo',
'araguia',
'aratius',
'arakaki',
'aradeti',
'aranese',
'aracagi',
'aravaan',
'aralykh',
'arapesh',
'arapuca',
'aranyak',
'arantes',
'arandon',
'arangel',
'aravete',
'aramazd',
'arambai',
'aragadz',
'ara-ctp',
'arantza',
'aramaic',
'arakani',
'arabgir',
'arazand',
'aranais',
'aracari',
'araotes',
'aralluy',
'arabrot',
'aratoca',
'arakwal',
'arabaux',
'arakawa',
'arapusa',
'arakere',
'arataca',
'arasanj',
'arandas',
'aracima',
'aravuse',
'arabaci',
'arabize',
'arahata',
'arazbar',
'araroba',
'arabuta',
'aratika',
'arakura',
'aransas',
'araspes',
'aragoto',
'arabata',
'arahuan',
'aramoun',
'arajuno',
'arageek',
'arakadz',
'aramine',
'arandis',
'araules',
'arackar',
'arabdev',
'araniko',
'aranama',
'aranlah',
'aracaty',
'aravjun',
'arafali',
'aravaca',
'araleri',
'ararad',
'araxos',
'arakan',
'ararki',
'arabhi',
'arales',
'aramon',
'araxes',
'arazeh',
'aragea',
'aradan',
'ararah',
'aranas',
'arablu',
'arathi',
'aravet',
'aranya',
'aramus',
'araiji',
'arates',
'aramil',
'araxxe',
'arangu',
'arauxo',
'arazap',
'araros',
'arayan',
'araiya',
'aracis',
'arases',
'aragua',
'arason',
'aradeo',
'araras',
'arafah',
'araiza',
'aralia',
'aranha',
'arable',
'arasia',
'arator',
'aramex',
'aranna',
'aratal',
'aranca',
'aratos',
'araldi',
'arauco',
'araria',
'arakli',
'araden',
'arapua',
'araura',
'arapai',
'aransa',
'aramco',
'arabic',
'arazpa',
'aranda',
'araujo',
'aralek',
'aralez',
'aravis',
'aracai',
'aramis',
'aranli',
'araona',
'aranpo',
'arabis',
'aratta',
'arabov',
'aracha',
'arabio',
'arasht',
'aranos',
'arafat',
'aradhe',
'aranza',
'arapov',
'araafa',
'arangi',
'aracus',
'arapaj',
'aranea',
'aracar',
'araito',
'araitz',
'arakul',
'aramah',
'aranis',
'aravot',
'arados',
'aratus',
'araght',
'arabia',
'aranta',
'aratea',
'arastu',
'aralsk',
'arakel',
'arapha',
'araldo',
'aralen',
'araski',
'arakoy',
'arajet',
'aradia',
'araboy',
'arabba',
'arawan',
'aravan',
'araira',
'aracil',
'aramba',
'arahal',
'arawad',
'arabin',
'arashi',
'arayat',
'aravus',
'aracne',
'aracan',
'arango',
'aragvi',
'arazan',
'aradus',
'aracas',
'arasur',
'aralik',
'arallu',
'aralla',
'aragoa',
'aragam',
'araste',
'aragon',
'aralan',
'aramos',
'aracic',
'araqah',
'aralam',
'arandu',
'arabih',
'arauan',
'aratti',
'aranga',
'arafoe',
'arayis',
'aranji',
'arabah',
'aramid',
'arakne',
'aradac',
'aranui',
'aranno',
'arauca',
'arakil',
'arakht',
'arahan',
'ararat',
'arahat',
'aramic',
'arabel',
'arayez',
'aragno',
'arakha',
'araban',
'aralar',
'arafel',
'aradas',
'arasan',
'arabid',
'araure',
'arazin',
'arabal',
'arawak',
'arapei',
'arandi',
'arasco',
'araap',
'arauz',
'araxa',
'arabi',
'arama',
'aranc',
'arabe',
'arabs',
'araul',
'arawn',
'arand',
'arach',
'arado',
'arapi',
'aradi',
'arada',
'arare',
'araku',
'arati',
'arata',
'araba',
'arant',
'arash',
'arain',
'arali',
'aracu',
'araux',
'araos',
'arabo',
'arald',
'aravu',
'arans',
'araki',
'araby',
'araja',
'arame',
'araoz',
'aratz',
'arase',
'arana',
'arani',
'arawa',
'araya',
'araci',
'arabu',
'araic',
'arasu',
'arabh',
'aramm',
'araly',
'arack',
'arang',
'arara',
'arayi',
'arano',
'aralu',
'araua',
'arafo',
'arato',
'araia',
'arafi',
'arava',
'arazu',
'ara-c',
'arajs',
'araea',
'arade',
'arats',
'araks',
'araca',
'araza',
'araft',
'arawe',
'arago',
'arawb',
'araqi',
'arath',
'arazi',
'arari',
'arale',
'aravt',
'arai',
'araz',
'aras',
'arau',
'arax',
'arac',
'arab',
'arae',
'arav',
'aran',
'arag',
'arar',
'arap',
'arak',
'aram',
'arat',
'araf',
'arad',
'aral',
'ara'],
'b': ['arbeiter-illustrierte-zeitung',
'arbeidsforskningsinstituttet',
'arbetstidsforkortning',
'arbigny-sous-varennes',
'arbatsko-pokrovskaya',
'arbeider-politikken',
'arbitrary-precision',
'arbeidermagasinet',
'arboriculturalist',
'arbetar-tidningen',
'arberats-sillegue',
'arbouet-sussaute',
'arbonne-la-foret',
'arbeiter-zeitung',
'arbeidsvitaminen',
'arbeider-avisen',
'arbetarhistoria',
'arbitrarinesses',
'arbedo-castione',
'arbeitskommando',
'arbeiderpartiet',
'arbeiterpolitik',
'arblade-le-haut',
'arbeidstilsynet',
'arbeitseinsatz',
'arbeider-avisa',
'arbeiterkammer',
'arbetarekommun',
'arbeiterstimme',
'arbeiderbladet',
'arbeiterfraind',
'arboroharamiya',
'arbitrage-free',
'arblade-le-bas',
'arboricultural',
'arborsculpture',
'arbeitstagung',
'arbeiderspers',
'arbelaezaster',
'arbeideravisa',
'arbetarkommun',
'arbetarbladet',
'arboti-zohota',
'arboriculture',
'arbutus-berry',
'arbeitsdienst',
'arbeiterwille',
'arborsculptor',
'arbitrarities',
'arbitersports',
'arbitrariness',
'arboreomorph',
'arborphiliac',
'arbitaration',
'arbetslinjen',
'arbeitslager',
'arbisteatern',
'arbaejarsafn',
'arbulocarpus',
'arborescence',
'arborday.org',
'arborophilia',
'arbourthorne',
'arbeidsgenot',
'arboricornus',
'arboreality',
'arbat-opera',
'arbitrators',
'arbeitertum',
'arbutoideae',
'arbourvitae',
'arbaletiers',
'arbalestier',
'arboviruses',
'arbovirosis',
'arbor-vitae',
'arbitrageur',
'arborotites',
'arborescent',
'arbitration',
'arbeterheim',
'arborophila',
'arbitrarily',
'arbognophos',
'arbalister',
'arborfield',
'arboussols',
'arbuthnott',
'arborvitae',
'arbitarity',
'arbeitsamt',
'arbanenses',
'arbouretum',
'arbaciella',
'arboricola',
'arbidihist',
'arbondaira',
'arbalester',
'arbutamine',
'arborology',
'arborglyph',
'arbitrista',
'arboretrum',
'arbianwala',
'arbayistan',
'arbazacius',
'arborychoi',
'arboldswil',
'arbatskaya',
'arboricity',
'arbutoside',
'arbitrator',
'arbacioida',
'arbeideren',
'arbatskoye',
'arbostola',
'arbusigny',
'arbitrons',
'arboletes',
'arbequina',
'arboroasa',
'arbrissel',
'arboisier',
'arbuzovka',
'arbobreen',
'arbonaida',
'arboretum',
'arbulario',
'arblaster',
'arbovirus',
'arbegnoch',
'arbekacin',
'arbelodes',
'arbetaren',
'arbularyo',
'arbitrium',
'arberella',
'arbupales',
'arbenhorn',
'arbignieu',
'arbitrary',
'arbereshe',
'arbuzynka',
'arbitrate',
'arborland',
'arborimus',
'arberries',
'arbuscula',
'arbuthnot',
'arbitrage',
'arbanasce',
'arbigland',
'arbellara',
'arbanitis',
'arbesbach',
'arbashevo',
'arboridia',
'arborists',
'arbanaska',
'arboucave',
'arbanitai',
'arbuckles',
'arboledas',
'arberisht',
'arborius',
'arboreta',
'arboleas',
'arbosana',
'arborane',
'arborlon',
'arboreus',
'arbaouet',
'arbalist',
'arbasera',
'arba`een',
'arbelaez',
'arbutina',
'arbavere',
'arbirlot',
'arbouans',
'arbuzovo',
'arborloo',
'arbuckle',
'arbitron',
'arbatsky',
'arbalest',
'arbinovo',
'arbeteta',
'arbanats',
'arbucies',
'arborway',
'arbancon',
'arbeidet',
'arbanija',
'arbastan',
'arboform',
'arboreal',
'arbormon',
'arbornet',
'arbeiter',
'arbogast',
'arborist',
'arborism',
'arbanasi',
'arbroath',
'arborite',
'arboreol',
'arboleda',
'arbourse',
'arbedian',
'arboring',
'arbiter',
'arbanus',
'arbatan',
'arbasus',
'arbelas',
'arbudas',
'arbagar',
'arboria',
'arboles',
'arbonga',
'arbinia',
'arbella',
'arbetio',
'arburua',
'arbanum',
'arbelos',
'arbouet',
'arbuzov',
'arberry',
'arbatax',
'arbegas',
'arbelic',
'arbitio',
'arbania',
'arbutin',
'arbigny',
'arbaoua',
'arbacow',
'arbujad',
'arbalet',
'arbonne',
'arborea',
'arbieto',
'arbanas',
'arbetet',
'arbecey',
'arborio',
'arbacia',
'arbeila',
'arbusto',
'arblast',
'arbinda',
'arbihat',
'arbatov',
'arbaces',
'arbeost',
'arbroth',
'arbinas',
'arbayta',
'arboras',
'arbanne',
'arbylis',
'arbitor',
'arbitol',
'arbidol',
'arbutus',
'arbace',
'arbeau',
'arbent',
'arboga',
'arbois',
'arbaud',
'arbues',
'arbona',
'arbani',
'arbana',
'arbian',
'arbury',
'arberi',
'arbnez',
'arbela',
'arbeia',
'arbaer',
'arboll',
'arbizu',
'arbius',
'arbazh',
'arborg',
'arbeca',
'arbori',
'arband',
'arbaaz',
'arbach',
'arbaji',
'arberg',
'arbano',
'arbane',
'arbulu',
'arboli',
'arbory',
'arbabi',
'arbenz',
'arbour',
'arbnet',
'arbeit',
"arby's",
'arbore',
'arburg',
'arbing',
'arboye',
'arben',
'arbet',
'arbon',
'arbar',
'arbbh',
'arbus',
'arbin',
'arbil',
'arbal',
'arbos',
'arbre',
'arbra',
'arbaz',
'arboa',
'arbol',
'arbee',
'arbel',
'arbib',
'arbis',
'arbir',
'arban',
'arbeo',
'arbot',
'arbor',
'arbat',
'arber',
'arbab',
'arbed',
'arbia',
'arbun',
'arbas',
'arbih',
'arbe',
'arbd',
'arby',
'arbh',
'arba',
'arbi',
'arbs',
'arbo',
'arb1',
'arb'],
'c': ['archaeopterodactyloidea',
'archedino-chernushinsky',
'archaeolithophyllaceae',
'archer-daniels-midland',
'arctotraversodontinae',
'archaeoistiodactylus',
'architectureweek.com',
'arcy-sainte-restitue',
'archaeopterygiformes',
'archipterogrammoides',
'archontophoenicinae',
'archconfraternities',
'architecture-studio',
'archaeopterygiforms',
'archaeorhizomycetes',
'archaeocryptography',
'archaeolepadomorpha',
'archinotodelphyidae',
'archipelepidiformes',
'archaeolithophyllum',
'archaeoparasitology',
'archaeoastronomical',
'archiacanthocephala',
'archanes-asterousia',
'architectsalliance',
'archigregarinorida',
'archaeopteridaceae',
'archimedes-lab.org',
'archaeornithomimus',
'archispirostreptus',
'archaeosphaeroides',
'architectonicoidea',
'architectibranchia',
'arcticiflavibacter',
'archimonocelididae',
'archaebalaenoptera',
'archaeoastronomist',
'archaeodontosaurus',
'architectonicidae',
'archeocrypticidae',
'arcwise-connected',
'archaeascomycetes',
'archigyrodactylus',
'archaeopithecidae',
'archaeopriapulida',
'archaeobibliology',
'archaeonycteridae',
'archoleptonetidae',
'architrypethelium',
'arctomercaticeras',
'archbishop-bishop',
'archaeotrogonidae',
'architaenioglossa',
'archaeology/broch',
'archipresbyterate',
'archconfraternity',
'archisaccophyllia',
'arctometatarsalia',
'archaeogastropoda',
'archaeometallurgy',
'arcizac-ez-angles',
'archaeoseismology',
'archaeopilocornus',
'archactinoposthia',
'arc-sous-montenot',
'archancistrocerus',
'archboldiodendron',
'archaeopteridales',
'archaeospheniscus',
'archeorhinotermes',
'archaeopterygidae',
'archaeo-astronomy',
'archaeornithoides',
'archaeosynthemis',
'archaeoglobaceae',
'archaeolemurinae',
'archotermopsidae',
'archbishop-major',
'archaeperidinium',
'archaefructaceae',
'architectureweek',
'archaeohyracidae',
'archaeobalanidae',
'archaeorrhynchus',
'archaeocydippida',
'archaeoastronomy',
'archaeornithipus',
'archosauriformes',
'archaeolepidotus',
'archaeobiologist',
'archinacelloidea',
'archaeobatrachia',
'archaeoacoustics',
'archeosolenocera',
'archaeolemuridae',
'archidasyphyllum',
'archaeomythology',
'archosauromorpha',
'archosauromorphs',
'archaeogastropod',
'archiascomycetes',
'arctowskifjellet',
'archileistobrius',
'archirhodomyrtus',
'archconservative',
'archiecomics.com',
'archischoenobius',
'arcis-le-ponsart',
'archegosauroidea',
'arctotraversodon',
'archiargiolestes',
'archidermapteron',
'archeterokrohnia',
'archaeopsittacus',
'archaeomarasmius',
'archicollinella',
'arcynopterygini',
'archeio-marxism',
'archidermaptera',
'arctostylopidae',
'archicerebellum',
'archiepiscopacy',
'arctocephalinae',
'archeoastronomy',
'archaeopragidae',
'archaeornithura',
'archio-marxists',
'archiboreoiulus',
'archaeoscinidae',
'archaeogenetics',
'archaeorrhyncha',
'archontophoenix',
'arctocephalites',
'archaeopsyllini',
'architecturally',
'arcizans-dessus',
'archidendropsis',
'arcticodactylus',
'archaeabacteria',
'archichauliodes',
'arc-consistency',
'archstewardship',
'archaerhodopsin',
'archegosauridae',
'archaebacterium',
'archizelmiridae',
'archescytinidae',
'archetypomyidae',
'archstone-smith',
'arctohungarites',
'archosauromorph',
'archaeoceratops',
'arcantiodelphys',
'archaphanostoma',
'archaeobacteria',
'archaeophocaena',
'arctomeekoceras',
'archaeomaenidae',
'archeobuprestis',
'archaeolamnidae',
'archinacellidae',
'archaeorhynchus',
'archaeonycteris',
'arcanobacterium',
'archaeosporales',
'archaeatropidae',
'archaeomagnetic',
'arctometatarsal',
'archoleptoneta',
'archeocyathida',
'archechiniscus',
'archaeozoology',
'archiserratula',
'archibaccharis',
'arcizans-avant',
'archigrammitis',
'archisynagogue',
'archeocyathids',
'arcis-sur-aube',
'archaeosortase',
'archichrysotus',
'arctodiaptomus',
'arctoprionites',
'archinemapogon',
'archaeocyantha',
'archistrategus',
'architeuthidae',
'archidactylina',
'architectonics',
'arch-treasurer',
'archaeopallium',
'archiboehmeria',
'archaeornithes',
'archchancellor',
'arcelor-mittal',
'arctostaphylos',
'archileptocera',
'arc-en-barrois',
'archaeobiology',
'archer-daniels',
'archaeopotamus',
'archaeovaranus',
'archiinocellia',
'archophileurus',
'archencephalon',
'archaeo-optics',
'archaeological',
'arcticneftegaz',
'archaeodobenus',
'archicolliuris',
'archaeodelphis',
'archpresbytery',
'archontopouloi',
'arctopsychidae',
'archaeagnostus',
'archaebacteria',
'archytoepalpus',
'arc-sous-cicon',
'archaeovenator',
'archdeaconries',
'archaeonectrus',
'archiasterella',
'arcado-cypriot',
'architectonica',
'archaeocidaris',
'archaeodictyna',
'arc-transitive',
'archaeotherium',
'arctoptychites',
'archiphytalmia',
'archaeopodella',
'archaeobelodon',
'arca-laurentia',
'archaeocyathid',
'archeabacteria',
'archipresbyter',
'archaerhineura',
'archisargoidea',
'archosauriform',
'archipheracita',
'archaeoattacus',
'archicnephasia',
'archiceroptera',
'archiepiscopal',
'archipheracite',
'archaeologists',
'archaeogenetic',
'archaeoprepona',
'archio-marxism',
'archaeotriakis',
'archaeplastida',
'arctotirolites',
'archaeotourism',
'arctosirenites',
'arche-writing',
'arcangeliella',
'archaeopteryx',
'archaeobdella',
'archaeocortex',
'arcticibacter',
'arctioblepsis',
'archiplutodes',
'archaeobotany',
'archcathedral',
'archaeolithic',
'archeuptychia',
'archaeogaleus',
'archiannelida',
'archaeoraptor',
'arcanotherium',
'architextiles',
'archiphonemic',
'archdeaconess',
'arctic-alpine',
'archimediella',
'archiborborus',
'archimandrite',
'archaeoglobus',
'archipsocidae',
'archipendulum',
'archaeplastid',
'archaeosyodon',
'archaefructus',
'archive.today',
'archisargidae',
'archaeognatha',
'archaeopteris',
'archiperacita',
'archaeocindis',
'architonnerre',
'archaeohippus',
'archemandrite',
'archaeonectes',
'architectonic',
'archaephippus',
'arcytophyllum',
'archaeologist',
'archaeoindris',
'archaeopsylla',
'archaeotrogon',
'archepandemis',
'arctocephalus',
'archaeichnium',
'archeological',
'arcuatopterus',
'archaeonympha',
'archiatriplex',
'archeocyathid',
'archipolypoda',
'archiperlaria',
'arctocyonidae',
'arc-sur-tille',
'architectures',
'archeaologist',
'archipelbuurt',
'arctocetraria',
'archeolimulus',
'archimandrita',
'archaraeoncus',
'archegoniatae',
'archievenblad',
'arctoparmelia',
'archaeamphora',
'archaeogaming',
'archaeocyatha',
'archtreasurer',
'arctaphaenops',
'archaeovolans',
'architainment',
'arctoperlaria',
'arcticalymene',
'archaeogeryon',
'archijassidae',
'archelosauria',
'arctogymnites',
'archicolpodes',
'arc-connected',
'archedinskaya',
'archaelogical',
'archigalleria',
'archaeotriton',
'archipatrobus',
'archidiaconus',
'archetectonic',
'arcadocypriot',
'archiephestia',
'architextures',
'archanophobia',
'archegosaurus',
'archimicrodon',
'archbishopric',
'archimylacris',
'archaeopterix',
'archieratikon',
'archaelogists',
'archaeothyris',
'arc-et-senans',
'archeologists',
'architectural',
'archpresbyter',
'archeognathus',
'arcelormittal',
'arcy-sur-cure',
'archaeoniscus',
'arcizac-adour',
'archaeterphis',
'arcteranthis',
'archaeopolis',
'architecture',
'arctagrostis',
'archiminolia',
'archipelepis',
'archaeomaene',
'archyopterix',
'archeopteryx',
'archidamidas',
'archaeanassa',
'archedictyon',
'archboldomys',
'architeuthus',
'archaeopacha',
'archipielago',
'archocentrus',
'arctoscyphus',
'arch-buffoon',
'archestratus',
'arcanosaurus',
'archaeobelus',
'arch-diocese',
'archigraptis',
'archeognatha',
'architeuthis',
'archdruidess',
'archaeologia',
'arcticoceras',
'arc-les-gray',
'arch-nemesis',
'architrenius',
'archetypical',
'archeopterix',
'archilestris',
'arcutelphusa',
'archetypomys',
'archostemata',
'archipetalia',
'archniphobia',
'arcangelisia',
'archivehuman',
'arcthoplites',
'arctodiamesa',
'archdioceses',
'archaeolepis',
'archaeophyte',
'archaeomanta',
'archaebacter',
'archetecture',
'archipelagos',
'archaeolamna',
'archilochian',
'archaeoptrix',
'arctacaridae',
'archilobesia',
'archchaplain',
'archiearinae',
'arctomiaceae',
'arcitalitrus',
'archiloquian',
'architectuul',
'archachatina',
'archidrepana',
'arctogalidia',
'archiphlebia',
'archmandrite',
'arch-bridges',
'archaeomenia',
'archimandrit',
'archosaurian',
'archambeault',
'archdiocesan',
'arctiocossus',
'archestratos',
'archangelski',
'archiearides',
'arceuthobium',
'archencyrtus',
'arch-villain',
'archaeocytes',
'archidendron',
'archaeohyrax',
'archanthemis',
'archangielsk',
'archeologist',
'archidesmida',
'archaeoptryx',
'archidistoma',
'archaeocetes',
'arctotherium',
'arch-steward',
'archeophylla',
'archilochean',
'arctognathus',
'archicembalo',
'arctacanthus',
'archipallium',
'archipelagic',
'arctanthemum',
'archidiaceae',
'archetoothus',
'archidiptera',
"arc'tan'gent",
'archaeolemur',
'arcelormitta',
'archobarzane',
'arch/matheos',
'archingeayia',
'arcanumophis',
'archdeaconry',
'archaeometry',
'archaeopress',
'archaelology',
'architectura',
'arctopelopia',
'arcynopteryx',
'arctorthezia',
'archinephros',
'archeocyatha',
'arccotangent',
'archaeomeryx',
'arctobyrrhus',
'archeoraptor',
'archeosofica',
'archembiidae',
'archaeocasis',
'archivemount',
'archomentals',
'archaboilus',
'arciechowek',
'archodontes',
'archive.org',
'arcopallium',
'arc-voltaic',
'archaeoidea',
'archemedies',
'archosargus',
'archer/park',
'arcellinida',
'archangelsk',
'arctosaurus',
'arcyosperma',
'archphoneme',
'archimyrmex',
'archilestes',
'archephanes',
'arctapodema',
'arcuphantes',
'arch-priest',
'archiafrika',
'arctopsyche',
'arciszewski',
'archigallus',
'archimboldo',
'archepiscop',
'archeriidae',
'archiponera',
'arctocyonia',
'architecure',
'archipsocus',
'archimeties',
'archeoforum',
'arch-bishop',
'archoplites',
'archeoglobi',
'archivegrid',
'archithosia',
'archimboldi',
'archnemesis',
'archenfield',
'archeanassa',
'archimantis',
'arcexplorer',
'archiphylax',
'arcabuceros',
'archaeornis',
'archimedies',
'arc-en-ciel',
'arc-boutant',
'arccosecant',
'archaeaspis',
'archodonata',
'arch-deacon',
'archomental',
'archaeozeus',
'archbuffoon',
'archaeocete',
'archbridges',
'arcticaborg',
'archamoebae',
'archeometry',
'archaeosine',
'archpriests',
'archimedian',
'archaically',
'archeosetus',
'archidiales',
'archepiolus',
'archegonium',
'archilochos',
'archaeoceti',
'archeopelta',
'archambault',
'archaeophya',
'archicortex',
'arctocorisa',
'archipialea',
'archisepsis',
'archduchess',
'arctoscelia',
'archieunuch',
'arc-welding',
'archemachos',
'archaeozoic',
'arctopeltis',
'archer-fish',
'arch-butler',
'arco-palais',
'archenholtz',
'archdiocese',
'archactenis',
'archvillain',
'archerfield',
'archeparchy',
'archenteron',
'archepolego',
'archidameia',
'arcanoceras',
'arcterigone',
'archdapifer',
'archaeology',
'arcade-game',
'arcovenator',
'archenetron',
'archichlora',
'archbishops',
'archipelago',
'archioptryx',
'arcushallen',
'archeptolis',
'archipimima',
'archimedean',
'archeophone',
'archelochus',
'arcicembalo',
'archichthys',
'arcidiacono',
'archiestown',
'archivx.org',
'arcuolimbus',
'archemachus',
'archedictya',
'archidesmus',
'archenemies',
'archolaemus',
'archimestra',
'archangelos',
'archipirata',
'archagathus',
'architectus',
'archocyrtus',
'archilochus',
'arctosuchus',
'archipeligo',
'archaeocyte',
'arcitumomab',
'archosauria',
'arctoconopa',
'archaeophis',
'archosaurus',
'arch-bridge',
'archiguille',
'archaeocyon',
'archipenko',
'archbishop',
'arctolatry',
'architimus',
'architects',
'arcminutes',
'arc-second',
'architarbi',
'arctagyrta',
'archeocyte',
'arcestidae',
'archerwill',
'arcturidae',
'archinform',
'arctocebus',
'archingeay',
'archemedis',
'archwizard',
'archbridge',
'arcicollar',
'arctogadus',
'archeology',
'arcedeckne',
'archdeacon',
'arctogeron',
'archistorm',
'archivolta',
'arctophila',
'archerfish',
'archaeanax',
'archetypes',
'archiereus',
'architecht',
'archivista',
'archenhold',
'arcticidae',
'archutowko',
'archebulus',
'archirodon',
'archilycus',
'arc-welder',
'archilocus',
'archegonia',
'arctomelon',
'arcesilaus',
'arctophile',
'archidemis',
'arcubisite',
'archaeocin',
'archivists',
'archontics',
'architechs',
'architecti',
'arctosomma',
'arcboutant',
'archebates',
'archigetes',
'arcobacter',
'arcobaleno',
'arctinurus',
'archegetes',
'arcimboldi',
'archaellum',
'archemedes',
'arctiarpia',
'archimedes',
'arctaedius',
'arcturides',
'arcumeggia',
'arcenillas',
'archpriest',
'arctonasua',
'archigenes',
'archelange',
'archemitra',
'arcusaurus',
'archosaurs',
'arctigenin',
'arctoceras',
'arctosippa',
'arctikugol',
'archelaphe',
'arch-linux',
'archaeoses',
'archbutler',
'archivolva',
'archamedes',
'archiearis',
'arcentales',
'arctomecon',
'arces-dilo',
'arctosomus',
'archaelogy',
'arch-enemy',
'archidamus',
'archetypal',
'arcologies',
'arctangent',
'archisopha',
'arcwelding',
'arcyophora',
'arcapillin',
'arch-angel',
'archangels',
'archatypal',
'arcilasisa',
'archidamia',
'archagonum',
'archimania',
'arctesthes',
'archaruniq',
'archepsila',
'arcosolium',
'archaeidae',
'arch-druid',
'arcavacata',
'archocelis',
'architrave',
'archknight',
'archevites',
'arcarsenal',
'arctoseius',
'arctacarus',
'archetimus',
'arckaringa',
'archipoeta',
'archibasis',
'archedicus',
'archemorus',
'archivaria',
'arctolepis',
'arconville',
'arcobjects',
'arcticugol',
'arcseconds',
'arcoptilia',
'arctiopais',
'arcteonais',
'archineura',
'archeozoic',
'arctolamia',
'archivolts',
'arctotheca',
'archboldia',
'arc-minute',
'archostola',
'archedemus',
'archicebus',
'archelaide',
'archemoros',
'arcimboldo',
'architeles',
'archimelus',
'arcangeli',
'archworth',
'arcaboard',
'arcovomer',
'archignac',
'arceditor',
'archstone',
'arcosolia',
'arctaspis',
'archmagus',
'archettes',
'arcticgas',
'arctiinae',
'archspire',
'arcus-air',
'arcopilus',
'archimaga',
'archanara',
'archcount',
"arc'teryx",
'archigram',
'arcathias',
'arctiidae',
'arcillera',
'archadzor',
'archilema',
'archiluth',
'archabbey',
'arcticmud',
'arcapella',
'arcade1up',
'archebios',
'arctopsis',
'arco-iris',
'archetype',
'archermos',
'arcenciel',
'archville',
'arcanator',
'archignat',
'archevite',
'archidela',
'arcediano',
'archdemon',
'arctictis',
'archimage',
'arcadiana',
'archephia',
'archedice',
'archundia',
'archippus',
'arcangelo',
'arcinazzo',
'archirhoe',
'archimate',
'archelaos',
'archibugi',
'archology',
'arctelene',
'arcosanti',
'archebius',
'arcadetar',
'archidona',
'archibald',
'arceburgo',
'archilaus',
'arc-ecris',
'arcturian',
'archermus',
'archfiend',
'arctoidea',
'arcolatry',
'archdruid',
'arciellia',
'archastes',
'archutowo',
'archdaily',
'arclength',
'arctobius',
'archimime',
'archenemy',
'archrival',
'archistar',
'arceisius',
'archelaus',
'arcadians',
'archosaur',
'arctosoma',
'arcicella',
'arconciel',
'arcesilas',
'archidice',
'archaean.',
'archlebov',
'archarius',
'archinaut',
'archipini',
'architype',
'arcattack',
'arctowski',
'archalbot',
'arcanists',
'architech',
'archmagic',
'arctopora',
'arceniega',
'archistes',
'archivist',
'arcadimon',
'archonias',
'archibius',
'archiater',
'archelais',
'archicamp',
'archivolt',
'archaruni',
'archizoom',
'arcangues',
'arceneaux',
'arctander',
'architect',
'arcellina',
'arceophon',
'arcwelder',
'arcyptera',
'archiator',
'arcticfox',
'arcimedes',
'arctocyon',
'archaeron',
'archangel',
'archotuba',
'arciszewo',
'archiving',
'archimago',
'archernis',
'archdevil',
'arcsecond',
'archtypes',
'arcobelus',
'archgallo',
'arcademan',
'archangan',
'arcagnolo',
'archidike',
'archimede',
'arciferal',
'archduchy',
'archerina',
'archuleta',
'arcanacon',
'arcsecant',
'archaster',
'arccosine',
'archerite',
'archaeome',
'arcugnano',
'archlinux',
'arcidosso',
'arcminute',
'arcoverde',
'arcticnet',
'archidium',
'archedius',
'arcuatula',
'arctiites',
'archaeans',
'arcbeetle',
'archewell',
'archytaea',
'archammer',
'arcinella',
'archiveus',
'arctornis',
'arcaspace',
'arciliuto',
'arcozelo',
'archbang',
'archaeum',
'arconnay',
'archaean',
'archibus',
'arcuated',
'arconada',
'archanes',
'arcandor',
'archelon',
'arcology',
'arcesius',
'archbach',
'arcandam',
'arcisate',
'archeion',
'archinti',
'archicad',
'arcserve',
'arconsat',
'archezoa',
'archigny',
'arcadius',
'arcabuco',
'arcomage',
'archyala',
'archeria',
'archaeon',
'arcanite',
'archlute',
'arcahaie',
'arcadelt',
'archaeus',
'arctur-1',
'arctur-2',
'arctenus',
'archieve',
'arcturos',
'arctiini',
'archaism',
'archange',
'archdale',
'archives',
'archmail',
'architel',
'archness',
'arcturis',
'arc/info',
'arcesine',
'arcachon',
'arctonyx',
'archmage',
'arcugowo',
'arcadium',
'arcanoid',
'archlabs',
'arcenant',
'archonta',
'arcturus',
'archival',
'archaeal',
'archamps',
'arcobara',
'arctiina',
'archelis',
'archytas',
'arctides',
'archiwum',
'archlord',
'arcimoto',
'arc-horn',
'archpoet',
'archduke',
'archenor',
'archways',
'architis',
'arcsight',
'arct-021',
'archinus',
'arcangel',
'archeops',
'archiver',
'archgoat',
'archived',
'arcambal',
'arctotis',
'arctulus',
'arcapita',
'archinto',
'arctonoe',
'arctinus',
'archease',
'arccosec',
'arconcey',
'arclight',
'arcalion',
'archhani',
'arcobrau',
'arcatera',
'arctella',
'arconate',
'arcestes',
'arctodus',
'arctomia',
'archwire',
'archamia',
'arcisses',
'arcalyst',
'arcinges',
'arcuites',
'archalla',
'archasia',
'archadis',
'arckanum',
'arch-foe',
'archilab',
'archeage',
'archbold',
'arcadian',
'archmagi',
'arcuella',
'archivea',
'arclinea',
'arct-154',
'archerie',
'archaeol',
'archtype',
'arctopus',
'arcalis',
'arcille',
'arcueid',
'arcview',
'arcanum',
'archiud',
'archnet',
'archive',
'arcitys',
'archaic',
'archaca',
'arcinfo',
'arcsine',
'arconce',
'arcesis',
'archies',
'arcuate',
'archeda',
'archila',
'arctiin',
'archont',
'arcidae',
'arcabas',
'arcelia',
'archons',
'arc-164',
'arcadea',
'archail',
'archaia',
'archeus',
'archale',
'archana',
'archena',
'archant',
'arcbest',
'arccoth',
'arctics',
'arcadis',
'arcos-1',
'archman',
'archaen',
'arctan2',
'arc-pvd',
'archita',
'archang',
'arcatao',
'arcesia',
'archerd',
'arcplan',
'arcelik',
'archfoe',
'archsum',
'arclamp',
'arcades',
'archias',
'archeon',
'arcueil',
'arcetri',
'arctops',
'arcadie',
'arcamax',
'arcones',
'arculus',
'arcelor',
'archins',
'arcadia',
'arcsoft',
'arcoona',
'arctous',
'arccsch',
'arcfour',
'arcieri',
'arccosh',
'arcadio',
'arctica',
'arcanjo',
'arcelca',
'arc-eye',
'archura',
'arcigay',
'arconic',
'arctosa',
'arching',
'archean',
'archubi',
'archigo',
'archain',
'archips',
'archaos',
'arcevia',
'arcsech',
'archery',
'arctanh',
'arctite',
'arcomps',
'archway',
'arctium',
'arcaffe',
'arcanes',
'arcella',
'arciero',
'archaea',
'arcalod',
'arcyria',
'archtop',
'arcanis',
'arcang.',
'arcsinh',
'archone',
'arcoxia',
'arcelin',
'archiac',
'arcais',
'arcaia',
'arcola',
'arckid',
'archer',
'arcaex',
'arcmap',
'archug',
'archis',
'arcina',
'arched',
'archon',
'arckiv',
'arcjet',
'arcnet',
'arcada',
'archin',
'arcore',
'archea',
'arctoa',
'arcano',
'arcosh',
'archae',
'arctus',
'arconi',
'arcctg',
'arcadd',
'archil',
'arcfox',
'arcole',
'arcady',
'arcusa',
'archez',
'arcoth',
'arcsde',
'archar',
'arcana',
'arclid',
'arcona',
'arcani',
'arcand',
'arccsc',
'arctia',
'archey',
'archim',
'archem',
'arcyon',
'arch22',
'archai',
'arclin',
'archib',
'archos',
'arccot',
'arclen',
'arcmin',
'arcaya',
'archut',
'arcade',
'archie',
'arcv-n',
'arcado',
'arceau',
'arccos',
'arcaos',
'arcuri',
'arculf',
'arcida',
'arcims',
'arcens',
'arcsin',
'arcsec',
'arcent',
'arctan',
'arctos',
'arcgis',
'arcari',
'arcins',
'arcsch',
'arcugi',
'arcane',
'arctic',
'arcene',
'arcing',
'arcas',
'arcee',
'archy',
'arcis',
'archi',
'arcom',
'arcia',
'arcae',
'arcte',
'arctu',
'arceo',
'arcan',
'arctg',
'arc21',
'arcam',
'arcic',
'arc-a',
'arcop',
'arcth',
'arcbs',
'arcus',
'arche',
'arcay',
'arch+',
'arces',
'arcoa',
'arcal',
'arcos',
'arcen',
'arcey',
'archa',
'arcon',
'arc-5',
'arcy',
'arco',
'arcm',
'arcs',
'arc4',
'arct',
'arcc',
'arce',
'arch',
'arca',
'arci',
'arc'],
'd': ['ardeuil-et-montfauxelles',
'ardloughnabrackbaddy',
'ard-hauptstadtstudio',
'ardhanaarinateshwara',
'ardteistimeireacht',
'ardenay-sur-merize',
'ardashir-khwarrah',
'ardhanareeswarar',
'ard-el-bathanyeh',
'ardhanarishvara',
'ardhanarishwara',
'ardhanareshvara',
'ardennes-alsace',
'ardhanariswara',
'ardhanarisvara',
'ardulfurataini',
'arden2bytecode',
'ardhnarishwara',
'ardistomopsis',
'ardeirhynchus',
'ardhnariswara',
'ardenticatena',
'ardeadactylus',
'ardepithecus',
'ardekanopsis',
'ardalstangen',
'ardepithicus',
'arda-mulissu',
'ardnamurchan',
'ardincpinari',
'ardhamagadhi',
'ardhamandapa',
'ardharathiri',
'ardipithecus',
'ardhanisvara',
'ardoreosomus',
'ardillieres',
'ardeiformes',
'ardchonnell',
'ard-aktuell',
'ardinghalia',
'arden-close',
'ardnacrusha',
'ardshealach',
'ardanaiseig',
'ardaneaskan',
'ardisiandra',
'arderancheh',
'ardscotnish',
'arduennella',
'ardshinbank',
'ardcharnich',
'ardatovskiy',
'arddhanaari',
'ardeosaurus',
'ardatovskii',
'ardhanagari',
'ardentallen',
'ardlougher',
'ardumanish',
'ardivaghan',
'ardchronie',
'ardkinglas',
'ardeocomus',
'ardie-ganz',
'ardhangani',
'ardbraccan',
'ardnorcher',
'ardnagrask',
'ardennaise',
'ardiansyah',
'ardistomis',
'ardheisker',
'ardharnich',
'ardentinny',
'ardhaveedu',
'ardnurcher',
'ardaburius',
'ardatovsky',
'ardnaglass',
'ardtalnaig',
'ardheslaig',
'ardhanaari',
'ardnastang',
'ardtornish',
'ardrishaig',
'ardmeanach',
'ardskenish',
'ardeadoris',
'ardendrain',
'ardveenish',
'ardpatrick',
'ardogommon',
'ardilistry',
'ardibehest',
'ardiclitas',
'arducopter',
'ardencaple',
'ardalanish',
'ardatovski',
'ardindrean',
'ardincaple',
'ardzhidada',
'ardhangini',
'ardotalia',
'arduinici',
'ardientes',
'ardsollus',
'ardhagiri',
'ardeatine',
'ard-dhubh',
'ardinamir',
'ardeparin',
'ardatovka',
'ardanabie',
'ardgartan',
'ardechive',
'ardeleanu',
'ardclinis',
'ardabilya',
'ardoilean',
'ardsallis',
'ardnaglug',
'ardgarvan',
'ard-alpha',
'ardmillan',
'ardestorf',
'ardfinnan',
'ardcroney',
'ardgillan',
'ardoukoba',
'ardeslawe',
'ardistama',
'arduodens',
'ardonissa',
'ardabilaq',
'ardashevo',
'ardhanari',
'ardersier',
'ardington',
'ardmenish',
'ardanovce',
'ardnocher',
'ardanabia',
'ardeshiri',
'ard-fheis',
'ardpeaton',
'ardeutica',
'ardekania',
'ardengost',
'ardesaldo',
'ardickaya',
'ardupilot',
'ardminish',
'ardtaraig',
'ardmoneen',
'ardarroch',
'ardrantus',
'ardmolich',
'ardilleux',
'ardanavia',
'ardclough',
'ardizzone',
'ardhasaig',
'ardissone',
'ardlethan',
'ardeonaig',
'ardeatino',
'arduinome',
'ardrossan',
'ardfernal',
'arduinnae',
'ardabilak',
'ardingley',
'ardennais',
'ardhnari',
'ardentes',
'ard-righ',
'ardifuir',
'ardolino',
'arddleen',
'ardyshly',
'ardohain',
'ardfeild',
'ardlussa',
'ardestan',
'ardfheis',
'ardashir',
'ardelica',
'ardouval',
'ardinghi',
'ardullie',
'ardennes',
'ardozyga',
'arduinna',
'ardnarff',
'ardstraw',
'ardelean',
'ardzruni',
'ardclach',
'ardtalla',
'ardsilla',
'ardrahan',
'ardonsky',
'ardwinna',
'ardvasar',
'arderich',
'ardessie',
'ardagast',
'ardenais',
'ardvreck',
'ardatovo',
'ardeotis',
'ardzinba',
'ardengus',
'ardisana',
'ardoksho',
'ardhapur',
'ardleigh',
'ardeshir',
'ardagger',
'ardeidae',
'ardfield',
'ardattin',
'ardmayle',
'ardgowan',
'ardjoune',
'ardanavy',
'arduenna',
'ardelles',
'ardiodus',
'ardnadam',
'ardteist',
'ardesman',
'ardagysh',
'ardingly',
'ardealul',
'ardgroom',
'ardhangi',
'ardashar',
'ardcrony',
'ardeoani',
'ardameri',
'arduaine',
'ardarich',
'ardchyle',
'arduinne',
'ardenode',
'ardougne',
'ardeleni',
'ardanion',
'ardisson',
'ardonald',
'ardglass',
'ardovie',
'ardgoil',
'ardesen',
'arditti',
'ardcarn',
'ardanaz',
'ardcath',
'ardales',
'ardmore',
'ardasan',
'ardhana',
'ardassa',
'arduino',
'ardijah',
'arderry',
'ardraly',
'ardsley',
'ardouin',
'ardicli',
'ardfern',
'ardiere',
'arduini',
'ardabur',
'ardices',
'ardakan',
'ardanuc',
'ardijan',
'ardwick',
'arderne',
'ardwold',
'ardalan',
'ardauli',
'ardjuna',
'ardross',
'ardsnap',
'ardudar',
'ardenis',
'ardakul',
'ardbrin',
'ardooie',
'ardanio',
'ardineh',
'ardesio',
'ardenno',
'ardiege',
'ardatov',
'ardahan',
'ardaris',
'ardglen',
'ardusat',
'ardaric',
'ardaite',
'ardelan',
'ardeids',
'ardc-13',
'ardekan',
'ardizas',
'ardasir',
'ardmair',
'ardgour',
'ardiaei',
'ardonis',
'ardnaff',
'ardebil',
'ardessa',
'arduboy',
'ardenna',
'ardachu',
'arduous',
'ardiles',
'ardysus',
'ardoise',
'arduslu',
'ardroil',
'ardalus',
'arddlin',
'ardtole',
'arderin',
'ardraog',
'ardkeen',
'ardning',
'ardisia',
'ardenne',
'ardeola',
'ardaheh',
'ardabil',
'ardelve',
'ardvark',
'ardajin',
'ardquin',
'ardency',
'ardeley',
'ardajan',
'ardeche',
'arduina',
'ardonea',
'ardafeh',
'ardameh',
'ardudwy',
'ardvagh',
'ardence',
'ardfert',
'ardwell',
'ardoyne',
'ardala',
'ardagh',
'ardops',
'ardler',
'ardisa',
'ardgay',
'arduan',
'ardovo',
'ardoin',
'ardron',
'ardent',
'arditi',
'ardvar',
'ardery',
'ardmay',
'ardana',
'ardtoe',
'ardboe',
'ardeha',
'ardani',
'ardley',
'ardrey',
'ardanj',
'ardina',
'ardoch',
'ardapy',
'ardhin',
'ardila',
'ardeth',
'ardeal',
'ardore',
'ardali',
'ardres',
'ardtun',
'arduns',
'ardath',
'ardfin',
'ardino',
'ardian',
'ardene',
'ardesi',
'ardlui',
'ardour',
'ardell',
'ardeas',
'ardini',
'ardara',
'ardbeg',
'ardelu',
'ardoix',
'ardiya',
'ardupa',
'ardeae',
'ardeer',
'arduin',
'ardaas',
'ardine',
'ardors',
'arduba',
'ardud',
'ardee',
'ardra',
'ardas',
'ardun',
'ardex',
'ardeu',
'ardah',
'ardoi',
'ardms',
'ardis',
'ardit',
'ardes',
'ardei',
'arden',
'ardak',
'ardez',
'ardys',
'ardic',
'ardea',
'ardha',
'ardor',
'ardvi',
'ardue',
'arder',
'ardla',
'ardeh',
'arduf',
'ardan',
'ardre',
'ardiz',
'ardie',
'ardin',
'ardon',
'ardal',
'ardo',
'ardl',
'ards',
'ardb',
'ardi',
'ardu',
'ardy',
'ardc',
'ardf',
'arda',
'ardm',
'arde',
'ard'],
'e': ['arette-la-pierre-saint-martin',
'arette-pierre-saint-martin',
'arenberg-nordkirchen',
'arequipa-antofalla',
'arelaune-en-seine',
'areobiotrematidae',
'arena-auditorium',
'arefjallsloppet',
'area-preserving',
'arenopsaltriini',
'area-to-volume',
'arenafootball2',
'arenibacterium',
'arendalsfeltet',
'arendsee-kalbe',
'arenigiphyllum',
'arenopsaltria',
'arebasankoppa',
'aremata-popoa',
'areniscythris',
'arenibacillus',
'areyongalepis',
'arenariomyces',
'arequipiopsis',
'aremata-rorua',
'arendalsbanen',
'arenodosaria',
'arenicolumba',
'arenobufagin',
'area-defence',
'aretxabaleta',
'areopagitica',
'arensgenhout',
'arechavaleta',
'arenicolidae',
'arendalbanen',
'areopagatica',
'arekurahatti',
'arenysaurini',
'arena-khimki',
'areogapitica',
'arenicolites',
'arenaviridae',
'arezumandeh',
'areuhentina',
'arecatannin',
'aresography',
'arecacicola',
'arenibacter',
'arenysaurus',
'arenshausen',
'arestoceras',
'arensnuphis',
'arethusinae',
'arenigobius',
'areacandona',
'arenophryne',
'arescoptera',
'area/volume',
'arenipiscis',
'areticulata',
'aretshalden',
'area-denial',
'aremallapur',
'arestorides',
'arestocaine',
'arentsminde',
'arenysuchus',
'areolospora',
'arenostola',
'arevashokh',
'arecophila',
'arecastrum',
'arenochroa',
'arenenberg',
'ares-orion',
'arenicella',
'arebilachi',
'arenopolis',
'arenabowls',
'arecoideae',
'arecomyces',
'arecaidine',
'areiopolis',
'arevalillo',
'arenariini',
'arevashogh',
'areum-dong',
'arelerland',
'arenophile',
'arenapolis',
'arefjellet',
'arendiwane',
'areimanios',
'arensharde',
'areopagite',
'arethuseae',
'areop-enap',
'arendtsoya',
'areobindus',
'arethusana',
'areligious',
'arenitalea',
'arevadasht',
'arena-rock',
'arenavirus',
'arenimonas',
'aresgalaxy',
'arenaceous',
'arendsvlei',
'areography',
'arenstorff',
'arenocoris',
'areneidae',
'arenaways',
'arenicola',
'arezumand',
'arecoline',
'areopagus',
'arenivaga',
'areflexia',
'aretology',
'arensborg',
'arendator',
'arensburg',
'aretalogy',
'arevashet',
'arendalsk',
'areilycus',
'arevabuyr',
'aretidris',
'arenabowl',
'arerungua',
'arevatsag',
'arencibia',
'arellanes',
'areavibes',
'arevashat',
'arenigian',
'arenawars',
'areosmith',
'areograph',
'arecaceae',
'arenillas',
'arebhashe',
'arenarius',
'arenteiro',
'arethousa',
'arem-arem',
'areopolis',
'aremorica',
'arengario',
'areometer',
'arengosse',
'arenosols',
'aregnadem',
'areskutan',
'aremfoxia',
'areithous',
'arecomici',
'arenander',
'arenaccia',
'arentorp',
'arecidae',
'arenthon',
"aren'ice",
'arehalli',
'arelatic',
'arenrath',
'aremberg',
'arequipe',
'areopoli',
'arenanet',
'arethusa',
'aresches',
'aresaces',
'areyonga',
'aretxaga',
'arenicin',
'arecanut',
'arenaeus',
'arebegas',
'aregonis',
'arendsee',
'arevalos',
'aremania',
'arenapal',
'arenacup',
'area23jc',
'arendonk',
'arenales',
'arenella',
'arekmane',
'arequipa',
'arenaria',
'arenarba',
'arellius',
'arentowo',
'arefyeva',
'areacode',
'arennest',
'areekode',
'aretaeus',
'aregonde',
'arecales',
'areology',
'aremonia',
'arethaea',
'aredhiou',
'arefabad',
'areolate',
'arempudi',
'arenzano',
'area:one',
'arenberg',
'arenosol',
'arendrup',
'arevshat',
'areekara',
'arecinae',
'areolas',
'arethas',
'aresing',
'aremark',
'arescon',
'aretusi',
'arenabg',
'arecuna',
'arepina',
'aretaic',
'arefino',
'arenite',
'arearea',
'arenjan',
'arendal',
'aresite',
'areopag',
'aretias',
'arenila',
'arestin',
'arensky',
'areolus',
'arena-e',
'arexion',
'arefina',
'arenigs',
'aregund',
'areeiro',
'arevelk',
'areines',
'areceae',
'arevaci',
'arensch',
'arepyev',
'aregawi',
'aredyld',
'arekeri',
'arengan',
'arekere',
'arenita',
'arealva',
'arebati',
'aresjon',
'aretaon',
'aresden',
'areguni',
'areolae',
'arebica',
'arediou',
'areocar',
'aretine',
'aredius',
'aregius',
'areatza',
'areadem',
'arenoso',
'areimeh',
'areolar',
'arestor',
'area-51',
'aregala',
'arelate',
'arevalo',
'aresch.',
'arechia',
'arefyev',
'arenas',
'arechi',
'arebay',
'areeba',
'arezzo',
'area21',
'aretus',
'area51',
'arenka',
'areado',
'areole',
'areora',
'area-7',
'arevis',
'aretis',
'aredia',
'aregen',
'areraj',
'areani',
'aretos',
'aregha',
'areklu',
'arengo',
'aretin',
'areito',
'arents',
'areces',
'areois',
'areala',
'arette',
'arenga',
'aretai',
'arends',
'aregua',
'areosa',
'aretes',
'aregno',
'arepas',
'areial',
'aregon',
'arende',
'arenda',
'arenal',
'aresas',
'aredda',
'arenig',
'arendt',
'aressy',
'arevik',
'aremos',
'arelat',
'aretas',
'areias',
'aresti',
'arello',
'area15',
'areviq',
'arentz',
'areola',
'arevut',
'arenys',
'areeta',
'are.na',
'arella',
'areius',
'arewa',
'arega',
'areso',
'arens',
'areop',
'areca',
'areni',
'areka',
'arema',
'areto',
'areza',
'aresi',
'areoi',
'areta',
'arepa',
'aremu',
'areds',
'arexx',
'areas',
'arend',
'areti',
'arent',
'areum',
'arefu',
'areae',
'areal',
'areit',
'areus',
'arese',
'area2',
'areva',
'arero',
'arene',
'arete',
'aresh',
'arepo',
'arean',
'aredo',
'areia',
'arena',
'arejo',
'arem',
'aret',
'areg',
'arey',
'arec',
'arez',
'area',
'areh',
'arek',
'aren',
'ares',
'arev',
'areu',
'arep',
'arex',
'are'],
'f': ['arfeuille-chatain',
'arfiviricetes',
'arf-invariant',
'arfamoussaya',
'arfvedsonite',
'arformoterol',
'arfeuillea',
'arfeuilles',
'arfendazam',
'arfanabad',
'arfderydd',
'arfwedson',
'arfacsad',
'arfaptin',
'arforgen',
'arfaxad',
'arfgef2',
'arfajah',
'arfonad',
'arfgap1',
'arfgap3',
'arfgef1',
'arfip2',
'arfurt',
'arfip1',
'arfons',
'arfrp1',
'arfaka',
'arfcos',
'arfima',
'arflex',
'arfara',
'arford',
'arfie',
'arfws',
'arfli',
'arfle',
'arfan',
'arfcn',
'arfaj',
'arfon',
'arffp',
'arf1',
'arfl',
'arff',
'arfu',
'arf6',
'arfc',
'arf3',
'arfi',
'arf5',
'arft',
'arf4',
'arfb',
'arfa',
'arf'],
'g': ['argumentsfortheexistenceofgod',
'argenteuil--papineau--mirabel',
'argentina/transportation',
'argentina/communications',
'argutinskie-dolgorukovy',
'argenteuil-sur-armancon',
'argutinsky-dologorukov',
'argentat-sur-dordogne',
'argutinsky-dolgorukov',
'argoutinski-dolgoruki',
'argenteuil-sur-seine',
'arginine-vasopressin',
'argillipedoturbation',
'argentina/government',
'argenton-les-vallees',
'argenton-notre-dame',
'argentobaumhauerite',
'argenton-sur-creuse',
'argentina/geography',
'argentre-du-plessis',
'argentinatachoides',
'argent-sur-sauldre',
'argentinafilms.com',
'argentine-american',
'argunov-cassegrain',
'argences-en-aubrac',
'argentina/military',
'argentine-armenian',
'arginyltransferase',
"argenton-l'eglise",
'argyle-barrington',
'argentine-koreans',
'argininosuccinate',
'argentina/economy',
'argentina/history',
'argiusta-moriccio',
'argyrogrammatini',
'argentochiloides',
'argeles-bagneres',
'argens-minervois',
'argument-mapping',
'argentine-korean',
'argonnerwaldlied',
'argentina/people',
'arginine/lysine',
'argyrodiaptomus',
'argentojarosite',
'argentinosuchus',
'argentinosaurus',
'argentiniformes',
'argeles-sur-mer',
'argentinodictya',
'argentinasaurus',
'argentostriatus',
'argyrogrammana',
'argophyllaceae',
'argostemmateae',
'argentipallium',
'argyrochaetona',
'argentinadraco',
'argyromatoides',
'argyrothelaira',
'argyresthiidae',
'argentinomyces',
'argonauticeras',
'argentite-beta',
'argocoffeopsis',
'argyrophorodes',
'argeles-gazost',
'argentiniceras',
'argentoconodon',
'argentinomyia',
'argus-courier',
'argyractoides',
'argut-dessous',
'argentostiria',
'argumentforms',
'argosirenites',
'argyrochlamys',
'argumentative',
'argyrostrotis',
'argyrocytisus',
'argyrodendron',
'argentopyrite',
'argolamprotes',
'argonautoidea',
'argos-mykines',
'argyroglottis',
'argyresthites',
'argenvilliers',
'argentinosaur',
'argpyrimidine',
'argentinisima',
'argyropelecus',
'argiolestinae',
'argillichthys',
'argumentation',
'argillochelys',
'argyranthemum',
'argentozethus',
'arghutashvili',
'argiolestidae',
'argyrothamnia',
'argyroxiphium',
'argyrolepidia',
'argyrosticta',
'argobuccinum',
'argonnenbahn',
'argillaceous',
'arganthonios',
'argentinoeme',
'argutoridius',
'argyropoulos',
'argyrokastro',
'argyronympha',
'argyraspides',
'argillophora',
'argyrophenga',
'arginylation',
'argentodites',
'arghakhanchi',
'argonautinae',
'argyramoiboi',
'argenteohyla',
'argyroupolis',
'argyrocottus',
'argyrophylax',
'argyrotaenia',
'argentically',
'argyrostagma',
'argarthonius',
'arganarhinus',
'arghu-turkic',
'argenschwang',
'argentineans',
'argentinians',
'argeiphontes',
'argentinidae',
'argonautidae',
'argyrolobium',
'arganasaurus',
'argilophilus',
'argyrocheila',
'argunovskaya',
'argyrogramma',
'argyrotegium',
'argentomagus',
'argyrochosma',
'argyraspodes',
'argentovaria',
'argentoratum',
'argyrographa',
'argentometry',
'argonemertes',
'argyrosaurus',
'argilloberyx',
'argyrargenta',
'argyrocopeum',
'argiroupolis',
'argeneuthria',
'argyroptocha',
'argomuellera',
'argyrophorus',
'arganasuchus',
'argilliornis',
'argenbright',
'argyrostola',
'argillieres',
'arguisuelas',
'argyrocorys',
'argyroploce',
'argyrodella',
'argyresthia',
'argyrodines',
'arg-gly-asp',
'argipressin',
'argyraspide',
'argyropouli',
'argyllshire',
'argonautika',
'argentite-b',
'argentinian',
'argothamnia',
'argochampsa',
'argentaffin',
'argheshabad',
'argyroeides',
'argyrosomus',
'arganaceras',
'argiroupoli',
'argithamnia',
'argyroupoli',
'arguineguin',
'argentorate',
'argyrolacia',
'argyrocosma',
'argippaeans',
'argicultire',
'argonautica',
'argophyllum',
'argyrotheca',
'argiropolis',
'argythamnia',
'argus-press',
'arghandakan',
'argentosoma',
'argupurkala',
'argodrepana',
'argyrophora',
'argillornis',
'arginoussai',
'argenvieres',
'argininemia',
'argyropolis',
'argyrospila',
'argiloborus',
'argosarchus',
'arginbaatar',
'argentinazo',
'argyleshire',
'argentopyge',
'argyrolopha',
'argynnaceae',
'argentinean',
'argyroderma',
'argentinien',
'argyropasta',
'argyronisos',
'argyraspids',
'argogorytes',
'arghakhachi',
'argamasilla',
'argentonnay',
'argostolion',
'argiocnemis',
'argyrolagus',
'argentieres',
'argyrarcha',
'argentulia',
'argentonia',
'argentenay',
'argyromima',
'argatroban',
'argyractis',
'argelaguer',
'argentinie',
'argonestis',
'argjentina',
'argyarctia',
'argopistes',
'argentieri',
'argyripnus',
'argyritzos',
'argentical',
'argelliers',
'argyrodite',
'argyrogena',
'argo-hytos',
'argosearch',
'argyrotome',
'argusville',
'argyrophis',
'argentinid',
'argunaspis',
'argiotoxin',
'argilliers',
'argunovsky',
'argelander',
'arganzuela',
'argentinan',
'argissidae',
'argevollen',
'argytamnia',
'arganiella',
'argentiera',
'argostemma',
'argyronion',
'argumental',
'argenteuil',
'argestidae',
'arguebanes',
'argyropeza',
'argentines',
'argentinos',
'argentavis',
'argenziano',
'argileonis',
'argentinia',
'argopleura',
'argoctenus',
'argopteron',
'argakhtakh',
'arghoslent',
'argonectes',
'argyrotype',
'argillites',
'argoncilhe',
'arganthone',
'argyropaea',
'argentella',
'argostolio',
'arghantina',
'argyroneta',
'argetoianu',
'argentiere',
'argentiina',
'argopecten',
'argynninae',
'argiagrion',
'argenthal',
'argynnina',
'argalista',
'arguement',
'argyrhoda',
'argillate',
'arguelles',
'argiesans',
'argentren',
'argeathae',
'argestina',
'argostoli',
'argennina',
'argeliers',
'argandona',
'arghastan',
'argulidae',
'argiovito',
'argecilla',
'argonaute',
'argonotes',
'argirades',
'argentium',
'argentala',
'argentina',
'argentous',
'argenteus',
'argyrella',
'argenbuhl',
'arginusae',
'argiletum',
'argentine',
'arganodus',
'arginemia',
'arghistan',
'argillana',
'arguloida',
'argonauts',
'argentona',
'argofilms',
'arghandab',
'argassion',
'arghanjwa',
'argiolaus',
'argoeuves',
'argentite',
'arginusai',
'argophara',
'argimusco',
'argetoaia',
'argomaniz',
'arga-sala',
'arginussa',
'argelouse',
'arguinano',
'argentita',
'argynnini',
'argyrodes',
'argastiri',
'argentino',
'argillite',
'argentera',
'argalasti',
'arguments',
'argyrades',
'argybargy',
'argolites',
'arg506gln',
'argavieso',
'arginidae',
'argencola',
'argentyna',
'argujillo',
'argasidae',
'argentan',
'arganchy',
'argayash',
'argnidae',
'argirita',
'arguidos',
'argestes',
'arguendo',
'argleton',
'arginate',
'argentat',
'argelato',
'argozelo',
'argadnel',
'argynnus',
'argonide',
'argentre',
'argyrite',
'argences',
'argistes',
'arguenos',
'argus-is',
'argyrose',
'argentit',
'argument',
'argouges',
'argishti',
'argentia',
'argancon',
'argidaua',
'argyrops',
'arginase',
'arga-tas',
'argavand',
'arguable',
'argalant',
'argerich',
'argentum',
'argutite',
'argentic',
'argenson',
'arguidas',
'argyrols',
'argynnis',
'arguisal',
'arghavan',
'arganine',
'argonian',
'argemiro',
'argyreia',
'argomoon',
'arguello',
'arginine',
'arginusa',
'argandab',
'argonium',
'argyripa',
'argavary',
'argelita',
'argoules',
'argopsis',
'argilehs',
'argunovo',
'argagnon',
'argyrita',
'argyrana',
'argocybe',
'argiinae',
'argyphia',
'argasion',
'argminer',
'argoubia',
'arghatos',
'argoides',
'argulica',
'argidava',
'arguedas',
'arglabin',
'argentus',
'arganovo',
'argizala',
'arguenon',
'argithea',
'argemone',
'argolida',
'argelius',
'argennis',
'argoneut',
'argosies',
'argedava',
'arguably',
'argengau',
'argenton',
'argebaud',
'argonite',
'argyrou',
'argonay',
'argovie',
'arganza',
'argiope',
'argaric',
'arguida',
'argilly',
'argolid',
'argueil',
'argenta',
'argyria',
'argvani',
'arginae',
'argisti',
'argelia',
'argenna',
'argeneh',
'argalaa',
'argveti',
'argeius',
'argobba',
'argippo',
'arghezi',
'argassi',
'argilah',
'arganil',
'arginin',
'argadin',
'argyrit',
'argidae',
'argbadh',
'argowal',
'argaeus',
'argesis',
'argaman',
'argelos',
'arghool',
'arguero',
'argonos',
'argivai',
'arguvan',
'argjend',
'argyrol',
'argenti',
'arghons',
'argungu',
'argusia',
'argamak',
'argebad',
'argalus',
'argidia',
'argileh',
'argyros',
'argenis',
'argamum',
'argovia',
'argonst',
'argunov',
'argitis',
'arguido',
'argyris',
'argusto',
'argives',
'argilus',
'argises',
'argonne',
'arghesh',
'argania',
'argynna',
'argento',
'argancy',
'argegno',
'arghoul',
'arginia',
'arghile',
'arge-sh',
'argente',
'argoile',
'argueta',
'argesel',
'argulus',
'argilos',
'argotec',
'arganin',
'argylia',
'argeleh',
'argolis',
'argouml',
'argonon',
'argall',
'argaum',
'arguru',
'argura',
'arghus',
'argent',
'argele',
'arguel',
'argoed',
'argali',
'argela',
'argaki',
'argysh',
'arguis',
'argles',
'argive',
'argaca',
'argaty',
'argier',
'argaam',
'argius',
'arghul',
'argyll',
'arglu1',
'argaka',
'argers',
'argant',
'arghun',
'argari',
'argana',
'argote',
'argyle',
'argova',
'argina',
'argame',
'argosy',
'arguim',
'argaea',
'argand',
'arguda',
'arguin',
'argeos',
'argmax',
'argyra',
'argead',
'argine',
'argies',
'argmin',
'argila',
'argon2',
'argast',
'argema',
'argein',
'argaon',
'argile',
'argrim',
'argens',
'argala',
'argasi',
'argada',
'argeus',
'argiza',
'argoat',
'argbed',
'argob',
'argol',
'argos',
'argle',
'argal',
'argue',
'argas',
'argao',
'argan',
'argha',
'argen',
'argud',
'argyl',
'argya',
'argut',
'argei',
'argin',
'argir',
'argus',
'argil',
'argma',
'argyn',
'argox',
'argia',
'arget',
'arghu',
'argis',
'argov',
'argel',
'argul',
'argon',
'arges',
'argun',
'argna',
'argv',
'argy',
'argb',
'argr',
'argh',
'argo',
'arga',
'argt',
'arge',
'arg2',
'argi',
'arg'],
'h': ['arhythmacanthidae',
'arhythmorhynchus',
'arhynchobdellida',
'arhynchobatidae',
'arhynchobdellae',
'arhinencephaly',
'arhopaloscelis',
'arhynchobatis',
'arhangelskii',
'arhangelskiy',
'arheological',
'arhangelski',
'arhodomonas',
'arhiljevica',
'arhouriella',
'arhinobelus',
'arhangelsky',
'arhantsusi',
'arhangelsk',
'arhabdosia',
'arhavispor',
'arhopalini',
'arhusgade',
'arhochmus',
'arhathood',
'arhytinus',
'arhennius',
'arhythmia',
'arhgap11b',
'arhatship',
'arhbarite',
'arhopalus',
'arhoolie',
'arhgap25',
'arhgap27',
'arhgap29',
'arhyssus',
'arhgef11',
'arhangel',
'arhgef35',
'arhgef10',
'arhopala',
'arholzen',
'arhuacan',
'arhgap24',
'arhgap44',
'arhgap26',
'arhangay',
'arhgap18',
'arhgap19',
'arhgef12',
'arheimar',
'arhuacos',
'arhansus',
'arhgap31',
'arhanes',
'arhgap4',
'arhgdia',
'arhgef6',
'arhgdib',
'arhines',
'arhgap8',
'arhatha',
'arhgef7',
'arhgap9',
'arhalba',
'arhgef3',
'arhgef1',
'arhodia',
'arhgap1',
'arhgap5',
'arhotic',
'arhgdig',
'arhuaco',
'arhgef5',
'arhgef9',
'arholma',
'arhaphe',
'arhgef2',
'arhgef4',
'arhaus',
'arhaan',
'arh-70',
'arhavi',
'arhedi',
'arhats',
'arhant',
'arhal',
'arhar',
'arhat',
'arham',
'arhus',
'arhan',
'arhno',
'arhs',
'arha',
'arhp',
'arhg',
'arh'],
'i': ['arithmetic/multiplication',
'aripiprazole/sertraline',
'ariya-atthangika-magga',
'ariyavangsagatayana',
'arimunding-munding',
'ariakehimeshirauo',
'aristoltochiaceae',
'arisugawa-no-miya',
'ario-christianity',
'arizona/counties',
'ariyappampalayam',
'ariyanayagipuram',
'ariochristianity',
'aristolochiaceae',
'aristoceratoides',
'arinbjarnarkvida',
'arizonanativenet',
'ariola-eurodisc',
'aristoteleanism',
'arizona/natives',
'arithmetization',
'aristolochiales',
'ariyapadaiveedu',
'arikura-no-baba',
'aristotelianism',
'ariboflavinosis',
'arianayagipuram',
'aristoptychites',
'arikesavanallur',
'ariaairline.ir',
'aristonectinae',
'aristocratical',
'aristogeitonia',
'ario-christian',
'aristonectidae',
'arietoceltites',
'arimaddanapura',
'aristaeomorpha',
'ariekanerpeton',
'aristochroodes',
'aristotelians',
'arithmetician',
'arithmomachia',
'arithmomaniac',
'aristocleidas',
'arizonerpeton',
'ariernachweis',
'aristeguietia',
'arizelocichla',
'arizonasaurus',
'aristolochine',
'ari-hes-nefer',
'aristoteleans',
'aristochromis',
'aristocleides',
'aridification',
'ariophantidae',
'aristolochene',
'arianagnathus',
'arishadvargas',
'aridulodrilus',
'arikkadamukku',
'ariyamangalam',
'arizonacritus',
'aristocracies',
'aristostomias',
'arias-navarro',
'aristocleidus',
'aristosyrphus',
'arikhankharer',
'ariochristian',
'ariolimacidae',
'aries-espenan',
'aristotelian',
'aristosuchus',
'ariommatidae',
'arithmometre',
'arista/novus',
'aripiprazole',
'arian-kartli',
'arianization',
'aristomachus',
'arishadvarga',
'ariyanipatti',
'aristocratic',
'arithmosophy',
'aristolochus',
'aristrocracy',
'arisierpeton',
'arithmomancy',
'aries/strong',
'aristotelean',
'arisphinctes',
'arithmologia',
'aristogeiton',
'ariobarzanes',
'aristonautae',
'arik-den-ili',
'aristophones',
'arista-novus',
'aripiprozole',
'aristotelism',
'aristoclides',
'aristolochin',
'arithmomania',
'aristolochia',
'aristomachos',
'aristophanes',
'aristeoideae',
'ariosophical',
'aristonectes',
'aristophenes',
'ariyankuppam',
'aristomaches',
'aristaenetus',
'arietellidae',
'aristelliger',
'arithmometer',
'aristosaurus',
'aristophanic',
'aristocrates',
'arik-den-ilu',
'arianisation',
'aridoamerica',
'arithmantia',
'aristodemus',
'arith-matic',
'ariosophist',
'aristebulea',
'aristagoras',
'aristotelis',
'aristophyli',
'arieticeras',
'arithmetica',
'aridibacter',
'ariancoupom',
'aristomenes',
'arithmetics',
'aristotlean',
'arimoclomol',
'aristasians',
'aristomenis',
'arillastrum',
'ariapeithes',
'arianespace',
'aristocracy',
'aristoxenos',
'aristommata',
'aristarchus',
'arietitidae',
'ariaramneia',
'aristochroa',
'arimaspians',
'aristodimio',
'ariznavarra',
'aristomedia',
'arikuryroba',
'aristarchua',
'aristomache',
'arithmaurel',
'aristocleia',
'arixeniidae',
'arimapasian',
'aristoceras',
'aristonikos',
'aristarkhov',
'aristofusus',
'aristonicus',
'aristocrats',
'aristobulus',
'arianegroup',
'aristocosma',
'aristocapsa',
'aristoteles',
'aristarchos',
'aristofanes',
'arivonimamo',
'aristoxenus',
'aristotelia',
'ariankuppam',
'arishtanemi',
'arionoceras',
'aristophane',
'aristonymus',
'aristotimus',
'aristocreon',
'aristobulos',
'aristocrunk',
'aristolebia',
'aristogiton',
'aristarete',
'arithmancy',
'ariobarzan',
'ariocarpus',
'ariifaaite',
'arithmatic',
'arimaddana',
'aristolaos',
'arigomphus',
'ariphrades',
'ariyanagar',
'aricanduva',
'aristideae',
'arisierung',
'ariliaceae',
'aristoclea',
'ariyasacca',
'aricoceras',
'arimathaea',
'arithmania',
'ariarathes',
'ariadnaria',
'aristotelo',
'aristobule',
'arimnestos',
'ariarathia',
'ariamsvlei',
'aribashevo',
'aristippus',
'arithmetic',
'arianiello',
'arianshahr',
'arimpasian',
'arikesarin',
'aristosoma',
'aristotole',
'aristology',
'ariunculus',
'ariaspidae',
'aristotles',
'ariogaesus',
'arionellus',
'arita-yaki',
'ariaramnes',
'aristarche',
'aristaenus',
'aristiidae',
'aristodama',
'aristocles',
'arikpaktsa',
'aristocats',
'aristarain',
'arikathota',
'ariosophie',
'ariovistus',
'arionoidea',
'aristospan',
'aristonous',
'aristodeme',
'aristocort',
'aristocrat',
'aristyllus',
'aristagora',
'aristophon',
'aridonevra',
'ariophanta',
'arikareean',
'aristaenos',
'aristeides',
'aritaerius',
'ariomardus',
'arithmetik',
'aristasian',
'aripharnes',
'arivumathi',
'aristander',
'arimnestus',
'aristotele',
'aristeidae',
'ariolasoft',
'aripanthan',
'aristavele',
'ariabignes',
'arimaspian',
'ariefusus',
'arianrhod',
'aristotle',
'ariyathur',
'ariolimax',
'ariconium',
'ariquemes',
'arimannia',
'aristasia',
'ariosophy',
'ariostazo',
'arianhrod',
'aristippa',
'aristides',
'arikhvali',
'arikuyusu',
'arionidae',
'aridisols',
'ariathisa',
'arivegaig',
'aristaios',
"ari'imate",
'arisugawa',
'aridhglas',
'arivumani',
'arizmendi',
'arizonian',
'ariodante',
'ariadaeus',
'arirang-2',
'arirang-1',
'aristegui',
'arionrhod',
'aristogel',
'arizelana',
'aristaloe',
'ari-pekka',
'aristaria',
'aricestii',
'aribenchi',
'arikesari',
'arimannus',
'arikokaha',
'aristopia',
'arimneste',
'aristaeus',
'arietinum',
'ariegeois',
'ariaethus',
'arianites',
'aristarch',
'arikkulam',
'arikamedu',
'arismendi',
'arianians',
'aristakes',
'arimathea',
'arimaspes',
'arinagour',
'aribonids',
'ariogalos',
'arintheus',
'arithmeum',
'aristobia',
'aricamedu',
'arishiani',
'arilophia',
'arimittya',
'ariyallur',
'aristaqis',
'aristanax',
'arionesti',
'aristopus',
'arichanna',
'arignotus',
'aripaktsa',
'arietites',
'arielulus',
'ariminium',
'arimanius',
'arinnitti',
'aristotel',
'aristaces',
'aristeia',
'arimachi',
'aristoff',
'aristate',
'arifovic',
'ariundle',
'arilomax',
'arimalla',
'aripalam',
'arixenia',
'ariranha',
'ariphron',
'arigohal',
'aritsugu',
'aristaea',
'aristida',
'arinzano',
'aridness',
'arianist',
'arippara',
'ariogala',
'arignote',
'arimanni',
'aridolis',
'ariyalur',
'arikunte',
'arianiti',
'ariosoma',
'arizzano',
'ariopsis',
'arikaras',
'arivechi',
'arisvere',
'aritmija',
'arin.net',
'arimania',
'aridisol',
'ariassus',
'ariovist',
'aribaeus',
'arindela',
'arianrod',
'aripuana',
'arischia',
'arimidex',
'aristona',
'ariellah',
'ariljaca',
'aricoris',
'arialdus',
'arizabad',
'arikaree',
'arimitsu',
'arisarum',
'arintica',
'arisaema',
'arisdorf',
'arinitti',
'arimalam',
'arietids',
'aricidea',
'arikuzha',
'arishtat',
'ariyoshi',
'ariastes',
'ariqiran',
'arishima',
'aridaria',
'ariaspis',
'arianize',
'ariowald',
'arimazes',
'ariadhoo',
'arierang',
'ariguani',
'arianzus',
'arivaali',
'ariyalai',
'aristeas',
'arigbabu',
'ariminus',
'ariodant',
'ariminum',
'aristion',
'ariazate',
'aridaeus',
'arianita',
'arinniti',
'aristole',
'arignota',
'ariannin',
'ariaspes',
'arinthod',
'aristias',
'ariamnes',
'aristeus',
'arisitum',
'arigaion',
'aristovo',
'arieseni',
'arimpara',
'arianism',
'arihanta',
'arizonan',
'ariantas',
'ariadnet',
'ariolica',
'arianian',
'arianida',
'arillate',
'ariusium',
'arianzum',
'arignano',
'arixiuna',
'ariadaha',
'aristava',
'arimaspi',
'aritinca',
'aridarum',
'aristide',
'arihiro',
'aridity',
'arieiro',
'arinola',
'arietta',
'arities',
'aristov',
'arioald',
'arikady',
'arisbas',
'arianie',
'arimori',
'arikasa',
'aripaev',
'aritomo',
'arianet',
'arisona',
'ariidae',
'arizela',
'aristas',
'arielia',
'ariusia',
'arimjan',
'aridaia',
'ariffin',
'arinobu',
'arillus',
'arindik',
'aripert',
'aricina',
'aricept',
'aridnyk',
'arikara',
'aricara',
'arindzh',
'arisaka',
'arixtra',
'arielle',
'arisan!',
'arigoke',
'aritcle',
'ariinae',
'arianta',
'arikaya',
'arimbra',
'arilson',
'ariccia',
'ariunaa',
'aridian',
'aridius',
'ariosti',
'ariadna',
'arietes',
'arilova',
'arizona',
'aridhol',
'arinaga',
'ariamir',
'ariqdam',
'arisawa',
'arindam',
'arienti',
'aristae',
'aristea',
'aribert',
'arialdo',
'arignac',
'ariotox',
'ariyevo',
'ariteus',
'arimofa',
'arifiye',
'aristus',
'arimoto',
'arifana',
'aricini',
'arisman',
'arishem',
'arigato',
'aristoi',
'arimpur',
'arikawa',
'aringay',
'arizlar',
'ariston',
'arinsal',
'ariselu',
'arihara',
'arindom',
'aristos',
'arienzo',
'arienne',
'ariadne',
'ariomma',
'aribash',
'arihant',
'ariotus',
'ariaeus',
'ariaric',
'aricoma',
'arisaig',
'ariaios',
'aritzia',
'aristau',
'arianus',
'ariotti',
'ariosto',
'arirang',
'arilica',
'arianna',
'aristoc',
'aritimi',
'arielli',
'arigius',
'ariella',
'arianis',
'aricent',
'arizkun',
'arikula',
'arikury',
'arimura',
'aridjis',
'arizent',
'ariana',
'arijac',
'ariulf',
'arinze',
'arid5b',
'arid4a',
'ariely',
'arisbe',
'arisil',
'aribas',
'aritra',
'arioli',
'arison',
'arioso',
'aristo',
'arizao',
'ariary',
'arizpe',
'arista',
'arilda',
'arisen',
'arikli',
'aritox',
'arizal',
'ariglu',
'aringa',
'arimba',
'arinto',
'arisan',
'arinna',
'arilla',
'arioch',
'ariflo',
'arigna',
'arihah',
'arikan',
'aricak',
'ariate',
'ariesu',
'ariass',
'ariald',
'aritzo',
'arinae',
'aringo',
'arisia',
'arimoi',
'arise!',
'arid1b',
'arifke',
'arieus',
'aridif',
'ariste',
'ariany',
'aright',
'arihal',
'arid1a',
'ariege',
'arikoy',
'arilje',
'ariens',
'arinia',
'ariari',
'ariete',
'arinis',
'aripax',
'arid3b',
'aripov',
'aritao',
'arimaa',
'arindj',
'arilus',
'ariano',
'ariaca',
'aricia',
'arilta',
'ariola',
'arisba',
'arid3a',
'arid4b',
'arifat',
'ariane',
'aricom',
'arispe',
'aripra',
'arinez',
'arinos',
'ariake',
'arians',
'arided',
'ariasa',
'arinnu',
'arich',
'ariol',
'arith',
'arico',
'arios',
'arido',
'aripo',
'arisu',
'arize',
'ariga',
'ariha',
'arih1',
'arian',
'arica',
'arion',
'arizl',
'arivu',
'aribi',
'arioi',
'arinj',
'arity',
'arigo',
'arili',
'arida',
'arins',
'arid2',
'arien',
'ariwa',
'aries',
'arigs',
'aring',
'arisa',
'ariat',
'arinc',
'arisi',
'arina',
'ariya',
'arish',
'arini',
'arial',
'ariza',
'aribo',
'ariaz',
'arita',
'ariko',
'arike',
'arise',
'ariab',
'ariki',
'ariss',
'arima',
'arika',
'ariel',
'aribe',
'arifi',
'arius',
'ariba',
'arias',
'arino',
'arils',
'ariid',
'arieh',
'arild',
'arih2',
'aridi',
'arija',
'arit',
'arie',
'arii',
'arik',
'arif',
'arid',
'arim',
'aris',
'ario',
'aric',
'arin',
'arib',
'arij',
'ariz',
'arig',
'aria',
'aril',
'ari'],
'j': ['arjayadengjayaketana',
'arjagur-avsarlu',
'arjuno-welirang',
'arjunudupalam',
'arjohuntleigh',
'arjangstravet',
'arjunawiwaha',
'arjuneswarar',
'arjomandiyeh',
'arjunavarman',
'arjunayanas',
'arjowiggins',
'arjunayana',
'arjassaare',
'arjunkhedi',
'arjanavand',
'arjunnagar',
'arj21-700',
'arjonilla',
'arjalabai',
'arjansukh',
'arjunamau',
'arjuzanx',
'arjanwal',
'arjikiya',
'arjeplog',
'arjestan',
'arjunagi',
'arjomand',
'arjandas',
'arjantin',
'arjunpur',
'arjunwad',
'arjunali',
'arjadzor',
'arjunan',
'arjenak',
'arjazur',
'arjuyeh',
'arjagur',
'arjanli',
'arjola',
'arjuna',
'arjika',
'arjewa',
'arjang',
'arjrud',
'arjoun',
'arjasp',
'arjoli',
'arjadi',
'arjona',
'arjava',
'arj-21',
'arjak',
'arjia',
'arjas',
'arjaq',
'arj21',
'arjut',
'arjal',
'arjan',
'arjin',
'arjai',
'arjun',
'arjen',
'arju',
'arje',
'arja',
'arjo',
'arj'],
'k': ['arkonskie-niemierzyn',
'arkholme-with-cawood',
'arkansas-monticello',
'arkitektforbundet',
'arkat-al-mujahdin',
'arkhipo-osipovka',
'arkhangelskaya',
'arkhangelskoye',
'arkengarthdale',
'arkaliospilio',
'arkhangelskiy',
'arkanserpeton',
'arkhar-merino',
'arkhangelskii',
'arkitektskap',
'arkhangelski',
'arkelloceras',
'arkea-samsic',
'arkhangelsky',
'arkanosaurus',
'arkani-hamed',
'arklatexoma',
'arkansaurus',
'arkhipienka',
'arkalochori',
'arkhangelsk',
'arkenpliers',
'arkymalarky',
'arkefly.com',
'arkenhammer',
'arkonaplatz',
'arkansawyer',
'arkipelago',
'arkoudilas',
'arkive.org',
'arkadyevka',
'arkenberge',
'arkefly.nl',
'arkhalough',
'arkangelsk',
'arkharavia',
'arkalohori',
'arktikugol',
'arkhalukhi',
'arkelstorp',
'arketamine',
'arkhipovka',
'arkivmusic',
'arkhypenko',
'ark-la-tex',
'arkadians',
'arkaroola',
'arkadimon',
'arkanites',
'arkoudion',
'arkimedes',
'arknights',
'arkhangai',
'arkeology',
'arkhipovo',
'arkhashan',
'arkadiusz',
'arkendale',
'arkhalugh',
'arkhangel',
'arktikgaz',
'arkathias',
'arktocara',
'arkwright',
'arkendish',
'arkhawang',
'arkuszewo',
'arkansaws',
'arkaulovo',
'arkansian',
'arkiomaa',
'arkleton',
'arkadian',
'arkadium',
'arkalgud',
'arkansaw',
'arkhazlu',
'arkesios',
'arkadioi',
'arkforce',
'arkosund',
'arkangaj',
'arkitekt',
'arkesden',
'arkyidae',
'arktinos',
'arkhipov',
'arkology',
'arkowyen',
'arkhalig',
'arklinux',
'arkonips',
'arkanoid',
'arkansia',
'arkadija',
'arkatovo',
'arkoudio',
'arkhanka',
'arkoudes',
'arkangel',
'arkestra',
'arktanoi',
'arkhanes',
'arkstorm',
'arkholme',
'arklatex',
'arkansan',
'arkansas',
'arkatag',
'arktida',
'arkavaz',
'arkalyk',
'arkosic',
'arkauti',
'arkanov',
'arkopal',
'arkivet',
'arkades',
'arklity',
'arkitex',
'arkivoc',
'arkadiy',
'arkabay',
'arkhust',
'arkship',
'arkaroo',
'arkuyen',
'arkarua',
'arkadak',
'arkoola',
'arkleby',
'arktika',
'arkells',
'arkebek',
'arkhoti',
'arkhale',
'arkhara',
'arkadia',
'arkoudi',
'arkarna',
'arkless',
'arkefly',
'arkaos',
'arkema',
'arklow',
'arkana',
'arkaul',
'arkaim',
'arkose',
'arkera',
'arkadi',
'arkell',
'arkaea',
'arktos',
'arksey',
'arkham',
'arkite',
'arkoff',
'arkaia',
'arkhar',
'arkies',
'arkabi',
'arkoli',
'arkhiv',
'arkady',
'arkiko',
'arkley',
'arktis',
'arkino',
'arkive',
'arkhip',
'arkhyz',
'arkhud',
'arkona',
'arkina',
'arkaba',
'arkin',
'arkhi',
'arkyt',
'arkul',
'arkia',
'arkel',
'arkos',
'arkas',
'ark21',
'arkon',
'arkis',
'arkem',
'arkha',
'arkoi',
'arkle',
'arkhe',
'arkan',
'arkna',
'arken',
'arkma',
'arkab',
'arkar',
'arkut',
'arkum',
'arkie',
'arkys',
'arke',
'arky',
'arki',
'arko',
'arkh',
'ark.',
'arka',
'ark'],
'l': ['arleux-en-gohelle',
'arlington-lowell',
'arlberg-kandahar',
'arles/kilcruise',
'arles-kilcruise',
'arlesheimer-see',
'arles-sur-tech',
'arles/killeen',
'arles-killeen',
'arlbergtunnel',
'arlinghundra',
'arllechwedd',
'arly-singou',
'arlbergbahn',
'arlequinus',
'arlosoroff',
'arlecchino',
'arlington',
'arlescote',
'arlangkot',
'arlandino',
'arlauskas',
'arlozorov',
'arlingham',
'arlempdes',
'arlabecca',
'arlebrook',
'arlesheim',
'arlequin',
'arlekino',
'arlovski',
'arlebosc',
'arlissos',
'arlanzon',
'arlarovo',
'arloeswr',
'arleston',
'arlecdon',
'arlewatt',
'arlinton',
'arlberg',
'arletta',
'arledge',
'arlanza',
"arlan's",
'arletty',
'arlesey',
'arlette',
'arlissa',
'arleigh',
'arl6ip4',
'arlidin',
'arl6ip5',
'arlotta',
'arl6ip6',
'arladne',
'arlamow',
'arl6ip1',
'arlindo',
'arlanda',
'arluzea',
'arl-44',
'arleen',
'arline',
'arlman',
'arless',
'arling',
'arleuf',
'arleux',
'arl13b',
'arliss',
'arlnow',
'arlanc',
'arlene',
'arluno',
'arland',
'arl13a',
'arloff',
'arlaug',
'arlyan',
'arllat',
'arlije',
'arlhs',
'arlay',
'arlan',
'arlau',
'arl8a',
'arlov',
'arl4a',
'arlit',
'arl11',
'arley',
'arles',
'arlie',
'arl4d',
'arlon',
'arlin',
'arlen',
'arlet',
'arl15',
'arlow',
'arlea',
'arlem',
'arlos',
'arlis',
'arl8b',
'arla',
'arl2',
'arl6',
'arli',
'arls',
'arlt',
'arle',
'arl3',
'arlo',
'arl1',
'arly',
'arl'],
'm': ['arma-goddamn-mother-fuckin-geddon',
'arma-goddamn-motherfucking-geddon',
'army-goddamn-motherfucking-geddon',
'arma-goddamn-motherfuckin-geddon',
'arma-goddam-motherfuckin-geddon',
'armagoddamnmotherfuckingeddon',
'armenia/communications',
'armentieres-sur-ourcq',
'armentieres-sur-avre',
'armstrong-whitworth',
'armeemarschsammlung',
'armentieres-en-brie',
'armagetronadvanced',
'armenian-americans',
'armstrong-siddeley',
'armenian-canadians',
'armenian-iranians',
'armeeoberkommando',
'armadillo-repeats',
'armagnac-tenareze',
'armenian-language',
'armenian-catholic',
'armenian-american',
'armenian-canadian',
'arminotransferase',
'armtrong-siddeley',
'armenian-iranian',
'armeno-phrygians',
'armaghan-e-hijaz',
'armaghan-i-hijaz',
'armin-wolf-arena',
'armour-piercing',
'armstrong-jones',
'armbouts-cappel',
'armadillidiidae',
'armadillomorpha',
'armenoceratidae',
'armadillosuchus',
'armadilloniscus',
'armstrong-braun',
'armoricaphyton',
'armers/armaros',
'armarmshahadlu',
'armenatkyshlak',
'armaghankhaneh',
'armeerundschau',
'armor-piercing',
'armatimonadota',
'armetrakhimovo',
'armenian-dutch',
'armandisaurus',
'armadillodile',
'armenierstadt',
'arm-wrestling',
'armatobalanus',
'armintomyidae',
'armeleuteberg',
'armorhydridae',
'arms-for-iraq',
'armadillidium',
'armamentarium',
'armous-et-cau',
'armour-geddon',
'armanen-orden',
'armour-bearer',
'armatophallus',
'armintomidae',
'armatocereus',
'arm-triangle',
'arm-wrestler',
'armstrongism',
'armadillomon',
'armilustrium',
'armswissbank',
'arming-sword',
'armaueriidae',
'armadillocon',
'armwrestling',
'armando.info',
'arme-shubria',
'armani/silos',
'armizonskoye',
'armstrongite',
'armalsryggen',
'armagetronad',
'arminisaurus',
'armorloricus',
'armatosterna',
'armadillidae',
'armendaritze',
'arms-to-iraq',
'armenophobia',
'armenopoulos',
'armsbendback',
'arm-twisting',
'armyankovtsi',
'armendarits',
'arminiamism',
'armalcolite',
'armenoceras',
'armeno-tats',
'armentarius',
'armedangels',
'armenianism',
'arminianism',
'armuthsbach',
'armaneashti',
'armodafinil',
'armenopolis',
'armenokhori',
'armourstone',
'armagnostus',
'armenopulos',
'arm-warmers',
'armaztsikhe',
'armanenvolk',
'armenophile',
'armtriangle',
'armourglass',
'armeniafund',
'armageddons',
'armathwaite',
'armwrestler',
'armaggeddon',
'armentieres',
'arminbaniaz',
'armagomphus',
'armoricaine',
'armandiella',
'armenochori',
'armillifera',
'armaniidae',
'armenosoma',
'armenakans',
'armorgroup',
'armaucourt',
'armorheads',
'armilliary',
'armenpress',
'armourdale',
'armitstead',
'armanitola',
'armagetron',
'armbrister',
'armogeddon',
'armizonsky',
'armallones',
'armananzas',
'armaguedon',
'armchairgm',
'armeenians',
'armenteule',
'armaghetto',
'armendariz',
'armemuseum',
'armilenium',
'armegeddon',
'armentieux',
'armbruster',
'armagideon',
'armintomys',
'armancourt',
'armudpadar',
'armadillos',
'armillaria',
'arminghall',
'armenianow',
'armasuisse',
'arminanism',
'armaneasti',
'armatoliki',
'arms-trade',
'armenology',
'armillifer',
'arminoidea',
'armstrongs',
'armogastes',
'armophorea',
'armachanus',
'armenteros',
'armaggedon',
'armudaghaj',
'armenistan',
'armigerous',
'armandinho',
'armageddon',
'armigatus',
'armellini',
'armaturae',
'armenzano',
'armenfilm',
'armthorpe',
'armillary',
'armenicum',
'armorique',
'armilimax',
'armorhead',
'armadimon',
'armoracia',
'armalausi',
'armenkino',
'armadillo',
'armangnac',
'armutalan',
'army@love',
'armerding',
'armulator',
'armcomedy',
'armatolik',
'armenavan',
'armeniaca',
'armistice',
'armorican',
'armatures',
'armesberg',
'armbinder',
'armellino',
'armacarus',
'armentano',
'armorines',
'armenchik',
'arminsyah',
'armenakan',
'arminidae',
'armaments',
'armington',
'armistead',
'armenians',
'armatoloi',
'armcosmos',
'armstrong',
'armenikum',
'armatites',
'armigeres',
'armadahan',
'armagedda',
'armanists',
'armenhoef',
'arminians',
'armodoris',
'armatoles',
'armighorn',
'armasesti',
'armagedon',
'armillae',
'armiansk',
'armissan',
'arminism',
'armsmear',
'armitage',
'armalyte',
'armancon',
'armorika',
'armutcuk',
'armsheim',
'armatron',
'armazine',
'armajani',
'armitron',
'armenpac',
'armenaki',
'arm7tdmi',
'armarium',
'armengol',
'armaiolo',
'armyansk',
'armytage',
'armaseni',
'armalite',
'armscote',
'armatrac',
'armatoli',
'armament',
'armonica',
'armblast',
'armajaro',
'armikrog',
'armyworm',
'armacell',
'armstead',
'armstorf',
'armlenet',
'armacost',
'armstedt',
'armaille',
'armchair',
'armutlia',
'armatole',
'armanian',
'armsdale',
'armigers',
'armathia',
'armentel',
'armatage',
'armfield',
'armungia',
'armentia',
'arm-lock',
'arm-rest',
'armillac',
'armanaya',
'armenius',
'armenoid',
'armatius',
'armenien',
'armadini',
'armshead',
'arminian',
'arminiya',
'armagaon',
'armaggon',
'armilius',
'armenian',
'armandia',
'armbrust',
'armorica',
'armslist',
'armonite',
'armonico',
'armangac',
'armiabad',
'armenite',
'armenium',
'armature',
'armbands',
'armorial',
'armardeh',
'armorall',
'arminius',
'armleder',
'armlocks',
'armoured',
'armourer',
'armaside',
'armadale',
'armagnac',
'armando',
'armlock',
'armavir',
'armdale',
'armesto',
'armeria',
'armutlu',
'armases',
'armpits',
'armston',
'armorer',
'armavia',
'armenai',
'arminco',
'armelle',
'armaros',
'armsmen',
'armahda',
'armaria',
'armella',
'armilus',
'armenis',
'armilla',
'armings',
'armenak',
'armento',
'armband',
'armanum',
'armscii',
'armatlu',
'armazem',
'armilos',
'armenie',
'arminon',
'armanic',
'armenio',
'arminio',
'armscor',
'armesko',
'armaiti',
'armagon',
'armisen',
'armanaz',
'armania',
'armenia',
'armenoi',
'armello',
'armands',
'armimex',
'armored',
'armatus',
'armscye',
'armudlu',
'armamar',
'armanis',
'armetta',
'armight',
'armenon',
'army.ca',
'armiger',
'armless',
'armanak',
'armoire',
'armbian',
'arminou',
'armance',
'armudly',
'armudaq',
'armfelt',
'armalok',
'armagan',
'armoury',
'armadyl',
'armillo',
'armenid',
'armrest',
'armsman',
'armenta',
'armster',
'armf-ii',
'armond',
'armuna',
'armudu',
'armens',
'armitt',
'armori',
'armeno',
'armota',
'armf-i',
'armush',
'armpac',
'armena',
'armeni',
'armill',
'armine',
'armani',
'armaki',
'armona',
'armada',
'armana',
'armors',
'armkhi',
'armeau',
'armale',
'armian',
'armand',
'armcx5',
'armado',
'armind',
'armijo',
'armaxa',
'armala',
'armsia',
'armend',
'armcx3',
'armata',
'armash',
'armcx6',
'armank',
'armero',
'armcx1',
'armone',
'armazi',
'armina',
'armida',
'armene',
'armide',
'armlet',
'armoor',
'armaly',
'armour',
'armaan',
'armish',
'armpit',
'armies',
'armley',
'armagh',
'armant',
'armory',
'armer',
'armd4',
'armur',
'arm9e',
'armor',
'armdi',
'arma2',
'armc6',
'armc9',
'armco',
'armes',
'armil',
'armia',
'armel',
'armik',
'armit',
'armak',
'armix',
'armin',
'arm11',
'armut',
'armou',
'armir',
'armas',
'armtv',
'arms1',
'armat',
'arms2',
'armus',
'armh3',
'armon',
'armc5',
'armen',
'armed',
'armya',
'arman',
'armod',
'armet',
'army',
'armi',
'arm6',
'armo',
'arm.',
'arml',
'armc',
'armh',
'arm7',
'arms',
'arma',
'armd',
'arm9',
'armu',
'armm',
'arm'],
'n': ['arnold-schwarzenegger-stadion',
'arnouville-les-mantes',
'arnay-sous-vitteaux',
'arnolfini.marriage',
'arnac-sur-dourdou',
'arneburg-goldbeck',
'arnieres-sur-iton',
'arnou-le-faucon',
'arnoldiellaceae',
'arnac-pompadour',
'arnaoutogitonia',
'arnarulunnguaq',
'arnaud-guilhem',
'arnapkapfaaluk',
'arnex-sur-nyon',
'arnaiz-villena',
'arnac-la-poste',
'arnavatnsheidi',
'arnex-sur-orbe',
'arni-islisberg',
'arnaud-amaury',
'arnis-eskrima',
'arnoldsweiler',
'arnulfstrasse',
'arnioceltites',
'arneshreppur',
'arnarquagsag',
'arnay-le-duc',
'arnold-baker',
'arna-bjornar',
'arnesenodden',
'arnarfjordur',
'arnoglossum',
'arnihaaggen',
'arnekhamani',
'arnsgereuth',
'arnafjordur',
'arnegisclus',
'arnoldiella',
'arnautovici',
'arnognathus',
'arnestovice',
'arnaudiella',
'arnoldstein',
'arnakuagsak',
'arnicastrum',
'arnancourt',
'arnavutkoy',
'arnhemland',
'arnouville',
'arnadottir',
'arnicratea',
'arnautovic',
'arnoliseus',
'arnulfings',
'arnoldists',
'arnyashevo',
'arnomobula',
'arnagardur',
'arnarstapi',
'arnetminer',
'arnsburger',
'arnioceras',
'arnschwang',
'arnemuiden',
'arnocrinum',
'arntgolts',
'arnemetia',
'arnouphis',
'arnswalde',
'arnafjall',
'arnisdale',
'arnoyhamn',
'arnautoff',
'arnarflug',
'arncroach',
'arnbitter',
'arnedillo',
'arnenhorn',
'arncliffe',
'arnuwanda',
'arnoseris',
'arnocorps',
'arnabanda',
'arnshofen',
'arnaville',
'arnbackia',
'arnicourt',
'arnutovce',
'arniboden',
'arnoldius',
'arnaudija',
'arnen-see',
'arnasvall',
'arnautovo',
'arnarholl',
'arnoltice',
'arnulfing',
'arnolfini',
'arnaboldi',
'arniella',
'arnomyia',
'arnarsaq',
'arnprior',
'arnoldus',
'arnobius',
'arnhemia',
'arndilly',
'arnoraja',
'arnstein',
'arnezami',
'arnesson',
'arnfield',
'arniston',
'arneburg',
'arnajevo',
'arnefrit',
'arneiroz',
'arneberg',
'arnsbach',
'arnefrid',
'arnbruck',
'arnesano',
'arni-see',
"arnold's",
'arnoison',
'arnavais',
'arnstadt',
'arnaouti',
'arnarvon',
'arnstorf',
'arnsdorf',
'arnaccio',
'arnottia',
'arnisort',
"arnaud's",
'arnolphe',
'arnellia',
'arnoldas',
'arnsberg',
'arnensee',
'arnaldoa',
'arnaldus',
'arnaudov',
'arnarson',
'arniquet',
'arnaison',
'arnakija',
'arnidiol',
'arnemann',
'arnstedt',
'arnshtam',
'arnaldo',
'arnside',
'arnfinn',
'arnebia',
'arnodin',
'arnatto',
'arnolol',
'arnljot',
'arnamul',
'arnolfo',
'arneson',
'arnaout',
'arnisee',
'arncott',
'arnuero',
'arnines',
'arnadhi',
'arnoldi',
'arnulfo',
'arnolec',
'arnhild',
'arnimal',
'arnayon',
'arngrim',
'arnissa',
'arnreit',
'arnauld',
'arnauti',
'arnobia',
'arnason',
'arnavaz',
'arnager',
'arndell',
'arnaeus',
'arnheim',
'arnfels',
'arnaveh',
'arnesby',
'arnotts',
'arntzen',
'arnauts',
'arnergy',
'arnould',
'arnetta',
'arnasco',
'arnhold',
'arnault',
'arnaoud',
'arnulph',
'arnotto',
'arneguy',
'arnardo',
'arnette',
'arnesen',
'arnison',
'arnoald',
'arneil',
'arnive',
'arnedo',
'arnhem',
'arnova',
'arnell',
'arnium',
'arneth',
'arnull',
'arnoro',
'arneus',
'arnaha',
'arneke',
'arnesa',
'arnage',
'arnara',
'arnapa',
'arnone',
'arnoun',
'arnoya',
'arnish',
'arnona',
'arnoux',
'arnett',
'arnset',
'arntl2',
'arnall',
'arnold',
'arneae',
'arnout',
'arnaba',
'arnaut',
'arnite',
'arnaia',
'arniko',
'arnulf',
'arnaby',
'arnoul',
'arniel',
'arnadi',
'arneva',
'arntor',
'arnego',
'arnace',
'arnald',
'arnost',
'arnave',
'arnous',
'arnott',
'arneis',
'arnoia',
'arnica',
'arnolt',
'arnaud',
'arnos',
'arnaq',
'arnow',
'arnab',
'arnaz',
'arnoc',
'arnin',
'arnet',
'arnac',
'arnay',
'arnie',
'arnar',
'arnus',
'arnim',
'arnas',
'arnal',
'arnah',
'arnex',
'arnon',
'arnau',
'arney',
'arnut',
'arneb',
'arner',
'arnol',
'arnel',
'arnpp',
'arnia',
'arnes',
'arnob',
'arnod',
'arntz',
'arnan',
'arnt2',
'arntl',
'arnad',
'arnup',
'arnor',
'arnga',
'arnis',
'arnee',
'arnot',
'arndt',
'arnp',
'arn.',
'arng',
'arns',
'arno',
'arna',
'arne',
'arnt',
'arnd',
'arny',
'arnk',
'arni',
'arn'],
'o': ['aroue-ithorots-olhaiby',
'aroundtherings.com',
'around-the-clock',
'aromatherapeutic',
'arora-ahluwalia',
'aromatherapist',
'aromatisation',
'aroserrothorn',
'arondissement',
'aromatization',
'aromadendron',
'aromobatidae',
'aromatherapy',
'aromadendrin',
'aromachology',
'aroos-gooleh',
'aromadendrol',
'aromadedrin',
'aromatoleum',
'arotrolepis',
'arotrophora',
'aroplectrus',
'arogyavaram',
'arondizuogu',
'arocephalus',
'aromatizing',
'aromaticity',
'arostropsis',
'aromobates',
'arotinolol',
'arotromima',
'arosvallen',
'arofylline',
'arotrephes',
'aromanians',
'aropsiclus',
'arochoides',
'arophyteae',
'aroundight',
'aroma-rama',
'aromashevo',
'aromalunni',
'arondight',
'arohirohi',
'arosemena',
'aromanian',
'aroostook',
'aromatics',
'aromatase',
'aronofsky',
'arophyton',
'aroundyou',
'arozarena',
'arouquesa',
'arookutty',
'aronstein',
'arochukwu',
'arostegui',
'aronowicz',
'aronszajn',
'aronchupa',
'aronowitz',
'aromarama',
'arothron',
'arosaete',
'aronsson',
'aroasian',
'aroundme',
'arontorp',
'aromasin',
'aroysund',
'aromatic',
'aromania',
'aroneanu',
'aronberg',
'arogalea',
'arorangi',
'aroanios',
'aroclors',
'aroideae',
'arocatus',
'arosuka',
'aronyan',
'aroclor',
'arodhes',
'aroding',
'arobtth',
'aromata',
'arotros',
'arovane',
'arobase',
'aroused',
'aroania',
'aroresa',
'aroazes',
'aroeira',
'arohana',
'aronson',
'arolsen',
'arobapa',
'arossia',
'arotria',
'aronian',
'arowana',
'arocena',
'arousal',
'aromas',
'arouet',
'aroana',
'arofan',
'aroich',
'aromia',
'aronow',
'arolac',
'arowak',
'aroras',
'arohan',
'aromaa',
'around',
'aronen',
'aroxat',
'arotxa',
'arotes',
'aroche',
'arokpa',
'aroldo',
'aropax',
'arogno',
'aronov',
'arokto',
'aroura',
'aronal',
'aroumd',
'arorae',
'arosio',
'aroroy',
'arotis',
'aromat',
'arolla',
'arocet',
'arocha',
'aroffe',
'arochi',
'arodes',
'arouca',
'arouva',
'aronia',
'aronda',
'aronas',
'arowry',
'arosi',
'arora',
'aroab',
'arops',
'arovo',
'aroma',
'arosa',
'arous',
'arome',
'aroer',
'aroga',
'aroor',
'aroid',
'aroli',
'arona',
'aroya',
'arotr',
'aroha',
'arola',
'arob',
'aror',
'arod',
'aros',
'aroz',
'aroe',
'aroi',
'aron',
'arom',
'aroa',
'aro'],
'p': ['arpaillargues-et-aureillac',
'arpheuilles-saint-priest',
'arpajon-sur-cere',
'arpeggiation',
'arphugitonos',
'arpophyllum',
'arpakhshadh',
'arpacbahsis',
'arpeggiated',
'arpa-tektir',
'arpeggiator',
'arppookkara',
'arpelistock',
'arpheuilles',
'arpegiator',
'arprinocid',
'arpachshad',
'arpangasia',
'arpafeelie',
'arpadhalom',
'arpaderesi',
'arpeggiato',
'arpeggione',
'arpilleras',
'arpagodus',
'arpag@dik',
'arpadites',
'arpa-chay',
'arpitania',
'arpeggios',
'arpa-chai',
'arptables',
'arpicordo',
'arphalego',
'arpookara',
'arpenteur',
'arpudham',
'arphaxad',
'arpadere',
'arpedium',
'arpwatch',
'arpsdorf',
'arpoador',
'arpeggio',
'arpeegee',
'arpitans',
'arpenans',
'arputham',
'arpachin',
'arpitany',
'arpaliq',
'arpeggi',
'arpacik',
'arp4754',
'arpilus',
'arpp-21',
'arpaise',
'arpanet',
'arp4761',
'arpinge',
'arpacas',
'arpavar',
'arpajon',
'arpalik',
'arpacay',
'arpagus',
'arpavon',
'arpinia',
'arp-410',
'arpinum',
'arpabet',
'arpegio',
'arpp-19',
'arpine',
'arp2/3',
'arphid',
'arpaio',
'arpc1a',
'arpani',
'arpali',
'arpunq',
'arpels',
'arphia',
'arpita',
'arpeja',
'arpunk',
'arpagi',
'arpa-e',
'arpc1b',
'arpege',
'arpaia',
'arpeni',
'arpent',
'arpack',
'arpino',
'arping',
'arpora',
'arpat',
'arpia',
'arpes',
'arpar',
'arpc2',
'arpos',
'arpon',
'arpc5',
'arpc4',
'arpas',
'arpc3',
'arpac',
'arphy',
'arpke',
'arpct',
'arpad',
'arpan',
'arpin',
'arpm',
'arpc',
'arps',
'arpe',
'arp1',
'arpl',
'arpi',
'arpg',
'arpu',
'arpa',
'arp'],
'q': ['arques-la-bataille',
'arquettes-en-val',
'arquebus-a-croc',
'arquitectonica',
'arquatopotamon',
'arquebusiers',
'arquipelago',
'arquebusier',
'arquillinos',
'arquatella',
'arquebuses',
'arquimedes',
'arquebuse',
'arquennes',
'arquillos',
'arquerite',
'arquebus',
'arqueros',
'arqamani',
'arquette',
'arqueves',
'arquenay',
'arquata',
'arqaneh',
'arquetu',
'arquita',
'arquiva',
'arqalyk',
'arquian',
'arquivo',
'arqalyq',
'arqarly',
'arqiqo',
'arqaya',
'arqatu',
'arques',
'arqana',
'arqiva',
'arqtiq',
'arqih',
'arqin',
'arq-e',
'arque',
'arq-m',
'arqua',
'arqa',
'arq'],
'r': ['arromanches-les-bains',
'arrow-debreu-mckenzie',
'arrentes-de-corcieux',
'arromanche-les-bains',
'arrancy-sur-crusnes',
'arratzua-ubarrundia',
'arrodets-ez-angles',
'arriagadoolithidae',
'arrien-en-bethmale',
'arrueta-sarrikota',
'arraute-charritte',
'arrast-larrebieu',
'arriagadoolithus',
'arras-en-lavedan',
'arrheniusfjellet',
'arrhinoceratops',
'arras-sur-rhone',
'arrayou-lahitte',
'arrhenophanidae',
'arran-elderslie',
'arrondissements',
'arrhenoblastoma',
'arratia-nerbioi',
'arrhenechthites',
'arrhythmogenic',
'arrah-na-pogue',
'arrojadocharis',
'arrhenosphaera',
'arrakkankottai',
'arrol-johnston',
'arricau-bordes',
'arrens-marsous',
'arrondissement',
'arrhopalitidae',
'arrondissment',
'arraya-maestu',
'arrhenopeplus',
'arroyomolinos',
'arrhenophanes',
'arrangingtime',
'arrhenothrips',
'arrhostoxylum',
'arraia-maeztu',
'arriaga-lakua',
'arraye-et-han',
'arrondisement',
'arrhenatherum',
'arraignments',
'arrancacepas',
'arrizabalaga',
'arrepentidos',
'arrhabonarii',
'arrow-debreu',
'arrigorriaga',
'arrt-antenna',
'arros-de-nay',
'arrangements',
'arrembecourt',
'arribasuchus',
'arruabarrena',
'arrhenoseius',
'arrankudiaga',
'arrowianella',
'arribasaurus',
'arrowsmithia',
'arrhopalites',
'arraincourt',
'arraiatores',
'arrhenotoky',
'arriere-ban',
'arrhinactia',
'arrow-woods',
'arrayanaria',
'arrudatitan',
'arrow-heads',
'arrayaccess',
'arrentieres',
'arrhythmica',
'arrosticino',
'arrowtongue',
'arripididae',
'arrissoules',
'arrosticini',
'arrhephoros',
'arrangement',
'arromanches',
'arrajnadzor',
'arroyohondo',
'arrouaisian',
'arrentation',
'arrol-aster',
'arrivaclick',
'arrephorion',
'arravonitsa',
'arrhythmias',
'arriere-fee',
'arrivederci',
'arrenoseius',
'arraignment',
'arrivillaga',
'arrhephoria',
'arrouquelas',
'arrenuridae',
'arrangoitze',
'arretez-moi',
'arrachadzor',
'arrhenomyza',
'arrr-mania',
'arronville',
'arrephoros',
'arrhichion',
'arrathorne',
'arrohattoc',
'arrighetto',
'arrivision',
'arrangings',
'arrowverse',
'arrowgrass',
'arronofsky',
'arrhamphus',
'arrhabaeus',
'arrogantly',
'arra-maida',
'arrow-loop',
'arraigning',
'arrabalera',
'arrouasian',
'arraytrack',
'arrabbiata',
'arrissalah',
'arrhidaios',
'arrhenious',
'arrajadzor',
'arrancourt',
'arrighetti',
'arrow-wood',
'arrowsmith',
'arrearages',
'arrowcraft',
'arremonops',
'arrowcross',
'arrowheads',
'arrhabaios',
'arrhynchus',
'arrow-root',
'arrhythmia',
'arratashen',
'arrestment',
'arrow-arum',
'arrierveld',
'arrhidaeus',
'arrivabene',
'arrow-head',
'arrendare',
'arrowweed',
'arraylist',
'arrabalde',
'arrolobos',
'arrhenius',
'arranmore',
'arrilalah',
'arrse.com',
'arrilaser',
'arresting',
'arrasando',
'arrowmont',
'arrendale',
'arroasian',
'arrpachay',
'arrachera',
'arrowtown',
'arrangers',
'arragonia',
'arrowette',
'arrowords',
'arrenurus',
'arrouaise',
'arronches',
'arrowslit',
'arrhytmia',
'arranging',
'arripiado',
'arrowwood',
'arriondas',
'arraycomm',
'arraigned',
'arrokiaga',
'arrentela',
'arrowston',
'arrythmia',
'arriscope',
'arrillaga',
'arracacha',
'arrasmith',
'arrayfire',
'arrojadoa',
'arripidae',
'arrabiata',
'arranians',
'arrandale',
'arriagada',
'arrington',
'arracacia',
'arrowhead',
'arrowroot',
'arrapahoe',
'arrecifes',
'arraiolos',
'arrowworm',
'arrestins',
'arresodal',
'arrogance',
'arracourt',
'arrenodes',
'arredondo',
'arrapkha',
'arrambam',
'arrochar',
'arrunden',
'arravale',
'arrector',
'arrufiat',
'arraytag',
'arradoul',
'arroword',
'arraigns',
'arrivera',
'arroyito',
'arrawati',
'arrested',
'arrieria',
'arroiabe',
'arranges',
'arrelles',
'arrhoges',
'arrinera',
'arrhenes',
'arrapaho',
'arryadia',
'arrigoni',
'arrallas',
'arrazola',
'arrufiac',
'arriving',
'arrhenia',
'arrestin',
'arrecife',
'arrugada',
'arrowbio',
'arrowana',
'arroscia',
'arraijan',
'arriccio',
'arriflex',
'arrietty',
'arrowman',
'arrhyton',
'arrester',
'arrastra',
'arrothia',
'arrogant',
'arrifana',
'arrernte',
'arronnes',
'arrowmen',
'arrogate',
'arrowcar',
'arrowina',
'arrianus',
'arrondie',
'arrhinia',
'arrebato',
'arrarnta',
'arrotino',
'arrechea',
'arrotois',
'arreglos',
'arranged',
'arrouiah',
'arrodets',
'arretium',
'arriance',
'arrjahur',
'arrabury',
'arrouede',
'arrayer',
'arriola',
'arrawda',
'arripis',
'arregui',
'arroios',
'arraioz',
'arranon',
'arrayan',
'arrechi',
'arredol',
'arrange',
'arriach',
'arrighi',
'arracht',
'arroniz',
'arraign',
'arrufat',
'arreaga',
'arraial',
'arreola',
'arricia',
'arroses',
'arriaza',
'arremon',
'arrants',
'arrocha',
'arranah',
'arruazu',
'arrorro',
'arrajol',
'arriate',
'arrests',
'arradon',
'arrifes',
'arrigas',
'arratzu',
'arroyos',
'arrapha',
'arratel',
'arrigny',
'arrajan',
'arriana',
'arriaga',
'arranis',
'arrouya',
'arrears',
'arrarbi',
'arripid',
'arriero',
'arraias',
'arricam',
'arrente',
'arrakis',
'arribas',
'arrondi',
'arrenes',
'arrival',
'arronax',
'arranho',
'arrieta',
'arreton',
'arribes',
'arrancy',
'arrubal',
'arraiza',
'arrivo',
'arrach',
'arreni',
'arrese',
'arriba',
'arrabe',
'arrans',
'arruda',
'arrhus',
'arrows',
'arroni',
'arrats',
'arroba',
'arrive',
'arrdee',
'arraya',
'arrays',
'arroux',
'arrinj',
'arreux',
'arroxo',
'arraia',
'arreau',
'arruns',
'arrovo',
'arreso',
'arraes',
'arrius',
'arrade',
'arrigo',
'arrien',
'arrack',
'arrawa',
'arrest',
'arroes',
'arriky',
'arranz',
'arrell',
'arrout',
'arruza',
'arraba',
'arroyo',
'arrata',
'arruiz',
'arraid',
'arrate',
'arrast',
'arrone',
'arraso',
'arraga',
'arrhon',
'arrium',
'arriva',
'arrian',
'arrar',
'arren',
'arrol',
'arrha',
'arras',
'arrie',
'arrlo',
'arria',
'arryx',
'arrse',
'arros',
'arrak',
'arrco',
'arrrr',
'arrez',
'arreh',
'arreo',
'arrua',
'arreu',
'array',
'arran',
'arrau',
'arrb2',
'arrup',
'arrow',
'arram',
'arres',
'arrou',
'arrb1',
'arris',
'arron',
'arrah',
'arrid',
'arro',
'arrc',
'arrp',
'arre',
'arrs',
'arr3',
'arrg',
'arrb',
'arrl',
'arrr',
'arry',
'arrt',
'arr1',
'arra',
'arri',
'arr'],
's': ['arslano-amekachevo',
'arsenuranospathite',
'arsenicsesquioxide',
'arsenidogermanate',
'arsinoitheriidae',
'arsenidosilicate',
'ars-an-der-mosel',
'arstaddalsdammen',
'arsure-arsurette',
'arsenidostannate',
'arseniosiderite',
'ars-sur-formans',
'arstechnica.com',
'arsukibacterium',
'ars-sur-moselle',
'arsac-en-velay',
'arsagalitaeans',
'arsonistically',
'arsinoitheriid',
'ars-les-favets',
'arsinoitherium',
'arsenicicoccus',
'arsinotherium',
'arsenoclasite',
'ars-laquenexy',
'arsenobetaine',
'arsenicitalea',
'arshaduzzaman',
'arstanosaurus',
'arsagaliteans',
'arsagalitaean',
'arsenalsgatan',
'arsphenamine',
'arsenopyrite',
'arsagalitean',
'arsetfjorden',
'arsenothelys',
'arsinoithere',
'arsenophonus',
'arsenobetane',
'arslanbekovo',
'arsonistical',
'arstabroarna',
'arsirrhyncha',
'arsabenzene',
'arsentyevka',
'arsenalotti',
'arsagalitae',
'arslanghali',
'arsenal.f.c',
'arsenosugar',
'arsenijevic',
'arsenicosis',
'arsenyevsky',
'arsenurinae',
'arstechnica',
'arshinagar',
'arsitocrat',
'arsamosata',
'arsedition',
'arsenyevka',
'arsenamide',
'arstaviken',
'arsonistic',
'arsebandit',
'arsenicals',
'arstiderna',
'arshakunis',
'arschbombe',
'arseneault',
'arslantepe',
'arsaciodes',
'arsenolite',
'arsharunik',
'arsecodile',
'arsennaria',
'arsyonovo',
'arsinspor',
'arsthinol',
'arslanian',
'arsonisms',
'arshatnab',
'arshakuni',
'arslankoy',
'arslanbab',
'arsdigita',
'arsenites',
'ars-en-re',
'arsenovic',
'arse-hole',
'arshaluys',
'arshakyan',
'arslanovo',
'arsonists',
'arsenates',
'arsenaria',
'arsenault',
'arslanbob',
'arsnuphis',
'arsacidae',
'arshantan',
'arsenyevo',
'arsenical',
'arshaluis',
'arsisios',
'arshavin',
'arsenite',
'arsat-3k',
'arsinoea',
'arsinoia',
'arsenyev',
'arsanias',
'arsawuya',
'arsapnia',
'arsenije',
'arsameia',
'arsinide',
'arsonist',
'arsikeri',
'arseguel',
'arslanli',
'arsinoos',
'arsenals',
'arsenius',
'arsatius',
'arstidir',
'arsacide',
'arsonval',
'arshunah',
'arsenale',
'arshavir',
'arsacius',
'arsakeio',
'arsonium',
'arshinov',
'arsacids',
'arseneau',
'arsonism',
'arsehole',
'arsikere',
'arsenate',
'arseniev',
'arsenide',
'arsamaki',
'arsenura',
'arsanjan',
'arseiles',
'arstech',
'arshani',
'arskoye',
'arschot',
'arshtin',
'arsippe',
'arscott',
'arslaan',
'arsaber',
'arsenio',
'arshaly',
'arsague',
'arsahni',
'arsgang',
'arsenik',
'arsunda',
'arsacia',
'arsat-1',
'arsenis',
'arseven',
'arskaya',
'arsinoi',
'arsacid',
'arstrat',
'arsizio',
'arsdorf',
'arsalyn',
'arsaces',
'arsinoe',
'arsande',
'arsures',
'arsinde',
'arsiyah',
'arsenka',
'arsobal',
'arstein',
'arsat-2',
'arsenal',
'arsamuh',
'arsloid',
'arshtat',
'arsissa',
'arsites',
'arsiero',
'arsenic',
'arsames',
'arsenie',
'arsoli',
'arsaki',
'arsech',
'arseny',
'arsine',
'arsiya',
'arsene',
'arsita',
'arsinh',
'arsada',
'arsala',
'arsr-4',
'arshan',
'arsuri',
'arsina',
'arsnic',
'arsion',
'arslan',
'arshaf',
'arsila',
'arstad',
'arslev',
'arskiy',
'arskii',
'arshad',
'arsane',
'arsons',
'arshaq',
'arsham',
'arsura',
'arsole',
'arshak',
'arsans',
'arshin',
'arsho',
'arset',
'arsur',
'arsak',
'arsik',
'arsia',
'arsha',
'arsoc',
'arsun',
'arsta',
'arsis',
'arsky',
'arsac',
'arsat',
'arsic',
'arski',
'arsbc',
'arseo',
'ars++',
'arsuz',
'arsoa',
'arsug',
'arshi',
'arsov',
'arsuk',
'arsuf',
'arsos',
'arsof',
'arsal',
'arses',
'arsin',
'arsch',
'arsie',
'arson',
'arsen',
'arsay',
'arsht',
'arsan',
'arsm',
'arss',
'arsi',
'arse',
'arsf',
'arsk',
'arsc',
'arsh',
'arsd',
'arso',
'arsn',
'arst',
'arsr',
'arsa',
'arsy',
'arsu',
'ars'],
't': ['artesunate/sulfadoxine/pyrimethamine',
'artificially-intelligent',
'artillerie-inrichtingen',
'artemether/lumefantrine',
'artesunate/pyronaridine',
'arthaz-pont-notre-dame',
'artesunate/amodiaquine',
'artigues-pres-bordeaux',
'artesunate/mefloquine',
'arthur-schnitzler-hof',
'artignosc-sur-verdon',
'artists-in-residence',
'artilleriregementet',
'artist-in-residence',
'artilleribataljonen',
'artificiallanguages',
'artannes-sur-thouet',
'artigues-de-lussac',
'artistic-athevains',
'artakovo-vandarets',
'artstetten-pobring',
'artannes-sur-indre',
'artixentertainment',
'artificallanguages',
'arteriolosclerosis',
'arthrodermataceae',
'artland-gymnasium',
'artaise-le-vivier',
'artyfechinostomum',
'arteshtaran-salar',
"arthez-d'armagnac",
'arthrostylidiinae',
'arthopyreniomyces',
'arteriosclerosis',
'arthroscopically',
'arthopyreniaceae',
'arteriosclerotic',
'artificial-heart',
'artist-educators',
'artedidraconidae',
'arthrosphaeridae',
'artistdirect.com',
'arthrotardigrada',
'arthricocephalus',
'arthus-bertrand',
'arthoniomycetes',
'arthropleuridea',
'artgemeinschaft',
'arthotheliopsis',
'articulavirales',
'arthrocladiella',
'artillerivollen',
'arts-and-crafts',
'artematopodidae',
'arthrostylidium',
'arthromygalidae',
'artist-educator',
'arthez-de-bearn',
'arterosclerosis',
'arthroclianthus',
'artificialities',
'arthropleuridae',
'art-sur-meurthe',
'artus-excalibur',
'arthrolycosidae',
'arthropterygius',
'artern/unstrut',
'artabilitation',
'arthrofibrosis',
'arthracanthida',
'artasiapacific',
'artalens-souin',
'arthrogryposis',
'arthophacopsis',
'arthroleptella',
'arthurkoestler',
'artedielloides',
'artemisiospiza',
'artists4israel',
'arthon-en-retz',
'artaiouteichos',
'arthrocentesis',
'arthroconidium',
'arthrosporella',
"arthez-d'asson",
'arthurdactylus',
'arteriogenesis',
'artyushinskaya',
'arthropleurida',
'arthroleptidae',
'arthropodicide',
'arthrorhynchus',
'artillerigatan',
'arthropodology',
'arthroleptides',
'arthroschista',
'artoo-opustoo',
'artaza-escota',
'arthur-virden',
'artistenrevue',
'articulatidin',
'arteriography',
'artistocratic',
'arthrochlamys',
'artland-arena',
'arthurdendyus',
'arthrothamnus',
'artiodactylla',
'artemkovskaya',
'arthuriomyces',
'artcyclopedia',
'arthroconidia',
'artemidiconus',
'arthropleidae',
'arthropathies',
'arthrographis',
'artemisiopsis',
'arthrophyllum',
'artificiality',
'artigueloutan',
'arteriviridae',
'art-designing',
'artofillusion',
'arthromelodes',
'arthragrostis',
'artemyevskaya',
'arthrorhaphis',
'art-designers',
'articulations',
'artpodgotovka',
'arthroscopies',
'arteriovenous',
'arthroscopic',
'artillerymen',
'arthrobacter',
'arthopyrenia',
'artificially',
'arthrochilus',
'art-designer',
'arthroleptis',
'arthroplasty',
'arthrography',
'artevalencia',
'artzybasheff',
'artediellina',
'artoriellula',
'arthrocaulon',
'artiestenhof',
'art+feminism',
'arthropleona',
'arthromastix',
'arthrocnemum',
'artotrogidae',
'art-language',
'artincidence',
'arthrosporum',
'artilleryman',
'arthrobotrys',
'arthrostemma',
'artha-sastra',
'arthropleura',
'artioposthia',
'arthaberites',
'articlesbase',
'arthropterus',
'artiguelouve',
'arthrophytum',
'arthoniaceae',
'artistdirect',
'artiodactyla',
'arthashastra',
'artemisinins',
'arthropodium',
'articulation',
'artcriticism',
'artyomovskiy',
'arthropteris',
'arthrobotrya',
'artiodactyls',
'arthingworth',
'art-siadziba',
'arthrochaete',
'artiomovskiy',
'articulatory',
'arthrolycosa',
'arthrostylis',
'arthothelium',
'arthrocereus',
'artemonopsis',
'arthroteles',
'artsplosure',
'artoo-detoo',
'arthropathy',
'art-designs',
'artiofabula',
'arthropitys',
'artaphernes',
'artemidorus',
'artsakhbank',
'artoklassia',
'artyushkino',
'artyomovsky',
'articerodes',
'artocarpeae',
'artillery-3',
'art.lebedev',
'art-nouveau',
'articulate!',
'artur-music',
'artistshare',
'artoriopsis',
'artashastra',
'artashumara',
'artotyritae',
'artedidraco',
'arthrodires',
'art-o-thlon',
'artiodactyl',
'artemesinin',
'artsimovich',
'artemovskiy',
'arthropogon',
'artaphrenes',
'arthursdale',
'arterivirus',
'artinsights',
'arthurstown',
'artchronika',
'arthrospira',
'artsybashev',
'arthrodytes',
'arthromyces',
'artietsmitw',
'artzosuchus',
'arthoniales',
'arthropeina',
'artotyrites',
'arteriotomy',
'artesonraju',
'arthrosaura',
'arthasastra',
'arteriogram',
'art+auction',
'arth-goldau',
'artpassport',
'arthroceras',
'arthroaspis',
'artegg-yumi',
'articulated',
'artyukhovka',
'arthroscope',
'artietwmitw',
'arthromeris',
'artoviridae',
'articulator',
'arthropsida',
'arthralgias',
'artilleribn',
'arthrodesis',
'artabazanes',
'arthroscopy',
'artibrannan',
'artistworks',
'artemisinin',
'arthrophyta',
'art-moderne',
'artzentales',
'artediellus',
'artheneidae',
'art-a-whirl',
'artsvaberd',
'arthington',
'artabasdos',
'arterioles',
'artsutanov',
'art-o-graf',
'artrageous',
'artsvanist',
'artocarpus',
'artesunate',
'arterially',
'artopoetes',
'arthemesia',
'artisornis',
'arthuriana',
'arthrodire',
'artukekang',
'artistamps',
'artemisias',
'artavasdus',
'artsboston',
'artemisina',
'artexpress',
'art-design',
'artyomkovo',
'arthroplea',
'artamonovo',
'arthapatti',
'arthephius',
'arthropoda',
'artafallie',
'arthralgia',
'artdocfest',
'arthabaska',
'artzenheim',
'arthropods',
'articaudna',
'artigatois',
'artificial',
'artavasdes',
'arthropeas',
'articulina',
'artukainen',
'artemidora',
'artichokes',
'arthrodial',
'artembares',
'arthanaari',
'artbreeder',
'articulata',
'artepiazza',
'artiplanto',
'artisphere',
'arthrinium',
'artabotrys',
'artsbridge',
'artesiidae',
'artemisium',
'artyushino',
'artibonite',
'artiphanes',
'artsevanik',
'arteshabad',
'artiocetus',
'arthrodira',
'arts-dance',
'artillerie',
'arthrology',
'artashavan',
'arterolane',
'artoklasia',
'artemision',
'artyomovsk',
'arthrodont',
'artematica',
'artemis-30',
'artazostre',
'artlifting',
'artotyrite',
'artechouse',
'arthurella',
'artyshchiv',
'artoriinae',
'artpremium',
'artemiidae',
'artinskian',
'arthrogram',
'artesonado',
'artabasdus',
'articulate',
'arturzinho',
'articulare',
'artzvashen',
'arthurfest',
'arthritica',
'arthrocare',
'artymnesus',
'arthaldeus',
'artemiflow',
'artsvashen',
'arthraerua',
'artagerses',
'artlenburg',
'arthrolips',
'arteurotia',
'artifodina',
'artymnesos',
'artolsheim',
'arthrotomy',
'artemether',
'artemovsky',
'artwashing',
'artaxerxes',
'art.welten',
'artanovsky',
'artigarvan',
'artamanovo',
'arthemonay',
'artziniega',
'artscience',
'artemissia',
'artefaktur',
'artistamp',
'artumpara',
'arthungal',
'artemotil',
'artbabble',
'artosilla',
'artsadmin',
'artecombo',
'artomatic',
'artorhiza',
'artoxares',
'artamonov',
'artigasus',
'arthraxon',
'artcrimes',
'arttactic',
'artfutura',
'art/draft',
'art-o-mat',
'artachaea',
'artamidae',
'artsvenik',
'artystone',
'articella',
'arterburn',
'artyphius',
'artitropa',
'artelliry',
'articlave',
'artaxarta',
'arthalgia',
'artreview',
'arthanari',
'artayctes',
'artavazde',
'artsplace',
'artsvanik',
'artifacts',
'artemivsk',
'arthurian',
'arthrosis',
'artenacia',
'artpeople',
'artephius',
'artapanas',
'arthamuru',
'artamella',
'arthrodia',
'artacaena',
'arthurite',
'art/media',
'artiguemy',
'artyomovo',
'artemenko',
'artursson',
'artemisia',
'artabuynk',
"artiya'il",
'artsvakar',
'artrocker',
'arteether',
'arteriole',
'artglaube',
'arthonnay',
'artsworld',
'artificer',
'artemyeva',
'artoffact',
'artemisin',
'artcurial',
'artyomova',
'arthurson',
'arthropod',
'artiifact',
'artisteer',
'artisanal',
'artholmen',
'artapanus',
'artoolkit',
'artassenx',
'artyukhin',
'artaulovo',
'arthritus',
'artigasia',
'artagyukh',
'arthurlie',
'artichoke',
'artroeite',
'arteritis',
'artemisio',
'arthunkal',
'articolla',
'artaminae',
'artabanus',
'artmotion',
'artioscad',
'arthuriad',
'artiopoda',
'artikelly',
'artipelag',
'artdc.org',
'artesanos',
'arthritis',
'artington',
'artabuink',
'arthritic',
'artechoke',
'artifical',
'artabanes',
'arthinkal',
'artweaver',
'artension',
'artothlon',
'arthrotec',
'artherapy',
'artscroll',
'articaine',
'artaserse',
'artantica',
'artbridge',
'arthelais',
'artsdepot',
'arthrofen',
'artitudes',
'artiguism',
'arthingal',
'artemesia',
'art-house',
'artscribe',
'artzooka!',
'artbakery',
'artzvanik',
'artacoana',
'artomeria',
'artomyces',
'artabazes',
'artukhina',
'artefacts',
'artillery',
'articular',
'artiletra',
'artopoula',
'articling',
'artasyrus',
'art-world',
'artiloxis',
'art-rite',
'artizone',
'artronix',
'artakama',
'articles',
'artivion',
'artunkal',
'artomana',
'artforum',
'artuqids',
'artistic',
'arthonia',
'arteries',
'artemova',
"arthur's",
'artakioi',
'artukids',
'artistry',
'artemius',
'artotlon',
'artifice',
'artigues',
'arterton',
'artaxias',
'artakovo',
'arturowo',
'artprize',
'art-club',
'artisans',
'artefact',
'artashar',
'artimino',
'artiknos',
'artemisa',
'artellia',
'artchive',
'arthouse',
'artemide',
'artoldus',
'artibeus',
'arthmius',
'artmedia',
'artsruni',
'artemida',
'artilect',
'artamene',
'artemiev',
'artichia',
'artifakt',
'artinite',
'artjarvi',
'arterial',
'artelida',
'artashes',
'artaldus',
'art-punk',
'artsfest',
'arthemis',
'artostan',
'artaxiad',
'artvoice',
'artivism',
'artanada',
'artiodus',
'artrelia',
'artybash',
'artritis',
'arthuret',
'artifort',
'artiglio',
'artagall',
'arthenas',
'artangel',
'artigisa',
'artenara',
'artscene',
'artemare',
'art-deco',
'artinian',
'artworld',
'arthisma',
'artspace',
'artaynte',
'arturito',
'artilery',
'artzruni',
'art-rock',
'artanish',
'artworks',
'artagnan',
'artukais',
'artatama',
'artprice',
'artedius',
'artemovo',
'artinkal',
'artsaha!',
'artotina',
'artorima',
'artemjev',
'artradis',
'arthenac',
'arthania',
'artashat',
'artemita',
'artnexus',
'artyomov',
'artxanda',
'artrosis',
'artemyev',
'artaxata',
'artraker',
'artelius',
'artefill',
'artivist',
'artscape',
'artefius',
'artifact',
'artonges',
'articled',
'arthunge',
'artspeak',
'artabrus',
'artinano',
'artisten',
'artsmark',
'artanema',
'artajona',
'artesian',
'artashen',
'artheze',
'artisto',
'artzakh',
'artemis',
'arturas',
'artiles',
'arturia',
'artaios',
'artadia',
'artpace',
'artemov',
'artigat',
'artlink',
'artforo',
'artakul',
'artbots',
'artedis',
'arthuna',
'artukid',
'artipic',
'artpack',
'arteris',
'arthena',
'artsakh',
'artiman',
'arturis',
'artbook',
'artahan',
'artemas',
'artenac',
'artieda',
'artaius',
'artemia',
'artsvik',
'artsong',
'artslav',
'artwall',
'artaxes',
'artonne',
'artipus',
'artuklu',
'artavaz',
'artamet',
'artfire',
'artynia',
'artiluc',
'arthana',
'artuqid',
'artakai',
'artavan',
'artwood',
'artstar',
'artfido',
'artaces',
'arthies',
'artaria',
'article',
'artwork',
'artanis',
'artanes',
'arthurs',
'artpark',
'artstor',
'artonis',
'artisia',
'artcars',
'artamus',
'artiste',
'artempo',
'artemps',
'artjoms',
'arteche',
'artwerk',
'artdink',
'arteijo',
'artiora',
'artogne',
'artisan',
'artinis',
'artport',
'artnews',
'arteezy',
'arteixo',
'artosis',
'artspan',
'artzuid',
'artabri',
'arthaud',
'artemin',
'artigue',
'artrock',
'artlukh',
'artworx',
'arteaga',
'artegna',
'artcirq',
'artcell',
'artrage',
'artline',
'artemus',
'artical',
'artrave',
'arthrob',
'artoria',
'arthill',
'artavil',
'artemon',
'artesie',
'artenay',
'artists',
'artesia',
'artsyom',
'artimet',
'artijan',
'artines',
'artland',
'artumes',
'artsana',
'art.net',
'artrain',
'artvize',
'artifex',
'art-net',
'artlang',
'artbank',
'artemiy',
'artedia',
'artango',
'artmuza',
'artmark',
'artelia',
'artemio',
'artano',
'artupa',
'arthus',
'artjog',
'artuch',
'artoys',
'art-xc',
'artist',
'arthun',
'arteta',
'artech',
'artane',
'arteni',
'artsni',
'artace',
'artuby',
'artman',
'arthes',
'artpop',
'arturo',
'artusi',
'artoon',
'artajo',
'artein',
'artuso',
'arthur',
'artade',
'artham',
'art:21',
'artium',
'artume',
'artana',
'artova',
'arthon',
'artipe',
'artold',
'artazu',
'artega',
'artlog',
'artice',
'artahe',
'artley',
'artois',
'artssa',
'arturs',
'arteza',
'arthua',
'artmic',
'artres',
'arties',
'arthel',
'artedi',
'arteis',
'artena',
'artsah',
'artona',
'artemi',
'artvin',
'artesh',
'artyom',
'artolf',
'artrod',
'artokh',
'artsyz',
'artful',
'artouz',
'artins',
'arthog',
'artari',
'art-13',
'artneo',
'artaud',
'artern',
'artian',
'artcar',
'artemy',
'artery',
'artime',
'artand',
'artima',
'artios',
'artaix',
'artown',
'artanh',
'artnet',
'artaxa',
'artesa',
'artgal',
'artell',
'artema',
'artdoc',
'artel',
'artax',
'artsa',
'artan',
'artin',
'artbo',
'artux',
'artis',
'artha',
'artil',
'artot',
'arter',
'artom',
'artas',
'artek',
'artix',
'artun',
'artze',
'artia',
'arteh',
'artos',
'artsd',
'artzi',
'arthi',
'artus',
'artch',
'artuk',
'art4d',
'artcc',
'artik',
'artsy',
'artaz',
'artic',
'artio',
'artag',
'artex',
'artgo',
'artba',
'artea',
'art21',
'artat',
'artuf',
'artec',
'artes',
'artur',
'artoo',
'artyk',
'artie',
'arton',
'art-x',
'artah',
'artts',
'artem',
'artak',
'arta',
'art3',
'art4',
'artc',
'arte',
'arto',
'arti',
'arty',
'artx',
'artv',
'arth',
'artz',
'arts',
'art'],
'u': ['arutanga-reureu-nikaupara',
'aruvankattuvalasu',
'arundoclaytonia',
'arunagirinathar',
'arumaipperumal',
'arunagirinagar',
'arutperunjothi',
'arumainayagam',
'aruppukkottai',
'arundavapuram',
'arunthathiyar',
'arupadaiveedu',
'arundinoideae',
'arundinelleae',
'arundinarieae',
'arundinicola',
'arutani-sape',
'arungurukkai',
'aruppukottai',
'arunachalam',
'arumanallur',
'aruodynerus',
'arunamalaia',
'arulmolipet',
'aruvippuram',
'aruvikkarai',
'arundinella',
'aruvappulam',
'arungundram',
'arukalickal',
'arumuganeri',
'arulampalam',
'arunodhayam',
'arundinaria',
'aruvapakkam',
'arunachala',
'arumbakkam',
'aruvankadu',
'arundineae',
'aruchavank',
'arubaconus',
'arumanians',
'aruhentina',
'arundelpet',
'arutiunian',
'aruvikkara',
'arumbanur',
'arunkhola',
'arundells',
'arumanian',
'arubolana',
'aruvikara',
'arumbavur',
'aruraumon',
'arussaare',
'aruspices',
'arukwatta',
'arulnithi',
'arukharka',
'aruzhqoli',
'arupaalse',
'arundhati',
'arul-nool',
'arumberia',
'arusnates',
'arusiyyah',
'arunachal',
'arunodaya',
'arumanoor',
'arubianus',
'arunasura',
'arunkumar',
'arumecla',
'arutunga',
'arulappa',
'aruchaur',
'arulidae',
'arumetsa',
'aruncuta',
'arusnati',
'arutanga',
'aruagint',
'arundell',
'aruvalja',
'arumugam',
'arunqash',
'arundina',
'aruattus',
'aruldoss',
'aruppola',
'arumaeus',
'arulenus',
'arumukan',
'aruspice',
'arumanai',
'aruiteru',
'aruvalla',
'arughtai',
'arushina',
'arunasva',
'aru-ding',
'arumugom',
'arubaito',
'arumulai',
'arunvadi',
'arunkal',
'aruncus',
'arujarv',
'aruanda',
'arumeru',
'arucard',
'arutela',
'arunima',
'arungen',
'arushki',
'arutz-7',
'aruchan',
'arundel',
'arunena',
'arubans',
'aruanas',
'arukula',
'arugisa',
'arugtai',
'arubani',
'arugala',
'aruwimi',
'arungal',
'aruldev',
'arugula',
'aruliho',
'arubote',
'arunta',
'arunus',
'aruaia',
'aruana',
'arusar',
'arugot',
'aruaru',
'arumae',
'arutua',
'arukse',
'arunas',
'arudan',
'aruruu',
'aruval',
'arundo',
'aruman',
'arular',
'arugla',
'aruvam',
'aruray',
'arusei',
'aruban',
'arupaa',
'aruama',
'arudra',
'arusak',
'arusta',
'arudou',
'arukku',
'arussi',
'arugba',
'arugam',
'arusha',
'aruste',
'aruthu',
'arukh',
'aruvi',
'aruaa',
'arudo',
'aruch',
'arupa',
'aruak',
'arula',
'aruli',
'aruru',
'aruna',
'aruan',
'arups',
'arudy',
'aruko',
'aruja',
'aruze',
'arufo',
'arusi',
'aruba',
'arura',
'aruta',
'aruq',
'arun',
'arul',
'arus',
'arup',
'arum',
'aruk',
'arui',
'arua',
'arud',
'arue',
'aru'],
'v': ['arvandrud/shatt-al-arab',
'arviere-en-valromey',
'arvamusfestival',
'arvantovlaxika',
'arvfurstinna',
'arvicanthini',
'arvinmeritor',
'arvinachelys',
'arvesiniadu',
'arvandkenar',
'arvicanthis',
'arvicolinae',
'arvorezinha',
'arvanitissa',
'arvand-rood',
'arvicolidae',
'arvopaperi',
'arvandroud',
'arvaykheer',
'arvanitika',
'arvibacter',
'arvand-rud',
'arvingarna',
'arvandrood',
'arvirargus',
'arvidsjaur',
'arvaikheer',
'arvanitaki',
'arvicolini',
'arvfurste',
'arvanites',
'arvernian',
'arvandrud',
'arvidsson',
'arvayheer',
'arveladze',
'arvillard',
'arvanitis',
'arvanitic',
'arvillers',
'arviragus',
'arvernien',
'arvelius',
'arvicola',
'arvandus',
'arvanite',
'arvoredo',
'arvernia',
'arvensis',
'arvinder',
'arvidson',
'arvernus',
'arvonian',
'arvatica',
'arveyres',
'arveyron',
'arvingen',
'arvinger',
'arvernes',
'arvieux',
'arvydas',
'arvigna',
'arvesen',
'arvilla',
'arvonen',
'arveson',
'arvense',
'arvanit',
'arvajeh',
'arvesta',
'arverns',
'arvanaq',
'arville',
'arverni',
'arvaneh',
'arvoll',
'arvana',
'arvika',
'arvine',
'arvumi',
'arvigo',
'arveni',
'arvagh',
'arvest',
'arviza',
'arvelo',
'arvies',
'arvind',
'arvell',
'arvand',
'arvieu',
'arvals',
'arvato',
'arvizu',
'arviat',
'arvila',
'arvids',
'arvale',
'arvier',
'arvati',
'arvert',
'arvore',
'arven',
'arvij',
'arvis',
'arvai',
'arvan',
'arvon',
'arviz',
'arvcf',
'arved',
'arvar',
'arvit',
'arvas',
'arvey',
'arvad',
'arvid',
'arval',
'arvi',
'arvn',
'arv1',
'arvo',
'arve',
'arvc',
'arva',
'arvd',
'arvw',
'arv'],
'w': ['arwidssonia',
'arwidsson',
'arwystli',
'arwenack',
'arwarton',
'arwaliya',
'arwadito',
'arweet',
'arwium',
'arwald',
'arwen',
'arwin',
'arwos',
'arwon',
'arwal',
'arwad',
'arwel',
'arwyn',
'arwi',
'arw1',
'arw2',
'arwu',
'arwa',
'arwe',
'arw'],
'x': ['arxiomyces',
'arxiv.org',
'arxintina',
'arxentina',
'arxellia',
'arxxant',
'arxama',
'arxan',
'arxyz',
'arxiv',
'arx'],
'y': ['aryldialkylphosphatase',
'arylsulfotransferase',
'arylcyclohexylamine',
'arylcyclohexanamine',
'arylacetonitrilase',
'arylcyclohexamine',
'aryachakravartis',
'aryl-acylamidase',
'arylalkanolamine',
'aryachakaravarti',
'aryepiglotticus',
'aryachakravarti',
'arylformamidase',
'aryacakravarti',
'arylsulfatases',
'arylalkylamine',
'arylsulfatase',
'arylsulfonate',
'arytaenoideae',
'arytaenoideus',
'aryanization',
'arytropteris',
'aryanisation',
'arylesterase',
'aryan-kartli',
'arytrurides',
'arystoteles',
'arylmethine',
'aryshparovo',
'aryabhatiya',
'aryobarzan',
'arylomycin',
'aryballoid',
'aryaashahr',
'arysdaghes',
'arytenoids',
'aryloxides',
'arytrurina',
'aryabhatta',
'aryankuzhi',
'aryballus',
'ary-tolon',
'aryashahr',
'aryabhata',
'aryanandi',
'arytenoid',
'aryballos',
'aryankavu',
'arylation',
'aryapuram',
'aryabhatt',
'arylamine',
'aryavarta',
'aryirades',
'aryanist',
'aryamala',
'arytrura',
'aryaraja',
'aryanism',
'aryamehr',
'aryanadu',
'aryamani',
'aryktakh',
'arycanda',
'aryadeva',
'aryandes',
'arytera',
'aryenis',
'aryaman',
'ary-mas',
'aryogen',
'arybbas',
'arymbas',
'aryndzh',
'aryanah',
'ary-tit',
'arylakh',
'arylene',
'aryaee',
'aryana',
'aryman',
'aryzta',
'arywee',
'arynow',
'aryaka',
'arybas',
'aryika',
'aryeh',
'aryll',
'aryob',
'aryan',
'aryad',
'arya`',
'aryee',
'aryal',
'aryne',
'aryk',
'aryn',
'arys',
'aryl',
'aryd',
'ary'],
'z': ['arzillieres-neuville',
'arzacq-arraziguet',
'arz.wikipedia.org',
'arzenc-de-randon',
"arzenc-d'apcher",
'arzani-volpini',
'arzhanovskaya',
'arzte-zeitung',
'arzoumanian',
'arzergrande',
'arzugitana',
'arzamas-16',
'arzumanyan',
'arzamasova',
'arzamopsis',
'arzoxifene',
'arzubiaga',
'arzembouy',
'arzignano',
'arzashkun',
'arzakyand',
'arzonella',
'arzviller',
'arzachena',
'arzubikha',
'arzuiyeh',
'arzanene',
'arzanias',
'arzutraa',
'arzantak',
'arzhanov',
'arzerra',
'arzamas',
'arzanah',
'arzanaq',
'arzberg',
'arzbach',
'arzaneh',
'arzakan',
'arzefun',
'arzecla',
'arzheim',
'arzuvaj',
'arzhang',
'arziani',
'arzania',
'arzfeld',
'arzaqan',
'arzhan',
'arzano',
'arzens',
'arzhis',
'arzama',
'arzgir',
'arzana',
'arzuni',
'arzgun',
'arzila',
'arzest',
'arzach',
'arzier',
'arzner',
'arzang',
'arzenu',
'arzawa',
'arzat',
'arzin',
'arzen',
'arzua',
'arzak',
'arzon',
'arzay',
'arzan',
'arzil',
'arzos',
'arzah',
'arzac',
'arzal',
'arzni',
'arzew',
'arziw',
'arzet',
'arzoo',
'arzul',
'arzel',
'arzt',
'arzc',
'arzl',
'arzo',
'arza',
'arzu',
'arz']}
|
PypiClean
|
/dsin100days603v37-6.0.3.tar.gz/dsin100days603v37-6.0.3/notebook/static/components/codemirror/mode/clojure/clojure.js
|
(function(mod) {
if (typeof exports === "object" && typeof module === "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define === "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("clojure", function (options) {
var atoms = ["false", "nil", "true"];
var specialForms = [".", "catch", "def", "do", "if", "monitor-enter",
"monitor-exit", "new", "quote", "recur", "set!", "throw", "try", "var"];
var coreSymbols = ["*", "*'", "*1", "*2", "*3", "*agent*",
"*allow-unresolved-vars*", "*assert*", "*clojure-version*",
"*command-line-args*", "*compile-files*", "*compile-path*",
"*compiler-options*", "*data-readers*", "*default-data-reader-fn*", "*e",
"*err*", "*file*", "*flush-on-newline*", "*fn-loader*", "*in*",
"*math-context*", "*ns*", "*out*", "*print-dup*", "*print-length*",
"*print-level*", "*print-meta*", "*print-namespace-maps*",
"*print-readably*", "*read-eval*", "*reader-resolver*", "*source-path*",
"*suppress-read*", "*unchecked-math*", "*use-context-classloader*",
"*verbose-defrecords*", "*warn-on-reflection*", "+", "+'", "-", "-'",
"->", "->>", "->ArrayChunk", "->Eduction", "->Vec", "->VecNode",
"->VecSeq", "-cache-protocol-fn", "-reset-methods", "..", "/", "<", "<=",
"=", "==", ">", ">=", "EMPTY-NODE", "Inst", "StackTraceElement->vec",
"Throwable->map", "accessor", "aclone", "add-classpath", "add-watch",
"agent", "agent-error", "agent-errors", "aget", "alength", "alias",
"all-ns", "alter", "alter-meta!", "alter-var-root", "amap", "ancestors",
"and", "any?", "apply", "areduce", "array-map", "as->", "aset",
"aset-boolean", "aset-byte", "aset-char", "aset-double", "aset-float",
"aset-int", "aset-long", "aset-short", "assert", "assoc", "assoc!",
"assoc-in", "associative?", "atom", "await", "await-for", "await1",
"bases", "bean", "bigdec", "bigint", "biginteger", "binding", "bit-and",
"bit-and-not", "bit-clear", "bit-flip", "bit-not", "bit-or", "bit-set",
"bit-shift-left", "bit-shift-right", "bit-test", "bit-xor", "boolean",
"boolean-array", "boolean?", "booleans", "bound-fn", "bound-fn*",
"bound?", "bounded-count", "butlast", "byte", "byte-array", "bytes",
"bytes?", "case", "cast", "cat", "char", "char-array",
"char-escape-string", "char-name-string", "char?", "chars", "chunk",
"chunk-append", "chunk-buffer", "chunk-cons", "chunk-first", "chunk-next",
"chunk-rest", "chunked-seq?", "class", "class?", "clear-agent-errors",
"clojure-version", "coll?", "comment", "commute", "comp", "comparator",
"compare", "compare-and-set!", "compile", "complement", "completing",
"concat", "cond", "cond->", "cond->>", "condp", "conj", "conj!", "cons",
"constantly", "construct-proxy", "contains?", "count", "counted?",
"create-ns", "create-struct", "cycle", "dec", "dec'", "decimal?",
"declare", "dedupe", "default-data-readers", "definline", "definterface",
"defmacro", "defmethod", "defmulti", "defn", "defn-", "defonce",
"defprotocol", "defrecord", "defstruct", "deftype", "delay", "delay?",
"deliver", "denominator", "deref", "derive", "descendants", "destructure",
"disj", "disj!", "dissoc", "dissoc!", "distinct", "distinct?", "doall",
"dorun", "doseq", "dosync", "dotimes", "doto", "double", "double-array",
"double?", "doubles", "drop", "drop-last", "drop-while", "eduction",
"empty", "empty?", "ensure", "ensure-reduced", "enumeration-seq",
"error-handler", "error-mode", "eval", "even?", "every-pred", "every?",
"ex-data", "ex-info", "extend", "extend-protocol", "extend-type",
"extenders", "extends?", "false?", "ffirst", "file-seq", "filter",
"filterv", "find", "find-keyword", "find-ns", "find-protocol-impl",
"find-protocol-method", "find-var", "first", "flatten", "float",
"float-array", "float?", "floats", "flush", "fn", "fn?", "fnext", "fnil",
"for", "force", "format", "frequencies", "future", "future-call",
"future-cancel", "future-cancelled?", "future-done?", "future?",
"gen-class", "gen-interface", "gensym", "get", "get-in", "get-method",
"get-proxy-class", "get-thread-bindings", "get-validator", "group-by",
"halt-when", "hash", "hash-combine", "hash-map", "hash-ordered-coll",
"hash-set", "hash-unordered-coll", "ident?", "identical?", "identity",
"if-let", "if-not", "if-some", "ifn?", "import", "in-ns", "inc", "inc'",
"indexed?", "init-proxy", "inst-ms", "inst-ms*", "inst?", "instance?",
"int", "int-array", "int?", "integer?", "interleave", "intern",
"interpose", "into", "into-array", "ints", "io!", "isa?", "iterate",
"iterator-seq", "juxt", "keep", "keep-indexed", "key", "keys", "keyword",
"keyword?", "last", "lazy-cat", "lazy-seq", "let", "letfn", "line-seq",
"list", "list*", "list?", "load", "load-file", "load-reader",
"load-string", "loaded-libs", "locking", "long", "long-array", "longs",
"loop", "macroexpand", "macroexpand-1", "make-array", "make-hierarchy",
"map", "map-entry?", "map-indexed", "map?", "mapcat", "mapv", "max",
"max-key", "memfn", "memoize", "merge", "merge-with", "meta",
"method-sig", "methods", "min", "min-key", "mix-collection-hash", "mod",
"munge", "name", "namespace", "namespace-munge", "nat-int?", "neg-int?",
"neg?", "newline", "next", "nfirst", "nil?", "nnext", "not", "not-any?",
"not-empty", "not-every?", "not=", "ns", "ns-aliases", "ns-imports",
"ns-interns", "ns-map", "ns-name", "ns-publics", "ns-refers",
"ns-resolve", "ns-unalias", "ns-unmap", "nth", "nthnext", "nthrest",
"num", "number?", "numerator", "object-array", "odd?", "or", "parents",
"partial", "partition", "partition-all", "partition-by", "pcalls", "peek",
"persistent!", "pmap", "pop", "pop!", "pop-thread-bindings", "pos-int?",
"pos?", "pr", "pr-str", "prefer-method", "prefers",
"primitives-classnames", "print", "print-ctor", "print-dup",
"print-method", "print-simple", "print-str", "printf", "println",
"println-str", "prn", "prn-str", "promise", "proxy",
"proxy-call-with-super", "proxy-mappings", "proxy-name", "proxy-super",
"push-thread-bindings", "pvalues", "qualified-ident?",
"qualified-keyword?", "qualified-symbol?", "quot", "rand", "rand-int",
"rand-nth", "random-sample", "range", "ratio?", "rational?",
"rationalize", "re-find", "re-groups", "re-matcher", "re-matches",
"re-pattern", "re-seq", "read", "read-line", "read-string",
"reader-conditional", "reader-conditional?", "realized?", "record?",
"reduce", "reduce-kv", "reduced", "reduced?", "reductions", "ref",
"ref-history-count", "ref-max-history", "ref-min-history", "ref-set",
"refer", "refer-clojure", "reify", "release-pending-sends", "rem",
"remove", "remove-all-methods", "remove-method", "remove-ns",
"remove-watch", "repeat", "repeatedly", "replace", "replicate", "require",
"reset!", "reset-meta!", "reset-vals!", "resolve", "rest",
"restart-agent", "resultset-seq", "reverse", "reversible?", "rseq",
"rsubseq", "run!", "satisfies?", "second", "select-keys", "send",
"send-off", "send-via", "seq", "seq?", "seqable?", "seque", "sequence",
"sequential?", "set", "set-agent-send-executor!",
"set-agent-send-off-executor!", "set-error-handler!", "set-error-mode!",
"set-validator!", "set?", "short", "short-array", "shorts", "shuffle",
"shutdown-agents", "simple-ident?", "simple-keyword?", "simple-symbol?",
"slurp", "some", "some->", "some->>", "some-fn", "some?", "sort",
"sort-by", "sorted-map", "sorted-map-by", "sorted-set", "sorted-set-by",
"sorted?", "special-symbol?", "spit", "split-at", "split-with", "str",
"string?", "struct", "struct-map", "subs", "subseq", "subvec", "supers",
"swap!", "swap-vals!", "symbol", "symbol?", "sync", "tagged-literal",
"tagged-literal?", "take", "take-last", "take-nth", "take-while", "test",
"the-ns", "thread-bound?", "time", "to-array", "to-array-2d",
"trampoline", "transduce", "transient", "tree-seq", "true?", "type",
"unchecked-add", "unchecked-add-int", "unchecked-byte", "unchecked-char",
"unchecked-dec", "unchecked-dec-int", "unchecked-divide-int",
"unchecked-double", "unchecked-float", "unchecked-inc",
"unchecked-inc-int", "unchecked-int", "unchecked-long",
"unchecked-multiply", "unchecked-multiply-int", "unchecked-negate",
"unchecked-negate-int", "unchecked-remainder-int", "unchecked-short",
"unchecked-subtract", "unchecked-subtract-int", "underive", "unquote",
"unquote-splicing", "unreduced", "unsigned-bit-shift-right", "update",
"update-in", "update-proxy", "uri?", "use", "uuid?", "val", "vals",
"var-get", "var-set", "var?", "vary-meta", "vec", "vector", "vector-of",
"vector?", "volatile!", "volatile?", "vreset!", "vswap!", "when",
"when-first", "when-let", "when-not", "when-some", "while",
"with-bindings", "with-bindings*", "with-in-str", "with-loading-context",
"with-local-vars", "with-meta", "with-open", "with-out-str",
"with-precision", "with-redefs", "with-redefs-fn", "xml-seq", "zero?",
"zipmap"];
var haveBodyParameter = [
"->", "->>", "as->", "binding", "bound-fn", "case", "catch", "comment",
"cond", "cond->", "cond->>", "condp", "def", "definterface", "defmethod",
"defn", "defmacro", "defprotocol", "defrecord", "defstruct", "deftype",
"do", "doseq", "dotimes", "doto", "extend", "extend-protocol",
"extend-type", "fn", "for", "future", "if", "if-let", "if-not", "if-some",
"let", "letfn", "locking", "loop", "ns", "proxy", "reify", "struct-map",
"some->", "some->>", "try", "when", "when-first", "when-let", "when-not",
"when-some", "while", "with-bindings", "with-bindings*", "with-in-str",
"with-loading-context", "with-local-vars", "with-meta", "with-open",
"with-out-str", "with-precision", "with-redefs", "with-redefs-fn"];
CodeMirror.registerHelper("hintWords", "clojure",
[].concat(atoms, specialForms, coreSymbols));
var atom = createLookupMap(atoms);
var specialForm = createLookupMap(specialForms);
var coreSymbol = createLookupMap(coreSymbols);
var hasBodyParameter = createLookupMap(haveBodyParameter);
var delimiter = /^(?:[\\\[\]\s"(),;@^`{}~]|$)/;
var numberLiteral = /^(?:[+\-]?\d+(?:(?:N|(?:[eE][+\-]?\d+))|(?:\.?\d*(?:M|(?:[eE][+\-]?\d+))?)|\/\d+|[xX][0-9a-fA-F]+|r[0-9a-zA-Z]+)?(?=[\\\[\]\s"#'(),;@^`{}~]|$))/;
var characterLiteral = /^(?:\\(?:backspace|formfeed|newline|return|space|tab|o[0-7]{3}|u[0-9A-Fa-f]{4}|x[0-9A-Fa-f]{4}|.)?(?=[\\\[\]\s"(),;@^`{}~]|$))/;
// simple-namespace := /^[^\\\/\[\]\d\s"#'(),;@^`{}~][^\\\[\]\s"(),;@^`{}~]*/
// simple-symbol := /^(?:\/|[^\\\/\[\]\d\s"#'(),;@^`{}~][^\\\[\]\s"(),;@^`{}~]*)/
// qualified-symbol := (<simple-namespace>(<.><simple-namespace>)*</>)?<simple-symbol>
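// Illustrative matches for the grammar above (not from the original source): "map",
// "clojure.core/map" and "my.ns/->record" all match; the namespace part before the "/" is optional.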
var qualifiedSymbol = /^(?:(?:[^\\\/\[\]\d\s"#'(),;@^`{}~][^\\\[\]\s"(),;@^`{}~]*(?:\.[^\\\/\[\]\d\s"#'(),;@^`{}~][^\\\[\]\s"(),;@^`{}~]*)*\/)?(?:\/|[^\\\/\[\]\d\s"#'(),;@^`{}~][^\\\[\]\s"(),;@^`{}~]*)*(?=[\\\[\]\s"(),;@^`{}~]|$))/;
function base(stream, state) {
if (stream.eatSpace() || stream.eat(",")) return ["space", null];
if (stream.match(numberLiteral)) return [null, "number"];
if (stream.match(characterLiteral)) return [null, "string-2"];
if (stream.eat(/^"/)) return (state.tokenize = inString)(stream, state);
if (stream.eat(/^[(\[{]/)) return ["open", "bracket"];
if (stream.eat(/^[)\]}]/)) return ["close", "bracket"];
if (stream.eat(/^;/)) {stream.skipToEnd(); return ["space", "comment"];}
if (stream.eat(/^[#'@^`~]/)) return [null, "meta"];
var matches = stream.match(qualifiedSymbol);
var symbol = matches && matches[0];
if (!symbol) {
// advance stream by at least one character so we don't get stuck.
stream.next();
stream.eatWhile(function (c) {return !is(c, delimiter);});
return [null, "error"];
}
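// A symbol named "comment" in head position, i.e. a "(comment ...)" form, hands off to
// inComment below so the whole form (up to the balancing close paren) is styled as a comment.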
if (symbol === "comment" && state.lastToken === "(")
return (state.tokenize = inComment)(stream, state);
if (is(symbol, atom) || symbol.charAt(0) === ":") return ["symbol", "atom"];
if (is(symbol, specialForm) || is(symbol, coreSymbol)) return ["symbol", "keyword"];
if (state.lastToken === "(") return ["symbol", "builtin"]; // other operator
return ["symbol", "variable"];
}
function inString(stream, state) {
var escaped = false, next;
while (next = stream.next()) {
if (next === "\"" && !escaped) {state.tokenize = base; break;}
escaped = !escaped && next === "\\";
}
return [null, "string"];
}
function inComment(stream, state) {
var parenthesisCount = 1;
var next;
while (next = stream.next()) {
if (next === ")") parenthesisCount--;
if (next === "(") parenthesisCount++;
if (parenthesisCount === 0) {
stream.backUp(1);
state.tokenize = base;
break;
}
}
return ["space", "comment"];
}
function createLookupMap(words) {
var obj = {};
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
function is(value, test) {
if (test instanceof RegExp) return test.test(value);
if (test instanceof Object) return test.propertyIsEnumerable(value);
}
return {
startState: function () {
return {
ctx: {prev: null, start: 0, indentTo: 0},
lastToken: null,
tokenize: base
};
},
token: function (stream, state) {
if (stream.sol() && (typeof state.ctx.indentTo !== "number"))
state.ctx.indentTo = state.ctx.start + 1;
var typeStylePair = state.tokenize(stream, state);
var type = typeStylePair[0];
var style = typeStylePair[1];
var current = stream.current();
if (type !== "space") {
if (state.lastToken === "(" && state.ctx.indentTo === null) {
if (type === "symbol" && is(current, hasBodyParameter))
state.ctx.indentTo = state.ctx.start + options.indentUnit;
else state.ctx.indentTo = "next";
} else if (state.ctx.indentTo === "next") {
state.ctx.indentTo = stream.column();
}
state.lastToken = current;
}
if (type === "open")
state.ctx = {prev: state.ctx, start: stream.column(), indentTo: null};
else if (type === "close") state.ctx = state.ctx.prev || state.ctx;
return style;
},
indent: function (state) {
var i = state.ctx.indentTo;
return (typeof i === "number") ?
i :
state.ctx.start + 1;
},
closeBrackets: {pairs: "()[]{}\"\""},
lineComment: ";;"
};
});
CodeMirror.defineMIME("text/x-clojure", "clojure");
CodeMirror.defineMIME("text/x-clojurescript", "clojure");
CodeMirror.defineMIME("application/edn", "clojure");
});
|
PypiClean
|
/privex_jsonrpc-1.3.0-py3-none-any.whl/privex/jsonrpc/helpers.py
|
import logging
from decimal import Decimal
from privex.jsonrpc.JsonRPC import JsonRPC
from typing import List, Union, Dict
log = logging.getLogger(__name__)
class BitcoinRPC(JsonRPC):
"""
Wrapper class for JsonRPC, with default host 127.0.0.1 and port 8332
Contains pre-defined methods with pydoc for interacting with `bitcoind` compatible JsonRPC services
including most coin daemons forked from Bitcoin, e.g. litecoind, dogecoind etc.
If a method is not defined, you can still use it! You just won't get any IDE hints with the parameters.
Basic usage (by default, connects to http://127.0.0.1:8332):
>>> j = BitcoinRPC(username='bitcoinrpc', password='somesecurepassword')
>>> j.getbalance()
Decimal(0.2456337)
"""
def __init__(self, hostname='127.0.0.1', port=8332, username=None, password=None, ssl=False, timeout=120,
url: str = '', auth: str = 'plain'):
super().__init__(
hostname=hostname, port=port, username=username, password=password,
ssl=ssl, timeout=timeout, url=url, auth=auth
)
def getnewaddress(self, account="", address_type=None) -> str:
"""
Generate a new crypto address and return it as a string.
:param account: Name of the account to store address in. Default is blank ``""``
:param address_type: The address type to use. Options are ``legacy``, ``p2sh-segwit``, and ``bech32``.
:return: string - the address that was generated
"""
if address_type is None:
return self.call('getnewaddress', account)
return self.call('getnewaddress', account, address_type)
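# Usage sketch for the address_type parameter (assumes a daemon recent enough to support
# address types; the returned address is illustrative only):
# >>> rpc = BitcoinRPC(username='bitcoinrpc', password='somesecurepassword')
# >>> rpc.getnewaddress(address_type='bech32')
# 'bc1q...'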
def getbalance(self, account="*", confirmations: int = 0, watch_only=False) -> Decimal:
"""
Get the current wallet balance as a Decimal
:param str account: DEPRECATED - Get the balance of this wallet account, ``*`` means all accounts.
:param int confirmations: Get wallet balance that has at least this many confirms
:param bool watch_only: Include "Watch Only" addresses in the balance figure
:return Decimal balance: The total balance of the given account
"""
bal = self.call('getbalance', account, confirmations, watch_only)
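# Some daemons return the balance as a JSON float; formatting it to 8 decimal places first
# keeps binary-float representation noise out of the Decimal that is returned.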
if type(bal) == float:
bal = '{0:.8f}'.format(bal)
return Decimal(bal)
def getreceivedbyaddress(self, address, confirmations: int = 0) -> Decimal:
"""
Get the total amount of coins received by an address (must exist in the wallet)
:param str address: The address to lookup
:param int confirmations: Get received amount that has at least this many confirms
:return Decimal balance: The total amount of coins received by an address.
"""
bal = self.call('getreceivedbyaddress', address, confirmations)
if type(bal) == float:
bal = '{0:.8f}'.format(bal)
return Decimal(bal)
def sendtoaddress(self, address, amount: Union[float, str, Decimal], comment="", comment_to="",
subtractfee: bool = False, force_float=True) -> str:
"""
Send coins to an address
:param str address: The destination address to send coins to
:param float amount: The amount of coins to send. If the coin supports string amounts, see the ``force_float`` param.
:param str comment: A comment used to store what the transaction is for.
:param str comment_to: A comment, representing the name of the person or organization you're sending to.
:param bool subtractfee: (Default False) If set to True, reduce the sending amount to cover the TX fee.
:param bool force_float: (Default True) If set to True, the ``amount`` parameter will be casted to a float
before sending via JSONRPC. If you're dealing with a coin daemon that can handle
string amounts, set this to False and pass amount as a str
:return str txid: The transaction ID for this "send coins" transaction.
"""
if force_float:
amount = float(amount)
return self.call('sendtoaddress', address, amount, comment, comment_to, subtractfee)
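# Usage sketch for the force_float behaviour documented above (address and txid are
# illustrative; assumes the daemon accepts string amounts when force_float=False):
# >>> rpc.sendtoaddress('LcHKxGz...', '0.10000000', comment='refund', force_float=False)
# 'a1b2c3d4...'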
def listtransactions(self, account="*", count: int = 10, skip: int = 0, watch_only=False) -> List[dict]:
"""
List transactions sent/received/generated by an account, or all accounts
:param account: Account to list TXs for
:param count: Load this many recent TXs
:param skip: Skip this many recent TXs (for pagination)
:param watch_only: Include watchonly addresses
:return: [ {account, address, category, amount, label, vout, fee, confirmations, trusted, generated,
blockhash, blockindex, blocktime, txid, walletconflicts, time, timereceived, comment,
to, otheraccount, bip125-replaceable, abandoned}, ... ]
"""
return self.call('listtransactions', account, count, skip, watch_only)
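# Pagination sketch: `count` and `skip` page through the wallet history,
# e.g. the second page of 25 transactions:
# >>> page_two = rpc.listtransactions(count=25, skip=25)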
def getblockchaininfo(self) -> dict:
"""
Get information about the blockchain, such as the current block/header height, network difficulty etc.
:return dict blockchaininfo: Returns blockchain information as a dict, in this format
Return format::
{
chain:str, blocks:int, headers: int, bestblockhash: str, difficulty: float,
mediantime: int, verificationprogress: float, initialblockdownload: bool,
chainwork: str, size_on_disk: int, pruned: bool, softforks: List[dict],
bip9_softforks: Dict[dict], warnings: str
}
"""
return self.call('getblockchaininfo')
def getnetworkinfo(self) -> dict:
"""
Get information about the network, such as daemon version, relay fees, total connections etc.
:return dict networkinfo: Returns network information as a dict, in this format
Return format::
{
version:int, subversion:str, localservices:str, localrelay:bool,
timeoffset:int, networkactive:bool, connections:int, networks:List[dict],
relayfee:float, incrementalfee:float, localaddresses:List[dict], warnings:str
}
"""
return self.call('getnetworkinfo')
def getinfo(self) -> dict:
"""
WARNING: This is deprecated in favour of getnetworkinfo/getblockchaininfo, and is only here for compatibility
with older cryptocurrency daemons.
:return dict daemoninfo: Various status info, such as current block, balance etc. See below.
Return format::
{
version:int, protocolversion: int, walletversion: int, balance: float, blocks:int,
timeoffset: int, connections: int, proxy: str, difficulty: float, testnet: bool,
keypoololdest: int, keypoolsize: int, paytxfee: float, relayfee: float, warnings: str
}
"""
return self.call('getinfo')
class LitecoinRPC(BitcoinRPC):
"""
Wrapper class for JsonRPC (via BitcoinRPC), with default host 127.0.0.1 and port 9332
"""
def __init__(self, hostname='127.0.0.1', port=9332, username=None, password=None, ssl=False, timeout=120,
url: str = '', auth: str = 'plain'):
super().__init__(
hostname=hostname, port=port, username=username, password=password,
ssl=ssl, timeout=timeout, url=url, auth=auth
)
class SteemEngineRPC(JsonRPC):
"""
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Python Simple JSON RPC library |
| License: X11/MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| |
+===================================================+
"""
DEF_HOST = 'api.steem-engine.net'
DEF_URL = '/rpc/contracts'
def __init__(self, hostname=DEF_HOST, port=443, username=None, password=None, ssl=True, timeout=120, url=DEF_URL, auth='plain'):
super().__init__(
hostname=hostname, port=port, username=username, password=password,
ssl=ssl, timeout=timeout, url=url, auth=auth
)
def getcontract(self, name: str) -> dict:
"""
Returns information about a given contract, such as 'tokens'
:param name: Name of the contract, e.g. tokens
:return: None if not found
:return: {name, owner, code, codeHash, tables, $loki}
"""
return self.call('getContract', name=name)
def findone(self, contract: str, table: str, query: dict) -> dict:
"""
Returns the first result of a contract table query as a dictionary
>>> rpc = SteemEngineRPC()
>>> t = rpc.findone(contract='tokens',table='tokens',query=dict(symbol='ENG'))
>>> t['name']
'Steem Engine Token'
:param contract: Name of the contract, e.g. tokens
:param table: The table of the contract to query, e.g. balances
:param query: A dictionary query for filtering results, e.g. {'account': 'someguy123'}
:return: None if not found
:return: Dictionary containing the row data
"""
return self.call('findOne', contract=contract, table=table, query=query)
def find(self, contract, table, query: dict = None, limit: int = 1000,
offset: int = 0, indexes: list = None) -> list:
"""
Returns a list of matching rows for a given contract table query
Example - Get a list of all tokens (max 1000 results by default):
>>> rpc = SteemEngineRPC()
>>> t = rpc.find(contract='tokens',table='tokens')
:param contract: Name of the contract, e.g. tokens
:param table: The table of the contract to query, e.g. balances
:param query: A dictionary query for filtering results, e.g. {'account': 'someguy123'} (Default: {})
:param limit: Maximum results to retrieve
:param offset: Skip this many results
:param indexes:
:return: A list of matching rows, as dict's
"""
return self.call(
'find',
contract=contract,
table=table,
query=query if query is not None else {},
limit=limit,
offset=offset,
indexes=indexes if indexes is not None else []
)
"""
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Python Simple JSON RPC library |
| License: X11/MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| |
+===================================================+
Python Json RPC - A simple library for interacting with JsonRPC services
Copyright (c) 2019 Privex Inc. ( https://www.privex.io )
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name(s) of the above copyright holders shall not be used in advertising or
otherwise to promote the sale, use or other dealings in this Software without prior written authorization.
"""
|
PypiClean
|
/jupyterhub_url_sharing-0.1.0.tar.gz/jupyterhub_url_sharing-0.1.0/node_modules/@typescript-eslint/types/README.md
|
<h1 align="center">TypeScript-ESTree Types</h1>
<p align="center">
<img src="https://github.com/typescript-eslint/typescript-eslint/workflows/CI/badge.svg" alt="CI" />
<a href="https://www.npmjs.com/package/@typescript-eslint/types"><img src="https://img.shields.io/npm/v/@typescript-eslint/types.svg?style=flat-square" alt="NPM Version" /></a>
<a href="https://www.npmjs.com/package/@typescript-eslint/types"><img src="https://img.shields.io/npm/dm/@typescript-eslint/types.svg?style=flat-square" alt="NPM Downloads" /></a>
</p>
This package exists to help us reduce dependency cycles between the typescript-eslint packages and provide lighter-weight packages at runtime.
You probably don't want to use it directly.
If you're building an ESLint plugin, consider using [`@typescript-eslint/experimental-utils`](../experimental-utils).
If you're parsing TypeScript code, consider using [`@typescript-eslint/typescript-estree`](../typescript-estree).
## Contributing
[See the contributing guide here](../../CONTRIBUTING.md)
|
PypiClean
|
/fake_bpy_module_2.78-20230117-py3-none-any.whl/bl_ui/properties_texture.py
|
import sys
import typing
import bpy_types
import rna_prop_ui
GenericType = typing.TypeVar("GenericType")
class TEXTURE_MT_envmap_specials(bpy_types.Menu, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, filter_ext):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_MT_specials(bpy_types.Menu, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, filter_ext):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_UL_texslots(bpy_types.UIList, bpy_types._GenericUI):
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TextureButtonsPanel:
bl_context = None
''' '''
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def poll(self, context):
'''
'''
pass
class TEXTURE_PT_colors(TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_context_texture(TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_custom_props(TextureButtonsPanel, rna_prop_ui.PropertyPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_pointdensity(TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_pointdensity_turbulence(TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_preview(TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_voxeldata(TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TextureSlotPanel(TextureButtonsPanel):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def poll(self, context):
'''
'''
pass
class TextureTypePanel(TextureButtonsPanel):
bl_context = None
''' '''
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def poll(self, context):
'''
'''
pass
class TEXTURE_PT_influence(TextureSlotPanel, TextureButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_mapping(TextureSlotPanel, TextureButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_blend(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_clouds(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_distortednoise(TextureTypePanel, TextureButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_envmap(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_envmap_sampling(TextureTypePanel, TextureButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_image(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_image_mapping(TextureTypePanel, TextureButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_image_sampling(TextureTypePanel, TextureButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_bge(self, context):
'''
'''
pass
def draw_bi(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_magic(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_marble(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_musgrave(TextureTypePanel, TextureButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_ocean(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_stucci(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_voronoi(TextureTypePanel, TextureButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class TEXTURE_PT_wood(TextureTypePanel, TextureButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
tex_type = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
def context_tex_datablock(context):
'''
'''
pass
def id_tex_datablock(bid):
'''
'''
pass
def texture_filter_common(tex, layout):
'''
'''
pass
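# A minimal illustrative sketch (not part of this stub module): what a concrete
# texture panel looks like against the real Blender 2.7x API that these stubs
# mirror. `bpy`, `bpy.types.Panel`, `layout.prop` and `bpy.utils.register_class`
# are Blender's own names; the panel identifier and the draw body are assumptions
# made for this example only.
import bpy


class TEXTURE_PT_example(bpy.types.Panel):
    bl_label = "Example"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "texture"

    def draw(self, context):
        # Expose the active texture's type drop-down, similar to what the real
        # TEXTURE_PT_context_texture panel shows.
        layout = self.layout
        tex = context.texture
        if tex is not None:
            layout.prop(tex, "type")


bpy.utils.register_class(TEXTURE_PT_example)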
|
PypiClean
|
/fit_tool-0.9.13-py3-none-any.whl/fit_tool/profile/messages/ohr_settings_message.py
|
from typing import List as list  # aliased so the list[...] annotations below also work on Python versions before 3.9
from typing import Optional
from fit_tool.base_type import BaseType
from fit_tool.data_message import DataMessage
from fit_tool.definition_message import DefinitionMessage
from fit_tool.developer_field import DeveloperField
from fit_tool.endian import Endian
from fit_tool.field import Field
from fit_tool.profile.profile_type import *
class OhrSettingsMessage(DataMessage):
ID = 188
NAME = 'ohr_settings'
@staticmethod
def __get_field_size(definition_message: DefinitionMessage, field_id: int) -> int:
size = 0
if definition_message:
field_definition = definition_message.get_field_definition(field_id)
if field_definition:
size = field_definition.size
return size
def __init__(self, definition_message=None, developer_fields=None, local_id: int = 0,
endian: Endian = Endian.LITTLE):
super().__init__(name=OhrSettingsMessage.NAME,
global_id=OhrSettingsMessage.ID,
local_id=definition_message.local_id if definition_message else local_id,
endian=definition_message.endian if definition_message else endian,
definition_message=definition_message,
developer_fields=developer_fields,
fields=[
TimestampField(
size=self.__get_field_size(definition_message, TimestampField.ID),
growable=definition_message is None),
OhrSettingsEnabledField(
size=self.__get_field_size(definition_message, OhrSettingsEnabledField.ID),
growable=definition_message is None)
])
self.growable = self.definition_message is None
@classmethod
def from_bytes(cls, definition_message: DefinitionMessage, developer_fields: list[DeveloperField],
bytes_buffer: bytes, offset: int = 0):
message = cls(definition_message=definition_message, developer_fields=developer_fields)
message.read_from_bytes(bytes_buffer, offset)
return message
# timestamp : milliseconds from January 1st, 1970 at 00:00:00 UTC
@property
def timestamp(self) -> Optional[int]:
field = self.get_field(TimestampField.ID)
if field and field.is_valid():
sub_field = field.get_valid_sub_field(self.fields)
return field.get_value(sub_field=sub_field)
else:
return None
# timestamp : milliseconds from January 1st, 1970 at 00:00:00 UTC
@timestamp.setter
def timestamp(self, value: int):
field = self.get_field(TimestampField.ID)
if field:
if value is None:
field.clear()
else:
sub_field = field.get_valid_sub_field(self.fields)
field.set_value(0, value, sub_field)
@property
def enabled(self) -> Optional[SwitchType]:
field = self.get_field(OhrSettingsEnabledField.ID)
if field and field.is_valid():
sub_field = field.get_valid_sub_field(self.fields)
return field.get_value(sub_field=sub_field)
else:
return None
@enabled.setter
def enabled(self, value: SwitchType):
field = self.get_field(OhrSettingsEnabledField.ID)
if field:
if value is None:
field.clear()
else:
sub_field = field.get_valid_sub_field(self.fields)
field.set_value(0, value, sub_field)
class TimestampField(Field):
ID = 253
def __init__(self, size: int = 0, growable: bool = True):
super().__init__(
name='timestamp',
field_id=self.ID,
base_type=BaseType.UINT32,
offset=-631065600000,
scale=0.001,
size=size,
units='ms',
type_name='date_time',
growable=growable,
sub_fields=[
]
)
class OhrSettingsEnabledField(Field):
ID = 0
def __init__(self, size: int = 0, growable: bool = True):
super().__init__(
name='enabled',
field_id=self.ID,
base_type=BaseType.ENUM,
offset=0,
scale=1,
size=size,
growable=growable,
sub_fields=[
]
)
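# --- Usage sketch (illustrative, not part of the generated profile module) ---
# Builds an OhrSettingsMessage in memory and reads the decoded properties back.
# Only the fields defined above are touched; `SwitchType.ON` is an assumption
# about the enum re-exported by the wildcard import of profile_type at the top
# of this file.
if __name__ == '__main__':
    import time

    message = OhrSettingsMessage()
    # The timestamp property is exposed in milliseconds since the Unix epoch
    # (see the scale/offset on TimestampField above).
    message.timestamp = round(time.time() * 1000)
    message.enabled = SwitchType.ON  # assumed member of the SwitchType enum
    print(message.timestamp, message.enabled)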
|
PypiClean
|
/v2/model/nova_server.py
|
import pprint
import re
import six
class NovaServer:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'id': 'str',
'status': 'str',
'created': 'str',
'updated': 'str',
'flavor': 'NovaServerFlavor',
'image': 'NovaServerImage',
'tenant_id': 'str',
'key_name': 'str',
'user_id': 'str',
'metadata': 'dict(str, str)',
'host_id': 'str',
'addresses': 'dict(str, list[NovaNetwork])',
'security_groups': 'list[NovaServerSecurityGroup]',
'links': 'list[NovaLink]',
'os_dc_fdisk_config': 'str',
'os_ext_a_zavailability_zone': 'str',
'os_ext_srv_att_rhost': 'str',
'os_ext_srv_att_rhypervisor_hostname': 'str',
'os_ext_srv_att_rinstance_name': 'str',
'os_ext_st_spower_state': 'int',
'os_ext_st_stask_state': 'str',
'os_ext_st_svm_state': 'str',
'os_srv_us_glaunched_at': 'str',
'os_srv_us_gterminated_at': 'str',
'os_extended_volumesvolumes_attached': 'list[NovaServerVolume]',
'fault': 'NovaServerFault',
'description': 'str',
'host_status': 'str',
'os_ext_srv_att_rhostname': 'str',
'os_ext_srv_att_rreservation_id': 'str',
'os_ext_srv_att_rlaunch_index': 'int',
'os_ext_srv_att_rkernel_id': 'str',
'os_ext_srv_att_rramdisk_id': 'str',
'os_ext_srv_att_rroot_device_name': 'str',
'os_ext_srv_att_ruser_data': 'str',
'tags': 'list[str]',
'locked': 'bool',
'access_i_pv4': 'str',
'access_i_pv6': 'str',
'config_drive': 'str',
'progress': 'int'
}
attribute_map = {
'name': 'name',
'id': 'id',
'status': 'status',
'created': 'created',
'updated': 'updated',
'flavor': 'flavor',
'image': 'image',
'tenant_id': 'tenant_id',
'key_name': 'key_name',
'user_id': 'user_id',
'metadata': 'metadata',
'host_id': 'hostId',
'addresses': 'addresses',
'security_groups': 'security_groups',
'links': 'links',
'os_dc_fdisk_config': 'OS-DCF:diskConfig',
'os_ext_a_zavailability_zone': 'OS-EXT-AZ:availability_zone',
'os_ext_srv_att_rhost': 'OS-EXT-SRV-ATTR:host',
'os_ext_srv_att_rhypervisor_hostname': 'OS-EXT-SRV-ATTR:hypervisor_hostname',
'os_ext_srv_att_rinstance_name': 'OS-EXT-SRV-ATTR:instance_name',
'os_ext_st_spower_state': 'OS-EXT-STS:power_state',
'os_ext_st_stask_state': 'OS-EXT-STS:task_state',
'os_ext_st_svm_state': 'OS-EXT-STS:vm_state',
'os_srv_us_glaunched_at': 'OS-SRV-USG:launched_at',
'os_srv_us_gterminated_at': 'OS-SRV-USG:terminated_at',
'os_extended_volumesvolumes_attached': 'os-extended-volumes:volumes_attached',
'fault': 'fault',
'description': 'description',
'host_status': 'host_status',
'os_ext_srv_att_rhostname': 'OS-EXT-SRV-ATTR:hostname',
'os_ext_srv_att_rreservation_id': 'OS-EXT-SRV-ATTR:reservation_id',
'os_ext_srv_att_rlaunch_index': 'OS-EXT-SRV-ATTR:launch_index',
'os_ext_srv_att_rkernel_id': 'OS-EXT-SRV-ATTR:kernel_id',
'os_ext_srv_att_rramdisk_id': 'OS-EXT-SRV-ATTR:ramdisk_id',
'os_ext_srv_att_rroot_device_name': 'OS-EXT-SRV-ATTR:root_device_name',
'os_ext_srv_att_ruser_data': 'OS-EXT-SRV-ATTR:user_data',
'tags': 'tags',
'locked': 'locked',
'access_i_pv4': 'accessIPv4',
'access_i_pv6': 'accessIPv6',
'config_drive': 'config_drive',
'progress': 'progress'
}
def __init__(self, name=None, id=None, status=None, created=None, updated=None, flavor=None, image=None, tenant_id=None, key_name=None, user_id=None, metadata=None, host_id=None, addresses=None, security_groups=None, links=None, os_dc_fdisk_config=None, os_ext_a_zavailability_zone=None, os_ext_srv_att_rhost=None, os_ext_srv_att_rhypervisor_hostname=None, os_ext_srv_att_rinstance_name=None, os_ext_st_spower_state=None, os_ext_st_stask_state=None, os_ext_st_svm_state=None, os_srv_us_glaunched_at=None, os_srv_us_gterminated_at=None, os_extended_volumesvolumes_attached=None, fault=None, description=None, host_status=None, os_ext_srv_att_rhostname=None, os_ext_srv_att_rreservation_id=None, os_ext_srv_att_rlaunch_index=None, os_ext_srv_att_rkernel_id=None, os_ext_srv_att_rramdisk_id=None, os_ext_srv_att_rroot_device_name=None, os_ext_srv_att_ruser_data=None, tags=None, locked=None, access_i_pv4=None, access_i_pv6=None, config_drive=None, progress=None):
"""NovaServer - a model defined in huaweicloud sdk"""
self._name = None
self._id = None
self._status = None
self._created = None
self._updated = None
self._flavor = None
self._image = None
self._tenant_id = None
self._key_name = None
self._user_id = None
self._metadata = None
self._host_id = None
self._addresses = None
self._security_groups = None
self._links = None
self._os_dc_fdisk_config = None
self._os_ext_a_zavailability_zone = None
self._os_ext_srv_att_rhost = None
self._os_ext_srv_att_rhypervisor_hostname = None
self._os_ext_srv_att_rinstance_name = None
self._os_ext_st_spower_state = None
self._os_ext_st_stask_state = None
self._os_ext_st_svm_state = None
self._os_srv_us_glaunched_at = None
self._os_srv_us_gterminated_at = None
self._os_extended_volumesvolumes_attached = None
self._fault = None
self._description = None
self._host_status = None
self._os_ext_srv_att_rhostname = None
self._os_ext_srv_att_rreservation_id = None
self._os_ext_srv_att_rlaunch_index = None
self._os_ext_srv_att_rkernel_id = None
self._os_ext_srv_att_rramdisk_id = None
self._os_ext_srv_att_rroot_device_name = None
self._os_ext_srv_att_ruser_data = None
self._tags = None
self._locked = None
self._access_i_pv4 = None
self._access_i_pv6 = None
self._config_drive = None
self._progress = None
self.discriminator = None
self.name = name
self.id = id
self.status = status
self.created = created
self.updated = updated
self.flavor = flavor
self.image = image
self.tenant_id = tenant_id
self.key_name = key_name
self.user_id = user_id
self.metadata = metadata
self.host_id = host_id
self.addresses = addresses
self.security_groups = security_groups
self.links = links
self.os_dc_fdisk_config = os_dc_fdisk_config
self.os_ext_a_zavailability_zone = os_ext_a_zavailability_zone
self.os_ext_srv_att_rhost = os_ext_srv_att_rhost
self.os_ext_srv_att_rhypervisor_hostname = os_ext_srv_att_rhypervisor_hostname
self.os_ext_srv_att_rinstance_name = os_ext_srv_att_rinstance_name
self.os_ext_st_spower_state = os_ext_st_spower_state
self.os_ext_st_stask_state = os_ext_st_stask_state
self.os_ext_st_svm_state = os_ext_st_svm_state
self.os_srv_us_glaunched_at = os_srv_us_glaunched_at
self.os_srv_us_gterminated_at = os_srv_us_gterminated_at
self.os_extended_volumesvolumes_attached = os_extended_volumesvolumes_attached
if fault is not None:
self.fault = fault
if description is not None:
self.description = description
self.host_status = host_status
if os_ext_srv_att_rhostname is not None:
self.os_ext_srv_att_rhostname = os_ext_srv_att_rhostname
if os_ext_srv_att_rreservation_id is not None:
self.os_ext_srv_att_rreservation_id = os_ext_srv_att_rreservation_id
if os_ext_srv_att_rlaunch_index is not None:
self.os_ext_srv_att_rlaunch_index = os_ext_srv_att_rlaunch_index
if os_ext_srv_att_rkernel_id is not None:
self.os_ext_srv_att_rkernel_id = os_ext_srv_att_rkernel_id
if os_ext_srv_att_rramdisk_id is not None:
self.os_ext_srv_att_rramdisk_id = os_ext_srv_att_rramdisk_id
if os_ext_srv_att_rroot_device_name is not None:
self.os_ext_srv_att_rroot_device_name = os_ext_srv_att_rroot_device_name
if os_ext_srv_att_ruser_data is not None:
self.os_ext_srv_att_ruser_data = os_ext_srv_att_ruser_data
self.tags = tags
if locked is not None:
self.locked = locked
self.access_i_pv4 = access_i_pv4
self.access_i_pv6 = access_i_pv6
self.config_drive = config_drive
self.progress = progress
@property
def name(self):
"""Gets the name of this NovaServer.
Name of the cloud server.
:return: The name of this NovaServer.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this NovaServer.
Name of the cloud server.
:param name: The name of this NovaServer.
:type: str
"""
self._name = name
@property
def id(self):
"""Gets the id of this NovaServer.
Unique identifier of the cloud server.
:return: The id of this NovaServer.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this NovaServer.
Unique identifier of the cloud server.
:param id: The id of this NovaServer.
:type: str
"""
self._id = id
@property
def status(self):
"""Gets the status of this NovaServer.
Current status of the cloud server. Valid values: ACTIVE, BUILD, DELETED, ERROR, HARD_REBOOT, MIGRATING, REBOOT, RESIZE, REVERT_RESIZE, SHELVED, SHELVED_OFFLOADED, SHUTOFF, UNKNOWN, VERIFY_RESIZE
:return: The status of this NovaServer.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this NovaServer.
Current status of the cloud server. Valid values: ACTIVE, BUILD, DELETED, ERROR, HARD_REBOOT, MIGRATING, REBOOT, RESIZE, REVERT_RESIZE, SHELVED, SHELVED_OFFLOADED, SHUTOFF, UNKNOWN, VERIFY_RESIZE
:param status: The status of this NovaServer.
:type: str
"""
self._status = status
@property
def created(self):
"""Gets the created of this NovaServer.
Creation time of the cloud server. Time format example: 2019-05-22T07:48:53Z
:return: The created of this NovaServer.
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this NovaServer.
Creation time of the cloud server. Time format example: 2019-05-22T07:48:53Z
:param created: The created of this NovaServer.
:type: str
"""
self._created = created
@property
def updated(self):
"""Gets the updated of this NovaServer.
Last update time of the cloud server. Time format example: 2019-05-22T07:48:53Z
:return: The updated of this NovaServer.
:rtype: str
"""
return self._updated
@updated.setter
def updated(self, updated):
"""Sets the updated of this NovaServer.
Last update time of the cloud server. Time format example: 2019-05-22T07:48:53Z
:param updated: The updated of this NovaServer.
:type: str
"""
self._updated = updated
@property
def flavor(self):
"""Gets the flavor of this NovaServer.
:return: The flavor of this NovaServer.
:rtype: NovaServerFlavor
"""
return self._flavor
@flavor.setter
def flavor(self, flavor):
"""Sets the flavor of this NovaServer.
:param flavor: The flavor of this NovaServer.
:type: NovaServerFlavor
"""
self._flavor = flavor
@property
def image(self):
"""Gets the image of this NovaServer.
:return: The image of this NovaServer.
:rtype: NovaServerImage
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this NovaServer.
:param image: The image of this NovaServer.
:type: NovaServerImage
"""
self._image = image
@property
def tenant_id(self):
"""Gets the tenant_id of this NovaServer.
Tenant ID that the cloud server belongs to, i.e. the project ID; it is the same concept as project_id.
:return: The tenant_id of this NovaServer.
:rtype: str
"""
return self._tenant_id
@tenant_id.setter
def tenant_id(self, tenant_id):
"""Sets the tenant_id of this NovaServer.
Tenant ID that the cloud server belongs to, i.e. the project ID; it is the same concept as project_id.
:param tenant_id: The tenant_id of this NovaServer.
:type: str
"""
self._tenant_id = tenant_id
@property
def key_name(self):
"""Gets the key_name of this NovaServer.
Name of the SSH key.
:return: The key_name of this NovaServer.
:rtype: str
"""
return self._key_name
@key_name.setter
def key_name(self, key_name):
"""Sets the key_name of this NovaServer.
Name of the SSH key.
:param key_name: The key_name of this NovaServer.
:type: str
"""
self._key_name = key_name
@property
def user_id(self):
"""Gets the user_id of this NovaServer.
ID of the user that the cloud server belongs to.
:return: The user_id of this NovaServer.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this NovaServer.
ID of the user that the cloud server belongs to.
:param user_id: The user_id of this NovaServer.
:type: str
"""
self._user_id = user_id
@property
def metadata(self):
"""Gets the metadata of this NovaServer.
Metadata of the cloud server.
:return: The metadata of this NovaServer.
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this NovaServer.
Metadata of the cloud server.
:param metadata: The metadata of this NovaServer.
:type: dict(str, str)
"""
self._metadata = metadata
@property
def host_id(self):
"""Gets the host_id of this NovaServer.
ID of the host where the cloud server resides.
:return: The host_id of this NovaServer.
:rtype: str
"""
return self._host_id
@host_id.setter
def host_id(self, host_id):
"""Sets the host_id of this NovaServer.
ID of the host where the cloud server resides.
:param host_id: The host_id of this NovaServer.
:type: str
"""
self._host_id = host_id
@property
def addresses(self):
"""Gets the addresses of this NovaServer.
Network address information of the cloud server.
:return: The addresses of this NovaServer.
:rtype: dict(str, list[NovaNetwork])
"""
return self._addresses
@addresses.setter
def addresses(self, addresses):
"""Sets the addresses of this NovaServer.
Network address information of the cloud server.
:param addresses: The addresses of this NovaServer.
:type: dict(str, list[NovaNetwork])
"""
self._addresses = addresses
@property
def security_groups(self):
"""Gets the security_groups of this NovaServer.
List of security groups that the cloud server belongs to.
:return: The security_groups of this NovaServer.
:rtype: list[NovaServerSecurityGroup]
"""
return self._security_groups
@security_groups.setter
def security_groups(self, security_groups):
"""Sets the security_groups of this NovaServer.
List of security groups that the cloud server belongs to.
:param security_groups: The security_groups of this NovaServer.
:type: list[NovaServerSecurityGroup]
"""
self._security_groups = security_groups
@property
def links(self):
"""Gets the links of this NovaServer.
Shortcut link information related to the cloud server.
:return: The links of this NovaServer.
:rtype: list[NovaLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this NovaServer.
Shortcut link information related to the cloud server.
:param links: The links of this NovaServer.
:type: list[NovaLink]
"""
self._links = links
@property
def os_dc_fdisk_config(self):
"""Gets the os_dc_fdisk_config of this NovaServer.
Extended attribute: disk configuration mode, effective for cloud servers booted from an image. Valid values: - AUTO: the API builds the server with a single partition sized to the target disk and automatically resizes the file system to fill the whole partition. - MANUAL: the API builds the server using the partition scheme and file system of the source image; if the target disk is larger, the API leaves the remaining disk space unpartitioned.
:return: The os_dc_fdisk_config of this NovaServer.
:rtype: str
"""
return self._os_dc_fdisk_config
@os_dc_fdisk_config.setter
def os_dc_fdisk_config(self, os_dc_fdisk_config):
"""Sets the os_dc_fdisk_config of this NovaServer.
Extended attribute: disk configuration mode, effective for cloud servers booted from an image. Valid values: - AUTO: the API builds the server with a single partition sized to the target disk and automatically resizes the file system to fill the whole partition. - MANUAL: the API builds the server using the partition scheme and file system of the source image; if the target disk is larger, the API leaves the remaining disk space unpartitioned.
:param os_dc_fdisk_config: The os_dc_fdisk_config of this NovaServer.
:type: str
"""
self._os_dc_fdisk_config = os_dc_fdisk_config
@property
def os_ext_a_zavailability_zone(self):
"""Gets the os_ext_a_zavailability_zone of this NovaServer.
Extended attribute: availability zone code.
:return: The os_ext_a_zavailability_zone of this NovaServer.
:rtype: str
"""
return self._os_ext_a_zavailability_zone
@os_ext_a_zavailability_zone.setter
def os_ext_a_zavailability_zone(self, os_ext_a_zavailability_zone):
"""Sets the os_ext_a_zavailability_zone of this NovaServer.
Extended attribute: availability zone code.
:param os_ext_a_zavailability_zone: The os_ext_a_zavailability_zone of this NovaServer.
:type: str
"""
self._os_ext_a_zavailability_zone = os_ext_a_zavailability_zone
@property
def os_ext_srv_att_rhost(self):
"""Gets the os_ext_srv_att_rhost of this NovaServer.
Extended attribute: name of the host that the cloud server resides on.
:return: The os_ext_srv_att_rhost of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_rhost
@os_ext_srv_att_rhost.setter
def os_ext_srv_att_rhost(self, os_ext_srv_att_rhost):
"""Sets the os_ext_srv_att_rhost of this NovaServer.
Extended attribute: name of the host that the cloud server resides on.
:param os_ext_srv_att_rhost: The os_ext_srv_att_rhost of this NovaServer.
:type: str
"""
self._os_ext_srv_att_rhost = os_ext_srv_att_rhost
@property
def os_ext_srv_att_rhypervisor_hostname(self):
"""Gets the os_ext_srv_att_rhypervisor_hostname of this NovaServer.
Extended attribute: hypervisor hostname.
:return: The os_ext_srv_att_rhypervisor_hostname of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_rhypervisor_hostname
@os_ext_srv_att_rhypervisor_hostname.setter
def os_ext_srv_att_rhypervisor_hostname(self, os_ext_srv_att_rhypervisor_hostname):
"""Sets the os_ext_srv_att_rhypervisor_hostname of this NovaServer.
Extended attribute: hypervisor hostname.
:param os_ext_srv_att_rhypervisor_hostname: The os_ext_srv_att_rhypervisor_hostname of this NovaServer.
:type: str
"""
self._os_ext_srv_att_rhypervisor_hostname = os_ext_srv_att_rhypervisor_hostname
@property
def os_ext_srv_att_rinstance_name(self):
"""Gets the os_ext_srv_att_rinstance_name of this NovaServer.
Extended attribute: cloud server instance ID.
:return: The os_ext_srv_att_rinstance_name of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_rinstance_name
@os_ext_srv_att_rinstance_name.setter
def os_ext_srv_att_rinstance_name(self, os_ext_srv_att_rinstance_name):
"""Sets the os_ext_srv_att_rinstance_name of this NovaServer.
Extended attribute: cloud server instance ID.
:param os_ext_srv_att_rinstance_name: The os_ext_srv_att_rinstance_name of this NovaServer.
:type: str
"""
self._os_ext_srv_att_rinstance_name = os_ext_srv_att_rinstance_name
@property
def os_ext_st_spower_state(self):
"""Gets the os_ext_st_spower_state of this NovaServer.
Extended attribute: power state of the cloud server. Valid values: 0, 1, 2, 3, 4 - 0: pending - 1: running - 2: paused - 3: shutdown - 4: crashed
:return: The os_ext_st_spower_state of this NovaServer.
:rtype: int
"""
return self._os_ext_st_spower_state
@os_ext_st_spower_state.setter
def os_ext_st_spower_state(self, os_ext_st_spower_state):
"""Sets the os_ext_st_spower_state of this NovaServer.
Extended attribute: power state of the cloud server. Valid values: 0, 1, 2, 3, 4 - 0: pending - 1: running - 2: paused - 3: shutdown - 4: crashed
:param os_ext_st_spower_state: The os_ext_st_spower_state of this NovaServer.
:type: int
"""
self._os_ext_st_spower_state = os_ext_st_spower_state
@property
def os_ext_st_stask_state(self):
"""Gets the os_ext_st_stask_state of this NovaServer.
Extended attribute: task state of the cloud server. Valid values: SHUTOFF, RESIZE, REBUILD, VERIFY_RESIZE, REVERT_RESIZE, PAUSED, MIGRATING, SUSPENDED, RESCUE, ERROR, DELETED, SOFT_DELETED, SHELVED, SHELVED_OFFLOADED
:return: The os_ext_st_stask_state of this NovaServer.
:rtype: str
"""
return self._os_ext_st_stask_state
@os_ext_st_stask_state.setter
def os_ext_st_stask_state(self, os_ext_st_stask_state):
"""Sets the os_ext_st_stask_state of this NovaServer.
Extended attribute: task state of the cloud server. Valid values: SHUTOFF, RESIZE, REBUILD, VERIFY_RESIZE, REVERT_RESIZE, PAUSED, MIGRATING, SUSPENDED, RESCUE, ERROR, DELETED, SOFT_DELETED, SHELVED, SHELVED_OFFLOADED
:param os_ext_st_stask_state: The os_ext_st_stask_state of this NovaServer.
:type: str
"""
self._os_ext_st_stask_state = os_ext_st_stask_state
@property
def os_ext_st_svm_state(self):
"""Gets the os_ext_st_svm_state of this NovaServer.
Extended attribute: VM state of the cloud server. Valid values: ACTIVE, BUILDING, STOPPED, RESIZED, PAUSED, SUSPENDED, RESCUED, ERROR, DELETED, SOFT_DELETED, SHELVED, SHELVED_OFFLOADED
:return: The os_ext_st_svm_state of this NovaServer.
:rtype: str
"""
return self._os_ext_st_svm_state
@os_ext_st_svm_state.setter
def os_ext_st_svm_state(self, os_ext_st_svm_state):
"""Sets the os_ext_st_svm_state of this NovaServer.
Extended attribute: VM state of the cloud server. Valid values: ACTIVE, BUILDING, STOPPED, RESIZED, PAUSED, SUSPENDED, RESCUED, ERROR, DELETED, SOFT_DELETED, SHELVED, SHELVED_OFFLOADED
:param os_ext_st_svm_state: The os_ext_st_svm_state of this NovaServer.
:type: str
"""
self._os_ext_st_svm_state = os_ext_st_svm_state
@property
def os_srv_us_glaunched_at(self):
"""Gets the os_srv_us_glaunched_at of this NovaServer.
Extended attribute: launch time of the cloud server. Time format example: 2019-05-22T07:48:19.000000
:return: The os_srv_us_glaunched_at of this NovaServer.
:rtype: str
"""
return self._os_srv_us_glaunched_at
@os_srv_us_glaunched_at.setter
def os_srv_us_glaunched_at(self, os_srv_us_glaunched_at):
"""Sets the os_srv_us_glaunched_at of this NovaServer.
Extended attribute: launch time of the cloud server. Time format example: 2019-05-22T07:48:19.000000
:param os_srv_us_glaunched_at: The os_srv_us_glaunched_at of this NovaServer.
:type: str
"""
self._os_srv_us_glaunched_at = os_srv_us_glaunched_at
@property
def os_srv_us_gterminated_at(self):
"""Gets the os_srv_us_gterminated_at of this NovaServer.
Extended attribute: termination time of the cloud server. Time format example: 2019-05-22T07:48:19.000000
:return: The os_srv_us_gterminated_at of this NovaServer.
:rtype: str
"""
return self._os_srv_us_gterminated_at
@os_srv_us_gterminated_at.setter
def os_srv_us_gterminated_at(self, os_srv_us_gterminated_at):
"""Sets the os_srv_us_gterminated_at of this NovaServer.
Extended attribute: termination time of the cloud server. Time format example: 2019-05-22T07:48:19.000000
:param os_srv_us_gterminated_at: The os_srv_us_gterminated_at of this NovaServer.
:type: str
"""
self._os_srv_us_gterminated_at = os_srv_us_gterminated_at
@property
def os_extended_volumesvolumes_attached(self):
"""Gets the os_extended_volumesvolumes_attached of this NovaServer.
Information about the cloud disks attached to the cloud server.
:return: The os_extended_volumesvolumes_attached of this NovaServer.
:rtype: list[NovaServerVolume]
"""
return self._os_extended_volumesvolumes_attached
@os_extended_volumesvolumes_attached.setter
def os_extended_volumesvolumes_attached(self, os_extended_volumesvolumes_attached):
"""Sets the os_extended_volumesvolumes_attached of this NovaServer.
Information about the cloud disks attached to the cloud server.
:param os_extended_volumesvolumes_attached: The os_extended_volumesvolumes_attached of this NovaServer.
:type: list[NovaServerVolume]
"""
self._os_extended_volumesvolumes_attached = os_extended_volumesvolumes_attached
@property
def fault(self):
"""Gets the fault of this NovaServer.
:return: The fault of this NovaServer.
:rtype: NovaServerFault
"""
return self._fault
@fault.setter
def fault(self, fault):
"""Sets the fault of this NovaServer.
:param fault: The fault of this NovaServer.
:type: NovaServerFault
"""
self._fault = fault
@property
def description(self):
"""Gets the description of this NovaServer.
Description of the elastic cloud server. Supported in microversion 2.19 and later.
:return: The description of this NovaServer.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this NovaServer.
Description of the elastic cloud server. Supported in microversion 2.19 and later.
:param description: The description of this NovaServer.
:type: str
"""
self._description = description
@property
def host_status(self):
"""Gets the host_status of this NovaServer.
nova-compute status. - UP: the service is normal - UNKNOWN: the status is unknown - DOWN: the service is abnormal - MAINTENANCE: the service is under maintenance - empty string: the elastic cloud server has no host information
:return: The host_status of this NovaServer.
:rtype: str
"""
return self._host_status
@host_status.setter
def host_status(self, host_status):
"""Sets the host_status of this NovaServer.
nova-compute status. - UP: the service is normal - UNKNOWN: the status is unknown - DOWN: the service is abnormal - MAINTENANCE: the service is under maintenance - empty string: the elastic cloud server has no host information
:param host_status: The host_status of this NovaServer.
:type: str
"""
self._host_status = host_status
@property
def os_ext_srv_att_rhostname(self):
"""Gets the os_ext_srv_att_rhostname of this NovaServer.
Hostname of the elastic cloud server. Supported in microversion 2.3 and later.
:return: The os_ext_srv_att_rhostname of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_rhostname
@os_ext_srv_att_rhostname.setter
def os_ext_srv_att_rhostname(self, os_ext_srv_att_rhostname):
"""Sets the os_ext_srv_att_rhostname of this NovaServer.
Hostname of the elastic cloud server. Supported in microversion 2.3 and later.
:param os_ext_srv_att_rhostname: The os_ext_srv_att_rhostname of this NovaServer.
:type: str
"""
self._os_ext_srv_att_rhostname = os_ext_srv_att_rhostname
@property
def os_ext_srv_att_rreservation_id(self):
"""Gets the os_ext_srv_att_rreservation_id of this NovaServer.
Reservation ID of the elastic cloud server in batch-creation scenarios. Supported in microversion 2.3 and later.
:return: The os_ext_srv_att_rreservation_id of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_rreservation_id
@os_ext_srv_att_rreservation_id.setter
def os_ext_srv_att_rreservation_id(self, os_ext_srv_att_rreservation_id):
"""Sets the os_ext_srv_att_rreservation_id of this NovaServer.
Reservation ID of the elastic cloud server in batch-creation scenarios. Supported in microversion 2.3 and later.
:param os_ext_srv_att_rreservation_id: The os_ext_srv_att_rreservation_id of this NovaServer.
:type: str
"""
self._os_ext_srv_att_rreservation_id = os_ext_srv_att_rreservation_id
@property
def os_ext_srv_att_rlaunch_index(self):
"""Gets the os_ext_srv_att_rlaunch_index of this NovaServer.
Launch index of the elastic cloud server in batch-creation scenarios. Supported in microversion 2.3 and later.
:return: The os_ext_srv_att_rlaunch_index of this NovaServer.
:rtype: int
"""
return self._os_ext_srv_att_rlaunch_index
@os_ext_srv_att_rlaunch_index.setter
def os_ext_srv_att_rlaunch_index(self, os_ext_srv_att_rlaunch_index):
"""Sets the os_ext_srv_att_rlaunch_index of this NovaServer.
Launch index of the elastic cloud server in batch-creation scenarios. Supported in microversion 2.3 and later.
:param os_ext_srv_att_rlaunch_index: The os_ext_srv_att_rlaunch_index of this NovaServer.
:type: int
"""
self._os_ext_srv_att_rlaunch_index = os_ext_srv_att_rlaunch_index
@property
def os_ext_srv_att_rkernel_id(self):
"""Gets the os_ext_srv_att_rkernel_id of this NovaServer.
UUID of the kernel image when an AMI-format image is used; otherwise, left empty. Supported in microversion 2.3 and later.
:return: The os_ext_srv_att_rkernel_id of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_rkernel_id
@os_ext_srv_att_rkernel_id.setter
def os_ext_srv_att_rkernel_id(self, os_ext_srv_att_rkernel_id):
"""Sets the os_ext_srv_att_rkernel_id of this NovaServer.
UUID of the kernel image when an AMI-format image is used; otherwise, left empty. Supported in microversion 2.3 and later.
:param os_ext_srv_att_rkernel_id: The os_ext_srv_att_rkernel_id of this NovaServer.
:type: str
"""
self._os_ext_srv_att_rkernel_id = os_ext_srv_att_rkernel_id
@property
def os_ext_srv_att_rramdisk_id(self):
"""Gets the os_ext_srv_att_rramdisk_id of this NovaServer.
UUID of the ramdisk image if an AMI-format image is used; otherwise, left empty. Supported in microversion 2.3 and later.
:return: The os_ext_srv_att_rramdisk_id of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_rramdisk_id
@os_ext_srv_att_rramdisk_id.setter
def os_ext_srv_att_rramdisk_id(self, os_ext_srv_att_rramdisk_id):
"""Sets the os_ext_srv_att_rramdisk_id of this NovaServer.
UUID of the ramdisk image if an AMI-format image is used; otherwise, left empty. Supported in microversion 2.3 and later.
:param os_ext_srv_att_rramdisk_id: The os_ext_srv_att_rramdisk_id of this NovaServer.
:type: str
"""
self._os_ext_srv_att_rramdisk_id = os_ext_srv_att_rramdisk_id
@property
def os_ext_srv_att_rroot_device_name(self):
"""Gets the os_ext_srv_att_rroot_device_name of this NovaServer.
Device name of the ECS system disk. Supported in microversion 2.3 and later.
:return: The os_ext_srv_att_rroot_device_name of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_rroot_device_name
@os_ext_srv_att_rroot_device_name.setter
def os_ext_srv_att_rroot_device_name(self, os_ext_srv_att_rroot_device_name):
"""Sets the os_ext_srv_att_rroot_device_name of this NovaServer.
Device name of the ECS system disk. Supported in microversion 2.3 and later.
:param os_ext_srv_att_rroot_device_name: The os_ext_srv_att_rroot_device_name of this NovaServer.
:type: str
"""
self._os_ext_srv_att_rroot_device_name = os_ext_srv_att_rroot_device_name
@property
def os_ext_srv_att_ruser_data(self):
"""Gets the os_ext_srv_att_ruser_data of this NovaServer.
The user_data specified when the ECS was created. Supported in microversion 2.3 and later.
:return: The os_ext_srv_att_ruser_data of this NovaServer.
:rtype: str
"""
return self._os_ext_srv_att_ruser_data
@os_ext_srv_att_ruser_data.setter
def os_ext_srv_att_ruser_data(self, os_ext_srv_att_ruser_data):
"""Sets the os_ext_srv_att_ruser_data of this NovaServer.
The user_data specified when the ECS was created. Supported in microversion 2.3 and later.
:param os_ext_srv_att_ruser_data: The os_ext_srv_att_ruser_data of this NovaServer.
:type: str
"""
self._os_ext_srv_att_ruser_data = os_ext_srv_att_ruser_data
@property
def tags(self):
"""Gets the tags of this NovaServer.
Tag list of the ECS. After a recent upgrade of the tag feature, returned tag values follow these rules: - key and value are joined with "=", for example "key=value". - If value is an empty string, only key is returned.
:return: The tags of this NovaServer.
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this NovaServer.
Tag list of the ECS. After a recent upgrade of the tag feature, returned tag values follow these rules: - key and value are joined with "=", for example "key=value". - If value is an empty string, only key is returned.
:param tags: The tags of this NovaServer.
:type: list[str]
"""
self._tags = tags
@property
def locked(self):
"""Gets the locked of this NovaServer.
True when the ECS is locked; otherwise False. Supported in microversion 2.9 and later.
:return: The locked of this NovaServer.
:rtype: bool
"""
return self._locked
@locked.setter
def locked(self, locked):
"""Sets the locked of this NovaServer.
True when the ECS is locked; otherwise False. Supported in microversion 2.9 and later.
:param locked: The locked of this NovaServer.
:type: bool
"""
self._locked = locked
@property
def access_i_pv4(self):
"""Gets the access_i_pv4 of this NovaServer.
Reserved attribute.
:return: The access_i_pv4 of this NovaServer.
:rtype: str
"""
return self._access_i_pv4
@access_i_pv4.setter
def access_i_pv4(self, access_i_pv4):
"""Sets the access_i_pv4 of this NovaServer.
Reserved attribute.
:param access_i_pv4: The access_i_pv4 of this NovaServer.
:type: str
"""
self._access_i_pv4 = access_i_pv4
@property
def access_i_pv6(self):
"""Gets the access_i_pv6 of this NovaServer.
Reserved attribute.
:return: The access_i_pv6 of this NovaServer.
:rtype: str
"""
return self._access_i_pv6
@access_i_pv6.setter
def access_i_pv6(self, access_i_pv6):
"""Sets the access_i_pv6 of this NovaServer.
Reserved attribute.
:param access_i_pv6: The access_i_pv6 of this NovaServer.
:type: str
"""
self._access_i_pv6 = access_i_pv6
@property
def config_drive(self):
"""Gets the config_drive of this NovaServer.
Reserved attribute.
:return: The config_drive of this NovaServer.
:rtype: str
"""
return self._config_drive
@config_drive.setter
def config_drive(self, config_drive):
"""Sets the config_drive of this NovaServer.
Reserved attribute.
:param config_drive: The config_drive of this NovaServer.
:type: str
"""
self._config_drive = config_drive
@property
def progress(self):
"""Gets the progress of this NovaServer.
Reserved attribute.
:return: The progress of this NovaServer.
:rtype: int
"""
return self._progress
@progress.setter
def progress(self, progress):
"""Sets the progress of this NovaServer.
Reserved attribute.
:param progress: The progress of this NovaServer.
:type: int
"""
self._progress = progress
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NovaServer):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
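# --- Illustrative sketch (not part of the generated SDK module) ---
# The properties above are plain attribute pass-throughs. A hedged sketch of how a
# caller might read and update them on an existing NovaServer instance; `server` is
# assumed to come from an SDK list/show call, since the generated constructor
# signature is not reproduced in this excerpt.
def describe(server):
    if server.locked:
        print("locked ECS:", server.description)
    server.description = "updated via the description setter"
    # to_dict() serializes nested models and masks attributes listed in sensitive_list
    return server.to_dict()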
/make_vcard-1.0.2.tar.gz/make_vcard-1.0.2/README.md
# make_vcard
This is a Python program that converts an xlsx spreadsheet into a contacts import (.vcf) file.
## Installation
```
pip install make_vcard
```
## Basic usage
```
python -m make_vcard inputFileName.xlsx
```
## Help
```
usage: make_vcard [-h] [--example] [--sheet SHEET] [--filetype FILETYPE] [-o O] inputFileName
Basic usage: python -m make_vcard inputFileName.xlsx [-o outputFileName]
positional arguments:
inputFileName        the input file to read
optional arguments:
-h, --help           show this help message and exit
--example            generate an example Excel file
--sheet SHEET        which sheet to read; defaults to the first sheet
--filetype FILETYPE  the file type (default: excel); the only other accepted value is csv
-o O                 the output file name; do not add a file extension!
Note: different devices support different fields, so following these rules may not always produce the result you expect!
```
## Notes
Only 姓名 (name) and 电话 (phone) are required; the other columns can be combined freely.
Columns for 电话 (phone), 邮箱 (email), 网址 (URL) and 地址 (address) may appear more than once; the rule is the column name plus a suffix (名称+数字), for example 电话A.
Repeatable columns can also declare a type:
Phone types: 优先 (preferred), 住宅 (home), 工作 (work), 单元 (cell), 传真 (fax), 其它 (other)
Email types: 住宅 (home), 工作 (work), 其它 (other)
URL types: 住宅 (home), 工作 (work), 其它 (other)
Address types: 住宅 (home), 工作 (work), 其它 (other), 邮寄 (postal)
For example: 邮箱(住宅)
For example: 网址(工作)
Note: different devices support different fields, so following these rules may not always produce the result you expect!
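For illustration, a sheet following the rules above might look like this (a hypothetical header row with made-up contact values, not a file shipped with the package):
```
姓名    电话           电话A(工作)     邮箱(住宅)
张三    13800000000    010-12345678   zhangsan@example.com
```
Converting it with `python -m make_vcard contacts.xlsx -o contacts` would then produce a `contacts.vcf` file ready for import.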
/Anti-Viral-Protocol-1.0.1.tar.gz/Anti-Viral-Protocol-1.0.1/anti_viral_protocol/enemies.py
import pygame
from .chars import Enemy
from .shells import *
import os
# Working file paths
BASE_PATH = os.path.dirname(__file__)
IMAGES_PATH = os.path.join(BASE_PATH, 'resources/Images/')
# virus_1
class Virus1(Enemy):
ammo = Virus1shell
height = 78
width = 78
hp = 100
health_max = 100
# Virus_2
class Virus2(Enemy):
ammo = Virus2shell
height = 78
width = 78
hp = 200
health_max = 200
# Virus3
class Virus3(Enemy):
ammo = Virus3shell
height = 156
width = 156
hp = 300
health_max = 300
# Virus4
class Virus4(Enemy):
ammo = Virus4shell
height = 156
width = 156
hp = 350
health_max = 350
# virus_boss
class VirusBoss(Enemy):
ammo = Virus1shell
height = 320
width = 320
hp = 500
health_max = 500
spawn_cooldown = 70
enemy_list = []
enemy = Virus2
# Loading the boss health bar
health_bar = pygame.image.load(IMAGES_PATH + "HUD/bossbar.png")
# Function to spawn enemies
def spawn_enemies(self, player):
# checking if there is room to spawn enemies
if self.spawn_cooldown <= 0 and len(self.enemy_list) < 11:
self.spawn_cooldown = 70
# checking for the distance from player
if abs(player.x - self.x) < 700:
self.enemy_list.append(self.enemy(self.x - 50, 500))
self.enemy_list[-1].load_anim(IMAGES_PATH + "/Characters/Virus/Virus_2/idle.png",
IMAGES_PATH + "Projectiles/virus_1_")
self.enemy_list[-1].Tracking = True
else:
self.spawn_cooldown -= 1
# Checking for damage by player
def check_hurt(self, player):
if player.current_weapon != 0:
if not self.enemy_list:
ammo_list = player.weapons[player.weapon_list[player.current_weapon]].ammo_list
for ammo in list(ammo_list):
if self.x + self.width > ammo.x > self.x and self.y + self.height > ammo.y > self.y:
self.hp -= ammo.damage
ammo_list.remove(ammo)
# Update function for boss health-bar
def update_health_bar(self, win):
if self.hp > 0:
win.blit(self.health_bar, (430, 22), (0, 0, (self.hp/self.health_max) * 500, 20))
# function to kill player if in the virus
def kill_on_contact(self, player):
if self.x + self.width > player.x > self.x and self.y + self.height > player.y > self.y:
player.hp = 0
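# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how VirusBoss's per-frame hooks might be driven from a
# game loop. It assumes the boss is constructed as VirusBoss(x, y) (mirroring
# self.enemy(self.x - 50, 500) above) and that `player` and `win` are the
# game's player object and pygame display surface; these names are
# hypothetical, not the package's actual main loop.
def update_boss(boss, player, win):
    boss.spawn_enemies(player)    # spawn a Virus2 minion when off cooldown and in range
    boss.check_hurt(player)       # the boss only takes damage once its minions are gone
    boss.kill_on_contact(player)  # touching the boss kills the player outright
    boss.update_health_bar(win)   # draw the boss health bar while hp > 0
    # Drop minions the player has destroyed (hp is defined on every Enemy subclass).
    boss.enemy_list[:] = [m for m in boss.enemy_list if m.hp > 0]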
/neural_compressor-2.2.1-py3-none-any.whl/neural_compressor/experimental/data/filters/filter.py
from abc import abstractmethod
from neural_compressor.utils.utility import singleton
@singleton
class TensorflowFilters(object):
"""The base filter class for Tensorflow framework."""
def __init__(self):
"""Initialize the attributes of the class."""
self.filters = {}
self.filters.update(TENSORFLOW_FILTERS)
@singleton
class ONNXRTQLFilters(object):
"""The base filter class for ONNXRT framework QLinear mode."""
def __init__(self):
"""Initialize the attributes of the class."""
self.filters = {}
self.filters.update(ONNXRT_QL_FILTERS)
@singleton
class ONNXRTITFilters(object):
"""The base filter class for ONNXRT framework IT mode."""
def __init__(self):
"""Initialize the attributes of the class."""
self.filters = {}
self.filters.update(ONNXRT_IT_FILTERS)
@singleton
class PyTorchFilters(object):
"""The base filter class for PyTorch framework."""
def __init__(self):
"""Initialize the attributes of the class."""
self.filters = {}
self.filters.update(PYTORCH_FILTERS)
@singleton
class MXNetFilters(object):
"""The base filter class for MXNet framework."""
def __init__(self):
"""Initialize the attributes of the class."""
self.filters = {}
self.filters.update(MXNET_FILTERS)
TENSORFLOW_FILTERS = {}
TENSORFLOW_ITEX_FILTERS = {}
ONNXRT_IT_FILTERS = {}
ONNXRT_QL_FILTERS = {}
PYTORCH_FILTERS = {}
MXNET_FILTERS = {}
framework_filters = {"tensorflow": TensorflowFilters,
"tensorflow_itex": TensorflowFilters,
"pytorch": PyTorchFilters,
"pytorch_ipex": PyTorchFilters,
"pytorch_fx": PyTorchFilters,
"mxnet": MXNetFilters,
"onnxrt_qlinearops": ONNXRTQLFilters,
"onnxrt_qdq": ONNXRTQLFilters,
"onnxruntime": ONNXRTQLFilters,
"onnxrt_integerops": ONNXRTITFilters,
}
registry_filters = {"tensorflow": TENSORFLOW_FILTERS,
"tensorflow_itex": TENSORFLOW_ITEX_FILTERS,
"pytorch": PYTORCH_FILTERS,
"pytorch_ipex": PYTORCH_FILTERS,
"pytorch_fx": PYTORCH_FILTERS,
"mxnet": MXNET_FILTERS,
"onnxrt_integerops": ONNXRT_IT_FILTERS,
"onnxrt_qdq": ONNXRT_QL_FILTERS,
"onnxruntime": ONNXRT_QL_FILTERS,
"onnxrt_qlinearops": ONNXRT_QL_FILTERS}
class FILTERS(object):
"""The filter register for all frameworks.
Args:
framework (str): frameworks in ["tensorflow", "tensorflow_itex", "mxnet",
"onnxrt_qdq", "pytorch", "pytorch_ipex",
"pytorch_fx", "onnxrt_integerops", "keras",
"onnxrt_qlinearops", "onnxruntime"].
"""
def __init__(self, framework):
"""Initialize the attributes of the class."""
assert framework in ["tensorflow", "tensorflow_itex", "keras",
"mxnet", "onnxrt_qdq", "pytorch", "pytorch_ipex", "pytorch_fx",
"onnxrt_integerops", "onnxrt_qlinearops", "onnxruntime"], \
"framework support tensorflow pytorch mxnet onnxrt"
self.filters = framework_filters[framework]().filters
self.framework = framework
def __getitem__(self, filter_type):
"""Magic method.
x[i] is roughly equivalent to type(x).__getitem__(x, index)
"""
assert filter_type in self.filters.keys(), "filter support {}".\
format(self.filters.keys())
return self.filters[filter_type]
def filter_registry(filter_type, framework):
"""Register all filter subclasses.
Args:
filter_type (str): Filter registration name.
framework (str): Supported frameworks, including 'tensorflow', 'pytorch', 'mxnet' and 'onnxrt'.
cls (class): The class to register.
Returns:
cls: The registered class.
"""
def decorator_transform(cls):
"""Decorate a class."""
for single_framework in [fwk.strip() for fwk in framework.split(',')]:
assert single_framework in [
"tensorflow",
"tensorflow_itex",
"pytorch",
"pytorch_ipex",
"pytorch_fx",
"mxnet",
"onnxrt_integerops",
"onnxrt_qdq",
"onnxrt_qlinearops",
"onnxruntime"
], "The framework support tensorflow mxnet pytorch onnxrt"
if filter_type in registry_filters[single_framework].keys():
raise ValueError('Cannot have two filters with the same name')
registry_filters[single_framework][filter_type] = cls
return cls
return decorator_transform
class Filter(object):
"""The base class for filters.
A __call__ method is needed when writing a user-specific filter.
"""
@abstractmethod
def __call__(self, *args, **kwargs):
"""Execute the filter."""
raise NotImplementedError
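# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how the registry above is meant to be used: decorate a
# Filter subclass with filter_registry to register it for a framework, then
# look it up through FILTERS. The name "my_label_filter" and the keep-even-
# labels rule are hypothetical. Because the per-framework containers are
# singletons, registration must happen before FILTERS(framework) is first
# instantiated for that framework.
@filter_registry(filter_type="my_label_filter", framework="tensorflow")
class MyLabelFilter(Filter):
    """Keep only samples whose label is an even integer."""
    def __call__(self, image, label):
        return label % 2 == 0

tf_filters = FILTERS("tensorflow")
keep = tf_filters["my_label_filter"]()   # look up the registered class and instantiate it
assert keep(image=None, label=4) is True
assert keep(image=None, label=3) is False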
/Flask-Wizard-0.5.28.tar.gz/Flask-Wizard-0.5.28/flask_wizard/facebook.py
from __future__ import absolute_import
from __future__ import print_function
import os
import json
import apiai
import requests
import base64
import sys
import random
import uuid
import time
from timeit import default_timer as timer
from flask import request
from actions import *
class FacebookHandler(object):
"""
The facebook handler acts as the interface to handle all requests coming
from messenger.
It parses the payload and responds
"""
def __init__(self, pid, pat, verify_token, ozz_guid, actions, redis_db, mongo, log):
self.pid = pid
self.pat = pat
self.verify_token = verify_token
self.ozz_guid = ozz_guid
self.redis_db = redis_db
self.mongo = mongo
self.log = log
with open(actions,"r") as jsonFile:
self.actions = json.load(jsonFile)
self.api = None
if ozz_guid != "":
if ozz_guid[:4] == 'api_':
self.api = apiai.ApiAI(ozz_guid[4:])
print("Messenger endpoint - /api/messages/facebook")
def verify(self,*args,**kwargs):
if request.args.get('hub.verify_token','') == self.verify_token:
return request.args.get('hub.challenge','')
else:
return "Error, wrong validation token"
def respond(self,*args,**kwargs):
payload = request.get_json()
for sender, message in self.messaging_events(payload):
if sender != self.pid:
if type(message) != str:
start = timer()
intent = None
entities = None
action = None
response = ""
message = message.decode('utf-8')
r = requests.get("https://graph.facebook.com/v2.6/"+ sender + "?fields=first_name,last_name,profile_pic,locale,timezone,gender&access_token=" + self.pat)
r_data = json.loads(r.text)
session = {}
session['user'] = {
'id':sender,
'name':r_data['first_name'] + ' ' + r_data['last_name'],
'profile_pic':r_data['profile_pic'],
'locale':r_data['locale'],
'timezone':r_data['timezone'],
'gender':r_data['gender']
}
session['cache'] = self.redis_db
session['mongo'] = self.mongo
session['message'] = message
session['channel'] = 'facebook'
if self.api:
r = self.api.text_request()
r.session_id = uuid.uuid4().hex
r.query = message
res = r.getresponse()
res = json.loads(res.read().decode('utf-8'))
intent = res["result"]["action"]
if intent == '':
intent = res["result"]["metadata"]["intentName"]
response = res["result"]["fulfillment"]["speech"]
entities = res["result"]['parameters']
session['intent'] = intent
session['entities'] = entities
print(intent)
if intent in self.actions:
action = self.actions[intent]
if type(self.actions[intent]) == list:
response = random.choice(self.actions[intent])
self.send_message(self.pat,sender,response)
else:
func = eval(self.actions[intent])
func(session)
elif response != "":
end = timer()
runtime = str(end - start)
if self.mongo:
log_object = {"message":message,"channel":"facebook","intent":intent,"entities":entities,"action":action,"response":str(response),"runtime":runtime,"time":str(time.time())}
self.mongo.db.logs.insert_one(log_object)
self.send_message(self.pat, sender, response)
else:
end = timer()
runtime = str(end - start)
if self.mongo:
log_object = {"message":message,"channel":"facebook","intent":intent,"entities":entities,"action":action,"response":str(message),"runtime":runtime,"time":str(time.time())}
self.mongo.db.logs.insert_one(log_object)
self.send_message(self.pat, sender, message)
return "responded"
def messaging_events(self, payload):
data = payload
messaging_events = data["entry"][0]["messaging"]
for event in messaging_events:
if event["sender"]["id"] == self.pid:
continue
elif 'read' in event:
continue
elif 'delivery' in event:
continue
else:
if "message" in event and "text" in event["message"]:
yield event["sender"]["id"], event["message"]["text"].encode('unicode_escape')
elif "postback" in event and "payload" in event["postback"]:
yield event["sender"]["id"], event["postback"]["payload"].encode('unicode_escape')
def send_message(self, token, recipient, text):
"""Send the message text to recipient with id recipient.
"""
if sys.version_info >= (3, 0):
message = text
else:
message = text.decode('unicode_escape')
r = requests.post("https://graph.facebook.com/v2.6/me/messages",
params={"access_token": token},
data=json.dumps({
"recipient": {"id": recipient},
"message": {"text": message}
}),
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print(r.text)
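# --- Illustrative wiring sketch (not part of the original module) ---
# A minimal sketch of how FacebookHandler could be hooked up to a Flask app.
# The environment-variable names, the actions.json path and the empty
# redis/mongo/log arguments are assumptions for illustration; flask_wizard
# normally registers the /api/messages/facebook endpoint itself.
import os
from flask import Flask, request

app = Flask(__name__)
handler = FacebookHandler(
    pid=os.environ.get("FB_PAGE_ID", ""),
    pat=os.environ.get("FB_PAGE_ACCESS_TOKEN", ""),
    verify_token=os.environ.get("FB_VERIFY_TOKEN", ""),
    ozz_guid="",              # no NLU backend configured in this sketch
    actions="actions.json",   # JSON file mapping intents to responses/actions
    redis_db=None,
    mongo=None,
    log=None,
)

@app.route("/api/messages/facebook", methods=["GET", "POST"])
def facebook_webhook():
    # Facebook verifies the webhook with a GET carrying hub.verify_token,
    # then POSTs message events to the same URL.
    if request.method == "GET":
        return handler.verify()
    return handler.respond()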
/ciefunctions-1.0.2.tar.gz/ciefunctions-1.0.2/tc1_97/MathJax-2.7.5/unpacked/jax/output/SVG/fonts/TeX/Fraktur/Bold/BasicLatin.js
MathJax.Hub.Insert(
MathJax.OutputJax.SVG.FONTDATA.FONTS['MathJax_Fraktur-bold'],
{
// SPACE
0x20: [0,0,250,0,0,''],
// EXCLAMATION MARK
0x21: [689,12,349,107,241,'121 621Q121 657 132 673T177 689Q223 689 223 644V635Q223 604 222 595Q221 590 210 490T187 292T175 190V186L166 185L156 184Q156 185 139 393T121 621ZM107 47Q107 70 127 87T174 104Q201 104 221 89T241 48Q241 24 222 6T174 -12Q147 -12 127 6T107 47'],
// QUOTATION MARK
0x22: [695,-432,254,10,231,'53 695Q74 695 90 679V622L65 433L52 432H39L27 516Q10 626 10 655Q10 680 26 688Q33 693 39 693Q49 695 53 695ZM151 668Q151 691 191 691Q217 691 224 685T231 661V652Q230 634 219 531L207 433L195 432Q183 432 183 433L168 541Q151 664 151 668'],
// AMPERSAND
0x26: [696,17,871,44,839,'290 -14Q186 -14 115 41T44 185Q44 222 54 249T88 300T131 336T189 371Q216 387 216 388Q185 459 185 510Q185 563 206 601T263 659T334 687T405 696Q476 696 503 668T531 603Q531 565 513 536T450 476Q423 459 370 432L334 413L354 384Q474 212 560 139L563 137Q611 185 611 250Q611 295 577 329Q549 356 496 357Q439 357 433 354Q432 354 432 379V403L437 402Q443 402 479 401T557 400Q653 400 735 403T831 407H836Q833 352 833 351L821 352Q809 352 792 352T756 352T720 353T696 354Q680 354 680 353L681 346Q682 339 683 327T685 306Q685 283 681 260T669 218T653 182T635 152T619 129T606 114L602 109Q604 107 618 99T659 81T707 71Q742 71 767 99T804 155L815 183Q815 184 821 183T833 180T839 177Q837 169 834 156T816 110T782 52T727 5T648 -16Q569 -16 499 35H498Q496 35 466 23T387 -1T290 -14ZM418 592Q418 617 398 639T352 661T302 642T278 574Q278 545 288 514T306 465T319 444Q342 456 353 463T382 488T409 529T418 584V592ZM159 239Q159 158 222 98T364 38Q386 38 447 57L469 63L434 98Q349 185 286 275Q258 316 238 345Q233 353 232 353Q159 316 159 239'],
// APOSTROPHE
0x27: [695,-436,250,80,158,'80 645T80 662T93 687T123 695Q158 695 158 659Q158 649 157 643L123 437Q123 436 114 436H104Q104 442 92 538Q80 645 80 662'],
// LEFT PARENTHESIS
0x28: [737,186,459,134,347,'347 719Q325 708 311 698T272 656T233 580T207 455T195 267Q195 30 247 -79Q261 -110 291 -136Q320 -163 347 -172V-179Q347 -186 344 -186Q338 -186 328 -184T287 -165T230 -123Q134 -25 134 271Q134 417 158 514T226 662T335 734L346 737Q347 737 347 728V719'],
// RIGHT PARENTHESIS
0x29: [735,187,459,105,326,'264 262Q264 366 253 446T226 572T186 649T145 692T105 714V725Q105 735 107 735Q108 734 121 731T154 719T196 692T242 641T284 560T314 437T326 268Q326 112 299 7Q279 -78 239 -124T116 -185L105 -187V-179L106 -171L109 -169Q130 -161 138 -158T165 -146T190 -127T210 -101T229 -64T243 -12T255 58T261 148T264 262'],
// ASTERISK
0x2A: [692,-449,328,40,277,'40 516L62 529Q85 542 110 556T140 574L126 582Q112 591 104 595T80 607T40 629Q53 642 57 645L65 652L78 642Q106 620 132 603L152 589V595Q152 630 149 681V692H179V689Q178 681 174 638T171 593Q173 593 240 639L258 652Q260 652 267 643L276 633L260 625Q190 587 175 576Q173 575 180 570Q183 569 186 567Q213 549 256 527L277 515L256 495Q246 501 228 515T194 539T170 554V543Q170 486 178 449H148V456Q152 492 152 550L151 562Q150 562 102 528L53 495Q40 514 40 516'],
// PLUS SIGN
0x2B: [598,82,893,56,837,'422 584L471 598Q472 598 472 440V282H837Q833 273 829 263L821 244L647 243H472V-63L448 -73L423 -82Q422 -82 422 81V243H239Q56 243 56 244Q60 253 65 263L73 282H422V584'],
// COMMA
0x2C: [107,191,328,118,253,'118 61Q118 80 135 93T169 107Q190 107 221 65T253 -23Q253 -39 251 -49T237 -80T198 -133Q148 -191 144 -191Q142 -191 137 -182T132 -172Q143 -161 160 -131T183 -83Q185 -77 185 -62Q185 -54 184 -48T182 -38T177 -28T171 -19T162 -8T150 6Q130 28 124 38T118 61'],
// HYPHEN-MINUS
0x2D: [275,-236,893,54,833,'54 236L73 275H453Q833 275 833 274Q830 265 825 255L818 236H54'],
// FULL STOP
0x2E: [102,15,328,103,237,'103 23T103 44T120 83T170 102Q200 102 218 84T237 44Q237 20 216 3T168 -15Q138 -15 121 4'],
// SOLIDUS
0x2F: [721,182,593,41,550,'272 270Q503 721 506 721L509 720Q512 720 518 719T529 717L550 713L91 -181L66 -182Q41 -182 41 -181L272 270'],
// DIGIT ZERO
0x30: [501,12,593,42,533,'238 -12Q162 -12 102 42T42 185Q42 303 130 393Q163 425 208 452T284 490L313 501Q323 499 339 495T395 472T464 426Q533 357 533 273Q533 201 483 133T364 27T238 -12ZM428 208Q428 255 402 297T342 365T280 404T241 419Q214 419 178 374T142 259Q142 206 168 164T225 99Q259 74 310 74Q326 74 337 75T366 82T396 103T417 141Q428 171 428 208'],
// DIGIT ONE
0x31: [489,0,593,54,548,'95 481Q102 481 217 485T383 489Q384 489 384 485Q367 397 367 165Q367 58 369 54Q374 46 380 44T410 42H466H546V40Q547 38 547 19L548 0H54V23Q54 29 54 34T54 44L55 47Q79 47 134 46T202 45Q226 45 234 52Q240 57 241 64T245 105Q254 236 254 320V347Q254 369 252 382T240 409T211 431L97 450L96 465Q95 480 95 481'],
// DIGIT TWO
0x32: [491,-2,593,44,563,'307 335Q307 374 283 397T224 421Q187 421 112 387Q105 384 100 382T95 381Q90 387 86 394L77 407L86 413Q219 491 298 491Q370 491 399 460T428 388Q428 373 424 358T409 326T391 297T363 264T335 235T301 202T269 171L199 104Q194 99 205 97Q209 96 214 96Q527 105 544 105Q553 107 563 102Q563 100 557 79T545 34T537 2H377Q338 2 247 2T130 4H44V26L104 77Q185 145 212 172T267 235Q307 291 307 335'],
// DIGIT THREE
0x33: [487,193,593,31,523,'102 402L108 408Q115 413 122 418T141 431T165 447T194 461T227 474T263 483T302 487H307Q413 487 452 420Q465 400 465 371Q465 334 445 303T396 253T347 225T317 213Q314 213 314 211Q316 209 316 205Q317 201 320 201Q337 201 359 198T411 184T465 156T506 109T523 39Q523 -62 436 -127T229 -193Q179 -193 130 -178T56 -150T31 -133Q31 -132 41 -122L52 -112L63 -117Q128 -148 201 -148Q282 -148 331 -104T381 20Q381 71 363 100T304 145Q243 166 149 166H137V204H146Q179 204 211 210T275 229T326 268T346 329Q346 372 314 401Q292 423 245 423Q188 423 125 383L102 402'],
// DIGIT FOUR
0x34: [495,196,593,13,565,'346 -196Q344 -196 335 -187L336 -148Q337 -127 337 -55V0H13V29L187 253Q362 477 362 479L368 480Q375 481 387 483T411 487T434 491T452 494L459 495Q460 495 470 482V453Q470 389 466 230T461 62Q461 61 513 61T565 60L555 29L546 -1H461V-15Q461 -48 463 -100T465 -154L457 -157Q449 -160 434 -165T405 -175Q347 -196 346 -196ZM339 265V341Q339 362 335 362Q327 362 219 217T110 65V61H337V117Q338 133 338 187T339 265'],
// DIGIT FIVE
0x35: [481,190,593,18,519,'232 192Q176 192 122 152L95 162V481H306Q516 481 516 479Q514 477 501 433L486 389L319 388H152V386V382Q152 379 152 374T151 365Q147 329 146 260V218H149Q211 242 284 242Q353 242 402 224T474 176T508 117T518 55Q518 -62 432 -126T220 -190Q184 -190 151 -185T96 -172T57 -157T31 -145T20 -139T19 -138Q19 -136 27 -125L35 -112L51 -120Q114 -152 174 -152Q257 -152 314 -100T371 46Q371 107 340 149T232 192'],
// DIGIT SIX
0x36: [704,12,593,48,547,'48 251Q48 330 76 403T150 529T253 623T370 683T485 704Q494 704 520 701T547 695Q547 692 542 659T536 625Q531 624 524 624L512 623L502 628Q489 635 468 640Q452 645 423 645Q403 645 379 640T320 617T255 568T201 481T171 348Q170 341 170 330V325L183 333Q275 385 357 385H361Q464 385 514 312Q546 267 546 217Q546 127 457 58T262 -12Q225 -12 189 3T120 49T68 132T48 251ZM448 165Q448 228 406 274T289 320Q264 320 236 312T190 295T173 284Q173 266 176 241T189 178T214 112T259 61T326 39Q372 39 410 75T448 165'],
// DIGIT SEVEN
0x37: [479,197,593,54,591,'57 376L87 479H591V455L584 446Q544 399 491 328T349 117T185 -169L171 -196H159Q152 -197 102 -197Q58 -197 58 -196T56 -185L54 -175L299 158L443 359Q446 367 444 370H254L71 365L57 376'],
// DIGIT EIGHT
0x38: [714,5,593,45,542,'88 533Q88 573 120 610T194 668T268 701T307 714Q324 714 352 711T422 695T486 659Q518 625 518 585Q518 536 479 489T384 406L371 398L385 390Q387 389 400 382T420 370T442 356T466 339T489 319T510 295T526 269T538 238T542 204Q542 125 463 60T256 -5Q145 -5 92 52Q45 97 45 165Q45 204 64 237T109 290T163 324T209 345T228 353L214 364Q199 375 179 392T138 431T103 480T88 533ZM405 557Q405 568 402 581T387 612T350 644T286 663Q283 663 280 663T274 664H272Q256 664 228 636T199 572Q199 547 238 507Q268 475 320 437L334 427Q345 433 358 443T388 483T405 549V557ZM304 42Q366 42 398 76T431 155Q431 178 420 200T396 238T359 270T321 296T283 318L263 328Q262 328 230 312Q190 290 175 266T160 198Q160 132 202 87T304 42'],
// DIGIT NINE
0x39: [487,195,593,29,549,'549 220Q549 23 429 -82T105 -195H84V-189Q84 -179 85 -174V-164H93Q184 -156 238 -132T334 -56Q361 -23 376 16T394 78L397 100L363 88Q329 75 291 61T244 45Q237 44 218 44Q154 44 94 97Q29 152 29 240Q29 350 108 404Q145 429 257 480Q270 487 279 487Q403 487 470 421Q549 347 549 220ZM408 217Q408 276 390 320T346 385T297 415T259 424Q218 424 185 393T151 286Q151 216 213 154Q252 115 321 115Q368 115 388 134T408 217'],
// COLON
0x3A: [457,12,255,57,197,'57 398Q57 419 72 438T117 457Q154 457 174 439T194 398Q194 379 176 361T119 343Q85 343 71 362T57 398ZM62 19T62 43T77 85T115 104Q153 104 175 86T197 42Q197 14 171 1T119 -12Q96 -12 79 3'],
// SEMICOLON
0x3B: [458,190,255,56,211,'56 399Q56 424 73 440T104 456Q114 458 120 458Q149 458 170 440T192 399Q192 380 174 362T120 344Q85 344 71 362T56 399ZM78 53Q78 67 84 76T90 86Q90 88 98 92T116 98Q117 98 121 98T128 99Q152 97 181 58T211 -24Q211 -77 128 -165Q124 -170 121 -173T116 -178T113 -181T110 -185T106 -190L97 -184L88 -177L95 -168Q143 -104 143 -65Q143 -51 137 -40T113 -7T81 35Q78 41 78 53'],
// EQUALS SIGN
0x3D: [343,-168,582,22,559,'559 342L549 304H22L27 319Q29 328 30 333T33 343H296Q559 343 559 342ZM559 206L549 168H22L27 183Q29 192 30 197T33 207H296Q559 207 559 206'],
// QUESTION MARK
0x3F: [697,14,428,40,422,'121 590Q121 575 128 562T144 542T152 533T115 512L78 491Q55 499 47 516Q40 530 40 553Q40 601 77 632Q155 697 257 697H268Q316 697 355 679Q422 646 422 576Q422 518 388 476Q383 468 376 461T358 444T340 428T316 410T290 390L230 344Q180 307 180 275Q180 261 187 248T202 227L209 219Q209 215 176 193L142 170Q114 177 100 194T84 226V239Q84 259 93 276T113 302T150 331T192 362Q203 370 219 382T247 403T267 422Q312 471 312 546Q312 593 282 623T207 653Q170 653 146 636T121 590ZM95 23T95 49T117 94T173 113Q204 113 223 96T242 54Q242 27 221 7T167 -14Q136 -14 116 4'],
// LATIN CAPITAL LETTER A
0x41: [686,31,847,29,827,'821 97Q822 97 824 88T827 77L793 53Q676 -25 670 -28Q669 -29 656 -27L583 123Q583 124 467 46L352 -31L341 -20Q305 18 264 47T192 77Q161 77 60 32L49 40Q37 47 38 49Q39 49 93 83T212 160T297 219Q411 312 411 452Q411 519 360 571T233 624Q180 624 157 601T133 548Q133 524 160 496T214 441T241 393Q241 356 199 321T100 256L86 249L77 256Q68 263 67 263L84 274Q101 286 118 304T135 339T109 384T56 446T29 504Q29 566 118 624Q207 686 309 686Q349 686 360 685Q405 678 439 661T491 625T520 583T534 543T537 511Q537 436 491 344L478 318L455 299Q420 272 308 179L284 160L294 158Q348 154 426 89L437 79Q513 110 579 153V175Q579 183 579 227T580 330T581 446T582 542L583 582L664 630Q681 640 703 653T734 673L744 679Q750 678 756 676L767 674L716 623V585Q716 568 712 463T708 289V250Q708 237 709 218T710 195L711 180L739 130Q768 79 771 79Q775 79 796 88T821 97'],
// LATIN CAPITAL LETTER B
0x42: [684,31,1044,56,965,'160 345Q160 357 144 376T109 413T73 458T57 509Q57 544 95 584Q142 631 205 657T331 684Q382 684 427 658T500 585L505 577L521 588Q537 599 562 614T616 646T679 673T738 684Q790 684 807 666T840 587Q850 552 863 532T888 508Q894 505 906 505Q917 505 930 507T953 512T963 514L964 504Q965 495 965 494T914 467T808 413T745 384H751Q782 380 802 377T854 362T904 334T937 287T951 217Q951 178 937 143T908 91Q903 86 820 34L734 -21L718 -24Q679 -31 639 -31Q561 -31 451 4T271 40Q190 40 119 -2L99 -13L91 1L84 15L86 16Q88 18 132 42T233 100T315 152Q377 199 386 233Q388 240 393 297T399 363Q399 487 353 551Q337 573 306 597T238 622Q201 622 179 602T157 557T214 476T272 396Q272 371 229 334T143 272T96 246Q95 246 85 252T74 259T95 273T138 306T160 345ZM529 443Q529 409 528 385T526 353L525 346Q526 346 649 390T773 435Q749 451 742 464T727 518Q727 519 725 532T721 548T717 562T712 577T706 589T698 601T688 608T675 614T658 616Q626 616 576 582T525 528Q525 527 526 518T528 489T529 443ZM772 57Q774 57 778 58T792 64T808 77T821 103T827 144Q827 222 784 266T660 322Q652 323 611 323H596Q577 323 535 316L523 314Q520 291 505 255L500 241L356 138L366 137Q443 131 518 110T650 72T748 54Q763 54 772 57'],
// LATIN CAPITAL LETTER C
0x43: [676,32,723,71,726,'460 -32Q373 -32 305 -11T193 45T122 124T83 214T72 303Q72 395 114 476L119 486L313 592L338 568L359 580Q418 615 479 638T568 668T606 675Q607 675 608 676H610Q612 676 615 661T630 621T660 578Q673 568 694 568Q717 568 721 570H726Q724 565 722 559L717 549L706 545Q608 513 583 513Q568 517 559 522T533 546T493 603L490 609Q452 599 452 558Q452 537 469 481T486 393Q486 353 474 331T422 285T296 231L272 223L262 230L253 237Q279 246 314 274T351 338Q351 376 334 442T316 532Q316 546 319 552Q319 554 316 554Q304 554 288 547T250 523T214 466T199 371Q199 218 299 133T541 47Q571 47 585 51T652 81L712 108Q716 104 716 81L706 74Q695 68 673 54T633 29L550 -22L540 -24Q492 -32 460 -32'],
// LATIN CAPITAL LETTER D
0x44: [683,29,982,31,896,'380 596Q307 596 250 582T158 546T100 493T67 433T56 373V361Q55 361 43 366L31 372V384Q31 455 69 523T173 627Q213 650 284 666T444 683H452Q629 683 735 629Q896 548 896 369Q896 263 839 163Q835 155 818 140Q746 82 662 27T563 -29Q525 -29 386 16T183 62Q147 62 127 52T63 1L48 -14L40 -4L31 5Q83 73 172 149L186 161H199Q291 161 329 181Q357 199 357 231Q357 258 301 316T245 396Q245 423 282 458T349 512T403 543L413 548L425 545L438 541Q373 491 373 462Q373 446 399 415T453 349T480 288Q480 251 433 212Q394 180 348 156L334 148L353 145Q408 134 513 105T654 76Q711 76 745 132T780 277Q780 434 676 517Q637 549 562 572T380 596'],
// LATIN CAPITAL LETTER E
0x45: [686,29,783,74,728,'527 55Q574 55 619 69T691 97L717 111V85L562 -18Q520 -29 443 -29Q379 -29 325 -15T235 21T180 61T146 98Q74 186 74 307Q74 395 109 472Q113 482 123 489T190 533Q251 568 295 591L308 598L350 580L361 586Q403 612 464 636T564 673T609 686Q610 686 610 685Q612 683 616 670T627 636T646 601Q666 572 686 572H692Q713 572 726 576H728L725 565L723 554L692 544Q660 535 629 526T595 516Q585 514 574 519Q563 527 543 552T507 597T490 617Q467 604 456 579V564Q456 535 473 471T492 393L494 381L613 460L622 446Q630 433 650 411T696 371L703 365L614 312H596L580 322Q568 329 553 340T528 355T510 360Q496 358 491 354T484 345T471 326T435 297Q408 278 370 261T307 235T277 227Q273 227 266 234L256 240L267 245Q280 251 294 258T330 288T353 336Q353 373 335 444T316 530V537Q316 549 322 567Q270 554 233 499T196 370Q196 253 287 157Q392 55 527 55'],
// LATIN CAPITAL LETTER F
0x46: [684,146,722,17,727,'424 522Q265 596 208 596Q193 596 180 593T150 579T116 542T89 474Q86 465 86 463L59 481L63 494Q87 578 137 627Q191 684 285 684Q334 684 406 658T538 607T621 581Q644 581 706 629L721 640Q722 640 725 630L727 620Q701 592 654 548T582 486L569 487Q533 490 485 504L468 508Q449 503 429 495T387 466T365 422Q365 373 439 299L453 310Q473 325 528 370L588 418Q614 398 642 368T668 331Q667 331 628 296L590 262L582 274Q557 311 526 311Q511 311 487 297T462 278Q462 277 492 244T551 166T581 88Q581 54 570 25T536 -27T505 -56T478 -76Q376 -146 274 -146H270Q199 -146 162 -118T124 -15Q124 12 128 30T132 96V107Q132 144 117 157Q102 169 85 169Q74 169 59 165T32 156T20 151Q20 152 19 158T17 167Q17 168 17 168T17 169T19 170T22 172T27 175T35 179Q131 230 195 230Q231 230 259 202Q270 190 270 171Q269 150 253 87T236 -16Q236 -67 261 -87T322 -107Q380 -107 428 -68Q467 -35 467 30Q467 60 447 91T383 171T316 251Q290 286 278 308T263 339T261 359Q261 384 284 418Q322 469 424 522'],
// LATIN CAPITAL LETTER G
0x47: [687,29,927,74,844,'742 611Q784 611 812 631V611Q807 607 783 591T718 544T629 476L606 458Q608 458 628 457T667 453T713 443T762 423T804 388T836 335Q844 313 844 289Q844 231 814 182T746 103Q720 82 655 48T546 -18L520 -21Q456 -29 432 -29Q313 -29 223 33Q204 45 183 65T135 119T91 207T74 320Q74 428 109 480Q116 491 127 497T215 546L308 595L343 583L355 591Q387 613 433 636T488 660H489L491 659Q493 658 495 657T500 655L509 650L500 645Q479 635 460 612T441 552Q441 535 447 498T459 433T466 405L625 513L643 526Q620 530 585 546T535 586Q535 587 532 592T527 602T525 610Q525 613 577 649L630 687Q632 687 638 675T653 649T686 623T742 611ZM349 313Q349 328 327 413T305 510V516Q305 531 308 542T314 559T317 566T315 567Q297 567 270 548Q233 524 212 490T191 392Q191 337 206 288T244 207T284 156T316 128Q410 51 535 51Q632 51 675 102T718 217Q718 269 690 314T599 375Q574 381 535 381Q501 381 477 377L466 376Q469 364 469 349Q469 314 457 295T408 258Q366 236 308 219L288 213L279 220L270 227Q284 232 294 236T309 243T320 252T326 260T331 270T336 281Q349 310 349 313'],
// LATIN CAPITAL LETTER H
0x48: [683,126,851,6,752,'288 139Q288 172 255 224T189 335T156 442Q156 495 242 579Q289 625 361 668Q364 671 368 673T376 678T380 681L384 683L392 676Q401 670 414 661T443 642T477 626T509 619Q543 619 618 668Q625 672 628 674T631 675Q632 673 633 663T633 651L564 595Q556 589 545 580T528 566T516 556T505 548T497 543T488 539T481 537T472 535T463 534T451 534H442Q385 534 304 581L291 589Q290 588 285 583T277 575T269 566T262 555T257 543T255 529V522Q255 507 260 487T276 446T293 409T311 376L321 359Q321 358 322 358T324 359T327 361T333 366Q386 409 481 460L503 472L543 471Q586 471 599 470Q692 459 714 430Q725 416 738 360T752 245Q752 184 742 127T725 51T703 -8Q700 -13 619 -64T518 -123Q508 -126 493 -126Q438 -126 398 -86L427 -52Q456 -17 457 -17Q460 -17 465 -16H473Q474 -21 481 -32T504 -56T539 -69Q572 -69 599 -34Q625 4 625 158Q625 264 609 311T532 378Q508 386 484 386Q455 386 419 372T360 345T337 330L346 313Q375 263 386 227Q389 215 389 202Q389 192 388 184T384 168T376 152T365 138T350 121T331 103T307 81T278 54L194 -24Q130 30 99 30Q85 30 64 20T31 1T16 -10Q15 -11 13 -7Q12 -6 11 -3Q8 4 6 8L32 35Q88 88 117 107T169 126Q177 126 182 125Q218 118 252 84L263 73Q288 113 288 139'],
// LATIN CAPITAL LETTER I
0x49: [681,25,655,32,623,'500 615Q523 615 550 628T595 655T614 668L623 654L607 642Q512 569 440 534L427 527L413 529Q384 535 340 547T265 565T209 572Q173 572 145 556T101 522T60 465Q58 460 54 460T41 468L32 477L37 487Q96 599 139 640Q187 681 247 681Q275 681 283 680Q313 674 398 645T500 615ZM418 170Q418 186 410 260T401 382Q403 418 403 424L405 433L415 444Q482 515 571 571L582 578Q591 573 607 568L597 560Q522 504 522 450Q522 427 533 357T545 241V228Q545 190 536 159T508 106T478 73T446 48Q343 -25 238 -25Q179 -25 118 15L107 22L79 5Q51 -12 51 -12L38 2L55 18Q106 67 175 122L192 136Q202 130 206 123Q223 91 252 61Q263 50 266 48T278 39T297 32T320 30Q357 30 389 68Q415 102 418 170'],
// LATIN CAPITAL LETTER J
0x4A: [681,141,652,-8,616,'65 510Q68 517 74 528T101 569T144 620T202 661T274 680Q308 680 389 628T503 576Q530 576 596 600Q615 607 616 607Q616 602 615 596V585Q605 581 576 568T531 548T485 531T418 509L400 503L358 522Q347 527 327 537T299 550T277 560T257 568T239 573T220 577T201 578H196Q181 578 169 575T135 554T88 502L83 496Q82 496 74 502T65 510ZM424 4Q424 50 395 151T365 313V320Q365 352 369 361T405 403Q431 432 465 462T521 508T547 525L549 524Q551 524 554 523T560 521L571 517L552 498Q515 461 499 430Q485 399 485 366Q485 326 512 231T539 84Q539 -14 460 -77T273 -141Q248 -141 234 -140T198 -131T160 -106T134 -59Q128 -40 124 -16T117 22T108 49T91 69T59 75T15 65L1 59Q-8 76 -7 77Q4 85 22 97T88 129T170 149Q218 149 234 125Q242 112 242 43V21Q242 -17 248 -41T274 -85T322 -105H325H330Q363 -105 396 -75Q424 -47 424 4'],
// LATIN CAPITAL LETTER K
0x4B: [681,27,789,20,806,'234 109Q234 144 194 245T153 404Q153 445 180 490Q232 572 325 626T517 681H524Q612 681 661 658Q683 647 699 632T717 604Q717 600 708 545L699 490L690 489Q681 488 679 488Q675 488 669 504T640 546T577 592Q520 620 446 620Q415 620 386 614T327 594T280 553T262 487Q262 468 265 447T271 413T279 384T285 362L295 371Q320 396 352 421T439 474T538 502Q577 502 596 484T627 428Q642 386 651 373T677 360H682Q698 360 727 369L724 357Q724 354 724 351T722 346V344Q559 289 539 283Q582 272 589 271L615 265L637 189Q662 109 663 108Q668 97 682 84Q698 68 722 68H730H738Q762 68 799 91L803 80L806 70Q795 59 770 40T703 -3T631 -26Q598 -26 578 -8Q548 24 536 92Q524 154 509 183T477 218T428 224Q409 224 385 220T346 212L331 207Q330 205 330 201T331 189T332 178Q332 158 325 116L305 96Q269 60 240 38Q171 -21 123 -21Q72 -21 33 18L20 32L62 74Q96 107 102 112T116 118Q120 118 122 113T131 95T150 69Q171 48 190 48Q198 48 206 51T224 69T234 109ZM519 367Q497 432 450 432Q379 432 313 333L300 314L304 299Q306 294 309 280T315 260L321 235L542 313Q530 325 519 367'],
// LATIN CAPITAL LETTER L
0x4C: [683,28,786,30,764,'277 226Q277 248 253 286T203 369T178 449Q178 490 212 533T284 607Q380 683 532 683Q610 683 639 660T668 583Q668 568 666 546T663 509Q663 478 683 460Q691 452 719 452L738 450Q732 437 729 437Q728 437 652 416T573 394Q554 394 541 409T527 444Q527 449 532 487T538 542Q536 584 501 606T418 628Q389 628 364 620T317 587T295 523Q295 478 333 401T372 276Q372 269 371 267Q371 264 318 206L264 149Q284 141 317 130T433 101T577 82Q619 82 652 95T701 127T728 164T742 196L744 209Q744 210 749 208T759 203T764 199T760 185T751 154T744 129Q714 42 680 13Q628 -28 566 -28Q490 -28 403 -5T249 42T153 66T106 53T70 15T47 -16Q46 -17 30 -5L39 13Q85 100 138 148L147 156L161 157Q218 165 246 179T277 226'],
// LATIN CAPITAL LETTER M
0x4D: [683,32,1239,27,1232,'134 338Q134 357 81 417T27 504Q27 516 34 530Q55 568 110 615Q190 683 305 683H314Q445 683 495 580L501 569L512 577Q608 646 681 646Q759 646 801 585L808 576L816 583Q860 619 921 650T1041 682Q1063 682 1077 675T1096 660T1112 631T1132 596Q1160 555 1188 555Q1204 555 1228 564Q1230 565 1231 562Q1231 560 1232 554V547L1215 538Q1179 521 1114 475Q1112 474 1106 470T1099 464T1093 459T1088 452T1085 441T1082 425T1081 404T1079 376T1079 339Q1079 282 1084 236T1098 160T1117 112T1138 85T1159 77Q1166 77 1180 81T1207 90L1219 94Q1220 94 1221 86T1222 76L1045 -32Q1044 -32 1004 15L964 64V167Q965 334 970 372V378L994 402Q1032 440 1057 460Q1061 463 1066 467Q1070 469 1070 470T1068 471T1060 474T1050 481Q1040 488 1021 531T996 583Q979 609 947 609Q922 609 887 592T820 537L821 524Q825 484 825 448Q825 268 768 155L759 137L589 -28L579 -20Q533 17 507 17Q475 17 449 -7L436 -18L424 2L441 20Q446 25 456 36T471 52T484 65T497 79T509 90T522 99T534 106T548 112T561 115T576 117Q602 117 639 86Q648 81 648 81Q650 82 657 94T668 112Q711 202 711 373Q711 484 677 533T600 583Q592 583 583 581T569 577T554 568T542 560T528 549T516 539L519 523Q527 485 527 461Q527 444 522 407Q506 266 447 150L437 130L217 -25L208 -15Q165 28 126 28Q89 28 62 1Q47 -14 43 -14Q42 -14 36 -8L28 0L44 17Q96 73 120 92T166 117Q182 123 204 123Q239 123 284 78L295 67Q307 72 337 102Q400 178 400 346Q400 508 325 571Q270 618 208 618Q180 618 168 614T140 594Q124 578 124 564Q124 540 182 480T240 396Q240 359 197 321Q154 285 94 252L80 245L76 248L67 257L61 262L71 268Q82 275 94 284T120 309T134 338'],
// LATIN CAPITAL LETTER N
0x4E: [679,30,983,25,973,'522 492Q521 492 517 502T512 513Q542 444 542 333Q542 226 503 137L498 125L396 53Q308 -8 292 -17T260 -27Q226 -27 191 -9T136 29L145 39Q162 56 192 89L230 129L235 128H241Q276 57 332 57Q358 57 391 80Q403 89 409 100T422 143T428 227Q428 329 406 408T347 530T272 594T196 615Q152 615 135 596T118 558Q118 535 146 502T203 438T232 385Q232 357 195 322T122 265T83 243Q82 242 72 249T61 258L66 262Q72 265 82 273T103 292Q125 314 125 333Q125 351 101 376T51 432T26 492Q26 549 108 614T290 679Q326 679 335 678Q353 675 370 670T400 658T425 642T445 625T463 606T477 588T487 571T495 556T500 543L504 535L523 553Q553 581 569 595T619 632T686 667T757 678Q778 678 793 675T819 664T833 651T844 633T852 617Q884 548 910 548H916Q938 548 962 556L967 542Q967 540 947 531Q909 509 883 492T847 467T838 458Q825 419 825 328Q825 234 833 191T858 121Q875 94 892 77Q898 71 907 71Q912 71 928 76T957 87T971 91L972 88Q972 84 972 81L973 73L957 63Q891 21 806 -23L794 -30L783 -14Q766 13 728 60L713 79V372L724 384Q743 406 765 427T800 460L813 471Q809 472 806 472Q783 479 766 503T741 551T715 594T672 614Q644 614 622 595Q597 576 572 550T534 508L522 492'],
// LATIN CAPITAL LETTER O
0x4F: [726,30,976,12,881,'254 595Q269 583 269 581L262 577Q256 573 247 566T228 549T212 527T205 502Q205 480 266 386T328 277Q328 234 239 150L221 134L231 133Q264 131 376 99T516 62Q567 50 604 50Q614 50 626 52Q643 57 662 71T703 115T739 198T753 323Q753 454 692 517Q652 555 584 565T382 577Q365 577 357 577H308L300 591L292 606Q292 608 342 665L392 724L403 725Q406 725 411 726H416L417 725L412 715Q408 705 408 698Q408 684 423 679Q431 677 516 672T663 655Q757 634 806 593T873 463Q881 421 881 380Q881 340 874 306Q859 223 809 147Q801 134 789 124Q595 -30 456 -30Q395 -30 289 3T147 36Q134 36 121 33T98 26T76 15T59 4T44 -8T32 -17L22 -7L12 4L56 59L100 114L116 118Q217 142 217 199Q217 230 185 276T120 365T87 430Q87 435 109 464T172 534T254 595'],
// LATIN CAPITAL LETTER P
0x50: [688,223,977,33,943,'247 398Q247 372 206 334T126 272T83 247Q82 247 72 253T61 261Q60 261 61 262T66 265Q127 306 127 343Q127 364 63 430Q42 451 38 458T33 480V490V497Q33 526 63 567Q112 632 170 660T282 688Q341 688 384 667Q454 633 482 566Q483 565 484 566T496 574Q562 623 630 653Q699 681 751 681Q778 681 797 673Q818 662 830 609Q835 580 843 564Q863 524 895 524H901Q917 524 932 528Q936 522 938 518T942 513T942 511Q873 480 836 454Q789 423 789 395Q789 362 834 298T880 200Q880 170 867 145T820 81Q733 -20 647 -20Q581 -20 499 21V9Q499 -16 502 -53T509 -116L512 -141L370 -223L357 -216Q344 -209 344 -208L348 -196Q370 -113 370 33V52L355 58Q307 76 284 76Q258 76 228 60T183 29T141 -11Q137 -7 133 -2L126 7L134 18Q181 89 210 121T278 170Q304 179 328 179Q336 179 358 177L370 175Q368 268 367 359Q367 416 363 434Q362 438 362 441Q348 527 302 574T203 621Q169 621 148 599T127 557Q127 535 187 476T247 398ZM673 315Q673 357 786 442Q786 443 776 444T750 449T727 462Q719 471 716 484V496Q715 507 715 515Q715 571 698 588Q680 611 643 611Q592 611 547 571Q534 558 511 522L499 505V139L543 123Q702 64 744 64Q770 64 781 79T793 112Q793 143 733 217T673 315'],
// LATIN CAPITAL LETTER Q
0x51: [726,83,976,12,918,'254 595Q269 583 269 581L262 577Q256 573 247 566T228 549T212 527T205 502Q205 480 266 386T328 277Q328 234 239 150L221 134L231 133Q264 131 376 99T516 62Q567 50 604 50Q614 50 626 52Q643 57 662 71T703 115T739 198T753 323Q753 454 692 517Q652 555 584 565T382 577Q365 577 357 577H308L300 591L292 606Q292 608 342 665L392 724L403 725Q406 725 411 726H416L417 725L412 715Q408 705 408 698Q408 684 423 679Q431 677 516 672T663 655Q757 634 806 593T873 463Q881 421 881 380Q881 340 874 306Q864 250 838 196T791 126Q748 93 733 82L715 69Q714 68 723 60T748 40T774 23Q806 2 832 2Q849 2 870 6T904 14L917 17Q917 12 918 6V-3L882 -22Q806 -60 778 -73L755 -83Q640 -36 596 -7L586 0L576 -4Q513 -30 457 -30Q394 -30 289 2T149 35Q119 35 93 22T52 -4T36 -17T24 -7T12 4L56 59L100 114L116 118Q217 142 217 199Q217 230 185 276T120 365T87 430Q87 435 109 464T172 534T254 595'],
// LATIN CAPITAL LETTER R
0x52: [688,28,978,31,978,'31 498Q34 541 76 586T176 659T279 688H290Q377 688 429 653T506 569L511 558L526 572Q620 663 707 682Q722 685 737 685Q781 685 804 665T830 619T838 565T854 525Q866 511 897 511Q917 511 925 513L937 515Q938 515 941 509T944 501T925 493T870 470T803 438Q735 406 735 401Q735 400 741 399T767 390T814 374L828 367L829 307Q829 233 833 202T852 144Q873 109 896 90Q906 82 928 82T976 95V92Q976 88 978 72L807 -28Q768 39 733 87L718 108V149Q718 230 714 257T693 298Q654 333 580 333Q524 333 520 329Q520 300 489 224T443 133Q441 131 333 53T223 -27Q221 -26 204 -11T169 16T136 28Q110 28 66 -8L56 -16Q52 -13 40 -1L48 7Q165 124 211 124Q232 124 287 77L298 67Q309 73 337 97Q397 150 397 347Q397 419 379 474T330 560T269 604T207 619Q177 619 152 601T126 563Q126 540 185 479T244 387Q240 336 160 289Q144 278 98 255L80 246L62 261L79 272Q96 283 113 301T130 337Q130 353 115 373T81 410T47 451T31 498ZM524 358Q537 358 657 405T777 457Q777 459 768 459Q749 462 738 474T723 499T714 539Q706 585 697 599Q681 618 657 618Q632 618 597 595T532 515L525 502L524 441Q524 375 523 369Q523 358 524 358'],
// LATIN CAPITAL LETTER S
0x53: [685,31,978,82,905,'457 -31Q356 -31 272 6T135 120T82 304Q82 372 106 430T170 527T241 588T305 626Q341 643 386 657T460 678T495 685T554 660T674 609T778 584Q800 584 818 591T848 610T866 633T878 651T883 659L893 649L901 639Q879 574 803 532T666 490Q661 490 657 490T650 491T641 492T633 495T622 500T610 505T595 513T577 522T554 533T527 547Q436 594 415 602Q393 608 374 608Q303 608 253 545T202 386Q202 229 307 135T568 41Q674 41 748 85T822 198Q822 244 779 283T639 322Q595 322 499 303T383 283Q358 283 335 290T291 318T270 374Q270 418 313 460T424 510H431L435 505L440 500Q425 496 403 475T380 427Q380 382 431 373Q437 372 475 372Q543 372 626 388T742 404Q831 404 868 362T905 260Q905 182 831 108Q692 -31 457 -31'],
// LATIN CAPITAL LETTER T
0x54: [686,30,790,30,802,'666 641Q737 641 794 686L802 662Q790 648 734 596L677 541L664 538Q630 528 583 528Q540 528 482 537L461 541Q402 512 402 456Q402 427 439 387T512 311T549 253Q549 220 455 139L440 126Q541 75 586 75Q600 75 619 80T654 94T685 110T709 124T719 130Q722 125 725 119L730 108Q700 72 568 -18Q551 -30 542 -30Q495 -30 404 6T270 42H263Q213 42 142 -11L131 -19L129 -8Q126 1 126 4Q218 84 301 126L316 134H406L413 142Q436 165 436 189Q436 202 421 221T364 281Q336 307 318 328T296 356T283 381L290 394Q338 478 410 540Q419 549 417 549Q415 550 369 558T268 575T195 584Q153 584 127 567T100 523Q100 499 116 479T151 447T170 433Q170 429 171 428Q171 427 131 394T88 359Q82 363 73 370T47 403T31 457Q31 513 79 565T197 648T332 679Q369 679 490 660T666 641'],
// LATIN CAPITAL LETTER U
0x55: [688,39,851,18,871,'273 244Q273 281 244 331T186 428T155 502Q155 524 165 536Q239 634 333 688Q338 684 345 680L356 672L344 664Q310 642 295 624T280 582Q280 550 303 505T348 407T371 300Q371 270 362 248L247 123L358 92Q452 64 484 64Q507 64 523 72Q553 87 573 109Q583 121 586 146T593 283Q594 303 594 344Q594 401 591 461T584 558L581 595Q598 600 623 611T672 634T719 659T754 678L768 686Q770 686 784 673L782 670Q781 668 777 664T768 655Q747 635 738 616T721 535T714 359Q714 205 723 176Q727 164 744 133T771 89Q780 75 804 75Q814 75 853 87L867 92L871 73L671 -39L654 -10Q636 20 619 50T600 83Q600 84 589 75T539 34Q478 -16 475 -19Q469 -22 449 -28T414 -34Q410 -34 394 -32Q356 -28 282 -2L237 15Q169 38 126 38Q106 38 85 27T51 4T37 -8T27 -1T18 8Q18 10 70 63T124 116Q154 123 176 131T223 154T260 191T273 244'],
// LATIN CAPITAL LETTER V
0x56: [685,29,982,25,966,'133 343Q133 360 79 416T25 496Q25 523 58 563T118 624Q197 685 293 685Q331 685 339 684Q453 665 489 558L493 546Q521 570 553 596T640 653T725 684Q753 684 783 672T844 641T889 618Q895 616 912 616Q924 616 936 617T956 620T965 622T966 612V604L952 595Q924 576 895 549Q864 517 856 496T847 448V434Q847 395 848 388L859 323Q874 241 874 212Q874 142 830 96Q796 62 724 14Q661 -29 603 -29Q555 -29 421 28T242 86Q182 86 110 31Q105 28 102 26T99 25Q88 36 88 42Q95 54 222 142Q252 163 262 165Q319 183 344 218Q378 266 378 377Q378 444 362 494T319 571T266 610T212 623Q181 623 156 603T131 562Q131 539 154 512T206 458T243 416Q246 409 246 399Q246 387 242 377T225 351T178 311T94 259L79 251Q72 256 68 261T62 268L61 270L70 277Q131 318 133 343ZM822 526Q778 531 719 564T628 597Q611 597 579 574Q543 543 513 506L505 495L506 473Q506 469 506 461T507 449Q507 348 467 271L462 261L404 218L348 174Q349 173 356 173Q384 169 450 144L546 105Q665 56 708 56Q737 56 746 72T756 118Q756 129 755 135L741 219Q725 314 725 334V344Q725 416 736 431Q748 450 815 510L832 526H822'],
// LATIN CAPITAL LETTER W
0x57: [683,30,1235,26,1240,'133 317T133 338T80 413T26 496Q26 532 83 591Q100 608 111 616T151 644T219 672T304 682Q381 682 434 646T506 564L510 557Q513 557 534 573L677 665L707 683L790 561L803 572Q933 682 1001 682Q1037 682 1098 650T1193 616Q1208 616 1222 619L1235 622Q1239 622 1239 616Q1239 611 1240 609Q1240 608 1206 577T1138 503T1104 430Q1104 409 1123 330T1142 208Q1142 183 1136 147Q1127 118 1117 106Q1114 103 1031 48T935 -14Q930 -18 908 -22T862 -27Q826 -27 759 -6T647 26Q597 38 578 38Q573 38 561 33T533 20T505 4T480 -10L469 -16L452 -26L439 -28Q423 -30 411 -30Q358 -30 279 7T169 45Q125 45 58 -5L47 -14L41 -4L35 8Q35 11 56 29T113 75T181 125L200 139H217Q279 143 320 180T377 270T394 393Q394 453 378 498T334 568T277 605T213 617Q177 617 155 607Q140 600 130 587T119 560Q119 545 137 522T177 479T217 434T236 393Q236 324 98 251L89 246L76 253L63 261Q91 275 112 296ZM1088 526Q1066 526 1004 556T909 586Q863 586 816 539L802 526L804 514Q814 461 814 411Q814 319 781 238Q772 214 760 198T730 165T702 136L715 133Q759 122 848 90T973 57Q1003 57 1017 80Q1022 93 1022 116Q1022 152 1003 241T983 377V391Q983 405 985 409T1002 429Q1019 450 1045 475T1090 514L1107 528Q1104 527 1102 527T1096 527T1088 526ZM699 358Q699 391 696 419T688 467T675 503T660 530T642 550T626 563T608 574T593 582Q581 575 559 554T524 512Q523 510 523 477Q523 315 444 218L435 207L368 169Q301 132 301 131Q307 128 315 125L377 99Q476 57 515 57Q534 57 608 94L627 102L636 111Q699 187 699 358'],
// LATIN CAPITAL LETTER X
0x58: [681,35,849,32,835,'273 679Q354 674 408 633T477 525L484 533Q496 548 524 574T571 615Q594 633 625 649T675 673T699 681Q724 632 747 607Q754 601 756 599T765 594T777 591T794 590Q818 590 834 594V585L835 577L704 513L693 518Q657 534 631 560T597 599Q596 601 581 584Q495 490 489 379V366H562L681 369Q682 369 679 366T668 355T651 341L620 314H485V295Q490 190 543 125T686 60Q720 60 789 88L801 93V89Q798 83 798 66Q781 59 685 -10L665 -25L634 -30Q596 -35 594 -35Q570 -35 536 -23T477 19Q461 37 445 67T418 118L409 138Q401 131 388 120T340 79T273 28T206 -12T151 -31Q129 -31 90 -12T32 22L113 101Q114 101 120 96T136 84T160 69T189 56T221 51Q256 51 305 90Q376 149 376 301V315H293Q276 315 251 315T210 314T190 313L168 312Q168 313 200 340L231 368L238 367Q275 367 311 366H378V387Q376 470 355 512T291 572Q274 579 252 579Q223 579 197 568T156 544T131 519T117 508Q112 512 108 518L99 527L117 545Q177 604 255 665L273 679'],
// LATIN CAPITAL LETTER Y
0x59: [688,214,984,34,878,'34 496Q34 518 53 549T107 610T195 661T310 682Q357 682 398 663T460 611Q467 600 475 583T489 554T495 542Q495 544 531 570T617 629T700 676L724 688Q742 670 756 657T784 635T806 621T830 606T856 592Q878 416 878 340Q878 154 805 -3L798 -20L779 -40Q706 -113 613 -163T421 -214Q359 -214 317 -196T256 -160L306 -63L313 -64L320 -66L326 -79Q337 -104 349 -120T392 -151T470 -166Q576 -166 644 -101Q750 7 750 292Q750 426 721 495T617 565H611Q563 565 513 509L506 501L508 493Q508 490 509 475T510 445Q510 319 458 236L451 225L436 216Q406 198 365 169T318 134L332 127Q336 126 397 103T489 80H493Q527 80 593 129L604 137L607 127Q610 119 610 116Q610 114 592 95T543 46T484 -4Q450 -27 446 -27Q441 -27 402 -18Q365 -9 290 20T188 50Q135 50 64 -7L52 -17L43 -7L34 2L51 19Q118 87 177 132L192 143H215Q259 145 289 155T335 184T355 214T366 245Q382 306 382 388Q382 426 381 436Q368 520 318 570T214 621Q184 621 165 608T142 583T137 562Q137 541 163 508L201 469Q245 425 251 408Q253 403 253 398Q253 383 240 366T212 335T161 295Q128 271 99 253L89 247L77 256L65 266L76 273Q125 301 134 329Q136 334 136 342Q136 357 124 372T88 410T49 455Q34 479 34 496'],
// LATIN CAPITAL LETTER Z
0x5A: [677,148,711,-5,624,'278 601Q242 601 212 591T167 570T121 533Q114 528 111 525L93 550Q223 661 244 667Q299 677 356 677Q415 677 456 666T515 634T541 596T549 555Q549 513 529 478T480 421T424 388T377 372Q365 370 365 367Q365 365 389 365T450 358T523 337T588 282T623 183Q624 177 624 161Q624 20 524 -60Q415 -148 285 -148Q242 -148 213 -139Q181 -131 159 -109Q136 -87 127 -56T114 6T104 49Q94 69 57 69Q38 69 13 58L1 53Q1 55 0 59T-3 68T-4 76Q78 130 138 142Q150 144 162 144Q213 144 227 120T242 31Q242 -30 263 -66T345 -102Q397 -102 444 -52T491 107Q491 172 471 211T428 265Q392 288 306 288Q269 288 233 284L218 282Q208 289 208 291L229 324L251 359Q250 360 248 360Q239 360 248 371L256 381H273Q344 385 378 409T413 495Q413 537 384 569T278 601'],
// LEFT SQUARE BRACKET
0x5B: [740,130,257,36,226,'226 711T225 711T86 699V-93H89Q94 -93 157 -96T223 -100H226V-119H223Q134 -119 42 -130H36V740H42Q61 738 156 736H226V723Q226 711 225 711'],
// RIGHT SQUARE BRACKET
0x5D: [738,132,257,14,208,'69 732Q116 733 146 734T184 736T197 737T206 738H208V-132Q190 -129 160 -127T99 -125T66 -124H14V-103H19Q20 -103 84 -98T152 -92H158V699H151Q148 700 85 703T18 708H14V732H69'],
// CIRCUMFLEX ACCENT
0x5E: [734,-452,590,1,584,'1 463T1 464T148 599T296 734Q584 486 584 485L561 472Q538 459 537 461Q296 672 293 672L161 563Q133 539 97 509T44 466L28 452Q27 452 14 457'],
// LATIN SMALL LETTER A
0x61: [472,32,603,80,586,'80 129V151Q80 241 99 363Q99 367 111 372T172 401T285 465L297 472Q340 455 405 443L423 440L455 453Q486 467 489 467L497 461L494 451Q480 390 480 292V283Q480 207 483 155L484 143L535 80L558 90L582 99Q586 95 586 83Q586 81 513 25L443 -29Q410 16 386 40L371 55V61Q371 63 371 67T370 74V80L278 25Q186 -29 184 -31Q182 -32 160 -12T112 35T80 75V129ZM359 366Q334 366 300 371T243 382L221 388Q218 388 212 375T200 323T194 228Q194 191 197 152L198 139L217 120Q245 92 269 74L279 66L304 78Q338 95 349 100L369 110V152Q368 164 368 210T367 275Q367 358 366 361V366H359'],
// LATIN SMALL LETTER B
0x62: [690,32,590,86,504,'99 398Q99 610 86 662Q86 665 95 669T106 674L108 669Q109 664 112 654T119 635Q122 626 125 616T130 601L131 596Q214 649 273 678Q295 690 298 690Q299 690 304 688T313 682L317 679Q275 653 240 612Q210 569 210 469V459Q210 450 210 432T211 406L212 378L285 425Q301 435 321 447T350 466L360 472Q360 473 361 473T368 471T401 456T465 429L501 414V408Q504 386 504 309Q504 255 500 203T491 125T485 97Q485 95 445 74T343 23T237 -24L214 -32Q197 -22 165 3T109 49T87 73Q99 169 99 398ZM386 251Q386 320 380 347V350L305 374L282 382L214 348L213 274Q213 184 214 165V131L230 119Q288 76 349 54Q386 137 386 251'],
// LATIN SMALL LETTER C
0x63: [473,26,464,87,424,'227 393Q215 393 210 351T205 269Q205 161 213 153Q220 145 244 125T290 88L312 72L365 92Q414 113 418 113V93L365 60Q255 -9 221 -26L211 -18Q158 21 91 88L90 107Q87 167 87 225Q87 267 90 302T96 351T100 366L295 473L311 470Q340 464 368 454T410 437T424 429L347 334L342 333H337L325 342Q299 363 271 378T228 393H227'],
// LATIN SMALL LETTER D
0x64: [632,28,589,-1,511,'88 117Q88 177 91 231T97 310T102 341Q102 343 118 357T168 397T239 447L257 459L268 454L278 449Q242 416 238 412L219 394Q219 391 216 378T211 349T206 307T203 249Q203 211 206 166L208 148Q224 132 261 108T333 70Q341 66 342 67T350 79Q393 157 393 302Q393 368 388 406V411L371 424Q199 558 101 558Q69 558 28 545L18 542L8 549L-1 557L24 569Q61 587 147 621L177 632Q179 631 194 627T216 621T240 613T269 602T302 589T340 571T382 549T431 522T484 488Q504 475 504 472Q511 449 511 365Q511 248 474 129L468 108L451 96Q427 77 347 28T254 -28Q235 -20 174 21T89 86L88 117'],
// LATIN SMALL LETTER E
0x65: [471,27,472,81,428,'309 69Q391 98 416 108Q418 106 422 100T425 92Q419 86 326 30T229 -27Q228 -27 207 -13T154 27T97 76L85 87L84 106Q81 152 81 194Q81 295 93 359L95 369L286 471L313 449Q376 397 414 372L428 362Q428 360 375 318L188 181V170Q188 156 189 153V148L203 138Q228 119 266 94T309 69ZM209 389Q208 388 204 366T194 307T187 244Q187 225 188 225T201 233L245 261Q283 284 291 291Q324 313 324 316L296 334Q280 343 259 357T224 380L210 390Q209 390 209 389'],
// LATIN SMALL LETTER F
0x66: [687,222,388,35,372,'128 400Q127 401 121 422T108 478T99 540V555L111 569Q135 597 165 626T214 671T235 687L249 678Q263 668 282 659T315 650Q335 650 362 666L372 654L286 569H271Q205 576 173 586V583Q173 558 208 492T252 401Q253 399 310 399T367 398L332 355H254V311Q251 160 235 16Q230 -28 226 -36Q225 -38 221 -45Q171 -140 121 -211L113 -222H104Q94 -222 94 -220Q94 -215 105 -187L121 -145Q139 -80 139 35V93Q139 222 135 314L134 354Q134 355 84 355H35L84 399H106Q128 399 128 400'],
// LATIN SMALL LETTER G
0x67: [472,208,595,17,541,'92 71Q92 74 91 88T88 128T86 183Q86 230 91 275T102 342T109 366Q115 372 207 422T305 472Q407 426 431 426Q435 426 476 445L519 465L525 463L532 461Q497 392 497 268Q496 255 496 233Q496 179 516 92T539 -10L541 -22L526 -38Q441 -126 355 -194L339 -206L327 -207Q324 -207 319 -207T310 -208Q242 -208 171 -179T73 -131L56 -141Q40 -150 38 -150Q17 -140 17 -137Q17 -136 18 -136T98 -79L176 -23Q174 -21 134 24T92 71ZM226 393Q224 393 221 372T214 312T210 235Q210 182 214 144L215 132L230 118Q281 70 301 66Q304 66 331 80T373 105L384 112L383 165Q383 224 387 309Q387 314 387 319T387 329T388 336T388 341V343Q388 344 381 344T339 354T249 384Q246 385 243 386T236 389T231 391T228 392L226 393ZM414 -80Q414 -64 411 -43T403 -1T394 37T386 66T382 79Q381 79 286 15T189 -52Q312 -125 365 -125Q397 -125 405 -115T414 -80'],
// LATIN SMALL LETTER H
0x68: [687,207,615,89,507,'95 661Q95 662 103 667T113 672L126 634L137 596L147 602Q235 656 275 677L292 687L303 680Q305 679 307 677T312 674L313 672L310 670Q307 669 301 667T289 660T274 649T259 634Q250 622 244 611T233 585T226 560T222 528T221 497T220 456T219 413V377L232 384Q244 391 271 409T339 455L362 471L383 461Q425 440 491 415L504 410V406Q507 399 507 269Q507 76 486 -21Q485 -30 483 -33T461 -57Q382 -139 299 -207L281 -197L263 -186L266 -185Q268 -184 280 -177T312 -155Q344 -130 353 -116Q394 -59 394 117Q394 162 391 216T386 301T382 335Q382 338 365 346T323 364T281 376L250 362Q220 347 219 347Q213 336 213 232Q213 177 217 144L218 128L224 119Q244 92 263 71L272 60Q206 21 157 -24Q156 -24 151 -16T132 11T98 52L89 62L91 103Q104 289 104 436Q104 471 103 506T101 568T99 616T96 649L95 661'],
// LATIN SMALL LETTER I
0x69: [686,25,331,3,327,'73 613L164 686L184 666Q200 650 214 637T235 620T242 614T203 577T162 540Q158 540 122 570T73 613ZM92 58Q92 63 94 83T98 142T101 234Q101 318 97 358V366L59 387L40 379L21 371Q20 371 12 376T3 382L38 406Q78 431 125 466L138 477Q149 468 186 444L219 422V389Q215 324 215 247Q215 136 222 123Q226 113 238 98T258 83Q263 83 292 94L322 104Q322 103 324 97T327 89Q327 88 317 82T272 52T190 -7Q166 -25 164 -25L112 35Q92 55 92 58'],
// LATIN SMALL LETTER J
0x6A: [682,203,332,-19,238,'74 611L155 682Q172 666 186 655T208 636L235 614Q227 606 191 574L154 540L135 556Q101 582 84 601L74 611ZM10 377L144 477Q145 476 184 453T229 428L233 425V416Q238 346 238 252Q238 93 215 -16L213 -30L185 -57Q29 -203 19 -203Q17 -203 -19 -189L-9 -183Q52 -146 78 -116T114 -37Q120 31 120 192V237Q120 327 113 351T72 380L53 372Q34 362 32 364L10 377'],
// LATIN SMALL LETTER K
0x6B: [682,25,464,34,432,'106 72Q110 105 111 193T114 294V308H74L34 309L83 346H115V430Q114 591 106 652Q105 662 107 665T114 668T123 672Q125 672 139 635L152 597L154 598Q156 600 160 602T167 607Q193 625 226 644T279 672T302 682L312 676L321 670L312 665Q281 649 263 626T241 587T233 547Q232 541 231 530T230 510T230 501Q231 501 265 522T334 564T369 583L380 570Q428 509 428 481Q428 475 427 470T423 459T416 448T404 434T389 418T369 397T344 371L321 347L365 346H409L372 308H227V294Q227 272 230 208T234 138Q234 136 256 119T302 84L324 68L372 88Q421 108 422 108T432 90L421 83Q373 53 270 -5L234 -25L204 -1Q172 25 124 60L106 72ZM336 434Q336 452 327 472T308 503T297 514Q296 514 290 510T275 499T264 490Q230 458 230 358V346H247Q268 346 276 350T302 372Q328 398 335 423Q335 424 335 428T336 434'],
// LATIN SMALL LETTER L
0x6C: [681,24,337,100,312,'111 275Q111 406 108 518T104 650V657Q105 657 109 660T117 665T122 666L133 629L144 594L161 606Q218 642 272 670L294 681Q295 681 300 677T306 672L302 669Q298 666 292 662T278 651T263 637T251 621Q232 587 227 530T222 343Q222 226 230 125L231 112L244 98L258 83Q271 87 285 92L312 102V84Q297 72 231 24T163 -23L100 55Q110 141 111 275'],
// LATIN SMALL LETTER M
0x6D: [476,31,921,16,900,'115 203Q115 257 114 291T112 338T111 355Q111 357 93 370L75 384L54 375Q32 366 31 365Q27 365 16 378Q25 383 89 430L152 476Q175 453 228 420Q229 420 229 418T229 410T227 394L225 369Q279 400 315 425T363 461T376 471Q480 424 514 416V412Q514 411 514 404T513 392L511 376L520 382Q529 387 548 399T584 422Q599 432 618 444T648 463L657 469H658Q661 469 681 461T735 440T796 420Q803 418 803 416Q801 414 798 390T791 325T788 247Q788 220 790 172T794 123Q799 115 814 97T835 78H838Q841 78 867 89L895 101Q896 101 896 100T897 92T900 78L873 62Q810 23 761 -12L736 -30Q735 -30 729 -22T707 7T671 48L661 59Q674 93 674 207V219Q674 341 670 344Q655 353 591 372L576 376L544 364Q511 351 510 351Q507 349 507 224V132L535 95Q541 87 548 78T560 63L563 58Q563 57 504 15T444 -28L385 53L387 67Q396 114 396 206Q396 289 393 334Q393 346 390 348Q369 358 306 373Q301 373 265 361L228 349V335Q227 322 227 284Q227 206 231 157Q231 151 231 144T232 133V129Q232 125 259 90Q286 56 286 53Q287 53 284 51T273 43T258 31L173 -31L166 -20Q160 -11 145 7T119 38T108 59Q108 62 110 81T113 133T115 203'],
// LATIN SMALL LETTER N
0x6E: [473,28,654,5,608,'608 88Q572 65 535 37T477 -8T455 -25Q432 7 389 53L375 68L378 82Q386 160 386 195V221Q386 284 385 307L384 344Q352 359 306 373L286 379L213 353V273Q214 229 214 161V129L275 62L163 -28L150 -14Q136 0 121 16T91 44Q86 48 86 50Q95 83 96 148Q96 224 89 340L88 366L79 374Q69 384 67 385L64 388L55 383Q52 382 44 378T33 373L21 367L13 374Q5 379 5 381Q5 384 69 428L133 473Q135 473 147 464T179 443T215 424L214 400V376Q271 404 342 457L363 472Q363 473 364 473Q366 473 375 469T418 449T502 414L512 411V407Q502 330 502 217V197V132L523 109Q527 104 533 97T543 87T547 83L550 80L578 92Q603 103 604 103Q606 103 608 88'],
// LATIN SMALL LETTER O
0x6F: [482,35,609,107,515,'107 102Q107 178 112 242T123 334T129 362Q129 363 140 368T199 400T315 469L336 482L346 476Q409 439 498 414L514 410L515 389Q515 208 502 141Q494 101 491 94Q490 89 478 81Q430 51 375 23T288 -20T254 -34Q250 -34 200 -1T119 56L108 65L107 76V102ZM389 355Q367 358 346 363T309 372T282 381T264 388L257 390H256Q254 390 249 381T238 348T227 293Q226 280 226 237Q226 183 231 146L232 131L244 122Q285 91 323 74T374 57H377L380 68Q405 154 405 267Q405 315 401 349V354L389 355'],
// LATIN SMALL LETTER P
0x70: [557,207,604,-1,519,'66 435Q66 445 117 501T173 557Q174 557 183 555T193 551Q174 526 174 509Q174 496 190 472T233 428V386L377 482L399 471Q450 445 509 425Q519 421 519 420L518 419Q518 418 518 416T517 410Q517 405 518 381T519 335Q519 222 501 137Q492 84 489 84L473 75Q457 66 423 44T354 -6L338 -19L329 -13Q320 -8 313 -4T297 4T284 10T270 14T258 17T245 20T233 22V12L241 -161L214 -172Q187 -184 160 -195T131 -207Q127 -207 112 -202L113 -188Q113 -182 115 -77T118 31Q118 32 109 32Q63 27 23 0L10 -9Q5 -4 -1 8Q1 13 52 57T114 101H115L117 123Q117 141 117 230V359L110 367Q85 394 71 421Q66 433 66 435ZM384 83Q386 83 389 110T396 180T400 254Q400 294 395 339L394 349L379 355Q308 383 294 383Q290 383 263 372L234 360L233 245V130Q270 125 305 113T361 92T384 83'],
// LATIN SMALL LETTER Q
0x71: [485,211,596,87,515,'362 -196Q375 -92 375 47V78L282 24Q189 -29 188 -30Q187 -30 139 21T90 75Q87 84 87 158Q88 206 94 259T107 342L113 372L308 478L322 473Q374 452 421 444L433 442L503 485Q515 479 515 477Q485 378 485 56Q485 -100 494 -164V-171L381 -211L371 -207L362 -202V-196ZM280 72Q301 77 323 86T358 101T372 110Q372 268 377 346L378 358H374Q368 360 358 360T323 365T257 380L234 386Q231 386 229 379Q215 353 211 310T207 180Q207 152 208 150Q210 142 235 114T280 72'],
// LATIN SMALL LETTER R
0x72: [472,26,460,13,453,'23 367Q21 370 18 374T14 380L13 382L151 472L236 411L238 381L290 426Q298 432 307 439T322 452T333 461T342 467L344 469Q382 410 404 399Q410 397 416 397Q423 397 432 399T446 403L451 405Q453 405 453 399V393Q430 374 404 356T364 328T350 318L349 317Q321 320 276 356Q257 371 256 371Q253 374 249 366T242 351Q232 321 232 236Q232 214 232 205T232 182T233 162T235 148T238 137T242 129T249 120T257 114T268 105T281 95Q313 70 314 70L358 85Q377 92 389 96T402 100V90L403 80L229 -26L221 -18Q195 6 166 29T121 63T105 76T106 82T110 97T114 121T117 158T119 208Q119 269 114 329L113 341L103 350Q90 362 67 380L45 374L23 367'],
// LATIN SMALL LETTER S
0x73: [479,34,523,-24,481,'189 331Q190 304 196 282T207 252T214 244Q239 244 348 292L371 302L382 297Q398 290 415 279T433 265Q442 238 442 166Q442 103 423 45Q416 42 380 29T310 3T244 -26L227 -34Q139 40 73 40Q61 40 48 37T24 30T6 22T-8 14L-13 11Q-14 11 -18 18T-23 26T38 75T102 125Q107 128 146 131H153Q192 131 296 56Q318 40 318 43Q323 48 323 114Q323 157 321 177L319 194Q308 208 291 216T261 225Q239 225 160 185L123 167Q85 205 79 227Q78 230 78 304V377L171 428Q264 479 265 478Q268 478 287 465T334 440T384 427Q423 427 475 463L478 453Q481 446 481 442Q481 439 410 391L339 342H331Q309 345 277 361T222 391T198 406T195 399T191 372T189 331'],
// LATIN SMALL LETTER T
0x74: [648,27,393,43,406,'328 69Q401 102 403 102Q404 102 405 94T406 84Q406 83 318 28L230 -27Q223 -21 206 -5T171 25T132 54L124 60V71Q129 154 129 297V359H43L44 363Q44 365 44 367L45 369L48 372Q51 374 57 378T68 387L90 405H129V553L285 648Q304 641 306 640L260 598V592Q259 589 255 505T249 413V405H353V402Q353 399 328 379L303 360H245V319Q245 150 253 125Q257 115 276 101T311 78T328 69'],
// LATIN SMALL LETTER U
0x75: [472,32,589,9,603,'444 -31Q444 -29 384 66Q382 66 364 58T309 30T231 -17Q214 -29 212 -29L197 -20Q172 -4 140 11T88 34L68 42Q68 43 73 49T85 67T100 98T113 149T118 221Q118 272 105 332L100 356L58 383L23 365L9 379L76 425Q141 472 144 472Q144 471 183 443L221 414V404Q224 365 224 275V253Q224 159 196 113Q191 104 193 104Q203 104 285 72L308 62L374 89L375 106Q375 266 373 340Q373 364 371 396V424L430 445L491 467Q493 467 499 463T505 457Q505 456 503 442Q488 335 488 187V158L529 81L534 80Q541 80 568 90L598 101Q605 94 602 87L524 27Q445 -32 444 -31'],
// LATIN SMALL LETTER V
0x76: [546,27,604,56,507,'95 67Q104 80 104 193Q104 261 100 321L98 355L91 363Q56 402 56 421Q56 441 82 472T132 524T159 546Q174 542 175 542Q159 520 159 501Q159 481 205 432L221 415L220 401Q219 394 219 387L288 429Q309 441 325 451T347 465T358 472T365 476L504 415V409Q504 408 505 374T507 318Q507 155 474 91L469 80L343 26Q314 14 281 0T232 -20L216 -27L202 -15Q192 -5 152 28Q141 35 126 45T103 60T95 67ZM386 349Q302 389 287 389Q271 383 253 375L220 361V136Q226 120 256 100T312 68T342 56Q355 56 360 68Q389 134 389 258Q389 310 386 341V349'],
// LATIN SMALL LETTER W
0x77: [549,32,918,55,815,'90 58T90 59T92 64T97 78T102 105T107 150T109 218Q109 290 103 350V356L83 377Q55 407 55 425Q55 445 138 528Q158 549 162 549L164 548Q165 548 167 548T170 547L175 546L172 540Q168 533 165 523T161 502Q161 479 216 430L229 419V382Q232 382 366 471Q407 445 500 408L511 404V387L512 370L595 420Q678 469 679 469L693 462Q756 431 795 417L815 409L814 380Q812 187 782 96Q774 71 766 62T744 48T684 25T577 -23L557 -32L546 -26Q536 -19 519 -10T481 10T436 31T393 47Q384 50 380 50Q380 52 381 58T384 77T387 104Q391 174 391 256V292L390 333L377 340Q350 357 304 373L294 376L227 355V348Q224 322 224 243Q228 117 232 112L235 108Q238 103 245 95T257 80L281 50Q281 49 227 10T172 -29L159 -13Q133 19 116 36T94 56ZM652 64Q658 64 667 84T685 162T697 303V336L686 341Q653 356 619 367L591 376Q590 376 553 361T514 344T512 324T510 275T508 221Q508 167 510 152T521 126Q537 112 590 88T652 64'],
// LATIN SMALL LETTER X
0x78: [471,188,459,8,441,'8 -90Q8 -68 13 -63Q13 -56 53 -8T120 63L128 71L129 85Q133 120 134 182Q134 308 131 331T106 365Q100 367 97 369L75 381L35 365L20 377Q20 378 47 397T110 440T161 471L253 413V396Q253 378 254 378L309 422Q364 466 365 466Q365 467 366 466T370 461T376 454Q403 419 426 396L441 380L438 377Q438 376 433 372T420 359T404 344L372 314Q351 320 338 327T310 344T277 364Q261 364 252 316Q251 306 251 235Q251 136 255 129Q257 127 258 124T268 113T298 92Q334 68 335 68Q340 70 349 73T377 84T408 95T421 99Q422 99 422 90L423 82L334 26Q246 -28 243 -28L200 8Q156 43 148 43Q144 43 130 36T99 9T83 -36Q83 -67 121 -89T198 -118L237 -124V-129L238 -133L193 -160Q183 -166 171 -173T152 -184L146 -188Q140 -187 131 -185T98 -173T56 -154T23 -127T8 -90'],
// LATIN SMALL LETTER Y
0x79: [557,221,589,60,512,'280 53Q272 47 246 27T199 -10T176 -27L167 -18Q137 17 107 44L90 60L93 71Q108 130 109 290V331Q109 339 109 344T108 353T107 359T105 364T102 369T97 374T91 381Q60 412 60 432Q60 448 86 479T138 534L164 557Q168 553 180 553Q163 532 163 511Q165 491 186 468Q206 443 231 423V404L232 385L371 477L389 468Q439 441 498 418L512 412V386Q512 360 507 190T500 14Q488 -26 445 -67Q401 -111 355 -148T282 -203T249 -221Q247 -220 230 -210T213 -199T229 -191T269 -172T306 -151Q361 -120 379 14Q391 92 391 182Q391 218 386 305Q384 339 380 341Q363 353 330 366T288 379Q282 379 258 368L230 356V181V141Q230 127 232 120T236 108T251 89T275 59L280 53'],
// LATIN SMALL LETTER Z
0x7A: [471,214,461,-7,378,'153 371Q141 371 126 365T100 354T78 340L65 331L57 338L50 346L62 356Q133 419 222 471Q274 453 306 422T338 366Q338 356 329 346T283 301L243 264L262 257Q298 246 361 214Q378 154 378 73Q378 33 371 -9T356 -74T345 -104Q340 -106 267 -160L191 -214H177Q60 -214 13 -150Q-7 -122 -7 -115Q-7 -112 19 -77T106 25T241 149Q241 152 227 158T181 173T109 185V190L108 194L158 229Q212 267 223 278T234 306Q234 329 208 350T153 371ZM258 1Q258 42 257 68T254 105T252 118Q235 105 210 85T144 22T102 -45Q102 -79 146 -106T234 -133H238Q248 -128 254 -80Q258 -58 258 1']
}
);
MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Fraktur/Bold/BasicLatin.js");
|
PypiClean
|
/aws_cost_optimization_8-0.2.7-py3-none-any.whl/aws_cost_optimization_8/_rds_costing.py
|
import logging
from aws_cost_optimization_8.utils import *
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
class rds:
def rds_upgrades(self) -> list:
"""
:return: list of cost saving recommendations
"""
logger.info(" ---Inside aws_client :: rds_upgrades()--- ")
recommendations = []
rds_instances = list_rds_instances(self.session, self.regions)
for region, rds_list in rds_instances.items():
resolved_region = self.aws_region_map[region]
for instance in rds_list:
instance_type = instance['DBInstanceClass']
instance_family = instance_type.split('.')[1]
Filters = [
{'Type': 'TERM_MATCH', 'Field': 'instanceType', 'Value': instance_type},
{'Type': 'TERM_MATCH', 'Field': 'databaseEngine', 'Value': instance['Engine']},
{'Type': 'TERM_MATCH', 'Field': 'deploymentOption',
'Value': 'Multi-AZ' if instance['MultiAZ'] else 'Single-AZ'},
{'Type': 'TERM_MATCH', 'Field': 'productFamily', 'Value': 'Database Instance'},
{'Type': 'TERM_MATCH', 'Field': 'location', 'Value': resolved_region}
]
def evaluate(frm: str, to: str):
"""Builds an upgrade recommendation by pricing the current instance class against its `to` replacement."""
# both lookups use the 'rds' pricing parser, matching delete_rds_costing below
price_from = get_pricing(
self.session, region, 'AmazonRDS',
Filters,
service_name='rds'
)
logger.debug(price_from)
Filters[0]['Value'] = instance_type.replace(frm, to)
price_to = get_pricing(
self.session, region, 'AmazonRDS', Filters,
service_name='rds'
)
logger.debug(price_to)
current_cost = float(price_from[instance_type]) * 730
effective_cost = float(price_to[instance_type.replace(frm, to)]) * 730
recommendation = {
'Region': region,
'Instance Id': instance['DBInstanceIdentifier'],
'Instance Type': instance_type,
'Upgrade To': instance_type.replace(frm, to),
'Current Cost': current_cost,
'Effective Cost': effective_cost,
'Savings': current_cost - effective_cost,
'Savings %': ((current_cost - effective_cost) / current_cost) * 100
}
return recommendation
match instance_family:
case 'm3':
recommendations.append(evaluate('m3', 'm5'))
case 'r3':
recommendations.append(evaluate('r3', 'r5'))
case 'm1':
recommendations.append(evaluate('m1', 't2'))
return recommendations
def delete_rds_costing(self, data: dict) -> dict:
"""
:param data:
:return:
"""
logger.info(" ---Inside aws_client :: rds_costing()--- ")
region = data['Metadata']['Region']
resolved_region = self.aws_region_map[region]
filters = [
{
'Type': 'TERM_MATCH',
'Field': 'productFamily',
'Value': 'Database Instance'
},
{
'Type': 'TERM_MATCH',
'Field': 'location',
'Value': resolved_region
},
{
'Type': 'TERM_MATCH',
'Field': 'instanceType',
'Value': data['Metadata']['DBInstanceClass']
},
{
'Type': 'TERM_MATCH',
'Field': 'databaseEngine',
'Value': data['Metadata']['Engine']
},
{
'Type': 'TERM_MATCH',
'Field': 'deploymentOption',
'Value': 'Multi-AZ' if data['Metadata']['MultiAZ'] else 'Single-AZ'
}
]
price = get_pricing(self.session, data['Metadata']['Region'], 'AmazonRDS', filters, service_name='rds')
# print(price)
current_cost = float(price[data['Metadata']['DBInstanceClass']]) * 730
effective_cost = 0
recommendation = {
'Current Cost': current_cost,
'Effective Cost': effective_cost,
'Savings': current_cost - effective_cost,
'Savings %': ((current_cost - effective_cost) / current_cost) * 100
}
return recommendation
# returns the costing details of rds general purpose ssd
def rds_gp_ssd(self, data: dict) -> dict:
"""
:param data:
:return:
"""
logger.info(" ---Inside aws_client :: rds_gp_ssd()--- ")
region = data['Metadata']['Region']
resolved_region = self.aws_region_map[region]
filters = lambda family, vtype: [
{
'Type': 'TERM_MATCH',
'Field': 'productFamily',
'Value': family
},
{
'Type': 'TERM_MATCH',
'Field': 'location',
'Value': resolved_region
},
{
'Type': 'TERM_MATCH',
'Field': 'volumeType',
'Value': vtype
},
{
'Type': 'TERM_MATCH',
'Field': 'databaseEdition',
'Value': 'Standard'
}
]
price_instance = get_pricing(self.session, region, 'AmazonRDS', filters('Database Storage', 'Provisioned IOPS (SSD)'), service_name='rds_storage')
price_iops = get_pricing(self.session, region, 'AmazonRDS', filters('Provisioned IOPS', 'Provisioned IOPS (SSD)'), service_name='rds_storage')
current_cost = float(price_instance['io1']) + float(price_iops['io1'])
# print(current_cost)
gp2_price = get_pricing(self.session, region, 'AmazonRDS', filters('Database Storage', 'General Purpose (SSD)'), service_name='rds_storage')
# print(gp2_price)
effective_cost = float(gp2_price['gp2'])
recommendation = {
'Current Cost': current_cost,
'Effective Cost': effective_cost,
'Savings': current_cost - effective_cost,
'Savings %': ((current_cost - effective_cost) / current_cost) * 100
}
return recommendation
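# --- Hypothetical usage sketch (not part of the original module) ---
# `rds` is written as a mixin: it expects the consuming client to supply the
# `session`, `regions` and `aws_region_map` attributes used above. The names in
# this sketch are assumptions, not the package's documented API.
#
#     import boto3
#     from aws_cost_optimization_8._rds_costing import rds
#
#     class Client(rds):
#         def __init__(self):
#             self.session = boto3.Session()
#             self.regions = ['us-east-1']
#             self.aws_region_map = {'us-east-1': 'US East (N. Virginia)'}
#
#     for rec in Client().rds_upgrades():
#         print(rec['Instance Id'], rec['Upgrade To'], rec['Savings %'])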
|
PypiClean
|
/pykaldi-0.2.1-cp37-cp37m-manylinux_2_27_x86_64.whl/kaldi/lat/functions.py
|
import logging
from . import _confidence
from . import _determinize_lattice_pruned as _dlp
from . import _lattice_functions as _lat_fun
from ._confidence import *
from ._compose_lattice_pruned import *
from ._determinize_lattice_pruned import *
from ._lattice_functions import *
from ._minimize_lattice import *
from ._push_lattice import *
from .. import fstext as _fst
from ..fstext import _api
def sentence_level_confidence(lat):
"""Computes sentence level confidence scores.
If input is a compact lattice, this function requires that distinct paths in
`lat` have distinct word sequences; this will automatically be the case if
`lat` was generated by a decoder, since a deterministic FST has this
property. If input is a state-level lattice, it is first determinized, but
this is done in a "smart" way so that only paths needed for this operation
are generated.
This function assumes that any acoustic scaling you want to apply has
already been applied.
The output consists of the following. `confidence` is the score difference
between the best path and the second-best path in the lattice (a positive
number), or zero if lattice was equivalent to the empty FST (no successful
paths), or infinity if there was only one path in the lattice. `num_paths`
is a number in `{0, 1, 2}` saying how many n-best paths (up to two) were
found. If `num_paths >= 1`, `best_sentence` is the best word-sequence; if
`num_paths >= 2`, `second_best_sentence` is the second best word-sequence
(this may be useful for testing whether the two best word sequences are
somehow equivalent for the task at hand).
Args:
lat (LatticeVectorFst or CompactLatticeVectorFst): The input lattice.
Returns:
Tuple[float, int, List[int], List[int]]: The tuple
`(confidence, num_paths, best_sentence, second_best_sentence)`.
Note:
This function is not the only way to get confidences in Kaldi. This only
gives you sentence-level (utterance-level) confidence. You can get
word-by-word confidence within a sentence, along with Minimum Bayes Risk
decoding. Also confidences estimated using this function are not very
accurate.
"""
if isinstance(lat, _fst.CompactLatticeVectorFst):
return _confidence._sentence_level_confidence_from_compact_lattice(lat)
else:
return _confidence._sentence_level_confidence_from_lattice(lat)
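# A minimal sketch of how this helper might be called (not from the original
# module); it assumes `lat` is a CompactLatticeVectorFst produced by a decoder
# with acoustic scaling already applied, as the docstring above requires:
#
#     confidence, num_paths, best, second_best = sentence_level_confidence(lat)
#     if num_paths >= 1:
#         print("best word ids:", best, "confidence:", confidence)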
def determinize_lattice_phone_pruned(ifst, trans_model, prune,
opts=None, destructive=True):
"""Applies a specialized determinization operation to a lattice.
Determinizes a raw state-level lattice, keeping only the best output-symbol
sequence (typically transition ids) for each input-symbol sequence. This
version does phone insertion when doing a first pass determinization (if
`opts.phone_determinize == True`), it then removes the inserted phones and
does a second pass determinization on the word lattice (if
`opts.word_determinize == True`). It also does pruning as part of the
determinization algorithm, which is more efficient and prevents blowup.
Args:
ifst (LatticeFst): The input lattice.
trans_model (TransitionModel): The transition model.
prune (float): The pruning beam.
opts (DeterminizeLatticePhonePrunedOptions): The options for lattice
determinization.
destructive (bool): Whether to use the destructive version of the
algorithm which mutates input lattice.
Returns:
CompactLatticeVectorFst: The output lattice.
See Also:
:meth:`determinize_lattice_pruned`
Note:
The point of doing first a phone-level determinization pass and then a
word-level determinization pass is that it allows us to determinize
deeper lattices without "failing early" and returning a too-small
lattice due to the max-mem constraint. The result should be the same
as word-level determinization in general, but for deeper lattices it is
a bit faster, despite the fact that we now have two passes of
determinization by default.
"""
if opts is None:
opts = DeterminizeLatticePhonePrunedOptions()
if not destructive or not isinstance(ifst, _api._MutableFstBase):
ifst = _fst.LatticeVectorFst(ifst)
ofst = _fst.CompactLatticeVectorFst()
success = _dlp._determinize_lattice_phone_pruned_wrapper(trans_model, ifst,
prune, ofst, opts)
if not success:
logging.warning(
"Lattice determinization is terminated early because at least one "
"of max_mem, max_loop or max_arcs thresholds was reached. If you "
"want a more detailed log message, rerun this function after "
"setting verbose level > 0 using kaldi.base.set_verbose_level.")
return ofst
def determinize_lattice_pruned(ifst, prune, opts=None, compact_out=True):
"""Applies a specialized determinization operation to a lattice.
Determinizes a raw state-level lattice, keeping only the best output-symbol
sequence (typically transition ids) for each input-symbol sequence. This
version does determinization only on the word lattice. The output is
represented using either sequences of arcs (if `compact_out == False`),
where all but the first one has an epsilon on the input side, or directly as
strings using compact lattice weight type (if `compact_out == True`). It
also does pruning as part of the determinization algorithm, which is more
efficient and prevents blowup.
Args:
ifst (LatticeFst): The input lattice.
prune (float): The pruning beam.
opts (DeterminizeLatticePrunedOptions): The options for lattice
determinization.
compact_out (bool): Whether to output a compact lattice.
Returns:
LatticeVectorFst or CompactLatticeVectorFst: The output lattice.
See Also:
:meth:`determinize_lattice_phone_pruned`
"""
if opts is None:
opts = DeterminizeLatticePrunedOptions()
ifst = _fst.LatticeVectorFst(ifst).invert().topsort().arcsort()
if compact_out:
ofst = _fst.CompactLatticeVectorFst()
success = _dlp._determinize_lattice_pruned_to_compact(ifst, prune, ofst,
opts)
else:
ofst = _fst.LatticeVectorFst()
success = _dlp._determinize_lattice_pruned(ifst, prune, ofst, opts)
if not success:
logging.warning(
"Lattice determinization is terminated early because at least one "
"of max_mem, max_loop or max_arcs thresholds was reached. If you "
"want a more detailed log message, rerun this function after "
"setting verbose level > 0 using kaldi.base.set_verbose_level.")
return ofst
def lattice_state_times(lat):
"""Extracts lattice state times (in terms of frames).
Iterates over the states of a topologically sorted lattice and computes
the corresponding time instances.
Args:
lat (LatticeVectorFst or CompactLatticeVectorFst): The input lattice.
Returns:
Tuple[int, List[int]]: The number of frames and the state times.
Note:
If input is a regular lattice, the number of frames is equal to the
maximum state time in the lattice. If input is a compact lattice, the
number of frames might not be equal to the maximum state time in the
lattice due to frames in final states.
"""
if isinstance(lat, _fst.LatticeVectorFst):
return _lat_fun._lattice_state_times(lat)
else:
return _lat_fun._compact_lattice_state_times(lat)
def compute_lattice_alphas_and_betas(lat, viterbi):
"""Computes forward and backward scores for lattice states.
If `viterbi == True`, computes the Viterbi scores, i.e. forward (alpha) and
backward (beta) scores are the scores of best paths reaching and leaving
each state. Otherwise, computes regular forward and backward scores. Note
that alphas and betas are negated costs. Requires the input lattice to be
topologically sorted.
Args:
lat (LatticeVectorFst or CompactLatticeVectorFst): The input lattice.
viterbi (bool): Whether to compute Viterbi scores.
Returns:
Tuple[float, List[float], List[float]]: The total-prob (or best-path
prob), the forward (alpha) scores and the backward (beta) scores.
"""
if isinstance(lat, _fst.LatticeVectorFst):
return _lat_fun._compute_lattice_alphas_and_betas(lat, viterbi)
else:
return _lat_fun._compute_compact_lattice_alphas_and_betas(lat, viterbi)
def top_sort_lattice_if_needed(lat):
"""Topologically sorts the lattice if it is not already sorted.
Args:
lat (LatticeVectorFst or CompactLatticeVectorFst): The input lattice.
Raises:
RuntimeError: If lattice cannot be topologically sorted.
"""
if isinstance(lat, _fst.LatticeVectorFst):
_lat_fun._top_sort_lattice_if_needed(lat)
else:
_lat_fun._top_sort_compact_lattice_if_needed(lat)
def prune_lattice(beam, lat):
"""Prunes a lattice.
Args:
beam (float): The pruning beam.
lat (LatticeVectorFst or CompactLatticeVectorFst): The input lattice.
Raises:
ValueError: If pruning fails.
"""
if isinstance(lat, _fst.LatticeVectorFst):
_lat_fun._prune_lattice(beam, lat)
else:
_lat_fun._prune_compact_lattice(beam, lat)
def rescore_lattice(decodable, lat):
"""Adjusts acoustic scores in the lattice.
This function *adds* the negated scores obtained from the decodable object,
to the acoustic scores on the arcs. If you want to replace them, you should
use :meth:`scale_compact_lattice` to first set the acoustic scores to zero.
The input labels (or the string component of arc weights if the input is a
compact lattice), are interpreted as transition-ids or whatever other index
the decodable object expects.
Args:
decodable (DecodableInterface): The decodable object.
lat (LatticeVectorFst or CompactLatticeVectorFst): The input lattice.
Raises:
ValueError: If the inputs are not compatible.
See Also:
:meth:`rescore_compact_lattice_speedup`
"""
if isinstance(lat, _fst.LatticeVectorFst):
_lat_fun._rescore_lattice(decodable, lat)
else:
_lat_fun._rescore_compact_lattice(decodable, lat)
def longest_sentence_length_in_lattice(lat):
"""Returns the number of words in the longest sentence in a lattice.
Args:
lat (LatticeVectorFst or CompactLatticeVectorFst): The input lattice.
Returns:
int: The length of the longest sentence in the lattice.
"""
if isinstance(lat, _fst.LatticeVectorFst):
return _lat_fun._longest_sentence_length_in_lattice(lat)
else:
return _lat_fun._longest_sentence_length_in_compact_lattice(lat)
__all__ = [name for name in dir()
if name[0] != '_'
and not name.endswith('Base')]
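# A hypothetical end-to-end sketch tying the helpers above together (not part of
# the original module). It assumes a raw state-level lattice `lat`
# (LatticeVectorFst), a `trans_model` and a pruning beam chosen by the caller:
#
#     clat = determinize_lattice_phone_pruned(lat, trans_model, prune=8.0)
#     top_sort_lattice_if_needed(clat)
#     prune_lattice(8.0, clat)
#     num_frames, times = lattice_state_times(clat)
#     tot, alphas, betas = compute_lattice_alphas_and_betas(clat, viterbi=False)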
|
PypiClean
|
/nonebot_plugin_ai_timetable-0.3.6.tar.gz/nonebot_plugin_ai_timetable-0.3.6/nonebot_plugin_ai_timetable/__init__.py
|
from .utils import (
AiTimetable,
scheduler,
logger,
config,
md_to_pic,
userdata,
usertable,
)
from nonebot.plugin import PluginMetadata
from nonebot.params import RegexStr, ArgStr, CommandArg, ArgPlainText
from nonebot.matcher import Matcher
from nonebot import on_command, on_regex
import datetime
import re
import random
from nonebot.adapters.onebot.v11 import Bot, MessageSegment, MessageEvent, Message
from .config import Config
logger.opt(colors=True).info(
"已检测到软依赖<y>nonebot_plugin_apscheduler</y>, <g>开启定时任务功能</g>"
if scheduler
else "未检测到软依赖<y>nonebot_plugin_apscheduler</y>,<r>禁用定时任务功能</r>"
)
__plugin_meta__ = PluginMetadata(
name="小爱课表",
description="一键导入课表、查看课表、提醒上课、查询课程",
usage=AiTimetable.ai_timetable__usage,
type="application",
homepage="https://github.com/maoxig/nonebot-plugin-ai-timetable",
config=Config,
supported_adapters={"~onebot.v11"},
)
my_table = on_regex(r"^(小爱|我的|本周|下周)(课表)", priority=20, block=False)
new_table = on_command("导入课表", priority=20, block=False, aliases={"创建课表"})
table_help = on_command("课表帮助", priority=20, block=False, aliases={"课表介绍", "课表怎么用"})
someday_table = on_regex(
r"^(((今|明|昨|后)(天|日))|(星期|周)(一|二|三|四|五|六|日|天))(课表|的课|课程|((上|有)(什么|啥)课))",
priority=20,
block=False,
)
add_alock_someday = on_regex(
r"^(订阅|提醒)((周|星期)(一|二|三|四|五|六|日|天))(课程|课表|的课)", priority=20, block=True
)
add_alock_morningcalss = on_command("订阅早八", priority=20, block=True, aliases={"提醒早八"})
remove_alock_someday = on_regex(
r"^(取消)(订阅|提醒)((周|星期)(一|二|三|四|五|六|日|天))(课程|的课|课表)", priority=20, block=False
)
sub_class = on_command("订阅课程", priority=25, block=False, aliases={"提醒课程"})
remove_sub_class = on_command("取消订阅课程", priority=25, block=False, aliases={"取消提醒课程"})
remove_alock_morningclass = on_command(
"取消订阅早八", priority=20, block=False, aliases={"取消提醒早八"}
)
renew_table = on_command("更新本地课表", priority=20, block=False, aliases={"更新课表"})
send_next_class = on_command("上课", priority=20, block=False, aliases={"下节课"})
next_morningclass = on_command("早八", priority=20, block=False, aliases={"明日早八", "明天早八"})
@table_help.handle()
async def _():
"""课表帮助"""
if config.timetable_pic:
await table_help.finish(
MessageSegment.image(await md_to_pic(AiTimetable.ai_timetable__usage))
)
else:
await table_help.finish(AiTimetable.ai_timetable__usage)
@my_table.handle()
async def _(event: MessageEvent, key: str = RegexStr()):
"""获取本周/下周的课表"""
uid = event.get_user_id()
if uid in userdata:
pic = await AiTimetable.my_table(uid=uid, key=key)
await my_table.finish(MessageSegment.image(pic))
else:
await my_table.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
@new_table.got("key", "请发送小爱课程表导出的链接,发送/取消以退出")
async def _(event: MessageEvent, key: str = ArgStr()):
"""更新本地的课表"""
uid = event.get_user_id()
url = str(key)
if re.match(AiTimetable.base_url_re, url): # the link sent by the user matches the expected pattern
msg = await AiTimetable.new_table(uid=uid, base_url=key)
await new_table.finish(msg)
else:
await new_table.finish("出错了,请检查链接是否正确", at_sender=True)
@someday_table.handle()
async def _(event: MessageEvent, key: str = RegexStr()):
"""发送某天的课表"""
uid = event.get_user_id()
if uid not in userdata:
await someday_table.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
if config.timetable_pic:
pic = await AiTimetable.someday_table(uid=uid, key=key)
await someday_table.finish(MessageSegment.image(pic))
else:
await someday_table.finish(
await AiTimetable.someday_table(uid=uid, key=key)
)
@renew_table.handle() # refresh the locally stored timetable
async def _(event: MessageEvent):
uid = event.get_user_id()
if uid not in userdata:
await renew_table.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
msg = await AiTimetable.renew_table(uid=uid)
await renew_table.finish(msg, at_sender=True)
@send_next_class.handle() # send the current class and the next class
async def _(event: MessageEvent):
uid = event.get_user_id()
if uid not in userdata:
await send_next_class.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
msg = "现在时间是" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
msg += AiTimetable.now_class(uid)
msg += AiTimetable.next_class(uid)
await send_next_class.finish(msg, at_sender=True)
@next_morningclass.handle()
async def _(bot: Bot, event: MessageEvent):
"""发送早八"""
uid = event.get_user_id()
if uid not in userdata:
await send_next_class.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
await AiTimetable.post_alock_morningclass(uid=uid, bot=bot, event=event)
# -----------scheduled-task handlers below----------------#
@add_alock_someday.handle()
async def _(bot: Bot, event: MessageEvent, key: str = RegexStr()):
"""订阅课表"""
uid = event.get_user_id()
if uid not in userdata:
await add_alock_someday.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
if scheduler:
send_day = (AiTimetable.weekday_int(key) + 5) % 7
if scheduler.get_job(str(uid + "post_alock" + str(send_day))):
await add_alock_someday.finish("出错了!你好像已经订阅过这天的课表了呢", at_sender=True)
scheduler.add_job(
AiTimetable.post_alock,
"cron",
hour=config.timetable_alock_someday,
second=random.randint(0, 60),
id=str(uid + "post_alock" + str(send_day)),
day_of_week=send_day,
kwargs={"key": key, "uid": uid, "bot": bot, "event": event},
)
await add_alock_someday.finish("定时提醒添加成功!", at_sender=True)
else:
await add_alock_someday.finish("apscheduler插件未载入,无法添加定时提醒", at_sender=True)
@remove_alock_someday.handle()
async def _(bot: Bot, event: MessageEvent, key: str = RegexStr()):
"""删除订阅课表"""
uid = event.get_user_id()
if uid not in userdata:
await add_alock_someday.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
if scheduler:
send_day = (AiTimetable.weekday_int(key) + 5) % 7
if scheduler.get_job(str(uid + "post_alock" + str(send_day))):
scheduler.remove_job(str(uid + "post_alock" + str(send_day)))
await remove_alock_someday.finish("定时提醒删除成功!", at_sender=True)
else:
await remove_alock_someday.finish("出错了,好像没有订阅过这天的课表呢", at_sender=True)
else:
await remove_alock_someday.finish(
"apscheduler插件未载入,无法删除定时提醒", at_sender=True
)
# -----------morning-class (8 a.m.) subscription handlers below----------------#
@add_alock_morningcalss.handle()
async def _(bot: Bot, event: MessageEvent):
uid = event.get_user_id()
if uid not in userdata:
await add_alock_morningcalss.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
if scheduler:
if scheduler.get_job(str(uid + "post_alock_morningclass")):
await add_alock_morningcalss.finish(
"出错了!你好像已经订阅过早八提醒了呢", at_sender=True
)
scheduler.add_job(
AiTimetable.post_alock_morningclass,
"cron",
hour=config.timetable_alock_8,
second=random.randint(0, 60),
id=str(uid + "post_alock_morningclass"),
kwargs={"uid": uid, "bot": bot, "event": event},
)
await add_alock_morningcalss.finish("定时提醒添加成功!", at_sender=True)
else:
await add_alock_morningcalss.finish(
"apscheduler插件未载入,无法添加定时提醒", at_sender=True
)
@remove_alock_morningclass.handle()
async def _(event: MessageEvent):
uid = event.get_user_id()
if uid not in userdata:
await remove_alock_morningclass.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
if scheduler:
if scheduler.get_job(str(uid + "post_alock_morningclass")):
scheduler.remove_job(str(uid + "post_alock_morningclass"))
await remove_alock_morningclass.finish("定时提醒删除成功!", at_sender=True)
else:
await remove_alock_morningclass.finish("出错了,好像没有订阅过早八呢", at_sender=True)
else:
await remove_alock_morningclass.finish(
"apscheduler插件未载入,无法删除定时提醒", at_sender=True
)
@sub_class.handle()
async def _(matcher: Matcher, args: Message = CommandArg()):
if args.extract_plain_text():
matcher.set_arg("text", args)
@sub_class.got("text", prompt="请告诉我课程名~")
async def sub_handler(bot: Bot, event: MessageEvent, text: str = ArgPlainText()):
uid = event.get_user_id()
if uid not in userdata:
await sub_class.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
if scheduler:
msg = AiTimetable.sub_class(uid=uid, key=text, event=event, bot=bot)
await sub_class.finish(msg, at_sender=True)
else:
await sub_class.finish("apscheduler插件未载入,无法添加定时提醒", at_sender=True)
@remove_sub_class.handle()
async def _(matcher: Matcher, args: Message = CommandArg()):
if args.extract_plain_text():
matcher.set_arg("text", args)
@remove_sub_class.got("text", prompt="请告诉我课程名~")
async def remove_sub_handler(event: MessageEvent, text: str = ArgPlainText()):
uid = event.get_user_id()
if uid not in userdata:
await remove_sub_class.finish("你还没有导入课表,发送/导入课表来导入吧!", at_sender=True)
else:
if scheduler:
msg = AiTimetable.remove_sub_class(uid=uid, key=text)
await remove_sub_class.finish(msg, at_sender=True)
else:
await remove_sub_class.finish("apscheduler插件未载入,无法添加定时提醒", at_sender=True)
|
PypiClean
|
/tailchaser-0.2.6.tar.gz/tailchaser-0.2.6/ci/appveyor-download.py
|
from __future__ import unicode_literals
import argparse
import os
import requests
import zipfile
def make_auth_headers():
"""Make the authentication headers needed to use the Appveyor API."""
path = os.path.expanduser("~/.appveyor.token")
if not os.path.exists(path):
raise RuntimeError(
"Please create a file named `.appveyor.token` in your home directory. "
"You can get the token from https://ci.appveyor.com/api-token"
)
with open(path) as f:
token = f.read().strip()
headers = {
'Authorization': 'Bearer {}'.format(token),
}
return headers
def download_latest_artifacts(account_project, build_id):
"""Download all the artifacts from the latest build."""
if build_id is None:
url = "https://ci.appveyor.com/api/projects/{}".format(account_project)
else:
url = "https://ci.appveyor.com/api/projects/{}/build/{}".format(account_project, build_id)
build = requests.get(url, headers=make_auth_headers()).json()
jobs = build['build']['jobs']
print(u"Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs)))
for job in jobs:
name = job['name']
print(u" {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job))
url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts".format(job['jobId'])
response = requests.get(url, headers=make_auth_headers())
artifacts = response.json()
for artifact in artifacts:
is_zip = artifact['type'] == "Zip"
filename = artifact['fileName']
print(u" {0}, {1} bytes".format(filename, artifact['size']))
url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts/{}".format(job['jobId'], filename)
download_url(url, filename, make_auth_headers())
if is_zip:
unpack_zipfile(filename)
os.remove(filename)
def ensure_dirs(filename):
"""Make sure the directories exist for `filename`."""
dirname, _ = os.path.split(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
def download_url(url, filename, headers):
"""Download a file from `url` to `filename`."""
ensure_dirs(filename)
response = requests.get(url, headers=headers, stream=True)
if response.status_code == 200:
with open(filename, 'wb') as f:
for chunk in response.iter_content(16 * 1024):
f.write(chunk)
else:
print(u" Error downloading {}: {}".format(url, response))
def unpack_zipfile(filename):
"""Unpack a zipfile, using the names in the zip."""
with open(filename, 'rb') as fzip:
z = zipfile.ZipFile(fzip)
for name in z.namelist():
print(u" extracting {}".format(name))
ensure_dirs(name)
z.extract(name)
parser = argparse.ArgumentParser(description='Download artifacts from AppVeyor.')
parser.add_argument('--id',
metavar='PROJECT_ID',
default='thanos/tailchaser',
help='Project ID in AppVeyor.')
parser.add_argument('build',
nargs='?',
metavar='BUILD_ID',
help='Build ID in AppVeyor. Eg: master-123')
if __name__ == "__main__":
# import logging
# logging.basicConfig(level="DEBUG")
args = parser.parse_args()
download_latest_artifacts(args.id, args.build)
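# Hypothetical command lines (not part of the original script); the project id and
# build number below are placeholders:
#
#     python appveyor-download.py                              # latest build of thanos/tailchaser
#     python appveyor-download.py --id owner/project master-123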
|
PypiClean
|