/jep-4.1.1.tar.gz/jep-4.1.1/commands/python.py
from commands.util import is_osx
from commands.util import is_windows
from commands.util import is_bsd
import os
import sysconfig
def get_python_libs():
"""
Get the shared library names for embedding jep.
See python-config
"""
v = sysconfig.get_config_var('VERSION')
ldv = sysconfig.get_config_var('LDVERSION')
if ldv:
v = ldv
libs = ['python' + v]
if not is_windows() and not is_bsd():
libs.append('dl')
return libs
def get_python_linker_args():
if is_windows():
return []
return ['-L{0}'.format(sysconfig.get_config_var('LIBDIR'))]
def get_python_lib_dir():
if is_windows():
return os.path.join(os.environ.get('PYTHONHOME'), 'DLLs')
return sysconfig.get_config_var('LIBDIR')
def get_libpython():
"""
Searches for the Python library, e.g. libpython<version>.so.
Used by setup.py to set PYTHON_LDLIBRARY, and by scripts to set up LD_PRELOAD.
"""
libdir = sysconfig.get_config_var('LIBDIR')
ldlibrary = sysconfig.get_config_var('LDLIBRARY')
if libdir is None or ldlibrary is None:
return None
lib_python = os.path.join(libdir, ldlibrary)
if os.path.exists(lib_python):
return lib_python
# x64 systems will tend to also have a MULTIARCH folder
multiarch = sysconfig.get_config_var('MULTIARCH')
if multiarch is not None:
lib_python = os.path.join(libdir, multiarch, ldlibrary)
if os.path.exists(lib_python):
return lib_python
# HACK: Non-existent static library is a known issue with conda-forge python;
# see: https://github.com/conda-forge/python-feedstock/issues/565
# Let's also look for a shared library in this case.
if ldlibrary.endswith('.a'):
ldshared = ldlibrary[:-1] + 'so'
lib_python = os.path.join(libdir, ldshared)
if os.path.exists(lib_python):
return lib_python
if multiarch is not None:
lib_python = os.path.join(libdir, multiarch, ldshared)
if os.path.exists(lib_python):
return lib_python
# give up
return None
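# --- Illustrative usage (a sketch, not part of the original module) ---
# Shows how the helpers above might be combined into link flags when
# embedding CPython; the printed values depend on the local installation,
# and get_python_lib_dir() assumes PYTHONHOME is set on Windows.
if __name__ == '__main__':
    print('link libraries:', ' '.join('-l' + lib for lib in get_python_libs()))
    print('linker args   :', ' '.join(get_python_linker_args()))
    print('library dir   :', get_python_lib_dir())
    print('libpython     :', get_libpython())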
/matcha_ml-0.2.9-py3-none-any.whl/matcha_ml/infrastructure/default/aks/README.md
## Requirements
No requirements.
## Providers
| Name | Version |
|------|---------|
| <a name="provider_azurerm"></a> [azurerm](#provider\_azurerm) | n/a |
## Modules
No modules.
## Resources
| Name | Type |
|------|------|
| [azurerm_kubernetes_cluster.main](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_location"></a> [location](#input\_location) | The Azure region where the Kubernetes cluster will be created | `string` | n/a | yes |
| <a name="input_prefix"></a> [prefix](#input\_prefix) | Prefix to be used for all resources in this module | `string` | n/a | yes |
| <a name="input_resource_group_name"></a> [resource\_group\_name](#input\_resource\_group\_name) | The name of the resource group to create the Kubernetes cluster in | `string` | n/a | yes |
## Outputs
| Name | Description |
|------|-------------|
| <a name="output_aks_cluster_id"></a> [aks\_cluster\_id](#output\_aks\_cluster\_id) | ID of the created Kubernetes cluster |
| <a name="output_aks_cluster_name"></a> [aks\_cluster\_name](#output\_aks\_cluster\_name) | Name of the created Kubernetes cluster |
| <a name="output_aks_object_id"></a> [aks\_object\_id](#output\_aks\_object\_id) | Object ID for the Kubernetes cluster |
| <a name="output_aks_principal_id"></a> [aks\_principal\_id](#output\_aks\_principal\_id) | Principal ID for the Kubernetes cluster |
| <a name="output_client_certificate"></a> [client\_certificate](#output\_client\_certificate) | Client certificate for accessing the Kubernetes cluster |
| <a name="output_client_key"></a> [client\_key](#output\_client\_key) | Client key for accessing the Kubernetes cluster |
| <a name="output_cluster_ca_certificate"></a> [cluster\_ca\_certificate](#output\_cluster\_ca\_certificate) | Cluster CA certificate for the Kubernetes cluster |
| <a name="output_host"></a> [host](#output\_host) | Host address for the Kubernetes cluster |
| <a name="output_kube_config"></a> [kube\_config](#output\_kube\_config) | Raw Kubernetes configuration for the created cluster |
/tf_keras_vis-0.8.5-py3-none-any.whl/tf_keras_vis/scorecam.py
from typing import Union
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from scipy.ndimage import zoom  # scipy.ndimage.interpolation is deprecated
from . import ModelVisualization
from .utils import get_num_of_steps_allowed, is_mixed_precision, listify, normalize, zoom_factor
from .utils.model_modifiers import ExtractIntermediateLayerForGradcam as ModelModifier
class Scorecam(ModelVisualization):
"""Score-CAM and Faster Score-CAM
References:
* Score-CAM: Score-Weighted Visual Explanations for Convolutional Neural Networks
(https://arxiv.org/pdf/1910.01279.pdf)
* Faster Score-CAM (https://github.com/tabayashi0117/Score-CAM#faster-score-cam)
"""
def __call__(self,
score,
seed_input,
penultimate_layer=None,
seek_penultimate_conv_layer=True,
activation_modifier=lambda cam: K.relu(cam),
batch_size=32,
max_N=None,
training=False,
expand_cam=True,
normalize_cam=True) -> Union[np.ndarray, list]:
"""Generate score-weighted class activation maps (CAM) by using gradient-free
visualization method.
Args:
            score: A :obj:`tf_keras_vis.utils.scores.Score` instance, a function, or a
                list of them. For example, a Score instance that specifies the
                visualization targets::
                    scores = CategoricalScore([1, 294, 413])
                The code above is equivalent to the function below::
                    score = lambda outputs: (outputs[0][1], outputs[1][294], outputs[2][413])
When the model has multiple outputs, you MUST pass a list of
Score instances or functions. For example::
from tf_keras_vis.utils.scores import CategoricalScore, InactiveScore
score = [
CategoricalScore([1, 23]), # For 1st model output
InactiveScore(), # For 2nd model output
...
]
            seed_input: A tf.Tensor, :obj:`numpy.ndarray`, or a list of them to input to
                the model. When the model has multiple inputs, you MUST pass a list of
                tensors.
            penultimate_layer: An index or name of a layer, or a tf.keras.layers.Layer
                instance itself. None is equivalent to `-1`. If the layer specified by
                this option is not a `convolutional` layer, `penultimate_layer` works as
                the offset from which to seek a `convolutional` layer. Defaults to None.
            seek_penultimate_conv_layer: A bool that indicates whether to seek a
                penultimate layer when the layer specified by `penultimate_layer` is not
                a `convolutional` layer. Defaults to True.
activation_modifier: A function to modify the Class Activation Map (CAM). Defaults to
`lambda cam: K.relu(cam)`.
batch_size: The number of samples per batch. Defaults to 32.
            max_N: When None or zero or less, runs as Score-CAM. When a positive integer,
                runs as Faster Score-CAM using only that many activation maps. A larger
                value (or None) needs more time to visualize the CAM but can produce
                clearer attention images. Defaults to None.
            training: A bool that indicates whether the model's training mode is on or
                off. Defaults to False.
            expand_cam: True to resize the CAM to the input image size. **Note!** When
                False, only a single CAM is returned even if the model has multiple
                inputs. Defaults to True.
normalize_cam: When True, CAM will be normalized. Defaults to True.
Returns:
            A :obj:`numpy.ndarray` or a list of them. They are the Class Activation
            Maps (CAMs) that indicate the `seed_input` regions whose change would
            contribute most to the score value.
Raises:
            :obj:`ValueError`: When any argument is invalid.
"""
# Preparing
scores = self._get_scores_for_multiple_outputs(score)
seed_inputs = self._get_seed_inputs_for_multiple_inputs(seed_input)
# Processing score-cam
model = ModelModifier(penultimate_layer, seek_penultimate_conv_layer, False)(self.model)
penultimate_output = model(seed_inputs, training=training)
if is_mixed_precision(self.model):
penultimate_output = tf.cast(penultimate_output, self.model.variable_dtype)
        # For efficient visualization, extract the maps that have a large variance.
        # This excellent idea was devised by tabayashi0117.
# (see for details: https://github.com/tabayashi0117/Score-CAM#faster-score-cam)
if max_N is None or max_N <= 0:
max_N = get_num_of_steps_allowed(penultimate_output.shape[-1])
elif max_N > 0 and max_N <= penultimate_output.shape[-1]:
max_N = get_num_of_steps_allowed(max_N)
else:
raise ValueError(f"max_N must be 1 or more and {penultimate_output.shape[-1]} or less."
f" max_N: {max_N}")
if max_N < penultimate_output.shape[-1]:
activation_map_std = tf.math.reduce_std(penultimate_output,
axis=tuple(
range(penultimate_output.ndim)[1:-1]),
keepdims=True)
_, top_k_indices = tf.math.top_k(activation_map_std, max_N)
top_k_indices, _ = tf.unique(tf.reshape(top_k_indices, (-1, )))
penultimate_output = tf.gather(penultimate_output, top_k_indices, axis=-1)
nsamples = penultimate_output.shape[0]
channels = penultimate_output.shape[-1]
# Upsampling activations
input_shapes = [seed_input.shape for seed_input in seed_inputs]
zoom_factors = (zoom_factor(penultimate_output.shape[1:-1], input_shape[1:-1])
for input_shape in input_shapes)
zoom_factors = ((1, ) + factor + (1, ) for factor in zoom_factors)
upsampled_activations = [
zoom(penultimate_output, factor, order=1, mode='nearest') for factor in zoom_factors
]
activation_shapes = [activation.shape for activation in upsampled_activations]
# Normalizing activations
min_activations = (np.min(activation,
axis=tuple(range(activation.ndim)[1:-1]),
keepdims=True) for activation in upsampled_activations)
max_activations = (np.max(activation,
axis=tuple(range(activation.ndim)[1:-1]),
keepdims=True) for activation in upsampled_activations)
normalized_activations = zip(upsampled_activations, min_activations, max_activations)
normalized_activations = ((activation - _min) / (_max - _min + K.epsilon())
for activation, _min, _max in normalized_activations)
# (samples, h, w, c) -> (channels, samples, h, w, c)
input_templates = (np.tile(seed_input, (channels, ) + (1, ) * len(seed_input.shape))
for seed_input in seed_inputs)
# (samples, h, w, channels) -> (c, samples, h, w, channels)
masks = (np.tile(mask, (input_shape[-1], ) + (1, ) * len(map_shape)) for mask, input_shape,
map_shape in zip(normalized_activations, input_shapes, activation_shapes))
# (c, samples, h, w, channels) -> (channels, samples, h, w, c)
masks = (np.transpose(mask, (len(mask.shape) - 1, ) + tuple(range(len(mask.shape)))[1:-1] +
(0, )) for mask in masks)
# Create masked inputs
masked_seed_inputs = (np.multiply(input_template, mask)
for input_template, mask in zip(input_templates, masks))
# (channels, samples, h, w, c) -> (channels * samples, h, w, c)
masked_seed_inputs = [
np.reshape(seed_input, (-1, ) + seed_input.shape[2:])
for seed_input in masked_seed_inputs
]
# Predicting masked seed-inputs
preds = self.model.predict(masked_seed_inputs, batch_size=batch_size)
# (channels * samples, logits) -> (channels, samples, logits)
preds = (np.reshape(prediction, (channels, nsamples, prediction.shape[-1]))
for prediction in listify(preds))
# Calculating weights
weights = ([score(K.softmax(p)) for p in prediction]
for score, prediction in zip(scores, preds))
weights = ([self._validate_weight(s, nsamples) for s in w] for w in weights)
weights = (np.array(w, dtype=np.float32) for w in weights)
weights = (np.reshape(w, (channels, nsamples, -1)) for w in weights)
weights = (np.mean(w, axis=2) for w in weights)
weights = (np.transpose(w, (1, 0)) for w in weights)
weights = np.array(list(weights), dtype=np.float32)
weights = np.sum(weights, axis=0)
# Generate cam
cam = K.batch_dot(penultimate_output, weights)
if activation_modifier is not None:
cam = activation_modifier(cam)
if not expand_cam:
if normalize_cam:
cam = normalize(cam)
return cam
# Visualizing
zoom_factors = (zoom_factor(cam.shape, X.shape) for X in seed_inputs)
cam = [zoom(cam, factor, order=1) for factor in zoom_factors]
if normalize_cam:
cam = [normalize(x) for x in cam]
if len(self.model.inputs) == 1 and not isinstance(seed_input, list):
cam = cam[0]
return cam
def _validate_weight(self, score, nsamples):
invalid = False
if tf.is_tensor(score) or isinstance(score, np.ndarray):
invalid = (score.shape[0] != nsamples)
elif isinstance(score, (list, tuple)):
invalid = (len(score) != nsamples)
else:
invalid = (nsamples != 1)
if invalid:
raise ValueError(
"Score function must return a Tensor, whose the first dimension is "
"the same as the first dimension of seed_input or "
", a list or tuple, whose length is the first dimension of seed_input.")
else:
return score
ScoreCAM = Scorecam
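# --- Illustrative usage (a sketch, not part of the original module) ---
# Assumes a Keras image classifier and a batch of preprocessed images
# `images` of shape (N, H, W, 3); the model and class index are examples.
#
#   from tensorflow.keras.applications import VGG16
#   from tf_keras_vis.scorecam import Scorecam
#   from tf_keras_vis.utils.scores import CategoricalScore
#
#   model = VGG16(weights='imagenet')
#   scorecam = Scorecam(model)
#   score = CategoricalScore(281)            # target class for every sample
#   cam = scorecam(score, images, max_N=10)  # max_N > 0 runs Faster Score-CAM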
/formification-1.2.0-py3-none-any.whl/formulaic/static/admin/formulaic/ember-formulaic/node_modules/ember-runtime-enumerable-includes-polyfill/node_modules/babylon/README.md
<p align="center">
<img alt="babylon" src="https://raw.githubusercontent.com/babel/logo/master/babylon.png" width="700">
</p>
<p align="center">
Babylon is a JavaScript parser used in <a href="https://github.com/babel/babel">Babel</a>.
</p>
<p align="center">
<a href="https://travis-ci.org/babel/babylon"><img alt="Travis Status" src="https://img.shields.io/travis/babel/babylon/master.svg?style=flat&label=travis"></a>
<a href="https://codecov.io/gh/babel/babylon"><img alt="Codecov Status" src="https://img.shields.io/codecov/c/github/babel/babylon/master.svg?style=flat"></a>
</p>
## Features
- The latest ECMAScript version enabled by default (ES2017).
- Comment attachment.
- Support for JSX and Flow.
- Support for experimental language proposals (accepting PRs for anything at least [stage-0](https://github.com/tc39/proposals/blob/master/stage-0-proposals.md)).
## Credits
Heavily based on [acorn](https://github.com/marijnh/acorn) and [acorn-jsx](https://github.com/RReverser/acorn-jsx),
thanks to the awesome work of [@RReverser](https://github.com/RReverser) and [@marijnh](https://github.com/marijnh).
Significant diversions are expected to occur in the future such as streaming, EBNF definitions, sweet.js integration, interspatial parsing and more.
## API
### `babylon.parse(code, [options])`
### `babylon.parseExpression(code, [options])`
`parse()` parses the provided `code` as an entire ECMAScript program, while
`parseExpression()` tries to parse a single Expression with performance in
mind. When in doubt, use `.parse()`.
### Options
- **allowImportExportEverywhere**: By default, `import` and `export`
declarations can only appear at a program's top level. Setting this
option to `true` allows them anywhere where a statement is allowed.
- **allowReturnOutsideFunction**: By default, a return statement at
the top level raises an error. Set this to `true` to accept such
code.
- **allowSuperOutsideMethod**: TODO
- **sourceType**: Indicate the mode the code should be parsed in. Can be
either `"script"` or `"module"`.
- **sourceFilename**: Correlate output AST nodes with their source filename. Useful when generating code and source maps from the ASTs of multiple input files.
- **startLine**: By default, the first line of code parsed is treated as line 1. You can provide a line number to alternatively start with. Useful for integration with other source tools.
- **plugins**: Array containing the plugins that you want to enable.
- **strictMode**: TODO
### Output
Babylon generates AST according to [Babel AST format][].
It is based on [ESTree spec][] with the following deviations:
> There is now an `estree` plugin which reverts these deviations
- [Literal][] token is replaced with [StringLiteral][], [NumericLiteral][], [BooleanLiteral][], [NullLiteral][], [RegExpLiteral][]
- [Property][] token is replaced with [ObjectProperty][] and [ObjectMethod][]
- [MethodDefinition][] is replaced with [ClassMethod][]
- [Program][] and [BlockStatement][] contain additional `directives` field with [Directive][] and [DirectiveLiteral][]
- The properties of the value property (a [FunctionExpression][]) of [ClassMethod][], [ObjectProperty][], and [ObjectMethod][] are coerced/brought into the main method node.
AST for JSX code is based on [Facebook JSX AST][] with the addition of one node type:
- `JSXText`
[Babel AST format]: https://github.com/babel/babylon/blob/master/ast/spec.md
[ESTree spec]: https://github.com/estree/estree
[Literal]: https://github.com/estree/estree/blob/master/es5.md#literal
[Property]: https://github.com/estree/estree/blob/master/es5.md#property
[MethodDefinition]: https://github.com/estree/estree/blob/master/es2015.md#methoddefinition
[StringLiteral]: https://github.com/babel/babylon/blob/master/ast/spec.md#stringliteral
[NumericLiteral]: https://github.com/babel/babylon/blob/master/ast/spec.md#numericliteral
[BooleanLiteral]: https://github.com/babel/babylon/blob/master/ast/spec.md#booleanliteral
[NullLiteral]: https://github.com/babel/babylon/blob/master/ast/spec.md#nullliteral
[RegExpLiteral]: https://github.com/babel/babylon/blob/master/ast/spec.md#regexpliteral
[ObjectProperty]: https://github.com/babel/babylon/blob/master/ast/spec.md#objectproperty
[ObjectMethod]: https://github.com/babel/babylon/blob/master/ast/spec.md#objectmethod
[ClassMethod]: https://github.com/babel/babylon/blob/master/ast/spec.md#classmethod
[Program]: https://github.com/babel/babylon/blob/master/ast/spec.md#programs
[BlockStatement]: https://github.com/babel/babylon/blob/master/ast/spec.md#blockstatement
[Directive]: https://github.com/babel/babylon/blob/master/ast/spec.md#directive
[DirectiveLiteral]: https://github.com/babel/babylon/blob/master/ast/spec.md#directiveliteral
[FunctionExpression]: https://github.com/babel/babylon/blob/master/ast/spec.md#functionexpression
[Facebook JSX AST]: https://github.com/facebook/jsx/blob/master/AST.md
### Semver
Babylon follows semver in most situations. The only thing to note is that some spec-compliancy bug fixes may be released under patch versions.
For example: We push a fix to early error on something like [#107](https://github.com/babel/babylon/pull/107) - multiple default exports per file. That would be considered a bug fix even though it would cause a build to fail.
### Example
```javascript
require("babylon").parse("code", {
// parse in strict mode and allow module declarations
sourceType: "module",
plugins: [
// enable jsx and flow syntax
"jsx",
"flow"
]
});
```
### Plugins
- `estree`
- `jsx`
- `flow`
- `doExpressions`
- `objectRestSpread`
- `decorators` (Based on an outdated version of the Decorators proposal. Will be removed in a future version of `Babylon`)
- `classProperties`
- `exportExtensions`
- `asyncGenerators`
- `functionBind`
- `functionSent`
- `dynamicImport`
- `templateInvalidEscapes`
/retro_data_structures-0.23.0-py3-none-any.whl/retro_data_structures/properties/corruption/objects/Achievement.py
import dataclasses
import struct
import typing
from retro_data_structures.game_check import Game
from retro_data_structures.properties.base_property import BaseObjectType
import retro_data_structures.enums.corruption as enums
from retro_data_structures.properties.corruption.archetypes.BonusCredit import BonusCredit
from retro_data_structures.properties.corruption.archetypes.EditorProperties import EditorProperties
from retro_data_structures.properties.corruption.core.AssetId import AssetId, default_asset_id
@dataclasses.dataclass()
class Achievement(BaseObjectType):
editor_properties: EditorProperties = dataclasses.field(default_factory=EditorProperties)
bonus_credit: BonusCredit = dataclasses.field(default_factory=BonusCredit)
achievement: enums.Achievement = dataclasses.field(default=enums.Achievement.Unknown84)
normal_difficulty: bool = dataclasses.field(default=True)
hard_difficulty: bool = dataclasses.field(default=True)
elite_difficulty: bool = dataclasses.field(default=True)
bonus_credit_string: AssetId = dataclasses.field(metadata={'asset_types': ['STRG']}, default=default_asset_id)
@classmethod
def game(cls) -> Game:
return Game.CORRUPTION
def get_name(self) -> typing.Optional[str]:
return self.editor_properties.name
def set_name(self, name: str) -> None:
self.editor_properties.name = name
@classmethod
def object_type(cls) -> str:
return 'ACHI'
@classmethod
def modules(cls) -> typing.List[str]:
return ['RSO_ScriptAchievement.rso']
@classmethod
def from_stream(cls, data: typing.BinaryIO, size: typing.Optional[int] = None, default_override: typing.Optional[dict] = None):
struct_id, size, property_count = struct.unpack(">LHH", data.read(8))
assert struct_id == 0xFFFFFFFF
root_size_start = data.tell() - 2
present_fields = default_override or {}
for _ in range(property_count):
property_id, property_size = struct.unpack(">LH", data.read(6))
start = data.tell()
try:
property_name, decoder = _property_decoder[property_id]
present_fields[property_name] = decoder(data, property_size)
except KeyError:
raise RuntimeError(f"Unknown property: 0x{property_id:08x}")
assert data.tell() - start == property_size
assert data.tell() - root_size_start == size
return cls(**present_fields)
def to_stream(self, data: typing.BinaryIO, default_override: typing.Optional[dict] = None):
default_override = default_override or {}
data.write(b'\xff\xff\xff\xff') # struct object id
root_size_offset = data.tell()
data.write(b'\x00\x00') # placeholder for root struct size
data.write(b'\x00\x07') # 7 properties
data.write(b'%ZE\x80') # 0x255a4580
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.editor_properties.to_stream(data)
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b'z\xac\x9e"') # 0x7aac9e22
before = data.tell()
data.write(b'\x00\x00') # size placeholder
self.bonus_credit.to_stream(data)
after = data.tell()
data.seek(before)
data.write(struct.pack(">H", after - before - 2))
data.seek(after)
data.write(b'\x05\x8d-\xdb') # 0x58d2ddb
data.write(b'\x00\x04') # size
self.achievement.to_stream(data)
data.write(b'\x97OJ\xa1') # 0x974f4aa1
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.normal_difficulty))
data.write(b'\x0f\x8c\xf6\xff') # 0xf8cf6ff
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.hard_difficulty))
data.write(b'\x9b\x89\x03\xeb') # 0x9b8903eb
data.write(b'\x00\x01') # size
data.write(struct.pack('>?', self.elite_difficulty))
data.write(b'\xd6\xa0\xcf\xf1') # 0xd6a0cff1
data.write(b'\x00\x08') # size
data.write(struct.pack(">Q", self.bonus_credit_string))
struct_end_offset = data.tell()
data.seek(root_size_offset)
data.write(struct.pack(">H", struct_end_offset - root_size_offset - 2))
data.seek(struct_end_offset)
@classmethod
def from_json(cls, data: dict):
return cls(
editor_properties=EditorProperties.from_json(data['editor_properties']),
bonus_credit=BonusCredit.from_json(data['bonus_credit']),
achievement=enums.Achievement.from_json(data['achievement']),
normal_difficulty=data['normal_difficulty'],
hard_difficulty=data['hard_difficulty'],
elite_difficulty=data['elite_difficulty'],
bonus_credit_string=data['bonus_credit_string'],
)
def to_json(self) -> dict:
return {
'editor_properties': self.editor_properties.to_json(),
'bonus_credit': self.bonus_credit.to_json(),
'achievement': self.achievement.to_json(),
'normal_difficulty': self.normal_difficulty,
'hard_difficulty': self.hard_difficulty,
'elite_difficulty': self.elite_difficulty,
'bonus_credit_string': self.bonus_credit_string,
}
def _decode_editor_properties(data: typing.BinaryIO, property_size: int):
return EditorProperties.from_stream(data, property_size)
def _decode_bonus_credit(data: typing.BinaryIO, property_size: int):
return BonusCredit.from_stream(data, property_size)
def _decode_achievement(data: typing.BinaryIO, property_size: int):
return enums.Achievement.from_stream(data)
def _decode_normal_difficulty(data: typing.BinaryIO, property_size: int):
return struct.unpack('>?', data.read(1))[0]
def _decode_hard_difficulty(data: typing.BinaryIO, property_size: int):
return struct.unpack('>?', data.read(1))[0]
def _decode_elite_difficulty(data: typing.BinaryIO, property_size: int):
return struct.unpack('>?', data.read(1))[0]
def _decode_bonus_credit_string(data: typing.BinaryIO, property_size: int):
return struct.unpack(">Q", data.read(8))[0]
_property_decoder: typing.Dict[int, typing.Tuple[str, typing.Callable[[typing.BinaryIO, int], typing.Any]]] = {
0x255a4580: ('editor_properties', _decode_editor_properties),
0x7aac9e22: ('bonus_credit', _decode_bonus_credit),
0x58d2ddb: ('achievement', _decode_achievement),
0x974f4aa1: ('normal_difficulty', _decode_normal_difficulty),
0xf8cf6ff: ('hard_difficulty', _decode_hard_difficulty),
0x9b8903eb: ('elite_difficulty', _decode_elite_difficulty),
0xd6a0cff1: ('bonus_credit_string', _decode_bonus_credit_string),
}
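# --- Illustrative round trip (a sketch, not part of the original module) ---
# 'achievement.bin' is a hypothetical file containing one serialized
# Achievement property struct in the game's big-endian layout.
#
#   import io
#   with open('achievement.bin', 'rb') as f:
#       obj = Achievement.from_stream(f)
#   buf = io.BytesIO()
#   obj.to_stream(buf)     # re-serialize to bytes
#   print(obj.to_json())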
/gFlex-1.1.1.tar.gz/gFlex-1.1.1/README.md
[](https://travis-ci.org/awickert/gFlex)
# gFlex
***Multiple methods to solve elastic plate flexure, designed for applications to Earth's lithosphere.***
These instructions are meant to take a user who is familiar with computers but new to (or a beginner with) Python through the basics of how to get gFlex to work. The Python scripting part towards the end should be pretty straightforward as well, insofar as information is provided on how to get and set the chosen values inside gFlex. *Please leave a message if you have trouble working with gFlex; your comments could assist both you and the more general improvement of this documentation.*
When you use gFlex, please cite:
**Wickert, A. D. (2016), [Open-source modular solutions for flexural isostasy: gFlex v1.0](https://www.geosci-model-dev.net/9/997/2016/gmd-9-997-2016.html), *Geosci. Model Dev.*, *9*(3), 997–1017, doi:10.5194/gmd-9-997-2016.**
## Download and Installation
#### Python
gFlex has been tested on **Python 2.7**, and should work (with a few possible changes) on future versions of Python 2.X. It has not been tested on Python 3.X.
In order to run properly, gFlex requires the following Python dependencies:
* numpy
* scipy
* matplotlib
* setuptools
* pip (optional)
*For users who are new to Python, follow these directions to install the Python interpreters onto your computer.*
###### Linux
Use your package manager to download and install the required Python packages. For Debian/Ubuntu, it will be something like:
```bash
# Basic packages
sudo apt-get install \
python python-numpy python-scipy \
python-setuptools python-matplotlib
# pip (recommended for automatic installs via setuptools)
sudo apt-get install python-pip
# iPython console -- very useful (optional)
sudo apt-get install ipython
# Spyder IDE (I don't personally use it but many others like it: optional)
sudo apt-get install spyder
```
###### Windows
Download [**python(x,y)**](https://code.google.com/p/pythonxy/wiki/Downloads) or another full-featured distribution such as **Anaconda**; both of these distributions have been tested successfully with gFlex. Python(x,y) and several others also contain the required packages (including the numerical libraries), the iPython console, and the Spyder IDE; [**Spyder**](https://code.google.com/p/spyderlib/) is a nice IDE that will provide a familiar-looking interface for users accustomed to Matlab.
###### Mac
The current recommendation is to use a package manager like [**homebrew**](http://brew.sh/). With this you can install Python, and then move on to using **pip** (or **homebrew**) to install the Python modules. A good introduction to this can be found here: http://www.thisisthegreenroom.com/2011/installing-python-numpy-scipy-matplotlib-and-ipython-on-lion. See the **Linux** instructions for the list of packages that you will need; after installing pip, these commands can be substituted as follows, e.g.,
```bash
# Homebrew
sudo brew install python-numpy
# Pip
pip install numpy
```
Recent efforts to use full Python distributions (both **Anaconda** and **Enthought**) have not met with success for gFlex and GRASS together on Mac, though **Anaconda** has been tested successfully with Windows. As a result, it should be more reliable to keep the Python packages managed by something like **homebrew** with **pip**.
##### Setuptools and ez_setup (Windows and Mac with distributions)
The distributions for Mac and Windows do not come with setuptools, which is required to install gFlex. However, if you install ez_setup, the gFlex install script will automatically install setuptools for you. Simply type:
```bash
pip install ez_setup # Windows or Mac without special privileges required
sudo pip install ez_setup # Mac where sudo privileges are required
```
Of course, one can also bypass the need for the install script to install setuptools by using pip preemptively:
```bash
pip install setuptools # Windows or Mac without special privileges required
sudo pip install setuptools # Mac where sudo privileges are required
```
#### gFlex
##### Downloading and Installing in One Step from PyPI using pip
gFlex is downloadable from the Python Package Index ([PyPI](https://pypi.python.org/pypi)); see https://pypi.python.org/pypi/gFlex.
If you have **pip**, you may simply type:
```bash
pip install gflex
# Or if the destination install folder requires sudo access
# (for UNIX-like systems)
sudo pip install gflex
# pip install gFlex works too -- install is caps-insensitive
```
and you will have a full, running copy of the latest release version of gFlex.
##### Downloading
gFlex may be downloaded here at GitHub, by either:
* Copying the link at right and pasting it into the command prompt as follows:
```bash
git clone <LINK>
```
* Downloading and extracting the compressed ZIP file (link at right)
* Clicking on the link to add gFlex to your local GitHub desktop app (for Windows or Mac)
##### Installing
Install gFlex at the command prompt using [setuptools](https://pypi.python.org/pypi/setuptools). If you have administrator privileges, which *is often also the case when doing this install under Windows*, you may drop the "sudo". For standard Linux or Mac users, the "sudo" will remain necessary, and you will have to enter your administrator password for the program to be added to your local set of applications (e.g., as "/usr/local/bin/gflex").
```bash
# For standard Linux/Mac users:
sudo python setup.py install
# OR
sudo python setup.py develop # If you want the install to see instantly
# any changes made in the source repository
# For Windows users or Unix-type users with SuperUser privileges:
python setup.py install
# OR
python setup.py develop # If you want the install to see instantly
# any changes made in the source repository
```
## Running
Once gFlex is installed, it is possible to run it in four ways:
1. With a configuration file
2. Within a Python script
3. Within GRASS GIS
4. As part of the Landlab Earth-surface modeling framework, including an interface to the Community Surface Dynamics Modeling System [Component Model Interface (CMI)](http://csdms.colorado.edu/wiki/CMI_Description)
For options 1 and 2, there are pre-built methods that can be selected along the way to visualize results. These use Python's Matplotlib plotting library. For option 3, GRASS GIS is used for visualization. In Option 4, output from Landlab can be visualized with Matplotlib, and output from CSDMS sets of models can be visualized using tools such as [VisIt](https://wci.llnl.gov/simulation/computer-codes/visit/) ([CSDMS page about VisIt](http://csdms.colorado.edu/wiki/CMT_visualization)) and [ParaView](http://www.paraview.org/). ParaView also now has [Python bindings](http://www.paraview.org/python/), which can further be used to visualize outputs produced with any of these methods.
#### With configuration file
A configuration file can be generated to run gFlex; see examples in the **input/** directory. To run gFlex using this file, one simply opens a terminal window and types:
```bash
# run like this:
gflex <path-to-configuration-file>
```
This can be run from any directory, as the installation of gFlex adds the program "gflex" to the system path.
For help constructing configuration files, see the blank template files **input/template1D** and **input/template2D**, as well as the other examples found in the **input/** directory. The **input/** directory also contains **input/README.md**, which provides a further local description of the files available. **input/input_help** provides a longer explanation of what the parameters are, and is therefore reproduced immediately below for reference:
```Lisp
; input_help
; All units are SI. Not all entries are needed.
; Standard parameter values for Earth are included.
[mode]
; 1 (line) or 2 (surface) dimensions
dimension=2
; Solution method: FD (Finite Difference), FFT (Fast Fourier
; Transform, not yet implemented), SAS (Spatial domain analytical
; solutions), or SAS_NG (SAS, but does not require a uniform grid
; - NG = "no grid")
; For SAS_NG, 1D data must be provided and will be returned in
; two columns: (x,q0) --> (x,w). 2D data are similar, except
; will be of the form (x,y,[q0/in or w/out])
; I am working on gridded output for these, so this might change
; in the future.
; Both the FFT and SAS techniques rely on superposition
; of solutions, because they can be combined linearly, whether in
; the spectral or the spatial domain.
method=SAS
; Plate solutions can be:
; * vWC1994 (best), or
; * G2009 (from Govers et al., 2009; not bad, but not
; as robust as vWC1994)
PlateSolutionType=vWC1994
[parameter]
YoungsModulus=65E9
PoissonsRatio=0.25
GravAccel=9.8
MantleDensity=3300
; This is the density of material (e.g., air, water)
; that is filling (or leaving) the hole that was
; created by flexure. If you do not have a constant
; density of infilling material, for example, at a
; subsiding shoreline, you must instead iterate (see
; [numerical], below).
InfillMaterialDensity=0
[input]
; space-delimited array of loads
; stresses (rho*g*h) if gridded (dx (and if applicable, dy)) will be applied
; to convert them into masses
; forces (rho*g*h*Area) if not gridded (SAS_NG)
; If the solution method (above) is selected as "SAS_NG", then this file
; will actually be of the format (x,[y],q0) and the code will sort it out.
; (Once again, working on a gridded output option for ungridded inputs)
Loads=q0_sample/2D/central_square_load.txt
;
; scalar value or space-delimited array of elastic thickness(es)
; array used for finite difference solutions
ElasticThickness=Te_sample/2D/10km_const.txt
;
; xw and yw are vectors of desired output points for the SAS_NG method.
; If they are not specified and a SAS_NG solution is run, the solution will be
; calculated at the points where the loads are applied.
; They are ignored if a different solution method is chosen.
xw=
yw=
[output]
; DeflectionOut is for writing an output file.
; If this is blank, no output is printed.
; Otherwise, a space-delimited ASCII file of
; outputs is written with this file name (and path).
DeflectionOut=tmpout.txt
;
; Acceptable inputs to "Plot" are q0 (loads), w (deflection), or both; any
; other entry here will result in no plotting.
; Automatically plots a 1D line or 2D surface based on the choice
; of "dimension" variable in [mode]
Plot=both
[numerical]
; dx [m]
GridSpacing_x=
;
; Boundary conditions can be:
; (FD): 0Slope0Shear, 0Moment0Shear, 0Displacement0Slope, Mirror, or Periodic
; For SAS or SAS_NG, NoOutsideLoads is valid, and no entry defaults to this
BoundaryCondition_West=
BoundaryCondition_East=
;
; Solver can be direct or iterative
Solver=
; Tolerance between iterations [m]
; If you have chosen an iterative solution type ("Solver"), it will iterate
; until this is the difference between two subsequent iterations.
; Set as 0 if you don't want to iterate
convergence=1E-3
[numerical2D]
; dy [m]
GridSpacing_y=
;
; Boundary conditions can be:
; (FD): 0Slope0Shear, 0Moment0Shear, 0Displacement0Slope, Mirror, or Periodic
; For SAS or SAS_NG, NoOutsideLoads is valid, and no entry defaults to this
BoundaryCondition_North=
BoundaryCondition_South=
;
; Flag to enable lat/lon input (true/false). By default, this is false
latlon=
; radius of planet [m], for lat/lon solutions
PlanetaryRadius=
[verbosity]
; true/false. Defaults to true.
Verbose=
; true/false. Defaults to false.
Debug=
; true/false -- total silence if true. Defaults to false.
Quiet=
```
#### Within a Python script (with or without a configuration file)
You may run gFlex from other Python programs. When you install it (above), this also produces a Python module that you may import to access it while scripting.
##### With no configuration file (recommended)
**input/run_in_script_2D.py**, reproduced below, is a good example of how to set the variables and run the model. This method requires no input file, as all of the values are set inside the Python script that imports gflex. This is essentially how the GRASS GIS interface was written, and is a way to embed the abilities of gFlex into another model. A one-dimensional example, **input/run_in_script_1D.py**, is also available.
```python
#! /usr/bin/env python
import gflex
import numpy as np
from matplotlib import pyplot as plt
flex = gflex.F2D()
flex.Quiet = False
flex.Method = 'FD' # Solution method: * FD (finite difference)
# * SAS (superposition of analytical solutions)
# * SAS_NG (ungridded SAS)
flex.PlateSolutionType = 'vWC1994' # van Wees and Cloetingh (1994)
# The other option is 'G2009': Govers et al. (2009)
flex.Solver = 'direct' # direct or iterative
# convergence = 1E-3 # convergence between iterations, if an iterative solution
# method is chosen
flex.g = 9.8 # acceleration due to gravity
flex.E = 65E9 # Young's Modulus
flex.nu = 0.25 # Poisson's Ratio
flex.rho_m = 3300. # MantleDensity
flex.rho_fill = 0. # InfillMaterialDensity
flex.Te = 35000.*np.ones((50, 50)) # Elastic thickness [m] -- scalar but may be an array
flex.Te[:,-3:] = 0.
flex.qs = np.zeros((50, 50)) # Template array for surface load stresses
flex.qs[10:40, 10:40] += 1E6 # Populating this template
flex.dx = 5000. # grid cell size, x-oriented [m]
flex.dy = 5000. # grid cell size, y-oriented [m]
# Boundary conditions can be:
# (FD): 0Slope0Shear, 0Moment0Shear, 0Displacement0Slope, Mirror, or Periodic
# For SAS or SAS_NG, NoOutsideLoads is valid, and no entry defaults to this
flex.BC_W = '0Displacement0Slope' # west boundary condition
flex.BC_E = '0Moment0Shear' # east boundary condition
flex.BC_S = '0Displacement0Slope' # south boundary condition
flex.BC_N = '0Displacement0Slope' # north boundary condition
# latitude/longitude solutions are exact for SAS, approximate otherwise
#latlon = # true/false: flag to enable lat/lon input. Defaults False.
#PlanetaryRadius = # radius of planet [m], for lat/lon solutions
flex.initialize()
flex.run()
flex.finalize()
# If you want to plot the output
flex.plotChoice='both'
# An output file for deflections could also be defined here
# flex.wOutFile =
flex.output() # Plots and/or saves output, or does nothing, depending on
# whether flex.plotChoice and/or flex.wOutFile have been set
# TO OBTAIN OUTPUT DIRECTLY IN PYTHON, you can assign the internal variable,
# flex.w, to another variable -- or as an element in a list if you are looping
# over many runs of gFlex:
deflection = flex.w
```
##### With a configuration file
If you would like to use a Python script with a configuration file, this is also possible.
```python
import gflex
# To use a configuration file:
filename = '../gflex/input/input_f1d_test' # it works for usage (1) and (2)
obj = gflex.WhichModel(filename)
## SET MODEL TYPE AND DIMENSIONS HERE ##
########################################
if obj.dimension == 1:
obj = gflex.F1D(filename)
elif obj.dimension == 2:
obj = gflex.F2D(filename)
# Then run the code!
obj.initialize(filename)
obj.run()
obj.finalize()
# Standalone plotting output if you so desire
obj.plotChoice = 'w'
obj.output()
```
#### Within GRASS GIS
To run gFlex inside of GRASS GIS 7, run the following commands from within a GRASS GIS session:
```bash
g.extension r.flexure
g.extension v.flexure
```
This will reach into the GRASS GIS subversion repository, download the source code, and install the packages. **r.flexure** is used for raster grids by either finite difference or analytical methods. **v.flexure** takes advantage of the ungridded analytical method to solve for flexure at an arbitrary set of load points, albeit limited to cases with constant elastic thickness. Their source code and manual pages are located at, respectively:
* **r.flexure**
* Source: http://trac.osgeo.org/grass/browser/grass-addons/grass7/raster/r.flexure
* Manual page (HTML): http://grass.osgeo.org/grass70/manuals/addons/r.flexure.html
* **v.flexure**
* Source: http://trac.osgeo.org/grass/browser/grass-addons/grass7/vector/v.flexure
* Manual page (HTML): http://grass.osgeo.org/grass70/manuals/addons/v.flexure.html
When running **r.flexure**, it is important to ensure that the elastic thickness map is at or properly interpolated to the computational region (**g.region**) resolution before solving. A nearest-neighbor interpolated Te map will cause perceived gradients in elastic thickness to be very sharp, and this will strongly affect (and misdirect) the flexural solutions.
#### As part of Landlab and the CSDMS CMI
Landlab is an in-development (but nearing release) Earth-surface modeling framework built to facilitate easy integration of geomorphic, ecological, hydrological, geological, etc. Earth-surface related models to simulate and investigate the links between multiple processes. gFlex can be linked with Landlab, and the code to do this is available within the Landlab repository at https://github.com/landlab/landlab/tree/master/landlab/components/gFlex.
The Landlab interface to gFlex also provides gFlex with the Community Surface Dynamics Modeling System (CSDMS) [Component Model Interface (CMI)](http://csdms.colorado.edu/wiki/CMI_Description) interface. This allows it to be run as a coupled component across multiple programming languages and paradigms as part of the CSDMS community of models. For more information on model coupling with CSDMS, see the example presentation at http://csdms.colorado.edu/w/images/CSDMS_lecture7.pdf and the paper on the model coupling published by [Peckham et al., "A component-based approach to integrated modeling in the geosciences: The design of CSDMS"](http://www.sciencedirect.com/science/article/pii/S0098300412001252).
### Plotting
There are four plot choices, defined via `self.plotChoice`:
* `'q'`: plots the load in mantle-density-equivalent units of length
* `'w'`: plots the deflection in units of length
* `'both'`: plots both deflection and loads in separate panels of a 2-subplot figure
* `'combo'`: (1D only): plots lithospheric deflections and the deflected mantle-density-equivalent load atop it.
  * Note that the load does not affect the area above/below the datum filled when `rho_fill != 0`. This affects the buoyant balance associated with the motion of the plate, with no additional considerations for topography. If you would like to include topography, an iterative approach (e.g., finding areas below sea level, filling them, flexing, finding new areas below sea level, and so on) is recommended.
## Utilities
The "utilities" folder currently contains only one program, `flexural_wavelength_calculator.py`. Operating it is simple and fairly rudimentary: just edit the input variables directly in the calculator Python file, and then run it to see what the flexural parameter, first zero-crossing point (on the load-side of the forebulge), and the flexural wavelength.
/pynq-3.0.1.tar.gz/pynq-3.0.1/boards/Pynq-Z2/base/notebooks/pmod/pmod_tmp2.ipynb
# PmodTMP2 Sensor example
In this example, the Pmod temperature sensor is initialized and set to log a reading every 1 second.
This example requires the PmodTMP2 sensor, and assumes it is attached to PMODB.
### 1. Simple TMP2 read() to see current room temperature
```
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
from pynq.lib import Pmod_TMP2
mytmp = Pmod_TMP2(base.PMODB)
temperature = mytmp.read()
print(str(temperature) + " C")
```
### 2. Start logging temperature once every second
```
mytmp.start_log()
```
### 3. Try to modify temperature reading by touching the sensor
The default interval between samples is 1 second. So wait for at least 10 seconds to get enough samples.
During this period, try to press finger on the sensor to increase its temperature reading.
### 4. Stop logging and collect the log
Stop the logging when you are done trying to change the sensor's value.
```
mytmp.stop_log()
log = mytmp.get_log()
```
### 5. Plot values over time
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(range(len(log)), log, 'ro')
plt.title('TMP2 Sensor log')
plt.axis([0, len(log), min(log), max(log)])
plt.show()
```
/austin_tui-1.2.3-py3-none-any.whl/austin_tui/adapters.py
from typing import Any
from typing import Optional
from typing import Union
from austin.stats import ThreadStats
from austin_tui import AustinProfileMode
from austin_tui.model import Model
from austin_tui.model.austin import AustinModel
from austin_tui.model.system import Bytes
from austin_tui.model.system import FrozenSystemModel
from austin_tui.model.system import Percentage
from austin_tui.model.system import SystemModel
from austin_tui.view import View
from austin_tui.widgets.graph import FlameGraphData
from austin_tui.widgets.markup import AttrString
from austin_tui.widgets.markup import escape
from austin_tui.widgets.table import TableData
class Adapter:
"""Model-View adapter.
Bridges between a data model and the actual data structure required by a
widget so that it can be displayed in a view.
An adapter is made of two steps: ``transform`` and ``update``. The former
transforms the model data into a format that is suitable for representation
for the given widget. The latter is responsible for updating the widget
appearance.
An adapter is used by simply calling it.
"""
def __init__(self, model: Model, view: View) -> None:
self._model = model
self._view = view
def __call__(self) -> bool:
"""Invoke the adapter."""
return self.update(self.transform())
def transform(self) -> Any:
"""Transform the model data into the widget data."""
pass
def update(self, data: Any) -> bool:
"""Update the view with the widget data."""
pass
class FreezableAdapter(Adapter):
"""An adapter with freezable widget data."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._frozen = False
self._data: Optional[Any] = None
def __call__(self) -> bool:
"""Invoke the adapter on either live or frozen data."""
if self._frozen:
return self.update(self.defrost())
return super().__call__()
def freeze(self) -> None:
"""Freeze the widget data."""
self._data = self.transform()
self._frozen = True
def defrost(self) -> Any:
"""Retrieve the frozen data.
Implement to return the frozen data.
"""
return self._data
def unfreeze(self) -> None:
"""Unfreeze the adapter."""
self._frozen = False
@property
def frozen(self) -> bool:
"""The freeze status of the adapter."""
return self._frozen
class CommandLineAdapter(FreezableAdapter):
"""Command line adapter."""
def transform(self) -> AttrString:
"""Retrieve the command line."""
cmd = self._model.austin.command_line
exec, _, args = cmd.partition(" ")
return self._view.markup(f"<exec><b>{escape(exec)}</b></exec> {escape(args)}")
def update(self, data: AttrString) -> bool:
"""Update the widget."""
return self._view.cmd_line.set_text(data)
class CountAdapter(FreezableAdapter):
"""Sample count adapter."""
def transform(self) -> int:
"""Retrieve the count."""
return self._model.austin.samples_count
def update(self, data: int) -> bool:
"""Update the widget."""
return self._view.samples.set_text(data)
class CpuAdapter(Adapter):
"""CPU metrics adapter."""
def transform(self) -> Percentage:
"""Get the CPU usage."""
return self._model.system.get_cpu(self._model.system.child_process)
def update(self, data: Percentage) -> bool:
"""Update the metric and the plot."""
self._view.cpu.set_text(f"{data}% ")
self._view.cpu_plot.push(data)
return True
class MemoryAdapter(Adapter):
"""Memory usage adapter."""
def transform(self) -> Bytes:
"""Get memory usage."""
return self._model.system.get_memory(self._model.system.child_process)
def update(self, data: Bytes) -> bool:
"""Update metric and plot."""
self._view.mem.set_text(f"{data>>20}M ")
self._view.mem_plot.push(data)
return True
def fmt_time(s: int) -> str:
"""Format microseconds into mm':ss''."""
m = int(s // 60e6)
ret = '{:02d}"'.format(round(s / 1e6) % 60)
if m:
ret = str(m) + "'" + ret
return ret
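# For example (illustrative only): fmt_time(83_000_000) returns 1'23",
# i.e. 83 seconds' worth of microseconds rendered as mm'ss''.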
class DurationAdapter(FreezableAdapter):
"""Duration adapter."""
def transform(self) -> str:
"""Get duration."""
return fmt_time(int(self._model.system.duration * 1e6))
def update(self, data: str) -> bool:
"""Update the widget."""
return self._view.duration.set_text(data)
class CurrentThreadAdapter(Adapter):
"""Currently selected thread adapter."""
def transform(self) -> Union[str, AttrString]:
"""Get current thread."""
austin = self._model.frozen_austin if self._model.frozen else self._model.austin
n = len(austin.threads)
if not n:
return "--/--"
return self._view.markup(
f"<thread>{austin.current_thread + 1}</thread><hdrbox>/{n}</hdrbox>"
)
def update(self, data: Union[str, AttrString]) -> bool:
"""Update the widget."""
return self._view.thread_num.set_text(data)
class ThreadNameAdapter(FreezableAdapter):
"""Currently selected thread name adapter."""
def transform(self) -> Union[str, AttrString]:
"""Get the thread name."""
austin = self._model.frozen_austin if self._model.frozen else self._model.austin
if austin.threads:
pid, _, tid = austin.threads[austin.current_thread].partition(":")
return self._view.markup(f"<pid><b>{pid}</b></pid>:<tid><b>{tid}</b></tid>")
return "--:--"
def update(self, data: Union[str, AttrString]) -> bool:
"""Update the widget."""
return self._view.thread_name.set_text(data)
class BaseThreadDataAdapter(Adapter):
"""Base implementation for the thread table data adapter."""
def transform(self) -> TableData:
"""Transform according to the right model."""
austin = self._model.frozen_austin if self._model.frozen else self._model.austin
system = self._model.frozen_system if self._model.frozen else self._model.system
return self._transform(austin, system)
def update(self, data: TableData) -> bool:
"""Update the table."""
return self._view.table.set_data(data)
class ThreadDataAdapter(BaseThreadDataAdapter):
"""Thread table data adapter."""
def _transform(
self, austin: AustinModel, system: Union[SystemModel, FrozenSystemModel]
) -> TableData:
formatter, scaler = (
(self._view.fmt_mem, self._view.scale_memory)
if self._view.mode == AustinProfileMode.MEMORY
else (self._view.fmt_time, self._view.scale_time)
)
thread_key = austin.threads[austin.current_thread]
pid, _, thread = thread_key.partition(":")
thread_stats = austin.stats.processes[int(pid)].threads[thread]
frames = austin.get_last_stack(thread_key).frames
container = thread_stats.children
frame_stats = []
max_scale = (
system.max_memory
if self._view.mode == AustinProfileMode.MEMORY
else system.duration
)
for frame in frames:
child_frame_stats = container[frame]
if (
child_frame_stats.total.value / 1e6 / max_scale
< self._model.austin.threshold
):
break
frame_stats.append(
[
formatter(child_frame_stats.own.value),
formatter(child_frame_stats.total.value),
scaler(child_frame_stats.own.value, max_scale),
scaler(child_frame_stats.total.value, max_scale),
self._view.markup(
" "
+ escape(child_frame_stats.label.function)
+ f" <inactive>({escape(child_frame_stats.label.filename)}"
f":{child_frame_stats.label.line})</inactive>"
),
]
)
container = child_frame_stats.children
return frame_stats
class ThreadFullDataAdapter(BaseThreadDataAdapter):
"""Full thread data adapter."""
def _transform(
self, austin: AustinModel, system: Union[SystemModel, FrozenSystemModel]
) -> TableData:
formatter, scaler = (
(self._view.fmt_mem, self._view.scale_memory)
if self._view.mode == AustinProfileMode.MEMORY
else (self._view.fmt_time, self._view.scale_time)
)
thread_key = austin.threads[austin.current_thread]
pid, _, thread = thread_key.partition(":")
frames = austin.get_last_stack(thread_key).frames
frame_stats = []
max_scale = (
system.max_memory
if self._view.mode == AustinProfileMode.MEMORY
else system.duration
)
def _add_frame_stats(
stats: ThreadStats,
marker: str,
prefix: str,
level: int = 0,
active_bucket: Optional[dict] = None,
active_parent: bool = True,
) -> None:
if stats.total.value / 1e6 / max_scale < self._model.austin.threshold:
return
try:
active = (
active_bucket is not None
and stats.label in active_bucket
and stats.label == frames[level]
and active_parent
)
active_bucket = stats.children
except IndexError:
active = False
active_bucket = None
frame_stats.append(
[
formatter(stats.own.value, active),
formatter(stats.total.value, active),
scaler(stats.own.value, max_scale, active),
scaler(stats.total.value, max_scale, active),
self._view.markup(
" "
+ f"<inactive>{marker}</inactive>"
+ (
escape(stats.label.function)
if active
else f"<inactive>{escape(stats.label.function)}</inactive>"
)
+ f" <inactive>(<filename>{escape(stats.label.filename)}</filename>"
f":<lineno>{stats.label.line}</lineno>)</inactive>"
),
]
)
children_stats = [child_stats for _, child_stats in stats.children.items()]
if not children_stats:
return
for child_stats in children_stats[:-1]:
_add_frame_stats(
child_stats,
prefix + "├─ ",
prefix + "│ ",
level + 1,
active_bucket,
active,
)
_add_frame_stats(
children_stats[-1],
prefix + "└─ ",
prefix + " ",
level + 1,
active_bucket,
active,
)
thread_stats = austin.stats.processes[int(pid)].threads[thread]
children = [stats for _, stats in thread_stats.children.items()]
if children:
for stats in children[:-1]:
_add_frame_stats(stats, "├─ ", "│ ", 0, thread_stats.children)
_add_frame_stats(children[-1], "└─ ", " ", 0, thread_stats.children)
return frame_stats
class FlameGraphAdapter(Adapter):
"""Flame graph data adapter."""
def transform(self) -> dict:
"""Transform according to the right model."""
austin = self._model.frozen_austin if self._model.frozen else self._model.austin
system = self._model.frozen_system if self._model.frozen else self._model.system
return self._transform(austin, system) # type: ignore[arg-type]
def _transform(
self, austin: AustinModel, system: Union[SystemModel, FrozenSystemModel]
) -> dict:
thread_key = austin.threads[austin.current_thread]
pid, _, thread = thread_key.partition(":")
thread = austin.stats.processes[int(pid)].threads[thread]
cs = {} # type: ignore[var-annotated]
total = thread.total.value
total_pct = min(int(total / system.duration / 1e4), 100)
data: FlameGraphData = {
f"THREAD {thread.label} ⏲️ {fmt_time(total)} ({total_pct}%)": (total, cs)
}
levels = [(c, cs) for c in thread.children.values()]
while levels:
level, c = levels.pop(0)
k = f"{level.label.function} ({level.label.filename})"
if k in c:
v, cs = c[k]
c[k] = (v + level.total.value, cs)
else:
cs = {}
c[k] = (level.total.value, cs)
levels.extend(((c, cs) for c in level.children.values()))
return data
def update(self, data: FlameGraphData) -> bool:
"""Update the table."""
(header,) = data
return self._view.flamegraph.set_data(data) | self._view.graph_header.set_text(
" FLAME GRAPH FOR " + header
)
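# --- Illustrative subclass (a sketch, not part of the original module) ---
# A minimal Adapter that pushes a single model value into a single view
# widget; the `error_count` field and `errors` widget are hypothetical.
#
#   class ErrorCountAdapter(Adapter):
#       def transform(self) -> str:
#           return str(self._model.austin.error_count)  # hypothetical field
#
#       def update(self, data: str) -> bool:
#           return self._view.errors.set_text(data)     # hypothetical widget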
/corebytecms_forms-1.0.1-py3-none-any.whl/cms_forms/models.py
import json
import warnings
from collections import defaultdict, namedtuple
from functools import partial
from typing import List
from django.conf import settings
from django.db import models
from django.db.models.functions import Coalesce
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from cms.models.fields import PageField
from cms.models.pluginmodel import CMSPlugin
from cms.utils.plugins import downcast_plugins
from djangocms_attributes_field.fields import AttributesField
from filer.fields.folder import FilerFolderField
from six import text_type
from .compat import build_plugin_tree
from .helpers import is_form_element
from .sizefield.models import FileSizeField
from .utils import FORMS_ACTION_BACKEND_KEY_MAX_SIZE, action_backend_choices
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
# Once djangoCMS < 3.3.1 support is dropped
# Remove the explicit cmsplugin_ptr field declarations
CMSPluginField = partial(
models.OneToOneField,
to=CMSPlugin,
related_name='%(app_label)s_%(class)s',
parent_link=True,
)
FieldData = namedtuple(
'FieldData',
field_names=['label', 'value']
)
FormField = namedtuple(
'FormField',
field_names=[
'name',
'label',
'plugin_instance',
'field_occurrence',
'field_type_occurrence',
]
)
Recipient = namedtuple(
'Recipient',
field_names=['name', 'email']
)
BaseSerializedFormField = namedtuple(
'SerializedFormField',
field_names=[
'name',
'label',
'field_occurrence',
'value',
]
)
class SerializedFormField(BaseSerializedFormField):
# For _asdict() with Py3K
__slots__ = ()
@property
def field_id(self):
field_label = self.label.strip()
if field_label:
field_as_string = '{}-{}'.format(field_label, self.field_type)
else:
field_as_string = self.name
field_id = '{}:{}'.format(field_as_string, self.field_occurrence)
return field_id
@property
def field_type_occurrence(self):
        # rpartition('_') -> (head, '_', tail); the occurrence is the tail.
        return self.name.rpartition('_')[2]
@property
def field_type(self):
return self.name.rpartition('_')[0]
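# Quick illustration of the parsing above, assuming the "<type>_<n>"
# naming convention used by auto-generated field names:
#
#   >>> f = SerializedFormField(name='textfield_2', label='Name',
#   ...                         field_occurrence=1, value='John')
#   >>> f.field_type
#   'textfield'
#   >>> f.field_type_occurrence
#   '2'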
class BaseFormPlugin(CMSPlugin):
FALLBACK_FORM_TEMPLATE = 'cms_forms/form.html'
DEFAULT_FORM_TEMPLATE = getattr(
settings, 'cms_forms_DEFAULT_TEMPLATE', FALLBACK_FORM_TEMPLATE)
FORM_TEMPLATES = ((DEFAULT_FORM_TEMPLATE, _('Default')),)
if hasattr(settings, 'cms_forms_TEMPLATES'):
FORM_TEMPLATES += settings.cms_forms_TEMPLATES
REDIRECT_TO_PAGE = 'redirect_to_page'
REDIRECT_TO_URL = 'redirect_to_url'
REDIRECT_CHOICES = [
(REDIRECT_TO_PAGE, _('CMS Page')),
(REDIRECT_TO_URL, _('Absolute URL')),
]
_form_elements = None
_form_field_key_cache = None
name = models.CharField(
verbose_name=_('Name'),
max_length=255,
help_text=_('Used to filter out form submissions.'),
)
error_message = models.TextField(
verbose_name=_('Error message'),
blank=True,
null=True,
help_text=_('An error message that will be displayed if the form '
'doesn\'t validate.')
)
success_message = models.TextField(
verbose_name=_('Success message'),
blank=True,
null=True,
        help_text=_('A success message that will be displayed.')
)
redirect_type = models.CharField(
verbose_name=_('Redirect to'),
max_length=20,
choices=REDIRECT_CHOICES,
help_text=_(
'Where to redirect the user when the form has been successfully sent?'),
blank=True,
)
url = models.URLField(_('Absolute URL'), blank=True, null=True)
custom_classes = models.CharField(
verbose_name=_('custom css classes'), max_length=255, blank=True)
form_template = models.CharField(
verbose_name=_('form template'),
max_length=255,
choices=FORM_TEMPLATES,
default=DEFAULT_FORM_TEMPLATE,
)
# Staff notification email settings
recipients = models.ManyToManyField(
to=AUTH_USER_MODEL,
verbose_name=_('Recipients'),
blank=True,
limit_choices_to={'is_staff': True},
help_text=_('People who will get the form content via e-mail.')
)
action_backend = models.CharField(
verbose_name=_('Action backend'),
max_length=FORMS_ACTION_BACKEND_KEY_MAX_SIZE,
default='default',
choices=action_backend_choices(),
)
form_attributes = AttributesField(
verbose_name=_('Attributes'),
blank=True,
)
redirect_page = PageField(
verbose_name=_('CMS Page'),
blank=True,
null=True,
on_delete=models.SET_NULL,
)
cmsplugin_ptr = CMSPluginField(
on_delete=models.CASCADE,
)
class Meta:
abstract = True
def __str__(self):
return self.name
@property
def page(self):
warnings.warn(
'The "page" field has been renamed to redirect_page '
'and will be removed on Aldryn Forms 3.1.0',
PendingDeprecationWarning
)
return self.redirect_page
@page.setter
def page(self, value):
warnings.warn(
'The "page" field has been renamed to redirect_page '
'and will be removed on Aldryn Forms 3.1.0',
PendingDeprecationWarning
)
self.redirect_page = value
@cached_property
def success_url(self):
if self.redirect_type == FormPlugin.REDIRECT_TO_PAGE:
return self.redirect_page.get_absolute_url()
elif self.redirect_type == FormPlugin.REDIRECT_TO_URL and self.url:
return self.url
def copy_relations(self, oldinstance):
for recipient in oldinstance.recipients.all():
self.recipients.add(recipient)
def get_submit_button(self):
from .cms_plugins import SubmitButton
form_elements = self.get_form_elements()
for element in form_elements:
plugin_class = element.get_plugin_class()
if issubclass(plugin_class, SubmitButton):
return element
return
def get_form_fields(self) -> List[FormField]:
from .cms_plugins import Field
fields = []
        # A field occurrence is how many times a field with the same
        # label and type appears within the same form.
        # This is used as an identifier for the field across multiple forms.
        field_occurrences = defaultdict(lambda: 1)
        # A field type occurrence is how many times a field with the
        # same type appears within the same form.
        # This is used as an identifier for the field within this form.
        field_type_occurrences = defaultdict(lambda: 1)
form_elements = self.get_form_elements()
field_plugins = [
plugin for plugin in form_elements
if issubclass(plugin.get_plugin_class(), Field)
]
for field_plugin in field_plugins:
field_type = field_plugin.field_type
if field_type in field_type_occurrences:
field_type_occurrences[field_type] += 1
field_label = field_plugin.get_label()
field_type_occurrence = field_type_occurrences[field_type]
if field_plugin.name:
field_name = field_plugin.name
else:
field_name = '{0}_{1}'.format(field_type,
field_type_occurrence)
if field_label:
field_id = '{0}_{1}'.format(field_type, field_label)
else:
field_id = field_name
if field_id in field_occurrences:
field_occurrences[field_id] += 1
field = FormField(
name=field_name,
label=field_label,
plugin_instance=field_plugin,
field_occurrence=field_occurrences[field_id],
field_type_occurrence=field_type_occurrence,
)
fields.append(field)
return fields
def get_form_field_name(self, field: 'FieldPluginBase') -> str:
if self._form_field_key_cache is None:
self._form_field_key_cache = {}
        cache_needs_update = field.pk not in self._form_field_key_cache
        if cache_needs_update:
form_fields: List[FormField] = self.get_form_fields()
for form_field in form_fields:
self._form_field_key_cache[
form_field.plugin_instance.pk] = form_field.name
return self._form_field_key_cache[field.pk]
def get_form_fields_as_choices(self):
fields = self.get_form_fields()
for field in fields:
yield (field.name, field.label)
def get_form_elements(self):
from .utils import get_nested_plugins
if self.child_plugin_instances is None:
descendants = self.get_descendants().order_by('path')
# Set parent_id to None in order to
# fool the build_plugin_tree function.
# This is sadly necessary to avoid getting all nodes
# higher than the form.
parent_id = self.parent_id
self.parent_id = None
# Important that this is a list in order to modify
# the current instance
descendants_with_self = [self] + list(descendants)
# Let the cms build the tree
build_plugin_tree(descendants_with_self)
# Set back the original parent
self.parent_id = parent_id
if self._form_elements is None:
children = get_nested_plugins(self)
children_instances = downcast_plugins(children)
self._form_elements = [
p for p in children_instances if is_form_element(p)]
return self._form_elements
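# Sketch of the occurrence bookkeeping in BaseFormPlugin.get_form_fields
# (values are illustrative): two unlabeled, unnamed text fields in one form
# are auto-named by type occurrence,
#
#   FormField(name='textfield_1', label='', field_occurrence=1,
#             field_type_occurrence=1, ...)
#   FormField(name='textfield_2', label='', field_occurrence=1,
#             field_type_occurrence=2, ...)
#
# while two fields sharing the same label and type get field_occurrence
# values 1 and 2, so they stay distinguishable in exports.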
class FormPlugin(BaseFormPlugin):
class Meta:
abstract = False
def __str__(self):
return self.name
class FieldsetPlugin(CMSPlugin):
legend = models.CharField(_('Legend'), max_length=255, blank=True)
custom_classes = models.CharField(
verbose_name=_('custom css classes'), max_length=255, blank=True)
cmsplugin_ptr = CMSPluginField(
on_delete=models.CASCADE,
)
def __str__(self):
return self.legend or text_type(self.pk)
class FieldPluginBase(CMSPlugin):
name = models.CharField(
_('Name'),
max_length=255,
help_text=_('Used to set the field name'),
blank=True,
)
label = models.CharField(_('Label'), max_length=255, blank=True)
required = models.BooleanField(_('Field is required'), default=False)
required_message = models.TextField(
verbose_name=_('Error message'),
blank=True,
null=True,
help_text=_('Error message displayed if the required field is left '
'empty. Default: "This field is required".')
)
placeholder_text = models.CharField(
verbose_name=_('Placeholder text'),
max_length=255,
blank=True,
help_text=_('Default text in a form. Disappears when user starts '
'typing. Example: "[email protected]"')
)
help_text = models.TextField(
verbose_name=_('Help text'),
blank=True,
null=True,
help_text=_(
'Explanatory text displayed next to input field. Just like '
'this one.')
)
attributes = AttributesField(
verbose_name=_('Attributes'),
blank=True,
excluded_keys=['name']
)
# for text field those are min and max length
# for multiple select those are min and max number of choices
min_value = models.PositiveIntegerField(
_('Min value'),
blank=True,
null=True,
)
max_value = models.PositiveIntegerField(
_('Max value'),
blank=True,
null=True,
)
initial_value = models.CharField(
verbose_name=_('Initial value'),
max_length=255,
blank=True,
help_text=_('Default value of field.')
)
custom_classes = models.CharField(
verbose_name=_('custom css classes'), max_length=255, blank=True)
cmsplugin_ptr = CMSPluginField(
on_delete=models.CASCADE,
)
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.plugin_type:
attribute = 'is_%s' % self.field_type
setattr(self, attribute, True)
def __str__(self):
return self.label or self.name or str(self.pk)
@property
def field_type(self):
return self.plugin_type.lower()
def get_label(self):
return self.label or self.placeholder_text
class FieldPlugin(FieldPluginBase):
def copy_relations(self, oldinstance):
for option in oldinstance.option_set.all():
option.pk = None # copy on save
option.field = self
option.save()
class TextAreaFieldPlugin(FieldPluginBase):
text_area_columns = models.PositiveIntegerField(
verbose_name=_('columns'), blank=True, null=True)
text_area_rows = models.PositiveIntegerField(
verbose_name=_('rows'), blank=True, null=True)
class EmailFieldPlugin(FieldPluginBase):
email_send_notification = models.BooleanField(
verbose_name=_('send notification when form is submitted'),
default=False,
help_text=_('When checked, the value of this field will be used to '
'send an email notification.')
)
email_subject = models.CharField(
verbose_name=_('email subject'),
max_length=255,
blank=True,
default='',
help_text=_('Used as the email subject when email_send_notification '
'is checked.')
)
email_body = models.TextField(
verbose_name=_('Additional email body'),
blank=True,
default='',
help_text=_('Additional body text used when email notifications '
'are active.')
)
class FileFieldPluginBase(FieldPluginBase):
upload_to = FilerFolderField(
verbose_name=_('Upload files to'),
help_text=_('Select a folder to which all files submitted through '
'this field will be uploaded to.'),
on_delete=models.CASCADE,
)
max_size = FileSizeField(
verbose_name=_('Maximum file size'),
null=True, blank=True,
help_text=_('The maximum file size of the upload, in bytes. You can '
'use common size suffixes (kB, MB, GB, ...).')
)
class Meta:
abstract = True
class FileUploadFieldPlugin(FileFieldPluginBase):
pass
class ImageUploadFieldPlugin(FileFieldPluginBase):
max_width = models.PositiveIntegerField(
verbose_name=_('Maximum image width'),
null=True, blank=True,
help_text=_('The maximum width of the uploaded image, in pixels.')
)
max_height = models.PositiveIntegerField(
verbose_name=_('Maximum image height'),
null=True, blank=True,
help_text=_('The maximum height of the uploaded image, in pixels.')
)
class Option(models.Model):
field = models.ForeignKey(FieldPlugin, editable=False,
on_delete=models.CASCADE)
value = models.CharField(_('Value'), max_length=255)
default_value = models.BooleanField(_('Default'), default=False)
position = models.PositiveIntegerField(_('Position'), blank=True)
class Meta:
verbose_name = _('Option')
verbose_name_plural = _('Options')
ordering = ('position',)
def __str__(self):
return self.value
def set_position(self):
if self.position is None:
self.position = self.field.option_set.aggregate(
max_position=Coalesce(models.Max('position'), 0)
).get('max_position', 0) + 10
def save(self, *args, **kwargs):
self.set_position()
return super(Option, self).save(*args, **kwargs)
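# Hypothetical example of the position auto-increment above: with existing
# options at positions 10 and 20, a new option saved without an explicit
# position lands at 30 (max_position + 10), leaving gaps for reordering:
#
#   opt = Option(field=some_field, value='Maybe')
#   opt.save()   # set_position() -> 20 + 10 == 30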
class FormButtonPlugin(CMSPlugin):
label = models.CharField(_('Label'), max_length=255)
custom_classes = models.CharField(
verbose_name=_('custom css classes'), max_length=255, blank=True)
cmsplugin_ptr = CMSPluginField(
on_delete=models.CASCADE,
)
def __str__(self):
return self.label
class FormSubmission(models.Model):
name = models.CharField(
max_length=255,
verbose_name=_('form name'),
db_index=True,
editable=False
)
data = models.TextField(blank=True, editable=False)
recipients = models.TextField(
verbose_name=_('users notified'),
blank=True,
help_text=_('People who got a notification when form was submitted.'),
editable=False,
)
language = models.CharField(
verbose_name=_('form language'),
max_length=10,
choices=settings.LANGUAGES,
default=settings.LANGUAGE_CODE
)
form_url = models.CharField(
verbose_name=_('form url'),
max_length=255,
blank=True,
)
sent_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-sent_at']
verbose_name = _('Form submission')
verbose_name_plural = _('Form submissions')
def __str__(self):
return self.name
def _form_data_hook(self, data, occurrences):
field_label = data['label'].strip()
if field_label:
field_type = data['name'].rpartition('_')[0]
field_id = '{}_{}'.format(field_type, field_label)
else:
field_id = data['name']
if field_id in occurrences:
occurrences[field_id] += 1
data['field_occurrence'] = occurrences[field_id]
return SerializedFormField(**data)
def _recipients_hook(self, data):
return Recipient(**data)
def get_form_data(self):
occurrences = defaultdict(lambda: 1)
data_hook = partial(self._form_data_hook, occurrences=occurrences)
try:
form_data = json.loads(
self.data,
object_hook=data_hook,
)
except ValueError:
# TODO: Log this?
form_data = []
return form_data
def get_recipients(self):
try:
recipients = json.loads(
self.recipients,
object_hook=self._recipients_hook
)
except ValueError:
# TODO: Log this?
recipients = []
return recipients
def set_form_data(self, form):
fields = form.get_serialized_fields(is_confirmation=False)
fields_as_dicts = [field._asdict() for field in fields]
self.data = json.dumps(fields_as_dicts)
def set_recipients(self, recipients):
raw_recipients = [
{'name': rec[0], 'email': rec[1]} for rec in recipients]
self.recipients = json.dumps(raw_recipients)
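# Round-trip sketch for FormSubmission.data (the stored JSON is assumed to
# contain dicts matching SerializedFormField._fields):
#
#   submission.data = json.dumps([
#       {'name': 'textfield_1', 'label': 'Name',
#        'field_occurrence': 1, 'value': 'John'},
#   ])
#   submission.get_form_data()[0].value   # -> 'John'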
/papermerge_core-2.1.5-py3-none-any.whl/papermerge/core/ocr/document.py
import os
import logging
import ocrmypdf
from papermerge.core.storage import abs_path
from papermerge.core.lib import mime
from papermerge.core.lib.tiff import convert_tiff2pdf
from papermerge.core.lib.path import (
DocumentPath,
)
logger = logging.getLogger(__name__)
STARTED = "started"
COMPLETE = "complete"
def notify_hocr_ready(page_path, **kwargs):
pass
def notify_txt_ready(page_path, **kwargs):
pass
def notify_pre_page_ocr(page_path, **kwargs):
pass
def _ocr_document(
input_doc_path: DocumentPath,
target_doc_path,
lang,
preview_width,
):
# file_name = kwargs.pop('file_name', None)
# if not file_name:
# input_file_name = input_doc_path.file_name
sidecars_dir = abs_path(target_doc_path.dirname_sidecars())
input_document = abs_path(input_doc_path.path)
output_document = abs_path(target_doc_path.path)
output_dir = os.path.dirname(output_document)
if not os.path.exists(output_dir):
os.makedirs(
output_dir,
exist_ok=True
)
ocrmypdf.ocr(
input_document,
output_document,
lang=lang,
plugins=["ocrmypdf_papermerge.plugin"],
progress_bar=False,
output_type='pdf',
pdf_renderer='hocr',
use_threads=True,
force_ocr=True,
keep_temporary_files=False,
sidecar_dir=sidecars_dir,
sidecar_format='svg',
preview_width=preview_width,
deskew=True
)
def ocr_document(
user_id,
document_id,
file_name,
lang,
version,
target_version,
namespace='',
):
lang = lang.lower()
doc_path = DocumentPath(
user_id=user_id,
document_id=document_id,
file_name=file_name,
version=version
)
target_doc_path = DocumentPath.copy_from(
doc_path,
version=target_version
)
mime_type = mime.Mime(
abs_path(doc_path.url)
)
if mime_type.is_pdf() or mime_type.is_image():
_ocr_document(
input_doc_path=doc_path,
target_doc_path=target_doc_path,
lang=lang,
preview_width=300
)
elif mime_type.is_tiff():
new_filename = convert_tiff2pdf(
doc_url=abs_path(doc_path.url)
)
        # the converted document is now a .pdf
        doc_path.file_name = new_filename
        # and continue as usual; _ocr_document only accepts the input and
        # target paths, the language and the preview width, so the original
        # tiff file name is not forwarded here.
        _ocr_document(
            input_doc_path=doc_path,
            target_doc_path=target_doc_path,
            lang=lang,
            preview_width=300
        )
else:
        logger.error(
            "Cannot OCR document: unsupported mime type"
            f" user_id={user_id}"
            f" doc_id={document_id}"
        )
return True
return True
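# Illustrative call (user/document IDs, file name and versions are
# placeholders):
#
#   ocr_document(
#       user_id='user_1',
#       document_id='doc_1',
#       file_name='invoice.pdf',
#       lang='deu',
#       version=1,
#       target_version=2,
#   )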
/infoblox-netmri-3.8.0.0.tar.gz/infoblox-netmri-3.8.0.0/infoblox_netmri/api/broker/v2_5_0/feature_broker.py
from ..broker import Broker
class FeatureBroker(Broker):
controller = "features"
def index(self, **kwargs):
"""Lists the available features. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param name: The feature name. This is the same name shown on the API documentation pages for the required feature.
:type name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: The feature name. This is the same name shown on the API documentation pages for the required feature.
:type name: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` name
:param sort: The data field(s) to use for sorting the output. Default is name. Valid values are name.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Feature. Valid values are name. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return features: An array of the Feature objects that match the specified input criteria.
:rtype features: Array of Feature
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available features matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param name: The feature name. This is the same name shown on the API documentation pages for the required feature.
:type name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: The feature name. This is the same name shown on the API documentation pages for the required feature.
:type name: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` name
:param sort: The data field(s) to use for sorting the output. Default is name. Valid values are name.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Feature. Valid values are name. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against features, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: name.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return features: An array of the Feature objects that match the specified input criteria.
:rtype features: Array of Feature
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available features matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: name.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_name: The operator to apply to the field name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. name: The feature name. This is the same name shown on the API documentation pages for the required feature. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_name: If op_name is specified, the field named in this input will be compared to the value in name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_name must be specified if op_name is specified.
:type val_f_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_name: If op_name is specified, this value will be compared to the value in name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_name must be specified if op_name is specified.
:type val_c_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` name
:param sort: The data field(s) to use for sorting the output. Default is name. Valid values are name.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Feature. Valid values are name. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return features: An array of the Feature objects that match the specified input criteria.
:rtype features: Array of Feature
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
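# Minimal usage sketch; the client wiring is an assumption based on the
# infoblox_netmri client API, not part of this module:
#
#   from infoblox_netmri.client import InfobloxNetMRI
#
#   client = InfobloxNetMRI(host='netmri.example.com',
#                           username='admin', password='secret')
#   broker = client.get_broker('Feature')
#   features = broker.index(limit=10, sort=['name'], dir=['asc'])
#   ipam = broker.find(op_name='like', val_c_name='%ipam%')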
/smartmeter_datacollector-1.1.0-py3-none-any.whl/smartmeter_datacollector/app.py
import argparse
import asyncio
import logging
from asyncio import CancelledError
from configparser import ConfigParser
from . import config, factory
logging.basicConfig(level=logging.WARNING)
async def build_and_start(app_config: ConfigParser):
readers = factory.build_meters(app_config)
sinks = factory.build_sinks(app_config)
data_collector = factory.build_collector(readers, sinks)
await asyncio.gather(*[sink.start() for sink in sinks])
try:
await asyncio.gather(
*[reader.start() for reader in readers],
data_collector.process_queue())
except CancelledError:
pass
finally:
logging.info("App shutting down now.")
await asyncio.gather(*[sink.stop() for sink in sinks])
def set_logging_levels(app_config: ConfigParser) -> None:
if not app_config.has_section("logging"):
return
# configure root logger
logging.getLogger().setLevel(app_config["logging"].get('default', "WARNING"))
# configure individual loggers
for name, level in app_config["logging"].items():
logging.getLogger(name).setLevel(level)
def parse_arguments():
parser = argparse.ArgumentParser(
description="Smart Meter Data Collector", add_help=True)
parser.add_argument(
'-c', '--config', help="File path of the configuration (.ini) file.", default="./datacollector.ini")
parser.add_argument(
'-s', '--saveconfig', help="Create default configuration (.ini) file at path defined with -c",
action='store_true')
parser.add_argument(
'-d', '--dev', help="Development mode", action='store_true')
return parser.parse_args()
def main():
args = parse_arguments()
debug_mode = bool(args.dev)
if args.saveconfig:
config.write_default_config(args.config)
logging.warning("Default configuration written to file '%s'.", args.config)
return
app_config = config.read_config_files(args.config)
set_logging_levels(app_config)
asyncio.run(build_and_start(app_config), debug=debug_mode)
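# A minimal sketch of the "logging" section consumed by set_logging_levels
# (section layout assumed from the code above; other sections are built by
# the factory module):
#
#   [logging]
#   default = INFO
#   smartmeter = DEBUG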
/snapshot-dbg-cli-0.3.6.tar.gz/snapshot-dbg-cli-0.3.6/snapshot_dbg_cli/list_logpoints_command.py
from snapshot_dbg_cli import breakpoint_utils
DESCRIPTION = """
Used to display the debug logpoints for a debug target (debuggee). By default
all active logpoints are returned. To obtain older, expired logpoints, specify
the --include-inactive option.
"""
INCLUDE_INACTIVE_HELP = 'Include all logpoints which have completed.'
ALL_USERS_HELP = """
If false, display only logpoints created by the current user. Enabled by
default, use --no-all-users to disable.
"""
NO_ALL_USERS_HELP = """
Disables --all-users, which is enabled by default.
"""
SUMMARY_HEADERS = [
'User Email', 'Location', 'Condition', 'Log Level', 'Log Message Format',
'ID', 'Status'
]
def transform_to_logpoint_summary(logpoint):
# Match the fields from SUMMARY_HEADERS
return [
logpoint['userEmail'],
breakpoint_utils.transform_location_to_file_line(logpoint['location']),
logpoint['condition'] if 'condition' in logpoint else '',
logpoint['logLevel'],
logpoint['logMessageFormatString'],
logpoint['id'],
breakpoint_utils.get_logpoint_short_status(logpoint),
]
class ListLogpointsCommand:
"""This class implements the list_logpoints command.
The register() method is called by the CLI startup code to install the
list_logpoints command information, and the cmd() function will be invoked if
the list_logpoints command was specified by the user.
"""
def __init__(self):
pass
def register(self, args_subparsers, required_parsers, common_parsers):
parent_parsers = [
common_parsers.database_url, common_parsers.format,
common_parsers.debuggee_id
]
parent_parsers += required_parsers
parser = args_subparsers.add_parser(
'list_logpoints', description=DESCRIPTION, parents=parent_parsers)
parser.add_argument(
'--include-inactive', help=INCLUDE_INACTIVE_HELP, action='store_true')
parser.add_argument(
'--all-users',
help=ALL_USERS_HELP,
default=True,
action='store_true',
dest='all_users')
parser.add_argument(
'--no-all-users',
help=NO_ALL_USERS_HELP,
action='store_false',
dest='all_users')
parser.set_defaults(func=self.cmd)
def cmd(self, args, cli_services):
user_output = cli_services.user_output
debugger_rtdb_service = cli_services.get_snapshot_debugger_rtdb_service()
debugger_rtdb_service.validate_debuggee_id(args.debuggee_id)
user_email = None if args.all_users is True else cli_services.account
logpoints = debugger_rtdb_service.get_logpoints(
debuggee_id=args.debuggee_id,
include_inactive=args.include_inactive,
user_email=user_email)
if args.format.is_a_json_value():
user_output.json_format(logpoints, pretty=args.format.is_pretty_json())
else:
values = list(map(transform_to_logpoint_summary, logpoints))
user_output.tabular(SUMMARY_HEADERS, values)
|
PypiClean
|
/azure-digitaltwins-core-patched-1.0.0b1.tar.gz/azure-digitaltwins-core-patched-1.0.0b1/samples/dt_scenario.py
|
import os
import json
import uuid
from azure.identity import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
from azure.digitaltwins.core import DigitalTwinsClient
# <summary>
# This sample creates all the models in \DTDL\Models folder in the ADT service instance
# and creates the corresponding twins in \DTDL\DigitalTwins folder
# The Diagram for the Hospital model looks like this:
#
# +------------+
# | Building +-----isEquippedWith-----+
# +------------+ |
# | v
# has +-----+
# | | HVAC|
# v +-----+
# +------------+ |
# | Floor +<--controlsTemperature--+
# +------------+
# |
# contains
# |
# v
# +------------+ +-----------------+
# | Room |-with component->| WifiAccessPoint |
# +------------+ +-----------------+
# </summary>
# Scenario example of how to:
# - create a DigitalTwins Service Client using the DigitalTwinsClient constructor
# - create models from file
# - get created models by modelIds one by one
# - get all models by listing them using the paginated API
# - delete the created eventRoutes
# - delete the created relationships
# - delete the created digital twins
# - decommission the created models
# - delete the created models
#
# Preconditions:
# - Environment variables have to be set
# - DigitalTwins enabled device must exist on the ADT hub
#
# For the purpose of this example we will create a temporary model and a temporary component model using random Ids.
# We have to make sure these model Ids are unique within the DT instance so we use generated UUIDs.
try:
    # DefaultAzureCredential supports different authentication mechanisms and determines
    # the appropriate credential type based on the environment it is executing in.
    # It attempts to use multiple credential types in order until it finds a working credential.
    # - AZURE_EVENT_HUB_ENDPOINT_NAME: The event hub endpoint name configured on the ADT instance
    event_hub_endpoint_name = os.getenv("AZURE_EVENT_HUB_ENDPOINT_NAME")
    # - AZURE_URL: The endpoint URL of the Azure Digital Twins instance
    url = os.getenv("AZURE_URL")
# DefaultAzureCredential expects the following three environment variables:
# - AZURE_TENANT_ID: The tenant ID in Azure Active Directory
# - AZURE_CLIENT_ID: The application (client) ID registered in the AAD tenant
# - AZURE_CLIENT_SECRET: The client secret for the registered application
credential = DefaultAzureCredential()
service_client = DigitalTwinsClient(url, credential)
# Create models from the sample dtdls
with open(r"dtdl\models\building.json") as f:
dtdl_model_building = json.load(f)
with open(r"dtdl\models\floor.json") as f:
dtdl_model_floor = json.load(f)
with open(r"dtdl\models\hvac.json") as f:
dtdl_model_hvac = json.load(f)
with open(r"dtdl\models\room.json") as f:
dtdl_model_room = json.load(f)
    new_model_list = [
        dtdl_model_building,
        dtdl_model_floor,
        dtdl_model_hvac,
        dtdl_model_room,
    ]
models = service_client.create_models(new_model_list)
print('Created Models:')
print(models)
# Create digital twins from the sample dtdls
building_twin_id = 'BuildingTwin-' + str(uuid.uuid4())
with open(r"dtdl\digital_twins\buildingTwin.json") as f:
dtdl_digital_twins_building = json.load(f)
created_building_twin = service_client.upsert_digital_twin(building_twin_id, dtdl_digital_twins_building)
print('BuildingTwin:')
print(created_building_twin)
floor_twin_id = 'FloorTwin-' + str(uuid.uuid4())
with open(r"dtdl\digital_twins\floorTwin.json") as f:
dtdl_digital_twins_floor = json.load(f)
created_floor_twin = service_client.upsert_digital_twin(floor_twin_id, dtdl_digital_twins_floor)
print('FloorTwin:')
print(created_floor_twin)
hvac_twin_id = 'HVACTwin-' + str(uuid.uuid4())
with open(r"dtdl\digital_twins\hvacTwin.json") as f:
dtdl_digital_twins_hvac = json.load(f)
created_hvac_twin = service_client.upsert_digital_twin(hvac_twin_id, dtdl_digital_twins_hvac)
print('HVACTwin:')
print(created_hvac_twin)
room_twin_id = 'RoomTwin-' + str(uuid.uuid4())
with open(r"dtdl\digital_twins\hvacTwin.json") as f:
dtdl_digital_twins_room = json.load(f)
created_room_twin = service_client.upsert_digital_twin(room_twin_id, dtdl_digital_twins_room)
print('RoomTwin:')
print(created_room_twin)
# Create digital relationships from the sample dtdls
with open(r"dtdl\relationships\hospitalRelationships.json") as f:
dtdl_relationships = json.load(f)
for relationship in dtdl_relationships:
service_client.upsert_relationship(
relationship["$sourceId"],
relationship["$relationshipId"],
relationship
)
# Create event route
event_route_id = 'eventRoute-' + str(uuid.uuid4())
event_filter = "$eventType = 'DigitalTwinTelemetryMessages' or $eventType = 'DigitalTwinLifecycleNotification'"
service_client.upsert_event_route(
event_route_id,
event_hub_endpoint_name,
**{"filter": event_filter}
)
# Get event route
created_event_route = service_client.get_event_route(event_route_id)
print('Created Event Route:')
print(created_event_route)
# Clean up
service_client.delete_event_route(event_route_id)
for relationship in dtdl_relationships:
service_client.delete_relationship(
relationship["$sourceId"],
relationship["$relationshipId"]
)
service_client.delete_digital_twin(building_twin_id)
service_client.delete_digital_twin(floor_twin_id)
service_client.delete_digital_twin(hvac_twin_id)
service_client.delete_digital_twin(room_twin_id)
    # Decommission and delete the created models. Models are identified by
    # their model IDs (the DTDL "@id" values returned by create_models),
    # not by the twin IDs used above.
    for model in models:
        service_client.decommission_model(model.id)
    for model in models:
        service_client.delete_model(model.id)
except HttpResponseError as e:
print("\nThis sample has caught an error. {0}".format(e.message))
|
PypiClean
|
/matxscript-1.8.1-py3-none-macosx_11_0_arm64.whl/matx/extension/tvm/_tvm_module.py
|
from matx import pipeline
from .lib import compile_or_load_lib
class TVMModel(pipeline.ops.OpKernel):
def __init__(self,
*,
location,
outputs):
compile_or_load_lib(silent=False)
super().__init__(
"TVMModel",
location=location,
outputs=outputs
)
def __call__(self, *args, **kwargs):
raise RuntimeError("TVMModel is not a callable op")
class TVMInferOp(pipeline.ops.OpKernel):
def __init__(self,
*,
models,
device,
batch_arg_name,
share_model):
compile_or_load_lib(silent=False)
super().__init__(
"TVMInferOp",
models=models,
device=device,
batch_arg_name=batch_arg_name,
share_model=share_model
)
def __call__(self, *args, **kwargs):
return super(TVMInferOp, self).__call__(*args, **kwargs)
class TVMModule(object):
def __init__(self,
device=None,
models=None,
batch_arg_name=None,
outputs=None,
share_model=True):
super(TVMModule, self).__init__()
assert isinstance(models, list)
assert isinstance(batch_arg_name, str)
assert isinstance(outputs, list)
assert isinstance(device, int)
compile_or_load_lib(silent=False)
self.tvm_models = []
self.device = device
self.batch_arg_name = batch_arg_name
self.outputs = outputs
self.share_model = share_model
self.model_holder = []
for model_config in models:
assert isinstance(model_config, dict)
batch_size = model_config["batch_size"]
model_path = model_config["model_path"]
tvm_model = TVMModel(
location=model_path, outputs=outputs)
# keep tvm_model alive
self.model_holder.append(tvm_model)
self.tvm_models.append(
{"batch_size": batch_size, "model_name": tvm_model.name})
def make_pipeline_op(self):
op = TVMInferOp(
models=self.tvm_models,
device=self.device,
batch_arg_name=self.batch_arg_name,
share_model=self.share_model
)
return op
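# Hypothetical wiring of TVMModule into a pipeline (model path, batch size
# and argument names are placeholders):
#
#   module = TVMModule(
#       device=0,
#       models=[{"batch_size": 1, "model_path": "./resnet50_b1_model"}],
#       batch_arg_name="input",
#       outputs=["output"],
#   )
#   infer_op = module.make_pipeline_op()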
/spyder-terminal-1.2.2.tar.gz/spyder-terminal-1.2.2/spyder_terminal/server/static/components/caniuse-lite/data/regions/TR.js
module.exports={C:{"52":0.01994,"68":0.00285,"78":0.01424,"79":0.00854,"80":0.0057,"81":0.0057,"82":0.00854,"83":0.00285,"84":0.00285,"88":0.00285,"89":0.01994,"91":0.01709,"92":0.00854,"93":0.09398,"94":0.55251,"95":0.0057,_:"2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 69 70 71 72 73 74 75 76 77 85 86 87 90 96 3.5 3.6"},D:{"18":0.0057,"22":0.07405,"26":0.04557,"34":0.06835,"38":0.10253,"42":0.00285,"43":0.0057,"47":0.08829,"48":0.00285,"49":0.22214,"51":0.05981,"53":0.02563,"56":0.00854,"57":0.0057,"58":0.00285,"59":0.0057,"61":0.00854,"63":0.01139,"64":0.00285,"65":0.0057,"66":0.0057,"67":0.0057,"68":0.01994,"69":0.00854,"70":0.00854,"71":0.04272,"72":0.00854,"73":0.01139,"74":0.00854,"75":0.01709,"76":0.01994,"77":0.01424,"78":0.01709,"79":0.15664,"80":0.02848,"81":0.01994,"83":0.04557,"84":0.0712,"85":0.07405,"86":0.07974,"87":0.16234,"88":0.04842,"89":0.05126,"90":0.03702,"91":0.07974,"92":0.12816,"93":0.12246,"94":0.46992,"95":10.99613,"96":7.42189,"97":0.0057,_:"4 5 6 7 8 9 10 11 12 13 14 15 16 17 19 20 21 23 24 25 27 28 29 30 31 32 33 35 36 37 39 40 41 44 45 46 50 52 54 55 60 62 98 99"},F:{"28":0.0057,"31":0.01139,"32":0.01139,"36":0.01424,"40":0.05981,"46":0.03987,"71":0.00285,"72":0.00285,"76":0.01139,"78":0.0057,"79":0.01139,"80":0.86294,"81":0.43574,_:"9 11 12 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 33 34 35 37 38 39 41 42 43 44 45 47 48 49 50 51 52 53 54 55 56 57 58 60 62 63 64 65 66 67 68 69 70 73 74 75 77 9.5-9.6 10.5 10.6 11.1 11.5 11.6 12.1","10.0-10.1":0},B:{"12":0.00854,"13":0.0057,"14":0.0057,"15":0.0057,"16":0.00285,"17":0.00854,"18":0.02848,"84":0.0057,"85":0.0057,"86":0.00285,"89":0.0057,"91":0.0057,"92":0.01424,"93":0.0057,"94":0.03133,"95":0.89712,"96":0.43574,_:"79 80 81 83 87 88 90"},E:{"4":0,"13":0.01139,"14":0.0712,"15":0.10253,_:"0 5 6 7 8 9 10 11 12 3.1 3.2 6.1 7.1 9.1","5.1":0.03418,"10.1":0.0057,"11.1":0.00854,"12.1":0.01139,"13.1":0.06835,"14.1":0.25062,"15.1":0.15379},G:{"8":0,"3.2":0,"4.0-4.1":0,"4.2-4.3":0,"5.0-5.1":0.00208,"6.0-6.1":0.00312,"7.0-7.1":0.0509,"8.1-8.4":0.00623,"9.0-9.2":0.00623,"9.3":0.12257,"10.0-10.2":0.01454,"10.3":0.13815,"11.0-11.2":0.0509,"11.3-11.4":0.05401,"12.0-12.1":0.03532,"12.2-12.5":1.52383,"13.0-13.1":0.02181,"13.2":0.01039,"13.3":0.08102,"13.4-13.7":0.26384,"14.0-14.4":0.70011,"14.5-14.8":3.61586,"15.0-15.1":3.68234},P:{"4":0.76204,"5.0-5.4":0.03048,"6.2-6.4":0.02054,"7.2-7.4":0.24385,"8.2":0.01023,"9.2":0.0508,"10.1":0.03048,"11.1-11.2":0.21337,"12.0":0.1016,"13.0":0.34546,"14.0":0.28449,"15.0":2.97702},I:{"0":0,"3":0,"4":0,"2.1":0,"2.2":0,"2.3":0,"4.1":0.00239,"4.2-4.3":0.00838,"4.4":0,"4.4.3-4.4.4":0.02498},K:{_:"0 10 11 12 11.1 11.5 12.1"},A:{"8":0.00865,"9":0.01153,"11":0.67473,_:"6 7 10 5.5"},J:{"7":0,"10":0},N:{"10":0.02658,"11":0.22582},L:{"0":55.80636},S:{"2.5":0},R:{_:"0"},M:{"0":0.18593},Q:{"10.4":0},O:{"0":0.13587},H:{"0":0.60254}};
/ps.plone.mlstiles-1.2.1.tar.gz/ps.plone.mlstiles-1.2.1/src/ps/plone/mlstiles/support/mosaic/development_collection.py
"""A tile that shows a list of MLS developments for plone.app.mosaic."""
from plone import api
from plone.app.standardtiles import _PMF
from plone.memoize import view
from plone.supermodel.model import Schema
from plone.tiles import PersistentTile
from ps.plone.mls.interfaces import IDevelopmentCollection
from ps.plone.mlstiles import _
from ps.plone.mlstiles.tiles import development_collection
from ps.plone.mlstiles.tiles.base import CatalogSource
from zope import schema
class IDevelopmentCollectionTile(Schema):
"""Configuration schema for a development collection."""
content_uid = schema.Choice(
required=True,
source=CatalogSource(
object_provides=IDevelopmentCollection.__identifier__,
path={
'query': [''],
'depth': -1,
},
),
title=_(u'Select an existing development collection'),
)
count = schema.Int(
default=5,
required=False,
title=_(u'Number of items to display'),
)
offset = schema.Int(
default=0,
required=False,
title=_(u'Start at item'),
)
tile_title = schema.TextLine(
required=False,
title=_(u'Tile Headline'),
)
show_tile_title = schema.Bool(
default=True,
required=False,
title=_(u'Show tile headline'),
)
tile_title_level = schema.Choice(
default=u'h2',
required=False,
title=_(u'Tile headline level'),
values=(u'h1', u'h2', u'h3', u'h4', u'h5', u'h6'),
)
show_title = schema.Bool(
default=True,
required=False,
title=_(u'Show development title'),
)
title_level = schema.Choice(
default=u'h3',
required=False,
title=_(u'Development title level'),
values=(u'h1', u'h2', u'h3', u'h4', u'h5', u'h6'),
)
show_description = schema.Bool(
default=True,
required=False,
title=_(u'Show development description'),
)
show_banner = schema.Bool(
default=True,
required=False,
title=_(u'Show development banner image'),
)
show_logo = schema.Bool(
default=True,
required=False,
title=_(u'Show development logo'),
)
show_location = schema.Bool(
default=True,
required=False,
title=_(u'Show location information for a development'),
)
show_lot_size = schema.Bool(
default=True,
required=False,
title=_(u'Show lot size information for a development'),
)
show_location_type = schema.Bool(
default=True,
required=False,
title=_(u'Show location type information for a development'),
)
show_number_of_listings = schema.Bool(
default=True,
required=False,
title=_(u'Show number of listings for a development'),
)
show_number_of_groups = schema.Bool(
default=True,
required=False,
title=_(u'Show number of groups for a development'),
)
show_more_link = schema.Bool(
default=True,
required=False,
title=_(u'Show link to collection'),
)
more_link_text = schema.TextLine(
default=_(u'More...'),
required=False,
title=_(u'Text for link to collection'),
)
tile_class = schema.TextLine(
default=u'',
        description=_PMF(
            u'Insert a list of additional CSS classes that will '
            u'be added to the tile'
        ),
required=False,
title=_PMF(u'Tile additional styles'),
)
class DevelopmentCollectionTile(
development_collection.DevelopmentCollectionTileMixin,
PersistentTile,
):
"""A tile that shows a list of MLS developments."""
@property
def tile_class(self):
css_class = 'development__results'
additional_classes = self.data.get('tile_class', '')
if not additional_classes:
return css_class
return ' '.join([css_class, additional_classes])
@property
@view.memoize
def get_context(self):
"""Return the development collection context."""
uuid = self.data.get('content_uid')
if uuid != api.content.get_uuid(obj=self.context):
item = api.content.get(UID=uuid)
if item is not None:
return item
return None
@property
def size(self):
return self.data.get('count')
@property
def start_at(self):
return self.data.get('offset')
def get_fields(self):
fields = super(DevelopmentCollectionTile, self).get_fields()
if self.data.get('show_banner'):
fields.append('banner_image')
return fields
/checkoutmanager-2.7.tar.gz/checkoutmanager-2.7/bootstrap.py
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("--version",
action="store_true", default=False,
help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
help=("Allow for re-use of existing directory of "
"setuptools versions"))
options, args = parser.parse_args()
if options.version:
print("bootstrap.py version %s" % __version__)
sys.exit(0)
######################################################################
# load/install setuptools
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
if os.path.exists('ez_setup.py'):
exec(open('ez_setup.py').read(), ez)
else:
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
# Strip all site-packages directories from sys.path that
# are not sys.prefix; this is because on Windows
# sys.prefix is a site-package directory.
if sitepackage_path != sys.prefix:
sys.path[:] = [x for x in sys.path
if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
/django-adminlte-3-0.1.6.tar.gz/django-adminlte-3-0.1.6/adminlte3/static/admin-lte/plugins/summernote/lang/summernote-nl-NL.js
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else {
var a = factory();
for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
}
})(window, function() {
return /******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = 32);
/******/ })
/************************************************************************/
/******/ ({
/***/ 32:
/***/ (function(module, exports) {
(function ($) {
$.extend($.summernote.lang, {
'nl-NL': {
font: {
bold: 'Vet',
italic: 'Cursief',
underline: 'Onderstrepen',
clear: 'Stijl verwijderen',
height: 'Regelhoogte',
name: 'Lettertype',
strikethrough: 'Doorhalen',
subscript: 'Subscript',
superscript: 'Superscript',
size: 'Tekstgrootte'
},
image: {
image: 'Afbeelding',
insert: 'Afbeelding invoegen',
resizeFull: 'Volledige breedte',
resizeHalf: 'Halve breedte',
resizeQuarter: 'Kwart breedte',
floatLeft: 'Links uitlijnen',
floatRight: 'Rechts uitlijnen',
floatNone: 'Geen uitlijning',
shapeRounded: 'Shape: Rounded',
shapeCircle: 'Shape: Circle',
shapeThumbnail: 'Shape: Thumbnail',
shapeNone: 'Shape: None',
dragImageHere: 'Sleep hier een afbeelding naar toe',
dropImage: 'Drop image or Text',
selectFromFiles: 'Selecteer een bestand',
maximumFileSize: 'Maximum file size',
maximumFileSizeError: 'Maximum file size exceeded.',
url: 'URL van de afbeelding',
remove: 'Verwijder afbeelding',
original: 'Original'
},
video: {
video: 'Video',
videoLink: 'Video link',
insert: 'Video invoegen',
url: 'URL van de video',
providers: '(YouTube, Vimeo, Vine, Instagram, DailyMotion of Youku)'
},
link: {
link: 'Link',
insert: 'Link invoegen',
unlink: 'Link verwijderen',
edit: 'Wijzigen',
textToDisplay: 'Tekst van link',
url: 'Naar welke URL moet deze link verwijzen?',
openInNewWindow: 'Open in nieuw venster'
},
table: {
table: 'Tabel',
addRowAbove: 'Rij hierboven invoegen',
addRowBelow: 'Rij hieronder invoegen',
addColLeft: 'Kolom links toevoegen',
addColRight: 'Kolom rechts toevoegen',
delRow: 'Verwijder rij',
delCol: 'Verwijder kolom',
delTable: 'Verwijder tabel'
},
hr: {
insert: 'Horizontale lijn invoegen'
},
style: {
style: 'Stijl',
p: 'Normaal',
blockquote: 'Quote',
pre: 'Code',
h1: 'Kop 1',
h2: 'Kop 2',
h3: 'Kop 3',
h4: 'Kop 4',
h5: 'Kop 5',
h6: 'Kop 6'
},
lists: {
unordered: 'Ongeordende lijst',
ordered: 'Geordende lijst'
},
options: {
help: 'Help',
fullscreen: 'Volledig scherm',
codeview: 'Bekijk Code'
},
paragraph: {
paragraph: 'Paragraaf',
outdent: 'Inspringen verkleinen',
indent: 'Inspringen vergroten',
left: 'Links uitlijnen',
center: 'Centreren',
right: 'Rechts uitlijnen',
justify: 'Uitvullen'
},
color: {
recent: 'Recente kleur',
more: 'Meer kleuren',
background: 'Achtergrond kleur',
foreground: 'Tekst kleur',
transparent: 'Transparant',
setTransparent: 'Transparant',
reset: 'Standaard',
resetToDefault: 'Standaard kleur'
},
shortcut: {
shortcuts: 'Toetsencombinaties',
close: 'sluiten',
textFormatting: 'Tekststijlen',
action: 'Acties',
paragraphFormatting: 'Paragraafstijlen',
documentStyle: 'Documentstijlen',
extraKeys: 'Extra keys'
},
help: {
'insertParagraph': 'Alinea invoegen',
'undo': 'Laatste handeling ongedaan maken',
'redo': 'Laatste handeling opnieuw uitvoeren',
'tab': 'Tab',
'untab': 'Herstel tab',
'bold': 'Stel stijl in als vet',
'italic': 'Stel stijl in als cursief',
'underline': 'Stel stijl in als onderstreept',
'strikethrough': 'Stel stijl in als doorgestreept',
'removeFormat': 'Verwijder stijl',
'justifyLeft': 'Lijn links uit',
'justifyCenter': 'Set center align',
'justifyRight': 'Lijn rechts uit',
'justifyFull': 'Lijn uit op volledige breedte',
'insertUnorderedList': 'Zet ongeordende lijstweergave aan',
'insertOrderedList': 'Zet geordende lijstweergave aan',
'outdent': 'Verwijder inspringing huidige alinea',
'indent': 'Inspringen op huidige alinea',
'formatPara': 'Wijzig formattering huidig blok in alinea(P tag)',
'formatH1': 'Formatteer huidig blok als H1',
'formatH2': 'Formatteer huidig blok als H2',
'formatH3': 'Formatteer huidig blok als H3',
'formatH4': 'Formatteer huidig blok als H4',
'formatH5': 'Formatteer huidig blok als H5',
'formatH6': 'Formatteer huidig blok als H6',
'insertHorizontalRule': 'Invoegen horizontale lijn',
'linkDialog.show': 'Toon Link Dialoogvenster'
},
history: {
undo: 'Ongedaan maken',
redo: 'Opnieuw doorvoeren'
},
specialChar: {
specialChar: 'SPECIALE TEKENS',
select: 'Selecteer Speciale Tekens'
}
}
});
})(jQuery);
/***/ })
/******/ });
});
|
PypiClean
|
/keystack-0.12.0.tar.gz/keystack-0.12.0/Keystack/KeystackUI/static/src/core/plugins/auth/actions.js
|
import parseUrl from "url-parse"
import win from "core/window"
import { btoa, buildFormData } from "core/utils"
export const SHOW_AUTH_POPUP = "show_popup"
export const AUTHORIZE = "authorize"
export const LOGOUT = "logout"
export const PRE_AUTHORIZE_OAUTH2 = "pre_authorize_oauth2"
export const AUTHORIZE_OAUTH2 = "authorize_oauth2"
export const VALIDATE = "validate"
export const CONFIGURE_AUTH = "configure_auth"
export const RESTORE_AUTHORIZATION = "restore_authorization"
const scopeSeparator = " "
export function showDefinitions(payload) {
return {
type: SHOW_AUTH_POPUP,
payload: payload
}
}
export function authorize(payload) {
return {
type: AUTHORIZE,
payload: payload
}
}
export const authorizeWithPersistOption = (payload) => ( { authActions } ) => {
authActions.authorize(payload)
authActions.persistAuthorizationIfNeeded()
}
export function logout(payload) {
return {
type: LOGOUT,
payload: payload
}
}
export const logoutWithPersistOption = (payload) => ( { authActions } ) => {
authActions.logout(payload)
authActions.persistAuthorizationIfNeeded()
}
export const preAuthorizeImplicit = (payload) => ( { authActions, errActions } ) => {
let { auth, token, isValid } = payload
let { schema, name } = auth
let flow = schema.get("flow")
// remove oauth2 property from window after redirect from authentication
delete win.swaggerUIRedirectOauth2
if ( flow !== "accessCode" && !isValid ) {
errActions.newAuthErr( {
authId: name,
source: "auth",
level: "warning",
message: "Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"
})
}
if ( token.error ) {
errActions.newAuthErr({
authId: name,
source: "auth",
level: "error",
message: JSON.stringify(token)
})
return
}
authActions.authorizeOauth2WithPersistOption({ auth, token })
}
export function authorizeOauth2(payload) {
return {
type: AUTHORIZE_OAUTH2,
payload: payload
}
}
export const authorizeOauth2WithPersistOption = (payload) => ( { authActions } ) => {
authActions.authorizeOauth2(payload)
authActions.persistAuthorizationIfNeeded()
}
export const authorizePassword = ( auth ) => ( { authActions } ) => {
let { schema, name, username, password, passwordType, clientId, clientSecret } = auth
let form = {
grant_type: "password",
scope: auth.scopes.join(scopeSeparator),
username,
password
}
let query = {}
let headers = {}
switch (passwordType) {
case "request-body":
setClientIdAndSecret(form, clientId, clientSecret)
break
case "basic":
headers.Authorization = "Basic " + btoa(clientId + ":" + clientSecret)
break
default:
console.warn(`Warning: invalid passwordType ${passwordType} was passed, not including client id and secret`)
}
return authActions.authorizeRequest({ body: buildFormData(form), url: schema.get("tokenUrl"), name, headers, query, auth})
}
function setClientIdAndSecret(target, clientId, clientSecret) {
if ( clientId ) {
Object.assign(target, {client_id: clientId})
}
if ( clientSecret ) {
Object.assign(target, {client_secret: clientSecret})
}
}
export const authorizeApplication = ( auth ) => ( { authActions } ) => {
let { schema, scopes, name, clientId, clientSecret } = auth
let headers = {
Authorization: "Basic " + btoa(clientId + ":" + clientSecret)
}
let form = {
grant_type: "client_credentials",
scope: scopes.join(scopeSeparator)
}
return authActions.authorizeRequest({body: buildFormData(form), name, url: schema.get("tokenUrl"), auth, headers })
}
export const authorizeAccessCodeWithFormParams = ( { auth, redirectUrl } ) => ( { authActions } ) => {
let { schema, name, clientId, clientSecret, codeVerifier } = auth
let form = {
grant_type: "authorization_code",
code: auth.code,
client_id: clientId,
client_secret: clientSecret,
redirect_uri: redirectUrl,
code_verifier: codeVerifier
}
return authActions.authorizeRequest({body: buildFormData(form), name, url: schema.get("tokenUrl"), auth})
}
export const authorizeAccessCodeWithBasicAuthentication = ( { auth, redirectUrl } ) => ( { authActions } ) => {
let { schema, name, clientId, clientSecret, codeVerifier } = auth
let headers = {
Authorization: "Basic " + btoa(clientId + ":" + clientSecret)
}
let form = {
grant_type: "authorization_code",
code: auth.code,
client_id: clientId,
redirect_uri: redirectUrl,
code_verifier: codeVerifier
}
return authActions.authorizeRequest({body: buildFormData(form), name, url: schema.get("tokenUrl"), auth, headers})
}
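// Note the contrast with authorizeAccessCodeWithFormParams above: here the
// client secret travels only in the Basic Authorization header and is omitted
// from the form body, matching RFC 6749 section 2.3.1 client authentication.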
export const authorizeRequest = ( data ) => ( { fn, getConfigs, authActions, errActions, oas3Selectors, specSelectors, authSelectors } ) => {
let { body, query={}, headers={}, name, url, auth } = data
let { additionalQueryStringParams } = authSelectors.getConfigs() || {}
let parsedUrl
if (specSelectors.isOAS3()) {
let finalServerUrl = oas3Selectors.serverEffectiveValue(oas3Selectors.selectedServer())
parsedUrl = parseUrl(url, finalServerUrl, true)
} else {
parsedUrl = parseUrl(url, specSelectors.url(), true)
}
if(typeof additionalQueryStringParams === "object") {
parsedUrl.query = Object.assign({}, parsedUrl.query, additionalQueryStringParams)
}
const fetchUrl = parsedUrl.toString()
let _headers = Object.assign({
"Accept":"application/json, text/plain, */*",
"Content-Type": "application/x-www-form-urlencoded",
"X-Requested-With": "XMLHttpRequest"
}, headers)
fn.fetch({
url: fetchUrl,
method: "post",
headers: _headers,
query: query,
body: body,
requestInterceptor: getConfigs().requestInterceptor,
responseInterceptor: getConfigs().responseInterceptor
})
.then(function (response) {
let token = JSON.parse(response.data)
let error = token && ( token.error || "" )
let parseError = token && ( token.parseError || "" )
if ( !response.ok ) {
errActions.newAuthErr( {
authId: name,
level: "error",
source: "auth",
message: response.statusText
} )
return
}
if ( error || parseError ) {
errActions.newAuthErr({
authId: name,
level: "error",
source: "auth",
message: JSON.stringify(token)
})
return
}
authActions.authorizeOauth2WithPersistOption({ auth, token})
})
.catch(e => {
let err = new Error(e)
let message = err.message
// swagger-js wraps the response (if available) into the e.response property;
// investigate to check whether there are more details on why the authorization
// request failed (according to RFC 6749).
// See also https://github.com/swagger-api/swagger-ui/issues/4048
if (e.response && e.response.data) {
const errData = e.response.data
try {
const jsonResponse = typeof errData === "string" ? JSON.parse(errData) : errData
if (jsonResponse.error)
message += `, error: ${jsonResponse.error}`
if (jsonResponse.error_description)
message += `, description: ${jsonResponse.error_description}`
} catch (jsonError) {
// Ignore
}
}
errActions.newAuthErr( {
authId: name,
level: "error",
source: "auth",
message: message
} )
})
}
export function configureAuth(payload) {
return {
type: CONFIGURE_AUTH,
payload: payload
}
}
export function restoreAuthorization(payload) {
return {
type: RESTORE_AUTHORIZATION,
payload: payload
}
}
export const persistAuthorizationIfNeeded = () => ( { authSelectors, getConfigs } ) => {
const configs = getConfigs()
if (configs.persistAuthorization)
{
const authorized = authSelectors.authorized()
localStorage.setItem("authorized", JSON.stringify(authorized.toJS()))
}
}
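// A minimal companion sketch (not part of the original actions): how the state
// persisted by persistAuthorizationIfNeeded could be restored on startup. The
// "authorized" localStorage key matches the setter above; wiring this thunk
// into the host system's authActions is an assumption, not an existing API.
export const restorePersistedAuthorization = () => ( { authActions, getConfigs } ) => {
  const configs = getConfigs()
  if (configs.persistAuthorization) {
    const persisted = localStorage.getItem("authorized")
    if (persisted) {
      // Re-hydrate previously authorized security schemes into the store
      authActions.restoreAuthorization({ authorized: JSON.parse(persisted) })
    }
  }
}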
export const authPopup = (url, swaggerUIRedirectOauth2) => ( ) => {
win.swaggerUIRedirectOauth2 = swaggerUIRedirectOauth2
win.open(url)
}
|
PypiClean
|
/larksuite-oapi-1.0.34.tar.gz/larksuite-oapi-1.0.34/larksuiteoapi/service/bitable/v1/api.py
|
from typing import *
from ....api import Request as APIRequest, Response as APIResponse, set_timeout, set_tenant_key, set_user_access_token, set_path_params, \
set_query_params, set_response_stream, set_is_response_stream, FormData, FormDataFile
from ....config import Config
from ....consts import ACCESS_TOKEN_TYPE_TENANT, ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_APP
from .model import *
class Service(object):
def __init__(self, conf):
# type: (Config) -> None
self.conf = conf
self.apps = AppService(self)
self.app_tables = AppTableService(self)
self.app_table_fields = AppTableFieldService(self)
self.app_table_records = AppTableRecordService(self)
self.app_table_views = AppTableViewService(self)
class AppService(object):
def __init__(self, service):
# type: (Service) -> None
self.service = service
def get(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppGetReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppGetReqCall(self, request_opts=request_opts)
class AppTableService(object):
def __init__(self, service):
# type: (Service) -> None
self.service = service
def list(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableListReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableListReqCall(self, request_opts=request_opts)
def batch_create(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableBatchCreateReqBody, str, str, int) -> AppTableBatchCreateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableBatchCreateReqCall(self, body, request_opts=request_opts)
def create(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableCreateReqBody, str, str, int) -> AppTableCreateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableCreateReqCall(self, body, request_opts=request_opts)
def delete(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableDeleteReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableDeleteReqCall(self, request_opts=request_opts)
def batch_delete(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableBatchDeleteReqBody, str, str, int) -> AppTableBatchDeleteReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableBatchDeleteReqCall(self, body, request_opts=request_opts)
class AppTableFieldService(object):
def __init__(self, service):
# type: (Service) -> None
self.service = service
def list(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableFieldListReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableFieldListReqCall(self, request_opts=request_opts)
def create(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableField, str, str, int) -> AppTableFieldCreateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableFieldCreateReqCall(self, body, request_opts=request_opts)
def delete(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableFieldDeleteReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableFieldDeleteReqCall(self, request_opts=request_opts)
def update(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableField, str, str, int) -> AppTableFieldUpdateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableFieldUpdateReqCall(self, body, request_opts=request_opts)
class AppTableRecordService(object):
def __init__(self, service):
# type: (Service) -> None
self.service = service
def batch_delete(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableRecordBatchDeleteReqBody, str, str, int) -> AppTableRecordBatchDeleteReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableRecordBatchDeleteReqCall(self, body, request_opts=request_opts)
def batch_create(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableRecordBatchCreateReqBody, str, str, int) -> AppTableRecordBatchCreateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableRecordBatchCreateReqCall(self, body, request_opts=request_opts)
def get(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableRecordGetReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableRecordGetReqCall(self, request_opts=request_opts)
def update(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableRecord, str, str, int) -> AppTableRecordUpdateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableRecordUpdateReqCall(self, body, request_opts=request_opts)
def delete(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableRecordDeleteReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableRecordDeleteReqCall(self, request_opts=request_opts)
def list(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableRecordListReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableRecordListReqCall(self, request_opts=request_opts)
def batch_update(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableRecordBatchUpdateReqBody, str, str, int) -> AppTableRecordBatchUpdateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableRecordBatchUpdateReqCall(self, body, request_opts=request_opts)
def create(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableRecord, str, str, int) -> AppTableRecordCreateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableRecordCreateReqCall(self, body, request_opts=request_opts)
class AppTableViewService(object):
def __init__(self, service):
# type: (Service) -> None
self.service = service
def create(self, body, tenant_key=None, user_access_token=None, timeout=None):
# type: (AppTableView, str, str, int) -> AppTableViewCreateReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableViewCreateReqCall(self, body, request_opts=request_opts)
def delete(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableViewDeleteReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableViewDeleteReqCall(self, request_opts=request_opts)
def list(self, tenant_key=None, user_access_token=None, timeout=None):
# type: (str, str, int) -> AppTableViewListReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if user_access_token is not None:
request_opts += [set_user_access_token(user_access_token)]
return AppTableViewListReqCall(self, request_opts=request_opts)
class AppGetReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppGetReqCall
self.path_params['app_token'] = app_token
return self
def do(self):
# type: () -> APIResponse[Type[AppGetResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token', 'GET', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, output_class=AppGetResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableListReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableListReqCall
self.path_params['app_token'] = app_token
return self
def set_page_token(self, page_token):
# type: (str) -> AppTableListReqCall
self.query_params['page_token'] = page_token
return self
def set_page_size(self, page_size):
# type: (int) -> AppTableListReqCall
self.query_params['page_size'] = page_size
return self
def do(self):
# type: () -> APIResponse[Type[AppTableListResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables', 'GET', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, output_class=AppTableListResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableBatchCreateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableService, AppTableBatchCreateReqBody, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableBatchCreateReqCall
self.path_params['app_token'] = app_token
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableBatchCreateReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableBatchCreateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/batch_create', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableBatchCreateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableCreateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableService, AppTableCreateReqBody, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableCreateReqCall
self.path_params['app_token'] = app_token
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableCreateReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableCreateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableCreateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableDeleteReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableDeleteReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableDeleteReqCall
self.path_params['table_id'] = table_id
return self
def do(self):
# type: () -> APIResponse[Type[None]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id', 'DELETE', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableBatchDeleteReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableService, AppTableBatchDeleteReqBody, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableBatchDeleteReqCall
self.path_params['app_token'] = app_token
return self
def do(self):
# type: () -> APIResponse[Type[None]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/batch_delete', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableFieldListReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableFieldService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableFieldListReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableFieldListReqCall
self.path_params['table_id'] = table_id
return self
def set_view_id(self, view_id):
# type: (str) -> AppTableFieldListReqCall
self.query_params['view_id'] = view_id
return self
def set_page_token(self, page_token):
# type: (str) -> AppTableFieldListReqCall
self.query_params['page_token'] = page_token
return self
def set_page_size(self, page_size):
# type: (int) -> AppTableFieldListReqCall
self.query_params['page_size'] = page_size
return self
def do(self):
# type: () -> APIResponse[Type[AppTableFieldListResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/fields', 'GET', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, output_class=AppTableFieldListResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableFieldCreateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableFieldService, AppTableField, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableFieldCreateReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableFieldCreateReqCall
self.path_params['table_id'] = table_id
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableFieldCreateReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableFieldCreateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/fields', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableFieldCreateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableFieldDeleteReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableFieldService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableFieldDeleteReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableFieldDeleteReqCall
self.path_params['table_id'] = table_id
return self
def set_field_id(self, field_id):
# type: (str) -> AppTableFieldDeleteReqCall
self.path_params['field_id'] = field_id
return self
def do(self):
# type: () -> APIResponse[Type[AppTableFieldDeleteResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/fields/:field_id', 'DELETE', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, output_class=AppTableFieldDeleteResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableFieldUpdateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableFieldService, AppTableField, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableFieldUpdateReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableFieldUpdateReqCall
self.path_params['table_id'] = table_id
return self
def set_field_id(self, field_id):
# type: (str) -> AppTableFieldUpdateReqCall
self.path_params['field_id'] = field_id
return self
def do(self):
# type: () -> APIResponse[Type[AppTableFieldUpdateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/fields/:field_id', 'PUT', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableFieldUpdateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableRecordBatchDeleteReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableRecordService, AppTableRecordBatchDeleteReqBody, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableRecordBatchDeleteReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableRecordBatchDeleteReqCall
self.path_params['table_id'] = table_id
return self
def do(self):
# type: () -> APIResponse[Type[AppTableRecordBatchDeleteResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/records/batch_delete', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableRecordBatchDeleteResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableRecordBatchCreateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableRecordService, AppTableRecordBatchCreateReqBody, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableRecordBatchCreateReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableRecordBatchCreateReqCall
self.path_params['table_id'] = table_id
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableRecordBatchCreateReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableRecordBatchCreateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/records/batch_create', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableRecordBatchCreateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableRecordGetReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableRecordService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableRecordGetReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableRecordGetReqCall
self.path_params['table_id'] = table_id
return self
def set_record_id(self, record_id):
# type: (str) -> AppTableRecordGetReqCall
self.path_params['record_id'] = record_id
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableRecordGetReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableRecordGetResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/records/:record_id', 'GET', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, output_class=AppTableRecordGetResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableRecordUpdateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableRecordService, AppTableRecord, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableRecordUpdateReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableRecordUpdateReqCall
self.path_params['table_id'] = table_id
return self
def set_record_id(self, record_id):
# type: (str) -> AppTableRecordUpdateReqCall
self.path_params['record_id'] = record_id
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableRecordUpdateReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableRecordUpdateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/records/:record_id', 'PUT', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableRecordUpdateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableRecordDeleteReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableRecordService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableRecordDeleteReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableRecordDeleteReqCall
self.path_params['table_id'] = table_id
return self
def set_record_id(self, record_id):
# type: (str) -> AppTableRecordDeleteReqCall
self.path_params['record_id'] = record_id
return self
def do(self):
# type: () -> APIResponse[Type[DeleteRecord]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/records/:record_id', 'DELETE', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, output_class=DeleteRecord, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableRecordListReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableRecordService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableRecordListReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableRecordListReqCall
self.path_params['table_id'] = table_id
return self
def set_view_id(self, view_id):
# type: (str) -> AppTableRecordListReqCall
self.query_params['view_id'] = view_id
return self
def set_filter(self, filter):
# type: (str) -> AppTableRecordListReqCall
self.query_params['filter'] = filter
return self
def set_sort(self, sort):
# type: (str) -> AppTableRecordListReqCall
self.query_params['sort'] = sort
return self
def set_field_names(self, field_names):
# type: (str) -> AppTableRecordListReqCall
self.query_params['field_names'] = field_names
return self
def set_page_token(self, page_token):
# type: (str) -> AppTableRecordListReqCall
self.query_params['page_token'] = page_token
return self
def set_page_size(self, page_size):
# type: (int) -> AppTableRecordListReqCall
self.query_params['page_size'] = page_size
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableRecordListReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableRecordListResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/records', 'GET', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, output_class=AppTableRecordListResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableRecordBatchUpdateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableRecordService, AppTableRecordBatchUpdateReqBody, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableRecordBatchUpdateReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableRecordBatchUpdateReqCall
self.path_params['table_id'] = table_id
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableRecordBatchUpdateReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableRecordBatchUpdateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/records/batch_update', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableRecordBatchUpdateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableRecordCreateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableRecordService, AppTableRecord, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableRecordCreateReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableRecordCreateReqCall
self.path_params['table_id'] = table_id
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> AppTableRecordCreateReqCall
self.query_params['user_id_type'] = user_id_type
return self
def do(self):
# type: () -> APIResponse[Type[AppTableRecordCreateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/records', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableRecordCreateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableViewCreateReqCall(object):
def __init__(self, service, body, request_opts=None):
# type: (AppTableViewService, AppTableView, List[Any]) -> None
self.service = service
self.body = body
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableViewCreateReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableViewCreateReqCall
self.path_params['table_id'] = table_id
return self
def do(self):
# type: () -> APIResponse[Type[AppTableViewCreateResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/views', 'POST', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
self.body, output_class=AppTableViewCreateResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableViewDeleteReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableViewService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableViewDeleteReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableViewDeleteReqCall
self.path_params['table_id'] = table_id
return self
def set_view_id(self, view_id):
# type: (str) -> AppTableViewDeleteReqCall
self.path_params['view_id'] = view_id
return self
def do(self):
# type: () -> APIResponse[Type[None]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/views/:view_id', 'DELETE', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class AppTableViewListReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AppTableViewService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_app_token(self, app_token):
# type: (str) -> AppTableViewListReqCall
self.path_params['app_token'] = app_token
return self
def set_table_id(self, table_id):
# type: (str) -> AppTableViewListReqCall
self.path_params['table_id'] = table_id
return self
def set_page_size(self, page_size):
# type: (int) -> AppTableViewListReqCall
self.query_params['page_size'] = page_size
return self
def set_page_token(self, page_token):
# type: (str) -> AppTableViewListReqCall
self.query_params['page_token'] = page_token
return self
def do(self):
# type: () -> APIResponse[Type[AppTableViewListResult]]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_query_params(self.query_params)]
req = APIRequest('/open-apis/bitable/v1/apps/:app_token/tables/:table_id/views', 'GET', [ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_TENANT],
None, output_class=AppTableViewListResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
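# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): listing records
# through the fluent ReqCall API defined above. ``conf`` is an assumed
# pre-built Config instance and the tokens are placeholders; ``resp.code`` and
# ``resp.data`` follow the APIResponse shape assumed from the imports above.
def _example_list_records(conf):
    # type: (Config) -> None
    service = Service(conf)
    resp = service.app_table_records.list(timeout=10) \
        .set_app_token('<app_token>') \
        .set_table_id('<table_id>') \
        .set_page_size(20) \
        .do()
    if resp.code == 0:
        print(resp.data)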
|
PypiClean
|
/idem-azure-2.2.0.tar.gz/idem-azure-2.2.0/idem_azure/tool/azure/key_vault/vault.py
|
import copy
from typing import Any
from typing import Dict
from typing import List
def convert_key_vault_to_present(
hub,
resource: Dict,
idem_resource_name: str,
resource_group_name: str,
resource_id: str,
vault_name: str,
subscription_id: str = None,
) -> Dict[str, Any]:
"""
Given an existing resource state and desired state inputs, generate a Dict that matches the format of
the present function's input parameters.
Args:
hub: The redistributed pop central hub.
resource: An existing resource state from Azure. This is usually a GET operation response.
idem_resource_name: The Idem name of the resource.
resource_group_name: Azure Resource Group name.
vault_name: Azure vault resource name.
resource_id: Azure vault resource id.
subscription_id: The Microsoft Azure subscription ID.
Returns:
A Dict that contains the parameters that match the present function's input format.
"""
resource_translated = {
"name": idem_resource_name,
"resource_id": resource_id,
"resource_group_name": resource_group_name,
"subscription_id": subscription_id,
"vault_name": vault_name,
"location": resource["location"],
}
if "tags" in resource:
resource_translated["tags"] = resource["tags"]
if "properties" in resource:
resource_translated["properties"] = resource["properties"]
if "type" in resource:
resource_translated["type"] = resource["type"]
if "systemData" in resource:
resource_translated["system_data"] = resource["systemData"]
return resource_translated
def convert_present_to_key_vault(
hub,
location: str,
sku: Dict[str, Any] = None,
tags: Dict = None,
tenant_id: str = None,
enabled_for_deployment: bool = None,
enabled_for_disk_encryption: bool = None,
enabled_for_template_deployment: bool = None,
enable_rbac_authorization: bool = None,
public_network_access_enabled: str = None,
soft_delete_retention_days: int = None,
purge_protection_enabled: bool = None,
access_policies: List[Dict[str, Any]] = None,
network_acls: Dict[str, Any] = None,
):
"""
Given some present function inputs, generate a payload that can be used during a PUT operation to Azure. Any
input whose value is None will be ignored, unless it is a required parameter.
Args:
hub: The redistributed pop central hub.
location(str): Resource location. Changing this forces a new resource to be created.
sku(Dict, Optional): The SKU of the key vault.
tags(Dict, Optional): Resource tags.
tenant_id(str, Optional): Tenant id of azure account.
enabled_for_deployment(bool, Optional): Boolean flag to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault.
enabled_for_disk_encryption(bool, Optional): Boolean flag to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys.
enabled_for_template_deployment(bool, Optional): Boolean flag to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault.
enable_rbac_authorization(bool, Optional): Boolean flag to specify whether Azure Key Vault uses Role Based Access Control (RBAC) for authorization of data actions.
public_network_access_enabled(str, Optional): Whether public network access is allowed for this Key Vault. Defaults to true
soft_delete_retention_days(int, Optional): The number of days that items should be retained for once soft-deleted. This value can be between 7 and 90 (the default) days.
purge_protection_enabled(bool, Optional): Is Purge Protection enabled for this Key Vault?
access_policies(List[Dict[str, Any]], Optional): Key vault access policies.
network_acls(Dict[str, Any], Optional): Key vault Network Acl.
Returns:
A Dict in the format of an Azure PUT operation payload.
"""
payload = {"location": location, "properties": {}}
if tags is not None:
payload["tags"] = tags
payload["properties"]["sku"] = {
"name": sku.get("name") if sku else "Standard",
"family": sku.get("family") if sku else "A",
}
payload["properties"]["tenantId"] = tenant_id
if enabled_for_deployment is not None:
payload["properties"]["enabledForDeployment"] = enabled_for_deployment
if enabled_for_disk_encryption is not None:
payload["properties"]["enabledForDiskEncryption"] = enabled_for_disk_encryption
if enabled_for_template_deployment is not None:
payload["properties"][
"enabledForTemplateDeployment"
] = enabled_for_template_deployment
if public_network_access_enabled is not None:
payload["properties"]["publicNetworkAccess"] = public_network_access_enabled
if enable_rbac_authorization is not None:
payload["properties"]["enableRbacAuthorization"] = enable_rbac_authorization
if soft_delete_retention_days is not None:
payload["properties"]["softDeleteRetentionInDays"] = soft_delete_retention_days
if purge_protection_enabled is not None:
payload["properties"]["enablePurgeProtection"] = purge_protection_enabled
payload["properties"]["accessPolicies"] = (
convert_present_to_access_policies(access_policies) if access_policies else []
)
if network_acls is not None:
payload["properties"]["networkAcls"] = convert_present_to_network_acl(
network_acls
)
return payload
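# Illustrative call (not part of the original module): ``hub`` is never used in
# the payload construction above, so ``None`` is passed here; all IDs below are
# placeholders.
def _example_key_vault_payload():
    return convert_present_to_key_vault(
        None,
        location="eastus",
        sku={"name": "Standard", "family": "A"},
        tenant_id="00000000-0000-0000-0000-000000000000",
        soft_delete_retention_days=90,
        access_policies=[
            {
                "tenant_id": "00000000-0000-0000-0000-000000000000",
                "object_id": "11111111-1111-1111-1111-111111111111",
                "key_permissions": ["get", "list"],
            }
        ],
    )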
def convert_present_to_access_policies(access_policies: List[Dict[str, Any]]):
"""
Given some present function inputs, generate a payload that can be used during a PUT operation to Azure. Any
input whose value is None will be ignored, unless it is a required parameter.
Args:
access_policies(List[Dict[str, Any]]) : List of access policies for key vault.
Returns:
List of access policies for key vault payload List[Dict[str, Any]] in the format of an Azure PUT operation payload.
"""
access_policies_list: List = []
for access_policy in access_policies:
access_policies_payload = {
"tenantId": access_policy.get("tenant_id"),
"objectId": access_policy.get("object_id"),
}
access_policies_payload["permissions"] = {}
if access_policy.get("key_permissions") is not None:
access_policies_payload["permissions"]["keys"] = access_policy.get(
"key_permissions"
)
if access_policy.get("secret_permissions") is not None:
access_policies_payload["permissions"]["secrets"] = access_policy.get(
"secret_permissions"
)
if access_policy.get("certificate_permissions") is not None:
access_policies_payload["permissions"]["certificates"] = access_policy.get(
"certificate_permissions"
)
if access_policy.get("storage_permissions") is not None:
access_policies_payload["permissions"]["storage"] = access_policy.get(
"storage_permissions"
)
access_policies_list.append(access_policies_payload)
return access_policies_list
def convert_present_to_network_acl(network_acls: Dict[str, Any]):
"""
Given some present function inputs, generate a payload that can be used during a PUT operation to Azure. Any
input whose value is None will be ignored, unless it is a required parameter.
Args:
network_acls(Dict[str, Any]): Specifies information about the network ACL for the key vault.
Returns:
A network ACL payload Dict[str, Any] in the format of an Azure PUT operation payload.
"""
network_acls_payload = {
"bypass": network_acls.get("bypass"),
"defaultAction": network_acls.get("default_action"),
"ipRules": network_acls.get("ip_rules"),
"virtualNetworkRules": network_acls.get("virtual_network_subnet_ids"),
}
return network_acls_payload
def update_key_vault_payload(
hub, existing_payload: Dict[str, Any], new_values: Dict[str, Any]
) -> Dict[str, Any]:
"""
Given an existing resource state and desired state inputs, generate an updated payload, which can be used by
a PUT operation to update a resource on Azure.
Args:
hub: The redistributed pop central hub.
existing_payload: An existing resource state from Azure. This is usually a GET operation response.
new_values: A dictionary of desired state values. If any property's value is None,
this property will be ignored. This matches the behavior when a present() input is None: Idem does not
do an update.
Returns:
A result Dict.
result: True if no error occurs during the operation.
ret: An updated payload that can be used to call PUT operation to update the resource. None if no update on all values.
comment: A messages list.
"""
result = {"result": True, "ret": None, "comment": []}
need_update = False
new_payload = copy.deepcopy(existing_payload)
if (new_values.get("tags") is not None) and (
existing_payload.get("tags") != new_values.get("tags")
):
new_payload["tags"] = new_values["tags"]
need_update = True
if need_update:
result["ret"] = new_payload
return result
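# Illustrative sketch (not part of the original module): only a changed "tags"
# value marks the payload as needing an update; inputs here are hypothetical.
#   existing = {"tags": {"env": "dev"}, "properties": {}}
#   ret = update_key_vault_payload(hub, existing, {"tags": {"env": "prod"}})
#   # ret["ret"] is a deep copy of `existing` with the tags replaced;
#   # update_key_vault_payload(hub, existing, {"tags": None})["ret"] is None,
#   # because a None desired value is ignored and no update is performed.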
|
PypiClean
|
/ressources/lib/node_modules/highcharts/indicators/bollinger-bands.src.js
|
'use strict';
(function (factory) {
if (typeof module === 'object' && module.exports) {
module.exports = factory;
} else if (typeof define === 'function' && define.amd) {
define(function () {
return factory;
});
} else {
factory(Highcharts);
}
}(function (Highcharts) {
(function (H) {
var each = H.each,
merge = H.merge,
isArray = H.isArray,
SMA = H.seriesTypes.sma;
// Utils:
function getStandardDeviation(arr, index, isOHLC, mean) {
var variance = 0,
arrLen = arr.length,
std = 0,
i = 0,
value;
for (; i < arrLen; i++) {
value = (isOHLC ? arr[i][index] : arr[i]) - mean;
variance += value * value;
}
variance = variance / (arrLen - 1);
std = Math.sqrt(variance);
return std;
}
H.seriesType('bb', 'sma',
/**
* Bollinger bands (BB). This series requires the `linkedTo` option to be
* set and should be loaded after the `stock/indicators/indicators.js` file.
*
* @extends plotOptions.sma
* @product highstock
* @sample {highstock} stock/indicators/bollinger-bands
* Bollinger bands
* @since 6.0.0
* @optionparent plotOptions.bb
*/
{
name: 'BB (20, 2)',
params: {
period: 20,
/**
* Standard deviation for top and bottom bands.
*
* @type {Number}
* @since 6.0.0
* @product highstock
*/
standardDeviation: 2,
index: 3
},
/**
* Bottom line options.
*
* @since 6.0.0
* @product highstock
*/
bottomLine: {
/**
* Styles for a bottom line.
*
* @since 6.0.0
* @product highstock
*/
styles: {
/**
* Pixel width of the line.
*
* @type {Number}
* @since 6.0.0
* @product highstock
*/
lineWidth: 1,
/**
* Color of the line. If not set, it's inherited from
* [plotOptions.bb.color](#plotOptions.bb.color).
*
* @type {String}
* @since 6.0.0
* @product highstock
*/
lineColor: undefined
}
},
/**
* Top line options.
*
* @extends plotOptions.bb.bottomLine
* @since 6.0.0
* @product highstock
*/
topLine: {
styles: {
lineWidth: 1,
lineColor: undefined
}
},
tooltip: {
pointFormat: '<span style="color:{point.color}">\u25CF</span><b> {series.name}</b><br/>Top: {point.top}<br/>Middle: {point.middle}<br/>Bottom: {point.bottom}<br/>'
},
marker: {
enabled: false
},
dataGrouping: {
approximation: 'averages'
}
}, /** @lends Highcharts.Series.prototype */ {
pointArrayMap: ['top', 'middle', 'bottom'],
pointValKey: 'middle',
nameComponents: ['period', 'standardDeviation'],
init: function () {
SMA.prototype.init.apply(this, arguments);
// Set default color for lines:
this.options = merge({
topLine: {
styles: {
lineColor: this.color
}
},
bottomLine: {
styles: {
lineColor: this.color
}
}
}, this.options);
},
toYData: function (point) {
return [point.top, point.middle, point.bottom];
},
translate: function () {
var indicator = this,
translatedBB = ['plotTop', 'plotMiddle', 'plotBottom'];
SMA.prototype.translate.apply(indicator, arguments);
each(indicator.points, function (point) {
each(
[point.top, point.middle, point.bottom],
function (value, i) {
if (value !== null) {
point[translatedBB[i]] = indicator.yAxis.toPixels(
value,
true
);
}
}
);
});
},
drawGraph: function () {
var indicator = this,
middleLinePoints = indicator.points,
pointsLength = middleLinePoints.length,
middleLineOptions = indicator.options,
middleLinePath = indicator.graph,
gappedExtend = {
options: {
gapSize: middleLineOptions.gapSize
}
},
deviations = [[], []], // top and bottom point place holders
point;
// Generate points for top and bottom lines:
while (pointsLength--) {
point = middleLinePoints[pointsLength];
deviations[0].push({
plotX: point.plotX,
plotY: point.plotTop,
isNull: point.isNull
});
deviations[1].push({
plotX: point.plotX,
plotY: point.plotBottom,
isNull: point.isNull
});
}
// Modify options and generate lines:
each(['topLine', 'bottomLine'], function (lineName, i) {
indicator.points = deviations[i];
indicator.options = merge(
middleLineOptions[lineName].styles,
gappedExtend
);
indicator.graph = indicator['graph' + lineName];
SMA.prototype.drawGraph.call(indicator);
// Now save lines:
indicator['graph' + lineName] = indicator.graph;
});
// Restore options and draw a middle line:
indicator.points = middleLinePoints;
indicator.options = middleLineOptions;
indicator.graph = middleLinePath;
SMA.prototype.drawGraph.call(indicator);
},
getValues: function (series, params) {
var period = params.period,
standardDeviation = params.standardDeviation,
xVal = series.xData,
yVal = series.yData,
yValLen = yVal ? yVal.length : 0,
BB = [], // 0- date, 1-middle line, 2-top line, 3-bottom line
ML, TL, BL, // middle line, top line and bottom line
date,
xData = [],
yData = [],
slicedX,
slicedY,
stdDev,
isOHLC,
point,
i;
if (xVal.length < period) {
return false;
}
isOHLC = isArray(yVal[0]);
for (i = period; i <= yValLen; i++) {
slicedX = xVal.slice(i - period, i);
slicedY = yVal.slice(i - period, i);
point = SMA.prototype.getValues.call(
this,
{
xData: slicedX,
yData: slicedY
},
params
);
date = point.xData[0];
ML = point.yData[0];
stdDev = getStandardDeviation(
slicedY,
params.index,
isOHLC,
ML
);
TL = ML + standardDeviation * stdDev;
BL = ML - standardDeviation * stdDev;
BB.push([date, TL, ML, BL]);
xData.push(date);
yData.push([TL, ML, BL]);
}
return {
values: BB,
xData: xData,
yData: yData
};
}
}
);
/**
* A bollinger bands indicator. If the [type](#series.bb.type) option is not
* specified, it is inherited from [chart.type](#chart.type).
*
* @type {Object}
* @since 6.0.0
* @extends series,plotOptions.bb
* @excluding data,dataParser,dataURL
* @product highstock
* @apioption series.bb
*/
/**
* An array of data points for the series. For the `bb` series type,
* points are calculated dynamically.
*
* @type {Array<Object|Array>}
* @since 6.0.0
* @extends series.line.data
* @product highstock
* @apioption series.bb.data
*/
}(Highcharts));
return (function () {
}());
}));
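For reference, a minimal Python sketch of the same Bollinger computation implemented by getValues above (an SMA middle line plus/minus `standardDeviation` sample standard deviations); the function name and shapes are illustrative only, not part of any shipped module:

import math

def bollinger(values, period=20, k=2):
    # Returns one (top, middle, bottom) tuple per full window,
    # mirroring TL/ML/BL in getValues above.
    bands = []
    for i in range(period, len(values) + 1):
        window = values[i - period:i]
        ml = sum(window) / period  # middle line: simple moving average
        # sample variance, matching the (arrLen - 1) divisor in getStandardDeviation
        var = sum((v - ml) ** 2 for v in window) / (period - 1)
        std = math.sqrt(var)
        bands.append((ml + k * std, ml, ml - k * std))
    return bands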
|
PypiClean
|
/pulumiverse_esxi_native-0.0.5.tar.gz/pulumiverse_esxi_native-0.0.5/pulumiverse_esxi_native/resource_pool.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ResourcePoolArgs', 'ResourcePool']
@pulumi.input_type
class ResourcePoolArgs:
def __init__(__self__, *,
cpu_max: Optional[pulumi.Input[int]] = None,
cpu_min: Optional[pulumi.Input[int]] = None,
cpu_min_expandable: Optional[pulumi.Input[str]] = None,
cpu_shares: Optional[pulumi.Input[str]] = None,
mem_max: Optional[pulumi.Input[int]] = None,
mem_min: Optional[pulumi.Input[int]] = None,
mem_min_expandable: Optional[pulumi.Input[str]] = None,
mem_shares: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ResourcePool resource.
:param pulumi.Input[int] cpu_max: CPU maximum (in MHz).
:param pulumi.Input[int] cpu_min: CPU minimum (in MHz).
:param pulumi.Input[str] cpu_min_expandable: Can pool borrow CPU resources from parent?
:param pulumi.Input[str] cpu_shares: CPU shares (low/normal/high/<custom>).
:param pulumi.Input[int] mem_max: Memory maximum (in MB).
:param pulumi.Input[int] mem_min: Memory minimum (in MB).
:param pulumi.Input[str] mem_min_expandable: Can pool borrow memory resources from parent?
:param pulumi.Input[str] mem_shares: Memory shares (low/normal/high/<custom>).
:param pulumi.Input[str] name: Resource Pool Name
"""
if cpu_max is not None:
pulumi.set(__self__, "cpu_max", cpu_max)
if cpu_min is None:
cpu_min = 100
if cpu_min is not None:
pulumi.set(__self__, "cpu_min", cpu_min)
if cpu_min_expandable is None:
cpu_min_expandable = 'true'
if cpu_min_expandable is not None:
pulumi.set(__self__, "cpu_min_expandable", cpu_min_expandable)
if cpu_shares is None:
cpu_shares = 'normal'
if cpu_shares is not None:
pulumi.set(__self__, "cpu_shares", cpu_shares)
if mem_max is not None:
pulumi.set(__self__, "mem_max", mem_max)
if mem_min is None:
mem_min = 200
if mem_min is not None:
pulumi.set(__self__, "mem_min", mem_min)
if mem_min_expandable is None:
mem_min_expandable = 'true'
if mem_min_expandable is not None:
pulumi.set(__self__, "mem_min_expandable", mem_min_expandable)
if mem_shares is None:
mem_shares = 'normal'
if mem_shares is not None:
pulumi.set(__self__, "mem_shares", mem_shares)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="cpuMax")
def cpu_max(self) -> Optional[pulumi.Input[int]]:
"""
CPU maximum (in MHz).
"""
return pulumi.get(self, "cpu_max")
@cpu_max.setter
def cpu_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpu_max", value)
@property
@pulumi.getter(name="cpuMin")
def cpu_min(self) -> Optional[pulumi.Input[int]]:
"""
CPU minimum (in MHz).
"""
return pulumi.get(self, "cpu_min")
@cpu_min.setter
def cpu_min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpu_min", value)
@property
@pulumi.getter(name="cpuMinExpandable")
def cpu_min_expandable(self) -> Optional[pulumi.Input[str]]:
"""
Can pool borrow CPU resources from parent?
"""
return pulumi.get(self, "cpu_min_expandable")
@cpu_min_expandable.setter
def cpu_min_expandable(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cpu_min_expandable", value)
@property
@pulumi.getter(name="cpuShares")
def cpu_shares(self) -> Optional[pulumi.Input[str]]:
"""
CPU shares (low/normal/high/<custom>).
"""
return pulumi.get(self, "cpu_shares")
@cpu_shares.setter
def cpu_shares(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cpu_shares", value)
@property
@pulumi.getter(name="memMax")
def mem_max(self) -> Optional[pulumi.Input[int]]:
"""
Memory maximum (in MB).
"""
return pulumi.get(self, "mem_max")
@mem_max.setter
def mem_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mem_max", value)
@property
@pulumi.getter(name="memMin")
def mem_min(self) -> Optional[pulumi.Input[int]]:
"""
Memory minimum (in MB).
"""
return pulumi.get(self, "mem_min")
@mem_min.setter
def mem_min(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mem_min", value)
@property
@pulumi.getter(name="memMinExpandable")
def mem_min_expandable(self) -> Optional[pulumi.Input[str]]:
"""
Can pool borrow memory resources from parent?
"""
return pulumi.get(self, "mem_min_expandable")
@mem_min_expandable.setter
def mem_min_expandable(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mem_min_expandable", value)
@property
@pulumi.getter(name="memShares")
def mem_shares(self) -> Optional[pulumi.Input[str]]:
"""
Memory shares (low/normal/high/<custom>).
"""
return pulumi.get(self, "mem_shares")
@mem_shares.setter
def mem_shares(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mem_shares", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource Pool Name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class ResourcePool(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cpu_max: Optional[pulumi.Input[int]] = None,
cpu_min: Optional[pulumi.Input[int]] = None,
cpu_min_expandable: Optional[pulumi.Input[str]] = None,
cpu_shares: Optional[pulumi.Input[str]] = None,
mem_max: Optional[pulumi.Input[int]] = None,
mem_min: Optional[pulumi.Input[int]] = None,
mem_min_expandable: Optional[pulumi.Input[str]] = None,
mem_shares: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a ResourcePool resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] cpu_max: CPU maximum (in MHz).
:param pulumi.Input[int] cpu_min: CPU minimum (in MHz).
:param pulumi.Input[str] cpu_min_expandable: Can pool borrow CPU resources from parent?
:param pulumi.Input[str] cpu_shares: CPU shares (low/normal/high/<custom>).
:param pulumi.Input[int] mem_max: Memory maximum (in MB).
:param pulumi.Input[int] mem_min: Memory minimum (in MB).
:param pulumi.Input[str] mem_min_expandable: Can pool borrow memory resources from parent?
:param pulumi.Input[str] mem_shares: Memory shares (low/normal/high/<custom>).
:param pulumi.Input[str] name: Resource Pool Name
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ResourcePoolArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a ResourcePool resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param ResourcePoolArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ResourcePoolArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cpu_max: Optional[pulumi.Input[int]] = None,
cpu_min: Optional[pulumi.Input[int]] = None,
cpu_min_expandable: Optional[pulumi.Input[str]] = None,
cpu_shares: Optional[pulumi.Input[str]] = None,
mem_max: Optional[pulumi.Input[int]] = None,
mem_min: Optional[pulumi.Input[int]] = None,
mem_min_expandable: Optional[pulumi.Input[str]] = None,
mem_shares: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ResourcePoolArgs.__new__(ResourcePoolArgs)
__props__.__dict__["cpu_max"] = cpu_max
if cpu_min is None:
cpu_min = 100
__props__.__dict__["cpu_min"] = cpu_min
if cpu_min_expandable is None:
cpu_min_expandable = 'true'
__props__.__dict__["cpu_min_expandable"] = cpu_min_expandable
if cpu_shares is None:
cpu_shares = 'normal'
__props__.__dict__["cpu_shares"] = cpu_shares
__props__.__dict__["mem_max"] = mem_max
if mem_min is None:
mem_min = 200
__props__.__dict__["mem_min"] = mem_min
if mem_min_expandable is None:
mem_min_expandable = 'true'
__props__.__dict__["mem_min_expandable"] = mem_min_expandable
if mem_shares is None:
mem_shares = 'normal'
__props__.__dict__["mem_shares"] = mem_shares
__props__.__dict__["name"] = name
super(ResourcePool, __self__).__init__(
'esxi-native:index:ResourcePool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ResourcePool':
"""
Get an existing ResourcePool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ResourcePoolArgs.__new__(ResourcePoolArgs)
__props__.__dict__["cpu_max"] = None
__props__.__dict__["cpu_min"] = None
__props__.__dict__["cpu_min_expandable"] = None
__props__.__dict__["cpu_shares"] = None
__props__.__dict__["mem_max"] = None
__props__.__dict__["mem_min"] = None
__props__.__dict__["mem_min_expandable"] = None
__props__.__dict__["mem_shares"] = None
__props__.__dict__["name"] = None
return ResourcePool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="cpuMax")
def cpu_max(self) -> pulumi.Output[Optional[int]]:
"""
CPU maximum (in MHz).
"""
return pulumi.get(self, "cpu_max")
@property
@pulumi.getter(name="cpuMin")
def cpu_min(self) -> pulumi.Output[Optional[int]]:
"""
CPU minimum (in MHz).
"""
return pulumi.get(self, "cpu_min")
@property
@pulumi.getter(name="cpuMinExpandable")
def cpu_min_expandable(self) -> pulumi.Output[Optional[str]]:
"""
Can pool borrow CPU resources from parent?
"""
return pulumi.get(self, "cpu_min_expandable")
@property
@pulumi.getter(name="cpuShares")
def cpu_shares(self) -> pulumi.Output[Optional[str]]:
"""
CPU shares (low/normal/high/<custom>).
"""
return pulumi.get(self, "cpu_shares")
@property
@pulumi.getter(name="memMax")
def mem_max(self) -> pulumi.Output[Optional[int]]:
"""
Memory maximum (in MB).
"""
return pulumi.get(self, "mem_max")
@property
@pulumi.getter(name="memMin")
def mem_min(self) -> pulumi.Output[Optional[int]]:
"""
Memory minimum (in MB).
"""
return pulumi.get(self, "mem_min")
@property
@pulumi.getter(name="memMinExpandable")
def mem_min_expandable(self) -> pulumi.Output[Optional[str]]:
"""
Can pool borrow memory resources from parent?
"""
return pulumi.get(self, "mem_min_expandable")
@property
@pulumi.getter(name="memShares")
def mem_shares(self) -> pulumi.Output[Optional[str]]:
"""
Memory shares (low/normal/high/<custom>).
"""
return pulumi.get(self, "mem_shares")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Pool Name
"""
return pulumi.get(self, "name")
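# Illustrative usage sketch (not part of the generated SDK); the values are
# hypothetical, and the documented defaults (cpu_min=100 MHz, mem_min=200 MB,
# shares='normal', min_expandable='true') apply to anything omitted:
#   import pulumi
#   import pulumiverse_esxi_native as esxi
#   pool = esxi.ResourcePool("build-pool", cpu_shares="high", mem_max=4096)
#   pulumi.export("pool_name", pool.name)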
|
PypiClean
|
/dbinterface-0.6-py3-none-any.whl/somenzz/db/postgres_client.py
|
import psycopg2
import sys
from psycopg2.extras import DictCursor
from psycopg2.extensions import ISOLATION_LEVEL_READ_UNCOMMITTED
# from psycopg2.errors import UndefinedTable,InvalidSchemaName
from io import StringIO
from db.database_interface import DataBaseInterface
class PostgresClient(DataBaseInterface):
    '''
    Use %s placeholders with a tuple, or %(name)s placeholders with a dict:
    >>> cur.execute("""
     INSERT INTO some_table (an_int, a_date, a_string)
     VALUES (%s, %s, %s);
     """,
     (10, datetime.date(2005, 11, 18), "O'Reilly"))
    >>> cur.execute("""
     INSERT INTO some_table (an_int, a_date, another_date, a_string)
     VALUES (%(int)s, %(date)s, %(date)s, %(str)s);
     """,
     {'int': 10, 'str': "O'Reilly", 'date': datetime.date(2005, 11, 18)})
    LIKE example:
    cur.execute("SELECT * FROM mytable WHERE path LIKE %s ESCAPE ''", (path,))
    '''
def __init__(
self,
database,
user,
password,
host,
port,
):
self.database = database
self.user = user
self.password = password
self.host = host
self.port = port
self.connection = None
self.cursor_count = 0
def connect(self):
self.connection = psycopg2.connect(
database=self.database,
user=self.user,
password=self.password,
host=self.host,
port=self.port,
)
self.connection.set_client_encoding("utf-8")
self.connection.set_session(isolation_level=ISOLATION_LEVEL_READ_UNCOMMITTED)
def set_current_schema(self, schema):
pass
def close(self):
self.connection.close()
def exists(self, tabname):
"""
仅传入表名,判断是否存在
"""
assert "." in tabname
# result = elk_prod.fetch_many(
# f"select * from sys.all_tables where table_name = %s ", (tabname.lower(),), 0
# )
try:
self.fetch_many(f"select * from {tabname}", parameters=(), nums=10)
# except UndefinedTable :
# return False
# except InvalidSchemaName:
# return False
except Exception as e:
return False
return True
def is_active(self):
        return not self.connection.closed
def get_tables(self, schema: str) -> list:
pass
def read(self, sql: str, params: tuple = ()) -> tuple:
cur = self.connection.cursor(f"cusor{self.cursor_count}")
self.cursor_count += 1
cur.execute(sql, params)
return cur
def fetch(self, sql: str, params: tuple = ()) -> tuple:
with self.connection.cursor(f"cusor{self.cursor_count}") as cur:
self.cursor_count += 1
cur.execute(sql, params)
row = cur.fetchone()
while row:
yield row
row = cur.fetchone()
def read_map(self, sql: str, params: tuple = ()) -> dict:
cur = self.connection.cursor(cursor_factory=DictCursor)
cur.execute(sql, params)
for row in cur:
yield dict(row)
def write(self, sql: str, params: tuple) -> tuple:
pass
def write_many(self, sql: str, params: tuple) -> tuple:
pass
def execute_immediate_for_selectFirst(self, sql, parameters=()):
# print(sql)
row = None
with self.connection.cursor() as cur:
cur.execute(sql, parameters)
row = cur.fetchone()
return row
def fetch_many(self, sql, parameters=(), nums=10):
# print(sql)
row = None
with self.connection.cursor() as cur:
cur.execute(sql, parameters)
if nums == 0:
rows = cur.fetchall()
else:
rows = cur.fetchmany(size=nums)
return rows
def execute_immediate_for_modify(self, sql, parameters=()):
self.connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
)
if sql == "" or sql is None:
return
# print(sql)
try:
with self.connection.cursor() as cur:
cur.execute(sql, parameters)
self.connection.commit()
return True
except Exception as e:
print(f"failed with {sql}: {e}")
return False
    def fetch_tuple_iter(self, sql, parameters=()):
# print(sql)
cur = self.connection.cursor(f"cusor{self.cursor_count}")
self.cursor_count += 1
cur.execute(sql, parameters)
return cur
def fetch_both(self, sql, parameters=None):
"""
返回一个列表,元素是字典,键是列名,值是列值。
"""
pass
def insert_single_row(self, sql_insert, value):
"""
sql_insert = "insert in tab values(?,?)"
values =()
"""
pass
def copy_to_file(self, query, file_name, encoding="utf8", delimiter="\x02"):
copy_sql = f'''COPY (
{query}
) TO '{file_name}' WITH(encoding '{encoding}', delimiter '{delimiter}', null '', format 'text')'''
print(f'''copy_sql =======begin========
{copy_sql}
copy_sql =======end========''')
with self.connection.cursor() as cur:
cur.copy_expert(
copy_sql,
sys.stdout
)
return cur.rowcount
def copy_from_memory(self, values, schema, tabName, columns=None):
# self.connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
"""
sql_insert = "insert in tab values(?,?)"
values =[()()()]
"""
# with self.connection.cursor() as cur:
# f = StringIO()
# a = cur.copy_expert(sql,f,size = 8192)
# f.seek(0)
# print(f.read())
with StringIO() as w:
for value in values:
# print(value)
text = (
"\x02".join(
[
""
if x is None
else str(x)
.replace("\n", "")
.replace("\r", "")
.replace("\\", "")
for x in value
]
)
+ "\n"
)
# print(text)
w.write(text)
w.seek(0)
with self.connection.cursor() as cur:
cur.copy_from(
file=w,
table=f"{schema}.{tabName}",
sep="\x02",
size=16384,
columns=columns,
)
self.connection.commit()
# print(f"elk -> insert into {tabName}: {len(values)} rows")
def insert_many_row(self, sql_insert, values):
"""
sql_insert = "insert in tab values(?,?)"
values =[()()()]
"""
with self.connection.cursor() as cursor:
cursor.executemany(sql_insert, values)
self.connection.commit()
print(f"elk -> insert {len(values)} rows")
def get_table_cols_info(self, schema, tabname):
"""
返回:
列信息
主键信息
外键信息
索引信息
"""
colums = []
with self.connection.cursor() as cur:
cur.execute(
f"SELECT column_name FROM sys.all_tab_columns where table_name = '{tabname}' and owner = '{self.user}' order by column_id"
)
for x in cur:
colums.append(x[0])
if colums == []:
with self.connection.cursor() as cur:
cur.execute(
f"SELECT column_name FROM sys.all_tab_columns where table_name = '{tabname}' and owner = 'omm' order by column_id"
)
for x in cur:
colums.append(x[0])
return colums
def get_table_names(self, table_schema=None):
tabs = []
sql = "select table_schema,table_name from information_schema.tables where 1=1 or table_schema = %s"
if table_schema:
sql = "select table_schema,table_name from information_schema.tables where table_schema = %s"
for schema, tabname in self.read(
sql, (table_schema.lower() if table_schema else "",)
):
tabs.append(f"{schema}.{tabname}")
return tabs
def get_all_tables(self, schema_name="EDW", table_type=""):
"""
'TABLE_SCHEM': 'SYSCAT', 'TABLE_NAME': 'VARIABLEAUTH', 'TABLE_TYPE': 'VIEW', 'REMARKS': None
"""
pass
def copy_from_file(self, file_path, tabName, encoding="GBK", columns=None):
with open(file_path, "r", encoding="GBK") as reader:
with self.connection.cursor() as cur:
cur.copy_from(
file=reader, table=tabName, sep="\x02", size=16384, columns=columns
)
self.connection.commit()
print(f"elk -> insert into {tabName} done")
return True
if __name__ == "__main__":
import time
pg = PostgresClient(database = "postgres",user="postgres", password="121113", host="localhost", port="5432")
pg.connect()
start = time.time()
x = pg.copy_to_file(
"select * from f_dep_lsflls",
file_name="/tmp/lsflls.txt",
encoding="gbk",
delimiter="|",
)
print("x:", x)
end = time.time()
print(end - start)
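    # Illustrative follow-up (hypothetical query): stream rows lazily with the
    # server-side-cursor generator instead of fetching everything at once.
    # for row in pg.fetch("select * from f_dep_lsflls limit %s", (5,)):
    #     print(row)
    pg.close()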
|
PypiClean
|
/mo-logs-7.333.23006.tar.gz/mo-logs-7.333.23006/mo_logs/startup.py
|
from __future__ import absolute_import, division, unicode_literals
import argparse as _argparse
import os
import sys
import tempfile
from mo_dots import coalesce, listwrap, from_data, to_data
from mo_logs import logger
# PARAMETERS MATCH argparse.ArgumentParser.add_argument()
# https://docs.python.org/dev/library/argparse.html#the-add-argument-method
#
# name or flags - Either a name or a list of option strings, e.g. foo or -f, --foo.
# action - The basic type of action to be taken when this argument is encountered at the command line.
# nargs - The number of command-line arguments that should be consumed.
# const - A constant value required by some action and nargs selections.
# default - The value produced if the argument is absent from the command line.
# type - The type to which the command-line argument should be converted.
# choices - A container of the allowable values for the argument.
# required - Whether or not the command-line option may be omitted (optionals only).
# help - A brief description of what the argument does.
# metavar - A name for the argument in usage messages.
# dest - The name of the attribute to be added to the object returned by parse_args().
class _ArgParser(_argparse.ArgumentParser):
def error(self, message):
logger.error("argparse error: {{error}}", error=message)
def argparse(defs, complain=True):
parser = _ArgParser()
for d in listwrap(defs):
args = d.copy()
name = args.name
args.name = None
parser.add_argument(*from_data(listwrap(name)), **args)
namespace, unknown = parser.parse_known_args()
if unknown and complain:
logger.warning("Ignoring arguments: {{unknown|json}}", unknown=unknown)
output = {k: getattr(namespace, k) for k in vars(namespace)}
return to_data(output)
def read_settings(defs=None, filename=None, default_filename=None, complain=True):
"""
:param filename: Force load a file
:param defs: more arguments you want to accept (see https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_argument)
:param default_filename: A config file from an environment variable (a fallback config file, if no other provided)
    :param complain: Complain about args mismatch
"""
from mo_files import File
import mo_json_config
# READ SETTINGS
defs = listwrap(defs)
defs.append({
"name": ["--config", "--settings", "--settings-file", "--settings_file"],
"help": "path to JSON file with settings",
"type": str,
"dest": "filename",
"default": None,
"required": False,
})
args = argparse(defs, complain)
args.filename = coalesce(
filename,
args.filename if args.filename.endswith(".json") else None,
default_filename,
"./config.json",
)
settings_file = File(args.filename)
if settings_file.exists:
logger.info("Using {{filename}} for configuration", filename=settings_file.abs_path)
else:
logger.error(
"Can not read configuration file {{filename}}",
filename=settings_file.abs_path,
)
settings = mo_json_config.get_file(settings_file)
settings.args = args
return settings
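# Illustrative sketch (hypothetical flag): extra argument definitions are plain
# dicts passed straight through to argparse.ArgumentParser.add_argument().
#   settings = read_settings(defs=[{
#       "name": ["--debug"],
#       "help": "run with debug logging",
#       "action": "store_true",
#       "dest": "debug",
#   }])
#   # settings.args.debug holds the parsed flag; settings.args.filename the config path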
# snagged from https://github.com/pycontribs/tendo/blob/master/tendo/singleton.py (under licence PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2)
class SingleInstance:
"""
ONLY ONE INSTANCE OF PROGRAM ALLOWED
If you want to prevent your script from running in parallel just instantiate SingleInstance() class.
If is there another instance already running it will exist the application with the message
"Another instance is already running, quitting.", returning -1 error code.
with SingleInstance():
<your code here>
settings = startup.read_settings()
with SingleInstance(settings.args.filename):
<your code here>
    This option is very useful if you have scripts executed by crontab at small intervals, which could otherwise cause multiple instances to run at once.
Remember that this works by creating a lock file with a filename based on the full path to the script file.
"""
def __init__(self, flavor_id=""):
self.initialized = False
        appname = os.path.splitext(os.path.abspath(sys.argv[0]))[0]
basename = ((appname + "-%s") % flavor_id).replace("/", "-").replace(
":", ""
).replace("\\", "-").replace("-.-", "-") + ".lock"
self.lockfile = os.path.normpath(tempfile.gettempdir() + "/" + basename)
def __enter__(self):
logger.info("SingleInstance.lockfile = " + self.lockfile)
if sys.platform == "win32":
try:
# file already exists, we try to remove (in case previous execution was interrupted)
if os.path.exists(self.lockfile):
os.unlink(self.lockfile)
self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
except Exception as e:
                logger.alarm("Another instance is already running, quitting.")
sys.exit(-1)
else: # non Windows
import fcntl
self.fp = open(self.lockfile, "w")
try:
fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
                logger.alarm("Another instance is already running, quitting.")
sys.exit(-1)
self.initialized = True
def __exit__(self, type, value, traceback):
self.__del__()
def __del__(self):
temp, self.initialized = self.initialized, False
if not temp:
return
try:
if sys.platform == "win32":
if hasattr(self, "fd"):
os.close(self.fd)
os.unlink(self.lockfile)
else:
import fcntl
fcntl.lockf(self.fp, fcntl.LOCK_UN)
if os.path.isfile(self.lockfile):
os.unlink(self.lockfile)
except Exception as e:
logger.warning("Problem with SingleInstance __del__()", e)
sys.exit(-1)
|
PypiClean
|
/dnv_bladed_models-0.3.44.tar.gz/dnv_bladed_models-0.3.44/src/dnv_bladed_models/structural_modelling_settings.py
|
from __future__ import annotations
from datetime import date, datetime # noqa: F401
from enum import Enum, IntEnum
import re # noqa: F401
from typing import Any, Dict, List, Optional, Type, Union, Callable # noqa: F401
from pathlib import Path
from typing import TypeVar
Model = TypeVar('Model', bound='BaseModel')
StrBytes = Union[str, bytes]
from pydantic import AnyUrl, BaseModel, EmailStr, Field, validator, root_validator, Extra # noqa: F401
from dnv_bladed_models.dnv import Dnv
class StructuralModellingSettings_BladeGeometricStiffnessModelEnum(str, Enum):
AXIAL_LOADS_ONLY = "AxialLoadsOnly"
FULL_MODEL_WITH_ORIENTATION_CORRECTION = "FullModelWithOrientationCorrection"
INTERNAL_LOADS_ONLY = "InternalLoadsOnly"
DISABLED = "Disabled"
class StructuralModellingSettings_SupportStructureGeometricStiffnessModelEnum(str, Enum):
AXIAL_LOADS_ONLY = "AxialLoadsOnly"
INTERNAL_LOADS_ONLY = "InternalLoadsOnly"
DISABLED = "Disabled"
class StructuralModellingSettings(Dnv):
"""StructuralModellingSettings - Settings affecting the structural modelling.
Attributes:
----------
BladeGeometricStiffnessModel : StructuralModellingSettings_BladeGeometricStiffnessModelEnum, default='AxialLoadsOnly'
The geometric stiffness model to use for the blades. For blades with 1 part, the \"axial loads only\" model is recommended. This configuration is only appropriate for relatively stiff blades, undergoing small deflection. For more flexible blade models, a multi-part blade model is more appropriate. In this case, the \"full with orientation correction\" is the recommended option, as long as deflection remains small within each blade part.
SupportStructureGeometricStiffnessModel : StructuralModellingSettings_SupportStructureGeometricStiffnessModelEnum, default='AxialLoadsOnly'
The geometric stiffness model to use for the support structure
"""
BladeGeometricStiffnessModel: Optional[StructuralModellingSettings_BladeGeometricStiffnessModelEnum] = Field(alias="BladeGeometricStiffnessModel", default='AxialLoadsOnly')
SupportStructureGeometricStiffnessModel: Optional[StructuralModellingSettings_SupportStructureGeometricStiffnessModelEnum] = Field(alias="SupportStructureGeometricStiffnessModel", default='AxialLoadsOnly')
class Config:
extra = Extra.forbid
validate_assignment = True
allow_population_by_field_name = True
pass
@root_validator(pre=True)
    def _parsing_ignores_underscore_properties(cls, values: Dict[str, Any]):
allowed_vals = {}
for key, val in values.items():
if not key.startswith('_'):
if isinstance(val, dict):
allowed_child_vals = {}
for child_key, child_val in val.items():
if not child_key.startswith('_'):
allowed_child_vals[child_key] = child_val
allowed_vals[key] = allowed_child_vals
else:
allowed_vals[key] = val
return allowed_vals
def to_json(
self,
*,
include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
by_alias: bool = True,
skip_defaults: Optional[bool] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = True,
encoder: Optional[Callable[[Any], Any]] = None,
models_as_dict: bool = True,
**dumps_kwargs: Any) -> str:
r"""
Generates a JSON string representation of the model.
Notes
-----
`include` and `exclude` arguments as per `dict()`.
`encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.
Examples
--------
>>> model.to_json()
Renders the full JSON representation of the model object.
"""
if dumps_kwargs.get('indent') is None:
dumps_kwargs.update(indent=2)
return super().json(
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
encoder=encoder,
models_as_dict=models_as_dict,
**dumps_kwargs)
@classmethod
def from_file(
cls: Type['Model'],
path: Union[str, Path]) -> 'Model':
r"""
Loads a model from a given file path.
Parameters
----------
path : string
The file path to the model.
Returns
-------
StructuralModellingSettings
The model object.
Raises
------
ValueError, ValidationError
If the JSON document does not correctly describe the model according to the model schema.
Examples
--------
>>> model = StructuralModellingSettings.from_file('/path/to/file')
"""
return super().parse_file(path=path)
@classmethod
def from_json(
cls: Type['Model'],
b: StrBytes) -> 'Model':
r"""
Creates a model object from a JSON string.
Parameters
----------
b: StrBytes
The JSON string describing the model.
Returns
-------
StructuralModellingSettings
The model object.
Raises
------
ValueError, ValidationError
If the JSON document does not correctly describe the model according to the model schema.
Examples
--------
>>> model = StructuralModellingSettings.from_json('{ ... }')
"""
return super().parse_raw(
b=b,
content_type='application/json')
@classmethod
def from_dict(
cls: Type['Model'],
obj: Any) -> 'Model':
r"""
Creates a model object from a dict.
Parameters
----------
obj : Any
The dictionary object describing the model.
Returns
-------
StructuralModellingSettings
The model object.
Raises
------
ValueError, ValidationError
If the JSON document does not correctly describe the model according to the model schema.
"""
return super().parse_obj(obj=obj)
def to_file(
self,
path: Union[str, Path]):
r"""
Writes the model as a JSON document to a file with UTF8 encoding.
Parameters
----------
path : string
The file path to which the model will be written.
Examples
--------
>>> model.to_file('/path/to/file')
"""
with open(file=path, mode='w', encoding="utf8") as output_file:
output_file.write(self.to_json())
StructuralModellingSettings.update_forward_refs()
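# Illustrative usage sketch (not part of the generated model code; the file
# path is hypothetical):
#   settings = StructuralModellingSettings(
#       BladeGeometricStiffnessModel='FullModelWithOrientationCorrection')
#   settings.to_file('/tmp/structural_settings.json')
#   same = StructuralModellingSettings.from_file('/tmp/structural_settings.json')
#   assert same.SupportStructureGeometricStiffnessModel == 'AxialLoadsOnly'  # default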
|
PypiClean
|
/espnet-202308-py3-none-any.whl/espnet2/enh/decoder/stft_decoder.py
|
import math
import torch
import torch_complex
from packaging.version import parse as V
from torch_complex.tensor import ComplexTensor
from espnet2.enh.decoder.abs_decoder import AbsDecoder
from espnet2.enh.layers.complex_utils import is_torch_complex_tensor
from espnet2.layers.stft import Stft
is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")
class STFTDecoder(AbsDecoder):
"""STFT decoder for speech enhancement and separation"""
def __init__(
self,
n_fft: int = 512,
win_length: int = None,
hop_length: int = 128,
window="hann",
center: bool = True,
normalized: bool = False,
onesided: bool = True,
):
super().__init__()
self.stft = Stft(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window=window,
center=center,
normalized=normalized,
onesided=onesided,
)
self.win_length = win_length if win_length else n_fft
self.n_fft = n_fft
self.hop_length = hop_length
self.window = window
self.center = center
def forward(self, input: ComplexTensor, ilens: torch.Tensor):
"""Forward.
Args:
input (ComplexTensor): spectrum [Batch, T, (C,) F]
ilens (torch.Tensor): input lengths [Batch]
"""
if not isinstance(input, ComplexTensor) and (
is_torch_1_9_plus and not torch.is_complex(input)
):
raise TypeError("Only support complex tensors for stft decoder")
bs = input.size(0)
if input.dim() == 4:
multi_channel = True
# input: (Batch, T, C, F) -> (Batch * C, T, F)
input = input.transpose(1, 2).reshape(-1, input.size(1), input.size(3))
else:
multi_channel = False
# for supporting half-precision training
if input.dtype in (torch.float16, torch.bfloat16):
wav, wav_lens = self.stft.inverse(input.float(), ilens)
wav = wav.to(dtype=input.dtype)
elif (
is_torch_complex_tensor(input)
and hasattr(torch, "complex32")
and input.dtype == torch.complex32
):
wav, wav_lens = self.stft.inverse(input.cfloat(), ilens)
wav = wav.to(dtype=input.dtype)
else:
wav, wav_lens = self.stft.inverse(input, ilens)
if multi_channel:
# wav: (Batch * C, Nsamples) -> (Batch, Nsamples, C)
wav = wav.reshape(bs, -1, wav.size(1)).transpose(1, 2)
return wav, wav_lens
    def _get_window_func(self):
        # Build the analysis window (e.g. torch.hann_window) at win_length samples.
        window_func = getattr(torch, f"{self.window}_window")
        return window_func(self.win_length)
def forward_streaming(self, input_frame: torch.Tensor):
"""Forward.
Args:
input (ComplexTensor): spectrum [Batch, 1, F]
output: wavs [Batch, 1, self.win_length]
"""
input_frame = input_frame.real + 1j * input_frame.imag
output_wav = (
torch.fft.irfft(input_frame)
if self.stft.onesided
else torch.fft.ifft(input_frame).real
)
output_wav = output_wav.squeeze(1)
n_pad_left = (self.n_fft - self.win_length) // 2
output_wav = output_wav[..., n_pad_left : n_pad_left + self.win_length]
return output_wav * self._get_window_func()
def streaming_merge(self, chunks, ilens=None):
"""streaming_merge. It merges the frame-level processed audio chunks
in the streaming *simulation*. It is noted that, in real applications,
the processed audio should be sent to the output channel frame by frame.
You may refer to this function to manage your streaming output buffer.
Args:
chunks: List [(B, frame_size),]
ilens: [B]
Returns:
merge_audio: [B, T]
"""
frame_size = self.win_length
hop_size = self.hop_length
num_chunks = len(chunks)
batch_size = chunks[0].shape[0]
audio_len = int(hop_size * num_chunks + frame_size - hop_size)
output = torch.zeros((batch_size, audio_len), dtype=chunks[0].dtype).to(
chunks[0].device
)
for i, chunk in enumerate(chunks):
output[:, i * hop_size : i * hop_size + frame_size] += chunk
window_sq = self._get_window_func().pow(2)
window_envelop = torch.zeros((batch_size, audio_len), dtype=chunks[0].dtype).to(
chunks[0].device
)
for i in range(len(chunks)):
window_envelop[:, i * hop_size : i * hop_size + frame_size] += window_sq
output = output / window_envelop
# We need to trim the front padding away if center.
start = (frame_size // 2) if self.center else 0
end = -(frame_size // 2) if ilens.max() is None else start + ilens.max()
return output[..., start:end]
if __name__ == "__main__":
from espnet2.enh.encoder.stft_encoder import STFTEncoder
input_audio = torch.randn((1, 100))
ilens = torch.LongTensor([100])
nfft = 32
win_length = 28
hop = 10
encoder = STFTEncoder(
n_fft=nfft, win_length=win_length, hop_length=hop, onesided=True
)
decoder = STFTDecoder(
n_fft=nfft, win_length=win_length, hop_length=hop, onesided=True
)
frames, flens = encoder(input_audio, ilens)
wav, ilens = decoder(frames, ilens)
splited = encoder.streaming_frame(input_audio)
sframes = [encoder.forward_streaming(s) for s in splited]
swavs = [decoder.forward_streaming(s) for s in sframes]
merged = decoder.streaming_merge(swavs, ilens)
if not (is_torch_1_9_plus and encoder.use_builtin_complex):
sframes = torch_complex.cat(sframes, dim=1)
else:
sframes = torch.cat(sframes, dim=1)
torch.testing.assert_close(sframes.real, frames.real)
torch.testing.assert_close(sframes.imag, frames.imag)
torch.testing.assert_close(wav, input_audio)
torch.testing.assert_close(wav, merged)
|
PypiClean
|
/DInk-0.0.5.tar.gz/DInk-0.0.5/dink/resources.py
|
import json
import zipfile
# NOTE: The `PDF` class provides a thin wrapper around data fetched from the
# API by the API client and should not be initialized directly.
class _BaseResource:
"""
A base resource used to wrap documents fetched from the API with dot
notation access to attributes and methods for access to related API
endpoints.
"""
def __init__(self, client, document):
# The API client used to fetch the resource
self._client = client
# The document representing the resource's data
self._document = document
def __getattr__(self, name):
if '_document' in self.__dict__:
return self.__dict__['_document'].get(name, None)
raise AttributeError(
f"'{self.__class__.__name__}' has no attribute '{name}'"
)
def __getitem__(self, name):
return self.__dict__['_document'][name]
def __contains__(self, name):
return name in self.__dict__['_document']
def get(self, name, default=None):
return self.__dict__['_document'].get(name, default)
class PDF(_BaseResource):
"""
A PDF generated by DInk (specifically the store key and UID for the PDF).
"""
def __str__(self):
return f'PDF: {self.store_key}'
@classmethod
def create(
cls,
client,
template_html,
document_args,
global_args=None,
assets=None,
notification_url=None
):
"""
Create one or more PDFs. Returns a map of created PDFs with each key
in the document args being assigned a PDF.
NOTE: If there was an error generating a PDF then the PDF will not
feature `store_key` and `uid` attributes but an `error` attribute.
"""
results = client(
'put',
f'pdfs',
files={'assets': assets} if assets else None,
data={
'template_html': template_html,
'document_args': json.dumps(document_args),
'global_args': json.dumps(global_args) \
if global_args else None,
'notification_url': notification_url
}
)
if notification_url is None:
return {k: cls(client, v) for k, v in results.items()}
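# Illustrative usage sketch (hypothetical client and template): one PDF is
# generated per key in document_args, so the result maps "invoice-1" to a PDF.
#   pdfs = PDF.create(
#       client,
#       template_html='<h1>{{ title }}</h1>',
#       document_args={'invoice-1': {'title': 'Invoice #1'}},
#   )
#   print(pdfs['invoice-1'].store_key)  # set unless generation failed with `error`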
|
PypiClean
|
/wolfe-0.0.8.tar.gz/wolfe-0.0.8/README.rst
|
wolfe
=====
|image0|
🐺 i am winston wolfe, i solve problems
Demo
----
|image1|
Features
--------
- Written in Python
- Uses the Stack Overflow API.
- You need bash for it to work.
Installation
------------
1: `PIP`_
~~~~~~~~~
.. code:: bash
$ pip install wolfe
2: From Source
~~~~~~~~~~~~~~
.. code:: bash
$ git clone https://github.com/Zephrys/wolfe
$ cd wolfe/
$ python setup.py install
Usage
-----
Ask Mr. Wolfe to record errors, ask politely
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: bash
$ wolfe on
Ask him to solve the last problem you faced via stackoverflow
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: bash
$ wolfe $l
Ask him to solve the last problem you faced via google
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: bash
$ wolfe $l --google
Tell him his services aren’t needed any more
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: bash
$ wolfe off
Help
~~~~
.. code:: bash
$ wolfe --help
Note
~~~~
Mr. Wolfe edits your ``.bashrc`` and stores a copy of the original
``.bashrc`` file in ``~/.bashrc.bak``, every time the ``wolfe on``
command is run.
If your terminal is messed up do ``cp ~/.bashrc.bak ~/.bashrc``
| Mr. Wolfe doesn’t change ``.bashrc`` when the terminal is exited, do
``wolfe off``
| to undo the changes to the ``.bashrc`` file.
Contributing
------------
Use the `issue tracker`_ to file bugs or push new features.
License
-------
Open sourced under the **MIT License**
.. _PIP: https://pypi.python.org/pypi/wolfe
.. _issue tracker: https://github.com/h4ck3rk3y/wolfe
.. |image0| image:: http://i.imgur.com/ffMQrWB.png
.. |image1| image:: http://i.imgur.com/L6lXDyG.gif?1
|
PypiClean
|
/oh_my_tools_package-0.0.3beta56.tar.gz/oh_my_tools_package-0.0.3beta56/my_tools_package/nlp/dictionary.py
|
import random
import pypinyin
from copy import deepcopy
from pathlib import Path
from typing import Union, Dict, List
from collections import defaultdict
from my_tools_package import CONF  # used below for the dictionary data_dir and config paths
from my_tools_package.utils.file import IOUtils
class StatesMachineException(Exception): pass
class Node(object):
def __init__(self, from_word, to_word=None, is_tail=True,
have_child=False):
self.from_word = from_word
if to_word is None:
self.to_word = from_word
self.data = (is_tail, have_child, from_word)
self.is_original = True
else:
self.to_word = to_word or from_word
self.data = (is_tail, have_child, to_word)
self.is_original = False
self.is_tail = is_tail
self.have_child = have_child
def is_original_long_word(self):
return self.is_original and len(self.from_word) > 1
def is_follow(self, chars):
return chars != self.from_word[:-1]
def __str__(self):
return '<Node, %s, %s, %s, %s>' % (repr(self.from_word),
repr(self.to_word), self.is_tail, self.have_child)
__repr__ = __str__
class ConvertMap(object):
def __init__(self, mapping):
self._map = {}
self.set_convert_map(mapping)
def set_convert_map(self, mapping):
convert_map = {}
have_child = {}
max_key_length = 0
for key in sorted(mapping.keys()):
if len(key) > 1:
for i in range(1, len(key)):
parent_key = key[:i]
have_child[parent_key] = True
have_child[key] = False
max_key_length = max(max_key_length, len(key))
for key in sorted(have_child.keys()):
convert_map[key] = (key in mapping, have_child[key],
mapping.get(key, ""))
self._map = convert_map
self.max_key_length = max_key_length
def __getitem__(self, k):
try:
is_tail, have_child, to_word = self._map[k]
return Node(k, to_word, is_tail, have_child)
except:
return Node(k)
def __contains__(self, k):
return k in self._map
def __len__(self):
return len(self._map)
# states
# (START, END, FAIL, WAIT_TAIL) = list(range(4))
# (START, END, FAIL, WAIT_TAIL) = 0,1,2,3
# # conditions
# (TAIL, ERROR, MATCHED_SWITCH, UNMATCHED_SWITCH, CONNECTOR) = list(range(5))
class StatesMachine(object):
# states: start,end,fail,wait_tail
START, END, FAIL, WAIT_TAIL = 0, 1, 2, 3
# condition:tail,error,matched_switch,unmatched_switch,connector
TAIL, ERROR, MATCHED_SWITCH, UNMATCHED_SWITCH, CONNECTOR = 0, 2, 3, 4, 5
def __init__(self):
self.state = self.START
self.final = ""
self.len = 0
self.pool = ""
def clone(self, pool):
new = deepcopy(self)
new.state = self.WAIT_TAIL
new.pool = pool
return new
def feed(self, char, map):
node = map[self.pool + char]
if node.have_child:
if node.is_tail:
if node.is_original:
cond = self.UNMATCHED_SWITCH
else:
cond = self.MATCHED_SWITCH
else:
cond = self.CONNECTOR
else:
if node.is_tail:
cond = self.TAIL
else:
cond = self.ERROR
new = None
if cond == self.ERROR:
self.state = self.FAIL
elif cond == self.TAIL:
if self.state == self.WAIT_TAIL and node.is_original_long_word():
self.state = self.FAIL
else:
self.final += node.to_word
self.len += 1
self.pool = ""
self.state = self.END
elif self.state == self.START or self.state == self.WAIT_TAIL:
if cond == self.MATCHED_SWITCH:
new = self.clone(node.from_word)
self.final += node.to_word
self.len += 1
self.state = self.END
self.pool = ""
elif cond == self.UNMATCHED_SWITCH or cond == self.CONNECTOR:
if self.state == self.START:
new = self.clone(node.from_word)
self.final += node.to_word
self.len += 1
self.state = self.END
else:
if node.is_follow(self.pool):
self.state = self.FAIL
else:
self.pool = node.from_word
elif self.state == self.END:
# END is a new START
self.state = self.START
new = self.feed(char, map)
elif self.state == self.FAIL:
raise StatesMachineException('Translate States Machine '
'have error with input data %s' % node)
return new
def __len__(self):
return self.len + 1
def __str__(self):
return '<StatesMachine %s, pool: "%s", state: %s, final: %s>' % (
id(self), self.pool, self.state, self.final)
__repr__ = __str__
class Converter(object):
START, END, FAIL, WAIT_TAIL = 0, 1, 2, 3
def __init__(self, converted_map):
self.map = converted_map
self.start()
def feed(self, char):
branches = []
for fsm in self.machines:
new = fsm.feed(char, self.map)
if new:
branches.append(new)
if branches:
self.machines.extend(branches)
self.machines = [fsm for fsm in self.machines if fsm.state != self.FAIL]
all_ok = True
for fsm in self.machines:
if fsm.state != self.END:
all_ok = False
if all_ok:
self._clean()
return self.get_result()
def _clean(self):
if len(self.machines):
self.machines.sort(key=lambda x: len(x))
# self.machines.sort(cmp=lambda x,y: cmp(len(x), len(y)))
self.final += self.machines[0].final
self.machines = [StatesMachine()]
def start(self):
self.machines = [StatesMachine()]
self.final = ""
def end(self):
self.machines = [fsm for fsm in self.machines
if fsm.state == self.FAIL or fsm.state == self.END]
self._clean()
def convert(self, string):
self.start()
for char in string:
self.feed(char)
self.end()
return self.get_result()
def get_result(self):
return self.final
def make_dict(text: str) -> defaultdict:
"""将加载的词表转化成字典类型,仅适用于一行只有两个元素"""
word_map = defaultdict()
lines = text.split("\n")
for line in lines:
splits = line.split("\t")
if len(splits) > 1:
word_map[splits[0]] = splits[1]
return word_map
class Dictionary:
def __init__(self):
self.dictionary_dir = Path(CONF["data_dir"]) / "dictionary"
self.dict_config = IOUtils.read_yaml(CONF["dictionary_config_file"])
def _download_file(self, url: str, save_path: Union[str, Path] = None) -> str:
"""
"""
save_path = IOUtils.ensure_dir(save_path) if save_path else IOUtils.ensure_dir(self.dictionary_dir)
filename = url.split("/")[-1]
file_path = save_path / filename
if not file_path.exists():
file_content = IOUtils.download_file(url)
file_path = IOUtils.ensure_file(file_path)
with file_path.open("w") as f:
f.write(file_content)
else:
with file_path.open("r") as f:
file_content = f.read()
return file_content
    def _download_file_from_cos(self, bucket: str, key: str, save_path: Union[str, Path] = None) -> str:
"""
从腾讯云上下载文件到本地
Args:
bucket:
key:
save_path:
Returns:
"""
save_path = IOUtils.ensure_dir(save_path) if save_path else IOUtils.ensure_dir(self.dictionary_dir)
filename = key.split("/")[-1]
file_path = save_path / filename
if not file_path.exists():
IOUtils.download_file_from_cos(bucket,key,str(file_path))
with file_path.open("r") as f:
return f.read()
def load_words(self, save_path: Union[str, Path] = None) -> str:
"""
        Download the general word-segmentation frequency dictionary from Tencent Cloud.
        Args:
            save_path: local path where the file is saved
Returns:
"""
return self._download_file(self.dict_config["word_freq"]["url"], save_path)
def load_common_char(self, save_path: Union[str, Path] = None) -> str:
"""
        Common Chinese character set (frequently used characters).
        Args:
            save_path: local path where the file is saved
Returns:
"""
return self._download_file(self.dict_config["common_char"]["url"], save_path)
def load_same_pinyin(self, save_path: Union[str, Path] = None) -> str:
"""
        Homophone dictionary; each line is: character <TAB> same pinyin & same tone <TAB> same pinyin & different tone.
        For example: 八 巴扒捌笆芭疤吧叭 爸靶霸把伯耙罢拔跋坝
        Args:
            save_path: local path where the file is saved
Returns:
"""
return self._download_file(self.dict_config["same_pinyin"]["url"], save_path)
def load_same_stroke(self, save_path: Union[str, Path] = None) -> str:
"""
        Visually similar (same-stroke) characters, e.g. 龚 龛 詟 垄 陇
        Args:
            save_path: local path where the file is saved
Returns:
"""
return self._download_file(self.dict_config["same_stroke"]["url"], save_path)
def load_person_name(self, save_path: Union[str, Path] = None) -> str:
"""
        Dictionary of well-known person names; format: word <TAB> frequency, e.g. 刘德华 5086
        Args:
            save_path: local path where the file is saved
Returns:
"""
return self._download_file(self.dict_config["person_name"]["url"], save_path)
def load_place_name(self, save_path: Union[str, Path] = None) -> str:
"""
        Place-name dictionary; format: word <TAB> frequency, e.g. 酒店 201212
Args:
save_path:
Returns:
"""
return self._download_file(self.dict_config["place_name"]["url"], save_path)
def load_stop_words(self, save_path: Union[str, Path] = None) -> str:
"""
        Stop words.
Args:
save_path:
Returns:
"""
return self._download_file(self.dict_config["stop_words"]["url"], save_path)
def load_en_word_freq(self, save_path: Union[str, Path] = None) -> str:
"""
        English spelling word-frequency file.
Args:
save_path:
Returns:
"""
return self._download_file(self.dict_config["en_word_freq"]["url"], save_path)
def load_wechat_expression(self, save_path: Union[str, Path] = None) -> str:
"""微信表情 代码 转 文字"""
return self._download_file(self.dict_config["wechat_expression"]["url"], save_path)
def load_traditional2simple(self, save_path: Union[str, Path] = None) -> str:
return self._download_file(self.dict_config["traditional2simple"]["url"], save_path)
def load_simple2traditional(self, save_path: Union[str, Path] = None) -> str:
return self._download_file(self.dict_config["simple2traditional"]["url"], save_path)
    def sample_with_freq(self, input_dict):
        """
        Frequency-weighted sampling.
        The input has the form `{word1: 2, word2: 3, word3: 1}`, where each key is a candidate
        value and each value is its occurrence count. Each call returns one key, sampled at
        random in proportion to its frequency.
        Args:
            input_dict: {word1: 2, word2: 3, word3: 1}
        Returns:
            One key from input_dict, sampled according to its relative frequency.
        """
        keys = list(input_dict.keys())
        values = list(input_dict.values())
        sum_values = sum(values)
        # compute each key's probability
        probas = [v / sum_values for v in values]
        # draw a uniform random number
        rand = random.uniform(0, 1)
        # walk the cumulative distribution until it passes the random draw
        cum_proba = 0
        for key, proba in zip(keys, probas):
            cum_proba += proba
            if cum_proba >= rand:
                return key
def get_homophones_by_char(self, input_char: str) -> List[str]:
"""取字符 input_char 的所有同音字"""
result = []
        # The CJK Unified Ideographs block spans 0x4E00-0x9FA5, the commonly cited 20902 characters
for i in range(0x4e00, 0x9fa6):
if pypinyin.core.pinyin([chr(i)], style=pypinyin.NORMAL, strict=False)[0][0] == \
pypinyin.core.pinyin(input_char, style=pypinyin.NORMAL, strict=False)[0][0]:
result.append(chr(i))
return result
def get_homophones_by_pinyin(self, input_pinyin) -> List[str]:
"""根据拼音取所有同音字"""
result = []
        # The CJK Unified Ideographs block spans 0x4E00-0x9FA5, the commonly cited 20902 characters
for i in range(0x4e00, 0x9fa6):
if pypinyin.core.pinyin([chr(i)], style=pypinyin.NORMAL, strict=False)[0][0] == input_pinyin:
result.append(chr(i))
return result
@property
def stop_words(self) -> Dict[str, int]:
"""停用词表"""
file_content = self.load_stop_words()
stop_words = defaultdict(int)
for word in set(file_content.split("\n")):
stop_words[word.strip()] += 1
return stop_words
@property
def common_char(self) -> List[str]:
"""中文从常见的3502个汉字"""
file_content = self.load_common_char()
common_chars = []
for char in file_content.split('\n'):
common_chars.append(char.strip())
return common_chars
@property
def same_stroke(self) -> Dict[str, List[str]]:
file_content = self.load_same_stroke()
word_stroke = defaultdict(list)
for word_same in file_content.split("\n"):
if not word_same.strip():
continue
word, stroke_word_str = word_same.split("\t")
if stroke_word_str.strip():
same_stroke_words = stroke_word_str.split(",")
else:
same_stroke_words = []
word_stroke[word] = same_stroke_words
return word_stroke
@property
def same_pinyin(self) -> Dict[str, Dict[str, List[str]]]:
file_content = self.load_same_pinyin()
word_pinyin_dict = defaultdict(dict)
for line in file_content.split("\n"):
splits = line.split("\t")
if not len(splits) == 3:
continue
word_pinyin_dict[splits[0]] = {
"homonym": splits[1].strip().split(",") if splits[1].strip() else [],
"difference": splits[2].strip().split(",") if splits[2].strip() else []
}
return word_pinyin_dict
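# Illustrative usage sketch (requires CONF to point at a valid data_dir and
# dictionary config; the first call needs network access to download files):
#   d = Dictionary()
#   print(d.get_homophones_by_pinyin("ba")[:10])  # characters pronounced "ba"
#   print("的" in d.stop_words)                    # stop-word lookup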
|
PypiClean
|
/etu_django_mcmt-1.0.2-py3-none-any.whl/etu_django_mcmt/core/ver_3213/migrate.py
|
import os
import sys
import time
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.core.management.base import (
BaseCommand, CommandError, no_translations,
)
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ModelState, ProjectState
from django.utils.module_loading import module_has_submodule
from django.utils.text import Truncator
from etu_django_mcmt.utils.db_cache_api import DjangoCacheMigrations
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
requires_system_checks = []
def add_arguments(self, parser):
parser.add_argument(
'--skip-checks', action='store_true',
help='Skip system checks.',
)
parser.add_argument(
'app_label', nargs='?',
help='App label of an application to synchronize the state.',
)
parser.add_argument(
'migration_name', nargs='?',
help='Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.',
)
parser.add_argument(
'--noinput', '--no-input', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
parser.add_argument(
'--fake', action='store_true',
help='Mark migrations as run without actually running them.',
)
parser.add_argument(
'--fake-initial', action='store_true',
help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
'that the current database schema matches your initial migration before using this '
'flag. Django will only check for an existing table name.',
)
parser.add_argument(
'--plan', action='store_true',
help='Shows a list of the migration actions that will be performed.',
)
parser.add_argument(
'--run-syncdb', action='store_true',
help='Creates tables for apps without migrations.',
)
parser.add_argument(
'--check', action='store_true', dest='check_unapplied',
help='Exits with a non-zero status if unapplied migrations exist.',
)
@no_translations
def handle(self, *args, **options):
database = options['database']
if not options['skip_checks']:
self.check(databases=[database])
self.verbosity = options['verbosity']
self.interactive = options['interactive']
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
connection = connections[database]
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Raise an error if any migrations are applied before their dependencies.
executor.loader.check_consistent_history(connection)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
run_syncdb = options['run_syncdb']
target_app_labels_only = True
if options['app_label']:
# Validate app_label.
app_label = options['app_label']
try:
apps.get_app_config(app_label)
except LookupError as err:
raise CommandError(str(err))
if run_syncdb:
if app_label in executor.loader.migrated_apps:
raise CommandError("Can't use run_syncdb with app '%s' as it has migrations." % app_label)
elif app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations." % app_label)
if options['app_label'] and options['migration_name']:
migration_name = options['migration_name']
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." %
(migration_name, app_label)
)
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
migration_name, app_label))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif options['app_label']:
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
plan = executor.migration_plan(targets)
exit_dry = plan and options['check_unapplied']
if options['plan']:
self.stdout.write('Planned operations:', self.style.MIGRATE_LABEL)
if not plan:
self.stdout.write(' No planned migration operations.')
for migration, backwards in plan:
self.stdout.write(str(migration), self.style.MIGRATE_HEADING)
for operation in migration.operations:
message, is_error = self.describe_operation(operation, backwards)
style = self.style.WARNING if is_error else None
self.stdout.write(' ' + message, style)
if exit_dry:
sys.exit(1)
return
if exit_dry:
sys.exit(1)
# At this point, ignore run_syncdb if there aren't any apps to sync.
run_syncdb = options['run_syncdb'] and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
if options['app_label']:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated app: %s" % app_label)
)
else:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
(", ".join(sorted(executor.loader.unmigrated_apps)))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ") +
(", ".join(sorted({a for a, n in targets})) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(
self.style.MIGRATE_LABEL(' Unapply all migrations: ') +
str(targets[0][0])
)
else:
self.stdout.write(self.style.MIGRATE_LABEL(
" Target specific migration: ") + "%s, from %s"
% (targets[0][1], targets[0][0])
)
pre_migrate_state = executor._create_project_state(with_applied_migrations=True)
pre_migrate_apps = pre_migrate_state.apps
emit_pre_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=pre_migrate_apps, plan=plan,
)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
if options['app_label']:
self.sync_apps(connection, [app_label])
else:
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
            # If there are changes that aren't in migrations yet, tell the user how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(
" Your models in app(s): %s have changes that are not "
"yet reflected in a migration, and so won't be "
"applied." % ", ".join(repr(app) for app in sorted(changes))
))
self.stdout.write(self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
))
fake = False
fake_initial = False
else:
fake = options['fake']
fake_initial = options['fake_initial']
post_migrate_state = executor.migrate(
targets, plan=plan, state=pre_migrate_state.clone(), fake=fake,
fake_initial=fake_initial,
)
# post_migrate signals have access to all models. Ensure that all models
# are reloaded in case any are delayed.
post_migrate_state.clear_delayed_apps_cache()
post_migrate_apps = post_migrate_state.apps
# Re-render models of real apps to include relationships now that
# we've got a final state. This wouldn't be necessary if real apps
# models were rendered with relationships in the first place.
with post_migrate_apps.bulk_update():
model_keys = []
for model_state in post_migrate_apps.real_models:
model_key = model_state.app_label, model_state.name_lower
model_keys.append(model_key)
post_migrate_apps.unregister_model(*model_key)
post_migrate_apps.render_multiple([
ModelState.from_model(apps.get_model(*model)) for model in model_keys
])
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=post_migrate_apps, plan=plan,
)
        # Configure the database alias and app labels for the migrations cache
dcm_db_alias = os.environ.get('DCM_DB_ALIAS', getattr(settings, 'DCM_DB_ALIAS', DEFAULT_DB_ALIAS))
dcm_app_names = os.environ.get('DCM_APP_NAMES', getattr(settings, 'DCM_APP_NAMES', []))
        # Instantiate the Django migrations cache manager
d_c_m = DjangoCacheMigrations(db_alias=dcm_db_alias, app_names=dcm_app_names)
d_c_m_app_labels = d_c_m.app_labels()
        # Check whether the cache table was created successfully
check_django_cache = d_c_m.check_django_cache_migrations()
if check_django_cache is True:
            # Push the local migration files to the remote cache
for u_dcm_app_label in d_c_m_app_labels:
d_c_m.update_app_migrations_dir_files_to_db(name=u_dcm_app_label)
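        # Example configuration (a hypothetical sketch; the names mirror the
        # env/settings lookups above, the values are illustrative only):
        #     DCM_DB_ALIAS = 'default'              # in settings.py or the environment
        #     DCM_APP_NAMES = ['orders', 'billing']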
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Rendering model states...", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = " (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
def sync_apps(self, connection, app_labels):
"""Run the old syncdb-style operation on a list of app_labels."""
with connection.cursor() as cursor:
tables = connection.introspection.table_names(cursor)
# Build the manifest of apps and models that are to be synchronized.
all_models = [
(
app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=False),
)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.identifier_converter
return not (
(converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
)
manifest = {
app_name: list(filter(model_installed, model_list))
for app_name, model_list in all_models
}
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(' Creating tables...')
with connection.schema_editor() as editor:
for app_name, model_list in manifest.items():
for model in model_list:
# Never install unmanaged models, etc.
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
' Processing %s.%s model' % (app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(' Creating table %s' % model._meta.db_table)
editor.create_model(model)
# Deferred SQL is executed when exiting the editor's context.
if self.verbosity >= 1:
self.stdout.write(' Running deferred SQL...')
@staticmethod
def describe_operation(operation, backwards):
"""Return a string that describes a migration operation for --plan."""
prefix = ''
is_error = False
if hasattr(operation, 'code'):
code = operation.reverse_code if backwards else operation.code
action = (code.__doc__ or '') if code else None
elif hasattr(operation, 'sql'):
action = operation.reverse_sql if backwards else operation.sql
else:
action = ''
if backwards:
prefix = 'Undo '
if action is not None:
action = str(action).replace('\n', '')
elif backwards:
action = 'IRREVERSIBLE'
is_error = True
if action:
action = ' -> ' + action
truncated = Truncator(action)
return prefix + operation.describe() + truncated.chars(40), is_error
|
PypiClean
|
/rekall_gui-1.5.0.post4.tar.gz/rekall_gui-1.5.0.post4/manuskript/plugins/shell.py
|
import hashlib
import json
import logging
from flask import jsonify
from flask import request
from manuskript import plugin
from manuskript import shell
def GenerateCacheKey(state):
    data = json.dumps(state, sort_keys=True)
    # hashlib requires bytes; hash the canonical JSON representation.
    digest = hashlib.md5(data.encode("utf-8")).hexdigest()
    return "%s-shell" % digest
class Shell(plugin.Plugin):
ANGULAR_MODULE = "manuskript.shell"
JS_FILES = ["/static/components/pythoncall/renderer-service.js",
"/static/components/shell/shell-controller.js",
"/static/components/shell/shell.js"]
CSS_FILES = ["/static/components/shell/shell.css"]
@classmethod
def PlugIntoApp(cls, app):
@app.route("/controllers/shell", methods=["POST"])
def shell_call(): # pylint: disable=unused-variable
if cls.__name__ not in app.config:
app.config[cls.__name__] = shell_ = shell.Shell()
shell_ = app.config[cls.__name__]
cell = request.get_json()
cell_id = cell["cell_id"]
source_code = cell["source"]
worksheet = app.config["worksheet"]
# If the data is cached locally just return it.
cache_key = "%s/%s" % (cell_id, GenerateCacheKey(source_code))
cache_filename = "%s/shell" % cell_id
cache = worksheet.GetData(cache_filename)
if cache and cache["cache_key"] == cache_key:
logging.debug("Dumping request from cache")
return json.dumps(cache)
result = None
error = None
is_parsing_error = False
try:
stdout, stderr, result = shell_.Exec(
source_code, cwd=worksheet.location)
except shell.ParseError as e:
stdout, stderr, error = "", "", e.original_error
is_parsing_error = True
except shell.ExecError as e:
stdout, stderr, error = e.stdout, e.stderr, e.original_error
result = dict(stdout=stdout,
stderr=stderr,
result=result,
error=error,
cache_key=cache_key)
response = jsonify(result)
# Cache the data in the worksheet.
worksheet.StoreData(cache_filename, result)
return response
|
PypiClean
|
/absfuyuEX-2.3.1.tar.gz/absfuyuEX-2.3.1/README.md
|
absfuyu's dlc
## INSTALLATION:
```bash
pip install -U absfuyuEX
```
## LICENSE:
```
MIT License
Copyright (c) 2022 AbsoluteWinter
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
|
PypiClean
|
/dj-twilio-sms-2.1.0.tar.gz/dj-twilio-sms-2.1.0/dj_twilio_sms/migrations/0001_initial.py
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='IncomingSMS',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sms_sid', models.CharField(max_length=34)),
('account_sid', models.CharField(max_length=34)),
('from_number', models.CharField(max_length=30)),
('from_city', models.CharField(blank=True, default='', max_length=30)),
('from_state', models.CharField(blank=True, default='', max_length=30)),
('from_zip', models.CharField(blank=True, default='', max_length=30)),
('from_country', models.CharField(blank=True, default='', max_length=120)),
('to_number', models.CharField(max_length=30)),
('body', models.TextField(blank=True, default='', max_length=160)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Incoming SMS',
'verbose_name': 'Incoming SMS',
},
),
migrations.CreateModel(
name='OutgoingSMS',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sms_sid', models.CharField(blank=True, default='', max_length=34)),
('account_sid', models.CharField(blank=True, default='', max_length=34)),
('from_number', models.CharField(max_length=30)),
('to_number', models.CharField(max_length=30)),
('to_parsed', models.CharField(blank=True, default='', max_length=30)),
('body', models.TextField(blank=True, default='', max_length=160)),
('sent_at', models.DateTimeField(blank=True, null=True)),
('delivered_at', models.DateTimeField(blank=True, null=True)),
('status', models.CharField(blank=True, default='', max_length=20)),
('price', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
('price_unit', models.CharField(blank=True, default='', max_length=3)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Outgoing SMS',
'verbose_name': 'Outgoing SMS',
},
),
]
|
PypiClean
|
/pycscl-0.2.0.tar.gz/pycscl-0.2.0/cscl/bitvector_gate_encoders.py
|
from cscl.interfaces import CNFLiteralFactory, ClauseConsumer
import cscl.basic_gate_encoders as gates
# TODO: support Plaisted-Greenbaum encoders
def encode_gate_vector(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory, basic_gate_encoder_fn,
lhs_input_lits, rhs_input_lits, output_lits=None):
"""
Encodes a vector of binary gates.
For input literals lhs_input_lits = [l1, ..., lN], rhs_input_lits = [r1, ..., rN],
output_lits = [o1, ..., oN], this function encodes N gates o1 <-> g(l1, r1),
o2 <-> g(l2, r2), ..., oN <-> g(lN, rN), using the basic gate encoder function
basic_gate_encoder_fn, which must be a binary gate encoder of the cscl.basic_gate_encoders
package.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param basic_gate_encoder_fn: a binary gate encoder function of the cscl.basic_gate_encoders package.
:param lhs_input_lits: The list of left-hand-side input literals.
:param rhs_input_lits: The list of right-hand-side input literals. The length of rhs_input_lits must
be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, N gate output literals,
each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
:return: The list of gate output literals, containing len(lhs_input_lits) literals, with
output_lit[i] <-> g(lhs_input_lits[i], rhs_input_lits[i]) for all i in
range(0, len(lhs_input_lits)).
"""
if len(lhs_input_lits) != len(rhs_input_lits):
raise ValueError("lhs_input_lits and rhs_input_lits must have the same size")
if output_lits is None:
output_lits = [None] * len(lhs_input_lits)
elif len(lhs_input_lits) != len(output_lits):
raise ValueError("If output_lits is not None, it must have the same size as lhs_input_lits")
return [basic_gate_encoder_fn(clause_consumer, lit_factory, (lhs, rhs), output_lit)
for lhs, rhs, output_lit in zip(lhs_input_lits, rhs_input_lits, output_lits)]
def encode_bv_and_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lits=None):
"""
Encodes a bitvector AND gate.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals.
:param rhs_input_lits: The list of right-hand-side input literals. The length of rhs_input_lits must
be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
:return: The list of gate output literals, containing len(lhs_input_lits) literals, with
output_lit[i] <-> (lhs_input_lits[i] AND rhs_input_lits[i]) for all i in
range(0, len(lhs_input_lits)).
"""
return encode_gate_vector(clause_consumer, lit_factory,
gates.encode_and_gate,
lhs_input_lits, rhs_input_lits, output_lits)
def encode_bv_or_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lits=None):
"""
Encodes a bitvector OR gate.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals.
:param rhs_input_lits: The list of right-hand-side input literals. The length of rhs_input_lits must
be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
:return: The list of gate output literals, containing len(lhs_input_lits) literals, with
output_lit[i] <-> (lhs_input_lits[i] OR rhs_input_lits[i]) for all i in
range(0, len(lhs_input_lits)).
"""
return encode_gate_vector(clause_consumer, lit_factory,
gates.encode_or_gate,
lhs_input_lits, rhs_input_lits, output_lits)
def encode_bv_xor_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lits=None):
"""
Encodes a bitvector XOR gate.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals.
:param rhs_input_lits: The list of right-hand-side input literals. The length of rhs_input_lits must
be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
:return: The list of gate output literals, containing len(lhs_input_lits) literals, with
output_lit[i] <-> (lhs_input_lits[i] XOR rhs_input_lits[i]) for all i in
range(0, len(lhs_input_lits)).
"""
return encode_gate_vector(clause_consumer, lit_factory,
gates.encode_binary_xor_gate,
lhs_input_lits, rhs_input_lits, output_lits)
def encode_bv_ripple_carry_adder_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, carry_in_lit=None,
output_lits=None, carry_out_lit=None):
"""
Encodes a ripple-carry-adder-gate constraint.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals, in LSB-to-MSB order.
:param rhs_input_lits: The list of right-hand-side input literals, in LSB-to-MSB order. The length of
rhs_input_lits must be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
:param carry_in_lit: None or a literal. If carry_in_lit is a literal, carry_in_lit is used as the adder's
carry input value. Otherwise, a fixed carry input value of 0 is used.
:param carry_out_lit: None or a literal. If carry_out_lit is a literal, a gate is created constraining carry_out_lit
to the adder's carry output value.
    :return: The list of gate output literals in LSB-to-MSB order, containing len(lhs_input_lits) literals.
The i'th literal of output_lits signifies the i'th bit of the sum.
"""
width = len(lhs_input_lits)
if len(rhs_input_lits) != width or (output_lits is not None and (len(output_lits) != width)):
raise ValueError("Bitvector length mismatch")
if width == 0:
return []
if output_lits is None:
output_lits = [lit_factory.create_literal() for _ in range(0, width)]
output_lits = [o if o is not None else lit_factory.create_literal() for o in output_lits]
# Carries: carries[i] is the carry-out of full-adder no. i for i in range(0,width)
# If carries[i] is None, the carry output is irrelevant and does not need to be encoded
carries = [lit_factory.create_literal() for _ in range(0, width-1)]
carries.append(carry_out_lit)
# Encode the first adder. If there is a carry_in_lit, use a full adder, otherwise, use
# a half adder:
if carry_in_lit is not None:
adder_input = (lhs_input_lits[0], rhs_input_lits[0], carry_in_lit)
gates.encode_full_adder_sum_gate(clause_consumer, lit_factory, adder_input, output_lits[0])
if carries[0] is not None:
gates.encode_full_adder_carry_gate(clause_consumer, lit_factory, adder_input, carries[0])
else:
adder_input = (lhs_input_lits[0], rhs_input_lits[0])
gates.encode_binary_xor_gate(clause_consumer, lit_factory, adder_input, output_lits[0])
if carries[0] is not None:
gates.encode_and_gate(clause_consumer, lit_factory, adder_input, carries[0])
# Encode the rest of the adder:
for i in range(1, width):
adder_input = (lhs_input_lits[i], rhs_input_lits[i], carries[i-1])
gates.encode_full_adder_sum_gate(clause_consumer, lit_factory, adder_input, output_lits[i])
if carries[i] is not None:
gates.encode_full_adder_carry_gate(clause_consumer, lit_factory, adder_input, carries[i])
return output_lits
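# A minimal usage sketch for the adder above (assumes concrete ClauseConsumer
# and CNFLiteralFactory implementations, which cscl only defines as interfaces):
#     lhs = [lit_factory.create_literal() for _ in range(4)]
#     rhs = [lit_factory.create_literal() for _ in range(4)]
#     sum_lits = encode_bv_ripple_carry_adder_gate(clause_consumer, lit_factory, lhs, rhs)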
def encode_bv_ripple_carry_sub_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lits=None):
"""
Encodes a subtraction-gate constraint using a ripple carry adder.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals, in LSB-to-MSB order.
:param rhs_input_lits: The list of right-hand-side input literals, in LSB-to-MSB order. The length of
rhs_input_lits must be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
    :return: The list of gate output literals in LSB-to-MSB order, containing len(lhs_input_lits) literals.
The i'th literal of output_lits signifies the i'th bit of the difference.
"""
flipped_rhs = [-x for x in rhs_input_lits]
constantly_1 = lit_factory.create_literal()
clause_consumer.consume_clause([constantly_1])
return encode_bv_ripple_carry_adder_gate(clause_consumer, lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=flipped_rhs,
carry_in_lit=constantly_1,
output_lits=output_lits)
def encode_bv_parallel_mul_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lits=None, overflow_lit=None):
"""
Encodes a bitvector multiplication-gate constraint, using parallel addition of partial products.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals, in LSB-to-MSB order.
:param rhs_input_lits: The list of right-hand-side input literals, in LSB-to-MSB order. The length of
rhs_input_lits must be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
:param overflow_lit: Iff overflow_lit is not None, gates are added forcing the value of overflow_lit to be true
iff the product of lhs_input_lits and rhs_input_lits cannot be expressed using
len(output_lits) bits.
    :return: The list of gate output literals in LSB-to-MSB order, containing len(lhs_input_lits) literals.
The i'th literal of output_lits signifies the i'th bit of the product.
"""
width = len(lhs_input_lits)
if len(rhs_input_lits) != width or (output_lits is not None and len(output_lits) != width):
raise ValueError("Mismatching bitvector sizes")
if width == 0:
return []
# Implementation:
#
# 1. Encode W=width partial products P(0), ..., P(W-1) with P(i) = rhs_input_lits * [W times lhs_input_lits[i]]
# (0 <= i < W)
# 2. Encode partial sums S(0), ..., S(W-1) with S(0) = P(0)[0:W] and for 1 <= i < W,
# S(i) = S(i-1)[1:] + P(i)[0:W-i]
# 3. For 0 <= i < W, S(i)[0] is the i'th output bit. If any overflow condition occurred in step 2 or any
# partial sum bit not used in step 2 is set to true, the multiplication has an overflow condition.
#
# Example for W=4:
#
# P(0)[3] P(0)[2] P(0)[1] P(0)[0]
# + P(1)[3] P(1)[2] P(1)[1] P(1)[0]
# + P(2)[3] P(2)[2] P(2)[1] P(2)[0]
# + P(3)[3] P(3)[2] P(3)[1] P(3)[0]
# -----------------------------------------------------------
# (can be discarded) | out[3] out[2] out[1] out[0] = Output
#
# Partial sums for output computation:
#
# S(0)[0:W] = P(0)[0:W]
# S(1)[0:W-1] = S(0)[1:W] + P(1)[0:W-1]
# S(2)[0:W-2] = S(1)[1:W-1] + P(2)[0:W-2]
# S(3)[0:W-3] = S(2)[1:W-2] + P(3)[0:W-3]
def __create_fresh_lits(n):
return [lit_factory.create_literal() for _ in range(0, n)]
if output_lits is None:
output_lits = __create_fresh_lits(width)
else:
output_lits = list(map(lambda l: lit_factory.create_literal() if l is None else l, output_lits))
# Directly include the lowermost output bit in the first partial product:
partial_products = [[output_lits[0]] + __create_fresh_lits(width-1)]
lowest_lhs = lhs_input_lits[0]
encode_bv_and_gate(clause_consumer, lit_factory, rhs_input_lits, [lowest_lhs] * width, partial_products[0])
# Don't compute partial sum bits which are discarded anyway:
if overflow_lit is not None:
partial_products += [encode_bv_and_gate(clause_consumer, lit_factory, rhs_input_lits, [l] * width)
for l in lhs_input_lits[1:]]
else:
partial_products += [encode_bv_and_gate(clause_consumer, lit_factory,
rhs_input_lits[0:width-i], [lhs_input_lits[i]] * (width-i))
for i in range(1, width)]
# Compute the partial sums, directly forcing the output literal setting. partial_sums[i] corresponds to S(i+1)
partial_sums = [([output_lits[i]] + __create_fresh_lits(width-i-1)) for i in range(1, width)]
# partial_sum_carries[i] is the carry bit for the computation of partial_sums[i]:
partial_sum_carries = __create_fresh_lits(width-1) if overflow_lit is not None else [None]*(width-1)
current_partial_sum = partial_products[0][1:width]
for i in range(1, width):
current_partial_product = partial_products[i][0:width-i]
partial_sum_accu = partial_sums[i-1]
assert len(current_partial_sum) == width - i
encode_bv_ripple_carry_adder_gate(clause_consumer, lit_factory,
lhs_input_lits=current_partial_sum,
rhs_input_lits=current_partial_product,
output_lits=partial_sum_accu,
carry_out_lit=partial_sum_carries[i-1])
current_partial_sum = partial_sum_accu[1:]
# Check if an overflow occurred:
if overflow_lit is not None:
overflow_indicators = partial_sum_carries[:]
for i in range(1, width):
overflow_indicators += partial_products[i][width-i:width]
gates.encode_or_gate(clause_consumer, lit_factory, overflow_indicators, overflow_lit)
return output_lits
def encode_bv_ule_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lit=None):
"""
Encodes a less-than-or-equal-to-comparison gate for bitvectors representing unsigned integers.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals, in LSB-to-MSB order.
:param rhs_input_lits: The list of right-hand-side input literals, in LSB-to-MSB order. The length of
rhs_input_lits must be the same as the length of lhs_input_lits.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if len(lhs_input_lits) != len(rhs_input_lits):
raise ValueError("Sizes of lhs_input_lits and rhs_input_lits illegally mismatching")
if output_lit is None:
output_lit = lit_factory.create_literal()
# Base cases:
if len(lhs_input_lits) == 0:
clause_consumer.consume_clause([output_lit])
return output_lit
if len(lhs_input_lits) == 1:
gates.encode_and_gate(clause_consumer, lit_factory, [lhs_input_lits[0], -rhs_input_lits[0]], -output_lit)
return output_lit
    # Recursion (on the MSB): lhs <= rhs <-> msb(lhs) < msb(rhs) or (msb(lhs) == msb(rhs) and lhs[:-1] <= rhs[:-1])
width = len(lhs_input_lits)
rest_leq = encode_bv_ule_gate(clause_consumer, lit_factory,
lhs_input_lits[:width-1], rhs_input_lits[:width-1])
lhs_msb, rhs_msb = lhs_input_lits[width-1], rhs_input_lits[width-1]
msb_is_lt = gates.encode_and_gate(clause_consumer, lit_factory, [-lhs_msb, rhs_msb])
msb_is_eq = -gates.encode_binary_xor_gate(clause_consumer, lit_factory, [lhs_msb, rhs_msb])
leq_if_first_is_eq = gates.encode_and_gate(clause_consumer, lit_factory, [msb_is_eq, rest_leq])
return gates.encode_or_gate(clause_consumer, lit_factory, [msb_is_lt, leq_if_first_is_eq], output_lit)
def encode_bv_sle_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lit=None):
"""
Encodes a less-than-or-equal-to-comparison gate for bitvectors representing signed integers
in two's complement encoding.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals, in LSB-to-MSB order.
:param rhs_input_lits: The list of right-hand-side input literals, in LSB-to-MSB order. The length of
rhs_input_lits must be the same as the length of lhs_input_lits.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if len(lhs_input_lits) != len(rhs_input_lits):
raise ValueError("Sizes of lhs_input_lits and rhs_input_lits illegally mismatching")
if output_lit is None:
output_lit = lit_factory.create_literal()
if len(lhs_input_lits) == 0:
clause_consumer.consume_clause([output_lit])
return output_lit
if len(lhs_input_lits) == 1:
return gates.encode_or_gate(clause_consumer, lit_factory, [lhs_input_lits[0], -rhs_input_lits[0]], output_lit)
width = len(lhs_input_lits)
lhs_msb = lhs_input_lits[width-1]
rhs_msb = rhs_input_lits[width-1]
rest_leq = encode_bv_ule_gate(clause_consumer, lit_factory,
lhs_input_lits=lhs_input_lits[:width-1],
rhs_input_lits=rhs_input_lits[:width-1])
msb_eq = -gates.encode_binary_xor_gate(clause_consumer, lit_factory, input_lits=[lhs_msb, rhs_msb])
same_sign_and_leq = gates.encode_and_gate(clause_consumer, lit_factory, input_lits=[msb_eq, rest_leq])
lhs_neg_and_rhs_pos = gates.encode_and_gate(clause_consumer, lit_factory, input_lits=[lhs_msb, -rhs_msb])
return gates.encode_or_gate(clause_consumer, lit_factory,
input_lits=[lhs_neg_and_rhs_pos, same_sign_and_leq],
output_lit=output_lit)
def encode_bv_eq_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lit=None):
"""
    Encodes an equality-comparison gate for bitvectors.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals, in LSB-to-MSB order.
:param rhs_input_lits: The list of right-hand-side input literals, in LSB-to-MSB order. The length of
rhs_input_lits must be the same as the length of lhs_input_lits.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if len(lhs_input_lits) != len(rhs_input_lits):
raise ValueError("Sizes of lhs_input_lits and rhs_input_lits illegally mismatching")
if output_lit is None:
output_lit = lit_factory.create_literal()
differences = encode_bv_xor_gate(clause_consumer, lit_factory, lhs_input_lits, rhs_input_lits)
gates.encode_or_gate(clause_consumer, lit_factory, differences, -output_lit)
return output_lit
def encode_bv_mux_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, select_lhs_lit=None, output_lits=None):
"""
Encodes a bitvector multiplexer gate. The gate's output literals are forced to equal lhs_input_lits if
select_lhs_lit has the value True. If select_lhs_lit has the value False, the output literals are forced
to equal rhs_input_lits.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals.
:param rhs_input_lits: The list of right-hand-side input literals.
:param select_lhs_lit: The selector literal controlling whether the output literals are tied to lhs_input_lits
or to rhs_input_lits. If select_lhs_lit is None, this gate represents the arbitrary
choice of an element in {lhs_input_lits, rhs_input_lits}.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
    :return: The list of gate output literals in LSB-to-MSB order, containing len(lhs_input_lits) literals.
             The i'th literal of output_lits represents the value of the expression
             `if select_lhs_lit then lhs_input_lits[i] else rhs_input_lits[i]`.
"""
select_lhs_lit = lit_factory.create_literal() if select_lhs_lit is None else select_lhs_lit
lhs_selection = encode_bv_and_gate(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits,
rhs_input_lits=[select_lhs_lit]*len(lhs_input_lits))
rhs_selection = encode_bv_and_gate(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=rhs_input_lits,
rhs_input_lits=[-select_lhs_lit]*len(rhs_input_lits))
return encode_bv_or_gate(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=lhs_selection,
rhs_input_lits=rhs_selection,
output_lits=output_lits)
def encode_staggered_or_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
input_lits, output_lits=None):
"""
Given a bitvector `[x_1, x_2, ..., x_n]`, returns a bitvector `[y_1, y_2, ..., y_n]` constrained
such that for all `1 <= i <= n`: `y_i <-> (x_i or x_{i+1} or ... or x_n)`
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param input_lits: The list of literals [x_1, x_2, ..., x_n]
    :param output_lits: The list of output literals [y_1, y_2, ..., y_n], or None. If output_lits is None,
                        len(input_lits) gate output literals, each having a new variable, are created. Otherwise,
                        output_lits must be a list with length len(input_lits), with each contained element either
                        being a literal or None. If the i'th entry of output_lits is None, a literal with a new
                        variable is created as the i'th output literal.
:return: literals `[y_1, y_2, ..., y_n]` constrained as described above.
"""
width = len(input_lits)
if output_lits is not None and len(output_lits) != width:
raise ValueError("Mismatching bitvector sizes")
if width == 0:
return []
if output_lits is None:
result = [lit_factory.create_literal() for _ in range(0, width)]
else:
result = [out_lit if out_lit is not None else lit_factory.create_literal() for out_lit in output_lits]
gates.encode_or_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
input_lits=[input_lits[-1]], output_lit=result[-1])
for idx in reversed(range(0, width-1)):
gates.encode_or_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
input_lits=[input_lits[idx], result[idx+1]], output_lit=result[idx])
return result
def encode_bv_long_udiv_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lits=None, remainder_output_lits=None):
"""
Encodes a bitvector division gate using the "long" integer division algorithm (see e.g.
https://en.wikipedia.org/wiki/Division_algorithm#Integer_division_(unsigned)_with_remainder), for unsigned integers.
    This is one of the simplest division encodings, and likely a very inefficient one.
This divider sets x/0 = 0 for all possible inputs x.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals, in LSB-to-MSB order.
:param rhs_input_lits: The list of right-hand-side input literals, in LSB-to-MSB order. The length of
rhs_input_lits must be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
:param remainder_output_lits: The list of remainder literals in LSB-to-MSB order, or None. If remainder_lits
is not None, it must contain exactly as many literals as `lhs_input_lits`, and
it will be constrained to represent the remainder of the division.
    :return: The list of gate output literals in LSB-to-MSB order, containing len(lhs_input_lits) literals.
The i'th literal of output_lits signifies the i'th bit of the quotient.
"""
width = len(lhs_input_lits)
if len(rhs_input_lits) != width or (output_lits is not None and len(output_lits) != width)\
or (remainder_output_lits is not None and len(remainder_output_lits) != width):
raise ValueError("Mismatching bitvector sizes")
if width == 0:
return []
def __create_fresh_lits(n):
return [lit_factory.create_literal() for _ in range(0, n)]
constantly_false = lit_factory.create_literal()
clause_consumer.consume_clause([-constantly_false])
divisor_any_higher_bits_nonzero = encode_staggered_or_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
input_lits=rhs_input_lits)
quotient = __create_fresh_lits(width)
remainder = list()
for step_idx in reversed(range(0, width)):
remainder = [lhs_input_lits[step_idx]] + remainder
        # store the comparison remainder >= divisor in quotient[step_idx]
if len(remainder) == len(rhs_input_lits):
            # divisor has exactly as many bits as the remainder: perform a direct comparison
encode_bv_ule_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
lhs_input_lits=rhs_input_lits, rhs_input_lits=remainder, output_lit=quotient[step_idx])
else:
# divisor has more bits than the remainder. Save some variable introductions by comparing the divisor's
# extra bits separately:
lower_bit_comparison = encode_bv_ule_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
lhs_input_lits=rhs_input_lits[0:len(remainder)],
rhs_input_lits=remainder)
higher_bits_comparison = divisor_any_higher_bits_nonzero[len(remainder)]
gates.encode_and_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
input_lits=[lower_bit_comparison, -higher_bits_comparison],
output_lit=quotient[step_idx])
remainder_minus_divisor = encode_bv_ripple_carry_sub_gate(clause_consumer=clause_consumer,
lit_factory=lit_factory,
lhs_input_lits=remainder,
rhs_input_lits=rhs_input_lits[0:len(remainder)])
        # If remainder >= divisor, then remainder := remainder - divisor
remainder = encode_bv_mux_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
lhs_input_lits=remainder_minus_divisor, rhs_input_lits=remainder,
select_lhs_lit=quotient[step_idx])
rhs_is_zero = -gates.encode_or_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
input_lits=rhs_input_lits)
# If the user specified remainder literals, use them when appropriate
if remainder_output_lits is not None:
encode_bv_and_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
lhs_input_lits=[-rhs_is_zero] * width, rhs_input_lits=remainder,
output_lits=remainder_output_lits)
# Tie the gate output to False if rhs is 0:
return encode_bv_and_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
lhs_input_lits=[-rhs_is_zero]*width, rhs_input_lits=quotient,
output_lits=output_lits)
def encode_bv_long_urem_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
lhs_input_lits, rhs_input_lits, output_lits=None):
"""
Encodes a bitvector division remainder gate using the "long" integer division algorithm (see e.g.
https://en.wikipedia.org/wiki/Division_algorithm#Integer_division_(unsigned)_with_remainder), for unsigned integers.
    This is one of the simplest division encodings, and likely a very inefficient one.
    Following the divider above, this gate yields x % 0 = 0 for all possible inputs x.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param lhs_input_lits: The list of left-hand-side input literals, in LSB-to-MSB order.
:param rhs_input_lits: The list of right-hand-side input literals, in LSB-to-MSB order. The length of
rhs_input_lits must be the same as the length of lhs_input_lits.
    :param output_lits: The list of output literals, or None. If output_lits is None, len(lhs_input_lits) gate output
literals, each having a new variable, are created. Otherwise, output_lits must be a list
with length len(lhs_input_lits), with each contained element either being a literal
or None. If the i'th entry of output_lits is None, a literal with a new variable is
created as the i'th output literal.
    :return: The list of gate output literals in LSB-to-MSB order, containing len(lhs_input_lits) literals.
The i'th literal of output_lits signifies the i'th bit of the remainder.
"""
if output_lits is None:
output_lits = [lit_factory.create_literal() for _ in lhs_input_lits]
else:
output_lits = [lit_factory.create_literal() if x is None else x for x in output_lits]
encode_bv_long_udiv_gate(clause_consumer=clause_consumer, lit_factory=lit_factory,
lhs_input_lits=lhs_input_lits, rhs_input_lits=rhs_input_lits,
remainder_output_lits=output_lits)
return output_lits
|
PypiClean
|
/datasette-surveys-1.1.8.tar.gz/datasette-surveys-1.1.8/datasette_surveys/static/lib/alpaca/site/docs/fields/table4.md
|
---
layout: documentation-field
title: Table Field
header: Table Field
group: navigation
tags: field
---
{% include JB/setup %}
## Example 9
Drag-and-drop support for draggable table row re-ordering using the <code>dragRows</code> option.
Set <code>dragRows</code> to <code>true</code> to enable draggable rows within your table.
NOTE: This feature currently requires the <code>datatables.net-rowreorder</code> plugin as well as the core <code>datatables.net</code> library.
See the <a href="https://datatables.net/extensions/rowreorder/" target="_blank">DataTables RowReorder Plugin</a> for more information.
If you run into problems with the placement of the draggable overlay, you may need to force absolute positioning of the
overlay like this:
````
.table.dt-rowReorder-float
{
position: absolute !important;
}
````
NOTE: This feature is experimental and may change in the future. We're not entirely happy with the
DataTables RowReorder Plugin and may implement this differently down the road. However, the <code>dragRows</code> option will
continue to work as it does currently and will remain supported.
<div id="field9"> </div>
{% raw %}
<script type="text/javascript" id="field9-script">
$("#field9").alpaca({
"schema": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string",
"title": "Name"
},
"sport": {
"type": "string",
"title": "Sport",
"enum": [
"basketball",
"baseball",
"hockey",
"soccer",
"football"
]
},
"number": {
"type": "number",
"title": "Number"
}
}
}
},
"options": {
"type": "table",
"items": {
"fields": {
"name": {
"type": "personalname"
},
"sport": {
"type": "select",
"optionLabels": [
"Basketball",
"Baseball",
"Hockey",
"Soccer",
"Football"
]
},
"number": {
"type": "integer"
}
}
},
"dragRows": true,
"form": {
"buttons": {
"addRow": {
"title": "Add Row",
"click": function() {
var value = this.getValue();
value.push({
"name": "New Athlete",
"sport": "basketball",
"number": 99
});
this.setValue(value);
}
},
"removeRow": {
"title": "Remove Row",
"click": function() {
var value = this.getValue();
if (value.length > 0) {
value.pop();
this.setValue(value);
}
}
},
"submit": {
"title": "Show JSON",
"click": function() {
alert(JSON.stringify(this.getValue(), null, " "));
}
}
}
}
}
});
</script>
{% endraw %}
<style>
/** we override this here since the DataTables row-reorder plugin seems to miscalculate the DOM position and height on our samples page **/
.table.dt-rowReorder-float
{
position: absolute !important;
}
</style>
|
PypiClean
|
/faas-grip-1.0.1.tar.gz/faas-grip-1.0.1/faas_grip.py
|
import json
import os
import threading
import types
from base64 import b64encode, b64decode
from struct import pack
import six
from gripcontrol import (
GripPubControl,
WebSocketContext,
WebSocketEvent,
decode_websocket_events,
encode_websocket_events,
parse_grip_uri
)
from pubcontrol import Item
# The PubControl instance and lock used for synchronization.
_pubcontrol = None
_lock = threading.Lock()
def _get_proxies():
proxies = []
grip_proxies = os.environ.get('GRIP_PROXIES')
if grip_proxies:
proxies.extend(json.loads(grip_proxies))
grip_url = os.environ.get('GRIP_URL')
if grip_url:
proxies.append(parse_grip_uri(grip_url))
return proxies
def _get_pubcontrol():
global _pubcontrol
_lock.acquire()
if _pubcontrol is None:
_pubcontrol = GripPubControl()
_pubcontrol.apply_grip_config(_get_proxies())
_lock.release()
return _pubcontrol
def _get_prefix():
return os.environ.get('GRIP_PREFIX', '')
def get_pubcontrol():
return _get_pubcontrol()
def publish(channel, formats, id=None, prev_id=None,
blocking=True, callback=None, meta={}):
pub = _get_pubcontrol()
pub.publish(
_get_prefix() + channel,
Item(formats, id=id, prev_id=prev_id, meta=meta),
blocking=blocking,
callback=callback
)
def lambda_websocket_to_response(wscontext):
# meta to remove?
meta_remove = set()
for k, v in six.iteritems(wscontext.orig_meta):
found = False
for nk, nv in six.iteritems(wscontext.meta):
if nk.lower() == k:
found = True
break
if not found:
meta_remove.add(k)
# meta to set?
meta_set = {}
for k, v in six.iteritems(wscontext.meta):
lname = k.lower()
need_set = True
for ok, ov in six.iteritems(wscontext.orig_meta):
if lname == ok and v == ov:
need_set = False
break
if need_set:
meta_set[lname] = v
events = []
if wscontext.accepted:
events.append(WebSocketEvent('OPEN'))
events.extend(wscontext.out_events)
if wscontext.closed:
events.append(
WebSocketEvent('CLOSE', pack('>H', wscontext.out_close_code))
)
headers = {'Content-Type': 'application/websocket-events'}
if wscontext.accepted:
headers['Sec-WebSocket-Extensions'] = 'grip'
for k in meta_remove:
headers['Set-Meta-' + k] = ''
for k, v in six.iteritems(meta_set):
headers['Set-Meta-' + k] = v
body = encode_websocket_events(events)
return {
'isBase64Encoded': True,
'statusCode': 200,
'headers': headers,
'body': b64encode(body).decode('utf-8')
}
def lambda_get_websocket(event):
lower_headers = {}
for k, v in six.iteritems(event.get('headers', {})):
lower_headers[k.lower()] = v
content_type = lower_headers.get('content-type')
if content_type:
at = content_type.find(';')
if at != -1:
content_type = content_type[:at].strip()
if (event['httpMethod'] != 'POST'
or content_type != 'application/websocket-events'):
raise ValueError(
'request does not seem to be a websocket-over-http request'
)
cid = lower_headers.get('connection-id')
meta = {}
for k, v in six.iteritems(lower_headers):
if k.startswith('meta-'):
meta[k[5:]] = v
# read body as binary
if event.get('isBase64Encoded'):
body = b64decode(event['body'])
else:
body = event['body']
if isinstance(body, six.text_type):
body = body.encode('utf-8')
events = decode_websocket_events(body)
wscontext = WebSocketContext(cid, meta, events, grip_prefix=_get_prefix())
wscontext.to_response = types.MethodType(
lambda_websocket_to_response,
wscontext
)
return wscontext
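# A minimal AWS Lambda handler sketch (hypothetical handler name; is_opening,
# accept and subscribe come from gripcontrol's WebSocketContext, and
# to_response is the method attached above):
#     def handler(event, context):
#         ws = lambda_get_websocket(event)
#         if ws.is_opening():
#             ws.accept()
#             ws.subscribe('mychannel')
#         return ws.to_response()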
|
PypiClean
|
/pytdx-async-1.62.tar.gz/pytdx-async-1.62/pytdx/reader/block_reader.py
|
import struct
from pytdx.reader.base_reader import BaseReader
from collections import OrderedDict
import pandas as pd
import os
from io import BytesIO
"""
参考这个 http://blog.csdn.net/Metal1/article/details/44352639
"""
BlockReader_TYPE_FLAT = 0
BlockReader_TYPE_GROUP = 1
class BlockReader(BaseReader):
def get_df(self, fname, result_type=BlockReader_TYPE_FLAT):
result = self.get_data(fname, result_type)
return pd.DataFrame(result)
def get_data(self, fname, result_type=BlockReader_TYPE_FLAT):
result = []
if type(fname) is not bytearray:
with open(fname, "rb") as f:
data = f.read()
else:
data = fname
pos = 384
(num, ) = struct.unpack("<H", data[pos: pos+2])
pos += 2
for i in range(num):
blockname_raw = data[pos: pos+9]
pos += 9
blockname = blockname_raw.decode("gbk", 'ignore').rstrip("\x00")
stock_count, block_type = struct.unpack("<HH", data[pos: pos+4])
pos += 4
block_stock_begin = pos
codes = []
for code_index in range(stock_count):
one_code = data[pos: pos+7].decode("utf-8", 'ignore').rstrip("\x00")
pos += 7
if result_type == BlockReader_TYPE_FLAT:
result.append(
OrderedDict([
("blockname", blockname),
("block_type", block_type),
("code_index", code_index),
("code", one_code),
])
)
elif result_type == BlockReader_TYPE_GROUP:
codes.append(one_code)
if result_type == BlockReader_TYPE_GROUP:
result.append(
OrderedDict([
("blockname", blockname),
("block_type", block_type),
("stock_count", stock_count),
("code_list", ",".join(codes))
])
)
pos = block_stock_begin + 2800
return result
"""
读取通达信备份的自定义板块文件夹,返回格式与通达信板块一致,在广发证券客户端上测试通过,其它未测试
"""
class CustomerBlockReader(BaseReader):
def get_df(self, fname, result_type=BlockReader_TYPE_FLAT):
result = self.get_data(fname, result_type)
return pd.DataFrame(result)
def get_data(self, fname, result_type=BlockReader_TYPE_FLAT):
result = []
if not os.path.isdir(fname):
raise Exception('not a directory')
block_file = '/'.join([fname,'blocknew.cfg'])
if not os.path.exists(block_file):
raise Exception('file does not exist: ' + block_file)
block_data = open(block_file,'rb').read()
pos = 0
result = []
# print(block_data.decode('gbk','ignore'))
while pos < len(block_data):
n1 = block_data[pos:pos + 50].decode('gbk', 'ignore').rstrip("\x00")
n2 = block_data[pos + 50:pos + 120].decode('gbk', 'ignore').rstrip("\x00")
pos = pos + 120
n1 = n1.split('\x00')[0]
n2 = n2.split('\x00')[0]
bf = '/'.join([fname,n2 + '.blk'])
if not os.path.exists(bf):
raise Exception('file does not exist: ' + bf)
codes = open(bf).read().splitlines()
if result_type == BlockReader_TYPE_FLAT:
for index,code in enumerate(codes):
if code != '':
result.append(
OrderedDict([
("blockname",n1),
("block_type",n2),
('code_index',index),
('code',code[1:])
])
)
if result_type == BlockReader_TYPE_GROUP:
cc = [c[1:] for c in codes if c != '']
result.append(
OrderedDict([
("blockname",n1),
("block_type",n2),
("stock_count",len(cc)),
("code_list",",".join(cc))
])
)
return result
if __name__ == '__main__':
df = BlockReader().get_df("/Users/rainx/tmp/block_zs.dat")
print(df)
df2 = BlockReader().get_df("/Users/rainx/tmp/block_zs.dat", BlockReader_TYPE_GROUP)
print(df2)
df3 = CustomerBlockReader().get_df('C:/Users/fit/Desktop/blocknew')
print(df3)
df4 = CustomerBlockReader().get_df('C:/Users/fit/Desktop/blocknew',BlockReader_TYPE_GROUP)
print(df4)
|
PypiClean
|
/honeybee-radiance-1.65.33.tar.gz/honeybee-radiance-1.65.33/honeybee_radiance/lightsource/sky/skymatrix.py
|
from __future__ import division
from .sunmatrix import SunMatrix
import honeybee.typing as typing
from ladybug.wea import Wea
class SkyMatrix(SunMatrix):
"""Annual Climate-based Sky matrix.
The output of SkyMatrix is similar to using Radiance's gendaymtx command with
default options. For more information see the gendaymtx documentation.
https://www.radiance-online.org/learning/documentation/manual-pages/pdfs/gendaymtx.pdf
Args:
wea: A Ladybug wea object.
north: A number between -360 and 360 for the counterclockwise difference between
the North and the positive Y-axis in degrees. 90 is West and 270 is East
(Default: 0)
density: Sky patch subdivision density. This value is similar to the -m
option in the gendaymtx command. Default is 1, which means 145 sky
patches and 1 patch for the ground.
The resolution can be increased, typically by factors of two (2, 4, 8, ...),
which yields a higher-resolution sky using the Reinhart patch subdivision.
For example, setting density to 4 yields a sky with 2305 patches plus one
patch for the ground.
Properties:
* wea
* location
* north
* is_point_in_time
* is_climate_based
"""
__slots__ = ('_density',)
def __init__(self, wea, north=0, density=1):
"""Create a climate-based sky matrix."""
SunMatrix.__init__(self, wea, north)
self.density = density
@property
def density(self):
"""Set and get sky patch subdivision density.
This value is similar to the -m option in the gendaymtx command. Default is 1,
which means 145 sky patches and 1 patch for the ground.
The resolution can be increased, typically by factors of two (2, 4, 8, ...),
which yields a higher-resolution sky using the Reinhart patch subdivision. For
example, setting density to 4 yields a sky with 2305 patches plus one patch
for the ground.
"""
return self._density
@density.setter
def density(self, value):
value = typing.int_in_range(value, 1, input_name='SkyMatrix subdivision density')
self._density = value
@classmethod
def from_dict(cls, input_dict):
"""Create the sky from a dictionary.
Args:
input_dict: A python dictionary in the following format
.. code-block:: python
{
'type': 'SkyMatrix',
'wea': {},
'north': 0.0,  # optional
'density': 1 # optional
}
"""
if 'type' not in input_dict or input_dict['type'] != 'SkyMatrix':
raise ValueError('Input dict "type" must be "SkyMatrix".')
if 'north' in input_dict:
north = input_dict['north']
else:
north = 0
if 'density' in input_dict:
density = input_dict['density']
else:
density = 1
sky = cls(Wea.from_dict(input_dict['wea']), north, density)
return sky
# TODO: add support for additional parameters
# TODO: add gendaymtx to radiance-command and use it for validating inputs
def to_radiance(
self, output_type=0, wea_file=None, output_name=None, cumulative=False,
components=0):
"""Return Radiance command to generate the sky.
Note that you need to write the wea to a file (in.wea) before running this
command.
Alternatively you can use write method which will write the wea data to a file.
Args:
output_type: An integer, either 0 or 1, for the output type.
* 0 = output in W/m2/sr visible (default)
* 1 = output in W/m2/sr solar
wea_file: Path to wea file (default: in.wea).
output_name: A name for output files (default: sky_mtx).
cumulative: A boolean to generate cumulative sky. This option is only
available in Radiance 5.3 and higher versions (default: False).
components: An integer between 0 and 2 for which components should be
included. 0 includes both sun and sky contributions, 1 produces a
sun-only matrix with no sky contribution, and 2 excludes any direct
sun component from the output. If there is a sun in the description,
gendaymtx will include its contribution in the four nearest sky
patches, distributing energy according to centroid proximity
(default: 0).
"""
output_type = typing.int_in_range(output_type, 0, 1, 'SkyMatrix output type')
wea_file = wea_file or 'in.wea'
output_name = output_name or 'sky'
options = ['-O{}'.format(output_type)]
if self.density != 1:
options.append('-m %d' % self.density)
if self.north != 0:
options.append('-r {}'.format(self.north))
if cumulative:
options.append('-A')
if components == 1:
# sun-only
options.append('-d')
elif components == 2:
# sky only
options.append('-s')
options.append(wea_file)
# add all the other options here
command = 'gendaymtx {0} > {1}.mtx'.format(' '.join(options), output_name)
return command
def to_dict(self):
"""Translate this matrix to a dictionary."""
return {
'type': 'SkyMatrix',
'wea': self.wea.to_dict(),
'north': self.north,
'density': self.density
}
def __eq__(self, value):
if type(value) != type(self) \
or value.wea != self.wea \
or value.north != self.north \
or value.density != self.density:
return False
return True
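# A minimal usage sketch (illustrative; the EPW path is a placeholder and Wea
# is already imported at the top of this module):
#
#     wea = Wea.from_epw_file('./weather.epw')
#     sky = SkyMatrix(wea, north=10, density=2)
#     print(sky.to_radiance(output_type=1, cumulative=True))
#     # gendaymtx -O1 -m 2 -r 10 -A in.wea > sky.mtx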
|
PypiClean
|
/brasil.gov.portal-2.1.1.tar.gz/brasil.gov.portal-2.1.1/src/brasil/gov/portal/browser/busca/searchbox.py
|
from brasil.gov.portal.controlpanel.portal import ISettingsPortal
from plone.app.layout.viewlets.common import SearchBoxViewlet as SearchBoxViewletBase # noqa: E501
from plone.formwidget.namedfile.converter import b64decode_file
from plone.registry.interfaces import IRegistry
from zope.component import getUtility
import mimetypes
class SearchBoxViewlet(SearchBoxViewletBase):
"""Search box viewlet customization."""
def update(self):
super(SearchBoxViewlet, self).update()
registry = getUtility(IRegistry)
self.settings = registry.forInterface(ISettingsPortal, check=False)
self.expandable_header = getattr(self.settings, 'expandable_header', False) # noqa: E501
self.media_url = self.site_url + '/@@searchbox-background-media'
@staticmethod
def split(iterable):
results = []
if iterable is None:
return results
for item in iterable:
title, url = item.split('|')
results.append({'title': title, 'url': url})
return results
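# Illustrative example of the 'title|url' records handled above:
#   split(['Home|/home', 'News|/news'])
#   -> [{'title': 'Home', 'url': '/home'}, {'title': 'News', 'url': '/news'}]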
def featured_news(self):
"""Return the list of defined featured news."""
return self.split(self.settings.featured_news)
def more_news(self):
return self.settings.more_news
def featured_services(self):
"""Return the list of defined featured services."""
return self.split(self.settings.featured_services)
def more_services(self):
return self.settings.more_services
def top_subjects(self):
"""Return the list of defined top subjects."""
return self.split(self.settings.top_subjects)
def klass(self):
"""Return a CSS class to let Diazo know which search box is in use."""
if self.expandable_header:
return 'expandable-header'
@property
def is_video(self):
"""Guess if the mimetype of the file stored in the
background_image field is that of a video.
"""
if self.settings.background_image is None:
return False
filename, _ = b64decode_file(self.settings.background_image)
self.mimetype, _ = mimetypes.guess_type(filename)
return self.mimetype is not None and 'video' in self.mimetype
def style(self):
"""Return a CSS style to add a background image to an element.
If the expandable header is not used, or there is no background
image defined, return None to remove the style attribute from
rendering.
"""
if not self.expandable_header:
return None
if self.settings.background_image is None:
return None
if self.is_video:
return None
return 'background-image: url({0})'.format(self.media_url)
|
PypiClean
|
/flask_uio-0.1.6.2.tar.gz/flask_uio-0.1.6.2/flask_uio/static/vendor/summernote-0.8.18-dist/lang/summernote-nl-NL.js
|
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else {
var a = factory();
for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
}
})(window, function() {
return /******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = 32);
/******/ })
/************************************************************************/
/******/ ({
/***/ 32:
/***/ (function(module, exports) {
(function ($) {
$.extend($.summernote.lang, {
'nl-NL': {
font: {
bold: 'Vet',
italic: 'Cursief',
underline: 'Onderstrepen',
clear: 'Stijl verwijderen',
height: 'Regelhoogte',
name: 'Lettertype',
strikethrough: 'Doorhalen',
subscript: 'Subscript',
superscript: 'Superscript',
size: 'Tekstgrootte'
},
image: {
image: 'Afbeelding',
insert: 'Afbeelding invoegen',
resizeFull: 'Volledige breedte',
resizeHalf: 'Halve breedte',
resizeQuarter: 'Kwart breedte',
floatLeft: 'Links uitlijnen',
floatRight: 'Rechts uitlijnen',
floatNone: 'Geen uitlijning',
shapeRounded: 'Shape: Rounded',
shapeCircle: 'Shape: Circle',
shapeThumbnail: 'Shape: Thumbnail',
shapeNone: 'Shape: None',
dragImageHere: 'Sleep hier een afbeelding naar toe',
dropImage: 'Drop image or Text',
selectFromFiles: 'Selecteer een bestand',
maximumFileSize: 'Maximum file size',
maximumFileSizeError: 'Maximum file size exceeded.',
url: 'URL van de afbeelding',
remove: 'Verwijder afbeelding',
original: 'Original'
},
video: {
video: 'Video',
videoLink: 'Video link',
insert: 'Video invoegen',
url: 'URL van de video',
providers: '(YouTube, Vimeo, Vine, Instagram, DailyMotion of Youku)'
},
link: {
link: 'Link',
insert: 'Link invoegen',
unlink: 'Link verwijderen',
edit: 'Wijzigen',
textToDisplay: 'Tekst van link',
url: 'Naar welke URL moet deze link verwijzen?',
openInNewWindow: 'Open in nieuw venster'
},
table: {
table: 'Tabel',
addRowAbove: 'Rij hierboven invoegen',
addRowBelow: 'Rij hieronder invoegen',
addColLeft: 'Kolom links toevoegen',
addColRight: 'Kolom rechts toevoegen',
delRow: 'Verwijder rij',
delCol: 'Verwijder kolom',
delTable: 'Verwijder tabel'
},
hr: {
insert: 'Horizontale lijn invoegen'
},
style: {
style: 'Stijl',
p: 'Normaal',
blockquote: 'Quote',
pre: 'Code',
h1: 'Kop 1',
h2: 'Kop 2',
h3: 'Kop 3',
h4: 'Kop 4',
h5: 'Kop 5',
h6: 'Kop 6'
},
lists: {
unordered: 'Ongeordende lijst',
ordered: 'Geordende lijst'
},
options: {
help: 'Help',
fullscreen: 'Volledig scherm',
codeview: 'Bekijk Code'
},
paragraph: {
paragraph: 'Paragraaf',
outdent: 'Inspringen verkleinen',
indent: 'Inspringen vergroten',
left: 'Links uitlijnen',
center: 'Centreren',
right: 'Rechts uitlijnen',
justify: 'Uitvullen'
},
color: {
recent: 'Recente kleur',
more: 'Meer kleuren',
background: 'Achtergrond kleur',
foreground: 'Tekst kleur',
transparent: 'Transparant',
setTransparent: 'Transparant',
reset: 'Standaard',
resetToDefault: 'Standaard kleur'
},
shortcut: {
shortcuts: 'Toetsencombinaties',
close: 'sluiten',
textFormatting: 'Tekststijlen',
action: 'Acties',
paragraphFormatting: 'Paragraafstijlen',
documentStyle: 'Documentstijlen',
extraKeys: 'Extra keys'
},
help: {
'insertParagraph': 'Alinea invoegen',
'undo': 'Laatste handeling ongedaan maken',
'redo': 'Laatste handeling opnieuw uitvoeren',
'tab': 'Tab',
'untab': 'Herstel tab',
'bold': 'Stel stijl in als vet',
'italic': 'Stel stijl in als cursief',
'underline': 'Stel stijl in als onderstreept',
'strikethrough': 'Stel stijl in als doorgestreept',
'removeFormat': 'Verwijder stijl',
'justifyLeft': 'Lijn links uit',
'justifyCenter': 'Set center align',
'justifyRight': 'Lijn rechts uit',
'justifyFull': 'Lijn uit op volledige breedte',
'insertUnorderedList': 'Zet ongeordende lijstweergave aan',
'insertOrderedList': 'Zet geordende lijstweergave aan',
'outdent': 'Verwijder inspringing huidige alinea',
'indent': 'Inspringen op huidige alinea',
'formatPara': 'Wijzig formattering huidig blok in alinea(P tag)',
'formatH1': 'Formatteer huidig blok als H1',
'formatH2': 'Formatteer huidig blok als H2',
'formatH3': 'Formatteer huidig blok als H3',
'formatH4': 'Formatteer huidig blok als H4',
'formatH5': 'Formatteer huidig blok als H5',
'formatH6': 'Formatteer huidig blok als H6',
'insertHorizontalRule': 'Invoegen horizontale lijn',
'linkDialog.show': 'Toon Link Dialoogvenster'
},
history: {
undo: 'Ongedaan maken',
redo: 'Opnieuw doorvoeren'
},
specialChar: {
specialChar: 'SPECIALE TEKENS',
select: 'Selecteer Speciale Tekens'
}
}
});
})(jQuery);
/***/ })
/******/ });
});
|
PypiClean
|
/aio_tinder-0.1.8-py35-none-any.whl/aiotinder/controllers/api.py
|
import asyncio
import warnings
from http import HTTPStatus
from typing import (AnyStr, Dict, List, TypeVar, Any)
import aiohttp
import ujson as json
from aiotinder import settings
from aiotinder.controllers import exceptions
from aiotinder.models.model import User
warnings.resetwarnings()
T = TypeVar("T", int, List)
G = TypeVar("G", bool, int, AnyStr, List)
class Api:
"""API
"""
def __init__(self, facebook_id: AnyStr, facebook_token: AnyStr,
tinder_token: AnyStr = None,
loop: asyncio.events.AbstractEventLoop = None,
headers: Dict[str, str] = None) -> None:
"""
:param facebook_id: Facebook ID
:param facebook_token: Facebook Token
:param tinder_token: Tinder Token
"""
self.token = facebook_token
self.id = facebook_id
self.tinder_token = tinder_token
self.headers = headers or settings.HEADERS
self.loop = loop or asyncio.get_event_loop()
self.session = aiohttp.ClientSession(headers=self.headers, loop=self.loop)
def __del__(self) -> None:
"""
"""
self.session.close()
@staticmethod
def construct_url(path: AnyStr) -> AnyStr:
"""Construct given path with the Tinder API URL.
:param path: Relative URL
:return: Constructed full URL.
"""
return "{0}{1}".format(settings.API_URL, path)
async def request(self, method: AnyStr, path: AnyStr,
data: Dict[AnyStr, Any] = None) -> Dict:
"""Make a request to the Tinder API.
:param method: HTTP Method to make a request (GET, POST etc.)
:param path: Relative URL to make the request.
:param data: Data to pass to the API.
:return: JSON response.
"""
payload = None
if data is not None:
payload = json.dumps(data)
if not self.tinder_token:
await self.authenticate()
url = Api.construct_url(path)
async with self.session.request(method, url, headers=self.headers,
data=payload) as response:
if response.status != HTTPStatus.OK:
message = "Response status was {0}".format(response.status)
raise exceptions.TinderConnectionException(message)
return await response.json()
async def authenticate(self) -> Dict[AnyStr, Dict[AnyStr, Any]]:
"""Authenticate with Tinder API. Once we get the `token` from Tinder,
we use this token to sign the requests while making other
requests (dislike, profile etc.)
:return: JSON response.
"""
payload = json.dumps({"facebook_id": self.id,
"facebook_token": self.token})
url = Api.construct_url("auth")
async with self.session.post(url, data=payload) as response:
if response.status != HTTPStatus.OK:
message = "Connection error: {0}".format(response.status)
raise exceptions.TinderConnectionException(message)
data = await response.json()
if "token" not in data:
raise exceptions.TinderAuthenticationException("Cannot get token")
self.tinder_token = data.get("token")
self.headers.update({"X-Auth-Token": self.tinder_token})
return data
async def profile(self) -> Dict[AnyStr, G]:
"""User profile (This is your profile)
:return: User's profile settings.
"""
return await self.request("get", "profile?include=spotify")
async def swipe_left(self, user: User) -> Dict[AnyStr, int]:
"""Swipe left (to be not interested with the person with the given `uid`).
:param user: User object.
:param group:
:return: JSON response.
"""
# if group:
# return await self.request("get", "group/pass/{0}".format(uid))
url_path = "pass/{0}?content_hash={1}".format(user._id, user.content_hash)
return await self.request("get", url_path)
async def message(self, match_id: str, message: str) -> Dict[AnyStr, G]:
"""Send message to the match.
:param match_id: Match Id.
:param message: Message.
:return: JSON Response.
"""
url_path = "user/matches/{0}".format(match_id)
return await self.request("post", url_path, data={"message": message})
async def meta(self, path: str = None) -> Dict[AnyStr, G]:
"""Meta information. Possible optional `path` would be superlike info.
:param path: Additional meta path such as superlike info.
:return: JSON Response.
"""
if path:
return await self.request("get", "meta/{0}".format(path))
return await self.request("get", "meta")
async def common_connections(self, uid: AnyStr) -> Dict[AnyStr, G]:
"""Common connections with the user with the given `uid`.
:param uid: User Id.
:return: JSON Response.
"""
url_path = "user/{0}/common_connections".format(uid)
return await self.request("get", url_path)
async def spotify_popular(self) -> Dict[AnyStr, G]:
"""Get popular songs from Spotify
:return: JSON Response.
"""
url_path = "v2/profile/spotify/popular"
return await self.request("get", url_path)
async def spotify_theme(self, song_id: str,
delete: bool = False) -> Dict[AnyStr, T]:
"""Add or delete spotify anthem from profile
:param song_id: Spotify Song ID to add
:param delete: If it's set `True`, then we delete the anthem.
:return: JSON Response.
"""
url_path = "v2/profile/spotify/theme"
if delete:
return await self.request("delete", url_path)
return await self.request("put", url_path, data={"id": song_id})
async def share(self, user: User) -> Dict[AnyStr, G]:
"""Share a user with someone on your contact list.
:param user: User
:return: JSON Response.
"""
url_path = "user/{0}/share".format(user._id)
return await self.request("post", url_path)
async def superlike(self, user: User) -> Dict[AnyStr, G]:
"""Superlike a user.
:param user: User
:return: JSON Response.
"""
url_path = "like/{0}/super/".format(user._id)
return await self.request("post", url_path,
data={"content_hash": user.content_hash})
async def swipe_right(self, user: User) -> Dict[AnyStr, int]:
"""Swipe right (to like the person with the given `uid`)
:param user: User object.
:return: JSON Response.
"""
url_path = "like/{0}?content_hash={1}".format(user._id, user.content_hash)
return await self.request("get", url_path)
async def prospective(self, locale: AnyStr = "en-US") -> Dict[AnyStr, T]:
"""Get recommended users from Tinder.
:param locale: Locale setting.
:return: JSON response.
"""
return await self.request("get", "recs/social?locale={0}".format(locale))
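# A minimal, illustrative driver (the credentials below are placeholders):
#
#     async def main():
#         api = Api(facebook_id="FB_ID", facebook_token="FB_TOKEN")
#         profile = await api.profile()
#         recommendations = await api.prospective()
#         print(profile, recommendations)
#
#     asyncio.get_event_loop().run_until_complete(main())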
|
PypiClean
|
/django-classic-user-accounts-1.0.39.tar.gz/django-classic-user-accounts-1.0.39/ClassicUserAccounts/static/matrix-admin-v2/assets/libs/chart/jquery.flot.min.js
|
(function(b) { b.color = {};
b.color.make = function(d, e, g, f) { var c = {};
c.r = d || 0;
c.g = e || 0;
c.b = g || 0;
c.a = f != null ? f : 1;
c.add = function(h, j) { for (var k = 0; k < h.length; ++k) { c[h.charAt(k)] += j } return c.normalize() };
c.scale = function(h, j) { for (var k = 0; k < h.length; ++k) { c[h.charAt(k)] *= j } return c.normalize() };
c.toString = function() { if (c.a >= 1) { return "rgb(" + [c.r, c.g, c.b].join(",") + ")" } else { return "rgba(" + [c.r, c.g, c.b, c.a].join(",") + ")" } };
c.normalize = function() {
function h(k, j, l) { return j < k ? k : (j > l ? l : j) } c.r = h(0, parseInt(c.r), 255);
c.g = h(0, parseInt(c.g), 255);
c.b = h(0, parseInt(c.b), 255);
c.a = h(0, c.a, 1); return c };
c.clone = function() { return b.color.make(c.r, c.g, c.b, c.a) }; return c.normalize() };
b.color.extract = function(d, e) { var c;
do { c = d.css(e).toLowerCase(); if (c != "" && c != "transparent") { break } d = d.parent() } while (!b.nodeName(d.get(0), "body")); if (c == "rgba(0, 0, 0, 0)") { c = "transparent" } return b.color.parse(c) };
b.color.parse = function(c) { var d, f = b.color.make; if (d = /rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(c)) { return f(parseInt(d[1], 10), parseInt(d[2], 10), parseInt(d[3], 10)) } if (d = /rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(c)) { return f(parseInt(d[1], 10), parseInt(d[2], 10), parseInt(d[3], 10), parseFloat(d[4])) } if (d = /rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(c)) { return f(parseFloat(d[1]) * 2.55, parseFloat(d[2]) * 2.55, parseFloat(d[3]) * 2.55) } if (d = /rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(c)) { return f(parseFloat(d[1]) * 2.55, parseFloat(d[2]) * 2.55, parseFloat(d[3]) * 2.55, parseFloat(d[4])) } if (d = /#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(c)) { return f(parseInt(d[1], 16), parseInt(d[2], 16), parseInt(d[3], 16)) } if (d = /#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(c)) { return f(parseInt(d[1] + d[1], 16), parseInt(d[2] + d[2], 16), parseInt(d[3] + d[3], 16)) } var e = b.trim(c).toLowerCase(); if (e == "transparent") { return f(255, 255, 255, 0) } else { d = a[e] || [0, 0, 0]; return f(d[0], d[1], d[2]) } }; var a = { aqua: [0, 255, 255], azure: [240, 255, 255], beige: [245, 245, 220], black: [0, 0, 0], blue: [0, 0, 255], brown: [165, 42, 42], cyan: [0, 255, 255], darkblue: [0, 0, 139], darkcyan: [0, 139, 139], darkgrey: [169, 169, 169], darkgreen: [0, 100, 0], darkkhaki: [189, 183, 107], darkmagenta: [139, 0, 139], darkolivegreen: [85, 107, 47], darkorange: [255, 140, 0], darkorchid: [153, 50, 204], darkred: [139, 0, 0], darksalmon: [233, 150, 122], darkviolet: [148, 0, 211], fuchsia: [255, 0, 255], gold: [255, 215, 0], green: [0, 128, 0], indigo: [75, 0, 130], khaki: [240, 230, 140], lightblue: [173, 216, 230], lightcyan: [224, 255, 255], lightgreen: [144, 238, 144], lightgrey: [211, 211, 211], lightpink: [255, 182, 193], lightyellow: [255, 255, 224], lime: [0, 255, 0], magenta: [255, 0, 255], maroon: [128, 0, 0], navy: [0, 0, 128], olive: [128, 128, 0], orange: [255, 165, 0], pink: [255, 192, 203], purple: [128, 0, 128], violet: [128, 0, 128], red: [255, 0, 0], silver: [192, 192, 192], white: [255, 255, 255], yellow: [255, 255, 0] } })(jQuery);
(function(c) {
function b(av, ai, J, af) { var Q = [],
O = { colors: ["#488c13", "#da4b0f", "#e9b104", "#97080e", "#1b55c0"], legend: { show: true, noColumns: 1, labelFormatter: null, labelBoxBorderColor: "#ccc", container: null, position: "ne", margin: 5, backgroundColor: null, backgroundOpacity: 0.85 }, xaxis: { show: null, position: "bottom", mode: null, color: null, tickColor: null, transform: null, inverseTransform: null, min: null, max: null, autoscaleMargin: null, ticks: null, tickFormatter: null, labelWidth: null, labelHeight: null, reserveSpace: null, tickLength: null, alignTicksWithAxis: null, tickDecimals: null, tickSize: null, minTickSize: null, monthNames: null, timeformat: null, twelveHourClock: false }, yaxis: { autoscaleMargin: 0.02, position: "left" }, xaxes: [], yaxes: [], series: { points: { show: false, radius: 3, lineWidth: 2, fill: true, fillColor: "#ffffff", symbol: "circle" }, lines: { lineWidth: 2, fill: false, fillColor: null, steps: false }, bars: { show: false, lineWidth: 2, barWidth: 1, fill: true, fillColor: null, align: "left", horizontal: false }, shadowSize: 3 }, grid: { show: true, aboveData: false, color: "#545454", backgroundColor: null, borderColor: "#cdcdcd", tickColor: "#ebebeb", labelMargin: 5, axisMargin: 8, borderWidth: 1, minBorderMargin: null, markings: null, markingsColor: "#f4f4f4", markingsLineWidth: 2, clickable: false, hoverable: false, autoHighlight: true, mouseActiveRadius: 10 }, hooks: {} },
az = null,
ad = null,
y = null,
H = null,
A = null,
p = [],
aw = [],
q = { left: 0, right: 0, top: 0, bottom: 0 },
G = 0,
I = 0,
h = 0,
w = 0,
ak = { processOptions: [], processRawData: [], processDatapoints: [], drawSeries: [], draw: [], bindEvents: [], drawOverlay: [], shutdown: [] },
aq = this;
aq.setData = aj;
aq.setupGrid = t;
aq.draw = W;
aq.getPlaceholder = function() { return av };
aq.getCanvas = function() { return az };
aq.getPlotOffset = function() { return q };
aq.width = function() { return h };
aq.height = function() { return w };
aq.offset = function() { var aB = y.offset();
aB.left += q.left;
aB.top += q.top; return aB };
aq.getData = function() { return Q };
aq.getAxes = function() { var aC = {},
aB;
c.each(p.concat(aw), function(aD, aE) { if (aE) { aC[aE.direction + (aE.n != 1 ? aE.n : "") + "axis"] = aE } }); return aC };
aq.getXAxes = function() { return p };
aq.getYAxes = function() { return aw };
aq.c2p = C;
aq.p2c = ar;
aq.getOptions = function() { return O };
aq.highlight = x;
aq.unhighlight = T;
aq.triggerRedrawOverlay = f;
aq.pointOffset = function(aB) { return { left: parseInt(p[aA(aB, "x") - 1].p2c(+aB.x) + q.left), top: parseInt(aw[aA(aB, "y") - 1].p2c(+aB.y) + q.top) } };
aq.shutdown = ag;
aq.resize = function() { B();
g(az);
g(ad) };
aq.hooks = ak;
F(aq);
Z(J);
X();
aj(ai);
t();
W();
ah();
function an(aD, aB) { aB = [aq].concat(aB); for (var aC = 0; aC < aD.length; ++aC) { aD[aC].apply(this, aB) } }
function F() { for (var aB = 0; aB < af.length; ++aB) { var aC = af[aB];
aC.init(aq); if (aC.options) { c.extend(true, O, aC.options) } } }
function Z(aC) { var aB;
c.extend(true, O, aC); if (O.xaxis.color == null) { O.xaxis.color = O.grid.color } if (O.yaxis.color == null) { O.yaxis.color = O.grid.color } if (O.xaxis.tickColor == null) { O.xaxis.tickColor = O.grid.tickColor } if (O.yaxis.tickColor == null) { O.yaxis.tickColor = O.grid.tickColor } if (O.grid.borderColor == null) { O.grid.borderColor = O.grid.color } if (O.grid.tickColor == null) { O.grid.tickColor = c.color.parse(O.grid.color).scale("a", 0.22).toString() } for (aB = 0; aB < Math.max(1, O.xaxes.length); ++aB) { O.xaxes[aB] = c.extend(true, {}, O.xaxis, O.xaxes[aB]) } for (aB = 0; aB < Math.max(1, O.yaxes.length); ++aB) { O.yaxes[aB] = c.extend(true, {}, O.yaxis, O.yaxes[aB]) } if (O.xaxis.noTicks && O.xaxis.ticks == null) { O.xaxis.ticks = O.xaxis.noTicks } if (O.yaxis.noTicks && O.yaxis.ticks == null) { O.yaxis.ticks = O.yaxis.noTicks } if (O.x2axis) { O.xaxes[1] = c.extend(true, {}, O.xaxis, O.x2axis);
O.xaxes[1].position = "top" } if (O.y2axis) { O.yaxes[1] = c.extend(true, {}, O.yaxis, O.y2axis);
O.yaxes[1].position = "right" } if (O.grid.coloredAreas) { O.grid.markings = O.grid.coloredAreas } if (O.grid.coloredAreasColor) { O.grid.markingsColor = O.grid.coloredAreasColor } if (O.lines) { c.extend(true, O.series.lines, O.lines) } if (O.points) { c.extend(true, O.series.points, O.points) } if (O.bars) { c.extend(true, O.series.bars, O.bars) } if (O.shadowSize != null) { O.series.shadowSize = O.shadowSize } for (aB = 0; aB < O.xaxes.length; ++aB) { V(p, aB + 1).options = O.xaxes[aB] } for (aB = 0; aB < O.yaxes.length; ++aB) { V(aw, aB + 1).options = O.yaxes[aB] } for (var aD in ak) { if (O.hooks[aD] && O.hooks[aD].length) { ak[aD] = ak[aD].concat(O.hooks[aD]) } } an(ak.processOptions, [O]) }
function aj(aB) { Q = Y(aB);
ax();
z() }
function Y(aE) { var aC = []; for (var aB = 0; aB < aE.length; ++aB) { var aD = c.extend(true, {}, O.series); if (aE[aB].data != null) { aD.data = aE[aB].data;
delete aE[aB].data;
c.extend(true, aD, aE[aB]);
aE[aB].data = aD.data } else { aD.data = aE[aB] } aC.push(aD) } return aC }
function aA(aC, aD) { var aB = aC[aD + "axis"]; if (typeof aB == "object") { aB = aB.n } if (typeof aB != "number") { aB = 1 } return aB }
function m() { return c.grep(p.concat(aw), function(aB) { return aB }) }
function C(aE) { var aC = {},
aB, aD; for (aB = 0; aB < p.length; ++aB) { aD = p[aB]; if (aD && aD.used) { aC["x" + aD.n] = aD.c2p(aE.left) } } for (aB = 0; aB < aw.length; ++aB) { aD = aw[aB]; if (aD && aD.used) { aC["y" + aD.n] = aD.c2p(aE.top) } } if (aC.x1 !== undefined) { aC.x = aC.x1 } if (aC.y1 !== undefined) { aC.y = aC.y1 } return aC }
function ar(aF) { var aD = {},
aC, aE, aB; for (aC = 0; aC < p.length; ++aC) { aE = p[aC]; if (aE && aE.used) { aB = "x" + aE.n; if (aF[aB] == null && aE.n == 1) { aB = "x" } if (aF[aB] != null) { aD.left = aE.p2c(aF[aB]); break } } } for (aC = 0; aC < aw.length; ++aC) { aE = aw[aC]; if (aE && aE.used) { aB = "y" + aE.n; if (aF[aB] == null && aE.n == 1) { aB = "y" } if (aF[aB] != null) { aD.top = aE.p2c(aF[aB]); break } } } return aD }
function V(aC, aB) { if (!aC[aB - 1]) { aC[aB - 1] = { n: aB, direction: aC == p ? "x" : "y", options: c.extend(true, {}, aC == p ? O.xaxis : O.yaxis) } } return aC[aB - 1] }
function ax() { var aG; var aM = Q.length,
aB = [],
aE = []; for (aG = 0; aG < Q.length; ++aG) { var aJ = Q[aG].color; if (aJ != null) {--aM; if (typeof aJ == "number") { aE.push(aJ) } else { aB.push(c.color.parse(Q[aG].color)) } } } for (aG = 0; aG < aE.length; ++aG) { aM = Math.max(aM, aE[aG] + 1) } var aC = [],
aF = 0;
aG = 0; while (aC.length < aM) { var aI; if (O.colors.length == aG) { aI = c.color.make(100, 100, 100) } else { aI = c.color.parse(O.colors[aG]) } var aD = aF % 2 == 1 ? -1 : 1;
aI.scale("rgb", 1 + aD * Math.ceil(aF / 2) * 0.2);
aC.push(aI);++aG; if (aG >= O.colors.length) { aG = 0;++aF } } var aH = 0,
aN; for (aG = 0; aG < Q.length; ++aG) { aN = Q[aG]; if (aN.color == null) { aN.color = aC[aH].toString();++aH } else { if (typeof aN.color == "number") { aN.color = aC[aN.color].toString() } } if (aN.lines.show == null) { var aL, aK = true; for (aL in aN) { if (aN[aL] && aN[aL].show) { aK = false; break } } if (aK) { aN.lines.show = true } } aN.xaxis = V(p, aA(aN, "x"));
aN.yaxis = V(aw, aA(aN, "y")) } }
function z() { var aO = Number.POSITIVE_INFINITY,
aI = Number.NEGATIVE_INFINITY,
aB = Number.MAX_VALUE,
aU, aS, aR, aN, aD, aJ, aT, aP, aH, aG, aC, a0, aX, aL;
function aF(a3, a2, a1) { if (a2 < a3.datamin && a2 != -aB) { a3.datamin = a2 } if (a1 > a3.datamax && a1 != aB) { a3.datamax = a1 } } c.each(m(), function(a1, a2) { a2.datamin = aO;
a2.datamax = aI;
a2.used = false }); for (aU = 0; aU < Q.length; ++aU) { aJ = Q[aU];
aJ.datapoints = { points: [] };
an(ak.processRawData, [aJ, aJ.data, aJ.datapoints]) } for (aU = 0; aU < Q.length; ++aU) { aJ = Q[aU]; var aZ = aJ.data,
aW = aJ.datapoints.format; if (!aW) { aW = [];
aW.push({ x: true, number: true, required: true });
aW.push({ y: true, number: true, required: true }); if (aJ.bars.show || (aJ.lines.show && aJ.lines.fill)) { aW.push({ y: true, number: true, required: false, defaultValue: 0 }); if (aJ.bars.horizontal) { delete aW[aW.length - 1].y;
aW[aW.length - 1].x = true } } aJ.datapoints.format = aW } if (aJ.datapoints.pointsize != null) { continue } aJ.datapoints.pointsize = aW.length;
aP = aJ.datapoints.pointsize;
aT = aJ.datapoints.points;
insertSteps = aJ.lines.show && aJ.lines.steps;
aJ.xaxis.used = aJ.yaxis.used = true; for (aS = aR = 0; aS < aZ.length; ++aS, aR += aP) { aL = aZ[aS]; var aE = aL == null; if (!aE) { for (aN = 0; aN < aP; ++aN) { a0 = aL[aN];
aX = aW[aN]; if (aX) { if (aX.number && a0 != null) { a0 = +a0; if (isNaN(a0)) { a0 = null } else { if (a0 == Infinity) { a0 = aB } else { if (a0 == -Infinity) { a0 = -aB } } } } if (a0 == null) { if (aX.required) { aE = true } if (aX.defaultValue != null) { a0 = aX.defaultValue } } } aT[aR + aN] = a0 } } if (aE) { for (aN = 0; aN < aP; ++aN) { a0 = aT[aR + aN]; if (a0 != null) { aX = aW[aN]; if (aX.x) { aF(aJ.xaxis, a0, a0) } if (aX.y) { aF(aJ.yaxis, a0, a0) } } aT[aR + aN] = null } } else { if (insertSteps && aR > 0 && aT[aR - aP] != null && aT[aR - aP] != aT[aR] && aT[aR - aP + 1] != aT[aR + 1]) { for (aN = 0; aN < aP; ++aN) { aT[aR + aP + aN] = aT[aR + aN] } aT[aR + 1] = aT[aR - aP + 1];
aR += aP } } } } for (aU = 0; aU < Q.length; ++aU) { aJ = Q[aU];
an(ak.processDatapoints, [aJ, aJ.datapoints]) } for (aU = 0; aU < Q.length; ++aU) { aJ = Q[aU];
aT = aJ.datapoints.points, aP = aJ.datapoints.pointsize; var aK = aO,
aQ = aO,
aM = aI,
aV = aI; for (aS = 0; aS < aT.length; aS += aP) { if (aT[aS] == null) { continue } for (aN = 0; aN < aP; ++aN) { a0 = aT[aS + aN];
aX = aW[aN]; if (!aX || a0 == aB || a0 == -aB) { continue } if (aX.x) { if (a0 < aK) { aK = a0 } if (a0 > aM) { aM = a0 } } if (aX.y) { if (a0 < aQ) { aQ = a0 } if (a0 > aV) { aV = a0 } } } } if (aJ.bars.show) { var aY = aJ.bars.align == "left" ? 0 : -aJ.bars.barWidth / 2; if (aJ.bars.horizontal) { aQ += aY;
aV += aY + aJ.bars.barWidth } else { aK += aY;
aM += aY + aJ.bars.barWidth } } aF(aJ.xaxis, aK, aM);
aF(aJ.yaxis, aQ, aV) } c.each(m(), function(a1, a2) { if (a2.datamin == aO) { a2.datamin = null } if (a2.datamax == aI) { a2.datamax = null } }) }
function j(aB, aC) { var aD = document.createElement("canvas");
aD.className = aC;
aD.width = G;
aD.height = I; if (!aB) { c(aD).css({ position: "absolute", left: 0, top: 0 }) } c(aD).appendTo(av); if (!aD.getContext) { aD = window.G_vmlCanvasManager.initElement(aD) } aD.getContext("2d").save(); return aD }
function B() { G = av.width();
I = av.height(); if (G <= 0 || I <= 0) { throw "Invalid dimensions for plot, width = " + G + ", height = " + I } }
function g(aC) { if (aC.width != G) { aC.width = G } if (aC.height != I) { aC.height = I } var aB = aC.getContext("2d");
aB.restore();
aB.save() }
function X() { var aC, aB = av.children("canvas.base"),
aD = av.children("canvas.overlay"); if (aB.length == 0 || aD == 0) { av.html("");
av.css({ padding: 0 }); if (av.css("position") == "static") { av.css("position", "relative") } B();
az = j(true, "base");
ad = j(false, "overlay");
aC = false } else { az = aB.get(0);
ad = aD.get(0);
aC = true } H = az.getContext("2d");
A = ad.getContext("2d");
y = c([ad, az]); if (aC) { av.data("plot").shutdown();
aq.resize();
A.clearRect(0, 0, G, I);
y.unbind();
av.children().not([az, ad]).remove() } av.data("plot", aq) }
function ah() { if (O.grid.hoverable) { y.mousemove(aa);
y.mouseleave(l) } if (O.grid.clickable) { y.click(R) } an(ak.bindEvents, [y]) }
function ag() { if (M) { clearTimeout(M) } y.unbind("mousemove", aa);
y.unbind("mouseleave", l);
y.unbind("click", R);
an(ak.shutdown, [y]) }
function r(aG) {
function aC(aH) { return aH } var aF, aB, aD = aG.options.transform || aC,
aE = aG.options.inverseTransform; if (aG.direction == "x") { aF = aG.scale = h / Math.abs(aD(aG.max) - aD(aG.min));
aB = Math.min(aD(aG.max), aD(aG.min)) } else { aF = aG.scale = w / Math.abs(aD(aG.max) - aD(aG.min));
aF = -aF;
aB = Math.max(aD(aG.max), aD(aG.min)) } if (aD == aC) { aG.p2c = function(aH) { return (aH - aB) * aF } } else { aG.p2c = function(aH) { return (aD(aH) - aB) * aF } } if (!aE) { aG.c2p = function(aH) { return aB + aH / aF } } else { aG.c2p = function(aH) { return aE(aB + aH / aF) } } }
function L(aD) { var aB = aD.options,
aF, aJ = aD.ticks || [],
aI = [],
aE, aK = aB.labelWidth,
aG = aB.labelHeight,
aC;
function aH(aM, aL) { return c('<div style="position:absolute;top:-10000px;' + aL + 'font-size:smaller"><div class="' + aD.direction + "Axis " + aD.direction + aD.n + 'Axis">' + aM.join("") + "</div></div>").appendTo(av) } if (aD.direction == "x") { if (aK == null) { aK = Math.floor(G / (aJ.length > 0 ? aJ.length : 1)) } if (aG == null) { aI = []; for (aF = 0; aF < aJ.length; ++aF) { aE = aJ[aF].label; if (aE) { aI.push('<div class="tickLabel" style="float:left;width:' + aK + 'px">' + aE + "</div>") } } if (aI.length > 0) { aI.push('<div style="clear:left"></div>');
aC = aH(aI, "width:10000px;");
aG = aC.height();
aC.remove() } } } else { if (aK == null || aG == null) { for (aF = 0; aF < aJ.length; ++aF) { aE = aJ[aF].label; if (aE) { aI.push('<div class="tickLabel">' + aE + "</div>") } } if (aI.length > 0) { aC = aH(aI, ""); if (aK == null) { aK = aC.children().width() } if (aG == null) { aG = aC.find("div.tickLabel").height() } aC.remove() } } } if (aK == null) { aK = 0 } if (aG == null) { aG = 0 } aD.labelWidth = aK;
aD.labelHeight = aG }
function au(aD) { var aC = aD.labelWidth,
aL = aD.labelHeight,
aH = aD.options.position,
aF = aD.options.tickLength,
aG = O.grid.axisMargin,
aJ = O.grid.labelMargin,
aK = aD.direction == "x" ? p : aw,
aE; var aB = c.grep(aK, function(aN) { return aN && aN.options.position == aH && aN.reserveSpace }); if (c.inArray(aD, aB) == aB.length - 1) { aG = 0 } if (aF == null) { aF = "full" } var aI = c.grep(aK, function(aN) { return aN && aN.reserveSpace }); var aM = c.inArray(aD, aI) == 0; if (!aM && aF == "full") { aF = 5 } if (!isNaN(+aF)) { aJ += +aF } if (aD.direction == "x") { aL += aJ; if (aH == "bottom") { q.bottom += aL + aG;
aD.box = { top: I - q.bottom, height: aL } } else { aD.box = { top: q.top + aG, height: aL };
q.top += aL + aG } } else { aC += aJ; if (aH == "left") { aD.box = { left: q.left + aG, width: aC };
q.left += aC + aG } else { q.right += aC + aG;
aD.box = { left: G - q.right, width: aC } } } aD.position = aH;
aD.tickLength = aF;
aD.box.padding = aJ;
aD.innermost = aM }
function U(aB) { if (aB.direction == "x") { aB.box.left = q.left;
aB.box.width = h } else { aB.box.top = q.top;
aB.box.height = w } }
function t() { var aC, aE = m();
c.each(aE, function(aF, aG) { aG.show = aG.options.show; if (aG.show == null) { aG.show = aG.used } aG.reserveSpace = aG.show || aG.options.reserveSpace;
n(aG) });
allocatedAxes = c.grep(aE, function(aF) { return aF.reserveSpace });
q.left = q.right = q.top = q.bottom = 0; if (O.grid.show) { c.each(allocatedAxes, function(aF, aG) { S(aG);
P(aG);
ap(aG, aG.ticks);
L(aG) }); for (aC = allocatedAxes.length - 1; aC >= 0; --aC) { au(allocatedAxes[aC]) } var aD = O.grid.minBorderMargin; if (aD == null) { aD = 0; for (aC = 0; aC < Q.length; ++aC) { aD = Math.max(aD, Q[aC].points.radius + Q[aC].points.lineWidth / 2) } } for (var aB in q) { q[aB] += O.grid.borderWidth;
q[aB] = Math.max(aD, q[aB]) } } h = G - q.left - q.right;
w = I - q.bottom - q.top;
c.each(aE, function(aF, aG) { r(aG) }); if (O.grid.show) { c.each(allocatedAxes, function(aF, aG) { U(aG) });
k() } o() }
function n(aE) { var aF = aE.options,
aD = +(aF.min != null ? aF.min : aE.datamin),
aB = +(aF.max != null ? aF.max : aE.datamax),
aH = aB - aD; if (aH == 0) { var aC = aB == 0 ? 1 : 0.01; if (aF.min == null) { aD -= aC } if (aF.max == null || aF.min != null) { aB += aC } } else { var aG = aF.autoscaleMargin; if (aG != null) { if (aF.min == null) { aD -= aH * aG; if (aD < 0 && aE.datamin != null && aE.datamin >= 0) { aD = 0 } } if (aF.max == null) { aB += aH * aG; if (aB > 0 && aE.datamax != null && aE.datamax <= 0) { aB = 0 } } } } aE.min = aD;
aE.max = aB }
function S(aG) { var aM = aG.options; var aH; if (typeof aM.ticks == "number" && aM.ticks > 0) { aH = aM.ticks } else { aH = 0.3 * Math.sqrt(aG.direction == "x" ? G : I) } var aT = (aG.max - aG.min) / aH,
aO, aB, aN, aR, aS, aQ, aI; if (aM.mode == "time") { var aJ = { second: 1000, minute: 60 * 1000, hour: 60 * 60 * 1000, day: 24 * 60 * 60 * 1000, month: 30 * 24 * 60 * 60 * 1000, year: 365.2425 * 24 * 60 * 60 * 1000 }; var aK = [
[1, "second"],
[2, "second"],
[5, "second"],
[10, "second"],
[30, "second"],
[1, "minute"],
[2, "minute"],
[5, "minute"],
[10, "minute"],
[30, "minute"],
[1, "hour"],
[2, "hour"],
[4, "hour"],
[8, "hour"],
[12, "hour"],
[1, "day"],
[2, "day"],
[3, "day"],
[0.25, "month"],
[0.5, "month"],
[1, "month"],
[2, "month"],
[3, "month"],
[6, "month"],
[1, "year"]
]; var aC = 0; if (aM.minTickSize != null) { if (typeof aM.tickSize == "number") { aC = aM.tickSize } else { aC = aM.minTickSize[0] * aJ[aM.minTickSize[1]] } } for (var aS = 0; aS < aK.length - 1; ++aS) { if (aT < (aK[aS][0] * aJ[aK[aS][1]] + aK[aS + 1][0] * aJ[aK[aS + 1][1]]) / 2 && aK[aS][0] * aJ[aK[aS][1]] >= aC) { break } } aO = aK[aS][0];
aN = aK[aS][1]; if (aN == "year") { aQ = Math.pow(10, Math.floor(Math.log(aT / aJ.year) / Math.LN10));
aI = (aT / aJ.year) / aQ; if (aI < 1.5) { aO = 1 } else { if (aI < 3) { aO = 2 } else { if (aI < 7.5) { aO = 5 } else { aO = 10 } } } aO *= aQ } aG.tickSize = aM.tickSize || [aO, aN];
aB = function(aX) { var a2 = [],
a0 = aX.tickSize[0],
a3 = aX.tickSize[1],
a1 = new Date(aX.min); var aW = a0 * aJ[a3]; if (a3 == "second") { a1.setUTCSeconds(a(a1.getUTCSeconds(), a0)) } if (a3 == "minute") { a1.setUTCMinutes(a(a1.getUTCMinutes(), a0)) } if (a3 == "hour") { a1.setUTCHours(a(a1.getUTCHours(), a0)) } if (a3 == "month") { a1.setUTCMonth(a(a1.getUTCMonth(), a0)) } if (a3 == "year") { a1.setUTCFullYear(a(a1.getUTCFullYear(), a0)) } a1.setUTCMilliseconds(0); if (aW >= aJ.minute) { a1.setUTCSeconds(0) } if (aW >= aJ.hour) { a1.setUTCMinutes(0) } if (aW >= aJ.day) { a1.setUTCHours(0) } if (aW >= aJ.day * 4) { a1.setUTCDate(1) } if (aW >= aJ.year) { a1.setUTCMonth(0) } var a5 = 0,
a4 = Number.NaN,
aY;
do { aY = a4;
a4 = a1.getTime();
a2.push(a4); if (a3 == "month") { if (a0 < 1) { a1.setUTCDate(1); var aV = a1.getTime();
a1.setUTCMonth(a1.getUTCMonth() + 1); var aZ = a1.getTime();
a1.setTime(a4 + a5 * aJ.hour + (aZ - aV) * a0);
a5 = a1.getUTCHours();
a1.setUTCHours(0) } else { a1.setUTCMonth(a1.getUTCMonth() + a0) } } else { if (a3 == "year") { a1.setUTCFullYear(a1.getUTCFullYear() + a0) } else { a1.setTime(a4 + aW) } } } while (a4 < aX.max && a4 != aY); return a2 };
aR = function(aV, aY) { var a0 = new Date(aV); if (aM.timeformat != null) { return c.plot.formatDate(a0, aM.timeformat, aM.monthNames) } var aW = aY.tickSize[0] * aJ[aY.tickSize[1]]; var aX = aY.max - aY.min; var aZ = (aM.twelveHourClock) ? " %p" : ""; if (aW < aJ.minute) { fmt = "%h:%M:%S" + aZ } else { if (aW < aJ.day) { if (aX < 2 * aJ.day) { fmt = "%h:%M" + aZ } else { fmt = "%b %d %h:%M" + aZ } } else { if (aW < aJ.month) { fmt = "%b %d" } else { if (aW < aJ.year) { if (aX < aJ.year) { fmt = "%b" } else { fmt = "%b %y" } } else { fmt = "%y" } } } } return c.plot.formatDate(a0, fmt, aM.monthNames) } } else { var aU = aM.tickDecimals; var aP = -Math.floor(Math.log(aT) / Math.LN10); if (aU != null && aP > aU) { aP = aU } aQ = Math.pow(10, -aP);
aI = aT / aQ; if (aI < 1.5) { aO = 1 } else { if (aI < 3) { aO = 2; if (aI > 2.25 && (aU == null || aP + 1 <= aU)) { aO = 2.5;++aP } } else { if (aI < 7.5) { aO = 5 } else { aO = 10 } } } aO *= aQ; if (aM.minTickSize != null && aO < aM.minTickSize) { aO = aM.minTickSize } aG.tickDecimals = Math.max(0, aU != null ? aU : aP);
aG.tickSize = aM.tickSize || aO;
aB = function(aX) { var aZ = []; var a0 = a(aX.min, aX.tickSize),
aW = 0,
aV = Number.NaN,
aY;
do { aY = aV;
aV = a0 + aW * aX.tickSize;
aZ.push(aV);++aW } while (aV < aX.max && aV != aY); return aZ };
aR = function(aV, aW) { return aV.toFixed(aW.tickDecimals) } } if (aM.alignTicksWithAxis != null) { var aF = (aG.direction == "x" ? p : aw)[aM.alignTicksWithAxis - 1]; if (aF && aF.used && aF != aG) { var aL = aB(aG); if (aL.length > 0) { if (aM.min == null) { aG.min = Math.min(aG.min, aL[0]) } if (aM.max == null && aL.length > 1) { aG.max = Math.max(aG.max, aL[aL.length - 1]) } } aB = function(aX) { var aY = [],
aV, aW; for (aW = 0; aW < aF.ticks.length; ++aW) { aV = (aF.ticks[aW].v - aF.min) / (aF.max - aF.min);
aV = aX.min + aV * (aX.max - aX.min);
aY.push(aV) } return aY }; if (aG.mode != "time" && aM.tickDecimals == null) { var aE = Math.max(0, -Math.floor(Math.log(aT) / Math.LN10) + 1),
aD = aB(aG); if (!(aD.length > 1 && /\..*0$/.test((aD[1] - aD[0]).toFixed(aE)))) { aG.tickDecimals = aE } } } } aG.tickGenerator = aB; if (c.isFunction(aM.tickFormatter)) { aG.tickFormatter = function(aV, aW) { return "" + aM.tickFormatter(aV, aW) } } else { aG.tickFormatter = aR } }
function P(aF) { var aH = aF.options.ticks,
aG = []; if (aH == null || (typeof aH == "number" && aH > 0)) { aG = aF.tickGenerator(aF) } else { if (aH) { if (c.isFunction(aH)) { aG = aH({ min: aF.min, max: aF.max }) } else { aG = aH } } } var aE, aB;
aF.ticks = []; for (aE = 0; aE < aG.length; ++aE) { var aC = null; var aD = aG[aE]; if (typeof aD == "object") { aB = +aD[0]; if (aD.length > 1) { aC = aD[1] } } else { aB = +aD } if (aC == null) { aC = aF.tickFormatter(aB, aF) } if (!isNaN(aB)) { aF.ticks.push({ v: aB, label: aC }) } } }
function ap(aB, aC) { if (aB.options.autoscaleMargin && aC.length > 0) { if (aB.options.min == null) { aB.min = Math.min(aB.min, aC[0].v) } if (aB.options.max == null && aC.length > 1) { aB.max = Math.max(aB.max, aC[aC.length - 1].v) } } }
function W() { H.clearRect(0, 0, G, I); var aC = O.grid; if (aC.show && aC.backgroundColor) { N() } if (aC.show && !aC.aboveData) { ac() } for (var aB = 0; aB < Q.length; ++aB) { an(ak.drawSeries, [H, Q[aB]]);
d(Q[aB]) } an(ak.draw, [H]); if (aC.show && aC.aboveData) { ac() } }
function D(aB, aI) { var aE, aH, aG, aD, aF = m(); for (i = 0; i < aF.length; ++i) { aE = aF[i]; if (aE.direction == aI) { aD = aI + aE.n + "axis"; if (!aB[aD] && aE.n == 1) { aD = aI + "axis" } if (aB[aD]) { aH = aB[aD].from;
aG = aB[aD].to; break } } } if (!aB[aD]) { aE = aI == "x" ? p[0] : aw[0];
aH = aB[aI + "1"];
aG = aB[aI + "2"] } if (aH != null && aG != null && aH > aG) { var aC = aH;
aH = aG;
aG = aC } return { from: aH, to: aG, axis: aE } }
function N() { H.save();
H.translate(q.left, q.top);
H.fillStyle = am(O.grid.backgroundColor, w, 0, "rgba(255, 255, 255, 0)");
H.fillRect(0, 0, h, w);
H.restore() }
function ac() { var aF;
H.save();
H.translate(q.left, q.top); var aH = O.grid.markings; if (aH) { if (c.isFunction(aH)) { var aK = aq.getAxes();
aK.xmin = aK.xaxis.min;
aK.xmax = aK.xaxis.max;
aK.ymin = aK.yaxis.min;
aK.ymax = aK.yaxis.max;
aH = aH(aK) } for (aF = 0; aF < aH.length; ++aF) { var aD = aH[aF],
aC = D(aD, "x"),
aI = D(aD, "y"); if (aC.from == null) { aC.from = aC.axis.min } if (aC.to == null) { aC.to = aC.axis.max } if (aI.from == null) { aI.from = aI.axis.min } if (aI.to == null) { aI.to = aI.axis.max } if (aC.to < aC.axis.min || aC.from > aC.axis.max || aI.to < aI.axis.min || aI.from > aI.axis.max) { continue } aC.from = Math.max(aC.from, aC.axis.min);
aC.to = Math.min(aC.to, aC.axis.max);
aI.from = Math.max(aI.from, aI.axis.min);
aI.to = Math.min(aI.to, aI.axis.max); if (aC.from == aC.to && aI.from == aI.to) { continue } aC.from = aC.axis.p2c(aC.from);
aC.to = aC.axis.p2c(aC.to);
aI.from = aI.axis.p2c(aI.from);
aI.to = aI.axis.p2c(aI.to); if (aC.from == aC.to || aI.from == aI.to) { H.beginPath();
H.strokeStyle = aD.color || O.grid.markingsColor;
H.lineWidth = aD.lineWidth || O.grid.markingsLineWidth;
H.moveTo(aC.from, aI.from);
H.lineTo(aC.to, aI.to);
H.stroke() } else { H.fillStyle = aD.color || O.grid.markingsColor;
H.fillRect(aC.from, aI.to, aC.to - aC.from, aI.from - aI.to) } } } var aK = m(),
aM = O.grid.borderWidth; for (var aE = 0; aE < aK.length; ++aE) { var aB = aK[aE],
aG = aB.box,
aQ = aB.tickLength,
aN, aL, aP, aJ; if (!aB.show || aB.ticks.length == 0) { continue } H.strokeStyle = aB.options.tickColor || c.color.parse(aB.options.color).scale("a", 0.22).toString();
H.lineWidth = 1; if (aB.direction == "x") { aN = 0; if (aQ == "full") { aL = (aB.position == "top" ? 0 : w) } else { aL = aG.top - q.top + (aB.position == "top" ? aG.height : 0) } } else { aL = 0; if (aQ == "full") { aN = (aB.position == "left" ? 0 : h) } else { aN = aG.left - q.left + (aB.position == "left" ? aG.width : 0) } } if (!aB.innermost) { H.beginPath();
aP = aJ = 0; if (aB.direction == "x") { aP = h } else { aJ = w } if (H.lineWidth == 1) { aN = Math.floor(aN) + 0.5;
aL = Math.floor(aL) + 0.5 } H.moveTo(aN, aL);
H.lineTo(aN + aP, aL + aJ);
H.stroke() } H.beginPath(); for (aF = 0; aF < aB.ticks.length; ++aF) { var aO = aB.ticks[aF].v;
aP = aJ = 0; if (aO < aB.min || aO > aB.max || (aQ == "full" && aM > 0 && (aO == aB.min || aO == aB.max))) { continue } if (aB.direction == "x") { aN = aB.p2c(aO);
aJ = aQ == "full" ? -w : aQ; if (aB.position == "top") { aJ = -aJ } } else { aL = aB.p2c(aO);
aP = aQ == "full" ? -h : aQ; if (aB.position == "left") { aP = -aP } } if (H.lineWidth == 1) { if (aB.direction == "x") { aN = Math.floor(aN) + 0.5 } else { aL = Math.floor(aL) + 0.5 } } H.moveTo(aN, aL);
H.lineTo(aN + aP, aL + aJ) } H.stroke() } if (aM) { H.lineWidth = aM;
H.strokeStyle = O.grid.borderColor;
H.strokeRect(-aM / 2, -aM / 2, h + aM, w + aM) } H.restore() }
function k() { av.find(".tickLabels").remove(); var aG = ['<div class="tickLabels" style="font-size:smaller">']; var aJ = m(); for (var aD = 0; aD < aJ.length; ++aD) { var aC = aJ[aD],
aF = aC.box; if (!aC.show) { continue } aG.push('<div class="' + aC.direction + "Axis " + aC.direction + aC.n + 'Axis" style="color:' + aC.options.color + '">'); for (var aE = 0; aE < aC.ticks.length; ++aE) { var aH = aC.ticks[aE]; if (!aH.label || aH.v < aC.min || aH.v > aC.max) { continue } var aK = {},
aI; if (aC.direction == "x") { aI = "center";
aK.left = Math.round(q.left + aC.p2c(aH.v) - aC.labelWidth / 2); if (aC.position == "bottom") { aK.top = aF.top + aF.padding } else { aK.bottom = I - (aF.top + aF.height - aF.padding) } } else { aK.top = Math.round(q.top + aC.p2c(aH.v) - aC.labelHeight / 2); if (aC.position == "left") { aK.right = G - (aF.left + aF.width - aF.padding);
aI = "right" } else { aK.left = aF.left + aF.padding;
aI = "left" } } aK.width = aC.labelWidth; var aB = ["position:absolute", "text-align:" + aI]; for (var aL in aK) { aB.push(aL + ":" + aK[aL] + "px") } aG.push('<div class="tickLabel" style="' + aB.join(";") + '">' + aH.label + "</div>") } aG.push("</div>") } aG.push("</div>");
av.append(aG.join("")) }
function d(aB) { if (aB.lines.show) { at(aB) } if (aB.bars.show) { e(aB) } if (aB.points.show) { ao(aB) } }
function at(aE) {
function aD(aP, aQ, aI, aU, aT) { var aV = aP.points,
aJ = aP.pointsize,
aN = null,
aM = null;
H.beginPath(); for (var aO = aJ; aO < aV.length; aO += aJ) { var aL = aV[aO - aJ],
aS = aV[aO - aJ + 1],
aK = aV[aO],
aR = aV[aO + 1]; if (aL == null || aK == null) { continue } if (aS <= aR && aS < aT.min) { if (aR < aT.min) { continue } aL = (aT.min - aS) / (aR - aS) * (aK - aL) + aL;
aS = aT.min } else { if (aR <= aS && aR < aT.min) { if (aS < aT.min) { continue } aK = (aT.min - aS) / (aR - aS) * (aK - aL) + aL;
aR = aT.min } } if (aS >= aR && aS > aT.max) { if (aR > aT.max) { continue } aL = (aT.max - aS) / (aR - aS) * (aK - aL) + aL;
aS = aT.max } else { if (aR >= aS && aR > aT.max) { if (aS > aT.max) { continue } aK = (aT.max - aS) / (aR - aS) * (aK - aL) + aL;
aR = aT.max } } if (aL <= aK && aL < aU.min) { if (aK < aU.min) { continue } aS = (aU.min - aL) / (aK - aL) * (aR - aS) + aS;
aL = aU.min } else { if (aK <= aL && aK < aU.min) { if (aL < aU.min) { continue } aR = (aU.min - aL) / (aK - aL) * (aR - aS) + aS;
aK = aU.min } } if (aL >= aK && aL > aU.max) { if (aK > aU.max) { continue } aS = (aU.max - aL) / (aK - aL) * (aR - aS) + aS;
aL = aU.max } else { if (aK >= aL && aK > aU.max) { if (aL > aU.max) { continue } aR = (aU.max - aL) / (aK - aL) * (aR - aS) + aS;
aK = aU.max } } if (aL != aN || aS != aM) { H.moveTo(aU.p2c(aL) + aQ, aT.p2c(aS) + aI) } aN = aK;
aM = aR;
H.lineTo(aU.p2c(aK) + aQ, aT.p2c(aR) + aI) } H.stroke() }
function aF(aI, aQ, aP) { var aW = aI.points,
aV = aI.pointsize,
aN = Math.min(Math.max(0, aP.min), aP.max),
aX = 0,
aU, aT = false,
aM = 1,
aL = 0,
aR = 0; while (true) { if (aV > 0 && aX > aW.length + aV) { break } aX += aV; var aZ = aW[aX - aV],
aK = aW[aX - aV + aM],
aY = aW[aX],
aJ = aW[aX + aM]; if (aT) { if (aV > 0 && aZ != null && aY == null) { aR = aX;
aV = -aV;
aM = 2; continue } if (aV < 0 && aX == aL + aV) { H.fill();
aT = false;
aV = -aV;
aM = 1;
aX = aL = aR + aV; continue } } if (aZ == null || aY == null) { continue } if (aZ <= aY && aZ < aQ.min) { if (aY < aQ.min) { continue } aK = (aQ.min - aZ) / (aY - aZ) * (aJ - aK) + aK;
aZ = aQ.min } else { if (aY <= aZ && aY < aQ.min) { if (aZ < aQ.min) { continue } aJ = (aQ.min - aZ) / (aY - aZ) * (aJ - aK) + aK;
aY = aQ.min } } if (aZ >= aY && aZ > aQ.max) { if (aY > aQ.max) { continue } aK = (aQ.max - aZ) / (aY - aZ) * (aJ - aK) + aK;
aZ = aQ.max } else { if (aY >= aZ && aY > aQ.max) { if (aZ > aQ.max) { continue } aJ = (aQ.max - aZ) / (aY - aZ) * (aJ - aK) + aK;
aY = aQ.max } } if (!aT) { H.beginPath();
H.moveTo(aQ.p2c(aZ), aP.p2c(aN));
aT = true } if (aK >= aP.max && aJ >= aP.max) { H.lineTo(aQ.p2c(aZ), aP.p2c(aP.max));
H.lineTo(aQ.p2c(aY), aP.p2c(aP.max)); continue } else { if (aK <= aP.min && aJ <= aP.min) { H.lineTo(aQ.p2c(aZ), aP.p2c(aP.min));
H.lineTo(aQ.p2c(aY), aP.p2c(aP.min)); continue } } var aO = aZ,
aS = aY; if (aK <= aJ && aK < aP.min && aJ >= aP.min) { aZ = (aP.min - aK) / (aJ - aK) * (aY - aZ) + aZ;
aK = aP.min } else { if (aJ <= aK && aJ < aP.min && aK >= aP.min) { aY = (aP.min - aK) / (aJ - aK) * (aY - aZ) + aZ;
aJ = aP.min } } if (aK >= aJ && aK > aP.max && aJ <= aP.max) { aZ = (aP.max - aK) / (aJ - aK) * (aY - aZ) + aZ;
aK = aP.max } else { if (aJ >= aK && aJ > aP.max && aK <= aP.max) { aY = (aP.max - aK) / (aJ - aK) * (aY - aZ) + aZ;
aJ = aP.max } } if (aZ != aO) { H.lineTo(aQ.p2c(aO), aP.p2c(aK)) } H.lineTo(aQ.p2c(aZ), aP.p2c(aK));
H.lineTo(aQ.p2c(aY), aP.p2c(aJ)); if (aY != aS) { H.lineTo(aQ.p2c(aY), aP.p2c(aJ));
H.lineTo(aQ.p2c(aS), aP.p2c(aJ)) } } } H.save();
H.translate(q.left, q.top);
H.lineJoin = "round"; var aG = aE.lines.lineWidth,
aB = aE.shadowSize; if (aG > 0 && aB > 0) { H.lineWidth = aB;
H.strokeStyle = "rgba(0,0,0,0.1)"; var aH = Math.PI / 18;
aD(aE.datapoints, Math.sin(aH) * (aG / 2 + aB / 2), Math.cos(aH) * (aG / 2 + aB / 2), aE.xaxis, aE.yaxis);
H.lineWidth = aB / 2;
aD(aE.datapoints, Math.sin(aH) * (aG / 2 + aB / 4), Math.cos(aH) * (aG / 2 + aB / 4), aE.xaxis, aE.yaxis) } H.lineWidth = aG;
H.strokeStyle = aE.color; var aC = ae(aE.lines, aE.color, 0, w); if (aC) { H.fillStyle = aC;
aF(aE.datapoints, aE.xaxis, aE.yaxis) } if (aG > 0) { aD(aE.datapoints, 0, 0, aE.xaxis, aE.yaxis) } H.restore() }
function ao(aE) {
function aH(aN, aM, aU, aK, aS, aT, aQ, aJ) { var aR = aN.points,
aI = aN.pointsize; for (var aL = 0; aL < aR.length; aL += aI) { var aP = aR[aL],
aO = aR[aL + 1]; if (aP == null || aP < aT.min || aP > aT.max || aO < aQ.min || aO > aQ.max) { continue } H.beginPath();
aP = aT.p2c(aP);
aO = aQ.p2c(aO) + aK; if (aJ == "circle") { H.arc(aP, aO, aM, 0, aS ? Math.PI : Math.PI * 2, false) } else { aJ(H, aP, aO, aM, aS) } H.closePath(); if (aU) { H.fillStyle = aU;
H.fill() } H.stroke() } } H.save();
H.translate(q.left, q.top); var aG = aE.points.lineWidth,
aC = aE.shadowSize,
aB = aE.points.radius,
aF = aE.points.symbol; if (aG > 0 && aC > 0) { var aD = aC / 2;
H.lineWidth = aD;
H.strokeStyle = "rgba(0,0,0,0.1)";
aH(aE.datapoints, aB, null, aD + aD / 2, true, aE.xaxis, aE.yaxis, aF);
H.strokeStyle = "rgba(0,0,0,0.2)";
aH(aE.datapoints, aB, null, aD / 2, true, aE.xaxis, aE.yaxis, aF) } H.lineWidth = aG;
H.strokeStyle = aE.color;
aH(aE.datapoints, aB, ae(aE.points, aE.color), 0, false, aE.xaxis, aE.yaxis, aF);
H.restore() }
function E(aN, aM, aV, aI, aQ, aF, aD, aL, aK, aU, aR, aC) { var aE, aT, aJ, aP, aG, aB, aO, aH, aS; if (aR) { aH = aB = aO = true;
aG = false;
aE = aV;
aT = aN;
aP = aM + aI;
aJ = aM + aQ; if (aT < aE) { aS = aT;
aT = aE;
aE = aS;
aG = true;
aB = false } } else { aG = aB = aO = true;
aH = false;
aE = aN + aI;
aT = aN + aQ;
aJ = aV;
aP = aM; if (aP < aJ) { aS = aP;
aP = aJ;
aJ = aS;
aH = true;
aO = false } } if (aT < aL.min || aE > aL.max || aP < aK.min || aJ > aK.max) { return } if (aE < aL.min) { aE = aL.min;
aG = false } if (aT > aL.max) { aT = aL.max;
aB = false } if (aJ < aK.min) { aJ = aK.min;
aH = false } if (aP > aK.max) { aP = aK.max;
aO = false } aE = aL.p2c(aE);
aJ = aK.p2c(aJ);
aT = aL.p2c(aT);
aP = aK.p2c(aP); if (aD) { aU.beginPath();
aU.moveTo(aE, aJ);
aU.lineTo(aE, aP);
aU.lineTo(aT, aP);
aU.lineTo(aT, aJ);
aU.fillStyle = aD(aJ, aP);
aU.fill() } if (aC > 0 && (aG || aB || aO || aH)) { aU.beginPath();
aU.moveTo(aE, aJ + aF); if (aG) { aU.lineTo(aE, aP + aF) } else { aU.moveTo(aE, aP + aF) } if (aO) { aU.lineTo(aT, aP + aF) } else { aU.moveTo(aT, aP + aF) } if (aB) { aU.lineTo(aT, aJ + aF) } else { aU.moveTo(aT, aJ + aF) } if (aH) { aU.lineTo(aE, aJ + aF) } else { aU.moveTo(aE, aJ + aF) } aU.stroke() } }
function e(aD) {
function aC(aJ, aI, aL, aG, aK, aN, aM) { var aO = aJ.points,
aF = aJ.pointsize; for (var aH = 0; aH < aO.length; aH += aF) { if (aO[aH] == null) { continue } E(aO[aH], aO[aH + 1], aO[aH + 2], aI, aL, aG, aK, aN, aM, H, aD.bars.horizontal, aD.bars.lineWidth) } } H.save();
H.translate(q.left, q.top);
H.lineWidth = aD.bars.lineWidth;
H.strokeStyle = aD.color; var aB = aD.bars.align == "left" ? 0 : -aD.bars.barWidth / 2; var aE = aD.bars.fill ? function(aF, aG) { return ae(aD.bars, aD.color, aF, aG) } : null;
aC(aD.datapoints, aB, aB + aD.bars.barWidth, 0, aE, aD.xaxis, aD.yaxis);
H.restore() }
function ae(aD, aB, aC, aF) { var aE = aD.fill; if (!aE) { return null } if (aD.fillColor) { return am(aD.fillColor, aC, aF, aB) } var aG = c.color.parse(aB);
aG.a = typeof aE == "number" ? aE : 0.4;
aG.normalize(); return aG.toString() }
function o() { av.find(".legend").remove(); if (!O.legend.show) { return } var aH = [],
aF = false,
aN = O.legend.labelFormatter,
aM, aJ; for (var aE = 0; aE < Q.length; ++aE) { aM = Q[aE];
aJ = aM.label; if (!aJ) { continue } if (aE % O.legend.noColumns == 0) { if (aF) { aH.push("</tr>") } aH.push("<tr>");
aF = true } if (aN) { aJ = aN(aJ, aM) } aH.push('<td class="legendColorBox"><div style="border:1px solid ' + O.legend.labelBoxBorderColor + ';padding:1px"><div style="width:4px;height:0;border:5px solid ' + aM.color + ';overflow:hidden"></div></div></td><td class="legendLabel">' + aJ + "</td>") } if (aF) { aH.push("</tr>") } if (aH.length == 0) { return } var aL = '<table style="font-size:smaller;color:' + O.grid.color + '">' + aH.join("") + "</table>"; if (O.legend.container != null) { c(O.legend.container).html(aL) } else { var aI = "",
aC = O.legend.position,
aD = O.legend.margin; if (aD[0] == null) { aD = [aD, aD] } if (aC.charAt(0) == "n") { aI += "top:" + (aD[1] + q.top) + "px;" } else { if (aC.charAt(0) == "s") { aI += "bottom:" + (aD[1] + q.bottom) + "px;" } } if (aC.charAt(1) == "e") { aI += "right:" + (aD[0] + q.right) + "px;" } else { if (aC.charAt(1) == "w") { aI += "left:" + (aD[0] + q.left) + "px;" } } var aK = c('<div class="legend">' + aL.replace('style="', 'style="position:absolute;' + aI + ";") + "</div>").appendTo(av); if (O.legend.backgroundOpacity != 0) { var aG = O.legend.backgroundColor; if (aG == null) { aG = O.grid.backgroundColor; if (aG && typeof aG == "string") { aG = c.color.parse(aG) } else { aG = c.color.extract(aK, "background-color") } aG.a = 1;
aG = aG.toString() } var aB = aK.children();
c('<div style="position:absolute;width:' + aB.width() + "px;height:" + aB.height() + "px;" + aI + "background-color:" + aG + ';"> </div>').prependTo(aK).css("opacity", O.legend.backgroundOpacity) } } } var ab = [],
M = null;
function K(aI, aG, aD) { var aO = O.grid.mouseActiveRadius,
a0 = aO * aO + 1,
aY = null,
aR = false,
aW, aU; for (aW = Q.length - 1; aW >= 0; --aW) { if (!aD(Q[aW])) { continue } var aP = Q[aW],
aH = aP.xaxis,
aF = aP.yaxis,
aV = aP.datapoints.points,
aT = aP.datapoints.pointsize,
aQ = aH.c2p(aI),
aN = aF.c2p(aG),
aC = aO / aH.scale,
aB = aO / aF.scale; if (aH.options.inverseTransform) { aC = Number.MAX_VALUE } if (aF.options.inverseTransform) { aB = Number.MAX_VALUE } if (aP.lines.show || aP.points.show) { for (aU = 0; aU < aV.length; aU += aT) { var aK = aV[aU],
aJ = aV[aU + 1]; if (aK == null) { continue } if (aK - aQ > aC || aK - aQ < -aC || aJ - aN > aB || aJ - aN < -aB) { continue } var aM = Math.abs(aH.p2c(aK) - aI),
aL = Math.abs(aF.p2c(aJ) - aG),
aS = aM * aM + aL * aL; if (aS < a0) { a0 = aS;
aY = [aW, aU / aT] } } } if (aP.bars.show && !aY) { var aE = aP.bars.align == "left" ? 0 : -aP.bars.barWidth / 2,
aX = aE + aP.bars.barWidth; for (aU = 0; aU < aV.length; aU += aT) { var aK = aV[aU],
aJ = aV[aU + 1],
aZ = aV[aU + 2]; if (aK == null) { continue } if (Q[aW].bars.horizontal ? (aQ <= Math.max(aZ, aK) && aQ >= Math.min(aZ, aK) && aN >= aJ + aE && aN <= aJ + aX) : (aQ >= aK + aE && aQ <= aK + aX && aN >= Math.min(aZ, aJ) && aN <= Math.max(aZ, aJ))) { aY = [aW, aU / aT] } } } } if (aY) { aW = aY[0];
aU = aY[1];
aT = Q[aW].datapoints.pointsize; return { datapoint: Q[aW].datapoints.points.slice(aU * aT, (aU + 1) * aT), dataIndex: aU, series: Q[aW], seriesIndex: aW } } return null }
function aa(aB) { if (O.grid.hoverable) { u("plothover", aB, function(aC) { return aC.hoverable != false }) } }
function l(aB) { if (O.grid.hoverable) { u("plothover", aB, function(aC) { return false }) } }
function R(aB) { u("plotclick", aB, function(aC) { return aC.clickable != false }) }
function u(aC, aB, aD) { var aE = y.offset(),
aH = aB.pageX - aE.left - q.left,
aF = aB.pageY - aE.top - q.top,
aJ = C({ left: aH, top: aF });
aJ.pageX = aB.pageX;
aJ.pageY = aB.pageY; var aK = K(aH, aF, aD); if (aK) { aK.pageX = parseInt(aK.series.xaxis.p2c(aK.datapoint[0]) + aE.left + q.left);
aK.pageY = parseInt(aK.series.yaxis.p2c(aK.datapoint[1]) + aE.top + q.top) } if (O.grid.autoHighlight) { for (var aG = 0; aG < ab.length; ++aG) { var aI = ab[aG]; if (aI.auto == aC && !(aK && aI.series == aK.series && aI.point[0] == aK.datapoint[0] && aI.point[1] == aK.datapoint[1])) { T(aI.series, aI.point) } } if (aK) { x(aK.series, aK.datapoint, aC) } } av.trigger(aC, [aJ, aK]) }
function f() { if (!M) { M = setTimeout(s, 30) } }
function s() { M = null;
A.save();
A.clearRect(0, 0, G, I);
A.translate(q.left, q.top); var aC, aB; for (aC = 0; aC < ab.length; ++aC) { aB = ab[aC]; if (aB.series.bars.show) { v(aB.series, aB.point) } else { ay(aB.series, aB.point) } } A.restore();
an(ak.drawOverlay, [A]) }
function x(aD, aB, aF) { if (typeof aD == "number") { aD = Q[aD] } if (typeof aB == "number") { var aE = aD.datapoints.pointsize;
aB = aD.datapoints.points.slice(aE * aB, aE * (aB + 1)) } var aC = al(aD, aB); if (aC == -1) { ab.push({ series: aD, point: aB, auto: aF });
f() } else { if (!aF) { ab[aC].auto = false } } }
function T(aD, aB) { if (aD == null && aB == null) { ab = [];
f() } if (typeof aD == "number") { aD = Q[aD] } if (typeof aB == "number") { aB = aD.data[aB] } var aC = al(aD, aB); if (aC != -1) { ab.splice(aC, 1);
f() } }
function al(aD, aE) { for (var aB = 0; aB < ab.length; ++aB) { var aC = ab[aB]; if (aC.series == aD && aC.point[0] == aE[0] && aC.point[1] == aE[1]) { return aB } } return -1 }
function ay(aE, aD) { var aC = aD[0],
aI = aD[1],
aH = aE.xaxis,
aG = aE.yaxis; if (aC < aH.min || aC > aH.max || aI < aG.min || aI > aG.max) { return } var aF = aE.points.radius + aE.points.lineWidth / 2;
A.lineWidth = aF;
A.strokeStyle = c.color.parse(aE.color).scale("a", 0.5).toString(); var aB = 1.5 * aF,
aC = aH.p2c(aC),
aI = aG.p2c(aI);
A.beginPath(); if (aE.points.symbol == "circle") { A.arc(aC, aI, aB, 0, 2 * Math.PI, false) } else { aE.points.symbol(A, aC, aI, aB, false) } A.closePath();
A.stroke() }
function v(aE, aB) { A.lineWidth = aE.bars.lineWidth;
A.strokeStyle = c.color.parse(aE.color).scale("a", 0.5).toString(); var aD = c.color.parse(aE.color).scale("a", 0.5).toString(); var aC = aE.bars.align == "left" ? 0 : -aE.bars.barWidth / 2;
E(aB[0], aB[1], aB[2] || 0, aC, aC + aE.bars.barWidth, 0, function() { return aD }, aE.xaxis, aE.yaxis, A, aE.bars.horizontal, aE.bars.lineWidth) }
function am(aJ, aB, aH, aC) { if (typeof aJ == "string") { return aJ } else { var aI = H.createLinearGradient(0, aH, 0, aB); for (var aE = 0, aD = aJ.colors.length; aE < aD; ++aE) { var aF = aJ.colors[aE]; if (typeof aF != "string") { var aG = c.color.parse(aC); if (aF.brightness != null) { aG = aG.scale("rgb", aF.brightness) } if (aF.opacity != null) { aG.a *= aF.opacity } aF = aG.toString() } aI.addColorStop(aE / (aD - 1), aF) } return aI } } } c.plot = function(g, e, d) { var f = new b(c(g), e, d, c.plot.plugins); return f };
c.plot.version = "0.7";
c.plot.plugins = [];
c.plot.formatDate = function(l, f, h) { var o = function(d) { d = "" + d; return d.length == 1 ? "0" + d : d }; var e = []; var p = false,
j = false; var n = l.getUTCHours(); var k = n < 12; if (h == null) { h = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] } if (f.search(/%p|%P/) != -1) { if (n > 12) { n = n - 12 } else { if (n == 0) { n = 12 } } } for (var g = 0; g < f.length; ++g) { var m = f.charAt(g); if (p) { switch (m) {
case "h":
m = "" + n; break;
case "H":
m = o(n); break;
case "M":
m = o(l.getUTCMinutes()); break;
case "S":
m = o(l.getUTCSeconds()); break;
case "d":
m = "" + l.getUTCDate(); break;
case "m":
m = "" + (l.getUTCMonth() + 1); break;
case "y":
m = "" + l.getUTCFullYear(); break;
case "b":
m = "" + h[l.getUTCMonth()]; break;
case "p":
m = (k) ? ("am") : ("pm"); break;
case "P":
m = (k) ? ("AM") : ("PM"); break;
case "0":
m = "";
j = true; break } if (m && j) { m = o(m);
j = false } e.push(m); if (!j) { p = false } } else { if (m == "%") { p = true } else { e.push(m) } } } return e.join("") };
function a(e, d) { return d * Math.floor(e / d) } })(jQuery);
|
PypiClean
|
/get_pybrowser-0.2.0-py3-none-any.whl/pybrowser/log_adapter.py
|
import logging
from logging.handlers import RotatingFileHandler
import os
import sys
from functools import lru_cache
from .constants import CONSTANTS
from .common_utils import get_user_home_dir, make_dir
_CURRENT_LOGGER = None
_DEFAULT_LOGGER = CONSTANTS.DEFAULT_LOGGER
def _default_handler(level=logging.DEBUG):
global _DEFAULT_LOGGER
MAX_SIZE_BYTES = 1000000
BACKUP_COUNT = 2
filename = f"{_DEFAULT_LOGGER}.log"
final_path = log_path()
p = os.path.abspath(final_path)
p = os.path.join(p, filename)
h = RotatingFileHandler(p, maxBytes=MAX_SIZE_BYTES, backupCount=BACKUP_COUNT)
h.setLevel(level)
h.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
return h
def log_path():
default_path = _get_default_path()
given_path = CONSTANTS.DEFAULT_LOGGER_PATH
final_path = given_path or default_path
return final_path
def _get_default_path():
default_path = CONSTANTS.DIR_PATH or get_user_home_dir()
#default_path = default_path or os.path.dirname(sys.argv[0])
default_path = os.path.join(default_path, CONSTANTS.DIR_NAME, "logs")
make_dir(default_path)
return default_path
def _logger_has_handler(logger):
level = logger.getEffectiveLevel()
current = logger
while current:
if any(h.level <= level for h in current.handlers):
return True
if not current.propagate:
break
current = current.parent
return False
@lru_cache(maxsize=10)
def get_logger(logger_name=None):
global _CURRENT_LOGGER, _DEFAULT_LOGGER
if not logger_name:
if _CURRENT_LOGGER:
logger_name = _CURRENT_LOGGER
else:
logger_name = _DEFAULT_LOGGER
logger = logging.getLogger(logger_name)
_CURRENT_LOGGER = logger_name
if logger.level == logging.NOTSET:
logger.setLevel(logging.DEBUG)
if _logger_has_handler(logger):
return logger
h = _default_handler(logger.level)
logger.addHandler(h)
return logger
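# Example usage: a minimal sketch of how this module is typically driven.
# get_logger() is memoized with lru_cache, so repeated calls with the same
# (or no) name hand back the same configured logger instead of stacking
# duplicate handlers onto it.
if __name__ == "__main__":
    logger = get_logger()              # creates/configures the default logger
    logger.info("written via the rotating file handler")
    assert get_logger() is logger      # cache hit: same object, no new handler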
|
PypiClean
|
/fds.sdk.PAEngine-0.21.10-py3-none-any.whl/fds/sdk/PAEngine/model/unlinked_pa_template_category_and_type_details.py
|
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.PAEngine.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.PAEngine.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.PAEngine.model.unlinked_pa_template_column_details import UnlinkedPATemplateColumnDetails
from fds.sdk.PAEngine.model.unlinked_pa_template_group_details import UnlinkedPATemplateGroupDetails
globals()['UnlinkedPATemplateColumnDetails'] = UnlinkedPATemplateColumnDetails
globals()['UnlinkedPATemplateGroupDetails'] = UnlinkedPATemplateGroupDetails
class UnlinkedPATemplateCategoryAndTypeDetails(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str, none_type,), # noqa: E501
'columns': ([UnlinkedPATemplateColumnDetails], none_type,), # noqa: E501
'groups': ([UnlinkedPATemplateGroupDetails], none_type,), # noqa: E501
'snapshot': (bool,), # noqa: E501
'category': (str, none_type,), # noqa: E501
'name': (str, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'columns': 'columns', # noqa: E501
'groups': 'groups', # noqa: E501
'snapshot': 'snapshot', # noqa: E501
'category': 'category', # noqa: E501
'name': 'name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""UnlinkedPATemplateCategoryAndTypeDetails - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str, none_type): Type Id. [optional] # noqa: E501
columns ([UnlinkedPATemplateColumnDetails], none_type): List of default columns. [optional] # noqa: E501
groups ([UnlinkedPATemplateGroupDetails], none_type): List of default groupings. [optional] # noqa: E501
snapshot (bool): Snapshot. [optional] # noqa: E501
category (str, none_type): Unlinked template category. [optional] # noqa: E501
name (str, none_type): Unlinked template type. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""UnlinkedPATemplateCategoryAndTypeDetails - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str, none_type): Type Id. [optional] # noqa: E501
columns ([UnlinkedPATemplateColumnDetails], none_type): List of default columns. [optional] # noqa: E501
groups ([UnlinkedPATemplateGroupDetails], none_type): List of default groupings. [optional] # noqa: E501
snapshot (bool): Snapshot. [optional] # noqa: E501
category (str, none_type): Unlinked template category. [optional] # noqa: E501
name (str, none_type): Unlinked template type. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/js.amcharts-3.3.1.tar.gz/js.amcharts-3.3.1/js/amcharts/resources/themes/light.js
|
AmCharts.themes.light = {
AmChart: {
color: "#000000"
},
AmCoordinateChart: {
colors: ["#67b7dc", "#fdd400", "#84b761", "#cc4748", "#cd82ad", "#2f4074", "#448e4d", "#b7b83f", "#b9783f", "#b93e3d", "#913167"]
},
AmPieChart: {
colors: ["#67b7dc", "#fdd400", "#84b761", "#cc4748", "#cd82ad", "#2f4074", "#448e4d", "#b7b83f", "#b9783f", "#b93e3d", "#913167"]
},
AmStockChart: {
colors: ["#67b7dc", "#fdd400", "#84b761", "#cc4748", "#cd82ad", "#2f4074", "#448e4d", "#b7b83f", "#b9783f", "#b93e3d", "#913167"]
},
AmSlicedChart: {
outlineAlpha: 1,
outlineThickness: 2,
labelTickColor: "#000000",
labelTickAlpha: 0.3
},
AmRectangularChart: {
zoomOutButtonColor: '#000000',
zoomOutButtonRollOverAlpha: 0.15,
zoomOutButtonImage: "lens.png"
},
AxisBase: {
axisColor: "#000000",
axisAlpha: 0.3,
gridAlpha: 0.1,
gridColor: "#000000"
},
ChartScrollbar: {
backgroundColor: "#000000",
backgroundAlpha: 0.2,
graphFillAlpha: 0.5,
graphLineAlpha: 0,
selectedBackgroundColor: "#FFFFFF",
selectedBackgroundAlpha: 0.25,
gridAlpha: 0.15
},
ChartCursor: {
cursorColor: "#000000",
color: "#FFFFFF",
cursorAlpha: 0.5
},
AmLegend: {
color: "#000000"
},
AmGraph: {
lineAlpha: 0.9
},
GaugeArrow: {
color: "#000000",
alpha: 0.8,
nailAlpha: 0,
innerRadius: "40%",
nailRadius: 15,
startWidth: 15,
borderAlpha: 0.8,
nailBorderAlpha: 0
},
GaugeAxis: {
tickColor: "#000000",
tickAlpha: 1,
tickLength: 15,
minorTickLength: 8,
axisThickness: 3,
axisColor: '#000000',
axisAlpha: 1,
bandAlpha: 0.8
},
TrendLine: {
lineColor: "#c03246",
lineAlpha: 0.8
},
// ammap
AreasSettings: {
alpha: 0.8,
color: "#000000",
colorSolid: "#000000",
unlistedAreasAlpha: 0.4,
unlistedAreasColor: "#000000",
outlineColor: "#FFFFFF",
outlineAlpha: 0.5,
outlineThickness: 0.5,
rollOverColor: "#3c5bdc",
rollOverOutlineColor: "#FFFFFF",
selectedOutlineColor: "#FFFFFF",
selectedColor: "#f15135",
unlistedAreasOutlineColor: "#FFFFFF",
unlistedAreasOutlineAlpha: 0.5
},
LinesSettings: {
color: "#000000",
alpha: 0.8
},
ImagesSettings: {
alpha: 0.8,
labelColor: "#000000",
color: "#000000",
labelRollOverColor: "#3c5bdc"
},
ZoomControl: {
buttonRollOverColor: "#3c5bdc",
buttonFillColor: "#f15135",
buttonFillAlpha: 0.8,
buttonBorderColor: "#000000",
gridBackgroundColor: "#000000",
gridAlpha: 0.8
},
SmallMap: {
mapColor: "#000000",
rectangleColor: "#f15135",
backgroundColor: "#FFFFFF",
backgroundAlpha: 0.7,
borderThickness: 1,
borderAlpha: 0.8
},
// the defaults below are set using CSS syntax, you can use any existing css property
// if you don't use Stock chart, you can delete lines below
PeriodSelector: {
color: "#000000"
},
PeriodButton: {
color: "#000000",
backgroundColor: "#FFFFFF",
borderStyle: "solid",
borderColor: "#a9a9a9",
borderWidth: "1px",
MozBorderRadius: "5px",
borderRadius: "5px",
margin: "1px",
outline: "none"
},
PeriodButtonSelected: {
color: "#000000",
backgroundColor: "#b9cdf5",
borderStyle: "solid",
borderColor: "#b9cdf5",
borderWidth: "1px",
MozBorderRadius: "5px",
borderRadius: "5px",
margin: "1px",
outline: "none"
},
PeriodInputField: {
background: "transparent",
borderStyle: "solid",
borderColor: "#a9a9a9",
borderWidth: "1px",
outline: "none"
},
DataSetSelector: {
selectedBackgroundColor: "#b9cdf5",
rollOverBackgroundColor: "#a8b0e4"
},
DataSetCompareList: {
borderStyle: "solid",
borderColor: "#a9a9a9",
borderWidth: "1px"
},
DataSetSelect: {
borderStyle: "solid",
borderColor: "#a9a9a9",
borderWidth: "1px",
outline: "none"
}
};
|
PypiClean
|
/pyramid_storage-1.3.0-cp3-none-any.whl/pyramid_storage/s3.py
|
import os
import mimetypes
import urllib.parse
from pyramid.settings import asbool
from zope.interface import implementer
from . import utils
from .exceptions import FileNotAllowed
from .extensions import resolve_extensions
from .interfaces import IFileStorage
from .registry import register_file_storage_impl
def includeme(config):
impl = S3FileStorage.from_settings(
config.registry.settings, prefix='storage.'
)
register_file_storage_impl(config, impl)
@implementer(IFileStorage)
class S3FileStorage(object):
@classmethod
def from_settings(cls, settings, prefix):
options = (
('aws.bucket_name', True, None),
('aws.acl', False, 'public-read'),
('base_url', False, ''),
('extensions', False, 'default'),
# S3 Connection options.
('aws.access_key', False, None),
('aws.secret_key', False, None),
('aws.use_path_style', False, False),
('aws.is_secure', False, True),
('aws.host', False, None),
('aws.port', False, None),
('aws.region', False, None),
('aws.num_retries', False, 1),
('aws.timeout', False, 5),
)
kwargs = utils.read_settings(settings, options, prefix)
kwargs = dict([(k.replace('aws.', ''), v) for k, v in kwargs.items()])
kwargs['aws_access_key_id'] = kwargs.pop('access_key')
kwargs['aws_secret_access_key'] = kwargs.pop('secret_key')
return cls(**kwargs)
def __init__(self, bucket_name, acl=None, base_url='',
extensions='default', **conn_options):
self.bucket_name = bucket_name
self.acl = acl
self.base_url = base_url
self.extensions = resolve_extensions(extensions)
self.conn_options = conn_options
def get_connection(self):
try:
import boto
except ImportError:
raise RuntimeError("You must have boto installed to use s3")
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3 import connect_to_region
options = self.conn_options.copy()
options['is_secure'] = asbool(options['is_secure'])
if options['port']:
options['port'] = int(options['port'])
else:
del options['port']
if not options['host']:
del options['host']
if asbool(options.pop('use_path_style')):
options['calling_format'] = OrdinaryCallingFormat()
num_retries = int(options.pop('num_retries'))
timeout = float(options.pop('timeout'))
region = options.pop('region')
        if region:
            # 'host'/'port' may already have been removed above, so pop
            # defensively instead of del to avoid a KeyError
            options.pop('host', None)
            options.pop('port', None)
conn = connect_to_region(region, **options)
else:
conn = boto.connect_s3(**options)
conn.num_retries = num_retries
conn.http_connection_kwargs['timeout'] = timeout
return conn
def get_bucket(self, bucket_name=None):
return self.get_connection().get_bucket(bucket_name or self.bucket_name)
def url(self, filename):
"""Returns entire URL of the filename, joined to the base_url
:param filename: base name of file
"""
return urllib.parse.urljoin(self.base_url, filename)
def exists(self, filename, bucket_name=None):
return self.get_bucket(bucket_name).new_key(filename).exists()
def delete(self, filename, bucket_name=None):
"""Deletes the filename. Filename is resolved with the
absolute path based on base_path. If file does not exist,
returns **False**, otherwise **True**
:param filename: base name of file
:param bucket_name: name of the bucket, if not default
"""
self.get_bucket(bucket_name).delete_key(filename)
def filename_allowed(self, filename, extensions=None):
"""Checks if a filename has an allowed extension
:param filename: base name of file
:param extensions: iterable of extensions (or self.extensions)
"""
_, ext = os.path.splitext(filename)
return self.extension_allowed(ext, extensions)
def file_allowed(self, fs, extensions=None):
"""Checks if a file can be saved, based on extensions
:param fs: **cgi.FieldStorage** object or similar
:param extensions: iterable of extensions (or self.extensions)
"""
return self.filename_allowed(fs.filename, extensions)
def extension_allowed(self, ext, extensions=None):
"""Checks if an extension is permitted. Both e.g. ".jpg" and
"jpg" can be passed in. Extension lookup is case-insensitive.
:param extensions: iterable of extensions (or self.extensions)
"""
extensions = extensions or self.extensions
if not extensions:
return True
if ext.startswith('.'):
ext = ext[1:]
return ext.lower() in extensions
def save(self, fs, *args, **kwargs):
"""Saves contents of a **cgi.FieldStorage** object to the file system.
Returns modified filename(including folder).
Returns the resolved filename, i.e. the folder + (modified/randomized)
filename.
:param fs: **cgi.FieldStorage** object (or similar)
:param folder: relative path of sub-folder
:param randomize: randomize the filename
:param extensions: iterable of allowed extensions, if not default
:param acl: ACL policy (if None then uses default)
:param replace: replace existing key
:param headers: dict of s3 request headers
:returns: modified filename
"""
return self.save_file(fs.file, fs.filename, *args, **kwargs)
def save_filename(self, filename, *args, **kwargs):
"""Saves a filename in local filesystem to the uploads location.
Returns the resolved filename, i.e. the folder +
the (randomized/incremented) base name.
:param filename: local filename
:param folder: relative path of sub-folder
:param randomize: randomize the filename
:param extensions: iterable of allowed extensions, if not default
:param acl: ACL policy (if None then uses default)
:param replace: replace existing key
:param headers: dict of s3 request headers
:returns: modified filename
"""
return self.save_file(open(filename, "rb"), filename, *args, **kwargs)
def save_file(self, file, filename, folder=None, bucket_name=None, randomize=False,
extensions=None, acl=None, replace=False, headers=None):
"""
        :param file: file-like object open for reading
        :param filename: base name used to derive the S3 key
:param folder: relative path of sub-folder
:param bucket_name: name of the bucket, if not default
:param randomize: randomize the filename
:param extensions: iterable of allowed extensions, if not default
:param acl: ACL policy (if None then uses default)
:param replace: replace existing key
:param headers: dict of s3 request headers
:returns: modified filename
"""
acl = acl or self.acl
headers = headers or {}
extensions = extensions or self.extensions
if not self.filename_allowed(filename, extensions):
raise FileNotAllowed()
filename = utils.secure_filename(
os.path.basename(filename)
)
if randomize:
filename = utils.random_filename(filename)
if folder:
filename = folder + "/" + filename
content_type = headers.get('Content-Type')
if content_type is None:
content_type, _ = mimetypes.guess_type(filename)
content_type = content_type or 'application/octet-stream'
headers['Content-Type'] = content_type
bucket = self.get_bucket(bucket_name)
key = bucket.get_key(filename) or bucket.new_key(filename)
file.seek(0)
key.set_contents_from_file(file,
headers=headers,
policy=acl,
replace=replace,
rewind=True)
return filename
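# Example usage: a minimal sketch of configuring the backend from settings
# and uploading a local file. Bucket name, credentials and paths are all
# placeholders, and boto must be installed for the connection to work.
if __name__ == "__main__":
    settings = {
        "storage.aws.bucket_name": "my-bucket",          # placeholder
        "storage.aws.access_key": "AKIA...",             # placeholder
        "storage.aws.secret_key": "secret",              # placeholder
        "storage.base_url": "https://cdn.example.com/",  # placeholder
    }
    storage = S3FileStorage.from_settings(settings, prefix="storage.")
    key = storage.save_filename("report.pdf", folder="docs", randomize=True)
    print(storage.url(key))   # e.g. https://cdn.example.com/docs/<random>.pdf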
|
PypiClean
|
/z3c.dobbin-0.4.2.tar.gz/z3c.dobbin-0.4.2/src/z3c/dobbin/soup.py
|
from zope import interface
from interfaces import IMapper
from interfaces import IMapped
from zope.dottedname.resolve import resolve
from z3c.saconfig import Session
import factory
import bootstrap
import interfaces
import session as tx
import types
BASIC_TYPES = (int, float, str, unicode, tuple, list, set, dict)
IMMUTABLE_TYPES = (int, float, str, unicode, tuple)
def lookup(uuid, ignore_pending=False):
session = Session()
# check if object is in pending session objects
if not ignore_pending:
try:
token = tx.COPY_CONCRETE_TO_INSTANCE(uuid)
return session._d_pending[token]
except (AttributeError, KeyError):
pass
try:
item = session.query(bootstrap.Soup).filter_by(uuid=uuid)[0]
except IndexError:
raise LookupError("Unable to locate object with UUID = '%s'." % uuid)
# build item
return build(item.spec, item.uuid)
def build(spec, uuid):
kls = resolve(spec)
mapper = IMapper(kls)
session = Session()
return session.query(mapper).filter_by(uuid=uuid)[0]
def persist(item):
instance = interfaces.IMapped(item)
if interfaces.IBasicType.providedBy(instance):
instance.value = item
else:
update(instance, item)
# set soup identifier on instances
if type(item) not in BASIC_TYPES:
item._d_uuid = instance.uuid
# register mutable objects with transaction manager
if type(item) not in IMMUTABLE_TYPES:
uuid = instance.uuid
def copy_concrete_to_mapped():
# update attributes
update(instance, item)
# add transaction hook
tx.addBeforeCommitHook(
tx.COPY_CONCRETE_TO_INSTANCE(uuid), item, copy_concrete_to_mapped)
return instance
def update(instance, item):
# set attributes
for iface in interface.providedBy(item):
for name in iface.names():
value = getattr(item, name)
setattr(instance, name, value)
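# Example usage: a sketch of the persist/lookup round trip, assuming a
# configured z3c.saconfig session and an object whose interface can be
# adapted to IMapped (the names below are illustrative only):
#
#     instance = persist(obj)     # copies attributes onto the mapped record
#     uuid = instance.uuid        # soup identifier, also stored as obj._d_uuid
#     same = lookup(uuid)         # rebuilds the mapped instance from the soup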
|
PypiClean
|
/taskcc-alipay-sdk-python-3.3.398.tar.gz/taskcc-alipay-sdk-python-3.3.398/alipay/aop/api/request/KoubeiMarketingCampaignCrowdDeleteRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMarketingCampaignCrowdDeleteModel import KoubeiMarketingCampaignCrowdDeleteModel
class KoubeiMarketingCampaignCrowdDeleteRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiMarketingCampaignCrowdDeleteModel):
self._biz_content = value
else:
self._biz_content = KoubeiMarketingCampaignCrowdDeleteModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.marketing.campaign.crowd.delete'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
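# Example usage: a minimal sketch of assembling request params. The URL is
# a placeholder; in practice the request object is handed to the SDK's
# client for signing and transport rather than used directly.
if __name__ == "__main__":
    request = KoubeiMarketingCampaignCrowdDeleteRequest(
        biz_model=KoubeiMarketingCampaignCrowdDeleteModel())
    request.notify_url = "https://example.com/notify"   # placeholder
    params = request.get_params()
    print(params[P_METHOD])   # koubei.marketing.campaign.crowd.delete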
|
PypiClean
|
/binom_gaussian-0.1.tar.gz/binom_gaussian-0.1/binom_gaussian/Gaussiandistribution.py
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
PypiClean
|
/funkwhale-api-client-0.1.1.tar.gz/funkwhale-api-client-0.1.1/funkwhale_api_client/api/channels/get_channel_metadata_choices.py
|
from typing import Any, Dict, Optional
import httpx
from ...client import AuthenticatedClient
from ...models.channel import Channel
from ...types import Response
def _get_kwargs(
*,
client: AuthenticatedClient,
) -> Dict[str, Any]:
url = "{}/api/v1/channels/metadata-choices/".format(client.base_url)
headers: Dict[str, str] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
return {
"method": "get",
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
}
def _parse_response(*, response: httpx.Response) -> Optional[Channel]:
if response.status_code == 200:
response_200 = Channel.from_dict(response.json())
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[Channel]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
*,
client: AuthenticatedClient,
) -> Response[Channel]:
"""
Returns:
Response[Channel]
"""
kwargs = _get_kwargs(
client=client,
)
response = httpx.request(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
def sync(
*,
client: AuthenticatedClient,
) -> Optional[Channel]:
"""
Returns:
Response[Channel]
"""
return sync_detailed(
client=client,
).parsed
async def asyncio_detailed(
*,
client: AuthenticatedClient,
) -> Response[Channel]:
"""
Returns:
Response[Channel]
"""
kwargs = _get_kwargs(
client=client,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.request(**kwargs)
return _build_response(response=response)
async def asyncio(
*,
client: AuthenticatedClient,
) -> Optional[Channel]:
"""
Returns:
Response[Channel]
"""
return (
await asyncio_detailed(
client=client,
)
).parsed
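# Example usage: a minimal sketch of calling the sync variant. The base
# URL and token are placeholders, and AuthenticatedClient's constructor
# arguments are assumed from the generated client's usual conventions.
if __name__ == "__main__":
    client = AuthenticatedClient(
        base_url="https://demo.funkwhale.audio",  # placeholder instance
        token="YOUR_API_TOKEN",                   # placeholder token
    )
    channel = sync(client=client)
    if channel is not None:
        print(channel.to_dict())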
|
PypiClean
|
/another_linked_list-0.1.1-py3-none-any.whl/another_linked_list/_vendor/all_purpose_set/_vendor/tedent/_vendor/wrapt/decorators.py
|
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
import builtins
exec_ = getattr(builtins, "exec")
del builtins
else:
string_types = basestring,
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
from functools import partial
from inspect import ismethod, isclass, formatargspec
from collections import namedtuple
from threading import Lock, RLock
try:
from inspect import signature
except ImportError:
pass
from .wrappers import (FunctionWrapper, BoundFunctionWrapper, ObjectProxy,
CallableObjectProxy)
# Adapter wrapper for the wrapped function which will overlay certain
# properties from the adapter function onto the wrapped function so that
# functions such as inspect.getargspec(), inspect.getfullargspec(),
# inspect.signature() and inspect.getsource() return the correct results
# one would expect.
class _AdapterFunctionCode(CallableObjectProxy):
def __init__(self, wrapped_code, adapter_code):
super(_AdapterFunctionCode, self).__init__(wrapped_code)
self._self_adapter_code = adapter_code
@property
def co_argcount(self):
return self._self_adapter_code.co_argcount
@property
def co_code(self):
return self._self_adapter_code.co_code
@property
def co_flags(self):
return self._self_adapter_code.co_flags
@property
def co_kwonlyargcount(self):
return self._self_adapter_code.co_kwonlyargcount
@property
def co_varnames(self):
return self._self_adapter_code.co_varnames
class _AdapterFunctionSurrogate(CallableObjectProxy):
def __init__(self, wrapped, adapter):
super(_AdapterFunctionSurrogate, self).__init__(wrapped)
self._self_adapter = adapter
@property
def __code__(self):
return _AdapterFunctionCode(self.__wrapped__.__code__,
self._self_adapter.__code__)
@property
def __defaults__(self):
return self._self_adapter.__defaults__
@property
def __kwdefaults__(self):
return self._self_adapter.__kwdefaults__
@property
def __signature__(self):
if 'signature' not in globals():
return self._self_adapter.__signature__
else:
# Can't allow this to fail on Python 3 else it falls
# through to using __wrapped__, but that will be the
# wrong function we want to derive the signature
# from. Thus generate the signature ourselves.
return signature(self._self_adapter)
if PY2:
func_code = __code__
func_defaults = __defaults__
class _BoundAdapterWrapper(BoundFunctionWrapper):
@property
def __func__(self):
return _AdapterFunctionSurrogate(self.__wrapped__.__func__,
self._self_parent._self_adapter)
if PY2:
im_func = __func__
class AdapterWrapper(FunctionWrapper):
__bound_function_wrapper__ = _BoundAdapterWrapper
def __init__(self, *args, **kwargs):
adapter = kwargs.pop('adapter')
super(AdapterWrapper, self).__init__(*args, **kwargs)
self._self_surrogate = _AdapterFunctionSurrogate(
self.__wrapped__, adapter)
self._self_adapter = adapter
@property
def __code__(self):
return self._self_surrogate.__code__
@property
def __defaults__(self):
return self._self_surrogate.__defaults__
@property
def __kwdefaults__(self):
return self._self_surrogate.__kwdefaults__
if PY2:
func_code = __code__
func_defaults = __defaults__
@property
def __signature__(self):
return self._self_surrogate.__signature__
class AdapterFactory(object):
def __call__(self, wrapped):
raise NotImplementedError()
class DelegatedAdapterFactory(AdapterFactory):
def __init__(self, factory):
super(DelegatedAdapterFactory, self).__init__()
self.factory = factory
def __call__(self, wrapped):
return self.factory(wrapped)
adapter_factory = DelegatedAdapterFactory
# Decorator for creating other decorators. This decorator and the
# wrappers it uses are designed to properly preserve any name
# attributes, function signatures etc, in addition to the wrappers
# themselves acting like a transparent proxy for the original wrapped
# function so the wrapper is effectively indistinguishable from the
# original wrapped function.
def decorator(wrapper=None, enabled=None, adapter=None):
# The decorator should be supplied with a single positional argument
# which is the wrapper function to be used to implement the
# decorator. This may be preceded by a step whereby the keyword
# arguments are supplied to customise the behaviour of the
# decorator. The 'adapter' argument is used to optionally denote a
# separate function which is notionally used by an adapter
# decorator. In that case parts of the function '__code__' and
# '__defaults__' attributes are used from the adapter function
# rather than those of the wrapped function. This allows for the
# argument specification from inspect.getargspec() and similar
# functions to be overridden with a prototype for a different
# function than what was wrapped. The 'enabled' argument provides a
# way to enable/disable the use of the decorator. If the type of
# 'enabled' is a boolean, then it is evaluated immediately and the
# wrapper not even applied if it is False. If not a boolean, it will
# be evaluated when the wrapper is called for an unbound wrapper,
# and when binding occurs for a bound wrapper. When being evaluated,
# if 'enabled' is callable it will be called to obtain the value to
    # be checked. If False, the wrapper will not be called and the
    # original wrapped function will be called directly instead.
if wrapper is not None:
        # Helper function for creating a wrapper of the appropriate
        # type when we need it down below.
def _build(wrapped, wrapper, enabled=None, adapter=None):
if adapter:
if isinstance(adapter, AdapterFactory):
adapter = adapter(wrapped)
if not callable(adapter):
ns = {}
if not isinstance(adapter, string_types):
adapter = formatargspec(*adapter)
exec_('def adapter{}: pass'.format(adapter), ns, ns)
adapter = ns['adapter']
return AdapterWrapper(wrapped=wrapped, wrapper=wrapper,
enabled=enabled, adapter=adapter)
return FunctionWrapper(wrapped=wrapped, wrapper=wrapper,
enabled=enabled)
# The wrapper has been provided so return the final decorator.
# The decorator is itself one of our function wrappers so we
# can determine when it is applied to functions, instance methods
# or class methods. This allows us to bind the instance or class
# method so the appropriate self or cls attribute is supplied
# when it is finally called.
def _wrapper(wrapped, instance, args, kwargs):
# We first check for the case where the decorator was applied
# to a class type.
#
# @decorator
# class mydecoratorclass(object):
# def __init__(self, arg=None):
# self.arg = arg
# def __call__(self, wrapped, instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorclass(arg=1)
# def function():
# pass
#
# In this case an instance of the class is to be used as the
# decorator wrapper function. If args was empty at this point,
# then it means that there were optional keyword arguments
# supplied to be used when creating an instance of the class
# to be used as the wrapper function.
if instance is None and isclass(wrapped) and not args:
# We still need to be passed the target function to be
# wrapped as yet, so we need to return a further function
# to be able to capture it.
def _capture(target_wrapped):
# Now have the target function to be wrapped and need
# to create an instance of the class which is to act
# as the decorator wrapper function. Before we do that,
# we need to first check that use of the decorator
# hadn't been disabled by a simple boolean. If it was,
# the target function to be wrapped is returned instead.
_enabled = enabled
if type(_enabled) is bool:
if not _enabled:
return target_wrapped
_enabled = None
# Now create an instance of the class which is to act
# as the decorator wrapper function. Any arguments had
# to be supplied as keyword only arguments so that is
# all we pass when creating it.
target_wrapper = wrapped(**kwargs)
# Finally build the wrapper itself and return it.
return _build(target_wrapped, target_wrapper,
_enabled, adapter)
return _capture
# We should always have the target function to be wrapped at
# this point as the first (and only) value in args.
target_wrapped = args[0]
# Need to now check that use of the decorator hadn't been
# disabled by a simple boolean. If it was, then target
# function to be wrapped is returned instead.
_enabled = enabled
if type(_enabled) is bool:
if not _enabled:
return target_wrapped
_enabled = None
# We now need to build the wrapper, but there are a couple of
# different cases we need to consider.
if instance is None:
if isclass(wrapped):
# In this case the decorator was applied to a class
# type but optional keyword arguments were not supplied
# for initialising an instance of the class to be used
# as the decorator wrapper function.
#
# @decorator
# class mydecoratorclass(object):
# def __init__(self, arg=None):
# self.arg = arg
# def __call__(self, wrapped, instance,
# args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorclass
# def function():
# pass
#
# We still need to create an instance of the class to
# be used as the decorator wrapper function, but no
                # arguments are passed.
target_wrapper = wrapped()
else:
# In this case the decorator was applied to a normal
# function, or possibly a static method of a class.
#
# @decorator
                # def mydecoratorfunction(wrapped, instance,
# args, kwargs):
# return wrapped(*args, **kwargs)
#
# @mydecoratorfunction
# def function():
# pass
#
# That normal function becomes the decorator wrapper
# function.
target_wrapper = wrapper
else:
if isclass(instance):
# In this case the decorator was applied to a class
# method.
#
# class myclass(object):
# @decorator
# @classmethod
# def decoratorclassmethod(cls, wrapped,
# instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# instance = myclass()
#
# @instance.decoratorclassmethod
# def function():
# pass
#
# This one is a bit strange because binding was actually
# performed on the wrapper created by our decorator
# factory. We need to apply that binding to the decorator
                # wrapper function to which the decorator factory
                # was applied.
target_wrapper = wrapper.__get__(None, instance)
else:
# In this case the decorator was applied to an instance
# method.
#
# class myclass(object):
# @decorator
# def decoratorclassmethod(self, wrapped,
# instance, args, kwargs):
# return wrapped(*args, **kwargs)
#
# instance = myclass()
#
# @instance.decoratorclassmethod
# def function():
# pass
#
# This one is a bit strange because binding was actually
# performed on the wrapper created by our decorator
# factory. We need to apply that binding to the decorator
                # wrapper function to which the decorator factory
                # was applied.
target_wrapper = wrapper.__get__(instance, type(instance))
# Finally build the wrapper itself and return it.
return _build(target_wrapped, target_wrapper, _enabled, adapter)
# We first return our magic function wrapper here so we can
# determine in what context the decorator factory was used. In
# other words, it is itself a universal decorator.
return _build(wrapper, _wrapper)
else:
# The wrapper still has not been provided, so we are just
# collecting the optional keyword arguments. Return the
# decorator again wrapped in a partial using the collected
# arguments.
return partial(decorator, enabled=enabled, adapter=adapter)
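# Example usage: a minimal sketch of a universal decorator built with the
# factory above (the names log_calls/greet are illustrative). The same
# wrapper works unchanged on plain functions, instance methods and class
# methods, with 'instance' bound appropriately in each case.
@decorator
def log_calls(wrapped, instance, args, kwargs):
    print('calling %s' % wrapped.__name__)
    return wrapped(*args, **kwargs)

@log_calls
def greet(name):
    return 'hello %s' % name

# greet('world') prints "calling greet" and returns 'hello world', while
# greet still reports the original name and signature via introspection.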
# Decorator for implementing thread synchronization. It can be used as a
# decorator, in which case the synchronization context is determined by
# what type of function is wrapped, or it can also be used as a context
# manager, where the user needs to supply the correct synchronization
# context. It is also possible to supply an object which appears to be a
# synchronization primitive of some sort, by virtue of having release()
# and acquire() methods. In that case that will be used directly as the
# synchronization primitive without creating a separate lock against the
# derived or supplied context.
def synchronized(wrapped):
# Determine if being passed an object which is a synchronization
    # primitive. We can't check by type for Lock, RLock, Semaphore etc.,
    # because the factory functions that create them aren't the types
    # themselves. Therefore use the
# existence of acquire() and release() methods. This is more
# extensible anyway as it allows custom synchronization mechanisms.
if hasattr(wrapped, 'acquire') and hasattr(wrapped, 'release'):
# We remember what the original lock is and then return a new
# decorator which accesses and locks it. When returning the new
# decorator we wrap it with an object proxy so we can override
# the context manager methods in case it is being used to wrap
# synchronized statements with a 'with' statement.
lock = wrapped
@decorator
def _synchronized(wrapped, instance, args, kwargs):
# Execute the wrapped function while the original supplied
# lock is held.
with lock:
return wrapped(*args, **kwargs)
class _PartialDecorator(CallableObjectProxy):
def __enter__(self):
lock.acquire()
return lock
def __exit__(self, *args):
lock.release()
return _PartialDecorator(wrapped=_synchronized)
# The following only applies when the lock is being created automatically
# based on the context of what was supplied. In this case we supply
# a final decorator, but need to use FunctionWrapper directly as we
# want to derive from it to add context manager methods in case it is
# being used to wrap synchronized statements with a 'with' statement.
def _synchronized_lock(context):
# Attempt to retrieve the lock for the specific context.
lock = vars(context).get('_synchronized_lock', None)
if lock is None:
# There is no existing lock defined for the context we
# are dealing with so we need to create one. This needs
# to be done in a way to guarantee there is only one
# created, even if multiple threads try and create it at
# the same time. We can't always use the setdefault()
# method on the __dict__ for the context. This is the
# case where the context is a class, as __dict__ is
# actually a dictproxy. What we therefore do is use a
# meta lock on this wrapper itself, to control the
# creation and assignment of the lock attribute against
# the context.
with synchronized._synchronized_meta_lock:
# We need to check again for whether the lock we want
# exists in case two threads were trying to create it
# at the same time and were competing to create the
# meta lock.
lock = vars(context).get('_synchronized_lock', None)
if lock is None:
lock = RLock()
setattr(context, '_synchronized_lock', lock)
return lock
def _synchronized_wrapper(wrapped, instance, args, kwargs):
# Execute the wrapped function while the lock for the
# desired context is held. If instance is None then the
# wrapped function is used as the context.
with _synchronized_lock(instance or wrapped):
return wrapped(*args, **kwargs)
class _FinalDecorator(FunctionWrapper):
def __enter__(self):
self._self_lock = _synchronized_lock(self.__wrapped__)
self._self_lock.acquire()
return self._self_lock
def __exit__(self, *args):
self._self_lock.release()
return _FinalDecorator(wrapped=wrapped, wrapper=_synchronized_wrapper)
synchronized._synchronized_meta_lock = Lock()
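# A minimal usage sketch (illustrative only). The lock is derived from
# the wrapped context when used as a plain decorator, or supplied
# explicitly as any object with acquire()/release() methods, in which
# case it also works as a context manager:
#
#   @synchronized                    # lock derived from the function
#   def function():
#       pass
#
#   import threading
#   lock = threading.RLock()
#
#   @synchronized(lock)              # explicit lock primitive
#   def other_function():
#       pass
#
#   with synchronized(lock):         # synchronized statement block
#       pass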
/mxnet_cu100-1.9.0-py3-none-manylinux2014_x86_64.whl/mxnet/symbol/gen_sparse.py
from ._internal import SymbolBase
from ..base import _Null
def ElementWiseSum(*args, **kwargs):
r"""Adds all input arguments element-wise.
.. math::
add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n
``add_n`` is potentially more efficient than calling ``add`` `n` times.
The storage type of ``add_n`` output depends on storage types of inputs
- add_n(row_sparse, row_sparse, ..) = row_sparse
- add_n(default, csr, default) = default
- add_n(any input combinations longer than 4 (>4) with at least one default type) = default
- otherwise, ``add_n`` falls back to default storage for all inputs and generates output with default storage
Defined in ../src/operator/tensor/elemwise_sum.cc:L155
This function supports a variable number of positional inputs.
Parameters
----------
args : Symbol[]
Positional input arguments
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def Embedding(data=None, weight=None, input_dim=_Null, output_dim=_Null, dtype=_Null, sparse_grad=_Null, name=None, attr=None, out=None, **kwargs):
r"""Maps integer indices to vector representations (embeddings).
This operator maps words to real-valued vectors in a high-dimensional space,
called word embeddings. These embeddings can capture semantic and syntactic properties of the words.
For example, it has been noted that in the learned embedding spaces, similar words tend
to be close to each other and dissimilar words far apart.
For an input array of shape (d1, ..., dK),
the shape of an output array is (d1, ..., dK, output_dim).
All the input values should be integers in the range [0, input_dim).
If the input_dim is ip0 and output_dim is op0, then shape of the embedding weight matrix must be
(ip0, op0).
When "sparse_grad" is False, if any index mentioned is too large, it is replaced by the index that
addresses the last vector in an embedding matrix.
When "sparse_grad" is True, an error will be raised if invalid indices are found.
Examples::
input_dim = 4
output_dim = 5
// Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3)
y = [[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.],
[ 10., 11., 12., 13., 14.],
[ 15., 16., 17., 18., 19.]]
// Input array x represents n-grams(2-gram). So, x = [(w1,w3), (w0,w2)]
x = [[ 1., 3.],
[ 0., 2.]]
// Mapped input x to its vector representation y.
Embedding(x, y, 4, 5) = [[[ 5., 6., 7., 8., 9.],
[ 15., 16., 17., 18., 19.]],
[[ 0., 1., 2., 3., 4.],
[ 10., 11., 12., 13., 14.]]]
The storage type of weight can be either row_sparse or default.
.. Note::
If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be
"row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad
and Adam. Note that lazy updates are turned on by default, which may perform differently
from standard updates. For more details, please check the Optimization API at:
https://mxnet.incubator.apache.org/api/python/optimization/optimization.html
Defined in ../src/operator/tensor/indexing_op.cc:L597
Parameters
----------
data : Symbol
The input array to the embedding operator.
weight : Symbol
The embedding weight matrix.
input_dim : int, required
Vocabulary size of the input indices.
output_dim : int, required
Dimension of the embedding vectors.
dtype : {'bfloat16', 'float16', 'float32', 'float64', 'int32', 'int64', 'int8', 'uint8'},optional, default='float32'
Data type of weight.
sparse_grad : boolean, optional, default=0
Compute row sparse gradient in the backward calculation. If set to True, the grad's storage type is row_sparse.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
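# A minimal usage sketch (assumption: the ``mxnet`` package is installed;
# this stub is replaced by the real operator when the module is loaded):
#
#   import mxnet as mx
#   data = mx.sym.Variable('data')
#   weight = mx.sym.Variable('weight', stype='row_sparse')
#   emb = mx.sym.sparse.Embedding(data=data, weight=weight,
#                                 input_dim=4, output_dim=5,
#                                 sparse_grad=True)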
def FullyConnected(data=None, weight=None, bias=None, num_hidden=_Null, no_bias=_Null, flatten=_Null, name=None, attr=None, out=None, **kwargs):
r"""Applies a linear transformation: :math:`Y = XW^T + b`.
If ``flatten`` is set to be true, then the shapes are:
- **data**: `(batch_size, x1, x2, ..., xn)`
- **weight**: `(num_hidden, x1 * x2 * ... * xn)`
- **bias**: `(num_hidden,)`
- **out**: `(batch_size, num_hidden)`
If ``flatten`` is set to be false, then the shapes are:
- **data**: `(x1, x2, ..., xn, input_dim)`
- **weight**: `(num_hidden, input_dim)`
- **bias**: `(num_hidden,)`
- **out**: `(x1, x2, ..., xn, num_hidden)`
The learnable parameters include both ``weight`` and ``bias``.
If ``no_bias`` is set to be true, then the ``bias`` term is ignored.
.. Note::
The sparse support for FullyConnected is limited to forward evaluation with `row_sparse`
weight and bias, where the length of `weight.indices` and `bias.indices` must be equal
to `num_hidden`. This could be useful for model inference with `row_sparse` weights
trained with importance sampling or noise contrastive estimation.
To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead
of sparse.FullyConnected.
Defined in ../src/operator/nn/fully_connected.cc:L286
Parameters
----------
data : Symbol
Input data.
weight : Symbol
Weight matrix.
bias : Symbol
Bias parameter.
num_hidden : int, required
Number of hidden nodes of the output.
no_bias : boolean, optional, default=0
Whether to disable bias parameter.
flatten : boolean, optional, default=1
Whether to collapse all but the first axis of the input data tensor.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
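# A minimal shape sketch (assumption: the ``mxnet`` package is installed).
# Per the note above, sparse support is forward-only with row_sparse
# weight and bias:
#
#   import mxnet as mx
#   x = mx.sym.Variable('x')                      # (batch_size, 20)
#   w = mx.sym.Variable('w', stype='row_sparse')  # (10, 20)
#   b = mx.sym.Variable('b', stype='row_sparse')  # (10,)
#   y = mx.sym.sparse.FullyConnected(data=x, weight=w, bias=b,
#                                    num_hidden=10)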
def LinearRegressionOutput(data=None, label=None, grad_scale=_Null, name=None, attr=None, out=None, **kwargs):
r"""Computes and optimizes for squared loss during backward propagation.
Just outputs ``data`` during forward propagation.
If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value,
then the squared loss estimated over :math:`n` samples is defined as
:math:`\text{SquaredLoss}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_2`
.. note::
Use the LinearRegressionOutput as the final output layer of a net.
The storage type of ``label`` can be ``default`` or ``csr``
- LinearRegressionOutput(default, default) = default
- LinearRegressionOutput(default, csr) = default
By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example.
The parameter `grad_scale` can be used to change this scale to `grad_scale/m`.
Defined in ../src/operator/regression_output.cc:L92
Parameters
----------
data : Symbol
Input data to the function.
label : Symbol
Input label to the function.
grad_scale : float, optional, default=1
Scale the gradient by a float factor
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def LogisticRegressionOutput(data=None, label=None, grad_scale=_Null, name=None, attr=None, out=None, **kwargs):
r"""Applies a logistic function to the input.
The logistic function, also known as the sigmoid function, is computed as
:math:`\frac{1}{1+exp(-\textbf{x})}`.
Commonly, the sigmoid is used to squash the real-valued output of a linear model
:math:`w^T x + b` into the [0,1] range so that it can be interpreted as a probability.
It is suitable for binary classification or probability prediction tasks.
.. note::
Use the LogisticRegressionOutput as the final output layer of a net.
The storage type of ``label`` can be ``default`` or ``csr``
- LogisticRegressionOutput(default, default) = default
- LogisticRegressionOutput(default, csr) = default
The loss function used is the Binary Cross Entropy Loss:
:math:`-{(y\log(p) + (1 - y)\log(1 - p))}`
Where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model. By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example.
The parameter `grad_scale` can be used to change this scale to `grad_scale/m`.
Defined in ../src/operator/regression_output.cc:L152
Parameters
----------
data : Symbol
Input data to the function.
label : Symbol
Input label to the function.
grad_scale : float, optional, default=1
Scale the gradient by a float factor
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def MAERegressionOutput(data=None, label=None, grad_scale=_Null, name=None, attr=None, out=None, **kwargs):
r"""Computes mean absolute error of the input.
MAE is a risk metric corresponding to the expected value of the absolute error.
If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value,
then the mean absolute error (MAE) estimated over :math:`n` samples is defined as
:math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1`
.. note::
Use the MAERegressionOutput as the final output layer of a net.
The storage type of ``label`` can be ``default`` or ``csr``
- MAERegressionOutput(default, default) = default
- MAERegressionOutput(default, csr) = default
By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example.
The parameter `grad_scale` can be used to change this scale to `grad_scale/m`.
Defined in ../src/operator/regression_output.cc:L120
Parameters
----------
data : Symbol
Input data to the function.
label : Symbol
Input label to the function.
grad_scale : float, optional, default=1
Scale the gradient by a float factor
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def _contrib_round_ste(data=None, name=None, attr=None, out=None, **kwargs):
r"""Straight-through-estimator of `round()`.
In forward pass, returns element-wise rounded value to the nearest integer of the input (same as `round()`).
In backward pass, returns gradients of ``1`` everywhere (instead of ``0`` everywhere as in `round()`):
:math:`\frac{d}{dx}{round\_ste(x)} = 1` vs. :math:`\frac{d}{dx}{round(x)} = 0`.
This is useful for quantized training.
Reference: Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation.
Example::
x = round_ste([-1.5, 1.5, -1.9, 1.9, 2.7])
x.backward()
x = [-2., 2., -2., 2., 3.]
x.grad() = [1., 1., 1., 1., 1.]
The storage type of ``round_ste`` output depends upon the input storage type:
- round_ste(default) = default
- round_ste(row_sparse) = row_sparse
- round_ste(csr) = csr
Defined in ../src/operator/contrib/stes_op.cc:L54
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def _contrib_sign_ste(data=None, name=None, attr=None, out=None, **kwargs):
r"""Straight-through-estimator of `sign()`.
In forward pass, returns element-wise sign of the input (same as `sign()`).
In backward pass, returns gradients of ``1`` everywhere (instead of ``0`` everywhere as in ``sign()``):
:math:`\frac{d}{dx}{sign\_ste(x)} = 1` vs. :math:`\frac{d}{dx}{sign(x)} = 0`.
This is useful for quantized training.
Reference: Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation.
Example::
x = sign_ste([-2, 0, 3])
x.backward()
x = [-1., 0., 1.]
x.grad() = [1., 1., 1.]
The storage type of ``sign_ste`` output depends upon the input storage type:
- sign_ste(default) = default
- sign_ste(row_sparse) = row_sparse
- sign_ste(csr) = csr
Defined in ../src/operator/contrib/stes_op.cc:L79
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def abs(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise absolute value of the input.
Example::
abs([-2, 0, 3]) = [2, 0, 3]
The storage type of ``abs`` output depends upon the input storage type:
- abs(default) = default
- abs(row_sparse) = row_sparse
- abs(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L720
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def adagrad_update(weight=None, grad=None, history=None, lr=_Null, epsilon=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, name=None, attr=None, out=None, **kwargs):
r"""Update function for AdaGrad optimizer.
Referenced from *Adaptive Subgradient Methods for Online Learning and Stochastic Optimization*,
and available at http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.
Updates are applied by::
rescaled_grad = clip(grad * rescale_grad, clip_gradient)
history = history + square(rescaled_grad)
w = w - learning_rate * rescaled_grad / sqrt(history + epsilon)
Note that non-zero values for the weight decay option are not supported.
Defined in ../src/operator/optimizer_op.cc:L908
Parameters
----------
weight : Symbol
Weight
grad : Symbol
Gradient
history : Symbol
History
lr : float, required
Learning rate
epsilon : float, optional, default=1.00000001e-07
epsilon
wd : float, optional, default=0
weight decay
rescale_grad : float, optional, default=1
Rescale gradient to grad = rescale_grad*grad.
clip_gradient : float, optional, default=-1
Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def adam_update(weight=None, grad=None, mean=None, var=None, lr=_Null, beta1=_Null, beta2=_Null, epsilon=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs):
r"""Update function for Adam optimizer. Adam is seen as a generalization
of AdaGrad.
Adam update consists of the following steps, where g represents gradient and m, v
are 1st and 2nd order moment estimates (mean and variance).
.. math::
g_t = \nabla J(W_{t-1})\\
m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\
v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\
W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon }
It updates the weights using::
m = beta1*m + (1-beta1)*grad
v = beta2*v + (1-beta2)*(grad**2)
w += - learning_rate * m / (sqrt(v) + epsilon)
However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage
type of weight is the same as those of m and v,
only the row slices whose indices appear in grad.indices are updated (for w, m and v)::
for row in grad.indices:
m[row] = beta1*m[row] + (1-beta1)*grad[row]
v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2)
w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon)
Defined in ../src/operator/optimizer_op.cc:L687
Parameters
----------
weight : Symbol
Weight
grad : Symbol
Gradient
mean : Symbol
Moving mean
var : Symbol
Moving variance
lr : float, required
Learning rate
beta1 : float, optional, default=0.899999976
The decay rate for the 1st moment estimates.
beta2 : float, optional, default=0.999000013
The decay rate for the 2nd moment estimates.
epsilon : float, optional, default=9.99999994e-09
A small constant for numerical stability.
wd : float, optional, default=0
Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight.
rescale_grad : float, optional, default=1
Rescale gradient to grad = rescale_grad*grad.
clip_gradient : float, optional, default=-1
Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
lazy_update : boolean, optional, default=1
If true, lazy updates are applied if gradient's stype is row_sparse and all of w, m and v have the same stype
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
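# A minimal imperative sketch using the NDArray counterpart of this
# operator (assumption: ``mx.nd.adam_update`` mirrors this signature):
#
#   import mxnet as mx
#   w = mx.nd.ones((2, 2))           # weights
#   g = mx.nd.ones((2, 2)) * 0.1     # gradient
#   m = mx.nd.zeros((2, 2))          # 1st moment estimate (mean)
#   v = mx.nd.zeros((2, 2))          # 2nd moment estimate (variance)
#   mx.nd.adam_update(w, g, m, v, lr=0.01, out=w)  # updates w in place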
def add_n(*args, **kwargs):
r"""Adds all input arguments element-wise.
.. math::
add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n
``add_n`` is potentially more efficient than calling ``add`` `n` times.
The storage type of ``add_n`` output depends on storage types of inputs
- add_n(row_sparse, row_sparse, ..) = row_sparse
- add_n(default, csr, default) = default
- add_n(any input combinations longer than 4 (>4) with at least one default type) = default
- otherwise, ``add_n`` falls back to default storage for all inputs and generates output with default storage
Defined in ../src/operator/tensor/elemwise_sum.cc:L155
This function supports a variable number of positional inputs.
Parameters
----------
args : Symbol[]
Positional input arguments
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def arccos(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise inverse cosine of the input array.
The input should be in range `[-1, 1]`.
The output is in the closed interval :math:`[0, \pi]`
.. math::
arccos([-1, -.707, 0, .707, 1]) = [\pi, 3\pi/4, \pi/2, \pi/4, 0]
The storage type of ``arccos`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L233
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def arccosh(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns the element-wise inverse hyperbolic cosine of the input array, \
computed element-wise.
The storage type of ``arccosh`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L535
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def arcsin(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise inverse sine of the input array.
The input should be in the range `[-1, 1]`.
The output is in the closed interval of [:math:`-\pi/2`, :math:`\pi/2`].
.. math::
arcsin([-1, -.707, 0, .707, 1]) = [-\pi/2, -\pi/4, 0, \pi/4, \pi/2]
The storage type of ``arcsin`` output depends upon the input storage type:
- arcsin(default) = default
- arcsin(row_sparse) = row_sparse
- arcsin(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L187
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def arcsinh(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns the element-wise inverse hyperbolic sine of the input array, \
computed element-wise.
The storage type of ``arcsinh`` output depends upon the input storage type:
- arcsinh(default) = default
- arcsinh(row_sparse) = row_sparse
- arcsinh(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L494
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def arctan(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise inverse tangent of the input array.
The output is in the closed interval :math:`[-\pi/2, \pi/2]`
.. math::
arctan([-1, 0, 1]) = [-\pi/4, 0, \pi/4]
The storage type of ``arctan`` output depends upon the input storage type:
- arctan(default) = default
- arctan(row_sparse) = row_sparse
- arctan(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L282
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def arctanh(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns the element-wise inverse hyperbolic tangent of the input array, \
computed element-wise.
The storage type of ``arctanh`` output depends upon the input storage type:
- arctanh(default) = default
- arctanh(row_sparse) = row_sparse
- arctanh(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L579
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def broadcast_add(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise sum of the input arrays with broadcasting.
`broadcast_plus` is an alias to the function `broadcast_add`.
Example::
x = [[ 1., 1., 1.],
[ 1., 1., 1.]]
y = [[ 0.],
[ 1.]]
broadcast_add(x, y) = [[ 1., 1., 1.],
[ 2., 2., 2.]]
broadcast_plus(x, y) = [[ 1., 1., 1.],
[ 2., 2., 2.]]
Supported sparse operations:
broadcast_add(csr, dense(1D)) = dense
broadcast_add(dense(1D), csr) = dense
Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L57
Parameters
----------
lhs : Symbol
First input to the function
rhs : Symbol
Second input to the function
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def broadcast_div(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise division of the input arrays with broadcasting.
Example::
x = [[ 6., 6., 6.],
[ 6., 6., 6.]]
y = [[ 2.],
[ 3.]]
broadcast_div(x, y) = [[ 3., 3., 3.],
[ 2., 2., 2.]]
Supported sparse operations:
broadcast_div(csr, dense(1D)) = csr
Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L186
Parameters
----------
lhs : Symbol
First input to the function
rhs : Symbol
Second input to the function
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def broadcast_minus(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise difference of the input arrays with broadcasting.
`broadcast_minus` is an alias to the function `broadcast_sub`.
Example::
x = [[ 1., 1., 1.],
[ 1., 1., 1.]]
y = [[ 0.],
[ 1.]]
broadcast_sub(x, y) = [[ 1., 1., 1.],
[ 0., 0., 0.]]
broadcast_minus(x, y) = [[ 1., 1., 1.],
[ 0., 0., 0.]]
Supported sparse operations:
broadcast_sub/minus(csr, dense(1D)) = dense
broadcast_sub/minus(dense(1D), csr) = dense
Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L105
Parameters
----------
lhs : Symbol
First input to the function
rhs : Symbol
Second input to the function
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def broadcast_mul(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise product of the input arrays with broadcasting.
Example::
x = [[ 1., 1., 1.],
[ 1., 1., 1.]]
y = [[ 0.],
[ 1.]]
broadcast_mul(x, y) = [[ 0., 0., 0.],
[ 1., 1., 1.]]
Supported sparse operations:
broadcast_mul(csr, dense(1D)) = csr
Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L145
Parameters
----------
lhs : Symbol
First input to the function
rhs : Symbol
Second input to the function
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def broadcast_plus(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise sum of the input arrays with broadcasting.
`broadcast_plus` is an alias to the function `broadcast_add`.
Example::
x = [[ 1., 1., 1.],
[ 1., 1., 1.]]
y = [[ 0.],
[ 1.]]
broadcast_add(x, y) = [[ 1., 1., 1.],
[ 2., 2., 2.]]
broadcast_plus(x, y) = [[ 1., 1., 1.],
[ 2., 2., 2.]]
Supported sparse operations:
broadcast_add(csr, dense(1D)) = dense
broadcast_add(dense(1D), csr) = dense
Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L57
Parameters
----------
lhs : Symbol
First input to the function
rhs : Symbol
Second input to the function
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def broadcast_sub(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise difference of the input arrays with broadcasting.
`broadcast_minus` is an alias to the function `broadcast_sub`.
Example::
x = [[ 1., 1., 1.],
[ 1., 1., 1.]]
y = [[ 0.],
[ 1.]]
broadcast_sub(x, y) = [[ 1., 1., 1.],
[ 0., 0., 0.]]
broadcast_minus(x, y) = [[ 1., 1., 1.],
[ 0., 0., 0.]]
Supported sparse operations:
broadcast_sub/minus(csr, dense(1D)) = dense
broadcast_sub/minus(dense(1D), csr) = dense
Defined in ../src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L105
Parameters
----------
lhs : Symbol
First input to the function
rhs : Symbol
Second input to the function
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def cast_storage(data=None, stype=_Null, name=None, attr=None, out=None, **kwargs):
r"""Casts tensor storage type to the new type.
When an NDArray with default storage type is cast to csr or row_sparse storage,
the result is compact, which means:
- for csr, zero values will not be retained
- for row_sparse, row slices of all zeros will not be retained
The storage type of ``cast_storage`` output depends on stype parameter:
- cast_storage(csr, 'default') = default
- cast_storage(row_sparse, 'default') = default
- cast_storage(default, 'csr') = csr
- cast_storage(default, 'row_sparse') = row_sparse
- cast_storage(csr, 'csr') = csr
- cast_storage(row_sparse, 'row_sparse') = row_sparse
Example::
dense = [[ 0., 1., 0.],
[ 2., 0., 3.],
[ 0., 0., 0.],
[ 0., 0., 0.]]
# cast to row_sparse storage type
rsp = cast_storage(dense, 'row_sparse')
rsp.indices = [0, 1]
rsp.values = [[ 0., 1., 0.],
[ 2., 0., 3.]]
# cast to csr storage type
csr = cast_storage(dense, 'csr')
csr.indices = [1, 0, 2]
csr.values = [ 1., 2., 3.]
csr.indptr = [0, 1, 3, 3, 3]
Defined in ../src/operator/tensor/cast_storage.cc:L71
Parameters
----------
data : Symbol
The input.
stype : {'csr', 'default', 'row_sparse'}, required
Output storage type.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
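# A runnable variant of the docstring example (assumption: the ``mxnet``
# NDArray API exposes the same operator):
#
#   import mxnet as mx
#   dense = mx.nd.array([[0., 1., 0.],
#                        [2., 0., 3.],
#                        [0., 0., 0.],
#                        [0., 0., 0.]])
#   rsp = mx.nd.cast_storage(dense, stype='row_sparse')  # keeps rows 0, 1
#   csr = mx.nd.cast_storage(dense, stype='csr')         # keeps 3 non-zeros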
def cbrt(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise cube-root value of the input.
.. math::
cbrt(x) = \sqrt[3]{x}
Example::
cbrt([1, 8, -125]) = [1, 2, -5]
The storage type of ``cbrt`` output depends upon the input storage type:
- cbrt(default) = default
- cbrt(row_sparse) = row_sparse
- cbrt(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L270
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def ceil(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise ceiling of the input.
The ceil of the scalar x is the smallest integer i, such that i >= x.
Example::
ceil([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 2., 2., 3.]
The storage type of ``ceil`` output depends upon the input storage type:
- ceil(default) = default
- ceil(row_sparse) = row_sparse
- ceil(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L817
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def clip(data=None, a_min=_Null, a_max=_Null, name=None, attr=None, out=None, **kwargs):
r"""Clips (limits) the values in an array.
Given an interval, values outside the interval are clipped to the interval edges.
Clipping ``x`` between `a_min` and `a_max` would be::
.. math::
clip(x, a_min, a_max) = \max(\min(x, a_max), a_min)
Example::
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
clip(x,1,8) = [ 1., 1., 2., 3., 4., 5., 6., 7., 8., 8.]
The storage type of ``clip`` output depends on storage types of inputs and the a_min, a_max \
parameter values:
- clip(default) = default
- clip(row_sparse, a_min <= 0, a_max >= 0) = row_sparse
- clip(csr, a_min <= 0, a_max >= 0) = csr
- clip(row_sparse, a_min < 0, a_max < 0) = default
- clip(row_sparse, a_min > 0, a_max > 0) = default
- clip(csr, a_min < 0, a_max < 0) = csr
- clip(csr, a_min > 0, a_max > 0) = csr
Defined in ../src/operator/tensor/matrix_op.cc:L676
Parameters
----------
data : Symbol
Input array.
a_min : float, required
Minimum value
a_max : float, required
Maximum value
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def concat(*data, **kwargs):
r"""Joins input arrays along a given axis.
.. note:: `Concat` is deprecated. Use `concat` instead.
The dimensions of the input arrays should be the same except the axis along
which they will be concatenated.
The dimension of the output array along the concatenated axis will be equal
to the sum of the corresponding dimensions of the input arrays.
The storage type of ``concat`` output depends on storage types of inputs
- concat(csr, csr, ..., csr, dim=0) = csr
- otherwise, ``concat`` generates output with default storage
Example::
x = [[1,1],[2,2]]
y = [[3,3],[4,4],[5,5]]
z = [[6,6], [7,7],[8,8]]
concat(x,y,z,dim=0) = [[ 1., 1.],
[ 2., 2.],
[ 3., 3.],
[ 4., 4.],
[ 5., 5.],
[ 6., 6.],
[ 7., 7.],
[ 8., 8.]]
Note that you cannot concat x,y,z along dimension 1 since dimension
0 is not the same for all the input arrays.
concat(y,z,dim=1) = [[ 3., 3., 6., 6.],
[ 4., 4., 7., 7.],
[ 5., 5., 8., 8.]]
Defined in ../src/operator/nn/concat.cc:L384
This function supports a variable number of positional inputs.
Parameters
----------
data : Symbol[]
List of arrays to concatenate
dim : int, optional, default='1'
the dimension along which to concatenate.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def cos(data=None, name=None, attr=None, out=None, **kwargs):
r"""Computes the element-wise cosine of the input array.
The input should be in radians (:math:`2\pi` rad equals 360 degrees).
.. math::
cos([0, \pi/4, \pi/2]) = [1, 0.707, 0]
The storage type of ``cos`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L90
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def cosh(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns the hyperbolic cosine of the input array, computed element-wise.
.. math::
cosh(x) = 0.5\times(exp(x) + exp(-x))
The storage type of ``cosh`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L409
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def degrees(data=None, name=None, attr=None, out=None, **kwargs):
r"""Converts each element of the input array from radians to degrees.
.. math::
degrees([0, \pi/2, \pi, 3\pi/2, 2\pi]) = [0, 90, 180, 270, 360]
The storage type of ``degrees`` output depends upon the input storage type:
- degrees(default) = default
- degrees(row_sparse) = row_sparse
- degrees(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L332
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def dot(lhs=None, rhs=None, transpose_a=_Null, transpose_b=_Null, forward_stype=_Null, name=None, attr=None, out=None, **kwargs):
r"""Dot product of two arrays.
``dot``'s behavior depends on the input array dimensions:
- 1-D arrays: inner product of vectors
- 2-D arrays: matrix multiplication
- N-D arrays: a sum product over the last axis of the first input and the first
axis of the second input
For example, given 3-D ``x`` with shape `(n,m,k)` and ``y`` with shape `(k,r,s)`, the
result array will have shape `(n,m,r,s)`. It is computed by::
dot(x,y)[i,j,a,b] = sum(x[i,j,:]*y[:,a,b])
Example::
x = reshape([0,1,2,3,4,5,6,7], shape=(2,2,2))
y = reshape([7,6,5,4,3,2,1,0], shape=(2,2,2))
dot(x,y)[0,0,1,1] = 0
sum(x[0,0,:]*y[:,1,1]) = 0
The storage type of ``dot`` output depends on storage types of inputs, transpose option and
forward_stype option for output storage type. Implemented sparse operations include:
- dot(default, default, transpose_a=True/False, transpose_b=True/False) = default
- dot(csr, default, transpose_a=True) = default
- dot(csr, default, transpose_a=True) = row_sparse
- dot(csr, default) = default
- dot(csr, row_sparse) = default
- dot(default, csr) = csr (CPU only)
- dot(default, csr, forward_stype='default') = default
- dot(default, csr, transpose_b=True, forward_stype='default') = default
If the combination of input storage types and forward_stype does not match any of the
above patterns, ``dot`` will fallback and generate output with default storage.
.. Note::
If the storage type of the lhs is "csr", the storage type of gradient w.r.t rhs will be
"row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad
and Adam. Note that lazy updates are turned on by default, which may perform differently
from standard updates. For more details, please check the Optimization API at:
https://mxnet.incubator.apache.org/api/python/optimization/optimization.html
Defined in ../src/operator/tensor/dot.cc:L77
Parameters
----------
lhs : Symbol
The first input
rhs : Symbol
The second input
transpose_a : boolean, optional, default=0
If true then transpose the first input before dot.
transpose_b : boolean, optional, default=0
If true then transpose the second input before dot.
forward_stype : {None, 'csr', 'default', 'row_sparse'},optional, default='None'
The desired storage type of the forward output given by the user. If the combination of input storage types and this hint does not match any implemented ones, the dot operator will perform a fallback operation and still produce an output of the desired storage type.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
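# A minimal usage sketch (assumption: the ``mxnet`` package is installed).
# A csr lhs against a dense rhs yields dense output, per the table above:
#
#   import mxnet as mx
#   lhs = mx.sym.Variable('lhs', stype='csr')
#   rhs = mx.sym.Variable('rhs')
#   y = mx.sym.sparse.dot(lhs, rhs)
#   out = y.eval(lhs=mx.nd.array([[0., 1.], [2., 0.]]).tostype('csr'),
#                rhs=mx.nd.array([[1., 2.], [3., 4.]]))[0]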
def elemwise_add(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Adds arguments element-wise.
The storage type of ``elemwise_add`` output depends on storage types of inputs
- elemwise_add(row_sparse, row_sparse) = row_sparse
- elemwise_add(csr, csr) = csr
- elemwise_add(default, csr) = default
- elemwise_add(csr, default) = default
- elemwise_add(default, rsp) = default
- elemwise_add(rsp, default) = default
- otherwise, ``elemwise_add`` generates output with default storage
Parameters
----------
lhs : Symbol
first input
rhs : Symbol
second input
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def elemwise_div(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Divides arguments element-wise.
The storage type of ``elemwise_div`` output is always dense
Parameters
----------
lhs : Symbol
first input
rhs : Symbol
second input
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def elemwise_mul(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Multiplies arguments element-wise.
The storage type of ``elemwise_mul`` output depends on storage types of inputs
- elemwise_mul(default, default) = default
- elemwise_mul(row_sparse, row_sparse) = row_sparse
- elemwise_mul(default, row_sparse) = row_sparse
- elemwise_mul(row_sparse, default) = row_sparse
- elemwise_mul(csr, csr) = csr
- otherwise, ``elemwise_mul`` generates output with default storage
Parameters
----------
lhs : Symbol
first input
rhs : Symbol
second input
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def elemwise_sub(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):
r"""Subtracts arguments element-wise.
The storage type of ``elemwise_sub`` output depends on storage types of inputs
- elemwise_sub(row_sparse, row_sparse) = row_sparse
- elemwise_sub(csr, csr) = csr
- elemwise_sub(default, csr) = default
- elemwise_sub(csr, default) = default
- elemwise_sub(default, rsp) = default
- elemwise_sub(rsp, default) = default
- otherwise, ``elemwise_sub`` generates output with default storage
Parameters
----------
lhs : Symbol
first input
rhs : Symbol
second input
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def exp(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise exponential value of the input.
.. math::
exp(x) = e^x \approx 2.718^x
Example::
exp([0, 1, 2]) = [1., 2.71828175, 7.38905621]
The storage type of ``exp`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L64
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def expm1(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns ``exp(x) - 1`` computed element-wise on the input.
This function provides greater precision than ``exp(x) - 1`` for small values of ``x``.
The storage type of ``expm1`` output depends upon the input storage type:
- expm1(default) = default
- expm1(row_sparse) = row_sparse
- expm1(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L244
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def fix(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise rounded value to the nearest \
integer towards zero of the input.
Example::
fix([-2.1, -1.9, 1.9, 2.1]) = [-2., -1., 1., 2.]
The storage type of ``fix`` output depends upon the input storage type:
- fix(default) = default
- fix(row_sparse) = row_sparse
- fix(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L874
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def floor(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise floor of the input.
The floor of the scalar x is the largest integer i, such that i <= x.
Example::
floor([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-3., -2., 1., 1., 2.]
The storage type of ``floor`` output depends upon the input storage type:
- floor(default) = default
- floor(row_sparse) = row_sparse
- floor(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L836
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def ftrl_update(weight=None, grad=None, z=None, n=None, lr=_Null, lamda1=_Null, beta=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, name=None, attr=None, out=None, **kwargs):
r"""Update function for Ftrl optimizer.
Referenced from *Ad Click Prediction: a View from the Trenches*, available at
http://dl.acm.org/citation.cfm?id=2488200.
It updates the weights using::
rescaled_grad = clip(grad * rescale_grad, clip_gradient)
z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate
n += rescaled_grad**2
w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1)
If w, z and n are all of ``row_sparse`` storage type,
only the row slices whose indices appear in grad.indices are updated (for w, z and n)::
for row in grad.indices:
rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient)
z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate
n[row] += rescaled_grad[row]**2
w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1)
Defined in ../src/operator/optimizer_op.cc:L875
Parameters
----------
weight : Symbol
Weight
grad : Symbol
Gradient
z : Symbol
z
n : Symbol
Square of grad
lr : float, required
Learning rate
lamda1 : float, optional, default=0.00999999978
The L1 regularization coefficient.
beta : float, optional, default=1
Per-Coordinate Learning Rate beta.
wd : float, optional, default=0
Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight.
rescale_grad : float, optional, default=1
Rescale gradient to grad = rescale_grad*grad.
clip_gradient : float, optional, default=-1
Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def gamma(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns the gamma function (extension of the factorial function \
to the reals), computed element-wise on the input array.
The storage type of ``gamma`` output is always dense
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def gammaln(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise log of the absolute value of the gamma function \
of the input.
The storage type of ``gammaln`` output is always dense
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def log(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise Natural logarithmic value of the input.
The natural logarithm is the logarithm in base *e*, so that ``log(exp(x)) = x``
The storage type of ``log`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L77
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def log10(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise Base-10 logarithmic value of the input.
``10**log10(x) = x``
The storage type of ``log10`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L94
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def log1p(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise ``log(1 + x)`` value of the input.
This function is more accurate than ``log(1 + x)`` for small ``x`` so that
:math:`1+x\approx 1`
The storage type of ``log1p`` output depends upon the input storage type:
- log1p(default) = default
- log1p(row_sparse) = row_sparse
- log1p(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L199
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def log2(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise Base-2 logarithmic value of the input.
``2**log2(x) = x``
The storage type of ``log2`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_logexp.cc:L106
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def make_loss(data=None, name=None, attr=None, out=None, **kwargs):
r"""Make your own loss function in network construction.
This operator accepts a customized loss function symbol as a terminal loss and
the symbol should be an operator with no backward dependency.
The output of this function is the gradient of loss with respect to the input data.
For example, if you are making a cross entropy loss function. Assume ``out`` is the
predicted output and ``label`` is the true label, then the cross entropy can be defined as::
cross_entropy = label * log(out) + (1 - label) * log(1 - out)
loss = make_loss(cross_entropy)
We will need to use ``make_loss`` when we are creating our own loss function or we want to
combine multiple loss functions. Also we may want to stop some variables' gradients
from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``.
The storage type of ``make_loss`` output depends upon the input storage type:
- make_loss(default) = default
- make_loss(row_sparse) = row_sparse
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L358
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def mean(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs):
r"""Computes the mean of array elements over given axes.
Defined in ../src/operator/tensor/./broadcast_reduce_op.h:L83
Parameters
----------
data : Symbol
The input
axis : Shape or None, optional, default=None
The axis or axes along which to perform the reduction.
The default, `axis=()`, will compute over all elements into a
scalar array with shape `(1,)`.
If `axis` is int, a reduction is performed on a particular axis.
If `axis` is a tuple of ints, a reduction is performed on all the axes
specified in the tuple.
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Negative values mean indexing from right to left.
keepdims : boolean, optional, default=0
If this is set to `True`, the reduced axes are left in the result as dimension with size one.
exclude : boolean, optional, default=0
Whether to perform reduction on axis that are NOT in axis instead.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def negative(data=None, name=None, attr=None, out=None, **kwargs):
r"""Numerical negative of the argument, element-wise.
The storage type of ``negative`` output depends upon the input storage type:
- negative(default) = default
- negative(row_sparse) = row_sparse
- negative(csr) = csr
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def norm(data=None, ord=_Null, axis=_Null, out_dtype=_Null, keepdims=_Null, name=None, attr=None, out=None, **kwargs):
r"""Computes the norm on an NDArray.
This operator computes the norm on an NDArray with the specified axis, depending
on the value of the ord parameter. By default, it computes the L2 norm on the entire
array. Currently only ord=2 supports sparse ndarrays.
Examples::
x = [[[1, 2],
[3, 4]],
[[2, 2],
[5, 6]]]
norm(x, ord=2, axis=1) = [[3.1622777 4.472136 ]
[5.3851647 6.3245554]]
norm(x, ord=1, axis=1) = [[4., 6.],
[7., 8.]]
rsp = x.cast_storage('row_sparse')
norm(rsp) = [5.47722578]
csr = x.cast_storage('csr')
norm(csr) = [5.47722578]
Defined in ../src/operator/tensor/broadcast_reduce_norm_value.cc:L88
Parameters
----------
data : Symbol
The input
ord : int, optional, default='2'
Order of the norm. Currently ord=1 and ord=2 are supported.
axis : Shape or None, optional, default=None
The axis or axes along which to perform the reduction.
The default, `axis=()`, will compute over all elements into a
scalar array with shape `(1,)`.
If `axis` is int, a reduction is performed on a particular axis.
If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices,
and the matrix norms of these matrices are computed.
out_dtype : {None, 'float16', 'float32', 'float64', 'int32', 'int64', 'int8'},optional, default='None'
The data type of the output.
keepdims : boolean, optional, default=0
If this is set to `True`, the reduced axis is left in the result as dimension with size one.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def radians(data=None, name=None, attr=None, out=None, **kwargs):
r"""Converts each element of the input array from degrees to radians.
.. math::
radians([0, 90, 180, 270, 360]) = [0, \pi/2, \pi, 3\pi/2, 2\pi]
The storage type of ``radians`` output depends upon the input storage type:
- radians(default) = default
- radians(row_sparse) = row_sparse
- radians(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L351
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def relu(data=None, name=None, attr=None, out=None, **kwargs):
r"""Computes rectified linear activation.
.. math::
max(features, 0)
The storage type of ``relu`` output depends upon the input storage type:
- relu(default) = default
- relu(row_sparse) = row_sparse
- relu(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L85
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def retain(data=None, indices=None, name=None, attr=None, out=None, **kwargs):
r"""Pick rows specified by user input index array from a row sparse matrix
and save them in the output sparse matrix.
Example::
data = [[1, 2], [3, 4], [5, 6]]
indices = [0, 1, 3]
shape = (4, 2)
rsp_in = row_sparse_array(data, indices)
to_retain = [0, 3]
rsp_out = retain(rsp_in, to_retain)
rsp_out.data = [[1, 2], [5, 6]]
rsp_out.indices = [0, 3]
The storage type of ``retain`` output depends on storage types of inputs
- retain(row_sparse, default) = row_sparse
- otherwise, ``retain`` is not supported
Defined in ../src/operator/tensor/sparse_retain.cc:L53
Parameters
----------
data : Symbol
The input array for sparse_retain operator.
indices : Symbol
The index array of rows ids that will be retained.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def rint(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise rounded value to the nearest integer of the input.
.. note::
- For input ``n.5`` ``rint`` returns ``n`` while ``round`` returns ``n+1``.
- For input ``-n.5`` both ``rint`` and ``round`` return ``-n-1``.
Example::
rint([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 1., -2., 2., 2.]
The storage type of ``rint`` output depends upon the input storage type:
- rint(default) = default
- rint(row_sparse) = row_sparse
- rint(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L798
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def round(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise rounded value to the nearest integer of the input.
Example::
round([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 2., -2., 2., 2.]
The storage type of ``round`` output depends upon the input storage type:
- round(default) = default
- round(row_sparse) = row_sparse
- round(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L777
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def rsqrt(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise inverse square-root value of the input.
.. math::
rsqrt(x) = 1/\sqrt{x}
Example::
rsqrt([4,9,16]) = [0.5, 0.33333334, 0.25]
The storage type of ``rsqrt`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L221
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def sgd_mom_update(weight=None, grad=None, mom=None, lr=_Null, momentum=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs):
r"""Momentum update function for Stochastic Gradient Descent (SGD) optimizer.
Momentum update has better convergence rates on neural networks. Mathematically it looks
as follows:
.. math::
v_1 = \alpha * \nabla J(W_0)\\
v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\
W_t = W_{t-1} + v_t
It updates the weights using::
v = momentum * v - learning_rate * gradient
weight += v
Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch.
However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and weight's storage
type is the same as momentum's storage type,
only the row slices whose indices appear in grad.indices are updated (for both weight and momentum)::
for row in gradient.indices:
v[row] = momentum[row] * v[row] - learning_rate * gradient[row]
weight[row] += v[row]
Defined in ../src/operator/optimizer_op.cc:L564
Parameters
----------
weight : Symbol
Weight
grad : Symbol
Gradient
mom : Symbol
Momentum
lr : float, required
Learning rate
momentum : float, optional, default=0
The decay rate of momentum estimates at each epoch.
wd : float, optional, default=0
Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight.
rescale_grad : float, optional, default=1
Rescale gradient to grad = rescale_grad*grad.
clip_gradient : float, optional, default=-1
Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
lazy_update : boolean, optional, default=1
If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def sgd_update(weight=None, grad=None, lr=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs):
r"""Update function for Stochastic Gradient Descent (SGD) optimizer.
It updates the weights using::
weight = weight - learning_rate * (gradient + wd * weight)
However, if gradient is of ``row_sparse`` storage type and ``lazy_update`` is True,
only the row slices whose indices appear in grad.indices are updated::
for row in gradient.indices:
weight[row] = weight[row] - learning_rate * (gradient[row] + wd * weight[row])
Defined in ../src/operator/optimizer_op.cc:L523
Parameters
----------
weight : Symbol
Weight
grad : Symbol
Gradient
lr : float, required
Learning rate
wd : float, optional, default=0
Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight.
rescale_grad : float, optional, default=1
Rescale gradient to grad = rescale_grad*grad.
clip_gradient : float, optional, default=-1
Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
lazy_update : boolean, optional, default=1
If true, lazy updates are applied if gradient's stype is row_sparse.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
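# Illustrative note (not part of the generated stub): with learning_rate=0.1,
# wd=0.01, weight=1.0 and gradient=0.5, a single step computes
# weight = 1.0 - 0.1 * (0.5 + 0.01 * 1.0) = 1.0 - 0.051 = 0.949,
# i.e. weight decay just adds wd * weight to the gradient before the update.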
def sigmoid(data=None, name=None, attr=None, out=None, **kwargs):
r"""Computes sigmoid of x element-wise.
.. math::
y = 1 / (1 + exp(-x))
The storage type of ``sigmoid`` output is always dense
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L119
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def sign(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise sign of the input.
Example::
sign([-2, 0, 3]) = [-1, 0, 1]
The storage type of ``sign`` output depends upon the input storage type:
- sign(default) = default
- sign(row_sparse) = row_sparse
- sign(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L758
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def sin(data=None, name=None, attr=None, out=None, **kwargs):
r"""Computes the element-wise sine of the input array.
The input should be in radians (:math:`2\pi` rad equals 360 degrees).
.. math::
sin([0, \pi/4, \pi/2]) = [0, 0.707, 1]
The storage type of ``sin`` output depends upon the input storage type:
- sin(default) = default
- sin(row_sparse) = row_sparse
- sin(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L47
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def sinh(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns the hyperbolic sine of the input array, computed element-wise.
.. math::
sinh(x) = 0.5\times(exp(x) - exp(-x))
The storage type of ``sinh`` output depends upon the input storage type:
- sinh(default) = default
- sinh(row_sparse) = row_sparse
- sinh(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L371
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def slice(data=None, begin=_Null, end=_Null, step=_Null, name=None, attr=None, out=None, **kwargs):
r"""Slices a region of the array.
.. note:: ``crop`` is deprecated. Use ``slice`` instead.
This function returns a sliced array between the indices given
by `begin` and `end` with the corresponding `step`.
For an input array of ``shape=(d_0, d_1, ..., d_n-1)``,
slice operation with ``begin=(b_0, b_1...b_m-1)``,
``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``,
where m <= n, results in an array with the shape
``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``.
The resulting array's *k*-th dimension contains elements
from the *k*-th dimension of the input array starting
from index ``b_k`` (inclusive) with step ``s_k``
until reaching ``e_k`` (exclusive).
If the *k*-th elements are `None` in the sequence of `begin`, `end`,
and `step`, the following rule will be used to set default values.
If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`;
else, set `b_k=d_k-1`, `e_k=-1`.
The storage type of ``slice`` output depends on storage types of inputs
- slice(csr) = csr
- otherwise, ``slice`` generates output with default storage
.. note:: When input data storage type is csr, it only supports
step=(), or step=(None,), or step=(1,) to generate a csr output.
For other step parameter values, it falls back to slicing
a dense tensor.
Example::
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
slice(x, begin=(0,1), end=(2,4)) = [[ 2., 3., 4.],
[ 6., 7., 8.]]
slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = [[9., 11.],
[5., 7.],
[1., 3.]]
Defined in ../src/operator/tensor/matrix_op.cc:L481
Parameters
----------
data : Symbol
Source input
begin : Shape(tuple), required
starting indices for the slice operation, supports negative indices.
end : Shape(tuple), required
ending indices for the slice operation, supports negative indices.
step : Shape(tuple), optional, default=[]
step for the slice operation, supports negative values.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
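# Shape check (illustrative): in the first example above, x has shape (3, 4);
# begin=(0, 1) and end=(2, 4) with the default step s_k=1 give |2-0|/1 = 2 rows
# and |4-1|/1 = 3 columns, matching the (2, 3) output shown.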
def sqrt(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise square-root value of the input.
.. math::
\textrm{sqrt}(x) = \sqrt{x}
Example::
sqrt([4, 9, 16]) = [2, 3, 4]
The storage type of ``sqrt`` output depends upon the input storage type:
- sqrt(default) = default
- sqrt(row_sparse) = row_sparse
- sqrt(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L170
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def square(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns element-wise squared value of the input.
.. math::
square(x) = x^2
Example::
square([2, 3, 4]) = [4, 9, 16]
The storage type of ``square`` output depends upon the input storage type:
- square(default) = default
- square(row_sparse) = row_sparse
- square(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_pow.cc:L119
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def stop_gradient(data=None, name=None, attr=None, out=None, **kwargs):
r"""Stops gradient computation.
Stops the accumulated gradient of the inputs from flowing through this operator
in the backward direction. In other words, this operator prevents the contribution
of its inputs from being taken into account for computing gradients.
Example::
v1 = [1, 2]
v2 = [0, 1]
a = Variable('a')
b = Variable('b')
b_stop_grad = stop_gradient(3 * b)
loss = MakeLoss(b_stop_grad + a)
executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2))
executor.forward(is_train=True, a=v1, b=v2)
executor.outputs
[ 1. 5.]
executor.backward()
executor.grad_arrays
[ 0. 0.]
[ 1. 1.]
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L325
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
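# Illustrative note: in the example above, loss = stop_gradient(3 * b) + a, so
# the forward output is [1, 5] while d(loss)/da = [1, 1] and d(loss)/db = [0, 0];
# the stopped branch contributes to the value but not to the gradients.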
def sum(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs):
r"""Computes the sum of array elements over given axes.
.. Note::
`sum` and `sum_axis` are equivalent.
For ndarray of csr storage type summation along axis 0 and axis 1 is supported.
Setting keepdims or exclude to True will cause a fallback to dense operator.
Example::
data = [[[1, 2], [2, 3], [1, 3]],
[[1, 4], [4, 3], [5, 2]],
[[7, 1], [7, 2], [7, 3]]]
sum(data, axis=1)
[[ 4. 8.]
[ 10. 9.]
[ 21. 6.]]
sum(data, axis=[1,2])
[ 12. 19. 27.]
data = [[1, 2, 0],
[3, 0, 1],
[4, 1, 0]]
csr = cast_storage(data, 'csr')
sum(csr, axis=0)
[ 8. 3. 1.]
sum(csr, axis=1)
[ 3. 4. 5.]
Defined in ../src/operator/tensor/broadcast_reduce_sum_value.cc:L66
Parameters
----------
data : Symbol
The input
axis : Shape or None, optional, default=None
The axis or axes along which to perform the reduction.
The default, `axis=()`, will compute over all elements into a
scalar array with shape `(1,)`.
If `axis` is int, a reduction is performed on a particular axis.
If `axis` is a tuple of ints, a reduction is performed on all the axes
specified in the tuple.
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Negative values means indexing from right to left.
keepdims : boolean, optional, default=0
If this is set to `True`, the reduced axes are left in the result as dimension with size one.
exclude : boolean, optional, default=0
Whether to perform reduction on axis that are NOT in axis instead.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def tan(data=None, name=None, attr=None, out=None, **kwargs):
r"""Computes the element-wise tangent of the input array.
The input should be in radians (:math:`2\pi` rad equals 360 degrees).
.. math::
tan([0, \pi/4, \pi/2]) = [0, 1, -inf]
The storage type of ``tan`` output depends upon the input storage type:
- tan(default) = default
- tan(row_sparse) = row_sparse
- tan(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L140
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def tanh(data=None, name=None, attr=None, out=None, **kwargs):
r"""Returns the hyperbolic tangent of the input array, computed element-wise.
.. math::
tanh(x) = sinh(x) / cosh(x)
The storage type of ``tanh`` output depends upon the input storage type:
- tanh(default) = default
- tanh(row_sparse) = row_sparse
- tanh(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_trig.cc:L451
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def trunc(data=None, name=None, attr=None, out=None, **kwargs):
r"""Return the element-wise truncated value of the input.
The truncated value of the scalar x is the nearest integer i which is closer to
zero than x is. In short, the fractional part of the signed number x is discarded.
Example::
trunc([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 1., 1., 2.]
The storage type of ``trunc`` output depends upon the input storage type:
- trunc(default) = default
- trunc(row_sparse) = row_sparse
- trunc(csr) = csr
Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L856
Parameters
----------
data : Symbol
The input array.
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def where(condition=None, x=None, y=None, name=None, attr=None, out=None, **kwargs):
r"""Return the elements, either from x or y, depending on the condition.
Given three ndarrays, condition, x, and y, return an ndarray with the elements from x or y,
depending on whether the corresponding elements of condition are true or false. x and y must have the same shape.
If condition has the same shape as x, each element in the output array is from x if the
corresponding element in the condition is true, and from y if false.
If condition does not have the same shape as x, it must be a 1D array whose size is
the same as x's first dimension size. Each row of the output array is from x's row
if the corresponding element from condition is true, and from y's row if false.
Note that all non-zero values are interpreted as ``True`` in condition.
Examples::
x = [[1, 2], [3, 4]]
y = [[5, 6], [7, 8]]
cond = [[0, 1], [-1, 0]]
where(cond, x, y) = [[5, 2], [3, 8]]
csr_cond = cast_storage(cond, 'csr')
where(csr_cond, x, y) = [[5, 2], [3, 8]]
Defined in ../src/operator/tensor/control_flow_op.cc:L56
Parameters
----------
condition : Symbol
condition array
x : Symbol
y : Symbol
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
def zeros_like(data=None, name=None, attr=None, out=None, **kwargs):
r"""Return an array of zeros with the same shape, type and storage type
as the input array.
The storage type of ``zeros_like`` output depends on the storage type of the input
- zeros_like(row_sparse) = row_sparse
- zeros_like(csr) = csr
- zeros_like(default) = default
Examples::
x = [[ 1., 1., 1.],
[ 1., 1., 1.]]
zeros_like(x) = [[ 0., 0., 0.],
[ 0., 0., 0.]]
Parameters
----------
data : Symbol
The input
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol.
"""
return (0,)
__all__ = ['ElementWiseSum', 'Embedding', 'FullyConnected', 'LinearRegressionOutput', 'LogisticRegressionOutput', 'MAERegressionOutput', '_contrib_round_ste', '_contrib_sign_ste', 'abs', 'adagrad_update', 'adam_update', 'add_n', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'broadcast_add', 'broadcast_div', 'broadcast_minus', 'broadcast_mul', 'broadcast_plus', 'broadcast_sub', 'cast_storage', 'cbrt', 'ceil', 'clip', 'concat', 'cos', 'cosh', 'degrees', 'dot', 'elemwise_add', 'elemwise_div', 'elemwise_mul', 'elemwise_sub', 'exp', 'expm1', 'fix', 'floor', 'ftrl_update', 'gamma', 'gammaln', 'log', 'log10', 'log1p', 'log2', 'make_loss', 'mean', 'negative', 'norm', 'radians', 'relu', 'retain', 'rint', 'round', 'rsqrt', 'sgd_mom_update', 'sgd_update', 'sigmoid', 'sign', 'sin', 'sinh', 'slice', 'sqrt', 'square', 'stop_gradient', 'sum', 'tan', 'tanh', 'trunc', 'where', 'zeros_like']
|
PypiClean
|
/python-smartqq-client-0.4.10.tar.gz/python-smartqq-client-0.4.10/pyqqclient/WaitForAuthHandler.py
|
import random
from requests import Response
from requests import Session
from .BarcodeExpiredException import BarcodeExpiredException
from .LoginStepHandler import LoginStepHandler
from .PollingHandler import PollingHandler
class WaitForAuthHandler(LoginStepHandler):
def __init__(self, session: Session, barcode_handler=None):
super().__init__(session)
self.barcode_handler = barcode_handler
@staticmethod
def bkn_hash(key, init_str=5381):
hash_str = init_str
for i in key:
hash_str += (hash_str << 5) + ord(i)
hash_str = int(hash_str & 2147483647)
return hash_str
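# Worked example (hypothetical key): bkn_hash is the djb2-style hash
# h = h * 33 + ord(c), masked to 31 bits. The ptqrtoken below uses init_str=0,
# so bkn_hash("abc", init_str=0) evaluates as
# 0 * 33 + 97 = 97, 97 * 33 + 98 = 3299, 3299 * 33 + 99 = 108966 -> 108966.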
def next_step(self, accumulated, last_response: Response) -> ({}, Response):
if self.barcode_handler is not None:
self.barcode_handler(accumulated["login_barcode"])
self.session.headers.update({"Referer": "https://ui.ptlogin2.qq.com/cgi-bin/login?daid=164&target=self&style=16"
"&mibao_css=m_webqq&appid=501004106&enable_qlogin=0&no_verifyimg=1 "
"&s_url=http%3A%2F%2Fw.qq.com%2Fproxy.html&f_url=loginerroralert "
"&strong_login=1&login_state=10&t=20131024001"})
self.session.cookies.update({
'RK': 'OfeLBai4FB',
'pgv_pvi': '911366144',
'pgv_info': 'ssid pgv_pvid=1051433466',
'ptcz': ('ad3bf14f9da2738e09e498bfeb93dd9da7'
'540dea2b7a71acfb97ed4d3da4e277')
})
url = ('https://ssl.ptlogin2.qq.com/ptqrlogin?ptqrtoken=' +
str(WaitForAuthHandler.bkn_hash(self.session.cookies['qrsig'], init_str=0)) +
'&webqq_type=10&remember_uin=1&login2qq=1&aid=501004106' +
'&u1=http%3A%2F%2Fw.qq.com%2Fproxy.html%3Flogin2qq%3D1%26' +
'webqq_type%3D10&ptredirect=0&ptlang=2052&daid=164&' +
'from_ui=1&pttype=1&dumy=&fp=loginerroralert&action=0-0-' +
repr(random.random() * 900000 + 1000000) +
'&mibao_css=m_webqq&t=undefined&g=1&js_type=0' +
'&js_ver=10141&login_sig=&pt_randsalt=0')
def response_handler(response: Response):
login_state = response.content.decode("utf-8")
if "已失效" in login_state:
raise BarcodeExpiredException
if "成功" in login_state:
login_data = login_state.split(",")
nonlocal accumulated
accumulated["login_success_url"] = login_data[2][1:-1]
accumulated["user_name"] = login_data[-1][2:-5]
return True
return False
response = PollingHandler(
lambda: self.session.get(url),
response_handler,
delay=3,
pass_through_exceptions=(BarcodeExpiredException,),
exception_handler=lambda ex: (print(ex), False)[1]
).run()
return accumulated, response
|
PypiClean
|
/paco_cloud-9.3.42-py3-none-any.whl/paco/cftemplates/dashboard.py
|
from paco.cftemplates.cftemplates import StackTemplate
from paco.core.exception import UnsupportedCloudFormationParameterType
import troposphere
import troposphere.cloudwatch
class CloudWatchDashboard(StackTemplate):
def __init__(self, stack, paco_ctx, netenv_name, env_name):
dashboard = stack.resource
super().__init__(stack, paco_ctx)
self.set_aws_name('Dashboard', self.resource_group_name, self.resource.name)
self.init_template('CloudWatch Dashboard')
if not dashboard.is_enabled(): return
# Parameters for variables
if dashboard.variables:
for key, value in dashboard.variables.items():
if type(value) is str:
param_type = 'String'
elif type(value) in (int, float):
param_type = 'Number'
else:
raise UnsupportedCloudFormationParameterType(
"Can not cast {} of type {} to a CloudFormation Parameter type.".format(
value, type(value)
)
)
variable_param = self.create_cfn_parameter(
param_type=param_type,
name=key,
description='Dashboard {} Variable'.format(key),
value=value
)
# Region Parameter
region_param = self.create_cfn_parameter(
param_type='String',
name='AwsRegion',
description='Dashboard Region Variable',
value=self.aws_region
)
# Dashboard resource
dashboard_logical_id = 'Dashboard'
dashboard_name = dashboard.dashboard_name
if dashboard.dashboard_name == None:
dashboard_name = f'{netenv_name.capitalize()}-{env_name.capitalize()}-{dashboard.title_or_name}'
body = troposphere.Sub(dashboard.dashboard_file)
cfn_export_dict = {
'DashboardBody': body,
'DashboardName': dashboard_name
}
dashboard_resource = troposphere.cloudwatch.Dashboard.from_dict(
dashboard_logical_id,
cfn_export_dict
)
self.template.add_resource(dashboard_resource)
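# Usage sketch (hypothetical model values): a dashboard resource with
# variables = {'Stage': 'prod', 'Threshold': 5} yields a String parameter
# 'Stage' and a Number parameter 'Threshold', plus the implicit 'AwsRegion'
# parameter; troposphere.Sub then lets dashboard_file reference them as
# ${Stage}, ${Threshold} or ${AwsRegion} placeholders.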
|
PypiClean
|
/ulars-1.0.9b0-py3-none-any.whl/lars/controllers/parser.py
|
import glob
import yaml
from cement import Controller, ex
from cement.utils.shell import Prompt
from progress.bar import IncrementalBar
from ..utilities.db_context import DbContext
from ..utilities.value_validator import ValueValidator
from ..utilities.sql_builder import SqlBuilder
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
class Parser(Controller):
class Meta:
label = 'parser'
stacked_type = 'embedded'
stacked_on = 'base'
def _default(self):
"""Default action if no sub-command is passed."""
self.app.args.print_help()
@ex(
help='start parsing logs',
arguments=[
(['--id', '-i'],
{'help': 'Application ID',
'action': 'store',
'dest': 'id'}),
(['--name', '-n'],
{'help': 'Application name',
'action': 'store',
'dest': 'name'}),
(['--path', '-p'],
{'help': 'Path to logs folder',
'action': 'store',
'dest': 'path'}),
],
)
def parse(self):
p_id = self.app.pargs.id
p_name = self.app.pargs.name
p_path = self.app.pargs.path
# Validate arguments
if not ValueValidator.single_value(p_id, p_name, p_path):
self.app.log.error('Exactly one of --id, --name or --path must be specified. Use "parse --help" for more information')
return
# Specify log file path
path: str = p_path
if p_id is not None:
p_id = int(p_id)
app = self.app.db.get(doc_id=p_id)
if app is None:
self.app.log.warning(f'Application with id {p_id} not found. Use "list" for more info')
return
path = app['path']
if p_name is not None:
apps = self.app.db.all()
for app in apps:
if app['app_name'] == p_name:
path = app['path']
break
if path is None:
self.app.log.error('Log file path could not be determined')
return
if path[-1] in ('/', '\\'):
path = path[:-1]
# Find .log file
log_filenames = glob.glob(path + '/*.log')
log_filename: str
if len(log_filenames) == 0:
self.app.log.error(f'Not found any .log files in specified path: {path}')
return
elif len(log_filenames) == 1:
log_filename = log_filenames[0]
else:
prompt = Prompt('Chose .log file to parse',
options=log_filenames,
numbered=True,
)
log_filename = prompt.input
self.app.log.info(f'Log file: {log_filename}')
# Find 'lars.yml' file
lars_config_filenames = glob.glob(path + '/lars.yml')
if len(lars_config_filenames) == 0:
self.app.log.error(f'Not found "lars.yml" file in specified path: {path}')
return
else:
lars_config_filename = lars_config_filenames[0]
self.app.log.info(f'Config file: {lars_config_filename}')
# Read lars config
with open(lars_config_filename, 'r', encoding='utf8') as lars_config_file:
config = yaml.load(lars_config_file, Loader)
headers = config.get('headers', None)
primary_key = config.get('primary_key', None)
table_name = config.get('table_name', None)
separator = config.get('separator', None)
encoding = config.get('encoding', None)
db_filename = config.get('db_filename', None)
if not ValueValidator.all_values(headers, primary_key, table_name, separator, encoding, db_filename):
self.app.log.error('Error reading config file, please, use following example to create config file:')
self.app.log.warning('\rheaders:\n' +
' - guid\n' +
' - log_date\n' +
' - log_level\n' +
' - logger_name\n' +
' - msg\n' +
'primary_key: "guid"\n' +
'table_name: "logs"\n' +
'separator: " | "\n' +
'encoding: "utf8"\n' +
'db_filename: "logs.sqlite3"')
return
headers_count = len(headers)
self.app.log.info(f'Headers: {headers}')
self.app.log.info(f'Primary key: {primary_key}')
# Read logs from file into array
with open(log_filename, 'r', encoding=encoding) as log_file:
log_array = log_file.read().split('\n')
logs_count = len(log_array)
if logs_count == 0:
self.app.log.warning('Log file is empty!')
return
self.app.log.info(f'Logs count: {logs_count}')
# Init database
# noinspection PyBroadException
try:
db = DbContext(path, db_filename, primary_key, SqlBuilder(table_name, headers, primary_key))
except Exception as e:
self.app.log.error(f'Error occurred during database connection establishment: {e}')
return
# Init progress bar
progress_bar = IncrementalBar('Processing: ', max=logs_count)
# Parse logs to sqlite3
for i in range(logs_count):
progress_bar.next()
log = log_array[i]
if len(log) == 0:
continue
splitted_log = log.split(separator)
if len(splitted_log) != headers_count:
progress_bar.finish()
self.app.log.error(f'Column count of some logs does not match headers length!\n'
f'Log: {log}')
return
log_dict = dict()
for j in range(headers_count):
log_dict[headers[j]] = splitted_log[j]
# noinspection PyBroadException
try:
db.insert_log(log_dict)
except Exception as e:
progress_bar.finish()
self.app.log.error(f'Error inserting log to database: {e}')
return
# Dispose objects
progress_bar.finish()
db.dispose()
self.app.log.info('Finished parsing logs!')
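# Usage sketch (hypothetical invocations; the executable name depends on how
# the cement app is installed):
# lars parse --path /var/log/myapp    # parse logs found in a folder
# lars parse --id 3                   # parse logs of a registered application
# lars parse --name myapp             # look the folder up by application name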
|
PypiClean
|
/js.ckeditor-3.6.6.tar.gz/js.ckeditor-3.6.6/js/ckeditor/resources/lang/fa.js
|
/*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
/**
* @fileOverview Defines the {@link CKEDITOR.lang} object for the
* Persian language.
*/
/**#@+
@type String
@example
*/
/**
* Contains the dictionary of language entries.
* @namespace
*/
CKEDITOR.lang['fa'] =
{
/**
* The language reading direction. Possible values are "rtl" for
* Right-To-Left languages (like Arabic) and "ltr" for Left-To-Right
* languages (like English).
* @default 'ltr'
*/
dir : 'rtl',
/*
* Screenreader titles. Please note that screenreaders are not always capable
* of reading non-English words. So be careful while translating it.
*/
editorTitle : 'ویرایشگر متن غنی, %1',
editorHelp : 'کلید Alt+0 را برای راهنمایی بفشارید',
// ARIA descriptions.
toolbars : 'نوار ابزار',
editor : 'ویرایشگر متن غنی',
// Toolbar buttons without dialogs.
source : 'منبع',
newPage : 'برگهٴ تازه',
save : 'ذخیره',
preview : 'پیشنمایش',
cut : 'برش',
copy : 'کپی',
paste : 'چسباندن',
print : 'چاپ',
underline : 'زیرخطدار',
bold : 'درشت',
italic : 'خمیده',
selectAll : 'گزینش همه',
removeFormat : 'برداشتن فرمت',
strike : 'میانخط',
subscript : 'زیرنویس',
superscript : 'بالانویس',
horizontalrule : 'گنجاندن خط افقی',
pagebreak : 'گنجاندن شکستگی پایان برگه',
pagebreakAlt : 'شکستن صفحه',
unlink : 'برداشتن پیوند',
undo : 'واچیدن',
redo : 'بازچیدن',
// Common messages and labels.
common :
{
browseServer : 'فهرستنمایی سرور',
url : 'URL',
protocol : 'پروتکل',
upload : 'انتقال به سرور',
uploadSubmit : 'به سرور بفرست',
image : 'تصویر',
flash : 'فلش',
form : 'فرم',
checkbox : 'خانهٴ گزینهای',
radio : 'دکمهٴ رادیویی',
textField : 'فیلد متنی',
textarea : 'ناحیهٴ متنی',
hiddenField : 'فیلد پنهان',
button : 'دکمه',
select : 'فیلد چندگزینهای',
imageButton : 'دکمهٴ تصویری',
notSet : '<تعین نشده>',
id : 'شناسه',
name : 'نام',
langDir : 'جهتنمای زبان',
langDirLtr : 'چپ به راست (LTR)',
langDirRtl : 'راست به چپ (RTL)',
langCode : 'کد زبان',
longDescr : 'URL توصیف طولانی',
cssClass : 'کلاسهای شیوهنامه(Stylesheet)',
advisoryTitle : 'عنوان کمکی',
cssStyle : 'شیوه(style)',
ok : 'پذیرش',
cancel : 'انصراف',
close : 'بستن',
preview : 'پیشنمایش',
generalTab : 'عمومی',
advancedTab : 'پیشرفته',
validateNumberFailed : 'این مقدار یک عدد نیست.',
confirmNewPage : 'هر تغییر ایجاد شدهی ذخیره نشده از بین خواهد رفت. آیا اطمینان دارید که قصد بارگیری صفحه جدیدی را دارید؟',
confirmCancel : 'برخی از گزینهها تغییر کردهاند. آیا واقعا قصد بستن این پنجره را دارید؟',
options : 'گزینهها',
target : 'مسیر',
targetNew : 'پنجره جدید (_blank)',
targetTop : 'بالاترین پنجره (_top)',
targetSelf : 'همان پنجره (_self)',
targetParent : 'پنجره والد (_parent)',
langDirLTR : 'چپ به راست (LTR)',
langDirRTL : 'راست به چپ (RTL)',
styles : 'سبک',
cssClasses : 'کلاسهای شیوهنامه',
width : 'پهنا',
height : 'درازا',
align : 'چینش',
alignLeft : 'چپ',
alignRight : 'راست',
alignCenter : 'وسط',
alignTop : 'بالا',
alignMiddle : 'وسط',
alignBottom : 'پائین',
invalidValue : 'Invalid value.', // MISSING
invalidHeight : 'ارتفاع باید یک عدد باشد.',
invalidWidth : 'پهنا باید یک عدد باشد.',
invalidCssLength : 'عدد تعیین شده برای فیلد "%1" باید یک عدد مثبت با یا بدون یک واحد اندازه گیری CSS معتبر باشد (px, %, in, cm, mm, em, ex, pt, or pc).',
invalidHtmlLength : 'عدد تعیین شده برای فیلد "%1" باید یک عدد مثبت با یا بدون یک واحد اندازه گیری HTML معتبر باشد (px or %).',
invalidInlineStyle : 'عدد تعیین شده برای سبک درونخطی(Inline Style) باید دارای یک یا چند چندتایی با شکلی شبیه "name : value" که باید با یک ","(semi-colons) از هم جدا شوند.',
cssLengthTooltip : 'یک عدد برای یک مقدار بر حسب پیکسل و یا یک عدد با یک واحد CSS معتبر وارد کنید (px, %, in, cm, mm, em, ex, pt, or pc).',
// Put the voice-only part of the label in the span.
unavailable : '%1<span class="cke_accessibility">، غیر قابل دسترس</span>'
},
contextmenu :
{
options : 'گزینههای منوی زمینه'
},
// Special char dialog.
specialChar :
{
toolbar : 'گنجاندن نویسهٴ ویژه',
title : 'گزینش نویسهٴ ویژه',
options : 'گزینههای نویسههای ویژه'
},
// Link dialog.
link :
{
toolbar : 'گنجاندن/ویرایش پیوند',
other : '<سایر>',
menu : 'ویرایش پیوند',
title : 'پیوند',
info : 'اطلاعات پیوند',
target : 'مقصد',
upload : 'انتقال به سرور',
advanced : 'پیشرفته',
type : 'نوع پیوند',
toUrl : 'URL',
toAnchor : 'لنگر در همین صفحه',
toEmail : 'پست الکترونیکی',
targetFrame : '<فریم>',
targetPopup : '<پنجرهٴ پاپاپ>',
targetFrameName : 'نام فریم مقصد',
targetPopupName : 'نام پنجرهٴ پاپاپ',
popupFeatures : 'ویژگیهای پنجرهٴ پاپاپ',
popupResizable : 'قابل تغییر اندازه',
popupStatusBar : 'نوار وضعیت',
popupLocationBar: 'نوار موقعیت',
popupToolbar : 'نوارابزار',
popupMenuBar : 'نوار منو',
popupFullScreen : 'تمامصفحه (IE)',
popupScrollBars : 'میلههای پیمایش',
popupDependent : 'وابسته (Netscape)',
popupLeft : 'موقعیت چپ',
popupTop : 'موقعیت بالا',
id : 'شناسه',
langDir : 'جهتنمای زبان',
langDirLTR : 'چپ به راست (LTR)',
langDirRTL : 'راست به چپ (RTL)',
acccessKey : 'کلید دستیابی',
name : 'نام',
langCode : 'جهتنمای زبان',
tabIndex : 'نمایهٴ دسترسی با برگه',
advisoryTitle : 'عنوان کمکی',
advisoryContentType : 'نوع محتوای کمکی',
cssClasses : 'کلاسهای شیوهنامه(Stylesheet)',
charset : 'نویسهگان منبع پیوند شده',
styles : 'شیوه(style)',
rel : 'وابستگی',
selectAnchor : 'یک لنگر برگزینید',
anchorName : 'با نام لنگر',
anchorId : 'با شناسهٴ المان',
emailAddress : 'نشانی پست الکترونیکی',
emailSubject : 'موضوع پیام',
emailBody : 'متن پیام',
noAnchors : '(در این سند لنگری دردسترس نیست)',
noUrl : 'لطفا URL پیوند را بنویسید',
noEmail : 'لطفا نشانی پست الکترونیکی را بنویسید'
},
// Anchor dialog
anchor :
{
toolbar : 'گنجاندن/ویرایش لنگر',
menu : 'ویژگیهای لنگر',
title : 'ویژگیهای لنگر',
name : 'نام لنگر',
errorName : 'لطفا نام لنگر را بنویسید',
remove : 'حذف لنگر'
},
// List style dialog
list:
{
numberedTitle : 'ویژگیهای فهرست شمارهدار',
bulletedTitle : 'ویژگیهای فهرست گلولهدار',
type : 'نوع',
start : 'شروع',
validateStartNumber :'فهرست شماره شروع باید یک عدد صحیح باشد.',
circle : 'دایره',
disc : 'صفحه گرد',
square : 'چهارگوش',
none : 'هیچ',
notset : '<تنظیم نشده>',
armenian : 'شمارهگذاری ارمنی',
georgian : 'شمارهگذاری گریگورین (an, ban, gan, etc.)',
lowerRoman : 'پانویس رومی (i, ii, iii, iv, v, etc.)',
upperRoman : 'بالانویس رومی (I, II, III, IV, V, etc.)',
lowerAlpha : 'پانویس الفبایی (a, b, c, d, e, etc.)',
upperAlpha : 'بالانویس الفبایی (A, B, C, D, E, etc.)',
lowerGreek : 'پانویس یونانی (alpha, beta, gamma, etc.)',
decimal : 'دهدهی (1, 2, 3, etc.)',
decimalLeadingZero : 'دهدهی همراه با صفر (01, 02, 03, etc.)'
},
// Find And Replace Dialog
findAndReplace :
{
title : 'جستجو و جایگزینی',
find : 'جستجو',
replace : 'جایگزینی',
findWhat : 'چه چیز را مییابید:',
replaceWith : 'جایگزینی با:',
notFoundMsg : 'متن موردنظر یافت نشد.',
findOptions : 'گزینههای جستجو',
matchCase : 'همسانی در بزرگی و کوچکی نویسهها',
matchWord : 'همسانی با واژهٴ کامل',
matchCyclic : 'همسانی با چرخه',
replaceAll : 'جایگزینی همهٴ یافتهها',
replaceSuccessMsg : '%1 رخداد جایگزین شد.'
},
// Table Dialog
table :
{
toolbar : 'جدول',
title : 'ویژگیهای جدول',
menu : 'ویژگیهای جدول',
deleteTable : 'پاک کردن جدول',
rows : 'سطرها',
columns : 'ستونها',
border : 'اندازهٴ لبه',
widthPx : 'پیکسل',
widthPc : 'درصد',
widthUnit : 'واحد پهنا',
cellSpace : 'فاصلهٴ میان سلولها',
cellPad : 'فاصلهٴ پرشده در سلول',
caption : 'عنوان',
summary : 'خلاصه',
headers : 'سرنویسها',
headersNone : 'هیچ',
headersColumn : 'اولین ستون',
headersRow : 'اولین ردیف',
headersBoth : 'هردو',
invalidRows : 'تعداد ردیفها باید یک عدد بزرگتر از 0 باشد.',
invalidCols : 'تعداد ستونها باید یک عدد بزرگتر از 0 باشد.',
invalidBorder : 'مقدار اندازه خطوط باید یک عدد باشد.',
invalidWidth : 'مقدار پهنای جدول باید یک عدد باشد.',
invalidHeight : 'مقدار ارتفاع جدول باید یک عدد باشد.',
invalidCellSpacing : 'مقدار فاصلهگذاری سلول باید یک عدد باشد.',
invalidCellPadding : 'بالشتک سلول باید یک عدد باشد.',
cell :
{
menu : 'سلول',
insertBefore : 'افزودن سلول قبل از',
insertAfter : 'افزودن سلول بعد از',
deleteCell : 'حذف سلولها',
merge : 'ادغام سلولها',
mergeRight : 'ادغام به راست',
mergeDown : 'ادغام به پایین',
splitHorizontal : 'جدا کردن افقی سلول',
splitVertical : 'جدا کردن عمودی سلول',
title : 'ویژگیهای سلول',
cellType : 'نوع سلول',
rowSpan : 'محدوده ردیفها',
colSpan : 'محدوده ستونها',
wordWrap : 'شکستن کلمه',
hAlign : 'چینش افقی',
vAlign : 'چینش عمودی',
alignBaseline : 'خط مبنا',
bgColor : 'رنگ زمینه',
borderColor : 'رنگ خطوط',
data : 'اطلاعات',
header : 'سرنویس',
yes : 'بله',
no : 'خیر',
invalidWidth : 'عرض سلول باید یک عدد باشد.',
invalidHeight : 'ارتفاع سلول باید عدد باشد.',
invalidRowSpan : 'مقدار محدوده ردیفها باید یک عدد باشد.',
invalidColSpan : 'مقدار محدوده ستونها باید یک عدد باشد.',
chooseColor : 'انتخاب'
},
row :
{
menu : 'سطر',
insertBefore : 'افزودن سطر قبل از',
insertAfter : 'افزودن سطر بعد از',
deleteRow : 'حذف سطرها'
},
column :
{
menu : 'ستون',
insertBefore : 'افزودن ستون قبل از',
insertAfter : 'افزودن ستون بعد از',
deleteColumn : 'حذف ستونها'
}
},
// Button Dialog.
button :
{
title : 'ویژگیهای دکمه',
text : 'متن (مقدار)',
type : 'نوع',
typeBtn : 'دکمه',
typeSbm : 'ثبت',
typeRst : 'بازنشانی (Reset)'
},
// Checkbox and Radio Button Dialogs.
checkboxAndRadio :
{
checkboxTitle : 'ویژگیهای خانهٴ گزینهای',
radioTitle : 'ویژگیهای دکمهٴ رادیویی',
value : 'مقدار',
selected : 'برگزیده'
},
// Form Dialog.
form :
{
title : 'ویژگیهای فرم',
menu : 'ویژگیهای فرم',
action : 'رویداد',
method : 'متد',
encoding : 'رمزنگاری'
},
// Select Field Dialog.
select :
{
title : 'ویژگیهای فیلد چندگزینهای',
selectInfo : 'اطلاعات',
opAvail : 'گزینههای دردسترس',
value : 'مقدار',
size : 'اندازه',
lines : 'خطوط',
chkMulti : 'گزینش چندگانه فراهم باشد',
opText : 'متن',
opValue : 'مقدار',
btnAdd : 'افزودن',
btnModify : 'ویرایش',
btnUp : 'بالا',
btnDown : 'پائین',
btnSetValue : 'تنظیم به عنوان مقدار برگزیده',
btnDelete : 'پاککردن'
},
// Textarea Dialog.
textarea :
{
title : 'ویژگیهای ناحیهٴ متنی',
cols : 'ستونها',
rows : 'سطرها'
},
// Text Field Dialog.
textfield :
{
title : 'ویژگیهای فیلد متنی',
name : 'نام',
value : 'مقدار',
charWidth : 'پهنای نویسه',
maxChars : 'بیشینهٴ نویسهها',
type : 'نوع',
typeText : 'متن',
typePass : 'گذرواژه'
},
// Hidden Field Dialog.
hidden :
{
title : 'ویژگیهای فیلد پنهان',
name : 'نام',
value : 'مقدار'
},
// Image Dialog.
image :
{
title : 'ویژگیهای تصویر',
titleButton : 'ویژگیهای دکمهٴ تصویری',
menu : 'ویژگیهای تصویر',
infoTab : 'اطلاعات تصویر',
btnUpload : 'به سرور بفرست',
upload : 'انتقال به سرور',
alt : 'متن جایگزین',
lockRatio : 'قفل کردن نسبت',
resetSize : 'بازنشانی اندازه',
border : 'لبه',
hSpace : 'فاصلهٴ افقی',
vSpace : 'فاصلهٴ عمودی',
alertUrl : 'لطفا URL تصویر را بنویسید',
linkTab : 'پیوند',
button2Img : 'آیا مایلید از یک تصویر ساده روی دکمه تصویری انتخاب شده استفاده کنید؟',
img2Button : 'آیا مایلید از یک دکمه تصویری روی تصویر انتخاب شده استفاده کنید؟',
urlMissing : 'آدرس URL اصلی تصویر یافت نشد.',
validateBorder : 'مقدار خطوط باید یک عدد باشد.',
validateHSpace : 'مقدار فاصلهگذاری افقی باید یک عدد باشد.',
validateVSpace : 'مقدار فاصلهگذاری عمودی باید یک عدد باشد.'
},
// Flash Dialog
flash :
{
properties : 'ویژگیهای فلش',
propertiesTab : 'ویژگیها',
title : 'ویژگیهای فلش',
chkPlay : 'آغاز خودکار',
chkLoop : 'اجرای پیاپی',
chkMenu : 'در دسترس بودن منوی فلش',
chkFull : 'اجازه تمام صفحه',
scale : 'مقیاس',
scaleAll : 'نمایش همه',
scaleNoBorder : 'بدون کران',
scaleFit : 'جایگیری کامل',
access : 'دسترسی به اسکریپت',
accessAlways : 'همیشه',
accessSameDomain: 'همان دامنه',
accessNever : 'هرگز',
alignAbsBottom : 'پائین مطلق',
alignAbsMiddle : 'وسط مطلق',
alignBaseline : 'خط پایه',
alignTextTop : 'متن بالا',
quality : 'کیفیت',
qualityBest : 'بهترین',
qualityHigh : 'بالا',
qualityAutoHigh : 'بالا - خودکار',
qualityMedium : 'متوسط',
qualityAutoLow : 'پایین - خودکار',
qualityLow : 'پایین',
windowModeWindow: 'پنجره',
windowModeOpaque: 'مات',
windowModeTransparent : 'شفاف',
windowMode : 'حالت پنجره',
flashvars : 'مقادیر برای فلش',
bgcolor : 'رنگ پسزمینه',
hSpace : 'فاصلهٴ افقی',
vSpace : 'فاصلهٴ عمودی',
validateSrc : 'لطفا URL پیوند را بنویسید',
validateHSpace : 'مقدار فاصلهگذاری افقی باید یک عدد باشد.',
validateVSpace : 'مقدار فاصلهگذاری عمودی باید یک عدد باشد.'
},
// Speller Pages Dialog
spellCheck :
{
toolbar : 'بررسی املا',
title : 'بررسی املا',
notAvailable : 'با عرض پوزش خدمات الان در دسترس نیستند.',
errorLoading : 'خطا در بارگیری برنامه خدمات میزبان: %s.',
notInDic : 'در واژه~نامه یافت نشد',
changeTo : 'تغییر به',
btnIgnore : 'چشمپوشی',
btnIgnoreAll : 'چشمپوشی همه',
btnReplace : 'جایگزینی',
btnReplaceAll : 'جایگزینی همه',
btnUndo : 'واچینش',
noSuggestions : '- پیشنهادی نیست -',
progress : 'بررسی املا در حال انجام...',
noMispell : 'بررسی املا انجام شد. هیچ غلط املائی یافت نشد',
noChanges : 'بررسی املا انجام شد. هیچ واژهای تغییر نیافت',
oneChange : 'بررسی املا انجام شد. یک واژه تغییر یافت',
manyChanges : 'بررسی املا انجام شد. %1 واژه تغییر یافت',
ieSpellDownload : 'بررسی کنندهٴ املا نصب نشده است. آیا میخواهید آن را هماکنون دریافت کنید؟'
},
smiley :
{
toolbar : 'خندانک',
title : 'گنجاندن خندانک',
options : 'گزینههای خندانک'
},
elementsPath :
{
eleLabel : 'مسیر عناصر',
eleTitle : '%1 عنصر'
},
numberedlist : 'فهرست شمارهدار',
bulletedlist : 'فهرست نقطهای',
indent : 'افزایش تورفتگی',
outdent : 'کاهش تورفتگی',
justify :
{
left : 'چپچین',
center : 'میانچین',
right : 'راستچین',
block : 'بلوکچین'
},
blockquote : 'بلوک نقل قول',
clipboard :
{
title : 'چسباندن',
cutError : 'تنظیمات امنیتی مرورگر شما اجازه نمیدهد که ویرایشگر به طور خودکار عملکردهای برش را انجام دهد. لطفا با دکمههای صفحه کلید این کار را انجام دهید (Ctrl/Cmd+X).',
copyError : 'تنظیمات امنیتی مرورگر شما اجازه نمیدهد که ویرایشگر به طور خودکار عملکردهای کپی کردن را انجام دهد. لطفا با دکمههای صفحه کلید این کار را انجام دهید (Ctrl/Cmd+C).',
pasteMsg : 'لطفا متن را با کلیدهای (<STRONG>Ctrl/Cmd+V</STRONG>) در این جعبهٴ متنی بچسبانید و <STRONG>پذیرش</STRONG> را بزنید.',
securityMsg : 'به خاطر تنظیمات امنیتی مرورگر شما، ویرایشگر نمیتواند دسترسی مستقیم به دادههای clipboard داشته باشد. شما باید دوباره آنرا در این پنجره بچسبانید.',
pasteArea : 'محل چسباندن'
},
pastefromword :
{
confirmCleanup : 'متنی که میخواهید بچسبانید به نظر میرسد که از Word کپی شده است. آیا میخواهید قبل از چسباندن آن را پاکسازی کنید؟',
toolbar : 'چسباندن از Word',
title : 'چسباندن از Word',
error : 'به دلیل بروز خطای داخلی امکان پاکسازی اطلاعات بازنشانی شده وجود ندارد.'
},
pasteText :
{
button : 'چسباندن به عنوان متن ِساده',
title : 'چسباندن به عنوان متن ِساده'
},
templates :
{
button : 'الگوها',
title : 'الگوهای محتویات',
options : 'گزینههای الگو',
insertOption : 'محتویات کنونی جایگزین شوند',
selectPromptMsg : 'لطفا الگوی موردنظر را برای بازکردن در ویرایشگر برگزینید<br>(محتویات کنونی از دست خواهند رفت):',
emptyListMsg : '(الگوئی تعریف نشده است)'
},
showBlocks : 'نمایش بلوکها',
stylesCombo :
{
label : 'سبک',
panelTitle : 'سبکهای قالببندی',
panelTitle1 : 'سبکهای بلوک',
panelTitle2 : 'سبکهای درونخطی',
panelTitle3 : 'سبکهای شیء'
},
format :
{
label : 'فرمت',
panelTitle : 'فرمت',
tag_p : 'نرمال',
tag_pre : 'فرمت شده',
tag_address : 'آدرس',
tag_h1 : 'سرنویس 1',
tag_h2 : 'سرنویس 2',
tag_h3 : 'سرنویس 3',
tag_h4 : 'سرنویس 4',
tag_h5 : 'سرنویس 5',
tag_h6 : 'سرنویس 6',
tag_div : 'بند'
},
div :
{
title : 'ایجاد یک محل DIV',
toolbar : 'ایجاد یک محل DIV',
cssClassInputLabel : 'کلاسهای شیوهنامه',
styleSelectLabel : 'سبک',
IdInputLabel : 'شناسه',
languageCodeInputLabel : ' کد زبان',
inlineStyleInputLabel : 'سبک درونخطی(Inline Style)',
advisoryTitleInputLabel : 'عنوان مشاوره',
langDirLabel : 'جهت نوشتاری زبان',
langDirLTRLabel : 'چپ به راست (LTR)',
langDirRTLLabel : 'راست به چپ (RTL)',
edit : 'ویرایش Div',
remove : 'حذف Div'
},
iframe :
{
title : 'ویژگیهای IFrame',
toolbar : 'IFrame',
noUrl : 'لطفا مسیر URL iframe را درج کنید',
scrolling : 'نمایش خطکشها',
border : 'نمایش خطوط frame'
},
font :
{
label : 'قلم',
voiceLabel : 'قلم',
panelTitle : 'قلم'
},
fontSize :
{
label : 'اندازه',
voiceLabel : 'اندازه قلم',
panelTitle : 'اندازه'
},
colorButton :
{
textColorTitle : 'رنگ متن',
bgColorTitle : 'رنگ پسزمینه',
panelTitle : 'رنگها',
auto : 'خودکار',
more : 'رنگهای بیشتر...'
},
colors :
{
'000' : 'سیاه',
'800000' : 'خرمایی',
'8B4513' : 'قهوهای شکلاتی',
'2F4F4F' : 'ارغوانی مایل به خاکستری',
'008080' : 'آبی مایل به خاکستری',
'000080' : 'آبی سیر',
'4B0082' : 'نیلی',
'696969' : 'خاکستری تیره',
'B22222' : 'آتش آجری',
'A52A2A' : 'قهوهای',
'DAA520' : 'میلهی طلایی',
'006400' : 'سبز تیره',
'40E0D0' : 'فیروزهای',
'0000CD' : 'آبی روشن',
'800080' : 'ارغوانی',
'808080' : 'خاکستری',
'F00' : 'قرمز',
'FF8C00' : 'نارنجی پررنگ',
'FFD700' : 'طلایی',
'008000' : 'سبز',
'0FF' : 'آبی مایل به سبز',
'00F' : 'آبی',
'EE82EE' : 'بنفش',
'A9A9A9' : 'خاکستری مات',
'FFA07A' : 'صورتی کدر روشن',
'FFA500' : 'نارنجی',
'FFFF00' : 'زرد',
'00FF00' : 'فسفری',
'AFEEEE' : 'فیروزهای رنگ پریده',
'ADD8E6' : 'آبی کمرنگ',
'DDA0DD' : 'آلویی',
'D3D3D3' : 'خاکستری روشن',
'FFF0F5' : 'بنفش کمرنگ',
'FAEBD7' : 'عتیقه سفید',
'FFFFE0' : 'زرد روشن',
'F0FFF0' : 'عسلی',
'F0FFFF' : 'لاجوردی',
'F0F8FF' : 'آبی براق',
'E6E6FA' : 'بنفش کمرنگ',
'FFF' : 'سفید'
},
scayt :
{
title : 'بررسی املای تایپ شما',
opera_title : 'توسط اپرا پشتیبانی نمیشود',
enable : 'فعالسازی SCAYT',
disable : 'غیرفعالسازی SCAYT',
about : 'درباره SCAYT',
toggle : 'ضامن SCAYT',
options : 'گزینهها',
langs : 'زبانها',
moreSuggestions : 'پیشنهادهای بیشتر',
ignore : 'عبور کردن',
ignoreAll : 'عبور کردن از همه',
addWord : 'افزودن Word',
emptyDic : 'نام دیکشنری نباید خالی باشد.',
noSuggestions : 'No suggestions', // MISSING
optionsTab : 'گزینهها',
allCaps : 'نادیده گرفتن همه کلاه-واژهها',
ignoreDomainNames : 'عبور از نامهای دامنه',
mixedCase : 'عبور از کلماتی مرکب از حروف بزرگ و کوچک',
mixedWithDigits : 'عبور از کلمات به همراه عدد',
languagesTab : 'زبانها',
dictionariesTab : 'دیکشنریها',
dic_field_name : 'نام دیکشنری',
dic_create : 'ایجاد',
dic_restore : 'بازیافت',
dic_delete : 'حذف',
dic_rename : 'تغییر نام',
dic_info : 'در ابتدا دیکشنری کاربر در کوکی ذخیره میشود. با این حال، کوکیها در اندازه محدود شدهاند. وقتی که دیکشنری کاربری بزرگ میشود و به نقطهای که نمیتواند در کوکی ذخیره شود، پس از آن دیکشنری ممکن است بر روی سرور ما ذخیره شود. برای ذخیره دیکشنری شخصی شما بر روی سرور ما، باید یک نام برای دیکشنری خود مشخص نمایید. اگر شما قبلا یک دیکشنری روی سرور ما ذخیره کردهاید، لطفا نام آنرا درج و روی دکمه بازیافت کلیک نمایید.',
aboutTab : 'درباره'
},
about :
{
title : 'درباره CKEditor',
dlgTitle : 'درباره CKEditor',
help : 'بررسی $1 برای راهنمایی.',
userGuide : 'راهنمای کاربران CKEditor',
moreInfo : 'برای کسب اطلاعات مجوز لطفا به وب سایت ما مراجعه کنید:',
copy : 'حق نشر © $1. کلیه حقوق محفوظ است.'
},
maximize : 'حداکثر کردن',
minimize : 'حداقل کردن',
fakeobjects :
{
anchor : 'لنگر',
flash : 'انیمشن فلش',
iframe : 'IFrame',
hiddenfield : 'فیلد پنهان',
unknown : 'شیء ناشناخته'
},
resize : 'کشیدن برای تغییر اندازه',
colordialog :
{
title : 'انتخاب رنگ',
options : 'گزینههای رنگ',
highlight : 'متمایز',
selected : 'رنگ انتخاب شده',
clear : 'پاک کردن'
},
toolbarCollapse : 'بستن نوار ابزار',
toolbarExpand : 'بازکردن نوار ابزار',
toolbarGroups :
{
document : 'سند',
clipboard : 'حافظه موقت/برگشت',
editing : 'در حال ویرایش',
forms : 'فرمها',
basicstyles : 'شیوههای پایه',
paragraph : 'بند',
links : 'پیوندها',
insert : 'ورود',
styles : 'شیوهها',
colors : 'رنگها',
tools : 'ابزارها'
},
bidi :
{
ltr : 'نوشتار متن از چپ به راست',
rtl : 'نوشتار متن از راست به چپ'
},
docprops :
{
label : 'ویژگیهای سند',
title : 'ویژگیهای سند',
design : 'طراحی',
meta : 'فراداده',
chooseColor : 'انتخاب',
other : '<سایر>',
docTitle : 'عنوان صفحه',
charset : 'رمزگذاری نویسهگان',
charsetOther : 'رمزگذاری نویسهگان دیگر',
charsetASCII : 'ASCII',
charsetCE : 'اروپای مرکزی',
charsetCT : 'چینی رسمی (Big5)',
charsetCR : 'سیریلیک',
charsetGR : 'یونانی',
charsetJP : 'ژاپنی',
charsetKR : 'کرهای',
charsetTR : 'ترکی',
charsetUN : 'یونیکُد (UTF-8)',
charsetWE : 'اروپای غربی',
docType : 'عنوان نوع سند',
docTypeOther : 'عنوان نوع سند دیگر',
xhtmlDec : 'شامل تعاریف XHTML',
bgColor : 'رنگ پسزمینه',
bgImage : 'URL تصویر پسزمینه',
bgFixed : 'پسزمینهٴ پیمایش ناپذیر',
txtColor : 'رنگ متن',
margin : 'حاشیههای صفحه',
marginTop : 'بالا',
marginLeft : 'چپ',
marginRight : 'راست',
marginBottom : 'پایین',
metaKeywords : 'کلیدواژگان نمایهگذاری سند (با کاما جدا شوند)',
metaDescription : 'توصیف سند',
metaAuthor : 'نویسنده',
metaCopyright : 'حق انتشار',
previewHtml : '<p>این یک <strong>متن نمونه</strong> است. شما در حال استفاده از <a href="javascript:void(0)">CKEditor</a> هستید.</p>'
}
};
|
PypiClean
|
/flexmeasures-0.15.1.tar.gz/flexmeasures-0.15.1/to_pypi.sh
|
# Script to release FlexMeasures to PyPi.
#
# Cleans up build and dist dirs, checks for python files which are not in git, installs dependencies
# and finally uploads tar and wheel packages to Pypi.
#
#
# Usage
# ------------
#
# ./to_pypi [--dry-run]
#
# If the --dry-run flag is present, this script will do all steps, but skip the upload to Pypi.
#
#
# The version
# -------------
# The version comes from setuptools_scm. See `python setup.py --version`.
# setuptools_scm works via git tags that should implement a semantic versioning scheme, e.g. v0.2.3
#
# If there were zero commits since the most recent tag, we have a real release and the version basically *is* what the tag says.
# Otherwise, the version also includes a .devN identifier, where N is the number of commits since the last version tag.
#
# More information on creating a dev release
# -------------------------------------------
# Note that the only way to create a new dev release is to add another commit on your development branch.
# It might have been convenient to not have to commit to do that (for experimenting with very small changes),
# but we decided against that. Let's explore why for a bit:
#
# First, setuptools_scm has the ability to add a local scheme (git commit and date/time) to the version,
# but we've disabled that, as that extra part isn't formatted in a way that Pypi accepts it.
# Another way would have been to add a local version identifier ("+M", note the plus sign),
# which is allowed in PEP 440 but explicitly disallowed by Pypi.
# Finally, if we simply add a number to .devN (-> .devNM), the ordering of dev versions would be
# disturbed after the next local commit (e.g. we add 1 to .dev4, making it .dev41, and then the next version, .dev5,
# is not the highest version chosen by PyPi).
#
# So we'll use these tools as the experts intended.
# If you want, you can read more about acceptable versions in PEP 440: https://www.python.org/dev/peps/pep-0440/
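# Example (assuming setuptools_scm's default guess-next-dev scheme): if the
# latest tag is v0.2.3 and 4 commits have been made since, the version becomes
# 0.2.4.dev4; with zero commits since the tag it is simply 0.2.3.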
NUM_PY_FILES_IN_FM=$(git status --porcelain flexmeasures | grep '??.*\.py' | wc -l)
if [ $NUM_PY_FILES_IN_FM -gt 0 ]; then
PY_FILES_IN_FM=$(git status --porcelain flexmeasures | grep '??.*\.py')
echo """[TO_PYPI] The following python files are not under git control but would be packaged anyways (unless explicitly excluded, e.g. in MANIFEST.in):
$PY_FILES_IN_FM
You probably want to remove any files with sensitive data; or add a MANIFEST.in file with 'exclude flexmeasures/path/to/filename' ...
"""
read -p "Continue (y/n)? " choice
case "$choice" in
y|Y ) echo "If you say so. Continuing ...";;
n|N ) echo "Aborting ..."; exit 2;;
* ) echo "invalid choice";;
esac
fi
echo "[TO_PYPI] Cleaning ..."
rm -rf build/* dist/*
echo "[TO_PYPI] Installing dependencies ..."
pip -q install twine
pip -q install wheel
echo "[TO_PYPI] Packaging ..."
python -m build
if [ "$1" == "--dry-run" ]; then
echo "[TO_PYPI] Not uploading to Pypi (--dry-run active) ..."
else
echo "[TO_PYPI] Uploading to Pypi ..."
twine upload dist/*
fi
|
PypiClean
|
/cohesity_management_sdk-1.10.1.tar.gz/cohesity_management_sdk-1.10.1/cohesity_management_sdk/models/view_protection_source.py
|
import cohesity_management_sdk.models.universal_id
class ViewProtectionSource(object):
"""Implementation of the 'ViewProtectionSource' model.
Specifies a Protection Source in a View environment.
Attributes:
id (UniversalId): Specifies a unique id of a Protection Source for a
View. The id is unique across Cohesity Clusters.
name (string): Specifies a human readable name of the Protection Source
of a View.
mtype (TypeViewProtectionSourceEnum): Specifies the type of managed
Object in a View Protection Source environment. Examples of View
Objects include 'kViewBox' or 'kView'. 'kViewBox' indicates Storage
Domain as a Protection Source type. 'kView' indicates View as a
Protection Source type.
"""
# Create a mapping from Model property names to API property names
_names = {
"id":'id',
"name":'name',
"mtype":'type',
}
def __init__(self,
id=None,
name=None,
mtype=None,
):
"""Constructor for the ViewProtectionSource class"""
# Initialize members of the class
self.id = id
self.name = name
self.mtype = mtype
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
id = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('id')) if dictionary.get('id') else None
name = dictionary.get('name')
mtype = dictionary.get('type')
# Return an object of this model
return cls(
id,
name,
mtype
)
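# Minimal usage sketch (hypothetical payload); note that the API key 'type'
# maps to the model attribute 'mtype' via the _names table above:
# src = ViewProtectionSource.from_dictionary({'name': 'my-view', 'type': 'kView'})
# assert src.name == 'my-view' and src.mtype == 'kView' and src.id is None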
|
PypiClean
|
/PyrogramPlus-1.2.22.tar.gz/PyrogramPlus-1.2.22/pyrogram/methods/chats/promote_chat_member.py
|
from typing import Union
from pyrogram import raw
from pyrogram.scaffold import Scaffold
class PromoteChatMember(Scaffold):
async def promote_chat_member(
self,
chat_id: Union[int, str],
user_id: Union[int, str],
is_anonymous: bool = False,
can_manage_chat: bool = True,
can_change_info: bool = False,
can_post_messages: bool = False,
can_edit_messages: bool = False,
can_delete_messages: bool = False,
can_restrict_members: bool = False,
can_invite_users: bool = False,
can_pin_messages: bool = False,
can_promote_members: bool = False,
can_manage_voice_chats: bool = False
) -> bool:
"""Promote or demote a user in a supergroup or a channel.
You must be an administrator in the chat for this to work and must have the appropriate admin rights.
Pass False for all boolean parameters to demote a user.
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
user_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target user.
For a contact that exists in your Telegram address book you can use their phone number (str).
is_anonymous (``bool``, *optional*):
Pass True, if the administrator's presence in the chat is hidden.
can_manage_chat (``bool``, *optional*):
Pass True, if the administrator can access the chat event log, chat statistics, message statistics
in channels, see channel members, see anonymous administrators in supergroups and ignore slow mode.
Implied by any other administrator privilege.
can_change_info (``bool``, *optional*):
Pass True, if the administrator can change chat title, photo and other settings.
can_post_messages (``bool``, *optional*):
Pass True, if the administrator can create channel posts, channels only.
can_edit_messages (``bool``, *optional*):
Pass True, if the administrator can edit messages of other users and can pin messages, channels only.
can_delete_messages (``bool``, *optional*):
Pass True, if the administrator can delete messages of other users.
can_restrict_members (``bool``, *optional*):
Pass True, if the administrator can restrict, ban or unban chat members.
can_invite_users (``bool``, *optional*):
Pass True, if the administrator can invite new users to the chat.
can_pin_messages (``bool``, *optional*):
Pass True, if the administrator can pin messages, supergroups only.
can_promote_members (``bool``, *optional*):
Pass True, if the administrator can add new administrators with a subset of his own privileges or
demote administrators that he has promoted, directly or indirectly (promoted by administrators that
were appointed by him).
can_manage_voice_chats (``bool``, *optional*):
Pass True, if the administration can manage voice chats (also called group calls).
Returns:
``bool``: True on success.
Example:
.. code-block:: python
# Promote chat member to supergroup admin
app.promote_chat_member(chat_id, user_id)
"""
await self.send(
raw.functions.channels.EditAdmin(
channel=await self.resolve_peer(chat_id),
user_id=await self.resolve_peer(user_id),
admin_rights=raw.types.ChatAdminRights(
anonymous=is_anonymous or None,
change_info=can_change_info or None,
post_messages=can_post_messages or None,
edit_messages=can_edit_messages or None,
delete_messages=can_delete_messages or None,
ban_users=can_restrict_members or None,
invite_users=can_invite_users or None,
pin_messages=can_pin_messages or None,
add_admins=can_promote_members or None,
manage_call=can_manage_voice_chats or None,
other=can_manage_chat or None
),
rank=""
)
)
return True
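# Usage sketch: to demote a previously promoted member, pass False for every
# right (can_manage_chat defaults to True, so disable it explicitly):
# app.promote_chat_member(chat_id, user_id, can_manage_chat=False)
# With all flags False every ChatAdminRights field is None, removing the
# admin rights this method had granted.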
|
PypiClean
|
/retro_data_structures-0.23.0-py3-none-any.whl/retro_data_structures/properties/prime/objects/Platform.py
|
import dataclasses
import struct
import typing
from retro_data_structures.game_check import Game
from retro_data_structures.properties.base_property import BaseObjectType
from retro_data_structures.properties.prime.archetypes.ActorParameters import ActorParameters
from retro_data_structures.properties.prime.archetypes.DamageVulnerability import DamageVulnerability
from retro_data_structures.properties.prime.archetypes.HealthInfo import HealthInfo
from retro_data_structures.properties.prime.core.AnimationParameters import AnimationParameters
from retro_data_structures.properties.prime.core.AssetId import AssetId, default_asset_id
from retro_data_structures.properties.prime.core.Vector import Vector
@dataclasses.dataclass()
class Platform(BaseObjectType):
name: str = dataclasses.field(default='')
position: Vector = dataclasses.field(default_factory=Vector)
rotation: Vector = dataclasses.field(default_factory=Vector)
scale: Vector = dataclasses.field(default_factory=Vector)
unknown_1: Vector = dataclasses.field(default_factory=Vector)
scan_offset: Vector = dataclasses.field(default_factory=Vector)
model: AssetId = dataclasses.field(metadata={'asset_types': ['CMDL']}, default=default_asset_id)
animation_parameters: AnimationParameters = dataclasses.field(default_factory=AnimationParameters)
unnamed_0x00000008: ActorParameters = dataclasses.field(default_factory=ActorParameters)
unknown_2: float = dataclasses.field(default=0.0)
active: bool = dataclasses.field(default=False)
dcln: AssetId = dataclasses.field(metadata={'asset_types': ['DCLN']}, default=default_asset_id)
unnamed_0x0000000c: HealthInfo = dataclasses.field(default_factory=HealthInfo)
unnamed_0x0000000d: DamageVulnerability = dataclasses.field(default_factory=DamageVulnerability)
unknown_3: bool = dataclasses.field(default=False)
unknown_4: float = dataclasses.field(default=0.0)
unknown_5: bool = dataclasses.field(default=False)
unknown_6: int = dataclasses.field(default=0)
unknown_7: int = dataclasses.field(default=0)
@classmethod
def game(cls) -> Game:
return Game.PRIME
def get_name(self) -> typing.Optional[str]:
return self.name
def set_name(self, name: str) -> None:
self.name = name
@classmethod
def object_type(cls) -> int:
return 0x8
@classmethod
def from_stream(cls, data: typing.BinaryIO, size: typing.Optional[int] = None, default_override: typing.Optional[dict] = None):
property_size = None # Atomic
property_count = struct.unpack(">L", data.read(4))[0]
name = b"".join(iter(lambda: data.read(1), b'\x00')).decode("utf-8")
position = Vector.from_stream(data)
rotation = Vector.from_stream(data)
scale = Vector.from_stream(data)
unknown_1 = Vector.from_stream(data)
scan_offset = Vector.from_stream(data)
model = struct.unpack(">L", data.read(4))[0]
animation_parameters = AnimationParameters.from_stream(data, property_size)
unnamed_0x00000008 = ActorParameters.from_stream(data, property_size)
unknown_2 = struct.unpack('>f', data.read(4))[0]
active = struct.unpack('>?', data.read(1))[0]
dcln = struct.unpack(">L", data.read(4))[0]
unnamed_0x0000000c = HealthInfo.from_stream(data, property_size)
unnamed_0x0000000d = DamageVulnerability.from_stream(data, property_size)
unknown_3 = struct.unpack('>?', data.read(1))[0]
unknown_4 = struct.unpack('>f', data.read(4))[0]
unknown_5 = struct.unpack('>?', data.read(1))[0]
unknown_6 = struct.unpack('>l', data.read(4))[0]
unknown_7 = struct.unpack('>l', data.read(4))[0]
return cls(name, position, rotation, scale, unknown_1, scan_offset, model, animation_parameters, unnamed_0x00000008, unknown_2, active, dcln, unnamed_0x0000000c, unnamed_0x0000000d, unknown_3, unknown_4, unknown_5, unknown_6, unknown_7)
def to_stream(self, data: typing.BinaryIO, default_override: typing.Optional[dict] = None):
default_override = default_override or {}
data.write(b'\x00\x00\x00\x13') # 19 properties
data.write(self.name.encode("utf-8"))
data.write(b'\x00')
self.position.to_stream(data)
self.rotation.to_stream(data)
self.scale.to_stream(data)
self.unknown_1.to_stream(data)
self.scan_offset.to_stream(data)
data.write(struct.pack(">L", self.model))
self.animation_parameters.to_stream(data)
self.unnamed_0x00000008.to_stream(data)
data.write(struct.pack('>f', self.unknown_2))
data.write(struct.pack('>?', self.active))
data.write(struct.pack(">L", self.dcln))
self.unnamed_0x0000000c.to_stream(data)
self.unnamed_0x0000000d.to_stream(data)
data.write(struct.pack('>?', self.unknown_3))
data.write(struct.pack('>f', self.unknown_4))
data.write(struct.pack('>?', self.unknown_5))
data.write(struct.pack('>l', self.unknown_6))
data.write(struct.pack('>l', self.unknown_7))
@classmethod
def from_json(cls, data: dict):
return cls(
name=data['name'],
position=Vector.from_json(data['position']),
rotation=Vector.from_json(data['rotation']),
scale=Vector.from_json(data['scale']),
unknown_1=Vector.from_json(data['unknown_1']),
scan_offset=Vector.from_json(data['scan_offset']),
model=data['model'],
animation_parameters=AnimationParameters.from_json(data['animation_parameters']),
unnamed_0x00000008=ActorParameters.from_json(data['unnamed_0x00000008']),
unknown_2=data['unknown_2'],
active=data['active'],
dcln=data['dcln'],
unnamed_0x0000000c=HealthInfo.from_json(data['unnamed_0x0000000c']),
unnamed_0x0000000d=DamageVulnerability.from_json(data['unnamed_0x0000000d']),
unknown_3=data['unknown_3'],
unknown_4=data['unknown_4'],
unknown_5=data['unknown_5'],
unknown_6=data['unknown_6'],
unknown_7=data['unknown_7'],
)
def to_json(self) -> dict:
return {
'name': self.name,
'position': self.position.to_json(),
'rotation': self.rotation.to_json(),
'scale': self.scale.to_json(),
'unknown_1': self.unknown_1.to_json(),
'scan_offset': self.scan_offset.to_json(),
'model': self.model,
'animation_parameters': self.animation_parameters.to_json(),
'unnamed_0x00000008': self.unnamed_0x00000008.to_json(),
'unknown_2': self.unknown_2,
'active': self.active,
'dcln': self.dcln,
'unnamed_0x0000000c': self.unnamed_0x0000000c.to_json(),
'unnamed_0x0000000d': self.unnamed_0x0000000d.to_json(),
'unknown_3': self.unknown_3,
'unknown_4': self.unknown_4,
'unknown_5': self.unknown_5,
'unknown_6': self.unknown_6,
'unknown_7': self.unknown_7,
}
def _dependencies_for_model(self, asset_manager):
yield from asset_manager.get_dependencies_for_asset(self.model)
def _dependencies_for_animation_parameters(self, asset_manager):
yield from self.animation_parameters.dependencies_for(asset_manager)
def _dependencies_for_unnamed_0x00000008(self, asset_manager):
yield from self.unnamed_0x00000008.dependencies_for(asset_manager)
def _dependencies_for_dcln(self, asset_manager):
yield from asset_manager.get_dependencies_for_asset(self.dcln)
def _dependencies_for_unnamed_0x0000000c(self, asset_manager):
yield from self.unnamed_0x0000000c.dependencies_for(asset_manager)
def _dependencies_for_unnamed_0x0000000d(self, asset_manager):
yield from self.unnamed_0x0000000d.dependencies_for(asset_manager)
def dependencies_for(self, asset_manager):
for method, field_name, field_type in [
(self._dependencies_for_model, "model", "AssetId"),
(self._dependencies_for_animation_parameters, "animation_parameters", "AnimationParameters"),
(self._dependencies_for_unnamed_0x00000008, "unnamed_0x00000008", "ActorParameters"),
(self._dependencies_for_dcln, "dcln", "AssetId"),
(self._dependencies_for_unnamed_0x0000000c, "unnamed_0x0000000c", "HealthInfo"),
(self._dependencies_for_unnamed_0x0000000d, "unnamed_0x0000000d", "DamageVulnerability"),
]:
try:
yield from method(asset_manager)
except Exception as e:
                raise Exception(
                    f"Error finding dependencies for Platform.{field_name} ({field_type}): {e}"
                ) from e
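# --- Editor's usage sketch (not part of the generated module) ---
# A minimal round-trip through the binary and JSON codecs defined above,
# assuming every Platform field declares a default (as the generated fields
# do) and that the nested types' to_stream/from_stream methods are symmetric.
# `io` is imported locally so the sketch stays self-contained.
if __name__ == '__main__':
    import io

    original = Platform()                       # all fields at their defaults
    buffer = io.BytesIO()
    original.to_stream(buffer)                  # writes the 19-property payload
    buffer.seek(0)
    decoded = Platform.from_stream(buffer)      # symmetric big-endian parse
    assert decoded == original                  # dataclass field-wise equality
    assert Platform.from_json(original.to_json()) == original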
|
PypiClean
|
/tf_latino-0.0.5.tar.gz/tf_latino-0.0.5/tf_latino/lib/import_dotnet.py
|
import logging
import os
import sys
import traceback
from settings import PACKAGE_ROOT
#------------------------------------------------------------------------------
# prepare environment for loading .net components (Python.net interpreter should be used)
# see: http://pythonnet.sourceforge.net/
#------------------------------------------------------------------------------
logging.info("----------------- import_dotnet.py ------------------------>")
if sys.platform.startswith('win'):
dllPath = os.path.join(PACKAGE_ROOT, 'bin')
sys.path.append(dllPath)
try:
import clr
import System
# load all dll & exe files in dllPath directory
for f in os.listdir(dllPath):
if os.path.isfile(os.path.join(dllPath, f)) and (f.endswith(".dll") or f.endswith(".exe")):
logging.info("Loading .NET library '%s'", f)
try:
System.Reflection.Assembly.LoadFile(os.path.join(dllPath, f).replace('\\','/'))
                    except System.BadImageFormatException:
                        # not a CLR assembly (e.g. a native DLL); skip it
                        pass
# loading LATINO namespace
import Latino
import LatinoInterfaces
from LatinoInterfaces import LatinoCF
# loading LEMMAGEN namespace
import LemmaSharpInterfaces
from LemmaSharpInterfaces import LemmaSharpIntf
# loading CROSSBEE namespace
import CrossBeeInterfaces
from CrossBeeInterfaces import CrossBeeIntf
    except Exception:
        logging.warning("DotNet assemblies could not be loaded! Probable reasons: missing DLLs or the wrong interpreter (see http://pythonnet.sourceforge.net). "
                        "Other functionality of ClowdFlows (besides the .NET assemblies) should be OK! "
                        "Original exception: %s", traceback.format_exc())
else:
logging.info("DotNet assemblies were not be loaded! Available only for Windows environments.")
logging.info("<----------------- import_dotnet.py ------------------------")
|
PypiClean
|
/django_ledger-0.5.4.2-py3-none-any.whl/django_ledger/migrations/0001_initial.py
|
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import django_ledger.io.io_mixin
import django_ledger.models.entity
import django_ledger.models.mixins
import re
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AccountModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('path', models.CharField(max_length=255, unique=True)),
('depth', models.PositiveIntegerField()),
('numchild', models.PositiveIntegerField(default=0)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('code', models.CharField(max_length=10, verbose_name='Account Code')),
('name', models.CharField(max_length=100, verbose_name='Account Name')),
                ('role', models.CharField(choices=[('Assets', (('asset_ca_cash', 'Current Asset'), ('asset_ca_mkt_sec', 'Marketable Securities'), ('asset_ca_recv', 'Receivables'), ('asset_ca_inv', 'Inventory'), ('asset_ca_uncoll', 'Uncollectibles'), ('asset_ca_prepaid', 'Prepaid'), ('asset_ca_other', 'Other Liquid Assets'), ('asset_lti_notes', 'Notes Receivable'), ('asset_lti_land', 'Land'), ('asset_lti_sec', 'Securities'), ('asset_ppe_build', 'Buildings'), ('asset_ppe_build_accum_depr', 'Buildings - Accum. Depreciation'), ('asset_ppe_plant', 'Plant'), ('asset_ppe_plant_depr', 'Plant - Accum. Depreciation'), ('asset_ppe_equip', 'Equipment'), ('asset_ppe_equip_accum_depr', 'Equipment - Accum. Depreciation'), ('asset_ia', 'Intangible Assets'), ('asset_ia_accum_amort', 'Intangible Assets - Accum. Amortization'), ('asset_adjustment', 'Other Assets'))), ('Liabilities', (('lia_cl_acc_payable', 'Accounts Payable'), ('lia_cl_wages_payable', 'Wages Payable'), ('lia_cl_int_payable', 'Interest Payable'), ('lia_cl_taxes_payable', 'Taxes Payable'), ('lia_cl_st_notes_payable', 'Notes Payable'), ('lia_cl_ltd_mat', 'Current Maturities of Long Term Debt'), ('lia_cl_def_rev', 'Deferred Revenue'), ('lia_cl_other', 'Other Liabilities'), ('lia_ltl_notes', 'Notes Payable'), ('lia_ltl_bonds', 'Bonds Payable'), ('lia_ltl_mortgage', 'Mortgage Payable'))), ('Equity', (('eq_capital', 'Capital'), ('eq_stock_common', 'Common Stock'), ('eq_stock_preferred', 'Preferred Stock'), ('eq_adjustment', 'Other Equity Adjustments'), ('eq_dividends', 'Dividends & Distributions to Shareholders'), ('in_operational', 'Operational Income'), ('in_passive', 'Investing/Passive Income'), ('in_interest', 'Interest Income'), ('in_gain_loss', 'Capital Gain/Loss Income'), ('in_other', 'Other Income'), ('ex_cogs', 'Cost of Goods Sold'), ('ex_regular', 'Regular Expense'), ('ex_interest', 'Interest Expense'), ('ex_taxes', 'Tax Expense'), ('ex_capital', 'Capital Expense'), ('ex_depreciation', 'Depreciation Expense'), ('ex_amortization', 'Amortization Expense'), ('ex_other', 'Other Expense')))], max_length=30, verbose_name='Account Role')),
('balance_type', models.CharField(choices=[('credit', 'Credit'), ('debit', 'Debit')], max_length=6, verbose_name='Account Balance Type')),
('locked', models.BooleanField(default=False, verbose_name='Locked')),
('active', models.BooleanField(default=False, verbose_name='Active')),
],
options={
'verbose_name': 'Account',
'verbose_name_plural': 'Accounts',
'ordering': ['-created'],
'abstract': False,
},
),
migrations.CreateModel(
name='BillModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('amount_due', models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Amount Due')),
('amount_paid', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(limit_value=0)], verbose_name='Amount Paid')),
('amount_receivable', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(limit_value=0)], verbose_name='Amount Receivable')),
('amount_unearned', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(limit_value=0)], verbose_name='Amount Unearned')),
('amount_earned', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(limit_value=0)], verbose_name='Amount Earned')),
('accrue', models.BooleanField(default=False, verbose_name='Accrue')),
('progress', models.DecimalField(decimal_places=2, default=0, max_digits=3, validators=[django.core.validators.MinValueValidator(limit_value=0), django.core.validators.MaxValueValidator(limit_value=1)], verbose_name='Progress Amount')),
('terms', models.CharField(choices=[('on_receipt', 'Due On Receipt'), ('net_30', 'Net 30 Days'), ('net_60', 'Net 60 Days'), ('net_90', 'Net 90 Days')], default='on_receipt', max_length=10, verbose_name='Terms')),
('date_due', models.DateField(blank=True, null=True, verbose_name='Due Date')),
('markdown_notes', models.TextField(blank=True, null=True, verbose_name='Markdown Notes')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('bill_number', models.SlugField(editable=False, max_length=20, verbose_name='Bill Number')),
('bill_status', models.CharField(choices=[('draft', 'Draft'), ('in_review', 'In Review'), ('approved', 'Approved'), ('paid', 'Paid'), ('canceled', 'Canceled'), ('void', 'Void')], default='draft', max_length=10, verbose_name='Bill Status')),
('xref', models.SlugField(blank=True, null=True, verbose_name='External Reference Number')),
('additional_info', models.JSONField(blank=True, default=dict, null=True, verbose_name='Bill Additional Info')),
('date_draft', models.DateField(blank=True, null=True, verbose_name='Draft Date')),
('date_in_review', models.DateField(blank=True, null=True, verbose_name='In Review Date')),
('date_approved', models.DateField(blank=True, null=True, verbose_name='Approved Date')),
('date_paid', models.DateField(blank=True, null=True, verbose_name='Paid Date')),
('date_void', models.DateField(blank=True, null=True, verbose_name='Void Date')),
('date_canceled', models.DateField(blank=True, null=True, verbose_name='Canceled Date')),
],
options={
'verbose_name': 'Bill',
'verbose_name_plural': 'Bills',
'ordering': ['-updated'],
'abstract': False,
},
),
migrations.CreateModel(
name='ChartOfAccountModel',
fields=[
('slug', models.SlugField(editable=False, unique=True, validators=[django.core.validators.MinLengthValidator(limit_value=10, message='Slug field must contain at least 10 characters.')])),
('name', models.CharField(blank=True, max_length=150, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('locked', models.BooleanField(default=False, verbose_name='Locked')),
('description', models.TextField(blank=True, null=True, verbose_name='CoA Description')),
],
options={
'verbose_name': 'Chart of Account',
'verbose_name_plural': 'Chart of Accounts',
'ordering': ['-created'],
'abstract': False,
},
),
migrations.CreateModel(
name='CustomerModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('address_1', models.CharField(max_length=70, verbose_name='Address Line 1')),
('address_2', models.CharField(blank=True, max_length=70, null=True, verbose_name='Address Line 2')),
('city', models.CharField(blank=True, max_length=70, null=True, verbose_name='City')),
('state', models.CharField(blank=True, max_length=70, null=True, verbose_name='State/Province')),
('zip_code', models.CharField(blank=True, max_length=20, null=True, verbose_name='Zip Code')),
('country', models.CharField(blank=True, max_length=70, null=True, verbose_name='Country')),
('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email')),
('website', models.URLField(blank=True, null=True, verbose_name='Website')),
('phone', models.CharField(blank=True, max_length=30, null=True, verbose_name='Phone Number')),
('sales_tax_rate', models.FloatField(blank=True, default=0.0, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0), django.core.validators.MaxValueValidator(limit_value=1.0)], verbose_name='Sales Tax Rate')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('customer_name', models.CharField(max_length=100)),
('customer_number', models.CharField(editable=False, max_length=30, verbose_name='Customer Number')),
('description', models.TextField()),
('active', models.BooleanField(default=True)),
('hidden', models.BooleanField(default=False)),
('additional_info', models.JSONField(blank=True, default=dict, null=True)),
],
options={
'verbose_name': 'Customer',
'abstract': False,
},
),
migrations.CreateModel(
name='EntityManagementModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('permission_level', models.CharField(choices=[('read', 'Read Permissions'), ('write', 'Read/Write Permissions'), ('suspended', 'No Permissions')], default='read', max_length=10, verbose_name='Permission Level')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='EntityModel',
fields=[
('slug', models.SlugField(editable=False, unique=True, validators=[django.core.validators.MinLengthValidator(limit_value=10, message='Slug field must contain at least 10 characters.')])),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('address_1', models.CharField(max_length=70, verbose_name='Address Line 1')),
('address_2', models.CharField(blank=True, max_length=70, null=True, verbose_name='Address Line 2')),
('city', models.CharField(blank=True, max_length=70, null=True, verbose_name='City')),
('state', models.CharField(blank=True, max_length=70, null=True, verbose_name='State/Province')),
('zip_code', models.CharField(blank=True, max_length=20, null=True, verbose_name='Zip Code')),
('country', models.CharField(blank=True, max_length=70, null=True, verbose_name='Country')),
('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email')),
('website', models.URLField(blank=True, null=True, verbose_name='Website')),
('phone', models.CharField(blank=True, max_length=30, null=True, verbose_name='Phone Number')),
('path', models.CharField(max_length=255, unique=True)),
('depth', models.PositiveIntegerField()),
('numchild', models.PositiveIntegerField(default=0)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=150, verbose_name='Entity Name')),
('hidden', models.BooleanField(default=False)),
('accrual_method', models.BooleanField(default=False, verbose_name='Use Accrual Method')),
('fy_start_month', models.IntegerField(choices=[(1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'), (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'), (10, 'October'), (11, 'November'), (12, 'December')], default=1, verbose_name='Fiscal Year Start')),
('picture', models.ImageField(blank=True, null=True, upload_to='')),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='admin_of', to=settings.AUTH_USER_MODEL, verbose_name='Admin')),
('default_coa', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='django_ledger.chartofaccountmodel', verbose_name='Default Chart of Accounts')),
('managers', models.ManyToManyField(related_name='managed_by', through='django_ledger.EntityManagementModel', to=settings.AUTH_USER_MODEL, verbose_name='Managers')),
],
options={
'verbose_name': 'Entity',
'verbose_name_plural': 'Entities',
'ordering': ['-created'],
'abstract': False,
},
bases=(models.Model, django_ledger.io.io_mixin.IOMixIn, django_ledger.models.mixins.LoggingMixIn, django_ledger.models.entity.FiscalPeriodMixIn),
),
migrations.CreateModel(
name='EntityUnitModel',
fields=[
('name', models.CharField(blank=True, max_length=150, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('path', models.CharField(max_length=255, unique=True)),
('depth', models.PositiveIntegerField()),
('numchild', models.PositiveIntegerField(default=0)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('slug', models.SlugField()),
('document_prefix', models.CharField(max_length=3)),
('active', models.BooleanField(default=True, verbose_name='Is Active')),
('hidden', models.BooleanField(default=False, verbose_name='Is Hidden')),
('entity', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Unit Entity')),
],
options={
'verbose_name': 'Entity Unit Model',
'ordering': ['-created'],
'abstract': False,
},
bases=(django_ledger.io.io_mixin.IOMixIn, models.Model),
),
migrations.CreateModel(
name='EstimateModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('markdown_notes', models.TextField(blank=True, null=True, verbose_name='Markdown Notes')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('estimate_number', models.SlugField(editable=False, max_length=20, verbose_name='Estimate Number')),
('terms', models.CharField(choices=[('fixed', 'Fixed Price'), ('target', 'Target Price'), ('t&m', 'Time & Materials'), ('other', 'Other')], max_length=10, verbose_name='Contract Terms')),
                ('title', models.CharField(max_length=250, validators=[django.core.validators.MinLengthValidator(limit_value=5, message='Estimate title must be at least 5 characters.')], verbose_name='Customer Estimate Title')),
('status', models.CharField(choices=[('draft', 'Draft'), ('in_review', 'In Review'), ('approved', 'Approved'), ('completed', 'Completed'), ('void', 'Void'), ('canceled', 'Canceled')], default='draft', max_length=10, verbose_name='Customer Estimate Status')),
('date_draft', models.DateField(blank=True, null=True, verbose_name='Date Draft')),
('date_in_review', models.DateField(blank=True, null=True, verbose_name='Date In Review')),
('date_approved', models.DateField(blank=True, null=True, verbose_name='Date Approved')),
('date_completed', models.DateField(blank=True, null=True, verbose_name='Date Completed')),
('date_canceled', models.DateField(blank=True, null=True, verbose_name='Date Canceled')),
('date_void', models.DateField(blank=True, null=True, verbose_name='Date Void')),
                ('revenue_estimate', models.DecimalField(decimal_places=2, default=Decimal('0.00'), help_text='Estimated revenue from the quoted work.', max_digits=20, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Total revenue estimate')),
                ('labor_estimate', models.DecimalField(decimal_places=2, default=Decimal('0.00'), help_text='Estimated labor cost to complete the quoted work.', max_digits=20, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Labor Cost Estimate')),
('material_estimate', models.DecimalField(decimal_places=2, default=0.0, help_text='Estimated material cost to complete the quoted work.', max_digits=20, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Material Cost Estimate')),
('equipment_estimate', models.DecimalField(decimal_places=2, default=Decimal('0.00'), help_text='Estimated equipment cost to complete the quoted work.', max_digits=20, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Equipment Cost Estimate')),
                ('other_estimate', models.DecimalField(decimal_places=2, default=Decimal('0.00'), help_text='Estimated other costs to complete the quoted work.', max_digits=20, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Other Cost Estimate')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.customermodel', verbose_name='Customer')),
('entity', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Entity Model')),
],
options={
'verbose_name': 'Customer Job',
'verbose_name_plural': 'Customer Jobs',
'ordering': ['-updated'],
'abstract': False,
},
),
migrations.CreateModel(
name='ImportJobModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('description', models.CharField(max_length=200, verbose_name='Description')),
('completed', models.BooleanField(default=False, verbose_name='Import Job Completed')),
],
options={
'verbose_name': 'Import Job Model',
'abstract': False,
},
),
migrations.CreateModel(
name='InvoiceModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('amount_due', models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Amount Due')),
('amount_paid', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(limit_value=0)], verbose_name='Amount Paid')),
('amount_receivable', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(limit_value=0)], verbose_name='Amount Receivable')),
('amount_unearned', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(limit_value=0)], verbose_name='Amount Unearned')),
('amount_earned', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(limit_value=0)], verbose_name='Amount Earned')),
('accrue', models.BooleanField(default=False, verbose_name='Accrue')),
('progress', models.DecimalField(decimal_places=2, default=0, max_digits=3, validators=[django.core.validators.MinValueValidator(limit_value=0), django.core.validators.MaxValueValidator(limit_value=1)], verbose_name='Progress Amount')),
('terms', models.CharField(choices=[('on_receipt', 'Due On Receipt'), ('net_30', 'Net 30 Days'), ('net_60', 'Net 60 Days'), ('net_90', 'Net 90 Days')], default='on_receipt', max_length=10, verbose_name='Terms')),
('date_due', models.DateField(blank=True, null=True, verbose_name='Due Date')),
('markdown_notes', models.TextField(blank=True, null=True, verbose_name='Markdown Notes')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('invoice_number', models.SlugField(editable=False, max_length=20, verbose_name='Invoice Number')),
('invoice_status', models.CharField(choices=[('draft', 'Draft'), ('in_review', 'In Review'), ('approved', 'Approved'), ('paid', 'Paid'), ('void', 'Void'), ('canceled', 'Canceled')], default='draft', max_length=10, verbose_name='Invoice Status')),
('additional_info', models.JSONField(blank=True, default=dict, null=True, verbose_name='Invoice Additional Info')),
('date_draft', models.DateField(blank=True, null=True, verbose_name='Draft Date')),
('date_in_review', models.DateField(blank=True, null=True, verbose_name='In Review Date')),
('date_approved', models.DateField(blank=True, null=True, verbose_name='Approved Date')),
('date_paid', models.DateField(blank=True, null=True, verbose_name='Paid Date')),
('date_void', models.DateField(blank=True, null=True, verbose_name='Void Date')),
('date_canceled', models.DateField(blank=True, null=True, verbose_name='Canceled Date')),
('cash_account', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, related_name='invoice_cash_account', to='django_ledger.accountmodel', verbose_name='Cash Account')),
('ce_model', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.estimatemodel', verbose_name='Associated Customer Job/Estimate')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.customermodel', verbose_name='Customer')),
],
options={
'verbose_name': 'Invoice',
'verbose_name_plural': 'Invoices',
'ordering': ['-updated'],
'abstract': False,
},
),
migrations.CreateModel(
name='ItemModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('path', models.CharField(max_length=255, unique=True)),
('depth', models.PositiveIntegerField()),
('numchild', models.PositiveIntegerField(default=0)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, verbose_name='Item Name')),
('item_type', models.CharField(blank=True, choices=[('L', 'Labor'), ('M', 'Material'), ('E', 'Equipment'), ('S', 'Lump Sum'), ('O', 'Other')], max_length=1, null=True)),
('sku', models.CharField(blank=True, max_length=50, null=True, verbose_name='SKU Code')),
('upc', models.CharField(blank=True, max_length=50, null=True, verbose_name='UPC Code')),
('item_id', models.CharField(blank=True, max_length=50, null=True, verbose_name='Internal ID')),
('item_number', models.CharField(editable=False, max_length=30, verbose_name='Item Number')),
('is_active', models.BooleanField(default=True, verbose_name='Is Active')),
('default_amount', models.DecimalField(decimal_places=2, default=0, max_digits=20, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Default monetary value per unit of measure')),
                ('for_inventory', models.BooleanField(help_text='An item you require for your inventory.', verbose_name='Is an item for inventory')),
('is_product_or_service', models.BooleanField(help_text='Is a product or service you sell or provide to customers.', verbose_name='Is a product or service.')),
('sold_as_unit', models.BooleanField(default=False)),
('inventory_received', models.DecimalField(blank=True, decimal_places=3, max_digits=20, null=True, verbose_name='Total inventory received.')),
('inventory_received_value', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True, verbose_name='Total value of inventory received.')),
('additional_info', models.JSONField(blank=True, default=dict, null=True, verbose_name='Item Additional Info')),
('cogs_account', models.ForeignKey(blank=True, help_text='COGS account where cost will be recognized on Income Statement.', null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='item_cogs_account', to='django_ledger.accountmodel', verbose_name='COGS Account')),
('earnings_account', models.ForeignKey(blank=True, help_text='Earnings account where revenue will be recognized on Income Statement.', null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='item_earnings_account', to='django_ledger.accountmodel', verbose_name='Earnings Account')),
('entity', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Item Entity')),
('expense_account', models.ForeignKey(blank=True, help_text='Expense account where cost will be recognized on Income Statement.', null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='item_expense_account', to='django_ledger.accountmodel', verbose_name='Expense Account')),
('inventory_account', models.ForeignKey(blank=True, help_text='Inventory account where cost will be capitalized.', null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='item_inventory_account', to='django_ledger.accountmodel', verbose_name='Inventory Account')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ItemTransactionModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('quantity', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Quantity')),
('unit_cost', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Cost Per Unit')),
('total_amount', models.DecimalField(blank=True, decimal_places=2, editable=False, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Total Amount QTY x UnitCost')),
('po_quantity', models.FloatField(blank=True, help_text='Authorized item quantity for purchasing.', null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='PO Quantity')),
('po_unit_cost', models.FloatField(blank=True, help_text='Purchase Order unit cost.', null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='PO Unit Cost')),
('po_total_amount', models.DecimalField(blank=True, decimal_places=2, editable=False, help_text='Maximum authorized cost per Purchase Order.', max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Authorized maximum item cost per Purchase Order')),
('po_item_status', models.CharField(blank=True, choices=[('not_ordered', 'Not Ordered'), ('ordered', 'Ordered'), ('in_transit', 'In Transit'), ('received', 'Received'), ('cancelled', 'Canceled')], max_length=15, null=True, verbose_name='PO Item Status')),
('ce_quantity', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Estimated/Contract Quantity')),
('ce_unit_cost_estimate', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Estimate/Contract Cost per Unit.')),
('ce_cost_estimate', models.DecimalField(blank=True, decimal_places=2, editable=False, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Total Estimate/Contract Cost.')),
('ce_unit_revenue_estimate', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Estimate/Contract Revenue per Unit.')),
('ce_revenue_estimate', models.DecimalField(blank=True, decimal_places=2, editable=False, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(limit_value=0.0)], verbose_name='Total Estimate/Contract Revenue.')),
('item_notes', models.CharField(blank=True, max_length=400, null=True, verbose_name='Description')),
('bill_model', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.billmodel', verbose_name='Bill Model')),
('ce_model', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.estimatemodel', verbose_name='Customer Estimate')),
('entity_unit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.entityunitmodel', verbose_name='Associated Entity Unit')),
('invoice_model', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.invoicemodel', verbose_name='Invoice Model')),
('item_model', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.itemmodel', verbose_name='Item Model')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='JournalEntryModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('je_number', models.SlugField(editable=False, max_length=20, verbose_name='Journal Entry Number')),
('timestamp', models.DateTimeField(verbose_name='Date')),
('description', models.CharField(blank=True, max_length=70, null=True, verbose_name='Description')),
('activity', models.CharField(blank=True, choices=[('Operating', (('op', 'Operating'),)), ('Investing', (('inv_ppe', 'Purchase/Disposition of PPE'), ('inv_securities', 'Purchase/Disposition of Securities'), ('inv', 'Investing Activity Other'))), ('Financing', (('fin_std', 'Payoff of Short Term Debt'), ('fin_ltd', 'Payoff of Long Term Debt'), ('fin_equity', 'Issuance of Common Stock, Preferred Stock or Capital Contribution'), ('fin_dividends', 'Dividends or Distributions to Shareholders'), ('fin', 'Financing Activity Other')))], editable=False, max_length=20, null=True, verbose_name='Activity')),
('origin', models.CharField(blank=True, max_length=30, null=True, verbose_name='Origin')),
('posted', models.BooleanField(default=False, verbose_name='Posted')),
('locked', models.BooleanField(default=False, verbose_name='Locked')),
('entity_unit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.entityunitmodel', verbose_name='Associated Entity Unit')),
],
options={
'verbose_name': 'Journal Entry',
'verbose_name_plural': 'Journal Entries',
'ordering': ['-created'],
'abstract': False,
},
managers=[
('on_coa', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='VendorModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('address_1', models.CharField(max_length=70, verbose_name='Address Line 1')),
('address_2', models.CharField(blank=True, max_length=70, null=True, verbose_name='Address Line 2')),
('city', models.CharField(blank=True, max_length=70, null=True, verbose_name='City')),
('state', models.CharField(blank=True, max_length=70, null=True, verbose_name='State/Province')),
('zip_code', models.CharField(blank=True, max_length=20, null=True, verbose_name='Zip Code')),
('country', models.CharField(blank=True, max_length=70, null=True, verbose_name='Country')),
('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email')),
('website', models.URLField(blank=True, null=True, verbose_name='Website')),
('phone', models.CharField(blank=True, max_length=30, null=True, verbose_name='Phone Number')),
('account_number', models.CharField(blank=True, max_length=30, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\d+)*\\Z'), code='invalid', message='Only digits allowed')], verbose_name='Account Number')),
('routing_number', models.CharField(blank=True, max_length=30, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\d+)*\\Z'), code='invalid', message='Only digits allowed')], verbose_name='Routing Number')),
('aba_number', models.CharField(blank=True, max_length=30, null=True, verbose_name='ABA Number')),
('swift_number', models.CharField(blank=True, max_length=30, null=True, verbose_name='SWIFT Number')),
('account_type', models.CharField(choices=[('checking', 'Checking'), ('savings', 'Savings')], default='checking', max_length=10, verbose_name='Account Type')),
('tax_id_number', models.CharField(blank=True, max_length=30, null=True, verbose_name='Tax Registration Number')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('vendor_number', models.CharField(blank=True, max_length=30, null=True)),
('vendor_name', models.CharField(max_length=100)),
('description', models.TextField()),
('active', models.BooleanField(default=True)),
('hidden', models.BooleanField(default=False)),
('additional_info', models.JSONField(blank=True, default=dict, null=True)),
('entity_model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Vendor Entity')),
],
options={
'verbose_name': 'Vendor',
'abstract': False,
},
),
migrations.CreateModel(
name='UnitOfMeasureModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50, verbose_name='Unit of Measure Name')),
('unit_abbr', models.SlugField(max_length=10, verbose_name='UoM Abbreviation')),
('is_active', models.BooleanField(default=True, verbose_name='Is Active')),
('entity', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='UoM Entity')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TransactionModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('tx_type', models.CharField(choices=[('credit', 'Credit'), ('debit', 'Debit')], max_length=10, verbose_name='Tx Type')),
                ('amount', models.DecimalField(blank=True, decimal_places=2, help_text='Amount of the transaction.', max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Amount')),
('description', models.CharField(blank=True, help_text='A description to be included with this individual transaction', max_length=100, null=True, verbose_name='Tx Description')),
('account', models.ForeignKey(help_text='Account from Chart of Accounts to be associated with this transaction.', on_delete=django.db.models.deletion.PROTECT, to='django_ledger.accountmodel', verbose_name='Account')),
('journal_entry', models.ForeignKey(editable=False, help_text='Journal Entry to be associated with this transaction.', on_delete=django.db.models.deletion.PROTECT, to='django_ledger.journalentrymodel', verbose_name='Journal Entry')),
],
options={
'verbose_name': 'Transaction',
'verbose_name_plural': 'Transactions',
'ordering': ['-created'],
'abstract': False,
},
),
migrations.CreateModel(
name='StagedTransactionModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('fitid', models.CharField(max_length=100)),
('amount', models.DecimalField(decimal_places=2, max_digits=15)),
('date_posted', models.DateField()),
('name', models.CharField(blank=True, max_length=200, null=True)),
('memo', models.CharField(blank=True, max_length=200, null=True)),
('earnings_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.accountmodel')),
('import_job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_ledger.importjobmodel')),
('tx', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_ledger.transactionmodel')),
],
options={
'verbose_name': 'Staged Transaction Model',
'abstract': False,
},
),
migrations.CreateModel(
name='PurchaseOrderModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('markdown_notes', models.TextField(blank=True, null=True, verbose_name='Markdown Notes')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('po_number', models.SlugField(editable=False, max_length=20, verbose_name='Purchase Order Number')),
                ('po_title', models.CharField(max_length=250, validators=[django.core.validators.MinLengthValidator(limit_value=5, message='PO title must be at least 5 characters.')], verbose_name='Purchase Order Title')),
('po_status', models.CharField(choices=[('draft', 'Draft'), ('in_review', 'In Review'), ('approved', 'Approved'), ('fulfilled', 'Fulfilled'), ('canceled', 'Canceled'), ('void', 'Void')], default='draft', max_length=10)),
('po_amount', models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Purchase Order Amount')),
('po_amount_received', models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Received Amount')),
('date_draft', models.DateField(blank=True, null=True, verbose_name='Draft Date')),
('date_in_review', models.DateField(blank=True, null=True, verbose_name='In Review Date')),
('date_approved', models.DateField(blank=True, null=True, verbose_name='Approved Date')),
('date_void', models.DateField(blank=True, null=True, verbose_name='Void Date')),
('date_fulfilled', models.DateField(blank=True, null=True, verbose_name='Fulfillment Date')),
('date_canceled', models.DateField(blank=True, null=True, verbose_name='Canceled Date')),
('ce_model', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.estimatemodel', verbose_name='Associated Customer Job/Estimate')),
('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Entity')),
('po_items', models.ManyToManyField(through='django_ledger.ItemTransactionModel', to='django_ledger.itemmodel', verbose_name='Purchase Order Items')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LedgerModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=150, null=True, verbose_name='Ledger Name')),
('posted', models.BooleanField(default=False, verbose_name='Posted Ledger')),
('locked', models.BooleanField(default=False, verbose_name='Locked Ledger')),
('hidden', models.BooleanField(default=False, verbose_name='Hidden Ledger')),
('entity', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Ledger Entity')),
],
options={
'verbose_name': 'Ledger',
'verbose_name_plural': 'Ledgers',
'ordering': ['-created'],
'abstract': False,
},
bases=(models.Model, django_ledger.io.io_mixin.IOMixIn),
),
migrations.AddField(
model_name='journalentrymodel',
name='ledger',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='journal_entries', to='django_ledger.ledgermodel', verbose_name='Ledger'),
),
migrations.AddField(
model_name='itemtransactionmodel',
name='po_model',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.purchaseordermodel', verbose_name='Purchase Order Model'),
),
migrations.AddField(
model_name='itemmodel',
name='uom',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.unitofmeasuremodel', verbose_name='Unit of Measure'),
),
migrations.AddField(
model_name='invoicemodel',
name='invoice_items',
field=models.ManyToManyField(through='django_ledger.ItemTransactionModel', to='django_ledger.itemmodel', verbose_name='Invoice Items'),
),
migrations.AddField(
model_name='invoicemodel',
name='ledger',
field=models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.ledgermodel', verbose_name='Ledger'),
),
migrations.AddField(
model_name='invoicemodel',
name='prepaid_account',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, related_name='invoice_prepaid_account', to='django_ledger.accountmodel', verbose_name='Prepaid Account'),
),
migrations.AddField(
model_name='invoicemodel',
name='unearned_account',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, related_name='invoice_unearned_account', to='django_ledger.accountmodel', verbose_name='Unearned Account'),
),
migrations.AddField(
model_name='importjobmodel',
name='ledger',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.ledgermodel', verbose_name='Ledger'),
),
migrations.CreateModel(
name='EntityStateModel',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('fiscal_year', models.SmallIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(limit_value=1900)], verbose_name='Fiscal Year')),
('key', models.CharField(choices=[('je', 'Journal Entry'), ('po', 'Purchase Order'), ('bill', 'Bill'), ('invoice', 'Invoice'), ('estimate', 'Estimate')], max_length=10)),
('sequence', models.BigIntegerField(default=0, validators=[django.core.validators.MinValueValidator(limit_value=0)])),
('entity_model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Entity Model')),
('entity_unit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.entityunitmodel', verbose_name='Entity Unit')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='entitymanagementmodel',
name='entity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entity_permissions', to='django_ledger.entitymodel', verbose_name='Entity'),
),
migrations.AddField(
model_name='entitymanagementmodel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entity_permissions', to=settings.AUTH_USER_MODEL, verbose_name='Manager'),
),
migrations.AddField(
model_name='customermodel',
name='entity_model',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Customer Entity'),
),
migrations.AddField(
model_name='chartofaccountmodel',
name='entity',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Entity'),
),
migrations.AddField(
model_name='billmodel',
name='bill_items',
field=models.ManyToManyField(through='django_ledger.ItemTransactionModel', to='django_ledger.itemmodel', verbose_name='Bill Items'),
),
migrations.AddField(
model_name='billmodel',
name='cash_account',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='None_cash_account', to='django_ledger.accountmodel', verbose_name='Cash Account'),
),
migrations.AddField(
model_name='billmodel',
name='ce_model',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, to='django_ledger.estimatemodel', verbose_name='Associated Customer Job/Estimate'),
),
migrations.AddField(
model_name='billmodel',
name='ledger',
field=models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.ledgermodel', verbose_name='Ledger'),
),
migrations.AddField(
model_name='billmodel',
name='prepaid_account',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='None_prepaid_account', to='django_ledger.accountmodel', verbose_name='Prepaid Account'),
),
migrations.AddField(
model_name='billmodel',
name='unearned_account',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='None_unearned_account', to='django_ledger.accountmodel', verbose_name='Unearned Account'),
),
migrations.AddField(
model_name='billmodel',
name='vendor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_ledger.vendormodel', verbose_name='Vendor'),
),
migrations.CreateModel(
name='BankAccountModel',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('account_number', models.CharField(blank=True, max_length=30, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\d+)*\\Z'), code='invalid', message='Only digits allowed')], verbose_name='Account Number')),
('routing_number', models.CharField(blank=True, max_length=30, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\d+)*\\Z'), code='invalid', message='Only digits allowed')], verbose_name='Routing Number')),
('aba_number', models.CharField(blank=True, max_length=30, null=True, verbose_name='ABA Number')),
('swift_number', models.CharField(blank=True, max_length=30, null=True, verbose_name='SWIFT Number')),
('account_type', models.CharField(choices=[('checking', 'Checking'), ('savings', 'Savings')], default='checking', max_length=10, verbose_name='Account Type')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=150, null=True)),
('active', models.BooleanField(default=False)),
('hidden', models.BooleanField(default=False)),
('cash_account', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, related_name='bank_cash_account', to='django_ledger.accountmodel', verbose_name='Cash Account')),
('entity_model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_ledger.entitymodel', verbose_name='Entity Model')),
],
options={
'verbose_name': 'Bank Account',
'abstract': False,
},
),
migrations.AddField(
model_name='accountmodel',
name='coa_model',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='django_ledger.chartofaccountmodel', verbose_name='Chart of Accounts'),
),
migrations.AddIndex(
model_name='vendormodel',
index=models.Index(fields=['created'], name='django_ledg_created_6a5d9d_idx'),
),
migrations.AddIndex(
model_name='vendormodel',
index=models.Index(fields=['updated'], name='django_ledg_updated_736f82_idx'),
),
migrations.AddIndex(
model_name='vendormodel',
index=models.Index(fields=['active'], name='django_ledg_active_25e3b1_idx'),
),
migrations.AddIndex(
model_name='vendormodel',
index=models.Index(fields=['hidden'], name='django_ledg_hidden_379b0a_idx'),
),
migrations.AlterUniqueTogether(
name='vendormodel',
unique_together={('entity_model', 'vendor_number')},
),
migrations.AddIndex(
model_name='unitofmeasuremodel',
index=models.Index(fields=['entity'], name='django_ledg_entity__1c8986_idx'),
),
migrations.AlterUniqueTogether(
name='unitofmeasuremodel',
unique_together={('entity', 'unit_abbr')},
),
migrations.AddIndex(
model_name='transactionmodel',
index=models.Index(fields=['tx_type'], name='django_ledg_tx_type_da7ba9_idx'),
),
migrations.AddIndex(
model_name='transactionmodel',
index=models.Index(fields=['account'], name='django_ledg_account_c4bb7e_idx'),
),
migrations.AddIndex(
model_name='transactionmodel',
index=models.Index(fields=['journal_entry'], name='django_ledg_journal_46c77f_idx'),
),
migrations.AddIndex(
model_name='transactionmodel',
index=models.Index(fields=['created'], name='django_ledg_created_b74538_idx'),
),
migrations.AddIndex(
model_name='transactionmodel',
index=models.Index(fields=['updated'], name='django_ledg_updated_494252_idx'),
),
migrations.AddIndex(
model_name='stagedtransactionmodel',
index=models.Index(fields=['import_job'], name='django_ledg_import__8e6511_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['entity'], name='django_ledg_entity__9ab6ba_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['po_number'], name='django_ledg_po_numb_20b7d1_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['po_status'], name='django_ledg_po_stat_ab8d41_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['ce_model'], name='django_ledg_ce_mode_66c2cc_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['date_draft'], name='django_ledg_date_dr_6fd15b_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['date_in_review'], name='django_ledg_date_in_b6a1f3_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['date_approved'], name='django_ledg_date_ap_4667c6_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['date_fulfilled'], name='django_ledg_date_fu_b29b08_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['date_canceled'], name='django_ledg_date_ca_3edc4b_idx'),
),
migrations.AddIndex(
model_name='purchaseordermodel',
index=models.Index(fields=['date_void'], name='django_ledg_date_vo_56612f_idx'),
),
migrations.AlterUniqueTogether(
name='purchaseordermodel',
unique_together={('entity', 'po_number')},
),
migrations.AddIndex(
model_name='ledgermodel',
index=models.Index(fields=['entity'], name='django_ledg_entity__e21c5d_idx'),
),
migrations.AddIndex(
model_name='ledgermodel',
index=models.Index(fields=['entity', 'posted'], name='django_ledg_entity__040422_idx'),
),
migrations.AddIndex(
model_name='ledgermodel',
index=models.Index(fields=['entity', 'locked'], name='django_ledg_entity__cde962_idx'),
),
migrations.AddIndex(
model_name='journalentrymodel',
index=models.Index(fields=['ledger'], name='django_ledg_ledger__ecaa89_idx'),
),
migrations.AddIndex(
model_name='journalentrymodel',
index=models.Index(fields=['timestamp'], name='django_ledg_timesta_ab91e8_idx'),
),
migrations.AddIndex(
model_name='journalentrymodel',
index=models.Index(fields=['activity'], name='django_ledg_activit_bae3bb_idx'),
),
migrations.AddIndex(
model_name='journalentrymodel',
index=models.Index(fields=['entity_unit'], name='django_ledg_entity__557f42_idx'),
),
migrations.AddIndex(
model_name='journalentrymodel',
index=models.Index(fields=['locked'], name='django_ledg_locked_5cc524_idx'),
),
migrations.AddIndex(
model_name='journalentrymodel',
index=models.Index(fields=['posted'], name='django_ledg_posted_097e54_idx'),
),
migrations.AddIndex(
model_name='journalentrymodel',
index=models.Index(fields=['je_number'], name='django_ledg_je_numb_987c8b_idx'),
),
migrations.AddIndex(
model_name='itemtransactionmodel',
index=models.Index(fields=['bill_model', 'item_model'], name='django_ledg_bill_mo_479f1f_idx'),
),
migrations.AddIndex(
model_name='itemtransactionmodel',
index=models.Index(fields=['invoice_model', 'item_model'], name='django_ledg_invoice_13ac3b_idx'),
),
migrations.AddIndex(
model_name='itemtransactionmodel',
index=models.Index(fields=['po_model', 'item_model'], name='django_ledg_po_mode_2675c9_idx'),
),
migrations.AddIndex(
model_name='itemtransactionmodel',
index=models.Index(fields=['ce_model', 'item_model'], name='django_ledg_ce_mode_ae4efd_idx'),
),
migrations.AddIndex(
model_name='itemtransactionmodel',
index=models.Index(fields=['po_item_status'], name='django_ledg_po_item_ac9475_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['inventory_account'], name='django_ledg_invento_dbf206_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['cogs_account'], name='django_ledg_cogs_ac_82d441_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['earnings_account'], name='django_ledg_earning_229a60_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['expense_account'], name='django_ledg_expense_f65128_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['for_inventory'], name='django_ledg_for_inv_c93303_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['is_product_or_service'], name='django_ledg_is_prod_4b9941_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['is_active'], name='django_ledg_is_acti_ffc9c6_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['item_type'], name='django_ledg_item_ty_0cef58_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['sku'], name='django_ledg_sku_02b4d8_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['upc'], name='django_ledg_upc_c19d0a_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['item_id'], name='django_ledg_item_id_972760_idx'),
),
migrations.AddIndex(
model_name='itemmodel',
index=models.Index(fields=['item_number'], name='django_ledg_item_nu_1e3c20_idx'),
),
migrations.AlterUniqueTogether(
name='itemmodel',
unique_together={('entity', 'item_number')},
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['invoice_status'], name='django_ledg_invoice_e349d0_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['terms'], name='django_ledg_terms_3b6577_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['cash_account'], name='django_ledg_cash_ac_00d697_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['prepaid_account'], name='django_ledg_prepaid_470f67_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['unearned_account'], name='django_ledg_unearne_bc18fd_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['date_due'], name='django_ledg_date_du_c72892_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['date_draft'], name='django_ledg_date_dr_ad2569_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['date_in_review'], name='django_ledg_date_in_e738b8_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['date_approved'], name='django_ledg_date_ap_ae90e1_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['date_paid'], name='django_ledg_date_pa_34b872_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['date_canceled'], name='django_ledg_date_ca_0f65e8_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['date_void'], name='django_ledg_date_vo_57c444_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['customer'], name='django_ledg_custome_34c881_idx'),
),
migrations.AddIndex(
model_name='invoicemodel',
index=models.Index(fields=['invoice_number'], name='django_ledg_invoice_d0881e_idx'),
),
migrations.AddIndex(
model_name='importjobmodel',
index=models.Index(fields=['ledger'], name='django_ledg_ledger__1e8758_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['status'], name='django_ledg_status_6aa955_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['customer'], name='django_ledg_custome_a2793e_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['terms'], name='django_ledg_terms_9507f6_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['entity'], name='django_ledg_entity__e2e967_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['date_draft'], name='django_ledg_date_dr_502e29_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['date_in_review'], name='django_ledg_date_in_b24ddd_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['date_approved'], name='django_ledg_date_ap_a754e5_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['date_canceled'], name='django_ledg_date_ca_9af3db_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['date_void'], name='django_ledg_date_vo_74c7c8_idx'),
),
migrations.AddIndex(
model_name='estimatemodel',
index=models.Index(fields=['estimate_number'], name='django_ledg_estimat_9ff8a5_idx'),
),
migrations.AlterUniqueTogether(
name='estimatemodel',
unique_together={('entity', 'estimate_number')},
),
migrations.AddIndex(
model_name='entityunitmodel',
index=models.Index(fields=['active'], name='django_ledg_active_2c2caa_idx'),
),
migrations.AddIndex(
model_name='entityunitmodel',
index=models.Index(fields=['hidden'], name='django_ledg_hidden_a01d42_idx'),
),
migrations.AddIndex(
model_name='entityunitmodel',
index=models.Index(fields=['entity'], name='django_ledg_entity__0bdfdc_idx'),
),
migrations.AlterUniqueTogether(
name='entityunitmodel',
unique_together={('entity', 'slug'), ('entity', 'document_prefix')},
),
migrations.AddIndex(
model_name='entitystatemodel',
index=models.Index(fields=['key'], name='django_ledg_key_ac156a_idx'),
),
migrations.AddIndex(
model_name='entitystatemodel',
index=models.Index(fields=['entity_model', 'fiscal_year', 'entity_unit', 'key'], name='django_ledg_entity__eeae49_idx'),
),
migrations.AlterUniqueTogether(
name='entitystatemodel',
unique_together={('entity_model', 'entity_unit', 'fiscal_year', 'key')},
),
migrations.AddIndex(
model_name='entitymodel',
index=models.Index(fields=['admin'], name='django_ledg_admin_i_09f5c9_idx'),
),
migrations.AddIndex(
model_name='entitymanagementmodel',
index=models.Index(fields=['entity', 'user'], name='django_ledg_entity__9541e6_idx'),
),
migrations.AddIndex(
model_name='entitymanagementmodel',
index=models.Index(fields=['user', 'entity'], name='django_ledg_user_id_b7497b_idx'),
),
migrations.AddIndex(
model_name='customermodel',
index=models.Index(fields=['created'], name='django_ledg_created_ba7f4c_idx'),
),
migrations.AddIndex(
model_name='customermodel',
index=models.Index(fields=['updated'], name='django_ledg_updated_f9ac90_idx'),
),
migrations.AddIndex(
model_name='customermodel',
index=models.Index(fields=['active'], name='django_ledg_active_967a81_idx'),
),
migrations.AddIndex(
model_name='customermodel',
index=models.Index(fields=['hidden'], name='django_ledg_hidden_dda722_idx'),
),
migrations.AddIndex(
model_name='customermodel',
index=models.Index(fields=['customer_number'], name='django_ledg_custome_16f95a_idx'),
),
migrations.AlterUniqueTogether(
name='customermodel',
unique_together={('entity_model', 'customer_number')},
),
migrations.AddIndex(
model_name='chartofaccountmodel',
index=models.Index(fields=['entity'], name='django_ledg_entity__48d6e0_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['bill_status'], name='django_ledg_bill_st_9e158c_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['terms'], name='django_ledg_terms_752251_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['cash_account'], name='django_ledg_cash_ac_82021a_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['prepaid_account'], name='django_ledg_prepaid_5230e5_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['unearned_account'], name='django_ledg_unearne_806444_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['date_due'], name='django_ledg_date_du_a8c481_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['date_draft'], name='django_ledg_date_dr_7a448e_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['date_in_review'], name='django_ledg_date_in_8887cb_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['date_approved'], name='django_ledg_date_ap_3208a1_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['date_paid'], name='django_ledg_date_pa_daf06c_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['date_canceled'], name='django_ledg_date_ca_e43055_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['date_void'], name='django_ledg_date_vo_14c747_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['vendor'], name='django_ledg_vendor__28be85_idx'),
),
migrations.AddIndex(
model_name='billmodel',
index=models.Index(fields=['bill_number'], name='django_ledg_bill_nu_6b97b4_idx'),
),
migrations.AddIndex(
model_name='bankaccountmodel',
index=models.Index(fields=['account_type'], name='django_ledg_account_009a4a_idx'),
),
migrations.AddIndex(
model_name='bankaccountmodel',
index=models.Index(fields=['cash_account'], name='django_ledg_cash_ac_59a8af_idx'),
),
migrations.AlterUniqueTogether(
name='bankaccountmodel',
unique_together={('entity_model', 'account_number'), ('entity_model', 'cash_account', 'account_number', 'routing_number')},
),
migrations.AddIndex(
model_name='accountmodel',
index=models.Index(fields=['role'], name='django_ledg_role_812d08_idx'),
),
migrations.AddIndex(
model_name='accountmodel',
index=models.Index(fields=['balance_type'], name='django_ledg_balance_daddac_idx'),
),
migrations.AddIndex(
model_name='accountmodel',
index=models.Index(fields=['active'], name='django_ledg_active_f8adc2_idx'),
),
migrations.AddIndex(
model_name='accountmodel',
index=models.Index(fields=['coa_model'], name='django_ledg_coa_mod_e19964_idx'),
),
migrations.AddIndex(
model_name='accountmodel',
index=models.Index(fields=['role', 'balance_type', 'active'], name='django_ledg_role_1bff96_idx'),
),
migrations.AlterUniqueTogether(
name='accountmodel',
unique_together={('coa_model', 'code')},
),
]
|
PypiClean
|
/BWP-0.7.0.tar.gz/BWP-0.7.0/bwp/static/bwp/0.7.0/js/bwp.min.js
|
var NEWOBJECTKEY="newObject",FIELD=null,delay=null,ACTION_WAIT=null,FILTER_TYPES=[["exact","Равно",1],["gt","Больше",1],["gte","Больше или равно",1],["lt","Меньше",1],["lte","Меньше или равно",1],["range","Диапазон",2],["in","Список",99],["icontains","Содержит",1],["istartswith","Начинается",1],["iendswith","Заканчивается",1],["isnull","Пусто",0],["blank","Пустая строка",0],];window.TEMPLATES={};window.REGISTER={};_.mixin(_.str.exports());function isEmpty(b){for(var a in b){return false}return true}delay=(function(){var a=0;return function(c,b){clearTimeout(a);a=setTimeout(c,b)}})();function generatorID(d,h){var b=[],f="i",a=1000,g=9999,c=Math.floor(Math.random()*(g-a+1))+a;f+=$.now()+String(c);if(d){b.push(d)}b.push(f);if(h){b.push(h)}return validatorID(b)}function validatorID(a){if($.type(a)==="array"){a=a.join("_")}return a.replace(/[\.,\:,\/, ,\(,\),=,?]/g,"-")}function handlerHideAlert(){if(DEBUG){console.log("function:handlerHideAlert")}$(".alert").alert("close");$("#alert-place").css("z-index","-1000")}function handlerShowAlert(d,g,b,a){if(DEBUG){console.log("function:handlerShowAlert")}if(!b){b="alert-error"}var c,f=60000;if($.type(g)=="object"){g=$.toJSON(g).replace(/\,\"/g,', "').replace(/\"\:/g,'": ')}else{if(g.match(/<\!DOCTYPE/)){c=g.match(/<[title,TITLE]+>(.*)<\/[title,TITLE]+>/);if(c){d=c[1]}c=g.match(/<[body,BODY]+>([^+]*)<\/[body,BODY]+>/);if(c){g=c[1].replace(/<\/?[^>]+>/g,"").replace(/ [ ]+/g," ").replace(/\n[\n]+/g,"\n")}}}if(g.length>1024){g=g.substring(0,1024)+" ..."}html=TEMPLATES.alert({head:d,msg:g,cls:b});$("#alert-place").css("z-index","1000").html(html);$(window).scrollTop(0);$(".alert").alert().on("closed.bs.alert",handlerHideAlert);if(a){delay(a,f)}else{delay(handlerHideAlert,f)}return false}function jsonAPI(a,g,d,b,c){if(DEBUG){console.log("function:jsonAPI")}if(!a){a={method:"get_settings"}}if(b){console.log("SYNCRONOUS REQUEST!!!",a,d)}var f=$.quickAPI({url:BWP_API_URL,data:a,async:!b,timeout:c||AJAX_TIMEOUT,callback:g,log:undefined,});return f}function classSettings(a){this.meta={};self=this;if((typeof localStorage=="undefined")||(typeof $.evalJSON=="undefined")){return{}}_unique_key=SETTINGS_UNIQUE_KEY;_server={};_local={tabs:[],filters:{},};_values={server:_server,local:_local};_values_is_set=false;_responseServer=null;this.__defineGetter__("ready",function(){return _values_is_set});_callback=a;_run_callback=function(){if(_callback instanceof Function){_callback();if(!_callback.__not_reset_after__){_callback=a}}};this.callback=_callback;_last_set_server=null;this.last_set=_last_set_server;_last_get_server=null;this.last_get=_last_get_server;_init=function(b){if(b){_callback=b}_values_is_set=false;_local=$.evalJSON(localStorage.getItem(_unique_key))||_local;_get_server();return self};this.init=_init;this.reload=_init;_check_init=function(){if(!_values_is_set){_init()}return self};this.__defineGetter__("all",function(){_check_init();return _values});this.__defineGetter__("server",function(){_check_init();return _server});this.__defineGetter__("local",function(){_check_init();return _local});this.save=function(c,b){if(c){_callback=c}if(b!="local"){_set_server()}if(b!="server"){localStorage.setItem(_unique_key,$.toJSON(self.local))}_run_callback();return self};this.save_server=function(b){return self.save(b,"server")};this.save_local=function(b){return self.save(b,"local")};_set_server=function(){_responseServer=null;args={method:"set_settings",settings:self.server};cb=function(c,b,d){if(!c.data){handlerShowAlert("Ошибка",c.message)}else{_last_set_server=new 
Date();_responseServer=c}};jqxhr=new jsonAPI(args,cb,"SETTINGS.set_server() call jsonAPI()");return[_last_set_server,_responseServer,jqxhr]};_get_server=function(){_responseServer=null;args={method:"get_settings"};cb=function(c,b,d){_server=c.data;_last_get_server=new Date();_responseServer=c;_values_is_set=true;_run_callback()};jqxhr=new jsonAPI(args,cb,"SETTINGS.get_server() call jsonAPI()");return[_last_get_server,_responseServer,jqxhr]};this.cleanTabs=function(){_tabs=[];$.each(_local.tabs,function(b,c){if(c){_tabs.push(c)}});_local.tabs=_tabs;return self}}function classApp(b){this.has_module_perms=b.has_module_perms;this.name=b.name;this.id=validatorID(this.name);this.label=b.label;this.title="Приложение:"+this.label;var a=[];this.models=a;var c=this;$.each(b.models,function(d,f){a.push(new classModel(c,f))});REGISTER[this.id]=this}function classModel(g,f){this.template=TEMPLATES.layoutModel;this.app=g;this.perms=f.perms;this.meta=f.meta;this.name=f.name;this.model=this.name;this.id=validatorID(this.model);this.label=f.label;this.title=this.app.label+": "+this.label;this.query=null;this.fix={};this.paginator=null;var h=[];this.collection_reports=h;var c=[];this.object_reports=c;var b={};this.composes=b;var d={};this.actions=d;var a=this;if(f.meta.compositions){$.each(f.meta.compositions,function(i,j){b[j.meta.related_name]=j})}if(f.meta.reports){$.each(f.meta.reports,function(i,j){if(j.for_object){c.push(j)}else{h.push(j)}});if(this.collection_reports.length<1){this.collection_reports=null}if(this.object_reports.length<1){this.object_reports=null}}if((!this.meta)||(!this.meta.list_display)){console.log("Модель не может быть зарегистрирована.")}else{REGISTER[this.id]=this}}function classSelector(c,b){$.extend(true,this,c);this.id=validatorID([this.id,"selector"]);this.template=TEMPLATES.layoutSelector;this.perms={"delete":false,add:false,change:false};this.actions=null;this.meta.list_display=this.meta.list_display;this.meta.list_per_page=5;this.multiple=b?true:false;var a=this;REGISTER[this.id]=this;this.get_checked_items=function(){_checkboxes=$("#collection_"+this.id+" tbody td:nth-child(1) input[type=checkbox]:checked");return _checkboxes}}function classCompose(a,c){this.template=TEMPLATES.layoutCompose;this.object=a;this.editable=Boolean(this.object.pk);this.perms=c.perms;this.name=c.name;this.meta=c.meta;this.compose=this.meta.related_name;this.model=this.meta.related_model;this.is_m2m=this.meta.is_many_to_many;this.id=validatorID([this.object.id,this.name,this.compose]);this.label=c.label;this.title=this.object.label+": "+this.label;this.query=null;this.fix={};this.m2m_fix=[];this.m2m_fixaction=null;this.paginator=null;var b={};this.actions=b;var d=this;REGISTER[this.id]=this}function classObject(f){this.template=TEMPLATES.layoutObject;this.model=REGISTER[validatorID(f.model)];this.pk=f.pk;this.id=this.pk?validatorID(f.model+"."+this.pk):generatorID(NEWOBJECTKEY);this.__unicode__=this.pk?f.__unicode__:"Новый объект ("+this.model.label+")";this.label=this.__unicode__;this.title=this.model.label+": "+this.label;var a=f.properties;this.properties=a;var d=f.fields;this.fields=d;this.get_fields=function(){L={};$.each(this.model.meta.fields,function(g,h){L[h]=d[h]});return L};this.get_column_value=function(g){if(g in d){return d[g]}else{if(g in a){return a[g]}}return""};var c=[];this.composes=c;this.widgets=this.model.meta.widgets;this.actions=this.model.actions;this.fix={};this.fixaction=null;var b=this;if(this.model.composes){$.each(this.model.composes,function(g,h){c.push(new 
classCompose(b,h))})}REGISTER[this.id]=this}function handlerCommitInstance(b,c){if(DEBUG){console.log("function:handlerCommitInstance")}var g=[];var i=b.model;var f=function(j){$.extend(true,j.fields,j.fix);g.push({pk:j.pk,fields:j.fields,model:j.model.name,action:j.fixaction,fix:j.fix,})};if((b instanceof classModel)||(b instanceof classCompose)){i=b;$.each(b.fix,function(j,k){f(k)})}else{f(b)}var d={method:"commit",objects:g,};var a=function(k,j,l){handlerCollectionGet(i);if(c){c()}};var h=new jsonAPI(d,a,"handlerCommitInstance(instance) call jsonAPI()",true,300000);return h}function handlerCommitComposeM2M(d,b){if(DEBUG){console.log("function:handlerCommitComposeM2M")}var c={method:"m2m_commit",model:d.object.model.name,pk:d.object.pk,compose:d.compose,action:d.m2m_fixaction,objects:d.m2m_fix,};var a=function(h,g,i){handlerCollectionGet(d);if(b){b()}};var f=new jsonAPI(c,a,"handlerCommitComposeM2M(compose, done) call jsonAPI()");return f}function handlerLayoutRender(a,c){if(DEBUG){console.log("function:handlerLayoutRender")}var b=a.template({data:a});if(!c){$("#layout_"+a.id).html(b);if(a instanceof classObject){$("#layout_"+a.id+" button[data-loading=true]").one("click",eventLayoutLoad)}}return b}function handlerLayoutLoad(a){if(DEBUG){console.log("function:handlerLayoutLoad")}handlerLayoutRender(a);if((a instanceof classModel)||(a instanceof classCompose)){handlerCollectionGet(a)}}function eventLayoutLoad(c){if(DEBUG){console.log("function:eventLayoutLoad")}var b=$(this).data(),a=REGISTER[b.id];handlerLayoutLoad(a);$(this).removeAttr("data-loading");c.preventDefault()}function handlerSelectAllToggle(){var a=$(this).parents("table"),b=a.find("tbody td:nth-child(1) input[type=checkbox]"),c=this.checked;$.each(b,function(d,f){f.checked=c})}function eventRowClick(b){if(DEBUG){console.log("function:eventRowClick")}var a=$(this);a.addClass("info").siblings("tr").removeClass("info")}function handlerCollectionRender(a,c){if(DEBUG){console.log("function:handlerCollectionRender")}if(a instanceof classObject){return""}var b=TEMPLATES.collection({data:a});if(!c){$("#collection_"+a.id).html(b)}return b}function handlerCollectionGet(b){if(DEBUG){console.log("function:handlerCollectionGet")}var d=[];if(b.filters){$.each(b.filters,function(g,h){d.push({active:h.active,field:h.field,type:h.type,inverse:h.inverse,values:h.values,field_title:h.field_title,type_title:h.type_title,})})}var c={method:"get_collection",model:b.model,compose:b.compose||null,ordering:b.meta.ordering||null,fields:b.meta.search_fields||null,filters:d,};c[b.meta.search_key]=b.query||null;if(b.object){c.pk=b.object.pk||0}if(b.paginator){c.page=b.paginator.page||1;c.per_page=b.paginator.per_page||null}var a=function(i,g,j){b.paginator=i.data;var h=handlerCollectionRender(b);handlerSearchSpinner(b.id,false)};var f=new jsonAPI(c,a,"handlerCollectionGet() call jsonAPI()");return f}function handlerSearchSpinner(c,b){if(DEBUG){console.log("function:handlerSearchSpinner")}var a=$("[data-action=collection_search_refresh][data-id="+c+"] .fa");if(b){a.addClass("fa-spin")}else{a.removeClass("fa-spin")}}function eventCollectionSearch(f){if(DEBUG){console.log("function:eventCollectionSearch")}var b=this,d=$(b).data(),a=REGISTER[d.id];val=$(b).val()||null,init_delay=500,len=val?val.length:0;a.query=val;var c=function(){handlerSearchSpinner(a.id,true);return 
handlerCollectionGet(a)};if(len>10){init_delay=100}else{if(len>9){init_delay=150}else{if(len>8){init_delay=200}else{if(len>7){init_delay=250}else{if(len>6){init_delay=300}else{if(len>5){init_delay=500}else{if(len>4){init_delay=700}else{if(len>3){init_delay=900}else{if(len>2){init_delay=1100}else{if(len>1){init_delay=1300}else{if(len>0){init_delay=1500}}}}}}}}}}}delay(c,init_delay);f.preventDefault()}function eventCollectionSearchRefresh(d){if(DEBUG){console.log("function:eventCollectionSearchRefresh")}var b=$("input[data-action=collection_search][data-id="+$(this).data().id+"]"),c=$(b).data(),a=REGISTER[c.id];handlerSearchSpinner($(this).data().id,true);a.query=$(b).val()||null;handlerCollectionGet(a);$(this).blur();d.preventDefault()}function eventCollectionCount(c){if(DEBUG){console.log("function:eventCollectionCount")}var b=$(this).data(),a=REGISTER[b.id];if(a.paginator){a.paginator.page=1;a.paginator.per_page=$(this).val()||$(this).data()["count"]||a.meta.list_per_page;$("[data-placeholder=collection_count][data-id="+a.id+"]").text(a.paginator.per_page)}handlerCollectionGet(a);c.preventDefault()}function eventCollectionPage(c){if(DEBUG){console.log("function:eventCollectionPage")}var b=$(this).data(),a=REGISTER[b.id];if(a.paginator){a.paginator.page=$(this).val()||$(this).data()["page"]||1}handlerCollectionGet(a);c.preventDefault()}function eventCollectionSorted(f){if(DEBUG){console.log("function:eventCollectionSorted")}var d=$(this).data(),b=REGISTER[d.id],a=b.meta.ordering||[],c=$.inArray(d.column,a);if(c>-1){a[c]="-"+d.column}else{c=$.inArray("-"+d.column,a);if(c>-1){a=a.slice(0,c).concat(a.slice(c+1))}}if(c==-1){a.push(d.column)}b.meta.ordering=a;handlerCollectionGet(b);f.preventDefault()}function handlerObjectAdd(b){if(DEBUG){console.log("function:handlerObjectAdd")}var c={method:"get_object",model:b.name,pk:null,filler:{},};if(b instanceof classCompose){c.filler[b.meta.related_field]=b.object.pk}var a=function(h,f,i){var g=new classObject(h.data);g.fixaction="add";g.model.fix[g.id]=g;handlerTabOpen(g)};var d=new jsonAPI(c,a,"handlerObjectAdd(model) call jsonAPI()");return d}function handlerTempUploadFile(a,d){if(DEBUG){console.log("function:handlerTempUploadFile")}var c=new FormData(),b=d.files[0],f=new XMLHttpRequest();if(!b){a.fix[d.name]=0;return true}else{c.append("file",b);f.open("POST","upload/"+a.model.name+"/",true);f.onload=function(h){var g=JSON.parse(this.response);a.fix[d.name]=g.data;$(d).siblings("button[name]").text(b.name).attr("title",b.name)};f.send(c);return true}}function handlerObjectChange(b,d){if(DEBUG){console.log("function:handlerObjectChange")}var a=d.attr("name"),c=d.attr("type"),f=d.val();if(c){c=c.toLowerCase()}if(c in {file:0,image:0}){$("#filelabel_"+b.id+"_"+a).removeClass("hide").text(f);handlerTempUploadFile(b,d[0])}else{if(c==="datetime-local"){f=$.dateParser(f,true)||f||null}else{if($.type(b.fields[a])==="array"){f=[f,d.text()]}else{if($.type(b.fields[a])==="boolean"){f=d.is(":checked")}}}}b.fix[a]=f;b.fixaction=b.fixaction||"change";b.model.fix[b.id]=b}function handlerObjectCopy(c,f){if(DEBUG){console.log("function:handlerObjectCopy")}if(c.id){var b=REGISTER[c.id],c={};c.model=b.model.name;c.pk=b.pk}var a={method:"get_object",model:c.model,pk:c.pk,copy:true,clone:f,};cb=function(i,g,j){var h=new classObject(i.data);h.fixaction="add";h.model.fix[h.id]=h;handlerTabOpen(h)};var d=new jsonAPI(a,cb,"handlerObjectCopy(data, clone) call jsonAPI()")}function handlerObjectDelete(d,b){if(DEBUG){console.log("function:handlerObjectDelete")}var 
c={method:"commit",objects:[{pk:d.pk,fields:{},model:d.model,action:"delete"}],};var a=function(h,g,i){handlerCollectionGet(REGISTER[validatorID(d.model)]);if(b){b()}};var f=new jsonAPI(c,a,"handlerObjectDelete(data, done) call jsonAPI()",true,180000)}function handlerObjectRowMuted(a){if(DEBUG){console.log("function:handlerObjectRowMuted")}$('tr[data-model="'+a.model.name+'"][data-pk="'+a.pk+'"]').addClass("muted")}function handlerObjectRowUnmuted(a){if(DEBUG){console.log("function:handlerObjectRowUnmuted")}$('tr[data-model="'+a.model.name+'"][data-pk="'+a.pk+'"]').removeClass("muted")}function eventObjectOpen(){if(DEBUG){console.log("function:eventObjectOpen")}var f=$(this),d=f.data();if(!d.model){return false}var c=REGISTER[d.id];if(c){handlerTabOpen(c);return null}var b={method:"get_object",model:d.model,pk:d.pk||null,};var a=function(j,h,k){var i=new classObject(j.data);f.data("id",i.id);handlerTabOpen(i)};var g=new jsonAPI(b,a,"eventObjectOpen() call jsonAPI()");return g}function eventObjectAdd(f){if(DEBUG){console.log("function:eventObjectAdd")}var d=$(this),c=d.data(),a=REGISTER[c.id],b=a.is_m2m?a:null;if((b)&&(f)&&(f.data)&&(f.data.m2m)){handlerM2MSelect(b);return true}handlerObjectAdd(a);f.preventDefault()}function eventObjectCopy(a){if(DEBUG){console.log("function:eventObjectCopy")}handlerObjectCopy($(this).data());a.preventDefault()}function eventObjectClone(){if(DEBUG){console.log("function:eventObjectClone")}handlerObjectCopy($(this).data(),true);e.preventDefault()}function eventObjectDelete(g){if(DEBUG){console.log("function:eventObjectDelete")}var a=undefined,f=$(this),c=f.data();if((g)&&(g.data)&&(g.data.m2m)){var d=REGISTER[c.id];d.m2m_fix=[c.pk];d.m2m_fixaction="delete";handlerCommitComposeM2M(d);return true}var b=REGISTER[c.id];if(b instanceof classObject){c.model=b.model.name;c.pk=b.pk;a=function(){handlerTabClose(b)}}var h="<b>Вы действительно желаете удалить этот объект?</b><br><i>Удаление невозможно обратить, если этот объект не рассчитан на перемещение в корзину.</i>";handlerModalShowSubmit(h,handlerObjectDelete,c,a);g.preventDefault()}function eventObjectChange(d){if(DEBUG){console.log("function:eventObjectChange")}var c=$(this),b=c.data(),a=REGISTER[b.id];handlerObjectChange(a,c);d.preventDefault()}function eventObjectReset(d){if(DEBUG){console.log("function:eventObjectReset")}var c=$(this),b=c.data(),a=REGISTER[b.id];a.fix={};handlerLayoutRender(a);d.preventDefault()}function eventObjectSave(d){if(DEBUG){console.log("function:eventObjectSave")}var c=$(this),b=c.data(),a=REGISTER[b.id];handlerCommitInstance(a,function(){handlerTabClose(a)});d.preventDefault()}function eventObjectSelect(c){if(DEBUG){console.log("function:eventObjectSelect")}var b=$(this),a=b.data();FIELD.val(a.pk).text(a.unicode).attr("title",a.unicode).change().siblings("button[disabled]").removeAttr("disabled");$("#modal").modal("hide");c.preventDefault()}function handlerM2MSelect(d){if(DEBUG){console.log("function:handlerM2MSelect")}var b=REGISTER[validatorID(d.name)],a=new classSelector(b,true),i="Выберите требуемые объекты",c=handlerLayoutRender(a,true),h=null,g={},f=[{model:b,action:"object_add",label:"Новый",css:"btn-warning"},{model:null,action:"modal_close",label:"Закрыть",css:"btn-default"},{model:d,action:"selector_append",label:"Добавить",css:"btn-info"},{model:d,action:"selector_submit",label:"Выбрать",css:"btn-primary"},];g={buttons:f,selector:a};h=TEMPLATES.modalFooter({mfoot:g,});handlerModalShow(i,c,h,function(){handlerCollectionGet(a)})}function 
handlerFieldSelect(f){if(DEBUG){console.log("function:handlerFieldSelect")}FIELD=f;var g=f.data(),c=REGISTER[g.id],b=REGISTER[validatorID(g.model)],a=new classSelector(b),i="Выберите требуемый объект",d=handlerLayoutRender(a,true),h=null;handlerModalShow(i,d,h,function(){handlerCollectionGet(a)})}function handlerSelectorSubmit(d,a){if(DEBUG){console.log("function:handlerSelectorSubmit")}var b=[],c=a.get_checked_items();$.each(c,function(f,g){b.push($(g).data().pk)});d.m2m_fix=b;d.m2m_fixaction="add";handlerCommitComposeM2M(d)}function eventFieldClear(c){if(DEBUG){console.log("function:eventFieldClear")}var b=$(this);b.attr("disabled","disabled");if((c)&&(c.data)&&(c.data.file)){var a=b.siblings("input[name]");a.val(null).change()}b.siblings("button[name]").val(null).html(" ").attr("title","").change();c.preventDefault()}function eventFieldSelect(c){if(DEBUG){console.log("function:eventFieldSelect")}var b=$(this);if((c)&&(c.data)&&(c.data.file)){b.siblings("input[name]").click()}else{var a=b.siblings("button[name]");handlerFieldSelect(a)}c.preventDefault()}function eventSelectorSubmit(f){if(DEBUG){console.log("function:eventSelectorSubmit")}var d=$(this),b=d.data(),c=REGISTER[b.id],a=REGISTER[validatorID([c.name,"selector"])];handlerSelectorSubmit(c,a);if(f.data.close){handlerModalHide()}f.preventDefault()}function handlerModalShowSubmit(j,g,i,h,f,d){if(DEBUG){console.log("function:handlerModalShowSubmit")}var b="Подтверждение",a=j,c=TEMPLATES.modalFooter({mfoot:{}});ACTION_WAIT=function(){g(i,h,f,d)};handlerModalShow(b,a,c)}function handlerModalShow(h,d,g,a){if(DEBUG){console.log("function:handlerModalShow")}var b=$("#modal"),f={};f.mhead=h;f.mbody=d;f.mfoot=g;var c=TEMPLATES.modal({modal:f});b.html(c).modal("show");if(a){a()}}function handlerModalHide(a){if(DEBUG){console.log("function:handlerModalHide")}$("#modal").modal("hide");if(a){a()}}function handlerMenuAppLoad(f){if(DEBUG){console.log("function:handlerMenuAppLoad")}var c=f?false:true,b={method:"get_apps"},a=function(i,g,k){var j=[];$.each(i.data,function(l,m){j.push(new classApp(m))});var h=TEMPLATES.menuApp({data:j});$("#menu-app ul[role=menu]").html(h);$("#menu-app").show();if(f){f()}};var d=new jsonAPI(b,a,"handlerMenuAppLoad() call jsonAPI()",c);return d}function handlerTabOpen(c){if(DEBUG){console.log("function:handlerTabOpen")}handlerObjectRowMuted(c);var b=$("#main-tab #tab_"+c.id);if(b.length>0){b.find("a").tab("show")}else{var a=TEMPLATES.layoutDefault({data:c});$("#main-tab-content").append(a);a=TEMPLATES.tab({data:c});$("#main-tab").append(a);delay(function(){$("#main-tab a:last").tab("show").click()},1);if((c.id.indexOf(NEWOBJECTKEY)==-1)&&($.inArray(c.id,SETTINGS.local.tabs)<0)&&($("#menu-app li[class!=disabled] a[data-id="+c.id+"]").size()>0)){SETTINGS.local.tabs.push(c.id);SETTINGS.save_local()}$("#tab_"+c.id+" a").one("click",eventLayoutLoad)}return true}function handlerTabClose(c){if(DEBUG){console.log("function:handlerTabClose")}var d=c?c.id:null;$("#tab_"+d).remove();$("#layout_"+d).remove();var a=REGISTER[d];if(a){handlerObjectRowUnmuted(a);if(a instanceof classObject){$.each(a.composes,function(f,g){delete REGISTER[g.id]});delete REGISTER[d]}}var b=$.inArray(d,SETTINGS.local.tabs);if(b>-1){delete SETTINGS.local.tabs[b];SETTINGS.cleanTabs().save_local()}$("#main-tab a:last").click()}function handlerTabRestore(){if(DEBUG){console.log("function:handlerTabRestore")}$.each(SETTINGS.local.tabs,function(a,b){$("#menu-app li[class!=disabled] a[data-id="+b+"]").click()})}function 
eventTabOpen(b){if(DEBUG){console.log("function:eventTabOpen")}var a=$(this).data();a=REGISTER[a.id]||a;handlerTabOpen(a);b.preventDefault()}function eventTabClose(b){if(DEBUG){console.log("function:eventTabClose")}var a=$(this).data();a=REGISTER[a.id]||a;handlerTabClose(a);b.preventDefault()}function handlerFiltersFromSettings(){if(DEBUG){console.log("function:handlerFiltersFromSettings")}if(!SETTINGS.local.filters){return false}$.each(SETTINGS.local.filters,function(b,c){var a=REGISTER[b];if(a){a.filters=c}});return true}function handlerFiltersRender(a){if(DEBUG){console.log("function:handlerFiltersRender")}if(a instanceof classObject){return false}var b=TEMPLATES.filters({data:a,is_new:false});$("#collection_filters_"+a.id).html(b);return b}function eventFilters(d){if(DEBUG){console.log("function:eventFilters")}var c=$(this).data(),a=REGISTER[c.id];if($("#list-filters_"+c.id).size()>0&&a.filters){$("#collection_filters_"+c.id).html("");var b=[];$.each(a.filters,function(f,g){if(g.field&&g.type&&g.values){b.push(g)}});a.filters=b}else{handlerFiltersRender(a)}$(this).blur();d.preventDefault()}function handlerFilterAppend(b){if(DEBUG){console.log("function:handlerFilterAppend")}if(b instanceof classObject){return false}var a=b.meta.filters[0]?b.meta.filters[0].field:null;if(!a){handlerShowAlert("Ошибка","Не установлены поля для фильтров.");return false}var d={type:null,inverse:false,active:false,field:null,values:null,field_title:null,type_title:null,};if(!b.filters){b.filters=[]}b.filters.push(d);var c=TEMPLATES.filter({data:b,item:d,index:b.filters.length-1,is_new:true});$("#list-filters_"+b.id).append(c);return c}function eventFilterAppend(b){if(DEBUG){console.log("function:eventFilterAppend")}var a=REGISTER[$(this).data().id];handlerFilterAppend(a);b.preventDefault()}function eventFilterRemove(d){if(DEBUG){console.log("function:eventFilterRemove")}var c=$(this).data(),a=REGISTER[c.id],b=c.filter_index;a.filters.splice(b,b+1);$("#list-filters_"+a.id+" [data-filter_index="+b+"]").remove();handlerCollectionGet(a);if(!SETTINGS.local.filters){SETTINGS.local.filters={}}SETTINGS.local.filters[a.id]=a.filters;SETTINGS.save(null,"local");d.preventDefault()}function eventFilterChangeField(d){if(DEBUG){console.log("function:eventFilterChangeField")}var c=$(this).data(),a=REGISTER[c.id],b=c.filter_index,f=$(this).val();text=$(this).find("[value="+f+"]").text();a.filters[b].field=f;a.filters[b].field_title=text;handlerFilterChangeActive(a,b,false);$("#list-filters_"+a.id+" [data-place=filter_values][data-filter_index="+b+"]").html("");if(!f){$("#list-filters_"+a.id+" [data-action=filter_change_active][data-filter_index="+b+"]").attr("disabled","disabled");$("#list-filters_"+a.id+" [data-action=filter_change_type][data-filter_index="+b+"]").attr("disabled","disabled");$("#list-filters_"+a.id+" [data-action=filter_change_inverse][data-filter_index="+b+"]").attr("disabled","disabled");$("#list-filters_"+a.id+" [data-place=filter_values][data-filter_index="+b+"]").html("")}else{$("#list-filters_"+a.id+" [data-action=filter_change_type][data-filter_index="+b+"]").removeAttr("disabled")}d.preventDefault()}function eventFilterChangeType(g){if(DEBUG){console.log("function:eventFilterChangeType")}var f=$(this).data(),a=REGISTER[f.id],b=f.filter_index,h=$(this).val();text=$(this).find("[value="+h+"]").text();a.filters[b].type=h;a.filters[b].type_title=text;handlerFilterChangeActive(a,b,false);if(!h){$("#collection_filters_"+a.id+" 
[data-action=filter_change_active][data-filter_index="+b+"]").attr("disabled","disabled");$("#collection_filters_"+a.id+" [data-action=filter_change_inverse][data-filter_index="+b+"]").attr("disabled","disabled");$("#collection_filters_"+a.id+" [data-place=filter_values][data-filter_index="+b+"]").html("");handlerCollectionGet(a);return true}$("#list-filters_"+a.id+" [data-action=filter_change_active][data-filter_index="+b+"]").removeAttr("disabled");$("#list-filters_"+a.id+" [data-action=filter_change_inverse][data-filter_index="+b+"]").removeAttr("disabled");var c=TEMPLATES.filter_values({data:a,index:b}),d=$("#list-filters_"+a.id+" [data-place=filter_values][data-filter_index="+b+"]");if(a.filters[b].type!="blank"){d.html(c);if(a.filters[b].type=="range"){d.append(c)}}g.preventDefault()}function handlerFilterChangeActive(a,b,c){if(DEBUG){console.log("function:handlerFilterChangeActive")}a.filters[b].active=c;return true}function eventFilterChangeActive(f){if(DEBUG){console.log("function:eventFilterChangeActive")}var d=$(this).data(),a=REGISTER[d.id],b=d.filter_index,c=!$(this).hasClass("active");handlerFilterChangeValues(a,b);handlerFilterChangeActive(a,b,c);if(c){$("#list-filters_"+a.id+" [data-action=filter_change_field][data-filter_index="+b+"]").attr("disabled","disabled");$("#list-filters_"+a.id+" [data-action=filter_change_type][data-filter_index="+b+"]").attr("disabled","disabled")}else{$("#list-filters_"+a.id+" [data-action=filter_change_field][data-filter_index="+b+"]").removeAttr("disabled");$("#list-filters_"+a.id+" [data-action=filter_change_type][data-filter_index="+b+"]").removeAttr("disabled")}handlerCollectionGet(a);f.preventDefault()}function handlerFilterChangeInverse(a,c,b){if(DEBUG){console.log("function:handlerFilterChangeInverse")}a.filters[c].inverse=b;return true}function eventFilterChangeInverse(f){if(DEBUG){console.log("function:eventFilterChangeInverse")}var d=$(this).data(),a=REGISTER[d.id],b=d.filter_index,c=!$(this).hasClass("active");handlerFilterChangeInverse(a,b,c);if(a.filters[b].active){handlerCollectionGet(a)}f.preventDefault()}function handlerFilterChangeValues(a,b){if(DEBUG){console.log("function:handlerFilterChangeValues")}var d=$("#list-filters_"+a.id+" [data-place=filter_values][data-filter_index="+b+"]"),c=d.find("[data-action=filter_change_values]");a.filters[b].values=[];a.filters[b].values_html=d.html();if(!SETTINGS.local.filters){SETTINGS.local.filters={}}SETTINGS.local.filters[a.id]=a.filters;SETTINGS.save(null,"local");$.each(c,function(f,g){a.filters[b].values.push($(g).val())});return true}function eventFilterChangeValues(d){if(DEBUG){console.log("function:eventFilterChangeValues")}var c=$(this).data(),a=REGISTER[c.id],b=c.filter_index,f=$(this).val();if(f){$(this).attr("value",f)}handlerFilterChangeActive(a,b,false);handlerFilterChangeValues(a,b);if(a.filters[b].active){handlerCollectionGet(a)}d.preventDefault()}function handlerFilterAppendValue(a,b){if(DEBUG){console.log("function:handlerFilterAppendValue")}var c=TEMPLATES.filter_values({data:a,index:b}),d=$("#list-filters_"+a.id+" [data-place=filter_values][data-filter_index="+b+"]");d.append(c);return true}function eventFilterAppendValue(d){if(DEBUG){console.log("function:eventFilterAppendValue")}var c=$(this).data(),a=REGISTER[c.id],b=c.filter_index;handlerFilterAppendValue(a,b);$(this).remove();d.preventDefault()}function eventCollectionPrint(g){if(DEBUG){console.log("function:eventCollectionPrint")}var 
f=$(this).data(),b=REGISTER[f.id],d=true,c={method:"get_collection_report_url",report:f.report,model:b.model,query:b.query,order_by:b.order_by,fields:b.fields,filters:b.filters,},a=function(k,i,l){var j=k.data;window.open(j,"_blank")};var h=new jsonAPI(c,a,"eventCollectionPrint() call jsonAPI()",d);g.preventDefault()}function eventObjectPrint(g){if(DEBUG){console.log("function:eventObjectPrint")}var f=$(this).data(),c=REGISTER[f.id],d=true,b={method:"get_object_report_url",model:f.model,pk:f.pk,report:f.report,},a=function(i,h,j){window.open(i.data,"_blank")};jqxhr=new jsonAPI(b,a,"eventObjectPrint() call jsonAPI()",d);return true}function eventWaitCancel(a){if(DEBUG){console.log("function:eventWaitCancel")}ACTION_WAIT=null;a.preventDefault()}function eventWaitSubmit(a){if(DEBUG){console.log("function:eventWaitSubmit")}ACTION_WAIT();ACTION_WAIT=null;a.preventDefault()}function handlerTemplates(){if(DEBUG){console.log("function:handlerTemplates")}TEMPLATES.alert=_.template($("#underscore-alert").html());TEMPLATES.menuApp=_.template($("#underscore-menu-app").html());TEMPLATES.collection=_.template($("#underscore-collection").html());TEMPLATES.layoutModel=_.template($("#underscore-layout-model").html());TEMPLATES.layoutSelector=_.template($("#underscore-layout-selector").html());TEMPLATES.layoutCompose=_.template($("#underscore-layout-compose").html());TEMPLATES.layoutObject=_.template($("#underscore-layout-object").html());TEMPLATES.layoutDefault=_.template($("#underscore-layout-default").html());TEMPLATES.tab=_.template($("#underscore-tab").html());TEMPLATES.modal=_.template($("#underscore-modal").html());TEMPLATES.modalFooter=_.template($("#underscore-modal-footer").html());TEMPLATES.filter=_.template($("#underscore-filter").html());TEMPLATES.filters=_.template($("#underscore-filters").html());TEMPLATES.filter_values=_.template($("#underscore-filter-values").html());return true}function handlerBindinds(){if(DEBUG){console.log("function:handlerBindinds")}$("body").on("click","tr[data-pk]",eventRowClick);$("#menu-app li[class!=disabled]").on("click","a",eventTabOpen);$("#main-tab").on("click","button.close[data-id]",eventTabClose);$("body").on("click",".btn-group button[data-toggle=tab]",function(b){var 
a=$(this).hasClass("active");if(!a){$(this).blur().siblings("button").removeClass("active")}else{b.preventDefault()}});handlerTabRestore();$("body").on("keyup","[data-action=collection_search]",eventCollectionSearch);$("body").on("change","[data-action=collection_search]",eventCollectionSearch);$("body").on("click","[data-action=collection_search_refresh]",eventCollectionSearchRefresh);$("body").on("click","[data-action=collection_count]",eventCollectionCount);$("body").on("change","[data-action=collection_page]",eventCollectionPage);$("body").on("click","[data-action=collection_page]",eventCollectionPage);$("body").on("click","th.sorted",eventCollectionSorted);$("body").on("click","[data-action=object_open]",eventObjectOpen);$("body").on("click","[data-action=object_copy]",eventObjectCopy);$("body").on("click","[data-action=object_clone]",eventObjectClone);$("body").on("click","[data-action=object_add]",eventObjectAdd);$("body").on("click","[data-action=object_add_m2m]",{m2m:true},eventObjectAdd);$("body").on("click","[data-action=object_delete]",eventObjectDelete);$("body").on("click","[data-action=object_delete_m2m]",{m2m:true},eventObjectDelete);$("body").on("keyup","[data-action=object_change]",eventObjectChange);$("body").on("change","[data-action=object_change]",eventObjectChange);$("body").on("click","[data-action=object_reset]",eventObjectReset);$("body").on("click","[data-action=object_save]",eventObjectSave);$("body").on("click","[data-action=object_select]",eventObjectSelect);$("#modal").on("click","[data-action=selector_append]",{close:false},eventSelectorSubmit);$("#modal").on("click","[data-action=selector_submit]",{close:true},eventSelectorSubmit);$("#modal").on("click","[data-action=wait_cancel]",eventWaitCancel);$("#modal").on("click","[data-action=wait_submit]",eventWaitSubmit);$("body").on("click","[data-action=field_clear]",eventFieldClear);$("body").on("click","[data-action=file_field_clear]",{file:true},eventFieldClear);$("body").on("click","[data-action=field_select]",eventFieldSelect);$("body").on("click","[data-action=file_field_select]",{file:true},eventFieldSelect);$("body").on("click","[data-toggle=checkboxes]",handlerSelectAllToggle);$("body").on("click","[data-action=collection_filters]",eventFilters);$("body").on("click","[data-action=filter_append]",eventFilterAppend);$("body").on("click","[data-action=filter_remove]",eventFilterRemove);$("body").on("click","[data-action=filter_change_field]",eventFilterChangeField);$("body").on("click","[data-action=filter_change_type]",eventFilterChangeType);$("body").on("click","[data-action=filter_change_inverse]",eventFilterChangeInverse);$("body").on("click","[data-action=filter_change_active]",eventFilterChangeActive);$("body").on("click","[data-action=filter_change_values]",eventFilterChangeValues);$("body").on("change","[data-action=filter_change_values]",eventFilterChangeValues);$("body").on("click","[data-action=filter_append_value]",eventFilterAppendValue);$("body").on("click","[data-action=collection_print]",eventCollectionPrint);$("body").on("click","[data-action=object_print]",eventObjectPrint);return true}function datetimeLocale(c){var b=$.dateParser(c);if(b){var a=function(d){if(d<10){return"0"+d}return d};year=b.getFullYear(),month=a(b.getMonth()+1),day=a(b.getDate()),hours=a(b.getHours()),minutes=a(b.getMinutes()),seconds=a(b.getSeconds());return year+"-"+month+"-"+day+"T"+hours+":"+minutes+":"+seconds}return 
c}$(document).ready(function(a){if(DEBUG){console.log("function:$(document).ready")}handlerTemplates();a("#menu-app").hide();a("#menu-func").hide();handlerMenuAppLoad(function(){window.SETTINGS=new classSettings();a("alert").alert();a(".dropdown-toggle").dropdown();window.SETTINGS.init(function(){a("#search").focus();handlerBindinds();handlerFiltersFromSettings();moment.locale(a("html").attr("lang"))})})});
|
PypiClean
|
/python_outbreak_info-1.0.1.tar.gz/python_outbreak_info-1.0.1/src/outbreak_data/outbreak_data.py
|
import sys
import requests
import warnings
import pandas as pd
from outbreak_data import authenticate_user
server = 'api.outbreak.info' # or 'dev.outbreak.info'
nopage = 'fetch_all=true&page=0' # worth verifying that this works with newer ES versions as well
covid19_endpoint = 'covid19/query'
test_server = 'test.outbreak.info'
def check_user_authentication():
"""
Get the authorization token.
    :return token: the user's authorization token
"""
    try:
        token = authenticate_user.get_authentication()
    except Exception:
        print("Issue retrieving token, please reauthenticate.")
        sys.exit(1)
    if token == "":
        print("Issue retrieving token, please reauthenticate.")
        sys.exit(1)
    return token
def get_outbreak_data(endpoint, argstring, server=server, auth=None, collect_all=False, curr_page=0):
"""
Receives raw data using outbreak API.
    Arguments:
    :param endpoint: path on the server where the data is stored
    :param argstring: query-string arguments to pass to the API call
    :param server: server to request from
    :param auth: auth token; if None, the cached token from authenticate_user is used
    :param collect_all: if True, pages through the scroll API and returns all data
    :param curr_page: iterator state used while paging recursively
    :return: the decoded JSON response containing the raw data
"""
    # Fall back to the module-level default host if None is passed explicitly
    if server is None:
        server = 'api.outbreak.info'
if auth is None:
#check the authentication
token = check_user_authentication()
else:
token = auth
token = 'Bearer ' + token
auth = {'Authorization': str(token)}
# initial request // used to collect data during recursion or as output of single API call
url = f'https://{server}/{endpoint}?{argstring}'
in_req = requests.get(url, headers=auth)
if in_req.headers.get('content-type') != 'application/json; charset=UTF-8':
raise ValueError('Warning!: Potentially missing endpoint. Data not being returned by server.')
if 400 <= in_req.status_code <= 499:
        raise NameError(f'Request error (client-side, possibly a bad endpoint): {in_req.status_code}')
elif 500 <= in_req.status_code <= 599:
raise NameError(f'Request error (server-side): {in_req.status_code}')
in_json = in_req.json()
# checking that the request contains data
hits = 'hits' in in_json.keys()
results = 'results' in in_json.keys()
contains_data = hits | results
if collect_all is False:
if hits and (len(in_json['hits']) == 0):
warnings.warn('Warning!: Data has "hits" but length of data is 0')
elif results and (len(in_json['results']) == 0):
warnings.warn('Warning!: Data has "results" but length of data is 0')
return in_json
elif collect_all and not contains_data:
return
elif collect_all and contains_data:
# initial dict for collecting new json data
data_json = {k: v if isinstance(v, list) else [v] for k, v in in_json.items()}
del data_json['_scroll_id']
# recursion during collection
scroll_id = in_json['_scroll_id']
fetching_page = '&fetch_all=True&page='
page = fetching_page + str(curr_page)
to_scroll = 'scroll_id=' + scroll_id + page
in_req = get_outbreak_data(endpoint, to_scroll, server=server, collect_all=True, curr_page=curr_page+1)
if not isinstance(in_req, type(None)):
if hits and len(in_req['hits']) == 0:
warnings.warn('Warning!: Recursion step has "hits" key but empty data value')
elif results and len(in_req['results']) == 0:
warnings.warn('Warning!: Recursion step has "results" key but empty data value')
in_req = {k: v if isinstance(v, list) else [v] for k, v in in_req.items()}
for k in data_json.keys():
try:
data_json[k].extend(in_req[k])
except TypeError:
continue
return data_json
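# Usage sketch (illustrative only): a paged query against the covid19
# endpoint using the module-level helpers defined above. The query string
# mirrors the conventions used by the wrapper functions below; the concrete
# location id is an assumption chosen for illustration.
#
#     raw = get_outbreak_data(covid19_endpoint,
#                             f'q=location_id:USA&sort=date&{nopage}',
#                             collect_all=True)
#     # With collect_all=True the scroll pages are accumulated recursively,
#     # so raw['hits'] holds the combined records from every page.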
def cases_by_location(location, server=server, auth=None, pull_smoothed=0):
"""
Loads data from a location if input is a string, or from multiple locations
if location is a list of string locations. Since this API endpoint supports paging, collect_all is used to return all data.
Arguments:
    :param location: A string or list of strings; separate multiple locations by ","
    :param pull_smoothed: 0 returns the raw daily increase in confirmed cases,
        1 returns the smoothed (rolling) counts, 2 returns both
    :return: A pandas dataframe
"""
# location names can be further checked to verify validity // proper format
if isinstance(location, str): # Converts all location input strings into lists: best universal input
location = location.replace(" ", "")
location = list(location.split(","))
if not isinstance(location, list) or len(location) == 0:
raise ValueError('Please enter at least 1 valid location id')
if pull_smoothed == 0:
confirmed='confirmed_numIncrease'
elif pull_smoothed == 1:
confirmed='confirmed_rolling'
elif pull_smoothed == 2:
confirmed='confirmed_rolling, confirmed_numIncrease'
else:
raise Exception("invalid parameter value for pull_smoothed!")
try:
locations = '(' + ' OR '.join(location) + ')'
args = f'q=location_id:{locations}&sort=date&fields=date,{confirmed},admin1&{nopage}'
raw_data = get_outbreak_data(covid19_endpoint, args, collect_all=True)
df = pd.DataFrame(raw_data['hits'])
        refined_table = df.drop(columns=['_score', 'admin1'])
        for i in location:  # checks each entry in location for invalid location ids after the request
            check = i[0:2]  # compares the first two characters of the input id against the returned ids
valid_loc = df.loc[df['_id'].str.startswith(check)]
if valid_loc.empty:
raise Exception('{} is not a valid location ID'.format(i))
        if not df.empty:
            return df
        return refined_table  # note: only reached when the query returns no rows
    except Exception:
        raise Exception('One or more location IDs in {} are invalid'.format(location))
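# Usage sketch (illustrative only; the location ids are assumptions):
#
#     df = cases_by_location('USA_US-CA')                    # raw daily increases
#     df = cases_by_location('USA_US-CA', pull_smoothed=1)   # smoothed rolling counts
#     df = cases_by_location('USA_US-CA, USA_US-NY')         # multiple locations via ","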
def prevalence_by_location(location, startswith=None, ndays=2048, nday_threshold=1, other_threshold=0, other_exclude=None, cumulative=None, server=server, auth=None):
"""
Loads prevalence data from a location
Arguments:
:param location: A string
    :param startswith: A string; loads data for all lineages beginning with the first letter(s) of the name
    :param other_threshold: (Default: 0) Minimum prevalence threshold below which lineages are accumulated under "Other".
    :param nday_threshold: (Default: 1) Minimum number of days on which the prevalence of a lineage must be below other_threshold for it to be accumulated under "Other".
    :param ndays: (Default: 2048) The number of days before the current date used as a window when accumulating lineages under "Other".
    :param other_exclude: Comma separated lineages that are NOT to be included under "Other" even if the conditions specified by the three thresholds above are met.
    :param cumulative: (Default: false) If true, return the cumulative prevalence.
    :return: A pandas dataframe
"""
query = f'location_id={location}&sort=date&ndays={ndays}&nday_threshold={nday_threshold}&other_threshold={other_threshold}'
if cumulative:
query = query + '&' + 'cumulative=true'
if other_exclude:
other_exclude = other_exclude.replace(" ", "")
query = query + '&' + f'other_exclude={other_exclude}'
lins = get_outbreak_data('genomics/prevalence-by-location-all-lineages', query)
df = pd.DataFrame(lins['results'])
if startswith is not None:
return df.loc[df['lineage'].str.startswith(startswith)]
return df
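# Usage sketch (illustrative only; the parameter values are assumptions):
#
#     df = prevalence_by_location('USA', other_threshold=0.03, nday_threshold=5)
#     ba = prevalence_by_location('USA', startswith='BA')    # only BA.* lineages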
def lineage_mutations(pango_lin, mutations=None, freq=0.8, server=server, auth=None):
"""Retrieves data from all mutations in a specified lineage above a frequency threshold.
- Mutiple queries for lineages and mutations can be separated by ","
- Use 'OR' in a string to return overlapping mutations in multiple lineages: 'BA.2 OR BA.1'
- param mutation is only useful for one lineage + mutation1 + mutation2 .... combinations
Arguments:
:param pango_lin: A string or list; loads data for all mutations in a specified PANGO lineage
:param mutation: A string or list; loads data of mutations for sequences classified as a specified PANGO lineage with mutation
:param freq: A number between 0 and 1 specifying the frequency threshold above which to return mutations (default = 0.8)
:return: A pandas dataframe"""
# Turns any string input into list format: most universal
if isinstance(pango_lin, str) is True:
if 'OR' in pango_lin:
lineages = pango_lin.split('OR')
lineages = "OR".join(lineages)
else:
lineages = pango_lin.replace(" ", "")
lineages = lineages.split(',')
lineages = ",".join(lineages)
elif isinstance(pango_lin, list):
lineages = ",".join(pango_lin)
    if mutations:
        if isinstance(mutations, str):
            mutations = mutations.replace(" ", "")
            mutations = mutations.split(",")  # normalize the string form into a list
        if isinstance(mutations, list):
            mutations = " AND ".join(mutations)
        lineages = lineages + " AND " + mutations  # append the AND-joined mutations to the lineage query
raw_data = get_outbreak_data('genomics/lineage-mutations', f'pangolin_lineage={lineages}', collect_all=False)
key_list = raw_data['results']
key_list = list(key_list)
for i in key_list: # Returns multiple lineages using ","
if i == key_list[0]:
df = pd.DataFrame(raw_data['results'][i])
else:
newdf = pd.DataFrame(raw_data['results'][i]) # append each dfs
df = pd.concat([df, newdf], sort=False)
    # Apply a client-side filter only for a custom, valid threshold; the server
    # already filters at the default 0.8. The original fell off the end (returning
    # None) when freq was set to an out-of-range value.
    if isinstance(freq, float) and 0 < freq < 1 and freq != 0.8:
        return df.loc[df['prevalence'] >= freq]
    return df
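# Usage sketch (illustrative only; the lineage and mutation names, including
# the 'S:L452R' notation, are assumptions):
#
#     df = lineage_mutations('BA.2')                         # server-side default threshold
#     df = lineage_mutations('BA.2 OR BA.1')                 # mutations shared by both lineages
#     df = lineage_mutations('BA.2', mutations='S:L452R', freq=0.5)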
def global_prevalence(pango_lin, mutations=None, cumulative=None, server=server):
"""Returns the global daily prevalence of a PANGO lineage
    Arguments:
    :param pango_lin: (Required). The PANGO lineage to query.
    :param mutations: (Optional). Comma separated list of mutations.
    :param cumulative: (Optional). If true, returns the cumulative global prevalence since the first day of detection.
    :return: A pandas dataframe."""
if mutations:
if isinstance(mutations, list):
mutations = ','.join(mutations)
elif isinstance(mutations, str):
mutations = mutations.replace(" ", "")
    query = pango_lin
if mutations:
query = query + '&' + f'mutations={mutations}'
if cumulative:
query = query + '&' + 'cumulative=true'
raw_data = get_outbreak_data('genomics/global-prevalence', f'pangolin_lineage={query}')
if cumulative:
data = {'Values' : raw_data['results']}
df = pd.DataFrame(data)
else:
df = pd.DataFrame(raw_data['results'])
return df
def sequence_counts(location=None, cumulative=None, sub_admin=None, server=server):
"""Returns number of sequences per day by location
    Arguments:
    :param location: (Optional). If not specified, the global total counts are returned.
    :param cumulative: (Optional). If true, returns the cumulative number of sequences to date.
    :param sub_admin: (Optional). If true and cumulative=true, returns the cumulative number of sequences for the immediate lower admin level.
    :return: A pandas dataframe.
"""
query = ''
if location:
query = query + f'location_id={location}'
if cumulative:
query = query + '&' + 'cumulative=true'
if sub_admin:
query = query + '&' + 'subadmin=true'
raw_data = get_outbreak_data('genomics/sequence-count', f'{query}')
if cumulative or sub_admin:
data = {'Values' : raw_data['results']}
df = pd.DataFrame(data)
else:
df = pd.DataFrame(raw_data['results'])
return df
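# Usage sketch (illustrative only; the location id is an assumption):
#
#     df = sequence_counts()                                  # global daily counts
#     df = sequence_counts('USA', cumulative=True, sub_admin=True)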
def mutations_by_lineage(mutation, location=None, pango_lin=None, freq=None, server=server):
"""Returns the prevalence of a mutation or series of mutations across specified lineages by location
    Arguments:
    :param mutation: (Required). A string or list of mutations.
    :param location: (Optional). If not specified, returns results globally.
    :param pango_lin: (Optional). If not specified, returns all PANGO lineages containing the mutation.
    :param freq: (Optional). Minimum frequency threshold for the prevalence of a mutation in a lineage.
    :return: A pandas dataframe.
"""
    if isinstance(mutation, str):
        mutation = mutation.replace(" ", "")
        mutation = mutation.split(",")
    mutations = ','.join(mutation)
    query = f'mutations={mutations}'
    if location is not None:
        query += f'&location_id={location}'
    if pango_lin is not None:
        query += f'&pangolin_lineage={pango_lin}'
raw_data = get_outbreak_data('genomics/mutations-by-lineage', f'{query}')
df = pd.DataFrame(raw_data['results'][mutations])
if isinstance(freq, float) and freq > 0 and freq < 1:
return df.loc[df['prevalence'] >= freq]
return df
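# Usage sketch (illustrative only; the mutation and lineage names are assumptions):
#
#     df = mutations_by_lineage('S:E484K', location='USA')
#     df = mutations_by_lineage('S:E484K', pango_lin='B.1.1.7', freq=0.1)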
def daily_prev(pango_lin, location='USA', mutations=None, cumulative=None, server=server):
"""Returns the daily prevalence of a PANGO lineage by location.
    Arguments:
    :param pango_lin: (Required). A string or list of lineages separated by ","
    :param location: (Optional). Default location: USA
    :param mutations: (Optional). List of mutations joined with AND
    :param cumulative: (Optional). If true, returns the cumulative prevalence since the first day of detection.
    :return: A pandas dataframe."""
if isinstance(pango_lin, str):
pango_lin = pango_lin.replace(" ", "")
elif isinstance(pango_lin, list):
pango_lin = ','.join(pango_lin)
    if mutations:
        if isinstance(mutations, str):
            mutations = mutations.replace(" ", "")
            mutations = mutations.split(",")
        mutations = ' AND '.join(mutations)
query = pango_lin + '&' + f'location_id={location}'
if mutations:
query = query + '&' + f'mutations={mutations}'
if cumulative:
query = query + '&' + 'cumulative=true'
raw_data = get_outbreak_data('genomics/prevalence-by-location', f'pangolin_lineage={query}', collect_all=False)
key_list = raw_data['results']
key_list = list(key_list)
    if cumulative:
        for i in key_list:
            if i == key_list[0]:
                df = pd.DataFrame({'Values': raw_data['results'][i]})
            else:
                # build a DataFrame before concatenating; concatenating raw dicts raises a TypeError
                newdf = pd.DataFrame({'Values': raw_data['results'][i]})
                df = pd.concat([df, newdf], sort=False)
else:
for i in key_list:
if i == key_list[0]:
df = pd.DataFrame(raw_data['results'][i])
else:
newdf = pd.DataFrame(raw_data['results'][i]) # append each df
df = pd.concat([df, newdf], sort=False)
return df
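# Usage sketch (illustrative only; the lineage names are assumptions):
#
#     df = daily_prev('BA.2')                                 # defaults to location='USA'
#     df = daily_prev(['BA.1', 'BA.2'], location='USA', cumulative=True)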
def lineage_by_sub_admin(pango_lin, mutations=None, location=None, ndays=0, detected=None, server=server):
"""Cumulative prevalence of a PANGO lineage by the immediate admin level of a location
    Arguments:
    :param pango_lin: (Required). A list or string. List of lineages separated by ","
    :param mutations: (Optional). A string or list of strings. Uses AND logic.
    :param location: (Optional). A string. If not specified, returns cumulative prevalence at the country level globally.
    :param ndays: (Optional). An integer. Number of days back from the current date over which to calculate cumulative counts. If not specified, there is no limit on the window.
    :param detected: (Optional). If true, only returns results where the lineage has been detected in the location (note: not currently appended to the query).
    :return: A pandas dataframe."""
if isinstance(pango_lin, str):
pango_lin = pango_lin.replace(" ", "")
elif isinstance(pango_lin, list):
pango_lin = ','.join(pango_lin)
query = pango_lin
    if mutations:
        if isinstance(mutations, str):
            mutations = mutations.replace(" ", "").split(",")
        mutations = ' AND '.join(mutations)
        query = query + '&' + f'mutations={mutations}'
    if location:
        query = query + '&' + f'location_id={location}'
    if ndays > 0:
        query = query + '&' + f'ndays={ndays}'
    if detected:  # forward the documented flag (assumed to map to detected=true, like cumulative/subadmin above)
        query = query + '&' + 'detected=true'
raw_data = get_outbreak_data('genomics/lineage-by-sub-admin-most-recent', f'pangolin_lineage={query}', collect_all=False)
key_list = raw_data['results']
key_list = list(key_list)
for i in key_list:
if i == key_list[0]:
df = pd.DataFrame(raw_data['results'][i])
else:
newdf = pd.DataFrame(raw_data['results'][i]) # append each df
df = pd.concat([df, newdf], sort=False)
return df
def collection_date(pango_lin, mutations=None, location=None, server=server):
"""Most recent collection date by location
Arguments:
:param pango_lin: A string. (Required).
:param mutations: (Optional). A string or list of strings. Comma separated list of mutations.
:param location: (Optional). If not specified, return most recent date globally.
:return: A pandas dataframe.
"""
if mutations:
if isinstance(mutations, list):
mutations = ','.join(mutations)
elif isinstance(mutations, str):
mutations = mutations.replace(" ", "")
query = pango_lin
if mutations:
query = query + '&' + f'mutations={mutations}'
if location:
query = query + '&' + f'location_id={location}'
raw_data = get_outbreak_data('genomics/most-recent-collection-date-by-location', f'pangolin_lineage={query}', collect_all=False)
data = {'Values' : raw_data['results']}
df = pd.DataFrame(data)
return df
def submission_date(pango_lin, mutations=None, location=None, server=server):
"""Returns the most recent submission date by location
Arguments:
:param pango_lin: A string. (Required).
:param mutations: (Optional). A string or list of strings. Comma separated list of mutations.
:param location: (Optional). If not specified, return most recent date globally.
:return: A pandas dataframe."""
if mutations:
if isinstance(mutations, list):
mutations = ','.join(mutations)
elif isinstance(mutations, str):
mutations = mutations.replace(" ", "")
query = pango_lin
if mutations:
query = query + '&' + f'mutations={mutations}'
if location:
query = query + '&' + f'location_id={location}'
raw_data = get_outbreak_data('genomics/most-recent-submission-date-by-location', f'pangolin_lineage={query}', collect_all=False)
data = {'Values' : raw_data['results']}
df = pd.DataFrame(data)
return df
def mutation_details(mutations, server=server):
""" Returns details of a mutation.
Arguments:
:param mutations: (Required). Comma separated list of mutations.
:return: A pandas dataframe.
"""
if isinstance(mutations, str):
mutations = mutations.replace(" ", "")
elif isinstance(mutations, list):
mutations = ','.join(mutations)
raw_data = get_outbreak_data('genomics/mutation-details', f'mutations={mutations}', collect_all=False)
r = raw_data['results']
keys = list(r[0])
    for i in r:  # for each separate result
        values = list(i.values())
        if i == r[0]:
            df = pd.DataFrame({"Key": keys, "Values": values})
        else:
            newdf = pd.DataFrame({"Key": keys, "Values": values})  # append each df
            df = pd.concat([df, newdf], axis=1, sort=False)
return df
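# A minimal usage sketch for mutation_details (requires network access; the
# mutation name is illustrative):
#
#     df = mutation_details('s:e484k')  # one "Key"/"Values" column pair per mutation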
def daily_lag(location=None, server=server):
"""Return the daily lag between collection and submission dates by location
Arguments:
:param location_id: (Optional). If not specified, return lag globally.
:return: A pandas dataframe.
"""
query = ''
if location:
query = '&' + f'location_id={location}'
raw_data = get_outbreak_data('genomics/collection-submission', query, collect_all=False)
r = raw_data['results']
    for i in r:  # for each separate result
values = tuple(i.values())
if i == r[0]:
df=pd.DataFrame({"date_collected": values[0], "date_submitted": values[1], "total_count": values[2]}, index=[0])
else:
newdf = pd.DataFrame({"date_collected": values[0], "date_submitted": values[1], "total_count": values[2]}, index=[0]) # append each df
df = pd.concat([df, newdf], sort=False)
return df
def wildcard_lineage(name, server=server):
"""Match lineage name using wildcards.
Arguments:
:param name: (Required). A string. Must use * at end of string. Supports wildcards. (Example: b.1*, ba.2*)
:return: A pandas dataframe."""
query = '' + '&' + f'name={name}'
raw_data = get_outbreak_data('genomics/lineage', query, collect_all=False)
r = raw_data['results']
    for i in r:  # for each separate result
        values = tuple(i.values())
        if i == r[0]:  # same per-result dataframe build as in daily_lag
df=pd.DataFrame({"name": values[0],
"total_count":values[1]}, index=[0])
else:
newdf = pd.DataFrame({"name": values[0],
"total_count":values[1]}, index=[0]) # append each df
df = pd.concat([df, newdf], sort=False)
return df
def wildcard_location(name, server=server):
"""Match location name using wildcards.
Arguments:
:param name: (Required). A string. Must use * at end of string. Supports wildcards. (Example: united*)
:return: A pandas dataframe."""
query = '' + '&' + f'name={name}'
raw_data = get_outbreak_data('genomics/location', query, collect_all=False)
r = raw_data['results']
    for i in r:  # for each separate result
        values = tuple(i.values())
        if i == r[0]:
            df = pd.DataFrame({"country": values[0], "country_id": values[1], 'id': values[2], "label": values[3],
                               "admin_level": values[4], "total_count": values[5]}, index=[0])
        else:
            newdf = pd.DataFrame({"country": values[0], "country_id": values[1], 'id': values[2], "label": values[3],
                                  "admin_level": values[4], "total_count": values[5]}, index=[0])  # append each df
            df = pd.concat([df, newdf], sort=False)
return df
def location_details(location, server=server):
"""Get location details using location ID.
Arguments:
:param location: A string. (Required).
:return: A pandas dataframe."""
query = '' + '&' + f'id={location}'
raw_data = get_outbreak_data('genomics/location-lookup', query, collect_all=False)
data = {'Values' : raw_data['results']}
df = pd.DataFrame(data)
return df
def wildcard_mutations(name, server=server):
"""Match mutations using wildcards.
Arguments:
:param name: (Required) A string. Must use * at end of string. Supports wildcards. (Example: s:e484*)
:return: A pandas dataframe."""
query = '' + '&' + f'name={name}'
raw_data = get_outbreak_data('genomics/mutations', query, collect_all=False)
r = raw_data['results']
    for i in r:  # for each separate result
values = tuple(i.values())
if i == r[0]:
df=pd.DataFrame({"name": values[0],
"total_count":values[1]}, index=[0])
else:
newdf = pd.DataFrame({"name": values[0],
"total_count":values[1]}, index=[0]) # append each df
df = pd.concat([df, newdf], sort=False)
return df
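# The wildcard helpers share one pattern: pass a name ending in '*' and get a
# dataframe of matches with counts (requires network access; names illustrative):
#
#     lineages = wildcard_lineage('ba.2*')
#     places = wildcard_location('united*')
#     muts = wildcard_mutations('s:e484*')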
|
PypiClean
|
/asone_ocr-1.6.2-py3-none-any.whl/easyocr/DBNet/decoders/seg_detector_loss.py
|
import sys
import torch
import torch.nn as nn
class SegDetectorLossBuilder():
'''
Build loss functions for SegDetector.
Details about the built functions:
Input:
pred: A dict which contains predictions.
thresh: The threshold prediction
binary: The text segmentation prediction.
thresh_binary: Value produced by `step_function(binary - thresh)`.
batch:
gt: Text regions bitmap gt.
mask: Ignore mask,
                pixels whose value is 1 make no contribution to the loss.
            thresh_mask: Mask indicating the regions covered by thresh supervision.
thresh_map: Threshold gt.
Return:
(loss, metrics).
loss: A scalar loss value.
        metrics: A dict containing partial loss values.
'''
def __init__(self, loss_class, *args, **kwargs):
self.loss_class = loss_class
self.loss_args = args
self.loss_kwargs = kwargs
def build(self):
return getattr(sys.modules[__name__], self.loss_class)(*self.loss_args, **self.loss_kwargs)
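# A minimal sketch of building a loss by name; any of the loss classes defined
# below in this module is a valid `loss_class` value:
#
#     loss_fn = SegDetectorLossBuilder('DiceLoss', eps=1e-6).build()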
class DiceLoss(nn.Module):
'''
DiceLoss on binary.
For SegDetector without adaptive module.
'''
def __init__(self, eps=1e-6):
super(DiceLoss, self).__init__()
from .dice_loss import DiceLoss as Loss
self.loss = Loss(eps)
def forward(self, pred, batch):
loss = self.loss(pred['binary'], batch['gt'], batch['mask'])
return loss, dict(dice_loss=loss)
class BalanceBCELoss(nn.Module):
'''
    BalanceCrossEntropyLoss on binary.
For SegDetector without adaptive module.
'''
def __init__(self, eps=1e-6):
super(BalanceBCELoss, self).__init__()
from .balance_cross_entropy_loss import BalanceCrossEntropyLoss
self.loss = BalanceCrossEntropyLoss()
def forward(self, pred, batch):
loss = self.loss(pred['binary'], batch['gt'], batch['mask'])
return loss, dict(dice_loss=loss)
class AdaptiveDiceLoss(nn.Module):
'''
Integration of DiceLoss on both binary
prediction and thresh prediction.
'''
def __init__(self, eps=1e-6):
super(AdaptiveDiceLoss, self).__init__()
from .dice_loss import DiceLoss
self.main_loss = DiceLoss(eps)
self.thresh_loss = DiceLoss(eps)
def forward(self, pred, batch):
assert isinstance(pred, dict)
assert 'binary' in pred
assert 'thresh_binary' in pred
binary = pred['binary']
thresh_binary = pred['thresh_binary']
gt = batch['gt']
mask = batch['mask']
main_loss = self.main_loss(binary, gt, mask)
thresh_loss = self.thresh_loss(thresh_binary, gt, mask)
loss = main_loss + thresh_loss
return loss, dict(main_loss=main_loss, thresh_loss=thresh_loss)
class AdaptiveInstanceDiceLoss(nn.Module):
'''
    InstanceDiceLoss on both binary and thresh_binary.
'''
def __init__(self, iou_thresh=0.2, thresh=0.3):
super(AdaptiveInstanceDiceLoss, self).__init__()
from .dice_loss import InstanceDiceLoss, DiceLoss
self.main_loss = DiceLoss()
self.main_instance_loss = InstanceDiceLoss()
self.thresh_loss = DiceLoss()
self.thresh_instance_loss = InstanceDiceLoss()
self.weights = nn.ParameterDict(dict(
main=nn.Parameter(torch.ones(1)),
thresh=nn.Parameter(torch.ones(1)),
main_instance=nn.Parameter(torch.ones(1)),
thresh_instance=nn.Parameter(torch.ones(1))))
    def partial_loss(self, weight, loss):
        # Multi-task uncertainty-style weighting: each loss is scaled by a learnable
        # weight, and the log(sqrt(weight)) term penalizes inflating the weight just
        # to shrink its loss.
        return loss / weight + torch.log(torch.sqrt(weight))
def forward(self, pred, batch):
main_loss = self.main_loss(pred['binary'], batch['gt'], batch['mask'])
thresh_loss = self.thresh_loss(pred['thresh_binary'], batch['gt'], batch['mask'])
main_instance_loss = self.main_instance_loss(
pred['binary'], batch['gt'], batch['mask'])
thresh_instance_loss = self.thresh_instance_loss(
pred['thresh_binary'], batch['gt'], batch['mask'])
loss = self.partial_loss(self.weights['main'], main_loss) \
+ self.partial_loss(self.weights['thresh'], thresh_loss) \
+ self.partial_loss(self.weights['main_instance'], main_instance_loss) \
+ self.partial_loss(self.weights['thresh_instance'], thresh_instance_loss)
metrics = dict(
main_loss=main_loss,
thresh_loss=thresh_loss,
main_instance_loss=main_instance_loss,
thresh_instance_loss=thresh_instance_loss)
metrics.update(self.weights)
return loss, metrics
class L1DiceLoss(nn.Module):
'''
L1Loss on thresh, DiceLoss on thresh_binary and binary.
'''
def __init__(self, eps=1e-6, l1_scale=10):
super(L1DiceLoss, self).__init__()
self.dice_loss = AdaptiveDiceLoss(eps=eps)
from .l1_loss import MaskL1Loss
self.l1_loss = MaskL1Loss()
self.l1_scale = l1_scale
def forward(self, pred, batch):
dice_loss, metrics = self.dice_loss(pred, batch)
l1_loss, l1_metric = self.l1_loss(
pred['thresh'], batch['thresh_map'], batch['thresh_mask'])
loss = dice_loss + self.l1_scale * l1_loss
metrics.update(**l1_metric)
return loss, metrics
class FullL1DiceLoss(L1DiceLoss):
'''
    L1Loss on thresh, where pixels with top-k losses in non-text regions are also counted.
DiceLoss on thresh_binary and binary.
'''
def __init__(self, eps=1e-6, l1_scale=10):
nn.Module.__init__(self)
self.dice_loss = AdaptiveDiceLoss(eps=eps)
from .l1_loss import BalanceL1Loss
self.l1_loss = BalanceL1Loss()
self.l1_scale = l1_scale
class L1BalanceCELoss(nn.Module):
'''
Balanced CrossEntropy Loss on `binary`,
MaskL1Loss on `thresh`,
DiceLoss on `thresh_binary`.
Note: The meaning of inputs can be figured out in `SegDetectorLossBuilder`.
'''
def __init__(self, eps=1e-6, l1_scale=10, bce_scale=5):
super(L1BalanceCELoss, self).__init__()
from .dice_loss import DiceLoss
from .l1_loss import MaskL1Loss
from .balance_cross_entropy_loss import BalanceCrossEntropyLoss
self.dice_loss = DiceLoss(eps=eps)
self.l1_loss = MaskL1Loss()
self.bce_loss = BalanceCrossEntropyLoss()
self.l1_scale = l1_scale
self.bce_scale = bce_scale
def forward(self, pred, batch):
bce_loss = self.bce_loss(pred['binary'], batch['gt'], batch['mask'])
metrics = dict(bce_loss=bce_loss)
if 'thresh' in pred:
l1_loss, l1_metric = self.l1_loss(pred['thresh'], batch['thresh_map'], batch['thresh_mask'])
dice_loss = self.dice_loss(pred['thresh_binary'], batch['gt'], batch['mask'])
metrics['thresh_loss'] = dice_loss
loss = dice_loss + self.l1_scale * l1_loss + bce_loss * self.bce_scale
metrics.update(**l1_metric)
else:
loss = bce_loss
return loss, metrics
class L1BCEMiningLoss(nn.Module):
'''
    Basically the same as L1BalanceCELoss, except that the BCE loss map is used as
    attention weights for the DiceLoss.
'''
def __init__(self, eps=1e-6, l1_scale=10, bce_scale=5):
super(L1BCEMiningLoss, self).__init__()
from .dice_loss import DiceLoss
from .l1_loss import MaskL1Loss
from .balance_cross_entropy_loss import BalanceCrossEntropyLoss
self.dice_loss = DiceLoss(eps=eps)
self.l1_loss = MaskL1Loss()
self.bce_loss = BalanceCrossEntropyLoss()
self.l1_scale = l1_scale
self.bce_scale = bce_scale
def forward(self, pred, batch):
bce_loss, bce_map = self.bce_loss(pred['binary'], batch['gt'], batch['mask'],
return_origin=True)
l1_loss, l1_metric = self.l1_loss(pred['thresh'], batch['thresh_map'], batch['thresh_mask'])
bce_map = (bce_map - bce_map.min()) / (bce_map.max() - bce_map.min())
dice_loss = self.dice_loss(
pred['thresh_binary'], batch['gt'],
batch['mask'], weights=bce_map + 1)
metrics = dict(bce_loss=bce_loss)
metrics['thresh_loss'] = dice_loss
loss = dice_loss + self.l1_scale * l1_loss + bce_loss * self.bce_scale
metrics.update(**l1_metric)
return loss, metrics
class L1LeakyDiceLoss(nn.Module):
'''
LeakyDiceLoss on binary,
MaskL1Loss on thresh,
DiceLoss on thresh_binary.
'''
def __init__(self, eps=1e-6, coverage_scale=5, l1_scale=10):
super(L1LeakyDiceLoss, self).__init__()
from .dice_loss import DiceLoss, LeakyDiceLoss
from .l1_loss import MaskL1Loss
self.main_loss = LeakyDiceLoss(coverage_scale=coverage_scale)
self.l1_loss = MaskL1Loss()
self.thresh_loss = DiceLoss(eps=eps)
self.l1_scale = l1_scale
def forward(self, pred, batch):
main_loss, metrics = self.main_loss(pred['binary'], batch['gt'], batch['mask'])
thresh_loss = self.thresh_loss(pred['thresh_binary'], batch['gt'], batch['mask'])
l1_loss, l1_metric = self.l1_loss(
pred['thresh'], batch['thresh_map'], batch['thresh_mask'])
metrics.update(**l1_metric, thresh_loss=thresh_loss)
loss = main_loss + thresh_loss + l1_loss * self.l1_scale
return loss, metrics
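# A minimal forward-pass sketch for the combined L1BalanceCELoss, with dummy
# tensors (assumes this module and its sibling loss modules are importable,
# e.g. from an installed easyocr package; shapes are illustrative):
#
#     import torch
#     loss_fn = SegDetectorLossBuilder('L1BalanceCELoss', l1_scale=10, bce_scale=5).build()
#     pred = {'binary': torch.rand(2, 1, 64, 64),
#             'thresh': torch.rand(2, 1, 64, 64),
#             'thresh_binary': torch.rand(2, 1, 64, 64)}
#     batch = {'gt': torch.randint(0, 2, (2, 1, 64, 64)).float(),
#              'mask': torch.ones(2, 1, 64, 64),
#              'thresh_map': torch.rand(2, 1, 64, 64),
#              'thresh_mask': torch.ones(2, 1, 64, 64)}
#     loss, metrics = loss_fn(pred, batch)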
|
PypiClean
|
/smartsheet-python-sdk-3.0.2.tar.gz/smartsheet-python-sdk-3.0.2/smartsheet/models/cell.py
|
from __future__ import absolute_import
from ..object_value import assign_to_object_value
from ..types import Boolean, Number, String, TypedList, TypedObject, json, six
from ..util import deserialize, serialize
from .cell_link import CellLink
from .explicit_null import ExplicitNull
from .hyperlink import Hyperlink
from .image import Image
class Cell:
"""Smartsheet Cell data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the Cell model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._column_id = Number()
self._column_type = String()
self._conditional_format = String()
self._display_value = String()
self._format_ = String()
self._formula = String()
self._hyperlink = TypedObject(Hyperlink)
self._image = TypedObject(Image)
self._link_in_from_cell = TypedObject(CellLink)
self._links_out_to_cells = TypedList(CellLink)
self._object_value = None
self._override_validation = Boolean()
self._strict = Boolean()
self._value = None
if props:
deserialize(self, props)
self.__initialized = True
def __getattr__(self, key):
if key == "format":
return self.format_
else:
raise AttributeError(key)
def __setattr__(self, key, value):
if key == "format":
self.format_ = value
else:
super().__setattr__(key, value)
@property
def column_id(self):
return self._column_id.value
@column_id.setter
def column_id(self, value):
self._column_id.value = value
@property
def column_type(self):
return self._column_type.value
@column_type.setter
def column_type(self, value):
self._column_type.value = value
@property
def conditional_format(self):
return self._conditional_format.value
@conditional_format.setter
def conditional_format(self, value):
self._conditional_format.value = value
@property
def display_value(self):
return self._display_value.value
@display_value.setter
def display_value(self, value):
self._display_value.value = value
@property
def format_(self):
return self._format_.value
@format_.setter
def format_(self, value):
self._format_.value = value
@property
def formula(self):
return self._formula.value
@formula.setter
def formula(self, value):
self._formula.value = value
@property
def hyperlink(self):
return self._hyperlink.value
@hyperlink.setter
def hyperlink(self, value):
self._hyperlink.value = value
@property
def image(self):
return self._image.value
@image.setter
def image(self, value):
self._image.value = value
@property
def link_in_from_cell(self):
return self._link_in_from_cell.value
@link_in_from_cell.setter
def link_in_from_cell(self, value):
self._link_in_from_cell.value = value
@property
def links_out_to_cells(self):
return self._links_out_to_cells
@links_out_to_cells.setter
def links_out_to_cells(self, value):
self._links_out_to_cells.load(value)
@property
def object_value(self):
return self._object_value
@object_value.setter
def object_value(self, value):
self._object_value = assign_to_object_value(value)
@property
def override_validation(self):
return self._override_validation.value
@override_validation.setter
def override_validation(self, value):
self._override_validation.value = value
@property
def strict(self):
return self._strict.value
@strict.setter
def strict(self, value):
self._strict.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if isinstance(
value, (six.string_types, six.integer_types, float, bool, ExplicitNull)
):
self._value = value
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
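# A minimal usage sketch (assumes the smartsheet package is installed; the
# column id and value are illustrative):
#
#     from smartsheet.models.cell import Cell
#     cell = Cell()
#     cell.column_id = 7036894123976580
#     cell.value = 'hello'
#     print(cell.to_json())  # serialized via to_dict() and json.dumps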
|
PypiClean
|
/taskcc-alipay-sdk-python-3.3.398.tar.gz/taskcc-alipay-sdk-python-3.3.398/alipay/aop/api/domain/ZhimaCreditEpRatingInnerInitializeModel.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
class ZhimaCreditEpRatingInnerInitializeModel(object):
def __init__(self):
self._credit_category = None
self._ep_cert_no = None
self._ep_name = None
self._m_category = None
self._member_type = None
self._out_order_no = None
self._user_id = None
@property
def credit_category(self):
return self._credit_category
@credit_category.setter
def credit_category(self, value):
self._credit_category = value
@property
def ep_cert_no(self):
return self._ep_cert_no
@ep_cert_no.setter
def ep_cert_no(self, value):
self._ep_cert_no = value
@property
def ep_name(self):
return self._ep_name
@ep_name.setter
def ep_name(self, value):
self._ep_name = value
@property
def m_category(self):
return self._m_category
@m_category.setter
def m_category(self, value):
self._m_category = value
@property
def member_type(self):
return self._member_type
@member_type.setter
def member_type(self, value):
self._member_type = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.credit_category:
if hasattr(self.credit_category, 'to_alipay_dict'):
params['credit_category'] = self.credit_category.to_alipay_dict()
else:
params['credit_category'] = self.credit_category
if self.ep_cert_no:
if hasattr(self.ep_cert_no, 'to_alipay_dict'):
params['ep_cert_no'] = self.ep_cert_no.to_alipay_dict()
else:
params['ep_cert_no'] = self.ep_cert_no
if self.ep_name:
if hasattr(self.ep_name, 'to_alipay_dict'):
params['ep_name'] = self.ep_name.to_alipay_dict()
else:
params['ep_name'] = self.ep_name
if self.m_category:
if hasattr(self.m_category, 'to_alipay_dict'):
params['m_category'] = self.m_category.to_alipay_dict()
else:
params['m_category'] = self.m_category
if self.member_type:
if hasattr(self.member_type, 'to_alipay_dict'):
params['member_type'] = self.member_type.to_alipay_dict()
else:
params['member_type'] = self.member_type
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ZhimaCreditEpRatingInnerInitializeModel()
if 'credit_category' in d:
o.credit_category = d['credit_category']
if 'ep_cert_no' in d:
o.ep_cert_no = d['ep_cert_no']
if 'ep_name' in d:
o.ep_name = d['ep_name']
if 'm_category' in d:
o.m_category = d['m_category']
if 'member_type' in d:
o.member_type = d['member_type']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'user_id' in d:
o.user_id = d['user_id']
return o
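# A minimal round-trip sketch: to_alipay_dict() serializes only the fields that
# have been set, and from_alipay_dict() rebuilds the model (values illustrative):
#
#     m = ZhimaCreditEpRatingInnerInitializeModel()
#     m.ep_name = 'Example Co.'
#     m.out_order_no = 'ORDER-0001'
#     d = m.to_alipay_dict()   # {'ep_name': 'Example Co.', 'out_order_no': 'ORDER-0001'}
#     m2 = ZhimaCreditEpRatingInnerInitializeModel.from_alipay_dict(d)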
|
PypiClean
|
/pulumi_alicloud-3.44.0a1693632188.tar.gz/pulumi_alicloud-3.44.0a1693632188/pulumi_alicloud/provider.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from ._inputs import *
__all__ = ['ProviderArgs', 'Provider']
@pulumi.input_type
class ProviderArgs:
def __init__(__self__, *,
access_key: Optional[pulumi.Input[str]] = None,
account_id: Optional[pulumi.Input[str]] = None,
assume_role: Optional[pulumi.Input['ProviderAssumeRoleArgs']] = None,
client_connect_timeout: Optional[pulumi.Input[int]] = None,
client_read_timeout: Optional[pulumi.Input[int]] = None,
configuration_source: Optional[pulumi.Input[str]] = None,
credentials_uri: Optional[pulumi.Input[str]] = None,
ecs_role_name: Optional[pulumi.Input[str]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['ProviderEndpointArgs']]]] = None,
fc: Optional[pulumi.Input[str]] = None,
log_endpoint: Optional[pulumi.Input[str]] = None,
max_retry_timeout: Optional[pulumi.Input[int]] = None,
mns_endpoint: Optional[pulumi.Input[str]] = None,
ots_instance_name: Optional[pulumi.Input[str]] = None,
profile: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
secure_transport: Optional[pulumi.Input[str]] = None,
security_token: Optional[pulumi.Input[str]] = None,
security_transport: Optional[pulumi.Input[str]] = None,
shared_credentials_file: Optional[pulumi.Input[str]] = None,
skip_region_validation: Optional[pulumi.Input[bool]] = None,
source_ip: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Provider resource.
:param pulumi.Input[str] access_key: The access key for API operations. You can retrieve this from the 'Security Management' section of the Alibaba Cloud
console.
:param pulumi.Input[str] account_id: The account ID for some service API operations. You can retrieve this from the 'Security Settings' section of the
Alibaba Cloud console.
:param pulumi.Input[int] client_connect_timeout: The maximum timeout of the client connection server.
:param pulumi.Input[int] client_read_timeout: The maximum timeout of the client read request.
:param pulumi.Input[str] configuration_source: Use this to mark a terraform configuration file source.
:param pulumi.Input[str] credentials_uri: The URI of sidecar credentials service.
        :param pulumi.Input[str] ecs_role_name: The RAM Role Name attached to an ECS instance for API operations. You can retrieve this from the 'Access Control' section
of the Alibaba Cloud console.
:param pulumi.Input[int] max_retry_timeout: The maximum retry timeout of the request.
:param pulumi.Input[str] profile: The profile for API operations. If not set, the default profile created with `aliyun configure` will be used.
:param pulumi.Input[str] region: The region where Alibaba Cloud operations will take place. Examples are cn-beijing, cn-hangzhou, eu-central-1, etc.
:param pulumi.Input[str] secret_key: The secret key for API operations. You can retrieve this from the 'Security Management' section of the Alibaba Cloud
console.
:param pulumi.Input[str] secure_transport: The security transport for the assume role invoking.
:param pulumi.Input[str] security_token: security token. A security token is only required if you are using Security Token Service.
:param pulumi.Input[str] shared_credentials_file: The path to the shared credentials file. If not set this defaults to ~/.aliyun/config.json
:param pulumi.Input[bool] skip_region_validation: Skip static validation of region ID. Used by users of alternative AlibabaCloud-like APIs or users w/ access to regions
that are not public (yet).
:param pulumi.Input[str] source_ip: The source ip for the assume role invoking.
"""
if access_key is not None:
pulumi.set(__self__, "access_key", access_key)
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if assume_role is not None:
pulumi.set(__self__, "assume_role", assume_role)
if client_connect_timeout is not None:
pulumi.set(__self__, "client_connect_timeout", client_connect_timeout)
if client_read_timeout is not None:
pulumi.set(__self__, "client_read_timeout", client_read_timeout)
if configuration_source is not None:
pulumi.set(__self__, "configuration_source", configuration_source)
if credentials_uri is not None:
pulumi.set(__self__, "credentials_uri", credentials_uri)
if ecs_role_name is None:
ecs_role_name = _utilities.get_env('ALICLOUD_ECS_ROLE_NAME')
if ecs_role_name is not None:
pulumi.set(__self__, "ecs_role_name", ecs_role_name)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if fc is not None:
warnings.warn("""Field 'fc' has been deprecated from provider version 1.28.0. New field 'fc' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""fc is deprecated: Field 'fc' has been deprecated from provider version 1.28.0. New field 'fc' which in nested endpoints instead.""")
if fc is not None:
pulumi.set(__self__, "fc", fc)
if log_endpoint is not None:
warnings.warn("""Field 'log_endpoint' has been deprecated from provider version 1.28.0. New field 'log' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""log_endpoint is deprecated: Field 'log_endpoint' has been deprecated from provider version 1.28.0. New field 'log' which in nested endpoints instead.""")
if log_endpoint is not None:
pulumi.set(__self__, "log_endpoint", log_endpoint)
if max_retry_timeout is not None:
pulumi.set(__self__, "max_retry_timeout", max_retry_timeout)
if mns_endpoint is not None:
warnings.warn("""Field 'mns_endpoint' has been deprecated from provider version 1.28.0. New field 'mns' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""mns_endpoint is deprecated: Field 'mns_endpoint' has been deprecated from provider version 1.28.0. New field 'mns' which in nested endpoints instead.""")
if mns_endpoint is not None:
pulumi.set(__self__, "mns_endpoint", mns_endpoint)
if ots_instance_name is not None:
warnings.warn("""Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.""", DeprecationWarning)
pulumi.log.warn("""ots_instance_name is deprecated: Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.""")
if ots_instance_name is not None:
pulumi.set(__self__, "ots_instance_name", ots_instance_name)
if profile is None:
profile = _utilities.get_env('ALICLOUD_PROFILE')
if profile is not None:
pulumi.set(__self__, "profile", profile)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if region is None:
region = _utilities.get_env('ALICLOUD_REGION')
if region is not None:
pulumi.set(__self__, "region", region)
if secret_key is not None:
pulumi.set(__self__, "secret_key", secret_key)
if secure_transport is not None:
pulumi.set(__self__, "secure_transport", secure_transport)
if security_token is not None:
pulumi.set(__self__, "security_token", security_token)
if security_transport is not None:
pulumi.set(__self__, "security_transport", security_transport)
if shared_credentials_file is not None:
pulumi.set(__self__, "shared_credentials_file", shared_credentials_file)
if skip_region_validation is not None:
pulumi.set(__self__, "skip_region_validation", skip_region_validation)
if source_ip is not None:
pulumi.set(__self__, "source_ip", source_ip)
@property
@pulumi.getter(name="accessKey")
def access_key(self) -> Optional[pulumi.Input[str]]:
"""
The access key for API operations. You can retrieve this from the 'Security Management' section of the Alibaba Cloud
console.
"""
return pulumi.get(self, "access_key")
@access_key.setter
def access_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_key", value)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[str]]:
"""
The account ID for some service API operations. You can retrieve this from the 'Security Settings' section of the
Alibaba Cloud console.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter(name="assumeRole")
def assume_role(self) -> Optional[pulumi.Input['ProviderAssumeRoleArgs']]:
return pulumi.get(self, "assume_role")
@assume_role.setter
def assume_role(self, value: Optional[pulumi.Input['ProviderAssumeRoleArgs']]):
pulumi.set(self, "assume_role", value)
@property
@pulumi.getter(name="clientConnectTimeout")
def client_connect_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The maximum timeout of the client connection server.
"""
return pulumi.get(self, "client_connect_timeout")
@client_connect_timeout.setter
def client_connect_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "client_connect_timeout", value)
@property
@pulumi.getter(name="clientReadTimeout")
def client_read_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The maximum timeout of the client read request.
"""
return pulumi.get(self, "client_read_timeout")
@client_read_timeout.setter
def client_read_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "client_read_timeout", value)
@property
@pulumi.getter(name="configurationSource")
def configuration_source(self) -> Optional[pulumi.Input[str]]:
"""
Use this to mark a terraform configuration file source.
"""
return pulumi.get(self, "configuration_source")
@configuration_source.setter
def configuration_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "configuration_source", value)
@property
@pulumi.getter(name="credentialsUri")
def credentials_uri(self) -> Optional[pulumi.Input[str]]:
"""
The URI of sidecar credentials service.
"""
return pulumi.get(self, "credentials_uri")
@credentials_uri.setter
def credentials_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "credentials_uri", value)
@property
@pulumi.getter(name="ecsRoleName")
def ecs_role_name(self) -> Optional[pulumi.Input[str]]:
"""
        The RAM Role Name attached to an ECS instance for API operations. You can retrieve this from the 'Access Control' section
of the Alibaba Cloud console.
"""
return pulumi.get(self, "ecs_role_name")
@ecs_role_name.setter
def ecs_role_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ecs_role_name", value)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ProviderEndpointArgs']]]]:
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ProviderEndpointArgs']]]]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter
def fc(self) -> Optional[pulumi.Input[str]]:
warnings.warn("""Field 'fc' has been deprecated from provider version 1.28.0. New field 'fc' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""fc is deprecated: Field 'fc' has been deprecated from provider version 1.28.0. New field 'fc' which in nested endpoints instead.""")
return pulumi.get(self, "fc")
@fc.setter
def fc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fc", value)
@property
@pulumi.getter(name="logEndpoint")
def log_endpoint(self) -> Optional[pulumi.Input[str]]:
warnings.warn("""Field 'log_endpoint' has been deprecated from provider version 1.28.0. New field 'log' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""log_endpoint is deprecated: Field 'log_endpoint' has been deprecated from provider version 1.28.0. New field 'log' which in nested endpoints instead.""")
return pulumi.get(self, "log_endpoint")
@log_endpoint.setter
def log_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_endpoint", value)
@property
@pulumi.getter(name="maxRetryTimeout")
def max_retry_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The maximum retry timeout of the request.
"""
return pulumi.get(self, "max_retry_timeout")
@max_retry_timeout.setter
def max_retry_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_retry_timeout", value)
@property
@pulumi.getter(name="mnsEndpoint")
def mns_endpoint(self) -> Optional[pulumi.Input[str]]:
warnings.warn("""Field 'mns_endpoint' has been deprecated from provider version 1.28.0. New field 'mns' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""mns_endpoint is deprecated: Field 'mns_endpoint' has been deprecated from provider version 1.28.0. New field 'mns' which in nested endpoints instead.""")
return pulumi.get(self, "mns_endpoint")
@mns_endpoint.setter
def mns_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mns_endpoint", value)
@property
@pulumi.getter(name="otsInstanceName")
def ots_instance_name(self) -> Optional[pulumi.Input[str]]:
warnings.warn("""Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.""", DeprecationWarning)
pulumi.log.warn("""ots_instance_name is deprecated: Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.""")
return pulumi.get(self, "ots_instance_name")
@ots_instance_name.setter
def ots_instance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ots_instance_name", value)
@property
@pulumi.getter
def profile(self) -> Optional[pulumi.Input[str]]:
"""
The profile for API operations. If not set, the default profile created with `aliyun configure` will be used.
"""
return pulumi.get(self, "profile")
@profile.setter
def profile(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "profile", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region where Alibaba Cloud operations will take place. Examples are cn-beijing, cn-hangzhou, eu-central-1, etc.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> Optional[pulumi.Input[str]]:
"""
The secret key for API operations. You can retrieve this from the 'Security Management' section of the Alibaba Cloud
console.
"""
return pulumi.get(self, "secret_key")
@secret_key.setter
def secret_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_key", value)
@property
@pulumi.getter(name="secureTransport")
def secure_transport(self) -> Optional[pulumi.Input[str]]:
"""
The security transport for the assume role invoking.
"""
return pulumi.get(self, "secure_transport")
@secure_transport.setter
def secure_transport(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secure_transport", value)
@property
@pulumi.getter(name="securityToken")
def security_token(self) -> Optional[pulumi.Input[str]]:
"""
security token. A security token is only required if you are using Security Token Service.
"""
return pulumi.get(self, "security_token")
@security_token.setter
def security_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_token", value)
@property
@pulumi.getter(name="securityTransport")
def security_transport(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "security_transport")
@security_transport.setter
def security_transport(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_transport", value)
@property
@pulumi.getter(name="sharedCredentialsFile")
def shared_credentials_file(self) -> Optional[pulumi.Input[str]]:
"""
The path to the shared credentials file. If not set this defaults to ~/.aliyun/config.json
"""
return pulumi.get(self, "shared_credentials_file")
@shared_credentials_file.setter
def shared_credentials_file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "shared_credentials_file", value)
@property
@pulumi.getter(name="skipRegionValidation")
def skip_region_validation(self) -> Optional[pulumi.Input[bool]]:
"""
Skip static validation of region ID. Used by users of alternative AlibabaCloud-like APIs or users w/ access to regions
that are not public (yet).
"""
return pulumi.get(self, "skip_region_validation")
@skip_region_validation.setter
def skip_region_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_region_validation", value)
@property
@pulumi.getter(name="sourceIp")
def source_ip(self) -> Optional[pulumi.Input[str]]:
"""
The source ip for the assume role invoking.
"""
return pulumi.get(self, "source_ip")
@source_ip.setter
def source_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_ip", value)
class Provider(pulumi.ProviderResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_key: Optional[pulumi.Input[str]] = None,
account_id: Optional[pulumi.Input[str]] = None,
assume_role: Optional[pulumi.Input[pulumi.InputType['ProviderAssumeRoleArgs']]] = None,
client_connect_timeout: Optional[pulumi.Input[int]] = None,
client_read_timeout: Optional[pulumi.Input[int]] = None,
configuration_source: Optional[pulumi.Input[str]] = None,
credentials_uri: Optional[pulumi.Input[str]] = None,
ecs_role_name: Optional[pulumi.Input[str]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProviderEndpointArgs']]]]] = None,
fc: Optional[pulumi.Input[str]] = None,
log_endpoint: Optional[pulumi.Input[str]] = None,
max_retry_timeout: Optional[pulumi.Input[int]] = None,
mns_endpoint: Optional[pulumi.Input[str]] = None,
ots_instance_name: Optional[pulumi.Input[str]] = None,
profile: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
secure_transport: Optional[pulumi.Input[str]] = None,
security_token: Optional[pulumi.Input[str]] = None,
security_transport: Optional[pulumi.Input[str]] = None,
shared_credentials_file: Optional[pulumi.Input[str]] = None,
skip_region_validation: Optional[pulumi.Input[bool]] = None,
source_ip: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The provider type for the alicloud package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_key: The access key for API operations. You can retrieve this from the 'Security Management' section of the Alibaba Cloud
console.
:param pulumi.Input[str] account_id: The account ID for some service API operations. You can retrieve this from the 'Security Settings' section of the
Alibaba Cloud console.
:param pulumi.Input[int] client_connect_timeout: The maximum timeout of the client connection server.
:param pulumi.Input[int] client_read_timeout: The maximum timeout of the client read request.
:param pulumi.Input[str] configuration_source: Use this to mark a terraform configuration file source.
:param pulumi.Input[str] credentials_uri: The URI of sidecar credentials service.
        :param pulumi.Input[str] ecs_role_name: The RAM Role Name attached to an ECS instance for API operations. You can retrieve this from the 'Access Control' section
of the Alibaba Cloud console.
:param pulumi.Input[int] max_retry_timeout: The maximum retry timeout of the request.
:param pulumi.Input[str] profile: The profile for API operations. If not set, the default profile created with `aliyun configure` will be used.
:param pulumi.Input[str] region: The region where Alibaba Cloud operations will take place. Examples are cn-beijing, cn-hangzhou, eu-central-1, etc.
:param pulumi.Input[str] secret_key: The secret key for API operations. You can retrieve this from the 'Security Management' section of the Alibaba Cloud
console.
:param pulumi.Input[str] secure_transport: The security transport for the assume role invoking.
:param pulumi.Input[str] security_token: security token. A security token is only required if you are using Security Token Service.
:param pulumi.Input[str] shared_credentials_file: The path to the shared credentials file. If not set this defaults to ~/.aliyun/config.json
:param pulumi.Input[bool] skip_region_validation: Skip static validation of region ID. Used by users of alternative AlibabaCloud-like APIs or users w/ access to regions
that are not public (yet).
:param pulumi.Input[str] source_ip: The source ip for the assume role invoking.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ProviderArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The provider type for the alicloud package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param ProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_key: Optional[pulumi.Input[str]] = None,
account_id: Optional[pulumi.Input[str]] = None,
assume_role: Optional[pulumi.Input[pulumi.InputType['ProviderAssumeRoleArgs']]] = None,
client_connect_timeout: Optional[pulumi.Input[int]] = None,
client_read_timeout: Optional[pulumi.Input[int]] = None,
configuration_source: Optional[pulumi.Input[str]] = None,
credentials_uri: Optional[pulumi.Input[str]] = None,
ecs_role_name: Optional[pulumi.Input[str]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProviderEndpointArgs']]]]] = None,
fc: Optional[pulumi.Input[str]] = None,
log_endpoint: Optional[pulumi.Input[str]] = None,
max_retry_timeout: Optional[pulumi.Input[int]] = None,
mns_endpoint: Optional[pulumi.Input[str]] = None,
ots_instance_name: Optional[pulumi.Input[str]] = None,
profile: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
secure_transport: Optional[pulumi.Input[str]] = None,
security_token: Optional[pulumi.Input[str]] = None,
security_transport: Optional[pulumi.Input[str]] = None,
shared_credentials_file: Optional[pulumi.Input[str]] = None,
skip_region_validation: Optional[pulumi.Input[bool]] = None,
source_ip: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProviderArgs.__new__(ProviderArgs)
__props__.__dict__["access_key"] = access_key
__props__.__dict__["account_id"] = account_id
__props__.__dict__["assume_role"] = pulumi.Output.from_input(assume_role).apply(pulumi.runtime.to_json) if assume_role is not None else None
__props__.__dict__["client_connect_timeout"] = pulumi.Output.from_input(client_connect_timeout).apply(pulumi.runtime.to_json) if client_connect_timeout is not None else None
__props__.__dict__["client_read_timeout"] = pulumi.Output.from_input(client_read_timeout).apply(pulumi.runtime.to_json) if client_read_timeout is not None else None
__props__.__dict__["configuration_source"] = configuration_source
__props__.__dict__["credentials_uri"] = credentials_uri
if ecs_role_name is None:
ecs_role_name = _utilities.get_env('ALICLOUD_ECS_ROLE_NAME')
__props__.__dict__["ecs_role_name"] = ecs_role_name
__props__.__dict__["endpoints"] = pulumi.Output.from_input(endpoints).apply(pulumi.runtime.to_json) if endpoints is not None else None
if fc is not None and not opts.urn:
warnings.warn("""Field 'fc' has been deprecated from provider version 1.28.0. New field 'fc' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""fc is deprecated: Field 'fc' has been deprecated from provider version 1.28.0. New field 'fc' which in nested endpoints instead.""")
__props__.__dict__["fc"] = fc
if log_endpoint is not None and not opts.urn:
warnings.warn("""Field 'log_endpoint' has been deprecated from provider version 1.28.0. New field 'log' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""log_endpoint is deprecated: Field 'log_endpoint' has been deprecated from provider version 1.28.0. New field 'log' which in nested endpoints instead.""")
__props__.__dict__["log_endpoint"] = log_endpoint
__props__.__dict__["max_retry_timeout"] = pulumi.Output.from_input(max_retry_timeout).apply(pulumi.runtime.to_json) if max_retry_timeout is not None else None
if mns_endpoint is not None and not opts.urn:
warnings.warn("""Field 'mns_endpoint' has been deprecated from provider version 1.28.0. New field 'mns' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""mns_endpoint is deprecated: Field 'mns_endpoint' has been deprecated from provider version 1.28.0. New field 'mns' which in nested endpoints instead.""")
__props__.__dict__["mns_endpoint"] = mns_endpoint
if ots_instance_name is not None and not opts.urn:
warnings.warn("""Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.""", DeprecationWarning)
pulumi.log.warn("""ots_instance_name is deprecated: Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.""")
__props__.__dict__["ots_instance_name"] = ots_instance_name
if profile is None:
profile = _utilities.get_env('ALICLOUD_PROFILE')
__props__.__dict__["profile"] = profile
__props__.__dict__["protocol"] = protocol
if region is None:
region = _utilities.get_env('ALICLOUD_REGION')
__props__.__dict__["region"] = region
__props__.__dict__["secret_key"] = secret_key
__props__.__dict__["secure_transport"] = secure_transport
__props__.__dict__["security_token"] = security_token
__props__.__dict__["security_transport"] = security_transport
__props__.__dict__["shared_credentials_file"] = shared_credentials_file
__props__.__dict__["skip_region_validation"] = pulumi.Output.from_input(skip_region_validation).apply(pulumi.runtime.to_json) if skip_region_validation is not None else None
__props__.__dict__["source_ip"] = source_ip
super(Provider, __self__).__init__(
'alicloud',
resource_name,
__props__,
opts)
@property
@pulumi.getter(name="accessKey")
def access_key(self) -> pulumi.Output[Optional[str]]:
"""
The access key for API operations. You can retrieve this from the 'Security Management' section of the Alibaba Cloud
console.
"""
return pulumi.get(self, "access_key")
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Output[Optional[str]]:
"""
The account ID for some service API operations. You can retrieve this from the 'Security Settings' section of the
Alibaba Cloud console.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter(name="configurationSource")
def configuration_source(self) -> pulumi.Output[Optional[str]]:
"""
Use this to mark a terraform configuration file source.
"""
return pulumi.get(self, "configuration_source")
@property
@pulumi.getter(name="credentialsUri")
def credentials_uri(self) -> pulumi.Output[Optional[str]]:
"""
The URI of sidecar credentials service.
"""
return pulumi.get(self, "credentials_uri")
@property
@pulumi.getter(name="ecsRoleName")
def ecs_role_name(self) -> pulumi.Output[Optional[str]]:
"""
        The RAM Role Name attached to an ECS instance for API operations. You can retrieve this from the 'Access Control' section
of the Alibaba Cloud console.
"""
return pulumi.get(self, "ecs_role_name")
@property
@pulumi.getter
def fc(self) -> pulumi.Output[Optional[str]]:
warnings.warn("""Field 'fc' has been deprecated from provider version 1.28.0. New field 'fc' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""fc is deprecated: Field 'fc' has been deprecated from provider version 1.28.0. New field 'fc' which in nested endpoints instead.""")
return pulumi.get(self, "fc")
@property
@pulumi.getter(name="logEndpoint")
def log_endpoint(self) -> pulumi.Output[Optional[str]]:
warnings.warn("""Field 'log_endpoint' has been deprecated from provider version 1.28.0. New field 'log' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""log_endpoint is deprecated: Field 'log_endpoint' has been deprecated from provider version 1.28.0. New field 'log' which in nested endpoints instead.""")
return pulumi.get(self, "log_endpoint")
@property
@pulumi.getter(name="mnsEndpoint")
def mns_endpoint(self) -> pulumi.Output[Optional[str]]:
warnings.warn("""Field 'mns_endpoint' has been deprecated from provider version 1.28.0. New field 'mns' which in nested endpoints instead.""", DeprecationWarning)
pulumi.log.warn("""mns_endpoint is deprecated: Field 'mns_endpoint' has been deprecated from provider version 1.28.0. New field 'mns' which in nested endpoints instead.""")
return pulumi.get(self, "mns_endpoint")
@property
@pulumi.getter(name="otsInstanceName")
def ots_instance_name(self) -> pulumi.Output[Optional[str]]:
warnings.warn("""Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.""", DeprecationWarning)
pulumi.log.warn("""ots_instance_name is deprecated: Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.""")
return pulumi.get(self, "ots_instance_name")
@property
@pulumi.getter
def profile(self) -> pulumi.Output[Optional[str]]:
"""
The profile for API operations. If not set, the default profile created with `aliyun configure` will be used.
"""
return pulumi.get(self, "profile")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def region(self) -> pulumi.Output[Optional[str]]:
"""
The region where Alibaba Cloud operations will take place. Examples are cn-beijing, cn-hangzhou, eu-central-1, etc.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> pulumi.Output[Optional[str]]:
"""
The secret key for API operations. You can retrieve this from the 'Security Management' section of the Alibaba Cloud
console.
"""
return pulumi.get(self, "secret_key")
@property
@pulumi.getter(name="secureTransport")
def secure_transport(self) -> pulumi.Output[Optional[str]]:
"""
The security transport for the assume role invoking.
"""
return pulumi.get(self, "secure_transport")
@property
@pulumi.getter(name="securityToken")
def security_token(self) -> pulumi.Output[Optional[str]]:
"""
security token. A security token is only required if you are using Security Token Service.
"""
return pulumi.get(self, "security_token")
@property
@pulumi.getter(name="securityTransport")
def security_transport(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "security_transport")
@property
@pulumi.getter(name="sharedCredentialsFile")
def shared_credentials_file(self) -> pulumi.Output[Optional[str]]:
"""
The path to the shared credentials file. If not set this defaults to ~/.aliyun/config.json
"""
return pulumi.get(self, "shared_credentials_file")
@property
@pulumi.getter(name="sourceIp")
def source_ip(self) -> pulumi.Output[Optional[str]]:
"""
The source ip for the assume role invoking.
"""
return pulumi.get(self, "source_ip")
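# A minimal sketch of configuring an explicit provider instance (region and
# profile values are illustrative; in practice these often come from
# environment variables such as ALICLOUD_REGION and ALICLOUD_PROFILE):
#
#     import pulumi_alicloud as alicloud
#     provider = alicloud.Provider('custom',
#                                  region='cn-hangzhou',
#                                  profile='default',
#                                  skip_region_validation=True)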
|
PypiClean
|
/bdd-coder-2.2.1.tar.gz/bdd-coder-2.2.1/README.md
|
# BDD Coder
[](https://badge.fury.io/py/bdd-coder) [](https://img.shields.io/pypi/dm/bdd-coder)
A package devoted to agile implementation of **class-based behavior tests**. It consists of (see [example](https://bitbucket.org/coleopter/bdd-coder/src/master/example/)):
* [coders](https://bitbucket.org/coleopter/bdd-coder/src/master/bdd_coder/coders.py) module able to
- make a tester package - test suite - blueprint from user story specifications in YAML files
- patch such tester package with new YAML specifications
* [tester](https://bitbucket.org/coleopter/bdd-coder/src/master/bdd_coder/tester.py) module employed to run such blueprint tests, which also has the ability to export their docs as YAML specifications
Although this package is intended to be used with [pytest](https://docs.pytest.org/en/stable/contents.html), until version 2.0.0 the base test case class for all test suites, `bdd_coder.tester.tester.BaseTestCase`, was a `unittest.TestCase` subclass. From version 2.0.0 `unittest.TestCase` is no longer supported, so `pytest`'s setup and teardown functions - see [pytest-xunit_setup](https://docs.pytest.org/en/latest/xunit_setup.html) - should be implemented instead. See [pytest-unittest](https://docs.pytest.org/en/stable/unittest.html#pytest-features-in-unittest-testcase-subclasses) on the benefits of dropping `unittest.TestCase`.
See [mastermind](https://bitbucket.org/coleopter/mastermind) for an example testing a Django REST Framework API.
Test this package with [tox](https://tox.readthedocs.io/en/latest/) - see tox.ini.
## Story
This package was born as a study of Behavior Driven Development, and from the wish for a handy implementation of the Gherkin language in class-based tests, so that development cycles start by coding a behavior test suite containing the scenario specifications in test case method `__doc__`s - which is what `bdd_coder.tester` achieves.
In conjunction with `bdd_coder.coder`, development cycles *start* with:
1. A set of YAML specifications is agreed and crafted
2. From these, a test suite is automatically created or patched
3. New *test step methods* are crafted to efficiently achieve 100% behavior coverage
## User Story (feature) specifications
Each test suite (tester package) has a structure
```
├─ __init__.py
├─ base.py
└─ test_stories.py
```
corresponding to a specifications directory with story YAML files
```
├─ some-story.yml
├─ another-story.yml
├─ ...
└─ this-story.yml
```
A story file corresponds to a test case class declared in `test_stories.py`, consisting mainly of scenario declarations:
```
Title: <Story title> # --> class __name__
Story: |- # free text --> class __doc__
As a <user group>
I want <feature>
In order to/so that <goal>
Scenarios:
Scenario name: # --> scenario __doc__
- Given an event $(1) with $(A) and $first_param that gives `x` and `y`
- When it happens that...
- And the parameters $second and $third enter
- Then finally we assert that...
# ...
# ...
```
Only the keys `Title`, `Story`, `Scenarios` are required and mean something.
### Step declarations
A scenario declaration consists of a list of step declarations, which:
* Correspond to a test step method to be defined
* Start with a whole word - normally 'Given', 'When', or 'Then' - that is ignored by the tester (only order matters)
* May contain:
+ Input string values as $(...), which are passed as Pytest fixture parameters to the step method, so that they are available from the Pytest `request` fixture as the tuple `request.param`
+ Input parameter names as $param_name, which are passed to Pytest's parametrize
+ Output variable name sequence using backticks - if non-empty, the method should return the output values as a tuple, which are collected by the `bdd_coder.tester.decorators.Gherkin` decorator instance, by name into its `outputs` map of sequences
* May refer to a scenario name, either belonging to the same class (story), or to an inherited class
## Tester
The core of each test suite consists of the following required class declaration in its `base.py` module:
```python
from bdd_coder import decorators
from bdd_coder import tester
gherkin = decorators.Gherkin(logs_path='example/tests/bdd_runs.log')
@gherkin
class BddTester(tester.BddTester):
"""
The decorated BddTester subclass of this tester package.
It manages scenario runs. All test classes inherit from this one,
so generic test methods for this package are expected to be defined here
"""
```
Then, story test cases are declared in `test_stories.py` - with the `base` module imported - containing scenario declarations such as
```python
class StoryTitle(BddTesterSubclass, AnotherBddTesterSubclass):
@base.gherkin.scenario(['param1_value1'], ['param1_value2'])
def test_scenario_name(self):
"""
Given $(input1) and $param1 step one gives `x` and `y`
...
Last step with $(input2) gives `result`
"""
```
that will run according to their `__doc__`s, and the necessary step method definitions.
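For illustration, a step method is a plain method on the test class; when its declaration names outputs in backticks, the method returns them as a tuple. A minimal hypothetical sketch (the method name and values are illustrative, not generated output):
```python
class StoryTitle(BddTesterSubclass, AnotherBddTesterSubclass):
    def step_one_gives_x_and_y(self, *args):
        # args receives the $(...) fixture inputs and $param values for this step
        x, y = 'value-x', 'value-y'  # stand-ins for real test logic
        return x, y  # collected by name into the Gherkin decorator's outputs map
```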
### Commands
#### Export test suite docs as YAML
```
usage: bdd-make-yaml-specs [-h] [--overwrite] test_module specs_path
positional arguments:
test_module str. Passed to `importlib.import_module`
specs_path str. Will try to write the YAML files in here
keyword arguments:
--overwrite, -o
```
Additionally, validates code against generated specifications.
## Coder commands
### Make a test suite blueprint
```
usage: bdd-blueprint [-h] [--base-class BASE_CLASS]
[--specs-path SPECS_PATH] [--tests-path TESTS_PATH]
[--test-module-name TEST_MODULE_NAME] [--overwrite]
keyword arguments:
--base-class BASE_CLASS, -b BASE_CLASS
str. Base test case class
--specs-path SPECS_PATH, -s SPECS_PATH
str. Default: behaviour/specs. Directory containing the YAML specs
--tests-path TESTS_PATH, -t TESTS_PATH
str. Default: next to specs
--test-module-name TEST_MODULE_NAME, -tm TEST_MODULE_NAME
str. Default: stories. Name for test_<name>.py
--overwrite, -o
```
The following:
```
bdd-coder$ bdd-blueprint -s example/specs -t example/tests --overwrite
```
will rewrite [example/tests](https://bitbucket.org/coleopter/bdd-coder/src/master/example/tests) (with no changes if [example/specs](https://bitbucket.org/coleopter/bdd-coder/src/master/example/specs) is unmodified), and run `pytest` on the blueprint yielding the output, like
```
============================= test session starts ==============================
platform [...]
collecting ... collected 2 items
example/tests/test_stories.py::TestClearBoard::test_odd_boards PASSED [ 50%]
example/tests/test_stories.py::TestClearBoard::test_start_board PASSED [100%]
=========================== 2 passed in 0.04 seconds ===========================
```
### Patch a test suite with new specifications
Use this command to update a tester package with new YAML specifications. It removes scenario declarations *only*; it changes the scenario set - which may imply a new test class hierarchy with new stories and scenarios - and it adds the necessary step methods and new aliases (if any).
```
usage: bdd-patch [-h] test_module [specs_path]
positional arguments:
test_module str. Passed to `importlib.import_module`
specs_path str. Directory to take new specs from. Default: specs/ next to test package
```
The following:
```
bdd-coder$ bdd-patch example.tests.test_stories example/new_specs
```
will turn [example/tests](https://bitbucket.org/coleopter/bdd-coder/src/master/example/tests) into [example/new_tests](https://bitbucket.org/coleopter/bdd-coder/src/master/example/new_tests), and run `pytest` on the suite yielding something like
```
============================= test session starts ==============================
platform [...]
collecting ... collected 3 items
example/tests/test_stories.py::TestNewGame::test_even_boards PASSED [ 33%]
example/tests/test_stories.py::TestNewGame::test_funny_boards PASSED [ 66%]
example/tests/test_stories.py::TestNewGame::test_more_boards PASSED [100%]
=========================== 3 passed in 0.04 seconds ===========================
```
|
PypiClean
|
/aspose-words-cloud-23.7.0.tar.gz/aspose-words-cloud-23.7.0/asposewordscloud/models/form_field_response.py
|
import pprint
import re # noqa: F401
import datetime
import six
import json
class FormFieldResponse(object):
"""The REST response with a form field.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'form_field': 'FormField'
}
attribute_map = {
'request_id': 'RequestId',
'form_field': 'FormField'
}
def __init__(self, request_id=None, form_field=None): # noqa: E501
"""FormFieldResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._form_field = None
self.discriminator = None
if request_id is not None:
self.request_id = request_id
if form_field is not None:
self.form_field = form_field
@property
def request_id(self):
"""Gets the request_id of this FormFieldResponse. # noqa: E501
Gets or sets the request Id. # noqa: E501
:return: The request_id of this FormFieldResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this FormFieldResponse.
Gets or sets the request Id. # noqa: E501
:param request_id: The request_id of this FormFieldResponse. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def form_field(self):
"""Gets the form_field of this FormFieldResponse. # noqa: E501
Gets or sets the form field. # noqa: E501
:return: The form_field of this FormFieldResponse. # noqa: E501
:rtype: FormField
"""
return self._form_field
@form_field.setter
def form_field(self, form_field):
"""Sets the form_field of this FormFieldResponse.
Gets or sets the form field. # noqa: E501
:param form_field: The form_field of this FormFieldResponse. # noqa: E501
:type: FormField
"""
self._form_field = form_field
def extract_files_content(self, filesContentResult):
"""Append the file content result list"""
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, (datetime.datetime, datetime.date)):
result[self.attribute_map[attr]] = value.isoformat()
else:
result[self.attribute_map[attr]] = value
return result
def to_json(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, (datetime.datetime, datetime.date)):
result[self.attribute_map[attr]] = value.isoformat()
else:
result[self.attribute_map[attr]] = value
return json.dumps(result)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FormFieldResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
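# Example usage sketch (the request id is a made-up value):
#
# >>> response = FormFieldResponse(request_id='abc-123')
# >>> response.to_dict()   # to_dict() skips attributes that are None
# {'RequestId': 'abc-123'}
# >>> response.to_json()   # to_json() keeps them as JSON null
# '{"RequestId": "abc-123", "FormField": null}'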
|
PypiClean
|
/TextTest-4.3.0.tar.gz/TextTest-4.3.0/texttestlib/default/knownbugs/github.py
|
# Interface to GitHub using the JSON API.
import os
import sys
import ssl
import urllib.request
import json
def findBugInfo(bugId, location, *args):
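    """Fetch issue `bugId` from the GitHub repository API at `location` and
    return a (status, description, is_resolved, bugId) tuple."""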
if location and location[-1] != '/':
location += '/'
request = "%sissues/%s" % (location, bugId)
try:
if request.startswith("https") and getattr(sys, 'frozen', False):
certs = os.path.join(os.path.dirname(sys.executable), "etc", "cacert.pem")
reply = urllib.request.urlopen(request, context=ssl.create_default_context(cafile=certs))
else:
reply = urllib.request.urlopen(request)
content = reply.read().decode(reply.headers.get_content_charset())
info = json.loads(content)
except Exception as e:
message = ("Failed to open URL '" + request + "': " + str(e) +
".\n\nPlease make sure that bug " + bugId + " exists\n" +
"and that the configuration entry 'bug_system_location' " +
"points to the correct GitHub repository.\nThe current value is '" + location +
"', it often looks like: 'https://api.github.com/repos/<user>/<repo>/'.")
return "NONEXISTENT", message, False, bugId
if len(info) <= 1:
message = "Could not parse reply from GitHub, maybe incompatible interface."
return "BAD SCRIPT", message, False, bugId
bugText = "******************************************************\n" + \
"Ticket #%s (%s)\n" % (bugId, info['state']) + \
"%s\n%sticket/%s\n" % (info['title'], location, bugId) + \
"Reported By: %s Owned by: %s\n" % (info['user']['login'], info['assignee']) + \
"Updated: %s Milestone: %s\n" % (info['updated_at'], info['milestone']) + \
"Description:\n" + info['body'] + "\n" + \
"******************************************************"
return info['state'], bugText, info['state'] == "closed", bugId
if __name__ == "__main__": # pragma: no cover - test code
import sys
for item in findBugInfo(sys.argv[1], sys.argv[2]):
print(item)
|
PypiClean
|
/auto_archiver-0.6.6.tar.gz/auto_archiver-0.6.6/src/auto_archiver/archivers/telegram_archiver.py
|
import requests, re, html
from bs4 import BeautifulSoup
from loguru import logger
from . import Archiver
from ..core import Metadata, Media
class TelegramArchiver(Archiver):
"""
    Archiver for Telegram that does not require a login; the telethon_archiver is much more advisable.
    Only returns a result if at least one image or one video is found.
"""
name = "telegram_archiver"
def __init__(self, config: dict) -> None:
super().__init__(config)
@staticmethod
def configs() -> dict:
return {}
def download(self, item: Metadata) -> Metadata:
url = item.get_url()
# detect URLs that we definitely cannot handle
if 't.me' != item.netloc:
return False
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
}
        # TODO: check if we can do this in a way more resilient to variable URLs
if url[-8:] != "?embed=1":
url += "?embed=1"
t = requests.get(url, headers=headers)
s = BeautifulSoup(t.content, 'html.parser')
result = Metadata()
result.set_content(html.escape(str(t.content)))
if (timestamp := (s.find_all('time') or [{}])[0].get('datetime')):
result.set_timestamp(timestamp)
video = s.find("video")
if video is None:
logger.warning("could not find video")
image_tags = s.find_all(class_="tgme_widget_message_photo_wrap")
image_urls = []
for im in image_tags:
urls = [u.replace("'", "") for u in re.findall(r'url\((.*?)\)', im['style'])]
image_urls += urls
if not len(image_urls): return False
for img_url in image_urls:
result.add_media(Media(self.download_from_url(img_url, item=item)))
else:
video_url = video.get('src')
m_video = Media(self.download_from_url(video_url, item=item))
# extract duration from HTML
try:
duration = s.find_all('time')[0].contents[0]
if ':' in duration:
duration = float(duration.split(
':')[0]) * 60 + float(duration.split(':')[1])
else:
duration = float(duration)
m_video.set("duration", duration)
            except Exception: pass  # duration is optional metadata; ignore parse failures
result.add_media(m_video)
return result.success("telegram")
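# Example usage sketch (hypothetical URL; assumes Metadata exposes a set_url
# helper as used elsewhere in auto_archiver, and that an empty config suffices):
#
# item = Metadata().set_url("https://t.me/some_channel/123")
# archiver = TelegramArchiver(config={})
# result = archiver.download(item)
# if result:
#     print(result)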
|
PypiClean
|
/apples-2.0.11.tar.gz/apples-2.0.11/README.md
|
------------------------------------
Summary
------------------------------------
APPLES stands for `Accurate Phylogenetic Placement with LEast Squares` and addresses the problem of phylogenetic placement of DNA and protein sequences into an already existing reference tree. APPLES is a command-line tool and it can run on **Linux, Mac OSX, and Windows**.
------------------------------------
Publication
------------------------------------
* Metin Balaban, Shahab Sarmashghi, and Siavash Mirarab. “APPLES: Scalable Distance-Based Phylogenetic Placement with or without Alignments.” Systematic Biology 69, no. 3 (2020): 566–78. [https://doi.org/10.1093/sysbio/syz063](https://doi.org/10.1093/sysbio/syz063)
------------------------------------
Requirements
------------------------------------
1. Python: Version >= 3.0
------------------------------------
Installation on Linux, Mac OSX, or Windows
------------------------------------
Install APPLES using the following command in the command-line:
`pip install apples`
---------------------------------------------
Getting Started with APPLES
---------------------------------------------
For listing all options, run the following command:
`run_apples.py -h`
---------------------------------------------
Input & Output Specification
---------------------------------------------
Input reference (backbone) tree must be in newick format. APPLES can perform placements based on FASTA alignments of nucleotide or amino acid sequences or a distance table.
APPLES input can be either
* An alignment of query sequences to the backbone. In this case, by default, hamming distances are computed by pairwise comparison and then corrected using the JC69 model (see the formula below). If the input is an amino acid alignment, APPLES uses the scoredist model instead.
* A distance matrix computed using other tools.
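For reference, the JC69 correction used in the first case maps the observed proportion *p* of mismatching sites (the hamming distance) to an evolutionary distance estimate:

$\hat{d} = -\frac{3}{4}\ln\left(1 - \frac{4}{3}p\right)$

The estimate is finite only for *p* < 3/4.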
#### Input an alignment
APPLES requires a reference alignment and a query alignment. All species in the backbone tree must have a corresponding sequence in the reference alignment. You can find an example reference alignment and query alignment for ten query sequences under [data/ref.fa](data/ref.fa) and [data/query.fa](data/query.fa), respectively.
You can run APPLES with the following command on the example input nucleotide alignment dataset:
`run_apples.py -s data/ref.fa -q data/query.fa -t data/backbone.nwk`
If input sequences are amino acid, use the flag `-p`.
#### Input a distance matrix
The distance matrix format is a tab-delimited CSV file with column and row headers. Rows represent query sequences and columns represent reference sequences. You can find an example distance matrix for ten query sequences under [data/dist.mat](data/dist.mat).
You can run APPLES on the example distance matrix by running the following command:
`run_apples.py -d data/dist.mat -t data/backbone.nwk`
#### Output
Output is a jplace file containing placement results for all queries. For more information about jplace files, please refer to Matsen et al. (2012) [https://doi.org/10.1371/journal.pone.0031009](https://doi.org/10.1371/journal.pone.0031009). The output file can be specified using the `-o` option. When no output file is specified, the result is printed to the standard output.
### ! IMPORTANT NOTE !
The backbone tree provided to APPLES must have its branch lengths estimated using a distance-based method such as minimum evolution. This is a requirement for getting good results. We recommend [FastTree2](http://www.microbesonline.org/fasttree/) for re-estimating branch lengths if the backbone tree was estimated using Maximum Likelihood based methods (e.g. RAxML, PASTA). We support re-estimation of branch lengths within APPLES; in fact, by default, FastTree is run prior to placement using the following command.
`FastTreeMP -nosupport -nt -nome -noml -log tree.log -intree backbone.nwk < ref.fa > minimum_evo_backbone.nwk`
Then perform placement on the new tree:
`run_apples.py -s ref.fa -q query.fa -t minimum_evo_backbone.nwk`
If you have already re-estimated the backbone branch lengths, you can skip this step in APPLES using the option "-D" for a speedup.
------------------------------------
Detailed tutorial on how to run APPLES on various datasets
------------------------------------
Please refer to the tutorial below for detailed examples of usage in alignment-based and alignment-free settings.
[https://github.com/smirarab/tutorials/blob/master/Skmer-APPLES-tutorial.md](https://github.com/smirarab/tutorials/blob/master/Skmer-APPLES-tutorial.md)
-------------
CHANGELOG
-------------
2.0.10
* APPLES-2 raises a warning when a query sequence is already present in the backbone.
APPLES-2 places the query anyway (after appending a "-query" suffix to the name), ignoring the record in the backbone.
2.0.9
* Amino acid distances are now computed using the same BLOSUM45 matrix used in FastTree2.
2.0.8
* Pairwise distance overlap requirement parameter can now be changed by the user.
|
PypiClean
|
/django-odnoklassniki-groups-0.1.4.tar.gz/django-odnoklassniki-groups-0.1.4/odnoklassniki_groups/migrations/0002_auto__add_field_group_attrs.py
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Group.attrs'
db.add_column(u'odnoklassniki_groups_group', 'attrs',
self.gf('annoying.fields.JSONField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Group.attrs'
db.delete_column(u'odnoklassniki_groups_group', 'attrs')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'odnoklassniki_discussions.discussion': {
'Meta': {'object_name': 'Discussion'},
'attrs': ('annoying.fields.JSONField', [], {'null': 'True'}),
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'odnoklassniki_discussions_authors'", 'to': u"orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
'comments_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'entities': ('annoying.fields.JSONField', [], {'null': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
'last_activity_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'last_user_access_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'liked_it': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'message': ('django.db.models.fields.TextField', [], {}),
'new_comments_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'odnoklassniki_discussions_owners'", 'to': u"orm['contenttypes.ContentType']"}),
'owner_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
'ref_objects': ('annoying.fields.JSONField', [], {'null': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'odnoklassniki_groups.group': {
'Meta': {'object_name': 'Group'},
'attrs': ('annoying.fields.JSONField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'discussions_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
'members_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '800'}),
'photo_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'pic_avatar': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'premium': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'private': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'shop_visible_admin': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'shop_visible_public': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'users': ('m2m_history.fields.ManyToManyHistoryField', [], {'to': u"orm['odnoklassniki_users.User']", 'symmetrical': 'False'})
},
u'odnoklassniki_users.user': {
'Meta': {'object_name': 'User'},
'allows_anonym_access': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'birthday': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'current_status': ('django.db.models.fields.TextField', [], {}),
'current_status_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'current_status_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'has_email': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'has_service_invisible': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_online': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'photo_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'pic1024x768': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'pic128max': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'pic128x128': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'pic180min': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'pic190x190': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'pic240min': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'pic320min': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'pic50x50': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'pic640x480': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'private': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'registered_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'url_profile': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_profile_mobile': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['odnoklassniki_groups']
|
PypiClean
|
/ems-deploy-0.0.13.tar.gz/ems-deploy-0.0.13/deploy/chutils.py
|
import os
g_offset = {
"u": 1,
"g": 4,
"o": 7
}
p_index = {
"r": 0,
"w": 1,
"x": 2
}
def bits(number):
bit = 1
while number >= bit:
if number & bit:
yield bit
bit <<= 1
def parse_chmod(s: str, start="----------"):
"""
    Parses a chmod delta style string to a full symbolic chmod string.
    >>> parse_chmod('go+rw,u+x')
    '---xrw-rw-'
    One can also specify a start, to which the delta is applied:
    >>> parse_chmod("a-w", start='---xrw-rw-')
    '---xr--r--'
    If one supplies a full symbolic chmod string, it is returned unchanged.
:param s: Symbolic chmod string
:param start: Initial symbolic chmod string
:return: Resulting symbolic chmod string
"""
k = list(start)
if len(s) == 10 and "," not in s:
k = list(s)
else:
for part in s.split(","):
action = next(x for x in ["=", "-", "+"] if x in part)
groups, bs = part.split(action)
if not groups or groups == "a":
groups = "ugo"
for g in groups:
for b in bs:
k[g_offset[g] + p_index[b]] = b if action != "-" else "-"
return "".join(k)
def octal_to_symbolic(octal: int):
"""
Transforms an octal representation to a full symbolic string
:param octal: Octal representation (as integer)
:return: Symbolic string
"""
k = ""
for idx, s in enumerate(["x", "w", "r"] * 3):
if (octal >> idx) & 1:
k = s + k
else:
k = "-" + k
return "-" + k
def symbolic_to_octal(s, start="----------"):
"""
    Transforms a symbolic string to an octal representation. It accepts both delta and full
    strings. In case of a delta, an optional start parameter can be supplied.
    :param s: Symbolic string
    :param start: Symbolic string
:return: Octal representation (integer)
"""
k = parse_chmod(s, start)
octal = "0" + "".join(["0" if x == "-" else "1" for x in k[1:]])
return int(octal, base=2)
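# A minimal round-trip sketch of these helpers (values are illustrative):
#
# >>> parse_chmod("u+rwx,go+r")
# '-rwxr--r--'
# >>> symbolic_to_octal("u+rwx,go+r")  # == 0o744
# 484
# >>> octal_to_symbolic(0o744)
# '-rwxr--r--'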
def get_from_path(path):
"""
    Extracts the permissions from a file at a specific path
:param path: Path to the file
:return: tuple with (symbolic,octal)
"""
st = os.stat(path)
oct_perm = st.st_mode & 0o777
current = octal_to_symbolic(oct_perm)
return current, oct_perm
def check_permissions(path, target):
"""
Checks if the permissions of a file/directory match the target.
:param path: Path to file/directory
:param target: String - either a symbolic or octal representation
:return: Boolean
"""
current, oct_perm = get_from_path(path)
if target.isnumeric():
k = int(target, base=8)
else:
k = symbolic_to_octal(target, start=current)
return oct_perm == k
def extract_uid_gid(owner_ship, defaultpath=None):
import grp, pwd
if ":" not in owner_ship:
owner_ship += ":"
uid = None
gid = None
if defaultpath:
st = os.stat(defaultpath)
uid = st.st_uid
gid = st.st_gid
n_uid, n_gid = owner_ship.split(":")
if n_uid:
try:
uid = int(n_uid)
except ValueError:
uid = pwd.getpwnam(n_uid).pw_uid
if n_gid:
try:
            gid = int(n_gid)
except ValueError:
gid = grp.getgrnam(n_gid).gr_gid
return uid, gid
def check_ownership(path, owner_ship):
st = os.stat(path)
uid = st.st_uid
gid = st.st_gid
t_uid, t_gid = extract_uid_gid(owner_ship, defaultpath=path)
return t_uid == uid and t_gid == gid
if __name__ == "__main__":
is_same = check_ownership("../test", "")
|
PypiClean
|
/cobra_utils-0.3.1-py3-none-any.whl/cobra_utils/topology/reporter_metabolites.py
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
import scipy.stats as stats
from sklearn.utils import resample
from cobra_utils import query
def reporter_metabolites(model, p_val_df, genes=None, verbose=True):
'''
    This function computes an aggregate p-value for each metabolite based on the network topology of the metabolic
    reconstruction. It takes the p-value for differential expression of each gene and computes the aggregate p-value
    for the neighbor reactions of a given metabolite.
More information on:
https://www.pnas.org/cgi/doi/10.1073/pnas.0406811102
This code was adapted from RAVEN 2.0 code available on:
https://github.com/SysBioChalmers/RAVEN/blob/master/core/reporterMetabolites.m
Parameters
----------
model : cobra.core.Model.Model
A cobra model.
p_val_df : pandas.DataFrame
        A dataframe with gene names as index. It has to contain the p-values for the differential expression
        of the respective indexing genes.
genes : array-like
An array or list containing gene names (str) to be considered.
verbose : boolean, True by default.
A variable to enable or disable the printings of this function.
Returns
-------
met_p_values : pandas.DataFrame
        A dataframe reporting the respective p-values for the metabolites that had associated genes with a p-value
        in p_val_df. Additionally, the corrected, mean and std Z values, as well as the gene number, are reported
        for each metabolite.
'''
if verbose:
print('Running reporter metabolites analysis')
# Drop nan genes
df = p_val_df.dropna(how='all', axis=0)
# Evaluate information of dataframe
if 'value' not in list(df.columns):
cols = list(df.columns)
df.rename(columns={cols[0] : 'value'}, inplace=True)
if not isinstance(df.index, str):
df.index = df.index.map(str)
# Get gene Z scores
gene_Z_scores = pd.DataFrame(stats.norm.ppf(df.values) * -1.0, index=df.index, columns=['value'])
# Convert inf values to numerical values
gene_Z_scores = gene_Z_scores.replace(np.inf, 15.0)
gene_Z_scores = gene_Z_scores.replace(-np.inf, -15.0)
gene_Z_scores = gene_Z_scores.dropna()
# Mets - Genes info
met_info = query.met_info_from_model(model=model,
verbose=verbose)
met_info = met_info.loc[met_info.GeneID.isin(list(gene_Z_scores.index))]
if genes is not None:
met_info = met_info.loc[met_info.GeneID.isin(genes)]
unique_mets = met_info.MetID.unique()
met_info = met_info[['MetID', 'GeneID']]
met_info = met_info.loc[met_info.GeneID != '']
met_info.drop_duplicates(inplace=True)
# For each metabolite calculate the aggregate Z-score and keep track of the number of neighbouring genes
Z_scores = np.empty((len(unique_mets), 4))
Z_scores[:] = np.nan
Z_scores = pd.DataFrame(Z_scores, index=unique_mets, columns=['Z-score', 'Mean-Z', 'Std-Z', 'Genes-Number'])
for met in unique_mets:
met_genes = met_info.loc[met_info.MetID == met]['GeneID'].unique()
met_genes = list(set(met_genes).intersection(set(gene_Z_scores.index)))
if len(met_genes) > 0:
Z_scores.loc[met, 'Z-score'] = np.nansum(gene_Z_scores.loc[met_genes]['value'].values) / np.sqrt(len(met_genes))
Z_scores.loc[met, 'Mean-Z'] = np.nanmean(gene_Z_scores.loc[met_genes]['value'].values)
Z_scores.loc[met, 'Std-Z'] = np.nanstd(gene_Z_scores.loc[met_genes]['value'].values)
Z_scores.loc[met, 'Genes-Number'] = len(met_genes)
# Remove the metabolites which have no Z-scores
Z_scores = Z_scores.loc[~Z_scores['Z-score'].isna()]
# Correct for background by calculating the mean Z-score for random sets of the same size as the ones that
# were found for the metabolites
for i, size in enumerate(Z_scores['Genes-Number'].unique()):
size = int(size)
# Sample 100000 sets for each size. Sample with replacement
n_samples = 100000
random_Z_set = np.empty((n_samples, size))
for j in range(size):
random_Z_set[:, j] = resample(gene_Z_scores.values, n_samples=n_samples).flatten()
bg_Z = np.nansum(random_Z_set, axis=1) / np.sqrt(size)
mean_bg_Z = np.nanmean(bg_Z)
std_bg_Z = np.nanstd(bg_Z)
Z_scores.loc[Z_scores['Genes-Number'] == size, 'Z-score'] = (Z_scores.loc[Z_scores['Genes-Number'] == size, 'Z-score'].values - mean_bg_Z) / std_bg_Z
# Calculate p-values
met_p_values = Z_scores['Z-score'].apply(lambda x: 1.0 - stats.norm.cdf(x)).to_frame()
met_p_values.rename(columns={'Z-score': 'p-value'}, inplace=True)
# Report results
met_p_values['corrected Z'] = Z_scores['Z-score'].values
met_p_values['mean Z'] = Z_scores['Mean-Z'].values
met_p_values['std Z'] = Z_scores['Std-Z'].values
met_p_values['gene number'] = Z_scores['Genes-Number'].values
    # Sort p-values in ascending order.
met_p_values.sort_values(by='p-value', ascending=True, inplace=True)
return met_p_values
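# Example usage sketch (file names and column label are hypothetical):
#
# import cobra
# import pandas as pd
#
# model = cobra.io.read_sbml_model('reconstruction.xml')
# p_vals = pd.read_csv('gene_p_values.csv', index_col=0)  # index: gene IDs; one column of p-values
# met_p_values = reporter_metabolites(model, p_vals)
# print(met_p_values.head())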
|
PypiClean
|
/apache-beam-li-2.38.7.0.zip/apache-beam-li-2.38.7.0/apache_beam/ml/gcp/visionml.py
|
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from apache_beam import typehints
from apache_beam.metrics import Metrics
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import util
from cachetools.func import ttl_cache
try:
from google.cloud import vision
except ImportError:
raise ImportError(
'Google Cloud Vision not supported for this execution environment '
'(could not import google.cloud.vision).')
__all__ = [
'AnnotateImage',
'AnnotateImageWithContext',
]
@ttl_cache(maxsize=128, ttl=3600)
def get_vision_client(client_options=None):
"""Returns a Cloud Vision API client."""
_client = vision.ImageAnnotatorClient(client_options=client_options)
return _client
class AnnotateImage(PTransform):
"""A ``PTransform`` for annotating images using the GCP Vision API.
ref: https://cloud.google.com/vision/docs/
Batches elements together using ``util.BatchElements`` PTransform and sends
each batch of elements to the GCP Vision API.
  Element is a Union[str, bytes] of either a URI (e.g. a GCS URI)
or bytes base64-encoded image data.
Accepts an `AsDict` side input that maps each image to an image context.
"""
MAX_BATCH_SIZE = 5
MIN_BATCH_SIZE = 1
def __init__(
self,
features,
retry=None,
timeout=120,
max_batch_size=None,
min_batch_size=None,
client_options=None,
context_side_input=None,
metadata=None):
"""
Args:
features: (List[``vision.types.Feature.enums.Feature``]) Required.
The Vision API features to detect
retry: (google.api_core.retry.Retry) Optional.
A retry object used to retry requests.
If None is specified (default), requests will not be retried.
timeout: (float) Optional.
The time in seconds to wait for the response from the Vision API.
Default is 120.
max_batch_size: (int) Optional.
Maximum number of images to batch in the same request to the Vision API.
Default is 5 (which is also the Vision API max).
This parameter is primarily intended for testing.
min_batch_size: (int) Optional.
Minimum number of images to batch in the same request to the Vision API.
Default is None. This parameter is primarily intended for testing.
client_options:
(Union[dict, google.api_core.client_options.ClientOptions]) Optional.
Client options used to set user options on the client.
API Endpoint should be set through client_options.
context_side_input: (beam.pvalue.AsDict) Optional.
An ``AsDict`` of a PCollection to be passed to the
_ImageAnnotateFn as the image context mapping containing additional
image context and/or feature-specific parameters.
Example usage::
image_contexts =
[(''gs://cloud-samples-data/vision/ocr/sign.jpg'', Union[dict,
``vision.types.ImageContext()``]),
(''gs://cloud-samples-data/vision/ocr/sign.jpg'', Union[dict,
``vision.types.ImageContext()``]),]
context_side_input =
(
p
| "Image contexts" >> beam.Create(image_contexts)
)
visionml.AnnotateImage(features,
context_side_input=beam.pvalue.AsDict(context_side_input)))
metadata: (Optional[Sequence[Tuple[str, str]]]): Optional.
Additional metadata that is provided to the method.
"""
super().__init__()
self.features = features
self.retry = retry
self.timeout = timeout
self.max_batch_size = max_batch_size or AnnotateImage.MAX_BATCH_SIZE
if self.max_batch_size > AnnotateImage.MAX_BATCH_SIZE:
raise ValueError(
'Max batch_size exceeded. '
'Batch size needs to be smaller than {}'.format(
AnnotateImage.MAX_BATCH_SIZE))
self.min_batch_size = min_batch_size or AnnotateImage.MIN_BATCH_SIZE
self.client_options = client_options
self.context_side_input = context_side_input
self.metadata = metadata
def expand(self, pvalue):
return (
pvalue
| FlatMap(self._create_image_annotation_pairs, self.context_side_input)
| util.BatchElements(
min_batch_size=self.min_batch_size,
max_batch_size=self.max_batch_size)
| ParDo(
_ImageAnnotateFn(
features=self.features,
retry=self.retry,
timeout=self.timeout,
client_options=self.client_options,
metadata=self.metadata)))
@typehints.with_input_types(
Union[str, bytes], Optional[vision.types.ImageContext])
@typehints.with_output_types(List[vision.types.AnnotateImageRequest])
def _create_image_annotation_pairs(self, element, context_side_input):
if context_side_input: # If we have a side input image context, use that
image_context = context_side_input.get(element)
else:
image_context = None
if isinstance(element, str):
image = vision.types.Image(
source=vision.types.ImageSource(image_uri=element))
else: # Typehint checks only allows str or bytes
image = vision.types.Image(content=element)
request = vision.types.AnnotateImageRequest(
image=image, features=self.features, image_context=image_context)
yield request
class AnnotateImageWithContext(AnnotateImage):
"""A ``PTransform`` for annotating images using the GCP Vision API.
ref: https://cloud.google.com/vision/docs/
Batches elements together using ``util.BatchElements`` PTransform and sends
each batch of elements to the GCP Vision API.
Element is a tuple of::
(Union[str, bytes],
Optional[``vision.types.ImageContext``])
  where the former is either a URI (e.g. a GCS URI) or bytes
base64-encoded image data.
"""
def __init__(
self,
features,
retry=None,
timeout=120,
max_batch_size=None,
min_batch_size=None,
client_options=None,
metadata=None):
"""
Args:
features: (List[``vision.types.Feature.enums.Feature``]) Required.
The Vision API features to detect
retry: (google.api_core.retry.Retry) Optional.
A retry object used to retry requests.
If None is specified (default), requests will not be retried.
timeout: (float) Optional.
The time in seconds to wait for the response from the Vision API.
Default is 120.
max_batch_size: (int) Optional.
Maximum number of images to batch in the same request to the Vision API.
Default is 5 (which is also the Vision API max).
This parameter is primarily intended for testing.
min_batch_size: (int) Optional.
Minimum number of images to batch in the same request to the Vision API.
Default is None. This parameter is primarily intended for testing.
client_options:
(Union[dict, google.api_core.client_options.ClientOptions]) Optional.
Client options used to set user options on the client.
API Endpoint should be set through client_options.
metadata: (Optional[Sequence[Tuple[str, str]]]): Optional.
Additional metadata that is provided to the method.
"""
super().__init__(
features=features,
retry=retry,
timeout=timeout,
max_batch_size=max_batch_size,
min_batch_size=min_batch_size,
client_options=client_options,
metadata=metadata)
def expand(self, pvalue):
return (
pvalue
| FlatMap(self._create_image_annotation_pairs)
| util.BatchElements(
min_batch_size=self.min_batch_size,
max_batch_size=self.max_batch_size)
| ParDo(
_ImageAnnotateFn(
features=self.features,
retry=self.retry,
timeout=self.timeout,
client_options=self.client_options,
metadata=self.metadata)))
@typehints.with_input_types(
Tuple[Union[str, bytes], Optional[vision.types.ImageContext]])
@typehints.with_output_types(List[vision.types.AnnotateImageRequest])
def _create_image_annotation_pairs(self, element, **kwargs):
element, image_context = element # Unpack (image, image_context) tuple
if isinstance(element, str):
image = vision.types.Image(
source=vision.types.ImageSource(image_uri=element))
else: # Typehint checks only allows str or bytes
image = vision.types.Image(content=element)
request = vision.types.AnnotateImageRequest(
image=image, features=self.features, image_context=image_context)
yield request
@typehints.with_input_types(List[vision.types.AnnotateImageRequest])
class _ImageAnnotateFn(DoFn):
"""A DoFn that sends each input element to the GCP Vision API.
Returns ``google.cloud.vision.types.BatchAnnotateImagesResponse``.
"""
def __init__(self, features, retry, timeout, client_options, metadata):
super().__init__()
self._client = None
self.features = features
self.retry = retry
self.timeout = timeout
self.client_options = client_options
self.metadata = metadata
self.counter = Metrics.counter(self.__class__, "API Calls")
def setup(self):
self._client = get_vision_client(self.client_options)
def process(self, element, *args, **kwargs):
response = self._client.batch_annotate_images(
requests=element,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata)
self.counter.inc()
yield response
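# Example pipeline sketch (bucket paths are hypothetical; assumes the pre-2.0
# google-cloud-vision API surface that this module targets):
#
# import apache_beam as beam
# from apache_beam.ml.gcp import visionml
# from google.cloud import vision
#
# features = [vision.types.Feature(type=vision.enums.Feature.Type.LABEL_DETECTION)]
# with beam.Pipeline() as p:
#     _ = (p
#          | beam.Create(['gs://my-bucket/img1.jpg', 'gs://my-bucket/img2.jpg'])
#          | visionml.AnnotateImage(features))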
|
PypiClean
|
/scikit_labs-0.0.1rc2-cp39-cp39-musllinux_1_1_x86_64.whl/sklabs/utilities.py
|
import numpy as np
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
def new_data_check(self, X, y=None, weights=None):
"""
    Check new data for prediction, scoring, etc.
"""
# Check1 : whether fit had been called
check_is_fitted(self)
# Check2 : X validation
X = check_array(X, accept_sparse=True)
if X.shape[1] != self.n_features_in_:
raise ValueError("X.shape[1] should be " +
str(self.n_features_in_))
# Check3 : X, y validation
if (y is not None) and (weights is None):
X, y = check_X_y(X,
y,
accept_sparse=True,
multi_output=True,
y_numeric=True)
return X, y
# Check4: X, y, weights validation
if weights is not None:
X, y = check_X_y(X,
y,
accept_sparse=True,
multi_output=True,
y_numeric=True)
weights = np.array(weights, dtype=float)
if len(weights.shape) != 1:
raise ValueError("weights should be 1-dimension.")
if weights.shape[0] != X.shape[0]:
raise ValueError("weights should have a length of X.shape[0].")
return X, y, weights
return X
def categorical_to_dummy(x, classes=None):
"""
Transfer categorical variable into dummy variable.
Parameters
----------
x: array-like, shape(n,)
Data of the categorical variable.
classes: array-like, shape(M,), optional, default=numpy.unique(x)
All possible classes in x.
If not given, it would be set as numpy.unique(x).
Returns
-------
dummy_x: array-like, shape(n, M)
The transfered dummy data.
"""
if not classes:
classes = np.unique(x)
# print("classes: {}".format(classes))
if x.shape == ():
x = np.array([x])
n = len(x)
M = len(classes)
index = dict(zip(classes, np.arange(M)))
dummy_x = np.zeros((n, M), dtype=float)
for i, x_i in enumerate(x):
if x_i in classes:
dummy_x[i, index[x_i]] = 1
# else:
# print(
# "Data {} (index {}) is not in classes.".format(
# x_i,
# i))
return dummy_x, classes
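# Example usage sketch:
#
# >>> dummy_x, classes = categorical_to_dummy(np.array(['a', 'b', 'a', 'c']))
# >>> classes
# array(['a', 'b', 'c'], dtype='<U1')
# >>> dummy_x
# array([[1., 0., 0.],
#        [0., 1., 0.],
#        [1., 0., 0.],
#        [0., 0., 1.]])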
|
PypiClean
|
/django_bpp-1.0.9-py3-none-any.whl/django_bpp/staticroot/lolex/src/lolex-src.js
|
"use strict";
var userAgent = global.navigator && global.navigator.userAgent;
var isRunningInIE = userAgent && userAgent.indexOf("MSIE ") > -1;
var maxTimeout = Math.pow(2, 31) - 1; //see https://heycam.github.io/webidl/#abstract-opdef-converttoint
// Make properties writable in IE, as per
// http://www.adequatelygood.com/Replacing-setTimeout-Globally.html
if (isRunningInIE) {
global.setTimeout = global.setTimeout;
global.clearTimeout = global.clearTimeout;
global.setInterval = global.setInterval;
global.clearInterval = global.clearInterval;
global.Date = global.Date;
}
// setImmediate is not a standard function
// avoid adding the prop to the window object if not present
if (global.setImmediate !== undefined) {
global.setImmediate = global.setImmediate;
global.clearImmediate = global.clearImmediate;
}
// node expects setTimeout/setInterval to return a fn object w/ .ref()/.unref()
// browsers, a number.
// see https://github.com/cjohansen/Sinon.JS/pull/436
var NOOP = function () { return undefined; };
var timeoutResult = setTimeout(NOOP, 0);
var addTimerReturnsObject = typeof timeoutResult === "object";
var hrtimePresent = (global.process && typeof global.process.hrtime === "function");
var nextTickPresent = (global.process && typeof global.process.nextTick === "function");
var performancePresent = (global.performance && typeof global.performance.now === "function");
var requestAnimationFramePresent = (global.requestAnimationFrame && typeof global.requestAnimationFrame === "function");
var cancelAnimationFramePresent = (global.cancelAnimationFrame && typeof global.cancelAnimationFrame === "function");
clearTimeout(timeoutResult);
var NativeDate = Date;
var uniqueTimerId = 1;
/**
* Parse strings like "01:10:00" (meaning 1 hour, 10 minutes, 0 seconds) into
* number of milliseconds. This is used to support human-readable strings passed
* to clock.tick()
*/
function parseTime(str) {
if (!str) {
return 0;
}
var strings = str.split(":");
var l = strings.length;
var i = l;
var ms = 0;
var parsed;
if (l > 3 || !/^(\d\d:){0,2}\d\d?$/.test(str)) {
throw new Error("tick only understands numbers, 'm:s' and 'h:m:s'. Each part must be two digits");
}
while (i--) {
parsed = parseInt(strings[i], 10);
if (parsed >= 60) {
throw new Error("Invalid time " + str);
}
ms += parsed * Math.pow(60, (l - i - 1));
}
return ms * 1000;
}
/**
* Floor function that also works for negative numbers
*/
function fixedFloor(n) {
return (n >= 0 ? Math.floor(n) : Math.ceil(n));
}
/**
* % operator that also works for negative numbers
*/
function fixedModulo(n, m) {
return ((n % m) + m) % m;
}
/**
* Used to grok the `now` parameter to createClock.
* @param epoch {Date|number} the system time
*/
function getEpoch(epoch) {
if (!epoch) { return 0; }
if (typeof epoch.getTime === "function") { return epoch.getTime(); }
if (typeof epoch === "number") { return epoch; }
throw new TypeError("now should be milliseconds since UNIX epoch");
}
function inRange(from, to, timer) {
return timer && timer.callAt >= from && timer.callAt <= to;
}
function mirrorDateProperties(target, source) {
var prop;
for (prop in source) {
if (source.hasOwnProperty(prop)) {
target[prop] = source[prop];
}
}
// set special now implementation
if (source.now) {
target.now = function now() {
return target.clock.now;
};
} else {
delete target.now;
}
// set special toSource implementation
if (source.toSource) {
target.toSource = function toSource() {
return source.toSource();
};
} else {
delete target.toSource;
}
// set special toString implementation
target.toString = function toString() {
return source.toString();
};
target.prototype = source.prototype;
target.parse = source.parse;
target.UTC = source.UTC;
target.prototype.toUTCString = source.prototype.toUTCString;
return target;
}
function createDate() {
function ClockDate(year, month, date, hour, minute, second, ms) {
// Defensive and verbose to avoid potential harm in passing
// explicit undefined when user does not pass argument
switch (arguments.length) {
case 0:
return new NativeDate(ClockDate.clock.now);
case 1:
return new NativeDate(year);
case 2:
return new NativeDate(year, month);
case 3:
return new NativeDate(year, month, date);
case 4:
return new NativeDate(year, month, date, hour);
case 5:
return new NativeDate(year, month, date, hour, minute);
case 6:
return new NativeDate(year, month, date, hour, minute, second);
default:
return new NativeDate(year, month, date, hour, minute, second, ms);
}
}
return mirrorDateProperties(ClockDate, NativeDate);
}
function enqueueJob(clock, job) {
// enqueues a microtick-deferred task - ecma262/#sec-enqueuejob
if (!clock.jobs) {
clock.jobs = [];
}
clock.jobs.push(job);
}
function runJobs(clock) {
// runs all microtick-deferred tasks - ecma262/#sec-runjobs
if (!clock.jobs) {
return;
}
for (var i = 0; i < clock.jobs.length; i++) {
var job = clock.jobs[i];
job.func.apply(null, job.args);
}
clock.jobs = [];
}
function addTimer(clock, timer) {
if (timer.func === undefined) {
throw new Error("Callback must be provided to timer calls");
}
timer.type = timer.immediate ? "Immediate" : "Timeout";
if (timer.hasOwnProperty("delay")) {
timer.delay = timer.delay > maxTimeout ? 1 : timer.delay;
timer.delay = Math.max(0, timer.delay);
}
if (timer.hasOwnProperty("interval")) {
timer.type = "Interval";
timer.interval = timer.interval > maxTimeout ? 1 : timer.interval;
}
if (timer.hasOwnProperty("animation")) {
timer.type = "AnimationFrame";
timer.animation = true;
}
if (!clock.timers) {
clock.timers = {};
}
timer.id = uniqueTimerId++;
timer.createdAt = clock.now;
timer.callAt = clock.now + (parseInt(timer.delay) || (clock.duringTick ? 1 : 0));
clock.timers[timer.id] = timer;
if (addTimerReturnsObject) {
return {
id: timer.id,
ref: NOOP,
unref: NOOP
};
}
return timer.id;
}
/* eslint consistent-return: "off" */
function compareTimers(a, b) {
// Sort first by absolute timing
if (a.callAt < b.callAt) {
return -1;
}
if (a.callAt > b.callAt) {
return 1;
}
// Sort next by immediate, immediate timers take precedence
if (a.immediate && !b.immediate) {
return -1;
}
if (!a.immediate && b.immediate) {
return 1;
}
// Sort next by creation time, earlier-created timers take precedence
if (a.createdAt < b.createdAt) {
return -1;
}
if (a.createdAt > b.createdAt) {
return 1;
}
// Sort next by id, lower-id timers take precedence
if (a.id < b.id) {
return -1;
}
if (a.id > b.id) {
return 1;
}
// As timer ids are unique, no fallback `0` is necessary
}
function firstTimerInRange(clock, from, to) {
var timers = clock.timers;
var timer = null;
var id, isInRange;
for (id in timers) {
if (timers.hasOwnProperty(id)) {
isInRange = inRange(from, to, timers[id]);
if (isInRange && (!timer || compareTimers(timer, timers[id]) === 1)) {
timer = timers[id];
}
}
}
return timer;
}
function firstTimer(clock) {
var timers = clock.timers;
var timer = null;
var id;
for (id in timers) {
if (timers.hasOwnProperty(id)) {
if (!timer || compareTimers(timer, timers[id]) === 1) {
timer = timers[id];
}
}
}
return timer;
}
function lastTimer(clock) {
var timers = clock.timers;
var timer = null;
var id;
for (id in timers) {
if (timers.hasOwnProperty(id)) {
if (!timer || compareTimers(timer, timers[id]) === -1) {
timer = timers[id];
}
}
}
return timer;
}
function callTimer(clock, timer) {
if (typeof timer.interval === "number") {
clock.timers[timer.id].callAt += timer.interval;
} else {
delete clock.timers[timer.id];
}
if (typeof timer.func === "function") {
timer.func.apply(null, timer.args);
} else {
/* eslint no-eval: "off" */
eval(timer.func);
}
}
function clearTimer(clock, timerId, ttype) {
if (!timerId) {
// null appears to be allowed in most browsers, and appears to be
// relied upon by some libraries, like Bootstrap carousel
return;
}
if (!clock.timers) {
clock.timers = [];
}
// in Node, timerId is an object with .ref()/.unref(), and
// its .id field is the actual timer id.
if (typeof timerId === "object") {
timerId = timerId.id;
}
if (clock.timers.hasOwnProperty(timerId)) {
// check that the ID matches a timer of the correct type
var timer = clock.timers[timerId];
if (timer.type === ttype) {
delete clock.timers[timerId];
} else {
var clear = ttype === "AnimationFrame" ? "cancelAnimationFrame" : "clear" + ttype;
var schedule = timer.type === "AnimationFrame" ? "requestAnimationFrame" : "set" + timer.type;
throw new Error("Cannot clear timer: timer created with " + schedule
+ "() but cleared with " + clear + "()");
}
}
}
function uninstall(clock, target, config) {
var method,
i,
l;
var installedHrTime = "_hrtime";
var installedNextTick = "_nextTick";
for (i = 0, l = clock.methods.length; i < l; i++) {
method = clock.methods[i];
if (method === "hrtime" && target.process) {
target.process.hrtime = clock[installedHrTime];
} else if (method === "nextTick" && target.process) {
target.process.nextTick = clock[installedNextTick];
} else {
if (target[method] && target[method].hadOwnProperty) {
target[method] = clock["_" + method];
if (method === "clearInterval" && config.shouldAdvanceTime === true) {
target[method](clock.attachedInterval);
}
} else {
try {
delete target[method];
} catch (ignore) { /* eslint empty-block: "off" */ }
}
}
}
// Prevent multiple executions which will completely remove these props
clock.methods = [];
// return pending timers, to enable checking what timers remained on uninstall
if (!clock.timers) {
return [];
}
return Object.keys(clock.timers).map(function mapper(key) {
return clock.timers[key];
});
}
function hijackMethod(target, method, clock) {
var prop;
clock[method].hadOwnProperty = Object.prototype.hasOwnProperty.call(target, method);
clock["_" + method] = target[method];
if (method === "Date") {
var date = mirrorDateProperties(clock[method], target[method]);
target[method] = date;
} else {
target[method] = function () {
return clock[method].apply(clock, arguments);
};
for (prop in clock[method]) {
if (clock[method].hasOwnProperty(prop)) {
target[method][prop] = clock[method][prop];
}
}
}
target[method].clock = clock;
}
function doIntervalTick(clock, advanceTimeDelta) {
clock.tick(advanceTimeDelta);
}
var timers = {
setTimeout: setTimeout,
clearTimeout: clearTimeout,
setImmediate: global.setImmediate,
clearImmediate: global.clearImmediate,
setInterval: setInterval,
clearInterval: clearInterval,
Date: Date
};
if (hrtimePresent) {
timers.hrtime = global.process.hrtime;
}
if (nextTickPresent) {
timers.nextTick = global.process.nextTick;
}
if (performancePresent) {
timers.performance = global.performance;
}
if (requestAnimationFramePresent) {
timers.requestAnimationFrame = global.requestAnimationFrame;
}
if (cancelAnimationFramePresent) {
timers.cancelAnimationFrame = global.cancelAnimationFrame;
}
var keys = Object.keys || function (obj) {
var ks = [];
var key;
for (key in obj) {
if (obj.hasOwnProperty(key)) {
ks.push(key);
}
}
return ks;
};
exports.timers = timers;
/**
* @param start {Date|number} the system time
* @param loopLimit {number} maximum number of timers that will be run when calling runAll()
*/
function createClock(start, loopLimit) {
start = start || 0;
loopLimit = loopLimit || 1000;
var clock = {
now: getEpoch(start),
hrNow: 0,
timeouts: {},
Date: createDate(),
loopLimit: loopLimit
};
clock.Date.clock = clock;
function getTimeToNextFrame() {
return 16 - ((clock.now - start) % 16);
}
clock.setTimeout = function setTimeout(func, timeout) {
return addTimer(clock, {
func: func,
args: Array.prototype.slice.call(arguments, 2),
delay: timeout
});
};
clock.clearTimeout = function clearTimeout(timerId) {
return clearTimer(clock, timerId, "Timeout");
};
clock.nextTick = function nextTick(func) {
return enqueueJob(clock, {
func: func,
args: Array.prototype.slice.call(arguments, 1)
});
};
clock.setInterval = function setInterval(func, timeout) {
return addTimer(clock, {
func: func,
args: Array.prototype.slice.call(arguments, 2),
delay: timeout,
interval: timeout
});
};
clock.clearInterval = function clearInterval(timerId) {
return clearTimer(clock, timerId, "Interval");
};
clock.setImmediate = function setImmediate(func) {
return addTimer(clock, {
func: func,
args: Array.prototype.slice.call(arguments, 1),
immediate: true
});
};
clock.clearImmediate = function clearImmediate(timerId) {
return clearTimer(clock, timerId, "Immediate");
};
clock.requestAnimationFrame = function requestAnimationFrame(func) {
var result = addTimer(clock, {
func: func,
delay: getTimeToNextFrame(),
args: [clock.now + getTimeToNextFrame()],
animation: true
});
return result.id || result;
};
clock.cancelAnimationFrame = function cancelAnimationFrame(timerId) {
return clearTimer(clock, timerId, "AnimationFrame");
};
function updateHrTime(newNow) {
clock.hrNow += (newNow - clock.now);
}
clock.tick = function tick(ms) {
ms = typeof ms === "number" ? ms : parseTime(ms);
var tickFrom = clock.now;
var tickTo = clock.now + ms;
var previous = clock.now;
var timer, firstException, oldNow;
clock.duringTick = true;
// perform process.nextTick()s
oldNow = clock.now;
runJobs(clock);
if (oldNow !== clock.now) {
// compensate for any setSystemTime() call during process.nextTick() callback
tickFrom += clock.now - oldNow;
tickTo += clock.now - oldNow;
}
// perform each timer in the requested range
timer = firstTimerInRange(clock, tickFrom, tickTo);
while (timer && tickFrom <= tickTo) {
if (clock.timers[timer.id]) {
updateHrTime(timer.callAt);
tickFrom = timer.callAt;
clock.now = timer.callAt;
oldNow = clock.now;
try {
runJobs(clock);
callTimer(clock, timer);
} catch (e) {
firstException = firstException || e;
}
// compensate for any setSystemTime() call during timer callback
if (oldNow !== clock.now) {
tickFrom += clock.now - oldNow;
tickTo += clock.now - oldNow;
previous += clock.now - oldNow;
}
}
timer = firstTimerInRange(clock, previous, tickTo);
previous = tickFrom;
}
// perform process.nextTick()s again
oldNow = clock.now;
runJobs(clock);
if (oldNow !== clock.now) {
// compensate for any setSystemTime() call during process.nextTick() callback
tickFrom += clock.now - oldNow;
tickTo += clock.now - oldNow;
}
clock.duringTick = false;
// corner case: during runJobs, new timers were scheduled which could be in the range [clock.now, tickTo]
timer = firstTimerInRange(clock, tickFrom, tickTo);
if (timer) {
try {
clock.tick(tickTo - clock.now); // do it all again - for the remainder of the requested range
} catch (e) {
firstException = firstException || e;
}
} else {
// no timers remaining in the requested range: move the clock all the way to the end
updateHrTime(tickTo);
clock.now = tickTo;
}
if (firstException) {
throw firstException;
}
return clock.now;
};
clock.next = function next() {
runJobs(clock);
var timer = firstTimer(clock);
if (!timer) {
return clock.now;
}
clock.duringTick = true;
try {
updateHrTime(timer.callAt);
clock.now = timer.callAt;
callTimer(clock, timer);
runJobs(clock);
return clock.now;
} finally {
clock.duringTick = false;
}
};
clock.runAll = function runAll() {
var numTimers, i;
runJobs(clock);
for (i = 0; i < clock.loopLimit; i++) {
if (!clock.timers) {
return clock.now;
}
numTimers = keys(clock.timers).length;
if (numTimers === 0) {
return clock.now;
}
clock.next();
}
throw new Error("Aborting after running " + clock.loopLimit + " timers, assuming an infinite loop!");
};
clock.runToFrame = function runToFrame() {
return clock.tick(getTimeToNextFrame());
};
clock.runToLast = function runToLast() {
var timer = lastTimer(clock);
if (!timer) {
runJobs(clock);
return clock.now;
}
return clock.tick(timer.callAt);
};
clock.reset = function reset() {
clock.timers = {};
};
clock.setSystemTime = function setSystemTime(systemTime) {
// determine time difference
var newNow = getEpoch(systemTime);
var difference = newNow - clock.now;
var id, timer;
// update 'system clock'
clock.now = newNow;
// update timers and intervals to keep them stable
for (id in clock.timers) {
if (clock.timers.hasOwnProperty(id)) {
timer = clock.timers[id];
timer.createdAt += difference;
timer.callAt += difference;
}
}
};
if (performancePresent) {
clock.performance = Object.create(global.performance);
clock.performance.now = function lolexNow() {
return clock.hrNow;
};
}
if (hrtimePresent) {
clock.hrtime = function (prev) {
if (Array.isArray(prev)) {
var oldSecs = (prev[0] + prev[1] / 1e9);
var newSecs = (clock.hrNow / 1000);
var difference = (newSecs - oldSecs);
var secs = fixedFloor(difference);
var nanosecs = fixedModulo(difference * 1e9, 1e9);
return [
secs,
nanosecs
];
}
return [
fixedFloor(clock.hrNow / 1000),
fixedModulo(clock.hrNow * 1e6, 1e9)
];
};
}
return clock;
}
exports.createClock = createClock;
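// Standalone usage sketch (a hedged example based only on the API defined above;
// createClock touches no globals, so timers must be scheduled through the clock):
// var clock = createClock(0, 1000);
// clock.setTimeout(function () { console.log("ran at", clock.now); }, 50);
// clock.tick(50); // fires the timeout; clock.now === 50 afterwards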
/**
* @param config {Object} optional config
* @param config.target {Object} the target to install timers in (default `window`)
* @param config.now {number|Date} a number (in milliseconds) or a Date object (default epoch)
* @param config.toFake {string[]} names of the methods that should be faked.
* @param config.loopLimit {number} the maximum number of timers that will be run when calling runAll()
* @param config.shouldAdvanceTime {Boolean} tells lolex to increment mocked time automatically (default false)
* @param config.advanceTimeDelta {Number} increment mocked time every <<advanceTimeDelta>> ms (default: 20ms)
*/
exports.install = function install(config) {
if ( arguments.length > 1 || config instanceof Date || Array.isArray(config) || typeof config === "number") {
throw new TypeError("lolex.install called with " + String(config) +
" lolex 2.0+ requires an object parameter - see https://github.com/sinonjs/lolex");
}
config = typeof config !== "undefined" ? config : {};
config.shouldAdvanceTime = config.shouldAdvanceTime || false;
config.advanceTimeDelta = config.advanceTimeDelta || 20;
var i, l;
var target = config.target || global;
var clock = createClock(config.now, config.loopLimit);
clock.uninstall = function () {
return uninstall(clock, target, config);
};
clock.methods = config.toFake || [];
if (clock.methods.length === 0) {
// do not fake nextTick by default - GitHub#126
clock.methods = keys(timers).filter(function (key) {return key !== "nextTick";});
}
for (i = 0, l = clock.methods.length; i < l; i++) {
if (clock.methods[i] === "hrtime") {
if (target.process && typeof target.process.hrtime === "function") {
hijackMethod(target.process, clock.methods[i], clock);
}
} else if (clock.methods[i] === "nextTick") {
if (target.process && typeof target.process.nextTick === "function") {
hijackMethod(target.process, clock.methods[i], clock);
}
} else {
if (clock.methods[i] === "setInterval" && config.shouldAdvanceTime === true) {
var intervalTick = doIntervalTick.bind(null, clock, config.advanceTimeDelta);
var intervalId = target[clock.methods[i]](
intervalTick,
config.advanceTimeDelta);
clock.attachedInterval = intervalId;
}
hijackMethod(target, clock.methods[i], clock);
}
}
return clock;
};
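// A minimal install/uninstall sketch (assumes this module is loaded as `lolex`;
// the option names match the config documented above):
// var clock = lolex.install({ now: 0, toFake: ["setTimeout", "Date"] });
// setTimeout(function () { console.log("fired, Date.now() =", Date.now()); }, 100);
// clock.tick(100);   // advances mocked time and fires the timeout
// clock.uninstall(); // restores the original timers on the target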
|
PypiClean
|
/reqmgr2ms-monitor-2.2.4rc2.tar.gz/reqmgr2ms-monitor-2.2.4rc2/src/python/Utils/ProcessStats.py
|
# system modules
import os
import sys
import json
import time
import argparse
import threading
import traceback
try:
import psutil
except ImportError:
pass
def _baseProcessStatusFormat(pid=None):
if not pid:
pid = os.getpid()
ttime = time.localtime()
tstamp = time.strftime('%d/%b/%Y:%H:%M:%S', ttime)
return {'pid': pid, 'timestamp': tstamp, 'time': time.time()}
def processStatus(pid=None):
"Return status of the process in a dictionary format"
pdict = _baseProcessStatusFormat(pid)
if 'psutil' not in sys.modules:
return pdict
proc = psutil.Process(pid)
pdict.update(proc.as_dict())
return pdict
def processStatusDict(pid=None):
"Return status of the process in a dictionary format"
pdict = _baseProcessStatusFormat(pid)
if 'psutil' not in sys.modules:
return pdict
proc = psutil.Process(pid)
pdict.update({"cpu_times": dict(proc.cpu_times()._asdict())})
pdict.update({"cpu_percent": proc.cpu_percent(interval=1.0)})
pdict.update({"cpu_num": proc.cpu_num()})
pdict.update({"memory_full_info": dict(proc.memory_full_info()._asdict())})
pdict.update({"memory_percent": proc.memory_percent()})
return pdict
def threadStack():
"""
Return the context of all threads in a dictionary format, where each thread's
information is stored in its own dict and all threads are grouped into a
"threads" list. Code based on an example from StackOverflow:
http://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
"""
tdict = {}
id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
threads = []
for tid, stack in list(sys._current_frames().items()):
tdict = {"thead": id2name.get(tid, ""), "thead_id": tid}
stacklist = []
for filename, lineno, name, line in traceback.extract_stack(stack):
sdict = dict(filename=filename, line_number=lineno, name=name, line=line)
stacklist.append(sdict)
tdict.update({"stack": stacklist})
threads.append(tdict)
return dict(threads=threads)
def main():
"Main function to use this module as a stand-along script."
parser = argparse.ArgumentParser(prog='PROG')
parser.add_argument("--pid", action="store", dest="pid", help="process id")
opts = parser.parse_args()
pdict = processStatus(int(opts.pid) if opts.pid else None)
pdict.update(threadStack())
print(json.dumps(pdict))
if __name__ == '__main__':
main()
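# Usage sketch (hedged: assumes psutil is installed so the full metrics are
# populated, and that this module is importable as Utils.ProcessStats):
#   from Utils.ProcessStats import processStatusDict, threadStack
#   stats = processStatusDict()     # pid, timestamp, cpu and memory metrics
#   stats.update(threadStack())     # adds per-thread stack traces
#   print(stats["pid"], stats.get("cpu_percent"))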
|
PypiClean
|
/realms-wiki-0.9.3.tar.gz/realms-wiki-0.9.3/realms/static/vendor/ace-builds/src-min/theme-solarized_light.js
|
define("ace/theme/solarized_light",["require","exports","module","ace/lib/dom"],function(e,t,n){t.isDark=!1,t.cssClass="ace-solarized-light",t.cssText=".ace-solarized-light .ace_gutter {background: #fbf1d3;color: #333}.ace-solarized-light .ace_print-margin {width: 1px;background: #e8e8e8}.ace-solarized-light {background-color: #FDF6E3;color: #586E75}.ace-solarized-light .ace_cursor {color: #000000}.ace-solarized-light .ace_marker-layer .ace_selection {background: rgba(7, 54, 67, 0.09)}.ace-solarized-light.ace_multiselect .ace_selection.ace_start {box-shadow: 0 0 3px 0px #FDF6E3;}.ace-solarized-light .ace_marker-layer .ace_step {background: rgb(255, 255, 0)}.ace-solarized-light .ace_marker-layer .ace_bracket {margin: -1px 0 0 -1px;border: 1px solid rgba(147, 161, 161, 0.50)}.ace-solarized-light .ace_marker-layer .ace_active-line {background: #EEE8D5}.ace-solarized-light .ace_gutter-active-line {background-color : #EDE5C1}.ace-solarized-light .ace_marker-layer .ace_selected-word {border: 1px solid #073642}.ace-solarized-light .ace_invisible {color: rgba(147, 161, 161, 0.50)}.ace-solarized-light .ace_keyword,.ace-solarized-light .ace_meta,.ace-solarized-light .ace_support.ace_class,.ace-solarized-light .ace_support.ace_type {color: #859900}.ace-solarized-light .ace_constant.ace_character,.ace-solarized-light .ace_constant.ace_other {color: #CB4B16}.ace-solarized-light .ace_constant.ace_language {color: #B58900}.ace-solarized-light .ace_constant.ace_numeric {color: #D33682}.ace-solarized-light .ace_fold {background-color: #268BD2;border-color: #586E75}.ace-solarized-light .ace_entity.ace_name.ace_function,.ace-solarized-light .ace_entity.ace_name.ace_tag,.ace-solarized-light .ace_support.ace_function,.ace-solarized-light .ace_variable,.ace-solarized-light .ace_variable.ace_language {color: #268BD2}.ace-solarized-light .ace_storage {color: #073642}.ace-solarized-light .ace_string {color: #2AA198}.ace-solarized-light .ace_string.ace_regexp {color: #D30102}.ace-solarized-light .ace_comment,.ace-solarized-light .ace_entity.ace_other.ace_attribute-name {color: #93A1A1}.ace-solarized-light .ace_indent-guide {background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAEklEQVQImWNgYGBgYHjy8NJ/AAjgA5fzQUmBAAAAAElFTkSuQmCC) right repeat-y}";var r=e("../lib/dom");r.importCssString(t.cssText,t.cssClass)})
|
PypiClean
|
/conscript-0.1.6.tar.gz/conscript-0.1.6/CHANGES.md
|
# Conscript Release Notes
## 0.1.5
+ [2f55788](https://github.com/jsirois/conscript/commit/2f55788) Handle EntryPoints.get removal. (#5)
## 0.1.4
+ [019442d](https://github.com/jsirois/conscript/commit/019442d) add support for Python 3.10 & 3.11 (#2)
## 0.1.3
+ [305cf38](https://github.com/jsirois/conscript/commit/305cf38) Fixup changelog.
## 0.1.2
+ [2b9d5c7](https://github.com/jsirois/conscript/commit/2b9d5c7) Expand README.
+ [32678ee](https://github.com/jsirois/conscript/commit/32678ee) Unify main completion modes.
## 0.1.1
+ [0f7a388](https://github.com/jsirois/conscript/commit/0f7a388) Fixup release metadata.
## 0.1.0
Initial public release.
|
PypiClean
|
/safegate_pro-2021.7.6-py3-none-any.whl/homeassistant/components/homekit/const.py
|
# #### Misc ####
DEBOUNCE_TIMEOUT = 0.5
DEVICE_PRECISION_LEEWAY = 6
DOMAIN = "homekit"
HOMEKIT_FILE = ".homekit.state"
HOMEKIT_PAIRING_QR = "homekit-pairing-qr"
HOMEKIT_PAIRING_QR_SECRET = "homekit-pairing-qr-secret"
HOMEKIT = "homekit"
SHUTDOWN_TIMEOUT = 30
CONF_ENTRY_INDEX = "index"
# ### Codecs ####
VIDEO_CODEC_COPY = "copy"
VIDEO_CODEC_LIBX264 = "libx264"
AUDIO_CODEC_OPUS = "libopus"
VIDEO_CODEC_H264_OMX = "h264_omx"
AUDIO_CODEC_COPY = "copy"
# #### Attributes ####
ATTR_DISPLAY_NAME = "display_name"
ATTR_VALUE = "value"
ATTR_INTEGRATION = "platform"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MODEL = "model"
ATTR_SOFTWARE_VERSION = "sw_version"
ATTR_KEY_NAME = "key_name"
# Current attribute used by homekit_controller
ATTR_OBSTRUCTION_DETECTED = "obstruction-detected"
# #### Config ####
CONF_HOMEKIT_MODE = "mode"
CONF_ADVERTISE_IP = "advertise_ip"
CONF_AUDIO_CODEC = "audio_codec"
CONF_AUDIO_MAP = "audio_map"
CONF_AUDIO_PACKET_SIZE = "audio_packet_size"
CONF_AUTO_START = "auto_start"
CONF_ENTITY_CONFIG = "entity_config"
CONF_FEATURE = "feature"
CONF_FEATURE_LIST = "feature_list"
CONF_FILTER = "filter"
CONF_EXCLUDE_ACCESSORY_MODE = "exclude_accessory_mode"
CONF_LINKED_BATTERY_SENSOR = "linked_battery_sensor"
CONF_LINKED_BATTERY_CHARGING_SENSOR = "linked_battery_charging_sensor"
CONF_LINKED_DOORBELL_SENSOR = "linked_doorbell_sensor"
CONF_LINKED_MOTION_SENSOR = "linked_motion_sensor"
CONF_LINKED_HUMIDITY_SENSOR = "linked_humidity_sensor"
CONF_LINKED_OBSTRUCTION_SENSOR = "linked_obstruction_sensor"
CONF_LOW_BATTERY_THRESHOLD = "low_battery_threshold"
CONF_MAX_FPS = "max_fps"
CONF_MAX_HEIGHT = "max_height"
CONF_MAX_WIDTH = "max_width"
CONF_SAFE_MODE = "safe_mode"
CONF_ZEROCONF_DEFAULT_INTERFACE = "zeroconf_default_interface"
CONF_STREAM_ADDRESS = "stream_address"
CONF_STREAM_SOURCE = "stream_source"
CONF_SUPPORT_AUDIO = "support_audio"
CONF_VIDEO_CODEC = "video_codec"
CONF_VIDEO_MAP = "video_map"
CONF_VIDEO_PACKET_SIZE = "video_packet_size"
CONF_STREAM_COUNT = "stream_count"
# #### Config Defaults ####
DEFAULT_SUPPORT_AUDIO = False
DEFAULT_AUDIO_CODEC = AUDIO_CODEC_OPUS
DEFAULT_AUDIO_MAP = "0:a:0"
DEFAULT_AUDIO_PACKET_SIZE = 188
DEFAULT_AUTO_START = True
DEFAULT_EXCLUDE_ACCESSORY_MODE = False
DEFAULT_LOW_BATTERY_THRESHOLD = 20
DEFAULT_MAX_FPS = 30
DEFAULT_MAX_HEIGHT = 1080
DEFAULT_MAX_WIDTH = 1920
DEFAULT_PORT = 21063
DEFAULT_CONFIG_FLOW_PORT = 21064
DEFAULT_SAFE_MODE = False
DEFAULT_VIDEO_CODEC = VIDEO_CODEC_LIBX264
DEFAULT_VIDEO_MAP = "0:v:0"
DEFAULT_VIDEO_PACKET_SIZE = 1316
DEFAULT_STREAM_COUNT = 3
# #### Features ####
FEATURE_ON_OFF = "on_off"
FEATURE_PLAY_PAUSE = "play_pause"
FEATURE_PLAY_STOP = "play_stop"
FEATURE_TOGGLE_MUTE = "toggle_mute"
# #### HomeKit Component Event ####
EVENT_HOMEKIT_CHANGED = "homekit_state_change"
EVENT_HOMEKIT_TV_REMOTE_KEY_PRESSED = "homekit_tv_remote_key_pressed"
# #### HomeKit Modes ####
HOMEKIT_MODE_ACCESSORY = "accessory"
HOMEKIT_MODE_BRIDGE = "bridge"
DEFAULT_HOMEKIT_MODE = HOMEKIT_MODE_BRIDGE
HOMEKIT_MODES = [HOMEKIT_MODE_BRIDGE, HOMEKIT_MODE_ACCESSORY]
# #### HomeKit Component Services ####
SERVICE_HOMEKIT_START = "start"
SERVICE_HOMEKIT_RESET_ACCESSORY = "reset_accessory"
# #### String Constants ####
BRIDGE_MODEL = "Bridge"
BRIDGE_NAME = "Safegate Pro Bridge"
SHORT_BRIDGE_NAME = "HASS Bridge"
SHORT_ACCESSORY_NAME = "HASS Accessory"
BRIDGE_SERIAL_NUMBER = "homekit.bridge"
MANUFACTURER = "Safegate Pro"
# #### Switch Types ####
TYPE_FAUCET = "faucet"
TYPE_OUTLET = "outlet"
TYPE_SHOWER = "shower"
TYPE_SPRINKLER = "sprinkler"
TYPE_SWITCH = "switch"
TYPE_VALVE = "valve"
# #### Services ####
SERV_ACCESSORY_INFO = "AccessoryInformation"
SERV_AIR_QUALITY_SENSOR = "AirQualitySensor"
SERV_BATTERY_SERVICE = "BatteryService"
SERV_CAMERA_RTP_STREAM_MANAGEMENT = "CameraRTPStreamManagement"
SERV_CARBON_DIOXIDE_SENSOR = "CarbonDioxideSensor"
SERV_CARBON_MONOXIDE_SENSOR = "CarbonMonoxideSensor"
SERV_CONTACT_SENSOR = "ContactSensor"
SERV_DOORBELL = "Doorbell"
SERV_FANV2 = "Fanv2"
SERV_GARAGE_DOOR_OPENER = "GarageDoorOpener"
SERV_HUMIDIFIER_DEHUMIDIFIER = "HumidifierDehumidifier"
SERV_HUMIDITY_SENSOR = "HumiditySensor"
SERV_INPUT_SOURCE = "InputSource"
SERV_LEAK_SENSOR = "LeakSensor"
SERV_LIGHT_SENSOR = "LightSensor"
SERV_LIGHTBULB = "Lightbulb"
SERV_LOCK = "LockMechanism"
SERV_MOTION_SENSOR = "MotionSensor"
SERV_OCCUPANCY_SENSOR = "OccupancySensor"
SERV_OUTLET = "Outlet"
SERV_SECURITY_SYSTEM = "SecuritySystem"
SERV_SMOKE_SENSOR = "SmokeSensor"
SERV_SPEAKER = "Speaker"
SERV_STATELESS_PROGRAMMABLE_SWITCH = "StatelessProgrammableSwitch"
SERV_SWITCH = "Switch"
SERV_TELEVISION = "Television"
SERV_TELEVISION_SPEAKER = "TelevisionSpeaker"
SERV_TEMPERATURE_SENSOR = "TemperatureSensor"
SERV_THERMOSTAT = "Thermostat"
SERV_VALVE = "Valve"
SERV_WINDOW = "Window"
SERV_WINDOW_COVERING = "WindowCovering"
# #### Characteristics ####
CHAR_ACTIVE = "Active"
CHAR_ACTIVE_IDENTIFIER = "ActiveIdentifier"
CHAR_AIR_PARTICULATE_DENSITY = "AirParticulateDensity"
CHAR_AIR_QUALITY = "AirQuality"
CHAR_BATTERY_LEVEL = "BatteryLevel"
CHAR_BRIGHTNESS = "Brightness"
CHAR_CARBON_DIOXIDE_DETECTED = "CarbonDioxideDetected"
CHAR_CARBON_DIOXIDE_LEVEL = "CarbonDioxideLevel"
CHAR_CARBON_DIOXIDE_PEAK_LEVEL = "CarbonDioxidePeakLevel"
CHAR_CARBON_MONOXIDE_DETECTED = "CarbonMonoxideDetected"
CHAR_CARBON_MONOXIDE_LEVEL = "CarbonMonoxideLevel"
CHAR_CARBON_MONOXIDE_PEAK_LEVEL = "CarbonMonoxidePeakLevel"
CHAR_CHARGING_STATE = "ChargingState"
CHAR_COLOR_TEMPERATURE = "ColorTemperature"
CHAR_CONFIGURED_NAME = "ConfiguredName"
CHAR_CONTACT_SENSOR_STATE = "ContactSensorState"
CHAR_COOLING_THRESHOLD_TEMPERATURE = "CoolingThresholdTemperature"
CHAR_CURRENT_AMBIENT_LIGHT_LEVEL = "CurrentAmbientLightLevel"
CHAR_CURRENT_DOOR_STATE = "CurrentDoorState"
CHAR_CURRENT_HEATING_COOLING = "CurrentHeatingCoolingState"
CHAR_CURRENT_HUMIDIFIER_DEHUMIDIFIER = "CurrentHumidifierDehumidifierState"
CHAR_CURRENT_POSITION = "CurrentPosition"
CHAR_CURRENT_HUMIDITY = "CurrentRelativeHumidity"
CHAR_CURRENT_SECURITY_STATE = "SecuritySystemCurrentState"
CHAR_CURRENT_TEMPERATURE = "CurrentTemperature"
CHAR_CURRENT_TILT_ANGLE = "CurrentHorizontalTiltAngle"
CHAR_CURRENT_VISIBILITY_STATE = "CurrentVisibilityState"
CHAR_DEHUMIDIFIER_THRESHOLD_HUMIDITY = "RelativeHumidityDehumidifierThreshold"
CHAR_FIRMWARE_REVISION = "FirmwareRevision"
CHAR_HEATING_THRESHOLD_TEMPERATURE = "HeatingThresholdTemperature"
CHAR_HUE = "Hue"
CHAR_HUMIDIFIER_THRESHOLD_HUMIDITY = "RelativeHumidityHumidifierThreshold"
CHAR_IDENTIFIER = "Identifier"
CHAR_IN_USE = "InUse"
CHAR_INPUT_SOURCE_TYPE = "InputSourceType"
CHAR_IS_CONFIGURED = "IsConfigured"
CHAR_LEAK_DETECTED = "LeakDetected"
CHAR_LOCK_CURRENT_STATE = "LockCurrentState"
CHAR_LOCK_TARGET_STATE = "LockTargetState"
CHAR_LINK_QUALITY = "LinkQuality"
CHAR_MANUFACTURER = "Manufacturer"
CHAR_MODEL = "Model"
CHAR_MOTION_DETECTED = "MotionDetected"
CHAR_MUTE = "Mute"
CHAR_NAME = "Name"
CHAR_OBSTRUCTION_DETECTED = "ObstructionDetected"
CHAR_OCCUPANCY_DETECTED = "OccupancyDetected"
CHAR_ON = "On"
CHAR_OUTLET_IN_USE = "OutletInUse"
CHAR_POSITION_STATE = "PositionState"
CHAR_PROGRAMMABLE_SWITCH_EVENT = "ProgrammableSwitchEvent"
CHAR_REMOTE_KEY = "RemoteKey"
CHAR_ROTATION_DIRECTION = "RotationDirection"
CHAR_ROTATION_SPEED = "RotationSpeed"
CHAR_SATURATION = "Saturation"
CHAR_SERIAL_NUMBER = "SerialNumber"
CHAR_SLEEP_DISCOVER_MODE = "SleepDiscoveryMode"
CHAR_SMOKE_DETECTED = "SmokeDetected"
CHAR_STATUS_LOW_BATTERY = "StatusLowBattery"
CHAR_STREAMING_STRATUS = "StreamingStatus"
CHAR_SWING_MODE = "SwingMode"
CHAR_TARGET_DOOR_STATE = "TargetDoorState"
CHAR_TARGET_HEATING_COOLING = "TargetHeatingCoolingState"
CHAR_TARGET_POSITION = "TargetPosition"
CHAR_TARGET_HUMIDIFIER_DEHUMIDIFIER = "TargetHumidifierDehumidifierState"
CHAR_TARGET_HUMIDITY = "TargetRelativeHumidity"
CHAR_TARGET_SECURITY_STATE = "SecuritySystemTargetState"
CHAR_TARGET_TEMPERATURE = "TargetTemperature"
CHAR_TARGET_TILT_ANGLE = "TargetHorizontalTiltAngle"
CHAR_HOLD_POSITION = "HoldPosition"
CHAR_TEMP_DISPLAY_UNITS = "TemperatureDisplayUnits"
CHAR_VALVE_TYPE = "ValveType"
CHAR_VOLUME = "Volume"
CHAR_VOLUME_SELECTOR = "VolumeSelector"
CHAR_VOLUME_CONTROL_TYPE = "VolumeControlType"
# #### Properties ####
PROP_MAX_VALUE = "maxValue"
PROP_MIN_VALUE = "minValue"
PROP_MIN_STEP = "minStep"
PROP_CELSIUS = {"minValue": -273, "maxValue": 999}
PROP_VALID_VALUES = "ValidValues"
# #### Device Classes ####
DEVICE_CLASS_DOOR = "door"
DEVICE_CLASS_GARAGE_DOOR = "garage_door"
DEVICE_CLASS_GAS = "gas"
DEVICE_CLASS_MOISTURE = "moisture"
DEVICE_CLASS_MOTION = "motion"
DEVICE_CLASS_OCCUPANCY = "occupancy"
DEVICE_CLASS_OPENING = "opening"
DEVICE_CLASS_PM25 = "pm25"
DEVICE_CLASS_SMOKE = "smoke"
DEVICE_CLASS_WINDOW = "window"
# #### Thresholds ####
THRESHOLD_CO = 25
THRESHOLD_CO2 = 1000
# #### Default values ####
DEFAULT_MIN_TEMP_WATER_HEATER = 40 # °C
DEFAULT_MAX_TEMP_WATER_HEATER = 60 # °C
# #### Media Player Key Names ####
KEY_ARROW_DOWN = "arrow_down"
KEY_ARROW_LEFT = "arrow_left"
KEY_ARROW_RIGHT = "arrow_right"
KEY_ARROW_UP = "arrow_up"
KEY_BACK = "back"
KEY_EXIT = "exit"
KEY_FAST_FORWARD = "fast_forward"
KEY_INFORMATION = "information"
KEY_NEXT_TRACK = "next_track"
KEY_PREVIOUS_TRACK = "previous_track"
KEY_REWIND = "rewind"
KEY_SELECT = "select"
KEY_PLAY_PAUSE = "play_pause"
# #### Door states ####
HK_DOOR_OPEN = 0
HK_DOOR_CLOSED = 1
HK_DOOR_OPENING = 2
HK_DOOR_CLOSING = 3
HK_DOOR_STOPPED = 4
# ### Position State ####
HK_POSITION_GOING_TO_MIN = 0
HK_POSITION_GOING_TO_MAX = 1
HK_POSITION_STOPPED = 2
# ### Charging State ###
HK_NOT_CHARGING = 0
HK_CHARGING = 1
HK_NOT_CHARGABLE = 2
# ### Config Options ###
CONFIG_OPTIONS = [
CONF_FILTER,
CONF_AUTO_START,
CONF_SAFE_MODE,
CONF_ENTITY_CONFIG,
CONF_HOMEKIT_MODE,
]
|
PypiClean
|
/wai.pytorchimageclass-0.0.2.tar.gz/wai.pytorchimageclass-0.0.2/src/pic/main.py
|
import argparse
import os
import random
import time
import traceback
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from pic.utils import save_checkpoint, NORMALIZE
best_acc1 = 0
def enable_cuda(layer):
"""
Enables CUDA in the layer, if possible.
:param layer: the layer to enable cuda for
:return: the original layer or the cuda-enabled layer
"""
return layer.cuda() if torch.cuda.is_available() else layer
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
# Data loading code
train_dataset = datasets.ImageFolder(
args.train_dir,
transforms.Compose([
transforms.Resize((args.width, args.height)),
transforms.RandomResizedCrop((int(args.width*0.9), int(args.height*0.9))),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
NORMALIZE,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
classes = train_loader.dataset.classes
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
args.test_dir,
transforms.Compose([
transforms.Resize((args.width, args.height)),
transforms.CenterCrop((int(args.width*0.9), int(args.height*0.9))),
transforms.ToTensor(),
NORMALIZE,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# configure output layer for new classes
# this is, unfortunately, architecture-specific. see example discussion here:
# https://github.com/pytorch/examples/pull/58
num_network_classes = len(classes)
if args.arch.startswith("resnet"):
# https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html#resnet
model.fc = enable_cuda(nn.Linear(model.fc.in_features, len(classes)))
elif args.arch == "mobilenet_v2":
# https://stackoverflow.com/a/57286341/4698227
model.classifier[1] = enable_cuda(torch.nn.Linear(in_features=model.classifier[1].in_features, out_features=len(classes)))
elif args.arch == "densenet":
# https://discuss.pytorch.org/t/pytorch-transfer-learning-with-densenet/15579/5
model.classifier = enable_cuda(torch.nn.Linear(in_features=model.classifier.in_features, out_features=len(classes)))
else:
num_network_classes = 1000 # imagenet
print("WARNING: cannot replace final layer for new classes on architecture '%s', will stick with imagenet's 1000 classes!" % args.arch)
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
if (args.output_interval == -1) or (((epoch + 1) % args.output_interval) == 0) or (epoch + 1 == args.epochs):
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if args.output_interval == -1:
checkpoint_filename = 'checkpoint.pth'
else:
checkpoint_filename = None
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'width': args.width,
'height': args.height,
'classes': classes,
'num_network_classes': num_network_classes,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best, filename=checkpoint_filename, output_dir=args.output_dir)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmt_str = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmt_str.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmt_str = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmt_str.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
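# Worked example of the step schedule above with the default --lr 0.1:
# epochs 0-29 use lr = 0.1, epochs 30-59 use 0.01, epochs 60-89 use 0.001,
# i.e. lr = 0.1 * (0.1 ** (epoch // 30)).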
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
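# Worked example: for a batch of 4 images with topk=(1, 5), if the true class is
# the top-1 prediction for 2 images and within the top 5 for all 4, this returns
# [tensor([50.]), tensor([100.])] (percentages of the batch).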
def main(args=None):
"""
Performs the model building/evaluation.
Use -h to see all options.
:param args: the command-line arguments to use, uses sys.argv if None
:type args: list
"""
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch Image Classification - Training',
prog="pic-main",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-t', '--train_dir', metavar='DIR', required=True,
help='path to top-level directory of training set, with each sub-directory being treated as a category')
parser.add_argument('-T', '--test_dir', metavar='DIR', required=True,
help='path to top-level directory of test set, with each sub-directory being treated as a category')
parser.add_argument('-o', '--output_dir', metavar='DIR', default=".",
help='the directory to store the models and checkpoints in')
parser.add_argument('-i', '--output_interval', metavar='INT', default=-1, type=int,
help='the output interval in epochs for checkpoints. Use -1 to always overwrite last checkpoint.')
parser.add_argument('--width', default=256, type=int,
metavar='WIDTH', help='The image width to scale to')
parser.add_argument('--height', default=256, type=int,
metavar='HEIGHT', help='The image height to scale to')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names))
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size, this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parsed = parser.parse_args(args=args)
if parsed.seed is not None:
random.seed(parsed.seed)
torch.manual_seed(parsed.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if parsed.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if parsed.dist_url == "env://" and parsed.world_size == -1:
parsed.world_size = int(os.environ["WORLD_SIZE"])
parsed.distributed = parsed.world_size > 1 or parsed.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if parsed.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
parsed.world_size = ngpus_per_node * parsed.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, parsed))
else:
# Simply call main_worker function
main_worker(parsed.gpu, ngpus_per_node, parsed)
def sys_main():
"""
Runs the main function using the system cli arguments, and
returns a system error code.
:return: 0 for success, 1 for failure.
:rtype: int
"""
try:
main()
return 0
except Exception:
print(traceback.format_exc())
return 1
if __name__ == "__main__":
try:
main()
except Exception:
print(traceback.format_exc())
|
PypiClean
|
/ak_M2Crypto-0.26.1-cp37-cp37m-macosx_10_13_x86_64.whl/M2Crypto/m2crypto.py
|
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_m2crypto', [dirname(__file__)])
except ImportError:
import _m2crypto
return _m2crypto
if fp is not None:
try:
_mod = imp.load_module('_m2crypto', fp, pathname, description)
finally:
fp.close()
return _mod
_m2crypto = swig_import_helper()
del swig_import_helper
else:
import _m2crypto
del version_info
from _m2crypto import *
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
|
PypiClean
|
/ka-lite-static-0.17.6b2.tar.gz/ka-lite-static-0.17.6b2/kalite/packages/dist/django/contrib/sessions/backends/cache.py
|
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import get_cache
from django.utils.six.moves import xrange
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = get_cache(settings.SESSION_CACHE_ALIAS)
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return KEY_PREFIX + self._get_or_create_session_key()
def load(self):
try:
session_data = self._cache.get(self.cache_key, None)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
session_data = None
if session_data is not None:
return session_data
self.create()
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in xrange(10000):
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError("Unable to create a new session key.")
def save(self, must_create=False):
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.cache_key,
self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
return (KEY_PREFIX + session_key) in self._cache
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(KEY_PREFIX + session_key)
@classmethod
def clear_expired(cls):
pass
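# Usage sketch (hedged: assumes a cache backend is configured under
# settings.SESSION_CACHE_ALIAS):
#   store = SessionStore()
#   store["user_id"] = 42
#   store.save()
#   restored = SessionStore(session_key=store.session_key)
#   assert restored["user_id"] == 42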
|
PypiClean
|
/pulumi_sdm-0.4.2.tar.gz/pulumi_sdm-0.4.2/pulumi_sdm/secret_store.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SecretStoreArgs', 'SecretStore']
@pulumi.input_type
class SecretStoreArgs:
def __init__(__self__, *,
aws: Optional[pulumi.Input['SecretStoreAwsArgs']] = None,
azure_store: Optional[pulumi.Input['SecretStoreAzureStoreArgs']] = None,
cyberark_conjur: Optional[pulumi.Input['SecretStoreCyberarkConjurArgs']] = None,
cyberark_pam: Optional[pulumi.Input['SecretStoreCyberarkPamArgs']] = None,
cyberark_pam_experimental: Optional[pulumi.Input['SecretStoreCyberarkPamExperimentalArgs']] = None,
delinea_store: Optional[pulumi.Input['SecretStoreDelineaStoreArgs']] = None,
gcp_store: Optional[pulumi.Input['SecretStoreGcpStoreArgs']] = None,
vault_approle: Optional[pulumi.Input['SecretStoreVaultApproleArgs']] = None,
vault_tls: Optional[pulumi.Input['SecretStoreVaultTlsArgs']] = None,
vault_token: Optional[pulumi.Input['SecretStoreVaultTokenArgs']] = None):
"""
The set of arguments for constructing a SecretStore resource.
:param pulumi.Input['SecretStoreCyberarkPamExperimentalArgs'] cyberark_pam_experimental: CyberarkPAMExperimentalStore is currently unstable, and its API may change, or it may be removed, without a major
version bump.
"""
if aws is not None:
pulumi.set(__self__, "aws", aws)
if azure_store is not None:
pulumi.set(__self__, "azure_store", azure_store)
if cyberark_conjur is not None:
pulumi.set(__self__, "cyberark_conjur", cyberark_conjur)
if cyberark_pam is not None:
pulumi.set(__self__, "cyberark_pam", cyberark_pam)
if cyberark_pam_experimental is not None:
pulumi.set(__self__, "cyberark_pam_experimental", cyberark_pam_experimental)
if delinea_store is not None:
pulumi.set(__self__, "delinea_store", delinea_store)
if gcp_store is not None:
pulumi.set(__self__, "gcp_store", gcp_store)
if vault_approle is not None:
pulumi.set(__self__, "vault_approle", vault_approle)
if vault_tls is not None:
pulumi.set(__self__, "vault_tls", vault_tls)
if vault_token is not None:
pulumi.set(__self__, "vault_token", vault_token)
@property
@pulumi.getter
def aws(self) -> Optional[pulumi.Input['SecretStoreAwsArgs']]:
return pulumi.get(self, "aws")
@aws.setter
def aws(self, value: Optional[pulumi.Input['SecretStoreAwsArgs']]):
pulumi.set(self, "aws", value)
@property
@pulumi.getter(name="azureStore")
def azure_store(self) -> Optional[pulumi.Input['SecretStoreAzureStoreArgs']]:
return pulumi.get(self, "azure_store")
@azure_store.setter
def azure_store(self, value: Optional[pulumi.Input['SecretStoreAzureStoreArgs']]):
pulumi.set(self, "azure_store", value)
@property
@pulumi.getter(name="cyberarkConjur")
def cyberark_conjur(self) -> Optional[pulumi.Input['SecretStoreCyberarkConjurArgs']]:
return pulumi.get(self, "cyberark_conjur")
@cyberark_conjur.setter
def cyberark_conjur(self, value: Optional[pulumi.Input['SecretStoreCyberarkConjurArgs']]):
pulumi.set(self, "cyberark_conjur", value)
@property
@pulumi.getter(name="cyberarkPam")
def cyberark_pam(self) -> Optional[pulumi.Input['SecretStoreCyberarkPamArgs']]:
return pulumi.get(self, "cyberark_pam")
@cyberark_pam.setter
def cyberark_pam(self, value: Optional[pulumi.Input['SecretStoreCyberarkPamArgs']]):
pulumi.set(self, "cyberark_pam", value)
@property
@pulumi.getter(name="cyberarkPamExperimental")
def cyberark_pam_experimental(self) -> Optional[pulumi.Input['SecretStoreCyberarkPamExperimentalArgs']]:
"""
CyberarkPAMExperimentalStore is currently unstable, and its API may change, or it may be removed, without a major
version bump.
"""
return pulumi.get(self, "cyberark_pam_experimental")
@cyberark_pam_experimental.setter
def cyberark_pam_experimental(self, value: Optional[pulumi.Input['SecretStoreCyberarkPamExperimentalArgs']]):
pulumi.set(self, "cyberark_pam_experimental", value)
@property
@pulumi.getter(name="delineaStore")
def delinea_store(self) -> Optional[pulumi.Input['SecretStoreDelineaStoreArgs']]:
return pulumi.get(self, "delinea_store")
@delinea_store.setter
def delinea_store(self, value: Optional[pulumi.Input['SecretStoreDelineaStoreArgs']]):
pulumi.set(self, "delinea_store", value)
@property
@pulumi.getter(name="gcpStore")
def gcp_store(self) -> Optional[pulumi.Input['SecretStoreGcpStoreArgs']]:
return pulumi.get(self, "gcp_store")
@gcp_store.setter
def gcp_store(self, value: Optional[pulumi.Input['SecretStoreGcpStoreArgs']]):
pulumi.set(self, "gcp_store", value)
@property
@pulumi.getter(name="vaultApprole")
def vault_approle(self) -> Optional[pulumi.Input['SecretStoreVaultApproleArgs']]:
return pulumi.get(self, "vault_approle")
@vault_approle.setter
def vault_approle(self, value: Optional[pulumi.Input['SecretStoreVaultApproleArgs']]):
pulumi.set(self, "vault_approle", value)
@property
@pulumi.getter(name="vaultTls")
def vault_tls(self) -> Optional[pulumi.Input['SecretStoreVaultTlsArgs']]:
return pulumi.get(self, "vault_tls")
@vault_tls.setter
def vault_tls(self, value: Optional[pulumi.Input['SecretStoreVaultTlsArgs']]):
pulumi.set(self, "vault_tls", value)
@property
@pulumi.getter(name="vaultToken")
def vault_token(self) -> Optional[pulumi.Input['SecretStoreVaultTokenArgs']]:
return pulumi.get(self, "vault_token")
@vault_token.setter
def vault_token(self, value: Optional[pulumi.Input['SecretStoreVaultTokenArgs']]):
pulumi.set(self, "vault_token", value)
@pulumi.input_type
class _SecretStoreState:
def __init__(__self__, *,
aws: Optional[pulumi.Input['SecretStoreAwsArgs']] = None,
azure_store: Optional[pulumi.Input['SecretStoreAzureStoreArgs']] = None,
cyberark_conjur: Optional[pulumi.Input['SecretStoreCyberarkConjurArgs']] = None,
cyberark_pam: Optional[pulumi.Input['SecretStoreCyberarkPamArgs']] = None,
cyberark_pam_experimental: Optional[pulumi.Input['SecretStoreCyberarkPamExperimentalArgs']] = None,
delinea_store: Optional[pulumi.Input['SecretStoreDelineaStoreArgs']] = None,
gcp_store: Optional[pulumi.Input['SecretStoreGcpStoreArgs']] = None,
vault_approle: Optional[pulumi.Input['SecretStoreVaultApproleArgs']] = None,
vault_tls: Optional[pulumi.Input['SecretStoreVaultTlsArgs']] = None,
vault_token: Optional[pulumi.Input['SecretStoreVaultTokenArgs']] = None):
"""
Input properties used for looking up and filtering SecretStore resources.
:param pulumi.Input['SecretStoreCyberarkPamExperimentalArgs'] cyberark_pam_experimental: CyberarkPAMExperimentalStore is currently unstable, and its API may change, or it may be removed, without a major
version bump.
"""
if aws is not None:
pulumi.set(__self__, "aws", aws)
if azure_store is not None:
pulumi.set(__self__, "azure_store", azure_store)
if cyberark_conjur is not None:
pulumi.set(__self__, "cyberark_conjur", cyberark_conjur)
if cyberark_pam is not None:
pulumi.set(__self__, "cyberark_pam", cyberark_pam)
if cyberark_pam_experimental is not None:
pulumi.set(__self__, "cyberark_pam_experimental", cyberark_pam_experimental)
if delinea_store is not None:
pulumi.set(__self__, "delinea_store", delinea_store)
if gcp_store is not None:
pulumi.set(__self__, "gcp_store", gcp_store)
if vault_approle is not None:
pulumi.set(__self__, "vault_approle", vault_approle)
if vault_tls is not None:
pulumi.set(__self__, "vault_tls", vault_tls)
if vault_token is not None:
pulumi.set(__self__, "vault_token", vault_token)
@property
@pulumi.getter
def aws(self) -> Optional[pulumi.Input['SecretStoreAwsArgs']]:
return pulumi.get(self, "aws")
@aws.setter
def aws(self, value: Optional[pulumi.Input['SecretStoreAwsArgs']]):
pulumi.set(self, "aws", value)
@property
@pulumi.getter(name="azureStore")
def azure_store(self) -> Optional[pulumi.Input['SecretStoreAzureStoreArgs']]:
return pulumi.get(self, "azure_store")
@azure_store.setter
def azure_store(self, value: Optional[pulumi.Input['SecretStoreAzureStoreArgs']]):
pulumi.set(self, "azure_store", value)
@property
@pulumi.getter(name="cyberarkConjur")
def cyberark_conjur(self) -> Optional[pulumi.Input['SecretStoreCyberarkConjurArgs']]:
return pulumi.get(self, "cyberark_conjur")
@cyberark_conjur.setter
def cyberark_conjur(self, value: Optional[pulumi.Input['SecretStoreCyberarkConjurArgs']]):
pulumi.set(self, "cyberark_conjur", value)
@property
@pulumi.getter(name="cyberarkPam")
def cyberark_pam(self) -> Optional[pulumi.Input['SecretStoreCyberarkPamArgs']]:
return pulumi.get(self, "cyberark_pam")
@cyberark_pam.setter
def cyberark_pam(self, value: Optional[pulumi.Input['SecretStoreCyberarkPamArgs']]):
pulumi.set(self, "cyberark_pam", value)
@property
@pulumi.getter(name="cyberarkPamExperimental")
def cyberark_pam_experimental(self) -> Optional[pulumi.Input['SecretStoreCyberarkPamExperimentalArgs']]:
"""
CyberarkPAMExperimentalStore is currently unstable, and its API may change, or it may be removed, without a major
version bump.
"""
return pulumi.get(self, "cyberark_pam_experimental")
@cyberark_pam_experimental.setter
def cyberark_pam_experimental(self, value: Optional[pulumi.Input['SecretStoreCyberarkPamExperimentalArgs']]):
pulumi.set(self, "cyberark_pam_experimental", value)
@property
@pulumi.getter(name="delineaStore")
def delinea_store(self) -> Optional[pulumi.Input['SecretStoreDelineaStoreArgs']]:
return pulumi.get(self, "delinea_store")
@delinea_store.setter
def delinea_store(self, value: Optional[pulumi.Input['SecretStoreDelineaStoreArgs']]):
pulumi.set(self, "delinea_store", value)
@property
@pulumi.getter(name="gcpStore")
def gcp_store(self) -> Optional[pulumi.Input['SecretStoreGcpStoreArgs']]:
return pulumi.get(self, "gcp_store")
@gcp_store.setter
def gcp_store(self, value: Optional[pulumi.Input['SecretStoreGcpStoreArgs']]):
pulumi.set(self, "gcp_store", value)
@property
@pulumi.getter(name="vaultApprole")
def vault_approle(self) -> Optional[pulumi.Input['SecretStoreVaultApproleArgs']]:
return pulumi.get(self, "vault_approle")
@vault_approle.setter
def vault_approle(self, value: Optional[pulumi.Input['SecretStoreVaultApproleArgs']]):
pulumi.set(self, "vault_approle", value)
@property
@pulumi.getter(name="vaultTls")
def vault_tls(self) -> Optional[pulumi.Input['SecretStoreVaultTlsArgs']]:
return pulumi.get(self, "vault_tls")
@vault_tls.setter
def vault_tls(self, value: Optional[pulumi.Input['SecretStoreVaultTlsArgs']]):
pulumi.set(self, "vault_tls", value)
@property
@pulumi.getter(name="vaultToken")
def vault_token(self) -> Optional[pulumi.Input['SecretStoreVaultTokenArgs']]:
return pulumi.get(self, "vault_token")
@vault_token.setter
def vault_token(self, value: Optional[pulumi.Input['SecretStoreVaultTokenArgs']]):
pulumi.set(self, "vault_token", value)
class SecretStore(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws: Optional[pulumi.Input[pulumi.InputType['SecretStoreAwsArgs']]] = None,
azure_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreAzureStoreArgs']]] = None,
cyberark_conjur: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkConjurArgs']]] = None,
cyberark_pam: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkPamArgs']]] = None,
cyberark_pam_experimental: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkPamExperimentalArgs']]] = None,
delinea_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreDelineaStoreArgs']]] = None,
gcp_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreGcpStoreArgs']]] = None,
vault_approle: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultApproleArgs']]] = None,
vault_tls: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultTlsArgs']]] = None,
vault_token: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultTokenArgs']]] = None,
__props__=None):
"""
## Import
SecretStore can be imported using the id, e.g.,
```sh
$ pulumi import sdm:index/secretStore:SecretStore example se-12345678
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['SecretStoreCyberarkPamExperimentalArgs']] cyberark_pam_experimental: CyberarkPAMExperimentalStore is currently unstable, and its API may change, or it may be removed, without a major
version bump.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[SecretStoreArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
SecretStore can be imported using the id, e.g.,
```sh
$ pulumi import sdm:index/secretStore:SecretStore example se-12345678
```
:param str resource_name: The name of the resource.
:param SecretStoreArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecretStoreArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws: Optional[pulumi.Input[pulumi.InputType['SecretStoreAwsArgs']]] = None,
azure_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreAzureStoreArgs']]] = None,
cyberark_conjur: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkConjurArgs']]] = None,
cyberark_pam: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkPamArgs']]] = None,
cyberark_pam_experimental: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkPamExperimentalArgs']]] = None,
delinea_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreDelineaStoreArgs']]] = None,
gcp_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreGcpStoreArgs']]] = None,
vault_approle: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultApproleArgs']]] = None,
vault_tls: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultTlsArgs']]] = None,
vault_token: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultTokenArgs']]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecretStoreArgs.__new__(SecretStoreArgs)
__props__.__dict__["aws"] = aws
__props__.__dict__["azure_store"] = azure_store
__props__.__dict__["cyberark_conjur"] = cyberark_conjur
__props__.__dict__["cyberark_pam"] = cyberark_pam
__props__.__dict__["cyberark_pam_experimental"] = cyberark_pam_experimental
__props__.__dict__["delinea_store"] = delinea_store
__props__.__dict__["gcp_store"] = gcp_store
__props__.__dict__["vault_approle"] = vault_approle
__props__.__dict__["vault_tls"] = vault_tls
__props__.__dict__["vault_token"] = vault_token
super(SecretStore, __self__).__init__(
'sdm:index/secretStore:SecretStore',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
aws: Optional[pulumi.Input[pulumi.InputType['SecretStoreAwsArgs']]] = None,
azure_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreAzureStoreArgs']]] = None,
cyberark_conjur: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkConjurArgs']]] = None,
cyberark_pam: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkPamArgs']]] = None,
cyberark_pam_experimental: Optional[pulumi.Input[pulumi.InputType['SecretStoreCyberarkPamExperimentalArgs']]] = None,
delinea_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreDelineaStoreArgs']]] = None,
gcp_store: Optional[pulumi.Input[pulumi.InputType['SecretStoreGcpStoreArgs']]] = None,
vault_approle: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultApproleArgs']]] = None,
vault_tls: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultTlsArgs']]] = None,
vault_token: Optional[pulumi.Input[pulumi.InputType['SecretStoreVaultTokenArgs']]] = None) -> 'SecretStore':
"""
Get an existing SecretStore resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['SecretStoreCyberarkPamExperimentalArgs']] cyberark_pam_experimental: CyberarkPAMExperimentalStore is currently unstable, and its API may change, or it may be removed, without a major
version bump.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecretStoreState.__new__(_SecretStoreState)
__props__.__dict__["aws"] = aws
__props__.__dict__["azure_store"] = azure_store
__props__.__dict__["cyberark_conjur"] = cyberark_conjur
__props__.__dict__["cyberark_pam"] = cyberark_pam
__props__.__dict__["cyberark_pam_experimental"] = cyberark_pam_experimental
__props__.__dict__["delinea_store"] = delinea_store
__props__.__dict__["gcp_store"] = gcp_store
__props__.__dict__["vault_approle"] = vault_approle
__props__.__dict__["vault_tls"] = vault_tls
__props__.__dict__["vault_token"] = vault_token
return SecretStore(resource_name, opts=opts, __props__=__props__)
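# Editor's note (illustrative, not part of the generated provider code):
# a hypothetical lookup of an already-imported store, reusing the id from
# the CLI import example in the docstrings above:
#
#     existing = SecretStore.get("imported-store", id="se-12345678")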
@property
@pulumi.getter
def aws(self) -> pulumi.Output[Optional['outputs.SecretStoreAws']]:
return pulumi.get(self, "aws")
@property
@pulumi.getter(name="azureStore")
def azure_store(self) -> pulumi.Output[Optional['outputs.SecretStoreAzureStore']]:
return pulumi.get(self, "azure_store")
@property
@pulumi.getter(name="cyberarkConjur")
def cyberark_conjur(self) -> pulumi.Output[Optional['outputs.SecretStoreCyberarkConjur']]:
return pulumi.get(self, "cyberark_conjur")
@property
@pulumi.getter(name="cyberarkPam")
def cyberark_pam(self) -> pulumi.Output[Optional['outputs.SecretStoreCyberarkPam']]:
return pulumi.get(self, "cyberark_pam")
@property
@pulumi.getter(name="cyberarkPamExperimental")
def cyberark_pam_experimental(self) -> pulumi.Output[Optional['outputs.SecretStoreCyberarkPamExperimental']]:
"""
CyberarkPAMExperimentalStore is currently unstable, and its API may change, or it may be removed, without a major
version bump.
"""
return pulumi.get(self, "cyberark_pam_experimental")
@property
@pulumi.getter(name="delineaStore")
def delinea_store(self) -> pulumi.Output[Optional['outputs.SecretStoreDelineaStore']]:
return pulumi.get(self, "delinea_store")
@property
@pulumi.getter(name="gcpStore")
def gcp_store(self) -> pulumi.Output[Optional['outputs.SecretStoreGcpStore']]:
return pulumi.get(self, "gcp_store")
@property
@pulumi.getter(name="vaultApprole")
def vault_approle(self) -> pulumi.Output[Optional['outputs.SecretStoreVaultApprole']]:
return pulumi.get(self, "vault_approle")
@property
@pulumi.getter(name="vaultTls")
def vault_tls(self) -> pulumi.Output[Optional['outputs.SecretStoreVaultTls']]:
return pulumi.get(self, "vault_tls")
@property
@pulumi.getter(name="vaultToken")
def vault_token(self) -> pulumi.Output[Optional['outputs.SecretStoreVaultToken']]:
return pulumi.get(self, "vault_token")
|
PypiClean
|
/paradoxdjango-0.0.7.tar.gz/paradoxdjango-0.0.7/django/db/backends/sqlite3/features.py
|
import operator
import platform
from django.db import transaction
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.utils import OperationalError
from django.utils.functional import cached_property
from .base import Database
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite can read from a cursor since SQLite 3.6.5, subject to the caveat
# that statements within a connection aren't isolated from each other. See
# https://sqlite.org/isolation.html.
can_use_chunked_reads = True
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
max_query_params = 999
supports_mixed_date_datetime_comparisons = False
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
can_create_inline_fk = False
supports_paramstyle_pyformat = False
can_clone_databases = True
supports_temporal_subtraction = True
ignores_table_name_case = True
supports_cast_with_precision = False
time_cast_precision = 3
can_release_savepoints = True
# Is "ALTER TABLE ... RENAME COLUMN" supported?
can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0)
supports_parentheses_in_compound = False
# Deferred constraint checks can be emulated on SQLite < 3.20 but not in a
# reasonably performant way.
supports_pragma_foreign_key_check = Database.sqlite_version_info >= (3, 20, 0)
can_defer_constraint_checks = supports_pragma_foreign_key_check
supports_functions_in_partial_indexes = Database.sqlite_version_info >= (3, 15, 0)
supports_over_clause = Database.sqlite_version_info >= (3, 25, 0)
supports_frame_range_fixed_distance = Database.sqlite_version_info >= (3, 28, 0)
supports_aggregate_filter_clause = Database.sqlite_version_info >= (3, 30, 1)
supports_order_by_nulls_modifier = Database.sqlite_version_info >= (3, 30, 0)
order_by_nulls_first = True
supports_json_field_contains = False
test_collations = {
"ci": "nocase",
"cs": "binary",
"non_default": "nocase",
}
@cached_property
def django_test_skips(self):
skips = {
"SQLite stores values rounded to 15 significant digits.": {
"model_fields.test_decimalfield.DecimalFieldTests."
"test_fetch_from_db_without_float_rounding",
},
"SQLite naively remakes the table on field alteration.": {
"schema.tests.SchemaTests.test_unique_no_unnecessary_fk_drops",
"schema.tests.SchemaTests.test_unique_and_reverse_m2m",
"schema.tests.SchemaTests."
"test_alter_field_default_doesnt_perform_queries",
"schema.tests.SchemaTests."
"test_rename_column_renames_deferred_sql_references",
},
"SQLite doesn't have a constraint.": {
"model_fields.test_integerfield.PositiveIntegerFieldTests."
"test_negative_values",
},
"SQLite doesn't support negative precision for ROUND().": {
"db_functions.math.test_round.RoundTests."
"test_null_with_negative_precision",
"db_functions.math.test_round.RoundTests."
"test_decimal_with_negative_precision",
"db_functions.math.test_round.RoundTests."
"test_float_with_negative_precision",
"db_functions.math.test_round.RoundTests."
"test_integer_with_negative_precision",
},
}
if Database.sqlite_version_info < (3, 27):
skips.update(
{
"Nondeterministic failure on SQLite < 3.27.": {
"expressions_window.tests.WindowFunctionTests."
"test_subquery_row_range_rank",
},
}
)
if self.connection.is_in_memory_db():
skips.update(
{
"the sqlite backend's close() method is a no-op when using an "
"in-memory database": {
"servers.test_liveserverthread.LiveServerThreadTest."
"test_closes_connections",
"servers.tests.LiveServerTestCloseConnectionTest."
"test_closes_connections",
},
}
)
return skips
@cached_property
def supports_atomic_references_rename(self):
# SQLite 3.28.0 bundled with macOS 10.15 does not support renaming
# references atomically.
if platform.mac_ver()[0].startswith(
"10.15."
) and Database.sqlite_version_info == (3, 28, 0):
return False
return Database.sqlite_version_info >= (3, 26, 0)
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
"BigAutoField": "AutoField",
"DurationField": "BigIntegerField",
"GenericIPAddressField": "CharField",
"SmallAutoField": "AutoField",
}
@cached_property
def supports_json_field(self):
with self.connection.cursor() as cursor:
try:
with transaction.atomic(self.connection.alias):
cursor.execute('SELECT JSON(\'{"a": "b"}\')')
except OperationalError:
return False
return True
can_introspect_json_field = property(operator.attrgetter("supports_json_field"))
has_json_object_function = property(operator.attrgetter("supports_json_field"))
@cached_property
def can_return_columns_from_insert(self):
return Database.sqlite_version_info >= (3, 35)
can_return_rows_from_bulk_insert = property(
operator.attrgetter("can_return_columns_from_insert")
)
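# Editor's note: a runnable, self-contained sketch of the version-gating
# pattern used throughout the class above. The version tuples below are
# hypothetical stand-ins for Database.sqlite_version_info, not values
# taken from this file:
def _example_supports_over_clause(sqlite_version_info):
    # Window functions (the OVER clause) arrived in SQLite 3.25.0.
    return sqlite_version_info >= (3, 25, 0)
if __name__ == "__main__":
    assert _example_supports_over_clause((3, 30, 1))
    assert not _example_supports_over_clause((3, 24, 0))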
|
PypiClean
|
/django-adminlte-3-0.1.6.tar.gz/django-adminlte-3-0.1.6/adminlte3/static/admin-lte/plugins/datatables-scroller/js/dataTables.scroller.min.js
|
(function(e){"function"===typeof define&&define.amd?define(["jquery","datatables.net"],function(h){return e(h,window,document)}):"object"===typeof exports?module.exports=function(h,i){h||(h=window);if(!i||!i.fn.dataTable)i=require("datatables.net")(h,i).$;return e(i,h,h.document)}:e(jQuery,window,document)})(function(e,h,i,l){var m=e.fn.dataTable,g=function(a,b){if(this instanceof g){b===l&&(b={});var c=e.fn.dataTable.Api(a);this.s={dt:c.settings()[0],dtApi:c,tableTop:0,tableBottom:0,redrawTop:0,
redrawBottom:0,autoHeight:!0,viewportRows:0,stateTO:null,drawTO:null,heights:{jump:null,page:null,virtual:null,scroll:null,row:null,viewport:null,labelFactor:1},topRowFloat:0,scrollDrawDiff:null,loaderVisible:!1,forceReposition:!1,baseRowTop:0,baseScrollTop:0,mousedown:!1,lastScrollTop:0};this.s=e.extend(this.s,g.oDefaults,b);this.s.heights.row=this.s.rowHeight;this.dom={force:i.createElement("div"),label:e('<div class="dts_label">0</div>'),scroller:null,table:null,loader:null};this.s.dt.oScroller||
(this.s.dt.oScroller=this,this.construct())}else alert("Scroller warning: Scroller must be initialised with the 'new' keyword.")};e.extend(g.prototype,{measure:function(a){this.s.autoHeight&&this._calcRowHeight();var b=this.s.heights;b.row&&(b.viewport=e.contains(i,this.dom.scroller)?this.dom.scroller.clientHeight:this._parseHeight(e(this.dom.scroller).css("height")),b.viewport||(b.viewport=this._parseHeight(e(this.dom.scroller).css("max-height"))),this.s.viewportRows=parseInt(b.viewport/b.row,10)+
1,this.s.dt._iDisplayLength=this.s.viewportRows*this.s.displayBuffer);var c=this.dom.label.outerHeight();b.labelFactor=(b.viewport-c)/b.scroll;(a===l||a)&&this.s.dt.oInstance.fnDraw(!1)},pageInfo:function(){var a=this.dom.scroller.scrollTop,b=this.s.dt.fnRecordsDisplay(),c=Math.ceil(this.pixelsToRow(a+this.s.heights.viewport,!1,this.s.ani));return{start:Math.floor(this.pixelsToRow(a,!1,this.s.ani)),end:b<c?b-1:c-1}},pixelsToRow:function(a,b,c){a-=this.s.baseScrollTop;c=c?(this._domain("physicalToVirtual",
this.s.baseScrollTop)+a)/this.s.heights.row:a/this.s.heights.row+this.s.baseRowTop;return b||b===l?parseInt(c,10):c},rowToPixels:function(a,b,c){a-=this.s.baseRowTop;c=c?this._domain("virtualToPhysical",this.s.baseScrollTop):this.s.baseScrollTop;c+=a*this.s.heights.row;return b||b===l?parseInt(c,10):c},scrollToRow:function(a,b){var c=this,d=!1,f=this.rowToPixels(a),k=a-(this.s.displayBuffer-1)/2*this.s.viewportRows;0>k&&(k=0);if((f>this.s.redrawBottom||f<this.s.redrawTop)&&this.s.dt._iDisplayStart!==
k)d=!0,f=this._domain("virtualToPhysical",a*this.s.heights.row),this.s.redrawTop<f&&f<this.s.redrawBottom&&(this.s.forceReposition=!0,b=!1);b===l||b?(this.s.ani=d,e(this.dom.scroller).animate({scrollTop:f},function(){setTimeout(function(){c.s.ani=!1},250)})):e(this.dom.scroller).scrollTop(f)},construct:function(){var a=this,b=this.s.dtApi;if(this.s.dt.oFeatures.bPaginate){this.dom.force.style.position="relative";this.dom.force.style.top="0px";this.dom.force.style.left="0px";this.dom.force.style.width=
"1px";this.dom.scroller=e("div."+this.s.dt.oClasses.sScrollBody,this.s.dt.nTableWrapper)[0];this.dom.scroller.appendChild(this.dom.force);this.dom.scroller.style.position="relative";this.dom.table=e(">table",this.dom.scroller)[0];this.dom.table.style.position="absolute";this.dom.table.style.top="0px";this.dom.table.style.left="0px";e(b.table().container()).addClass("dts DTS");this.s.loadingIndicator&&(this.dom.loader=e('<div class="dataTables_processing dts_loading">'+this.s.dt.oLanguage.sLoadingRecords+
"</div>").css("display","none"),e(this.dom.scroller.parentNode).css("position","relative").append(this.dom.loader));this.dom.label.appendTo(this.dom.scroller);this.s.heights.row&&"auto"!=this.s.heights.row&&(this.s.autoHeight=!1);this.measure(!1);this.s.ingnoreScroll=!0;this.s.stateSaveThrottle=this.s.dt.oApi._fnThrottle(function(){a.s.dtApi.state.save()},500);e(this.dom.scroller).on("scroll.dt-scroller",function(){a._scroll.call(a)});e(this.dom.scroller).on("touchstart.dt-scroller",function(){a._scroll.call(a)});
e(this.dom.scroller).on("mousedown.dt-scroller",function(){a.s.mousedown=true}).on("mouseup.dt-scroller",function(){a.s.mouseup=false;a.dom.label.css("display","none")});e(h).on("resize.dt-scroller",function(){a.measure(false);a._info()});var c=!0,d=b.state.loaded();b.on("stateSaveParams.scroller",function(b,e,h){h.scroller={topRow:c&&d&&d.scroller?d.scroller.topRow:a.s.topRowFloat,baseScrollTop:a.s.baseScrollTop,baseRowTop:a.s.baseRowTop};c=false});d&&d.scroller&&(this.s.topRowFloat=d.scroller.topRow,
this.s.baseScrollTop=d.scroller.baseScrollTop,this.s.baseRowTop=d.scroller.baseRowTop);b.on("init.scroller",function(){a.measure(false);a.s.scrollType="jump";a._draw();b.on("draw.scroller",function(){a._draw()})});b.on("preDraw.dt.scroller",function(){a._scrollForce()});b.on("destroy.scroller",function(){e(h).off("resize.dt-scroller");e(a.dom.scroller).off(".dt-scroller");e(a.s.dt.nTable).off(".scroller");e(a.s.dt.nTableWrapper).removeClass("DTS");e("div.DTS_Loading",a.dom.scroller.parentNode).remove();
a.dom.table.style.position="";a.dom.table.style.top="";a.dom.table.style.left=""})}else this.s.dt.oApi._fnLog(this.s.dt,0,"Pagination must be enabled for Scroller")},_calcRowHeight:function(){var a=this.s.dt,b=a.nTable,c=b.cloneNode(!1),d=e("<tbody/>").appendTo(c),f=e('<div class="'+a.oClasses.sWrapper+' DTS"><div class="'+a.oClasses.sScrollWrapper+'"><div class="'+a.oClasses.sScrollBody+'"></div></div></div>');e("tbody tr:lt(4)",b).clone().appendTo(d);var k=e("tr",d).length;if(1===k)d.prepend("<tr><td> </td></tr>"),
d.append("<tr><td> </td></tr>");else for(;3>k;k++)d.append("<tr><td> </td></tr>");e("div."+a.oClasses.sScrollBody,f).append(c);a=this.s.dt.nHolding||b.parentNode;e(a).is(":visible")||(a="body");f.appendTo(a);this.s.heights.row=e("tr",d).eq(1).outerHeight();f.remove()},_draw:function(){var a=this,b=this.s.heights,c=this.dom.scroller.scrollTop,d=e(this.s.dt.nTable).height(),f=this.s.dt._iDisplayStart,k=this.s.dt._iDisplayLength,h=this.s.dt.fnRecordsDisplay();this.s.skip=!0;if((this.s.dt.bSorted||
this.s.dt.bFiltered)&&0===f&&!this.s.dt._drawHold)this.s.topRowFloat=0;c="jump"===this.s.scrollType?this._domain("virtualToPhysical",this.s.topRowFloat*b.row):c;this.s.baseScrollTop=c;this.s.baseRowTop=this.s.topRowFloat;var g=c-(this.s.topRowFloat-f)*b.row;0===f?g=0:f+k>=h&&(g=b.scroll-d);this.dom.table.style.top=g+"px";this.s.tableTop=g;this.s.tableBottom=d+this.s.tableTop;d=(c-this.s.tableTop)*this.s.boundaryScale;this.s.redrawTop=c-d;this.s.redrawBottom=c+d>b.scroll-b.viewport-b.row?b.scroll-
b.viewport-b.row:c+d;this.s.skip=!1;this.s.dt.oFeatures.bStateSave&&null!==this.s.dt.oLoadedState&&"undefined"!=typeof this.s.dt.oLoadedState.iScroller?((c=(this.s.dt.sAjaxSource||a.s.dt.ajax)&&!this.s.dt.oFeatures.bServerSide?!0:!1)&&2==this.s.dt.iDraw||!c&&1==this.s.dt.iDraw)&&setTimeout(function(){e(a.dom.scroller).scrollTop(a.s.dt.oLoadedState.iScroller);a.s.redrawTop=a.s.dt.oLoadedState.iScroller-b.viewport/2;setTimeout(function(){a.s.ingnoreScroll=!1},0)},0):a.s.ingnoreScroll=!1;this.s.dt.oFeatures.bInfo&&
setTimeout(function(){a._info.call(a)},0);this.dom.loader&&this.s.loaderVisible&&(this.dom.loader.css("display","none"),this.s.loaderVisible=!1)},_domain:function(a,b){var c=this.s.heights,d;if(c.virtual===c.scroll||1E4>b)return b;if("virtualToPhysical"===a&&b>=c.virtual-1E4)return d=c.virtual-b,c.scroll-d;if("physicalToVirtual"===a&&b>=c.scroll-1E4)return d=c.scroll-b,c.virtual-d;c=(c.virtual-1E4-1E4)/(c.scroll-1E4-1E4);d=1E4-1E4*c;return"virtualToPhysical"===a?(b-d)/c:c*b+d},_info:function(){if(this.s.dt.oFeatures.bInfo){var a=
this.s.dt,b=a.oLanguage,c=this.dom.scroller.scrollTop,d=Math.floor(this.pixelsToRow(c,!1,this.s.ani)+1),f=a.fnRecordsTotal(),g=a.fnRecordsDisplay(),c=Math.ceil(this.pixelsToRow(c+this.s.heights.viewport,!1,this.s.ani)),c=g<c?g:c,h=a.fnFormatNumber(d),i=a.fnFormatNumber(c),j=a.fnFormatNumber(f),l=a.fnFormatNumber(g),h=0===a.fnRecordsDisplay()&&a.fnRecordsDisplay()==a.fnRecordsTotal()?b.sInfoEmpty+b.sInfoPostFix:0===a.fnRecordsDisplay()?b.sInfoEmpty+" "+b.sInfoFiltered.replace("_MAX_",j)+b.sInfoPostFix:
a.fnRecordsDisplay()==a.fnRecordsTotal()?b.sInfo.replace("_START_",h).replace("_END_",i).replace("_MAX_",j).replace("_TOTAL_",l)+b.sInfoPostFix:b.sInfo.replace("_START_",h).replace("_END_",i).replace("_MAX_",j).replace("_TOTAL_",l)+" "+b.sInfoFiltered.replace("_MAX_",a.fnFormatNumber(a.fnRecordsTotal()))+b.sInfoPostFix;(b=b.fnInfoCallback)&&(h=b.call(a.oInstance,a,d,c,f,g,h));d=a.aanFeatures.i;if("undefined"!=typeof d){f=0;for(g=d.length;f<g;f++)e(d[f]).html(h)}e(a.nTable).triggerHandler("info.dt")}},
_parseHeight:function(a){var b,c=/^([+-]?(?:\d+(?:\.\d+)?|\.\d+))(px|em|rem|vh)$/.exec(a);if(null===c)return 0;a=parseFloat(c[1]);c=c[2];"px"===c?b=a:"vh"===c?b=a/100*e(h).height():"rem"===c?b=a*parseFloat(e(":root").css("font-size")):"em"===c&&(b=a*parseFloat(e("body").css("font-size")));return b?b:0},_scroll:function(){var a=this,b=this.s.heights,c=this.dom.scroller.scrollTop,d;if(!this.s.skip&&!this.s.ingnoreScroll&&c!==this.s.lastScrollTop)if(this.s.dt.bFiltered||this.s.dt.bSorted)this.s.lastScrollTop=
0;else{this._info();clearTimeout(this.s.stateTO);this.s.stateTO=setTimeout(function(){a.s.dtApi.state.save()},250);this.s.scrollType=Math.abs(c-this.s.lastScrollTop)>b.viewport?"jump":"cont";this.s.topRowFloat="cont"===this.s.scrollType?this.pixelsToRow(c,!1,!1):this._domain("physicalToVirtual",c)/b.row;0>this.s.topRowFloat&&(this.s.topRowFloat=0);if(this.s.forceReposition||c<this.s.redrawTop||c>this.s.redrawBottom){var f=Math.ceil((this.s.displayBuffer-1)/2*this.s.viewportRows);d=parseInt(this.s.topRowFloat,
10)-f;this.s.forceReposition=!1;0>=d?d=0:d+this.s.dt._iDisplayLength>this.s.dt.fnRecordsDisplay()?(d=this.s.dt.fnRecordsDisplay()-this.s.dt._iDisplayLength,0>d&&(d=0)):0!==d%2&&d++;if(d!=this.s.dt._iDisplayStart&&(this.s.tableTop=e(this.s.dt.nTable).offset().top,this.s.tableBottom=e(this.s.dt.nTable).height()+this.s.tableTop,f=function(){if(a.s.scrollDrawReq===null)a.s.scrollDrawReq=c;a.s.dt._iDisplayStart=d;a.s.dt.oApi._fnDraw(a.s.dt)},this.s.dt.oFeatures.bServerSide?(clearTimeout(this.s.drawTO),
this.s.drawTO=setTimeout(f,this.s.serverWait)):f(),this.dom.loader&&!this.s.loaderVisible))this.dom.loader.css("display","block"),this.s.loaderVisible=!0}else this.s.topRowFloat=this.pixelsToRow(c,!1,!0);this.s.lastScrollTop=c;this.s.stateSaveThrottle();"jump"===this.s.scrollType&&this.s.mousedown&&this.dom.label.html(this.s.dt.fnFormatNumber(parseInt(this.s.topRowFloat,10)+1)).css("top",c+c*b.labelFactor).css("display","block")}},_scrollForce:function(){var a=this.s.heights;a.virtual=a.row*this.s.dt.fnRecordsDisplay();
a.scroll=a.virtual;1E6<a.scroll&&(a.scroll=1E6);this.dom.force.style.height=a.scroll>this.s.heights.row?a.scroll+"px":this.s.heights.row+"px"}});g.defaults={boundaryScale:0.5,displayBuffer:9,loadingIndicator:!1,rowHeight:"auto",serverWait:200};g.oDefaults=g.defaults;g.version="2.0.1";e(i).on("preInit.dt.dtscroller",function(a,b){if("dt"===a.namespace){var c=b.oInit.scroller,d=m.defaults.scroller;if(c||d)d=e.extend({},c,d),!1!==c&&new g(b,d)}});e.fn.dataTable.Scroller=g;e.fn.DataTable.Scroller=g;var j=
e.fn.dataTable.Api;j.register("scroller()",function(){return this});j.register("scroller().rowToPixels()",function(a,b,c){var d=this.context;if(d.length&&d[0].oScroller)return d[0].oScroller.rowToPixels(a,b,c)});j.register("scroller().pixelsToRow()",function(a,b,c){var d=this.context;if(d.length&&d[0].oScroller)return d[0].oScroller.pixelsToRow(a,b,c)});j.register(["scroller().scrollToRow()","scroller.toPosition()"],function(a,b){this.iterator("table",function(c){c.oScroller&&c.oScroller.scrollToRow(a,
b)});return this});j.register("row().scrollTo()",function(a){var b=this;this.iterator("row",function(c,d){if(c.oScroller){var e=b.rows({order:"applied",search:"applied"}).indexes().indexOf(d);c.oScroller.scrollToRow(e,a)}});return this});j.register("scroller.measure()",function(a){this.iterator("table",function(b){b.oScroller&&b.oScroller.measure(a)});return this});j.register("scroller.page()",function(){var a=this.context;if(a.length&&a[0].oScroller)return a[0].oScroller.pageInfo()});return g});
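// Editor's note: a hypothetical initialisation of the Scroller extension
// above; the table selector and ajax URL are assumptions, not part of
// this file:
//
//     $('#example').DataTable({
//         ajax: '/data/source',   // large data set worth virtualising
//         deferRender: true,
//         scrollY: 300,           // Scroller needs a fixed-height scroll body
//         scroller: true          // picked up by the preInit.dt hook above
//     });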
|
PypiClean
|
/tw2.jqplugins.fancytree-0.0.2.tar.gz/tw2.jqplugins.fancytree-0.0.2/tw2/jqplugins/fancytree/static/jquery/plugins/fancytree/2.3.0/src/jquery.fancytree.table.js
|
;(function($, window, document, undefined) {
"use strict";
/* *****************************************************************************
* Private functions and variables
*/
function _assert(cond, msg){
msg = msg || "";
if(!cond){
$.error("Assertion failed " + msg);
}
}
function insertSiblingAfter(referenceNode, newNode) {
referenceNode.parentNode.insertBefore(newNode, referenceNode.nextSibling);
}
/* Show/hide all rows that are structural descendants of `parent`. */
function setChildRowVisibility(parent, flag) {
parent.visit(function(node){
var tr = node.tr;
// currentFlag = node.hide ? false : flag; // fix for ext-filter
if(tr){
tr.style.display = (node.hide || !flag) ? "none" : "";
}
if(!node.expanded){
return "skip";
}
});
}
/* Find node that is rendered in previous row. */
function findPrevRowNode(node){
var i, last, prev,
parent = node.parent,
siblings = parent ? parent.children : null;
if(siblings && siblings.length > 1 && siblings[0] !== node){
// use the lowest descendant of the preceding sibling
i = $.inArray(node, siblings);
prev = siblings[i - 1];
_assert(prev.tr);
// descend to lowest child (with a <tr> tag)
while(prev.children){
last = prev.children[prev.children.length - 1];
if(!last.tr){
break;
}
prev = last;
}
}else{
// if there is no preceding sibling, use the direct parent
prev = parent;
}
return prev;
}
$.ui.fancytree.registerExtension({
name: "table",
version: "0.2.0",
// Default options for this extension.
options: {
checkboxColumnIdx: null, // render the checkboxes into this column index (default: nodeColumnIdx)
customStatus: false, // true: generate renderColumns events for status nodes
indentation: 16, // indent every node level by 16px
nodeColumnIdx: 0 // render node expander, icon, and title to this column (default: #0)
},
// Override virtual methods for this extension.
// `this`: this extension object
// `this._super`: the virtual function that was overridden (member of prev. extension or Fancytree)
treeInit: function(ctx){
var i, $row, tdRole,
tree = ctx.tree,
$table = tree.widget.element;
$table.addClass("fancytree-container fancytree-ext-table");
tree.tbody = $table.find("> tbody")[0];
tree.columnCount = $("thead >tr >th", $table).length;
$(tree.tbody).empty();
tree.rowFragment = document.createDocumentFragment();
$row = $("<tr />");
tdRole = "";
if(ctx.options.aria){
$row.attr("role", "row");
tdRole = " role='gridcell'";
}
for(i=0; i<tree.columnCount; i++) {
if(ctx.options.table.nodeColumnIdx === i){
$row.append("<td" + tdRole + "><span class='fancytree-node' /></td>");
}else{
$row.append("<td" + tdRole + " />");
}
}
tree.rowFragment.appendChild($row.get(0));
// Make sure that status classes are set on the node's <tr> elements
tree.statusClassPropName = "tr";
tree.ariaPropName = "tr";
this.nodeContainerAttrName = "tr";
this._super(ctx);
// standard Fancytree created a root UL
$(tree.rootNode.ul).remove();
tree.rootNode.ul = null;
tree.$container = $table;
// Add container to the TAB chain
this.$container.attr("tabindex", this.options.tabbable ? "0" : "-1");
if(this.options.aria){
tree.$container
.attr("role", "treegrid")
.attr("aria-readonly", true);
}
},
/* Called by nodeRender to sync node order with tag order.*/
// nodeFixOrder: function(ctx) {
// },
nodeRemoveChildMarkup: function(ctx) {
var node = ctx.node;
// DT.debug("nodeRemoveChildMarkup()", node.toString());
node.visit(function(n){
if(n.tr){
$(n.tr).remove();
n.tr = null;
}
});
},
nodeRemoveMarkup: function(ctx) {
var node = ctx.node;
// DT.debug("nodeRemoveMarkup()", node.toString());
if(node.tr){
$(node.tr).remove();
node.tr = null;
}
this.nodeRemoveChildMarkup(ctx);
},
/* Override standard render. */
nodeRender: function(ctx, force, deep, collapsed, _recursive) {
var children, firstTr, i, l, newRow, prevNode, prevTr, subCtx,
tree = ctx.tree,
node = ctx.node,
opts = ctx.options,
isRootNode = !node.parent;
if( !_recursive ){
ctx.hasCollapsedParents = node.parent && !node.parent.expanded;
}
// $.ui.fancytree.debug("*** nodeRender " + node + ", isRoot=" + isRootNode, "tr=" + node.tr, "hcp=" + ctx.hasCollapsedParents, "parent.tr=" + (node.parent && node.parent.tr));
if( !isRootNode ){
if(!node.tr){
if( ctx.hasCollapsedParents /*&& !node.parent.tr*/ ) {
// #166: we assume that the parent will be (recursively) rendered
// later anyway.
node.debug("nodeRender ignored due to unrendered parent");
return;
}
// Create new <tr> after previous row
newRow = tree.rowFragment.firstChild.cloneNode(true);
prevNode = findPrevRowNode(node);
// $.ui.fancytree.debug("*** nodeRender " + node + ": prev: " + prevNode.key);
_assert(prevNode);
if(collapsed === true && _recursive){
// hide all child rows, so we can use an animation to show it later
newRow.style.display = "none";
}else if(deep && ctx.hasCollapsedParents){
// also hide this row if deep === true but any parent is collapsed
newRow.style.display = "none";
// newRow.style.color = "red";
}
if(!prevNode.tr){
_assert(!prevNode.parent, "prev. row must have a tr, or is system root");
tree.tbody.appendChild(newRow);
}else{
insertSiblingAfter(prevNode.tr, newRow);
}
node.tr = newRow;
if( node.key && opts.generateIds ){
node.tr.id = opts.idPrefix + node.key;
}
node.tr.ftnode = node;
if(opts.aria){
// TODO: why doesn't this work:
// node.li.role = "treeitem";
$(node.tr).attr("aria-labelledby", "ftal_" + node.key);
}
node.span = $("span.fancytree-node", node.tr).get(0);
// Set icon, link, and title (normally this is only required on initial render)
this.nodeRenderTitle(ctx);
// Allow tweaking, binding, after node was created for the first time
// tree._triggerNodeEvent("createNode", ctx);
if ( opts.createNode ){
opts.createNode.call(tree, {type: "createNode"}, ctx);
}
} else {
if( force ) {
// Set icon, link, and title (normally this is only required on initial render)
this.nodeRenderTitle(ctx); // triggers renderColumns()
} else {
// Update element classes according to node state
this.nodeRenderStatus(ctx);
}
}
}
// Allow tweaking after node state was rendered
// tree._triggerNodeEvent("renderNode", ctx);
if ( opts.renderNode ){
opts.renderNode.call(tree, {type: "renderNode"}, ctx);
}
// Visit child nodes
// Add child markup
children = node.children;
if(children && (isRootNode || deep || node.expanded)){
for(i=0, l=children.length; i<l; i++) {
subCtx = $.extend({}, ctx, {node: children[i]});
subCtx.hasCollapsedParents = subCtx.hasCollapsedParents || !node.expanded;
this.nodeRender(subCtx, force, deep, collapsed, true);
}
}
// Make sure that <tr> order matches node.children order.
if(children && !_recursive){ // we only have to do it once, for the root branch
prevTr = node.tr || null;
firstTr = tree.tbody.firstChild;
// Iterate over all descendants
node.visit(function(n){
if(n.tr){
if(!n.parent.expanded && n.tr.style.display !== "none"){
// fix visibility after a node was dropped over a collapsed node
n.tr.style.display = "none";
setChildRowVisibility(n, false);
}
if(n.tr.previousSibling !== prevTr){
node.debug("_fixOrder: mismatch at node: " + n);
var nextTr = prevTr ? prevTr.nextSibling : firstTr;
tree.tbody.insertBefore(n.tr, nextTr);
}
prevTr = n.tr;
}
});
}
// Update element classes according to node state
// if(!isRootNode){
// this.nodeRenderStatus(ctx);
// }
},
nodeRenderTitle: function(ctx, title) {
var $cb,
node = ctx.node,
opts = ctx.options;
this._super(ctx);
// Move checkbox to custom column
if(opts.checkbox && opts.table.checkboxColumnIdx != null ){
$cb = $("span.fancytree-checkbox", node.span).detach();
$(node.tr).find("td:first").html($cb);
}
// Let user code write column content
// ctx.tree._triggerNodeEvent("renderColumns", node);
// Update element classes according to node state
if( ! node.isRoot() ){
this.nodeRenderStatus(ctx);
}
if( !opts.table.customStatus && node.isStatusNode() ) {
// default rendering for status node: leave other cells empty
} else if ( opts.renderColumns ) {
opts.renderColumns.call(ctx.tree, {type: "renderColumns"}, ctx);
}
},
nodeRenderStatus: function(ctx) {
var indent,
node = ctx.node,
opts = ctx.options;
this._super(ctx);
$(node.tr).removeClass("fancytree-node");
// indent
indent = (node.getLevel() - 1) * opts.table.indentation;
$(node.span).css({marginLeft: indent + "px"});
},
/* Expand node, return Deferred.promise. */
nodeSetExpanded: function(ctx, flag, opts) {
var dfd = new $.Deferred(),
subOpts = $.extend({}, opts, {noEvents: true, noAnimation: true});
opts = opts || {};
function _afterExpand(ok) {
flag = (flag !== false);
setChildRowVisibility(ctx.node, flag);
if( ok ) {
if( flag && ctx.options.autoScroll && !opts.noAnimation && ctx.node.hasChildren() ) {
// Scroll down to last child, but keep current node visible
ctx.node.getLastChild().scrollIntoView(true, {topNode: ctx.node}).always(function(){
if( !opts.noEvents ) {
ctx.tree._triggerNodeEvent(flag ? "expand" : "collapse", ctx);
}
dfd.resolveWith(ctx.node);
});
} else {
if( !opts.noEvents ) {
ctx.tree._triggerNodeEvent(flag ? "expand" : "collapse", ctx);
}
dfd.resolveWith(ctx.node);
}
} else {
if( !opts.noEvents ) {
ctx.tree._triggerNodeEvent(flag ? "expand" : "collapse", ctx);
}
dfd.rejectWith(ctx.node);
}
}
// Call base-expand with disabled events and animation
this._super(ctx, flag, subOpts).done(function () {
_afterExpand(true);
}).fail(function () {
_afterExpand(false);
});
return dfd.promise();
},
nodeSetStatus: function(ctx, status, message, details) {
if(status === "ok"){
var node = ctx.node,
firstChild = ( node.children ? node.children[0] : null );
if ( firstChild && firstChild.isStatusNode() ) {
$(firstChild.tr).remove();
}
}
this._super(ctx, status, message, details);
},
treeClear: function(ctx) {
this.nodeRemoveChildMarkup(this._makeHookContext(this.rootNode));
return this._super(ctx);
}
/*,
treeSetFocus: function(ctx, flag) {
// alert("treeSetFocus" + ctx.tree.$container);
ctx.tree.$container.focus();
$.ui.fancytree.focusTree = ctx.tree;
}*/
});
}(jQuery, window, document));
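// Editor's note: a hypothetical wiring of the "table" extension above; the
// selector and column layout are assumptions, not part of this file:
//
//     $("#treetable").fancytree({
//         extensions: ["table"],
//         table: { indentation: 20, nodeColumnIdx: 1 },
//         renderColumns: function(event, ctx) {
//             // fill the remaining cells of ctx.node.tr per node
//             $(ctx.node.tr).find(">td").eq(2).text(ctx.node.key);
//         }
//     });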
|
PypiClean
|
/seishub.core-1.2.1.zip/seishub.core-1.2.1/seishub/core/packages/admin/web/statics/yui2/build/treeview/treeview-debug.js
|
(function () {
var Dom = YAHOO.util.Dom,
Event = YAHOO.util.Event,
Lang = YAHOO.lang,
Widget = YAHOO.widget;
/**
* The treeview widget is a generic tree building tool.
* @module treeview
* @title TreeView Widget
* @requires yahoo, dom, event
* @optional animation, json, calendar
* @namespace YAHOO.widget
*/
/**
* Contains the tree view state data and the root node.
*
* @class TreeView
* @uses YAHOO.util.EventProvider
* @constructor
* @param {string|HTMLElement} id The id of the element, or the element itself that the tree will be inserted into.
* Existing markup in this element, if valid, will be used to build the tree
* @param {Array|Object|String} oConfig (optional) If present, it will be used to build the tree via method <a href="#method_buildTreeFromObject">buildTreeFromObject</a>
*
*/
YAHOO.widget.TreeView = function(id, oConfig) {
if (id) { this.init(id); }
if (oConfig) {
this.buildTreeFromObject(oConfig);
} else if (Lang.trim(this._el.innerHTML)) {
this.buildTreeFromMarkup(id);
}
};
var TV = Widget.TreeView;
TV.prototype = {
/**
* The id of tree container element
* @property id
* @type String
*/
id: null,
/**
* The host element for this tree
* @property _el
* @private
* @type HTMLelement
*/
_el: null,
/**
* Flat collection of all nodes in this tree. This is a sparse
* array, so the length property can't be relied upon for a
* node count for the tree.
* @property _nodes
* @type Node[]
* @private
*/
_nodes: null,
/**
* We lock the tree control while waiting for the dynamic loader to return
* @property locked
* @type boolean
*/
locked: false,
/**
* The animation to use for expanding children, if any
* @property _expandAnim
* @type string
* @private
*/
_expandAnim: null,
/**
* The animation to use for collapsing children, if any
* @property _collapseAnim
* @type string
* @private
*/
_collapseAnim: null,
/**
* The current number of animations that are executing
* @property _animCount
* @type int
* @private
*/
_animCount: 0,
/**
* The maximum number of animations to run at one time.
* @property maxAnim
* @type int
*/
maxAnim: 2,
/**
* Whether there is any subscriber to dblClickEvent
* @property _hasDblClickSubscriber
* @type boolean
* @private
*/
_hasDblClickSubscriber: false,
/**
* Stores the timer used to check for double clicks
* @property _dblClickTimer
* @type window.timer object
* @private
*/
_dblClickTimer: null,
/**
* A reference to the Node currently having the focus or null if none.
* @property currentFocus
* @type YAHOO.widget.Node
*/
currentFocus: null,
/**
* If true, only one Node can be highlighted at a time
* @property singleNodeHighlight
* @type boolean
* @default false
*/
singleNodeHighlight: false,
/**
* A reference to the Node that is currently highlighted.
* It is only meaningful if singleNodeHighlight is enabled
* @property _currentlyHighlighted
* @type YAHOO.widget.Node
* @default null
* @private
*/
_currentlyHighlighted: null,
/**
* Sets up the animation for expanding children
* @method setExpandAnim
* @param {string} type the type of animation (acceptable values defined
* in YAHOO.widget.TVAnim)
*/
setExpandAnim: function(type) {
this._expandAnim = (Widget.TVAnim.isValid(type)) ? type : null;
},
/**
* Sets up the animation for collapsing children
* @method setCollapseAnim
* @param {string} type of animation (acceptable values defined in
* YAHOO.widget.TVAnim)
*/
setCollapseAnim: function(type) {
this._collapseAnim = (Widget.TVAnim.isValid(type)) ? type : null;
},
/**
* Perform the expand animation if configured, or just show the
* element if not configured or too many animations are in progress
* @method animateExpand
* @param el {HTMLElement} the element to animate
* @param node {YAHOO.util.Node} the node that was expanded
* @return {boolean} true if animation could be invoked, false otherwise
*/
animateExpand: function(el, node) {
this.logger.log("animating expand");
if (this._expandAnim && this._animCount < this.maxAnim) {
// this.locked = true;
var tree = this;
var a = Widget.TVAnim.getAnim(this._expandAnim, el,
function() { tree.expandComplete(node); });
if (a) {
++this._animCount;
this.fireEvent("animStart", {
"node": node,
"type": "expand"
});
a.animate();
}
return true;
}
return false;
},
/**
* Perform the collapse animation if configured, or just show the
* element if not configured or too many animations are in progress
* @method animateCollapse
* @param el {HTMLElement} the element to animate
* @param node {YAHOO.util.Node} the node that was expanded
* @return {boolean} true if animation could be invoked, false otherwise
*/
animateCollapse: function(el, node) {
this.logger.log("animating collapse");
if (this._collapseAnim && this._animCount < this.maxAnim) {
// this.locked = true;
var tree = this;
var a = Widget.TVAnim.getAnim(this._collapseAnim, el,
function() { tree.collapseComplete(node); });
if (a) {
++this._animCount;
this.fireEvent("animStart", {
"node": node,
"type": "collapse"
});
a.animate();
}
return true;
}
return false;
},
/**
* Function executed when the expand animation completes
* @method expandComplete
*/
expandComplete: function(node) {
this.logger.log("expand complete: " + this.id);
--this._animCount;
this.fireEvent("animComplete", {
"node": node,
"type": "expand"
});
// this.locked = false;
},
/**
* Function executed when the collapse animation completes
* @method collapseComplete
*/
collapseComplete: function(node) {
this.logger.log("collapse complete: " + this.id);
--this._animCount;
this.fireEvent("animComplete", {
"node": node,
"type": "collapse"
});
// this.locked = false;
},
/**
* Initializes the tree
* @method init
* @param {string|HTMLElement} id the id of the element that will hold the tree
* @private
*/
init: function(id) {
this._el = Dom.get(id);
this.id = Dom.generateId(this._el,"yui-tv-auto-id-");
/**
* When animation is enabled, this event fires when the animation
* starts
* @event animStart
* @type CustomEvent
* @param {YAHOO.widget.Node} oArgs.node the node that is expanding/collapsing
* @param {String} oArgs.type the type of animation ("expand" or "collapse")
*/
this.createEvent("animStart", this);
/**
* When animation is enabled, this event fires when the animation
* completes
* @event animComplete
* @type CustomEvent
* @param {YAHOO.widget.Node} oArgs.node the node that is expanding/collapsing
* @param {String} oArgs.type the type of animation ("expand" or "collapse")
*/
this.createEvent("animComplete", this);
/**
* Fires when a node is going to be collapsed. Return false to stop
* the collapse.
* @event collapse
* @type CustomEvent
* @param {YAHOO.widget.Node} node the node that is collapsing
*/
this.createEvent("collapse", this);
/**
* Fires after a node is successfully collapsed. This event will not fire
* if the "collapse" event was cancelled.
* @event collapseComplete
* @type CustomEvent
* @param {YAHOO.widget.Node} node the node that was collapsed
*/
this.createEvent("collapseComplete", this);
/**
* Fires when a node is going to be expanded. Return false to stop
* the expansion.
* @event expand
* @type CustomEvent
* @param {YAHOO.widget.Node} node the node that is expanding
*/
this.createEvent("expand", this);
/**
* Fires after a node is successfully expanded. This event will not fire
* if the "expand" event was cancelled.
* @event expandComplete
* @type CustomEvent
* @param {YAHOO.widget.Node} node the node that was expanded
*/
this.createEvent("expandComplete", this);
/**
* Fires when the Enter key is pressed on a node that has the focus
* @event enterKeyPressed
* @type CustomEvent
* @param {YAHOO.widget.Node} node the node that has the focus
*/
this.createEvent("enterKeyPressed", this);
/**
* Fires when the label in a TextNode or MenuNode or content in an HTMLNode receives a Click.
* The listener may return false to cancel toggling and focusing on the node.
* @event clickEvent
* @type CustomEvent
* @param oArgs.event {HTMLEvent} The event object
* @param oArgs.node {YAHOO.widget.Node} node the node that was clicked
*/
this.createEvent("clickEvent", this);
/**
* Fires when a Node receives the focus, when the focus moves from one Node
* to another, or when it is completely lost (blurred)
* @event focusChanged
* @type CustomEvent
* @param oArgs.oldNode {YAHOO.widget.Node} Node that had the focus or null if none
* @param oArgs.newNode {YAHOO.widget.Node} Node that receives the focus or null if none
*/
this.createEvent('focusChanged',this);
/**
* Fires when the label in a TextNode or MenuNode or content in an HTMLNode receives a double Click
* @event dblClickEvent
* @type CustomEvent
* @param oArgs.event {HTMLEvent} The event object
* @param oArgs.node {YAHOO.widget.Node} node the node that was clicked
*/
var self = this;
this.createEvent("dblClickEvent", {
scope:this,
onSubscribeCallback: function() {
self._hasDblClickSubscriber = true;
}
});
/**
* Custom event that is fired when the text node label is clicked.
* The node clicked is provided as an argument
*
* @event labelClick
* @type CustomEvent
* @param {YAHOO.widget.Node} node the node clicked
* @deprecated use clickEvent or dblClickEvent
*/
this.createEvent("labelClick", this);
/**
* Custom event fired when the highlight of a node changes.
* The node that triggered the change is provided as an argument:
* The status of the highlight can be checked in
* <a href="YAHOO.widget.Node.html#property_highlightState">nodeRef.highlightState</a>.
* Depending on <a href="YAHOO.widget.Node.html#property_propagateHighlight">nodeRef.propagateHighlight</a>, other nodes might have changed
* @event highlightEvent
* @type CustomEvent
* @param node {YAHOO.widget.Node} the node that started the change in highlighting state
*/
this.createEvent("highlightEvent",this);
this._nodes = [];
// store a global reference
TV.trees[this.id] = this;
// Set up the root node
this.root = new Widget.RootNode(this);
var LW = Widget.LogWriter;
this.logger = (LW) ? new LW(this.toString()) : YAHOO;
this.logger.log("tree init: " + this.id);
if (this._initEditor) {
this._initEditor();
}
// YAHOO.util.Event.onContentReady(this.id, this.handleAvailable, this, true);
// YAHOO.util.Event.on(this.id, "click", this.handleClick, this, true);
},
//handleAvailable: function() {
//var Event = YAHOO.util.Event;
//Event.on(this.id,
//},
/**
* Builds the TreeView from an object.
* This is the method called by the constructor to build the tree when it has a second argument.
* A tree can be described by an array of objects, each object corresponding to a node.
* Node descriptions may contain values for any property of a node plus the following extra properties: <ul>
* <li>type: can be one of the following:<ul>
* <li> A shortname for a node type (<code>'text','menu','html'</code>) </li>
* <li>The name of a Node class under YAHOO.widget (<code>'TextNode', 'MenuNode', 'DateNode'</code>, etc) </li>
* <li>a reference to an actual class: <code>YAHOO.widget.DateNode</code></li>
* </ul></li>
* <li>children: an array containing further node definitions</li></ul>
* A string instead of an object will produce a node of type 'text' with the given string as its label.
* @method buildTreeFromObject
* @param oConfig {Array|Object|String} array containing a full description of the tree.
* An object or a string will be turned into an array with the given object or string as its only element.
*
*/
buildTreeFromObject: function (oConfig) {
var logger = this.logger;
logger.log('Building tree from object');
var build = function (parent, oConfig) {
var i, item, node, children, type, NodeType, ThisType;
for (i = 0; i < oConfig.length; i++) {
item = oConfig[i];
if (Lang.isString(item)) {
node = new Widget.TextNode(item, parent);
} else if (Lang.isObject(item)) {
children = item.children;
delete item.children;
type = item.type || 'text';
delete item.type;
switch (Lang.isString(type) && type.toLowerCase()) {
case 'text':
node = new Widget.TextNode(item, parent);
break;
case 'menu':
node = new Widget.MenuNode(item, parent);
break;
case 'html':
node = new Widget.HTMLNode(item, parent);
break;
default:
if (Lang.isString(type)) {
NodeType = Widget[type];
} else {
NodeType = type;
}
if (Lang.isObject(NodeType)) {
for (ThisType = NodeType; ThisType && ThisType !== Widget.Node; ThisType = ThisType.superclass.constructor) {}
if (ThisType) {
node = new NodeType(item, parent);
} else {
logger.log('Invalid type in node definition: ' + type,'error');
}
} else {
logger.log('Invalid type in node definition: ' + type,'error');
}
}
if (children) {
build(node,children);
}
} else {
logger.log('Invalid node definition','error');
}
}
};
if (!Lang.isArray(oConfig)) {
oConfig = [oConfig];
}
build(this.root,oConfig);
},
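// Editor's note: an illustrative oConfig for buildTreeFromObject above;
// the element id "treeDiv" is an assumption, not part of this file:
//
//     var tree = new YAHOO.widget.TreeView("treeDiv", [
//         "plain text node",
//         { type: "menu", label: "Parent", expanded: true,
//           children: [ { type: "text", label: "Child" } ] }
//     ]);
//     tree.render();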
/**
* Builds the TreeView from existing markup. Markup should consist of <UL> or <OL> elements containing <LI> elements.
* Each <LI> can have one element used as label and a second optional element which is to be a <UL> or <OL>
* containing nested nodes.
* Depending on what the first element of the <LI> element is, the following Nodes will be created: <ul>
* <li>plain text: a regular TextNode</li>
* <li>anchor <A>: a TextNode with its <code>href</code> and <code>target</code> taken from the anchor</li>
* <li>anything else: an HTMLNode</li></ul>
* Only the first outermost (un-)ordered list in the markup and its children will be parsed.
* Nodes will be collapsed unless an <LI> tag has a className called 'expanded'.
* All other className attributes will be copied over to the Node className property.
* If the <LI> element contains an attribute called <code>yuiConfig</code>, its contents should be a JSON-encoded object
* as the one used in method <a href="#method_buildTreeFromObject">buildTreeFromObject</a>.
* @method buildTreeFromMarkup
* @param id {string|HTMLElement} The id of the element that contains the markup or a reference to it.
*/
buildTreeFromMarkup: function (id) {
this.logger.log('Building tree from existing markup');
var build = function (markup) {
var el, child, branch = [], config = {}, label, yuiConfig;
// Dom's getFirstChild and getNextSibling skip over text elements
for (el = Dom.getFirstChild(markup); el; el = Dom.getNextSibling(el)) {
switch (el.tagName.toUpperCase()) {
case 'LI':
label = '';
config = {
expanded: Dom.hasClass(el,'expanded'),
title: el.title || el.alt || null,
className: Lang.trim(el.className.replace(/\bexpanded\b/,'')) || null
};
// I cannot skip over text elements here because I want them for labels
child = el.firstChild;
if (child.nodeType == 3) {
// nodes with only whitespace, tabs and new lines don't count, they are probably just formatting.
label = Lang.trim(child.nodeValue.replace(/[\n\t\r]*/g,''));
if (label) {
config.type = 'text';
config.label = label;
} else {
child = Dom.getNextSibling(child);
}
}
if (!label) {
if (child.tagName.toUpperCase() == 'A') {
config.type = 'text';
config.label = child.innerHTML;
config.href = child.href;
config.target = child.target;
config.title = child.title || child.alt || config.title;
} else {
config.type = 'html';
var d = document.createElement('div');
d.appendChild(child.cloneNode(true));
config.html = d.innerHTML;
config.hasIcon = true;
}
}
// see if after the label it has a further list which will become children of this node.
child = Dom.getNextSibling(child);
switch (child && child.tagName.toUpperCase()) {
case 'UL':
case 'OL':
config.children = build(child);
break;
}
// if there are further elements or text, they will be ignored.
if (YAHOO.lang.JSON) {
yuiConfig = el.getAttribute('yuiConfig');
if (yuiConfig) {
yuiConfig = YAHOO.lang.JSON.parse(yuiConfig);
config = YAHOO.lang.merge(config,yuiConfig);
}
}
branch.push(config);
break;
case 'UL':
case 'OL':
this.logger.log('ULs or OLs can only contain LI elements, not other UL or OL. This will not work in some browsers','error');
config = {
type: 'text',
label: '',
children: build(child)
};
branch.push(config);
break;
}
}
return branch;
};
var markup = Dom.getChildrenBy(Dom.get(id),function (el) {
var tag = el.tagName.toUpperCase();
return tag == 'UL' || tag == 'OL';
});
if (markup.length) {
this.buildTreeFromObject(build(markup[0]));
} else {
this.logger.log('Markup contains no UL or OL elements','warn');
}
},
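// Editor's note: illustrative markup consumed by buildTreeFromMarkup
// above; ids and hrefs are assumptions, not part of this file:
//
//     <div id="markupTree">
//         <ul>
//             <li class="expanded">Fruit
//                 <ul><li><a href="/apples" target="_blank">Apples</a></li></ul>
//             </li>
//         </ul>
//     </div>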
/**
* Returns the TD element where the event has occurred
* @method _getEventTargetTdEl
* @private
*/
_getEventTargetTdEl: function (ev) {
var target = Event.getTarget(ev);
// go up looking for a TD with a className with a ygtv prefix
while (target && !(target.tagName.toUpperCase() == 'TD' && Dom.hasClass(target.parentNode,'ygtvrow'))) {
target = Dom.getAncestorByTagName(target,'td');
}
if (Lang.isNull(target)) { return null; }
// If it is a spacer cell, do nothing
if (/\bygtv(blank)?depthcell/.test(target.className)) { return null;}
// If it has an id, search for the node number and see if it belongs to a node in this tree.
if (target.id) {
var m = target.id.match(/\bygtv([^\d]*)(.*)/);
if (m && m[2] && this._nodes[m[2]]) {
return target;
}
}
return null;
},
/**
* Event listener for click events
* @method _onClickEvent
* @private
*/
_onClickEvent: function (ev) {
var self = this,
td = this._getEventTargetTdEl(ev),
node,
target,
toggle = function (force) {
node.focus();
if (force || !node.href) {
node.toggle();
try {
Event.preventDefault(ev);
} catch (e) {
// @TODO
// For some reason IE8 is providing an event object with
// most of the fields missing, but only when clicking on
// the node's label, and only when working with inline
// editing. This generates a "Member not found" error
// in that browser. Determine if this is a browser
// bug, or a problem with this code. Already checked to
// see if the problem has to do with access the event
// in the outer scope, and that isn't the problem.
// Maybe the markup for inline editing is broken.
}
}
};
if (!td) {
return;
}
node = this.getNodeByElement(td);
if (!node) {
return;
}
// exception to handle deprecated event labelClick
// @TODO take another look at this deprecation. It is common for people to
// only be interested in the label click, so why make them have to test
// the node type to figure out whether the click was on the label?
target = Event.getTarget(ev);
if (Dom.hasClass(target, node.labelStyle) || Dom.getAncestorByClassName(target,node.labelStyle)) {
this.logger.log("onLabelClick " + node.label);
this.fireEvent('labelClick',node);
}
// If it is a toggle cell, toggle
if (/\bygtv[tl][mp]h?h?/.test(td.className)) {
toggle(true);
} else {
if (this._dblClickTimer) {
window.clearTimeout(this._dblClickTimer);
this._dblClickTimer = null;
} else {
if (this._hasDblClickSubscriber) {
this._dblClickTimer = window.setTimeout(function () {
self._dblClickTimer = null;
if (self.fireEvent('clickEvent', {event:ev,node:node}) !== false) {
toggle();
}
}, 200);
} else {
if (self.fireEvent('clickEvent', {event:ev,node:node}) !== false) {
toggle();
}
}
}
}
},
/**
* Event listener for double-click events
* @method _onDblClickEvent
* @private
*/
_onDblClickEvent: function (ev) {
if (!this._hasDblClickSubscriber) { return; }
var td = this._getEventTargetTdEl(ev);
if (!td) {return;}
if (!(/\bygtv[tl][mp]h?h?/.test(td.className))) {
this.fireEvent('dblClickEvent', {event:ev, node:this.getNodeByElement(td)});
if (this._dblClickTimer) {
window.clearTimeout(this._dblClickTimer);
this._dblClickTimer = null;
}
}
},
/**
* Event listener for mouse over events
* @method _onMouseOverEvent
* @private
*/
_onMouseOverEvent:function (ev) {
var target;
if ((target = this._getEventTargetTdEl(ev)) && (target = this.getNodeByElement(target)) && (target = target.getToggleEl())) {
target.className = target.className.replace(/\bygtv([lt])([mp])\b/gi,'ygtv$1$2h');
}
},
/**
* Event listener for mouse out events
* @method _onMouseOutEvent
* @private
*/
_onMouseOutEvent: function (ev) {
var target;
if ((target = this._getEventTargetTdEl(ev)) && (target = this.getNodeByElement(target)) && (target = target.getToggleEl())) {
target.className = target.className.replace(/\bygtv([lt])([mp])h\b/gi,'ygtv$1$2');
}
},
/**
* Event listener for key down events
* @method _onKeyDownEvent
* @private
*/
_onKeyDownEvent: function (ev) {
var target = Event.getTarget(ev),
node = this.getNodeByElement(target),
newNode = node,
KEY = YAHOO.util.KeyListener.KEY;
switch(ev.keyCode) {
case KEY.UP:
this.logger.log('UP');
do {
if (newNode.previousSibling) {
newNode = newNode.previousSibling;
} else {
newNode = newNode.parent;
}
} while (newNode && !newNode._canHaveFocus());
if (newNode) { newNode.focus(); }
Event.preventDefault(ev);
break;
case KEY.DOWN:
this.logger.log('DOWN');
do {
if (newNode.nextSibling) {
newNode = newNode.nextSibling;
} else {
newNode.expand();
newNode = (newNode.children.length || null) && newNode.children[0];
}
} while (newNode && !newNode._canHaveFocus()); // call the method, matching the UP/LEFT handlers
if (newNode) { newNode.focus();}
Event.preventDefault(ev);
break;
case KEY.LEFT:
this.logger.log('LEFT');
do {
if (newNode.parent) {
newNode = newNode.parent;
} else {
newNode = newNode.previousSibling;
}
} while (newNode && !newNode._canHaveFocus());
if (newNode) { newNode.focus();}
Event.preventDefault(ev);
break;
case KEY.RIGHT:
this.logger.log('RIGHT');
var self = this,
moveFocusRight,
focusOnExpand = function (newNode) {
self.unsubscribe('expandComplete',focusOnExpand);
moveFocusRight(newNode);
};
moveFocusRight = function (newNode) {
do {
if (newNode.isDynamic() && !newNode.childrenRendered) {
self.subscribe('expandComplete',focusOnExpand);
newNode.expand();
newNode = null;
break;
} else {
newNode.expand();
if (newNode.children.length) {
newNode = newNode.children[0];
} else {
newNode = newNode.nextSibling;
}
}
} while (newNode && !newNode._canHaveFocus());
if (newNode) { newNode.focus();}
};
moveFocusRight(newNode);
Event.preventDefault(ev);
break;
case KEY.ENTER:
this.logger.log('ENTER: ' + newNode.href);
if (node.href) {
if (node.target) {
window.open(node.href,node.target);
} else {
window.location.href = node.href; // fix: window.location is not callable
}
} else {
node.toggle();
}
this.fireEvent('enterKeyPressed',node);
Event.preventDefault(ev);
break;
case KEY.HOME:
this.logger.log('HOME');
newNode = this.getRoot();
if (newNode.children.length) {newNode = newNode.children[0];}
if (newNode._canHaveFocus()) { newNode.focus(); }
Event.preventDefault(ev);
break;
case KEY.END:
this.logger.log('END');
newNode = newNode.parent.children;
newNode = newNode[newNode.length -1];
if (newNode._canHaveFocus()) { newNode.focus(); }
Event.preventDefault(ev);
break;
// case KEY.PAGE_UP:
// this.logger.log('PAGE_UP');
// break;
// case KEY.PAGE_DOWN:
// this.logger.log('PAGE_DOWN');
// break;
case 107: // plus key
if (ev.shiftKey) {
this.logger.log('Shift-PLUS');
node.parent.expandAll();
} else {
this.logger.log('PLUS');
node.expand();
}
break;
case 109: // minus key
if (ev.shiftKey) {
this.logger.log('Shift-MINUS');
node.parent.collapseAll();
} else {
this.logger.log('MINUS');
node.collapse();
}
break;
default:
break;
}
},
/**
* Renders the tree boilerplate and visible nodes
* @method render
*/
render: function() {
var html = this.root.getHtml(),
el = this.getEl();
el.innerHTML = html;
if (!this._hasEvents) {
Event.on(el, 'click', this._onClickEvent, this, true);
Event.on(el, 'dblclick', this._onDblClickEvent, this, true);
Event.on(el, 'mouseover', this._onMouseOverEvent, this, true);
Event.on(el, 'mouseout', this._onMouseOutEvent, this, true);
Event.on(el, 'keydown', this._onKeyDownEvent, this, true);
}
this._hasEvents = true;
},
/**
* Returns the tree's host element
* @method getEl
* @return {HTMLElement} the host element
*/
getEl: function() {
if (! this._el) {
this._el = Dom.get(this.id);
}
return this._el;
},
/**
* Nodes register themselves with the tree instance when they are created.
* @method regNode
* @param node {Node} the node to register
* @private
*/
regNode: function(node) {
this._nodes[node.index] = node;
},
/**
* Returns the root node of this tree
* @method getRoot
* @return {Node} the root node
*/
getRoot: function() {
return this.root;
},
/**
* Configures this tree to dynamically load all child data
* @method setDynamicLoad
* @param {function} fnDataLoader the function that will be called to get the data
* @param iconMode {int} configures the icon that is displayed when a dynamic
* load node is expanded the first time without children. By default, the
* "collapse" icon will be used. If set to 1, the leaf node icon will be
* displayed.
*/
setDynamicLoad: function(fnDataLoader, iconMode) {
this.root.setDynamicLoad(fnDataLoader, iconMode);
},
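// Editor's note: a hypothetical loader for setDynamicLoad above. The
// (node, fnLoadComplete) callback signature follows the YUI dynamic-load
// convention; the child label is an assumption:
//
//     tree.setDynamicLoad(function (node, fnLoadComplete) {
//         // fetch or compute children for `node` here, then:
//         node.addChild(new YAHOO.widget.TextNode("loaded child", node));
//         fnLoadComplete();   // signal that loading finished
//     });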
/**
* Expands all child nodes. Note: this conflicts with the "multiExpand"
* node property. If expand all is called in a tree with nodes that
* do not allow multiple siblings to be displayed, only the last sibling
* will be expanded.
* @method expandAll
*/
expandAll: function() {
if (!this.locked) {
this.root.expandAll();
}
},
/**
* Collapses all expanded child nodes in the entire tree.
* @method collapseAll
*/
collapseAll: function() {
if (!this.locked) {
this.root.collapseAll();
}
},
/**
* Returns a node in the tree that has the specified index (this index
* is created internally, so this function probably will only be used
* in html generated for a given node.)
* @method getNodeByIndex
* @param {int} nodeIndex the index of the node wanted
* @return {Node} the node with index=nodeIndex, null if no match
*/
getNodeByIndex: function(nodeIndex) {
var n = this._nodes[nodeIndex];
return (n) ? n : null;
},
/**
* Returns a node that has a matching property and value in the data
* object that was passed into its constructor.
* @method getNodeByProperty
* @param {object} property the property to search (usually a string)
* @param {object} value the value we want to find (usually an int or string)
* @return {Node} the matching node, null if no match
*/
getNodeByProperty: function(property, value) {
for (var i in this._nodes) {
if (this._nodes.hasOwnProperty(i)) {
var n = this._nodes[i];
if ((property in n && n[property] == value) || (n.data && value == n.data[property])) {
return n;
}
}
}
return null;
},
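/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree`, this looks up a node by a custom attribute supplied in the oData
* object at construction time. The attribute name "docId" is hypothetical.
*
*   var node = tree.getNodeByProperty("docId", 42);
*   if (node) { node.expand(); }
*/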
/**
* Returns a collection of nodes that have a matching property
* and value in the data object that was passed into its constructor.
* @method getNodesByProperty
* @param {object} property the property to search (usually a string)
* @param {object} value the value we want to find (usually an int or string)
* @return {Array} the matching collection of nodes, null if no match
*/
getNodesByProperty: function(property, value) {
var values = [];
for (var i in this._nodes) {
if (this._nodes.hasOwnProperty(i)) {
var n = this._nodes[i];
if ((property in n && n[property] == value) || (n.data && value == n.data[property])) {
values.push(n);
}
}
}
return (values.length) ? values : null;
},
/**
* Returns a collection of nodes that have passed the test function
* passed as its only argument.
* The function will receive a reference to each node to be tested.
* @method getNodesBy
* @param fn {function} a boolean function that receives a Node instance and returns true to add the node to the results list
* @return {Array} the matching collection of nodes, null if no match
*/
getNodesBy: function(fn) {
var values = [];
for (var i in this._nodes) {
if (this._nodes.hasOwnProperty(i)) {
var n = this._nodes[i];
if (fn(n)) {
values.push(n);
}
}
}
return (values.length) ? values : null;
},
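/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree`, this collects nodes with an arbitrary predicate, e.g. every
* currently expanded node. The result is an array of Node instances, or
* null if nothing matches.
*
*   var expandedNodes = tree.getNodesBy(function (n) {
*       return n.expanded;
*   });
*/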
/**
* Returns the treeview node reference for an ancestor element
* of the node, or null if it is not contained within any node
* in this tree.
* @method getNodeByElement
* @param el {HTMLElement} the element to test
* @return {YAHOO.widget.Node} a node reference or null
*/
getNodeByElement: function(el) {
var p=el, m, re=/ygtv([^\d]*)(.*)/;
do {
if (p && p.id) {
m = p.id.match(re);
if (m && m[2]) {
return this.getNodeByIndex(m[2]);
}
}
p = p.parentNode;
if (!p || !p.tagName) {
break;
}
}
while (p.id !== this.id && p.tagName.toLowerCase() !== "body");
return null;
},
/**
* When in singleNodeHighlight it returns the node highlighted
* or null if none. Returns null if singleNodeHighlight is false.
* @method getHighlightedNode
* @return {YAHOO.widget.Node} a node reference or null
*/
getHighlightedNode: function() {
return this._currentlyHighlighted;
},
/**
* Removes the node and its children, and optionally refreshes the
* branch of the tree that was affected.
* @method removeNode
* @param {Node} node to remove
* @param {boolean} autoRefresh automatically refreshes branch if true
* @return {boolean} False if there was a problem, true otherwise.
*/
removeNode: function(node, autoRefresh) {
// Don't delete the root node
if (node.isRoot()) {
return false;
}
// Get the branch that we may need to refresh
var p = node.parent;
if (p.parent) {
p = p.parent;
}
// Delete the node and its children
this._deleteNode(node);
// Refresh the parent of the parent
if (autoRefresh && p && p.childrenRendered) {
p.refresh();
}
return true;
},
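/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree`, this removes a node found by a hypothetical "docId" attribute and
* lets the tree refresh the affected branch.
*
*   var doomed = tree.getNodeByProperty("docId", 42);
*   if (doomed) {
*       tree.removeNode(doomed, true); // true triggers the branch refresh
*   }
*/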
/**
* wait until the animation is complete before deleting
* to avoid javascript errors
* @method _removeChildren_animComplete
* @param o the custom event payload
* @private
*/
_removeChildren_animComplete: function(o) {
this.unsubscribe(this._removeChildren_animComplete);
this.removeChildren(o.node);
},
/**
* Deletes this node's child collection, recursively. Also collapses
* the node, and resets the dynamic load flag. The primary use for
* this method is to purge a node and allow it to fetch its data
* dynamically again.
* @method removeChildren
* @param {Node} node the node to purge
*/
removeChildren: function(node) {
if (node.expanded) {
// wait until the animation is complete before deleting to
// avoid javascript errors
if (this._collapseAnim) {
this.subscribe("animComplete",
this._removeChildren_animComplete, this, true);
Widget.Node.prototype.collapse.call(node);
return;
}
node.collapse();
}
this.logger.log("Removing children for " + node);
while (node.children.length) {
this._deleteNode(node.children[0]);
}
if (node.isRoot()) {
Widget.Node.prototype.expand.call(node);
}
node.childrenRendered = false;
node.dynamicLoadComplete = false;
node.updateIcon();
},
/**
* Deletes the node and recurses children
* @method _deleteNode
* @private
*/
_deleteNode: function(node) {
// Remove all the child nodes first
this.removeChildren(node);
// Remove the node from the tree
this.popNode(node);
},
/**
* Removes the node from the tree, preserving the child collection
* to make it possible to insert the branch into another part of the
* tree, or another tree.
* @method popNode
* @param {Node} node to remove
*/
popNode: function(node) {
var p = node.parent;
// Update the parent's collection of children
var a = [];
for (var i=0, len=p.children.length;i<len;++i) {
if (p.children[i] != node) {
a[a.length] = p.children[i];
}
}
p.children = a;
// reset the childrenRendered flag for the parent
p.childrenRendered = false;
// Update the sibling relationship
if (node.previousSibling) {
node.previousSibling.nextSibling = node.nextSibling;
}
if (node.nextSibling) {
node.nextSibling.previousSibling = node.previousSibling;
}
if (this.currentFocus == node) {
this.currentFocus = null;
}
if (this._currentlyHighlighted == node) {
this._currentlyHighlighted = null;
}
node.parent = null;
node.previousSibling = null;
node.nextSibling = null;
node.tree = null;
// Update the tree's node collection
delete this._nodes[node.index];
},
/**
* Nulls out the entire TreeView instance and related objects, removes attached
* event listeners, and clears out DOM elements inside the container. After
* calling this method, the instance reference should be explicitly nulled by
* the implementer, as in myTree = null. Use with caution!
*
* @method destroy
*/
destroy : function() {
// Since the label editor can be separated from the main TreeView control,
// the destroy method for it might not be there.
if (this._destroyEditor) { this._destroyEditor(); }
var el = this.getEl();
Event.removeListener(el,'click');
Event.removeListener(el,'dblclick');
Event.removeListener(el,'mouseover');
Event.removeListener(el,'mouseout');
Event.removeListener(el,'keydown');
for (var i = 0 ; i < this._nodes.length; i++) {
var node = this._nodes[i];
if (node && node.destroy) {node.destroy(); }
}
el.innerHTML = '';
this._hasEvents = false;
},
/**
* TreeView instance toString
* @method toString
* @return {string} string representation of the tree
*/
toString: function() {
return "TreeView " + this.id;
},
/**
* Count of nodes in tree
* @method getNodeCount
* @return {int} number of nodes in the tree
*/
getNodeCount: function() {
return this.getRoot().getNodeCount();
},
/**
* Returns an object which could be used to rebuild the tree.
* It can be passed to the tree constructor to reproduce the same tree.
* It will return false if any node loads dynamically, regardless of whether it is loaded or not.
* @method getTreeDefinition
* @return {Object | false} definition of the tree or false if any node is defined as dynamic
*/
getTreeDefinition: function() {
return this.getRoot().getNodeDefinition();
},
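/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree`, this serializes a fully static tree and rebuilds it elsewhere;
* "otherDiv" is a hypothetical element id. The TreeView constructor accepts
* a definition object as its second argument.
*
*   var def = tree.getTreeDefinition();
*   if (def !== false) {
*       var copy = new YAHOO.widget.TreeView("otherDiv", def);
*       copy.render();
*   }
*/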
/**
* Abstract method that is executed when a node is expanded
* @method onExpand
* @param node {Node} the node that was expanded
* @deprecated use treeobj.subscribe("expand") instead
*/
onExpand: function(node) { },
/**
* Abstract method that is executed when a node is collapsed.
* @method onCollapse
* @param node {Node} the node that was collapsed.
* @deprecated use treeobj.subscribe("collapse") instead
*/
onCollapse: function(node) { },
/**
* Sets the value of a property for all loaded nodes in the tree.
* @method setNodesProperty
* @param name {string} Name of the property to be set
* @param value {any} value to be set
* @param refresh {boolean} if present and true, it does a refresh
*/
setNodesProperty: function(name, value, refresh) {
this.root.setNodesProperty(name,value);
if (refresh) {
this.root.refresh();
}
},
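/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree`, this switches every loaded node to propagate highlighting upward,
* with the third argument forcing a refresh so the change becomes visible
* immediately.
*
*   tree.setNodesProperty("propagateHighlightUp", true, true);
*/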
/**
* Event listener to toggle node highlight.
* Can be assigned as listener to clickEvent, dblClickEvent and enterKeyPressed.
* It returns false to prevent the default action.
* @method onEventToggleHighlight
* @param oArgs {any} it takes the arguments of any of the events mentioned above
* @return {false} Always cancels the default action for the event
*/
onEventToggleHighlight: function (oArgs) {
var node;
if ('node' in oArgs && oArgs.node instanceof Widget.Node) {
node = oArgs.node;
} else if (oArgs instanceof Widget.Node) {
node = oArgs;
} else {
return false;
}
node.toggleHighlight();
return false;
}
};
/* Backwards compatibility aliases */
var PROT = TV.prototype;
/**
* Renders the tree boilerplate and visible nodes.
* Alias for render
* @method draw
* @deprecated Use render instead
*/
PROT.draw = PROT.render;
/* end backwards compatibility aliases */
YAHOO.augment(TV, YAHOO.util.EventProvider);
/**
* Running count of all nodes created in all trees. This is
* used to provide unique identifiers for all nodes. Deleting
* nodes does not change the nodeCount.
* @property YAHOO.widget.TreeView.nodeCount
* @type int
* @static
*/
TV.nodeCount = 0;
/**
* Global cache of tree instances
* @property YAHOO.widget.TreeView.trees
* @type Array
* @static
* @private
*/
TV.trees = [];
/**
* Global method for getting a tree by its id. Used in the generated
* tree html.
* @method YAHOO.widget.TreeView.getTree
* @param treeId {String} the id of the tree instance
* @return {TreeView} the tree instance requested, null if not found.
* @static
*/
TV.getTree = function(treeId) {
var t = TV.trees[treeId];
return (t) ? t : null;
};
/**
* Global method for getting a node by its id. Used in the generated
* tree html.
* @method YAHOO.widget.TreeView.getNode
* @param treeId {String} the id of the tree instance
* @param nodeIndex {String} the index of the node to return
* @return {Node} the node instance requested, null if not found
* @static
*/
TV.getNode = function(treeId, nodeIndex) {
var t = TV.getTree(treeId);
return (t) ? t.getNodeByIndex(nodeIndex) : null;
};
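/*
* Usage sketch (illustrative): the static lookups used by the generated
* markup can also be called directly; "treeDiv" is a hypothetical tree id
* and 3 a hypothetical node index.
*
*   var sameTree = YAHOO.widget.TreeView.getTree("treeDiv");
*   var thirdNode = YAHOO.widget.TreeView.getNode("treeDiv", 3);
*/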
/**
* Class name assigned to elements that have the focus
*
* @property TreeView.FOCUS_CLASS_NAME
* @type String
* @static
* @final
* @default "ygtvfocus"
*/
TV.FOCUS_CLASS_NAME = 'ygtvfocus';
})();
(function () {
var Dom = YAHOO.util.Dom,
Lang = YAHOO.lang,
Event = YAHOO.util.Event;
/**
* The base class for all tree nodes. The node's presentation and behavior in
* response to mouse events is handled in Node subclasses.
* @namespace YAHOO.widget
* @class Node
* @uses YAHOO.util.EventProvider
* @param oData {object} a string or object containing the data that will
* be used to render this node, and any custom attributes that should be
* stored with the node (which is available in noderef.data).
* All values in oData will be used to set equally named properties in the node,
* as long as the node has such properties and they are not undefined, private, or functions;
* the rest of the values will be stored in noderef.data.
* @param oParent {Node} this node's parent node
* @param expanded {boolean} the initial expanded/collapsed state (deprecated, use oData.expanded)
* @constructor
*/
YAHOO.widget.Node = function(oData, oParent, expanded) {
if (oData) { this.init(oData, oParent, expanded); }
};
YAHOO.widget.Node.prototype = {
/**
* The index for this instance obtained from global counter in YAHOO.widget.TreeView.
* @property index
* @type int
*/
index: 0,
/**
* This node's child node collection.
* @property children
* @type Node[]
*/
children: null,
/**
* Tree instance this node is part of
* @property tree
* @type TreeView
*/
tree: null,
/**
* The data linked to this node. This can be any object or primitive
* value, and the data can be used in getNodeHtml().
* @property data
* @type object
*/
data: null,
/**
* Parent node
* @property parent
* @type Node
*/
parent: null,
/**
* The depth of this node. We start at -1 for the root node.
* @property depth
* @type int
*/
depth: -1,
/**
* The node's expanded/collapsed state
* @property expanded
* @type boolean
*/
expanded: false,
/**
* Can multiple children be expanded at once?
* @property multiExpand
* @type boolean
*/
multiExpand: true,
/**
* Should we render children for a collapsed node? It is possible that the
* implementer will want to render the hidden data... @todo verify that we
* need this, and implement it if we do.
* @property renderHidden
* @type boolean
*/
renderHidden: false,
/**
* This flag is set to true when the html is generated for this node's
* children, and set to false when new children are added.
* @property childrenRendered
* @type boolean
*/
childrenRendered: false,
/**
* Dynamically loaded nodes only fetch the data the first time they are
* expanded. This flag is set to true once the data has been fetched.
* @property dynamicLoadComplete
* @type boolean
*/
dynamicLoadComplete: false,
/**
* This node's previous sibling
* @property previousSibling
* @type Node
*/
previousSibling: null,
/**
* This node's next sibling
* @property nextSibling
* @type Node
*/
nextSibling: null,
/**
* We can set the node up to call an external method to get the child
* data dynamically.
* @property _dynLoad
* @type boolean
* @private
*/
_dynLoad: false,
/**
* Function to execute when we need to get this node's child data.
* @property dataLoader
* @type function
*/
dataLoader: null,
/**
* This is true for dynamically loading nodes while waiting for the
* callback to return.
* @property isLoading
* @type boolean
*/
isLoading: false,
/**
* The toggle/branch icon will not show if this is set to false. This
* could be useful if the implementer wants to have the child contain
* extra info about the parent, rather than an actual node.
* @property hasIcon
* @type boolean
*/
hasIcon: true,
/**
* Used to configure what happens when a dynamic load node is expanded
* and we discover that it does not have children. By default, it is
* treated as if it still could have children (plus/minus icon). Set
* iconMode to have it display like a leaf node instead.
* @property iconMode
* @type int
*/
iconMode: 0,
/**
* Specifies whether or not the content area of the node should be allowed
* to wrap.
* @property nowrap
* @type boolean
* @default false
*/
nowrap: false,
/**
* If true, the node will always be rendered as a leaf node. This can be
* used to override the presentation when dynamically loading the entire
* tree. Setting this to true also disables the dynamic load call for the
* node.
* @property isLeaf
* @type boolean
* @default false
*/
isLeaf: false,
/**
* The CSS class for the html content container. Defaults to ygtvhtml, but
* can be overridden to provide a custom presentation for a specific node.
* @property contentStyle
* @type string
*/
contentStyle: "",
/**
* The generated id that will contain the data passed in by the implementer.
* @property contentElId
* @type string
*/
contentElId: null,
/**
* Enables node highlighting. If true, the node can be highlighted and/or propagate highlighting
* @property enableHighlight
* @type boolean
* @default true
*/
enableHighlight: true,
/**
* Stores the highlight state. Can be any of:
* <ul>
* <li>0 - not highlighted</li>
* <li>1 - highlighted</li>
* <li>2 - some children highlighted</li>
* </ul>
* @property highlightState
* @type integer
* @default 0
*/
highlightState: 0,
/**
* Tells whether highlighting will be propagated up to the parents of the clicked node
* @property propagateHighlightUp
* @type boolean
* @default false
*/
propagateHighlightUp: false,
/**
* Tells whether highlighting will be propagated down to the children of the clicked node
* @property propagateHighlightDown
* @type boolean
* @default false
*/
propagateHighlightDown: false,
/**
* User-defined className to be added to the Node
* @property className
* @type string
* @default null
*/
className: null,
/**
* The node type
* @property _type
* @private
* @type string
* @default "Node"
*/
_type: "Node",
/*
spacerPath: "http://l.yimg.com/a/i/space.gif",
expandedText: "Expanded",
collapsedText: "Collapsed",
loadingText: "Loading",
*/
/**
* Initializes this node, gets some of the properties from the parent
* @method init
* @param oData {object} a string or object containing the data that will
* be used to render this node
* @param oParent {Node} this node's parent node
* @param expanded {boolean} the initial expanded/collapsed state
*/
init: function(oData, oParent, expanded) {
this.data = {};
this.children = [];
this.index = YAHOO.widget.TreeView.nodeCount;
++YAHOO.widget.TreeView.nodeCount;
this.contentElId = "ygtvcontentel" + this.index;
if (Lang.isObject(oData)) {
for (var property in oData) {
if (oData.hasOwnProperty(property)) {
if (property.charAt(0) != '_' && !Lang.isUndefined(this[property]) && !Lang.isFunction(this[property]) ) {
this[property] = oData[property];
} else {
this.data[property] = oData[property];
}
}
}
}
if (!Lang.isUndefined(expanded) ) { this.expanded = expanded; }
this.logger = new YAHOO.widget.LogWriter(this.toString());
/**
* The parentChange event is fired when a parent element is applied
* to the node. This is useful if you need to apply tree-level
* properties to a tree that need to happen if a node is moved from
* one tree to another.
*
* @event parentChange
* @type CustomEvent
*/
this.createEvent("parentChange", this);
// oParent should never be null except when we create the root node.
if (oParent) {
oParent.appendChild(this);
}
},
/**
* Certain properties for the node cannot be set until the parent
* is known. This is called after the node is inserted into a tree.
* The parent is also applied to this node's children in order to
* make it possible to move a branch from one tree to another.
* @method applyParent
* @param {Node} parentNode this node's parent node
* @return {boolean} true if the application was successful
*/
applyParent: function(parentNode) {
if (!parentNode) {
return false;
}
this.tree = parentNode.tree;
this.parent = parentNode;
this.depth = parentNode.depth + 1;
// @todo why was this put here. This causes new nodes added at the
// root level to lose the menu behavior.
// if (! this.multiExpand) {
// this.multiExpand = parentNode.multiExpand;
// }
this.tree.regNode(this);
parentNode.childrenRendered = false;
// cascade update existing children
for (var i=0, len=this.children.length;i<len;++i) {
this.children[i].applyParent(this);
}
this.fireEvent("parentChange");
return true;
},
/**
* Appends a node to the child collection.
* @method appendChild
* @param childNode {Node} the new node
* @return {Node} the child node
* @private
*/
appendChild: function(childNode) {
if (this.hasChildren()) {
var sib = this.children[this.children.length - 1];
sib.nextSibling = childNode;
childNode.previousSibling = sib;
}
this.children[this.children.length] = childNode;
childNode.applyParent(this);
// part of the IE display issue workaround. If child nodes
// are added after the initial render, and the node was
// instantiated with expanded = true, we need to show the
// children div now that the node has a child.
if (this.childrenRendered && this.expanded) {
this.getChildrenEl().style.display = "";
}
return childNode;
},
/**
* Appends this node to the supplied node's child collection
* @method appendTo
* @param parentNode {Node} the node to append to.
* @return {Node} The appended node
*/
appendTo: function(parentNode) {
return parentNode.appendChild(this);
},
/**
* Inserts this node before this supplied node
* @method insertBefore
* @param node {Node} the node to insert this node before
* @return {Node} the inserted node
*/
insertBefore: function(node) {
this.logger.log("insertBefore: " + node);
var p = node.parent;
if (p) {
if (this.tree) {
this.tree.popNode(this);
}
var refIndex = node.isChildOf(p);
//this.logger.log(refIndex);
p.children.splice(refIndex, 0, this);
if (node.previousSibling) {
node.previousSibling.nextSibling = this;
}
this.previousSibling = node.previousSibling;
this.nextSibling = node;
node.previousSibling = this;
this.applyParent(p);
}
return this;
},
/**
* Inserts this node after the supplied node
* @method insertAfter
* @param node {Node} the node to insert after
* @return {Node} the inserted node
*/
insertAfter: function(node) {
this.logger.log("insertAfter: " + node);
var p = node.parent;
if (p) {
if (this.tree) {
this.tree.popNode(this);
}
var refIndex = node.isChildOf(p);
this.logger.log(refIndex);
if (!node.nextSibling) {
this.nextSibling = null;
return this.appendTo(p);
}
p.children.splice(refIndex + 1, 0, this);
node.nextSibling.previousSibling = this;
this.previousSibling = node;
this.nextSibling = node.nextSibling;
node.nextSibling = this;
this.applyParent(p);
}
return this;
},
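/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree` and two of its nodes, this moves a node next to a reference node.
* popNode is called internally, so the moved branch keeps its children
* while being re-parented; re-rendering afterwards is one way to make the
* change visible.
*
*   movedNode.insertAfter(refNode);  // movedNode becomes refNode's next sibling
*   movedNode.insertBefore(refNode); // or its previous sibling
*   tree.render();
*/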
/**
* Returns the index of this node in the supplied parent node's
* children collection, or -1 if this node is not a child of it.
* @method isChildOf
* @param parentNode {Node} the Node to check
* @return {int} The node index if this Node is a child of the
* supplied Node, else -1.
* @private
*/
isChildOf: function(parentNode) {
if (parentNode && parentNode.children) {
for (var i=0, len=parentNode.children.length; i<len ; ++i) {
if (parentNode.children[i] === this) {
return i;
}
}
}
return -1;
},
/**
* Returns a node array of this node's siblings, null if none.
* @method getSiblings
* @return Node[]
*/
getSiblings: function() {
var sib = this.parent.children.slice(0);
for (var i=0;i < sib.length && sib[i] != this;i++) {}
sib.splice(i,1);
if (sib.length) { return sib; }
return null;
},
/**
* Shows this node's children
* @method showChildren
*/
showChildren: function() {
if (!this.tree.animateExpand(this.getChildrenEl(), this)) {
if (this.hasChildren()) {
this.getChildrenEl().style.display = "";
}
}
},
/**
* Hides this node's children
* @method hideChildren
*/
hideChildren: function() {
this.logger.log("hiding " + this.index);
if (!this.tree.animateCollapse(this.getChildrenEl(), this)) {
this.getChildrenEl().style.display = "none";
}
},
/**
* Returns the id for this node's container div
* @method getElId
* @return {string} the element id
*/
getElId: function() {
return "ygtv" + this.index;
},
/**
* Returns the id for this node's children div
* @method getChildrenElId
* @return {string} the element id for this node's children div
*/
getChildrenElId: function() {
return "ygtvc" + this.index;
},
/**
* Returns the id for this node's toggle element
* @method getToggleElId
* @return {string} the toggle element id
*/
getToggleElId: function() {
return "ygtvt" + this.index;
},
/*
* Returns the id for this node's spacer image. The spacer is positioned
* over the toggle and provides feedback for screen readers.
* @method getSpacerId
* @return {string} the id for the spacer image
*/
/*
getSpacerId: function() {
return "ygtvspacer" + this.index;
},
*/
/**
* Returns this node's container html element
* @method getEl
* @return {HTMLElement} the container html element
*/
getEl: function() {
return Dom.get(this.getElId());
},
/**
* Returns the div that was generated for this node's children
* @method getChildrenEl
* @return {HTMLElement} this node's children div
*/
getChildrenEl: function() {
return Dom.get(this.getChildrenElId());
},
/**
* Returns the element that is being used for this node's toggle.
* @method getToggleEl
* @return {HTMLElement} this node's toggle html element
*/
getToggleEl: function() {
return Dom.get(this.getToggleElId());
},
/**
* Returns the outer html element for this node's content
* @method getContentEl
* @return {HTMLElement} the element
*/
getContentEl: function() {
return Dom.get(this.contentElId);
},
/*
* Returns the element that is being used for this node's spacer.
* @method getSpacer
* @return {HTMLElement} this node's spacer html element
*/
/*
getSpacer: function() {
return document.getElementById( this.getSpacerId() ) || {};
},
*/
/*
getStateText: function() {
if (this.isLoading) {
return this.loadingText;
} else if (this.hasChildren(true)) {
if (this.expanded) {
return this.expandedText;
} else {
return this.collapsedText;
}
} else {
return "";
}
},
*/
/**
* Hides this node's children (creating them if necessary), changes the toggle style.
* @method collapse
*/
collapse: function() {
// Only collapse if currently expanded
if (!this.expanded) { return; }
// fire the collapse event handler
var ret = this.tree.onCollapse(this);
if (false === ret) {
this.logger.log("Collapse was stopped by the abstract onCollapse");
return;
}
ret = this.tree.fireEvent("collapse", this);
if (false === ret) {
this.logger.log("Collapse was stopped by a custom event handler");
return;
}
if (!this.getEl()) {
this.expanded = false;
} else {
// hide the child div
this.hideChildren();
this.expanded = false;
this.updateIcon();
}
// this.getSpacer().title = this.getStateText();
ret = this.tree.fireEvent("collapseComplete", this);
},
/**
* Shows this node's children (creating them if necessary), changes the
* toggle style, and collapses its siblings if multiExpand is not set.
* @method expand
*/
expand: function(lazySource) {
// Only expand if currently collapsed.
if (this.isLoading || (this.expanded && !lazySource)) {
return;
}
var ret = true;
// When returning from the lazy load handler, expand is called again
// in order to render the new children. The "expand" event already
// fired before fetching the new data, so we need to skip it now.
if (!lazySource) {
// fire the expand event handler
ret = this.tree.onExpand(this);
if (false === ret) {
this.logger.log("Expand was stopped by the abstract onExpand");
return;
}
ret = this.tree.fireEvent("expand", this);
}
if (false === ret) {
this.logger.log("Expand was stopped by the custom event handler");
return;
}
if (!this.getEl()) {
this.expanded = true;
return;
}
if (!this.childrenRendered) {
this.logger.log("children not rendered yet");
this.getChildrenEl().innerHTML = this.renderChildren();
} else {
this.logger.log("children already rendered");
}
this.expanded = true;
this.updateIcon();
// this.getSpacer().title = this.getStateText();
// We do an extra check for children here because the lazy
// load feature can expose nodes that have no children.
// if (!this.hasChildren()) {
if (this.isLoading) {
this.expanded = false;
return;
}
if (! this.multiExpand) {
var sibs = this.getSiblings();
for (var i=0; sibs && i<sibs.length; ++i) {
if (sibs[i] != this && sibs[i].expanded) {
sibs[i].collapse();
}
}
}
this.showChildren();
ret = this.tree.fireEvent("expandComplete", this);
},
updateIcon: function() {
if (this.hasIcon) {
var el = this.getToggleEl();
if (el) {
el.className = el.className.replace(/\bygtv(([tl][pmn]h?)|(loading))\b/gi,this.getStyle());
}
}
},
/**
* Returns the css style name for the toggle
* @method getStyle
* @return {string} the css class for this node's toggle
*/
getStyle: function() {
// this.logger.log("No children, " + " isDyanmic: " + this.isDynamic() + " expanded: " + this.expanded);
if (this.isLoading) {
this.logger.log("returning the loading icon");
return "ygtvloading";
} else {
// location top or bottom, middle nodes also get the top style
var loc = (this.nextSibling) ? "t" : "l";
// type p=plus(expand), m=minus(collapse), n=none(no children)
var type = "n";
if (this.hasChildren(true) || (this.isDynamic() && !this.getIconMode())) {
// if (this.hasChildren(true)) {
type = (this.expanded) ? "m" : "p";
}
// this.logger.log("ygtv" + loc + type);
return "ygtv" + loc + type;
}
},
/**
* Returns the hover style for the icon
* @return {string} the css class hover state
* @method getHoverStyle
*/
getHoverStyle: function() {
var s = this.getStyle();
if (this.hasChildren(true) && !this.isLoading) {
s += "h";
}
return s;
},
/**
* Recursively expands all of this node's children.
* @method expandAll
*/
expandAll: function() {
var l = this.children.length;
for (var i=0;i<l;++i) {
var c = this.children[i];
if (c.isDynamic()) {
this.logger.log("Not supported (lazy load + expand all)");
break;
} else if (! c.multiExpand) {
this.logger.log("Not supported (no multi-expand + expand all)");
break;
} else {
c.expand();
c.expandAll();
}
}
},
/**
* Recursively collapses all of this node's children.
* @method collapseAll
*/
collapseAll: function() {
for (var i=0;i<this.children.length;++i) {
this.children[i].collapse();
this.children[i].collapseAll();
}
},
/**
* Configures this node for dynamically obtaining the child data
* when the node is first expanded. Calling it without the callback
* will turn off dynamic load for the node.
* @method setDynamicLoad
* @param fnDataLoader {function} the function that will be used to get the data.
* @param iconMode {int} configures the icon that is displayed when a dynamic
* load node is expanded the first time without children. By default, the
* "collapse" icon will be used. If set to 1, the leaf node icon will be
* displayed.
*/
setDynamicLoad: function(fnDataLoader, iconMode) {
if (fnDataLoader) {
this.dataLoader = fnDataLoader;
this._dynLoad = true;
} else {
this.dataLoader = null;
this._dynLoad = false;
}
if (iconMode) {
this.iconMode = iconMode;
}
},
/**
* Evaluates if this node is the root node of the tree
* @method isRoot
* @return {boolean} true if this is the root node
*/
isRoot: function() {
return (this == this.tree.root);
},
/**
* Evaluates if this node's children should be loaded dynamically. Looks for
* the property both in this instance and the root node. If the tree is
* defined to load all children dynamically, the data callback function is
* defined in the root node
* @method isDynamic
* @return {boolean} true if this node's children are to be loaded dynamically
*/
isDynamic: function() {
if (this.isLeaf) {
return false;
} else {
return (!this.isRoot() && (this._dynLoad || this.tree.root._dynLoad));
// this.logger.log("isDynamic: " + lazy);
// return lazy;
}
},
/**
* Returns the current icon mode. This refers to the way childless dynamic
* load nodes appear (this comes into play only after the initial dynamic
* load request produced no children).
* @method getIconMode
* @return {int} 0 for collapse style, 1 for leaf node style
*/
getIconMode: function() {
return (this.iconMode || this.tree.root.iconMode);
},
/**
* Checks if this node has children. If this node is lazy-loading and the
* children have not been rendered, we do not know whether or not there
* are actual children. In most cases, we need to assume that there are
* children (for instance, the toggle needs to show the expandable
* presentation state). At other times we want to know if there are rendered
* children. For the latter, "checkForLazyLoad" should be false.
* @method hasChildren
* @param checkForLazyLoad {boolean} should we check for unloaded children?
* @return {boolean} true if this has children or if it might and we are
* checking for this condition.
*/
hasChildren: function(checkForLazyLoad) {
if (this.isLeaf) {
return false;
} else {
return ( this.children.length > 0 ||
(checkForLazyLoad && this.isDynamic() && !this.dynamicLoadComplete)
);
}
},
/**
* Expands if node is collapsed, collapses otherwise.
* @method toggle
*/
toggle: function() {
if (!this.tree.locked && ( this.hasChildren(true) || this.isDynamic()) ) {
if (this.expanded) { this.collapse(); } else { this.expand(); }
}
},
/**
* Returns the markup for this node and its children.
* @method getHtml
* @return {string} the markup for this node and its expanded children.
*/
getHtml: function() {
this.childrenRendered = false;
return ['<div class="ygtvitem" id="' , this.getElId() , '">' ,this.getNodeHtml() , this.getChildrenHtml() ,'</div>'].join("");
},
/**
* Called when first rendering the tree. We always build the div that will
* contain this nodes children, but we don't render the children themselves
* unless this node is expanded.
* @method getChildrenHtml
* @return {string} the children container div html and any expanded children
* @private
*/
getChildrenHtml: function() {
var sb = [];
sb[sb.length] = '<div class="ygtvchildren" id="' + this.getChildrenElId() + '"';
// This is a workaround for an IE rendering issue, the child div has layout
// in IE, creating extra space if a leaf node is created with the expanded
// property set to true.
if (!this.expanded || !this.hasChildren()) {
sb[sb.length] = ' style="display:none;"';
}
sb[sb.length] = '>';
// this.logger.log(["index", this.index,
// "hasChildren", this.hasChildren(true),
// "expanded", this.expanded,
// "renderHidden", this.renderHidden,
// "isDynamic", this.isDynamic()]);
// Don't render the actual child node HTML unless this node is expanded.
if ( (this.hasChildren(true) && this.expanded) ||
(this.renderHidden && !this.isDynamic()) ) {
sb[sb.length] = this.renderChildren();
}
sb[sb.length] = '</div>';
return sb.join("");
},
/**
* Generates the markup for the child nodes. This is not done until the node
* is expanded.
* @method renderChildren
* @return {string} the html for this node's children
* @private
*/
renderChildren: function() {
this.logger.log("rendering children for " + this.index);
var node = this;
if (this.isDynamic() && !this.dynamicLoadComplete) {
this.isLoading = true;
this.tree.locked = true;
if (this.dataLoader) {
this.logger.log("Using dynamic loader defined for this node");
setTimeout(
function() {
node.dataLoader(node,
function() {
node.loadComplete();
});
}, 10);
} else if (this.tree.root.dataLoader) {
this.logger.log("Using the tree-level dynamic loader");
setTimeout(
function() {
node.tree.root.dataLoader(node,
function() {
node.loadComplete();
});
}, 10);
} else {
this.logger.log("no loader found");
return "Error: data loader not found or not specified.";
}
return "";
} else {
return this.completeRender();
}
},
/**
* Called when we know we have all the child data.
* @method completeRender
* @return {string} children html
*/
completeRender: function() {
this.logger.log("completeRender: " + this.index + ", # of children: " + this.children.length);
var sb = [];
for (var i=0; i < this.children.length; ++i) {
// this.children[i].childrenRendered = false;
sb[sb.length] = this.children[i].getHtml();
}
this.childrenRendered = true;
return sb.join("");
},
/**
* Load complete is the callback function we pass to the data provider
* in dynamic load situations.
* @method loadComplete
*/
loadComplete: function() {
this.logger.log(this.index + " loadComplete, children: " + this.children.length);
this.getChildrenEl().innerHTML = this.completeRender();
if (this.propagateHighlightDown) {
if (this.highlightState === 1 && !this.tree.singleNodeHighlight) {
for (var i = 0; i < this.children.length; i++) {
this.children[i].highlight(true);
}
} else if (this.highlightState === 0 || this.tree.singleNodeHighlight) {
for (i = 0; i < this.children.length; i++) {
this.children[i].unhighlight(true);
}
} // if (highlightState == 2) leave child nodes with whichever highlight state they already have
}
this.dynamicLoadComplete = true;
this.isLoading = false;
this.expand(true);
this.tree.locked = false;
},
/**
* Returns this node's ancestor at the specified depth.
* @method getAncestor
* @param {int} depth the depth of the ancestor.
* @return {Node} the ancestor
*/
getAncestor: function(depth) {
if (depth >= this.depth || depth < 0) {
this.logger.log("illegal getAncestor depth: " + depth);
return null;
}
var p = this.parent;
while (p.depth > depth) {
p = p.parent;
}
return p;
},
/**
* Returns the css class for the spacer at the specified depth for
* this node. If this node's ancestor at the specified depth
* has a next sibling the presentation is different than if it
* does not have a next sibling
* @method getDepthStyle
* @param {int} depth the depth of the ancestor.
* @return {string} the css class for the spacer
*/
getDepthStyle: function(depth) {
return (this.getAncestor(depth).nextSibling) ?
"ygtvdepthcell" : "ygtvblankdepthcell";
},
/**
* Get the markup for the node. This may be overridden so that we can
* support different types of nodes.
* @method getNodeHtml
* @return {string} The HTML that will render this node.
*/
getNodeHtml: function() {
this.logger.log("Generating html");
var sb = [];
sb[sb.length] = '<table id="ygtvtableel' + this.index + '" border="0" cellpadding="0" cellspacing="0" class="ygtvtable ygtvdepth' + this.depth;
if (this.enableHighlight) {
sb[sb.length] = ' ygtv-highlight' + this.highlightState;
}
if (this.className) {
sb[sb.length] = ' ' + this.className;
}
sb[sb.length] = '"><tr class="ygtvrow">';
for (var i=0;i<this.depth;++i) {
sb[sb.length] = '<td class="ygtvcell ' + this.getDepthStyle(i) + '"><div class="ygtvspacer"></div></td>';
}
if (this.hasIcon) {
sb[sb.length] = '<td id="' + this.getToggleElId();
sb[sb.length] = '" class="ygtvcell ';
sb[sb.length] = this.getStyle() ;
sb[sb.length] = '"><a href="#" class="ygtvspacer"> </a></td>';
}
sb[sb.length] = '<td id="' + this.contentElId;
sb[sb.length] = '" class="ygtvcell ';
sb[sb.length] = this.contentStyle + ' ygtvcontent" ';
sb[sb.length] = (this.nowrap) ? ' nowrap="nowrap" ' : '';
sb[sb.length] = ' >';
sb[sb.length] = this.getContentHtml();
sb[sb.length] = '</td></tr></table>';
return sb.join("");
},
/**
* Get the markup for the contents of the node. This is designed to be overridden so that we can
* support different types of nodes.
* @method getContentHtml
* @return {string} The HTML that will render the content of this node.
*/
getContentHtml: function () {
return "";
},
/**
* Regenerates the html for this node and its children. To be used when the
* node is expanded and new children have been added.
* @method refresh
*/
refresh: function() {
// this.loadComplete();
this.getChildrenEl().innerHTML = this.completeRender();
if (this.hasIcon) {
var el = this.getToggleEl();
if (el) {
el.className = el.className.replace(/\bygtv[lt][nmp]h*\b/gi,this.getStyle());
}
}
},
/**
* Node toString
* @method toString
* @return {string} string representation of the node
*/
toString: function() {
return this._type + " (" + this.index + ")";
},
/**
* array of items that had the focus set on them
* so that they can be cleaned when focus is lost
* @property _focusHighlightedItems
* @type Array of DOM elements
* @private
*/
_focusHighlightedItems: [],
/**
* DOM element that actually got the browser focus
* @property _focusedItem
* @type DOM element
* @private
*/
_focusedItem: null,
/**
* Returns true if there are any elements in the node that can
* accept the actual browser focus
* @method _canHaveFocus
* @return {boolean} success
* @private
*/
_canHaveFocus: function() {
return this.getEl().getElementsByTagName('a').length > 0;
},
/**
* Removes the focus of previously selected Node
* @method _removeFocus
* @private
*/
_removeFocus:function () {
if (this._focusedItem) {
Event.removeListener(this._focusedItem,'blur');
this._focusedItem = null;
}
var el;
while ((el = this._focusHighlightedItems.shift())) { // yes, it is meant as an assignment, really
Dom.removeClass(el,YAHOO.widget.TreeView.FOCUS_CLASS_NAME );
}
},
/**
* Sets the focus on the node element.
* It will only be able to set the focus on nodes that have anchor elements in them.
* Toggle or branch icons have anchors and can be focused on.
* It will fail on nodes that have no anchor.
* @method focus
* @return {boolean} success
*/
focus: function () {
var focused = false, self = this;
if (this.tree.currentFocus) {
this.tree.currentFocus._removeFocus();
}
var expandParent = function (node) {
if (node.parent) {
expandParent(node.parent);
node.parent.expand();
}
};
expandParent(this);
Dom.getElementsBy (
function (el) {
return (/ygtv(([tl][pmn]h?)|(content))/).test(el.className);
} ,
'td' ,
self.getEl().firstChild ,
function (el) {
Dom.addClass(el, YAHOO.widget.TreeView.FOCUS_CLASS_NAME );
if (!focused) {
var aEl = el.getElementsByTagName('a');
if (aEl.length) {
aEl = aEl[0];
aEl.focus();
self._focusedItem = aEl;
Event.on(aEl,'blur',function () {
//console.log('f1');
self.tree.fireEvent('focusChanged',{oldNode:self.tree.currentFocus,newNode:null});
self.tree.currentFocus = null;
self._removeFocus();
});
focused = true;
}
}
self._focusHighlightedItems.push(el);
}
);
if (focused) {
//console.log('f2');
this.tree.fireEvent('focusChanged',{oldNode:this.tree.currentFocus,newNode:this});
this.tree.currentFocus = this;
} else {
//console.log('f3');
this.tree.fireEvent('focusChanged',{oldNode:self.tree.currentFocus,newNode:null});
this.tree.currentFocus = null;
this._removeFocus();
}
return focused;
},
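/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree`, this gives keyboard focus to a node found by a hypothetical
* "docId" attribute. focus() expands ancestors as needed and returns false
* when the node has no focusable anchor.
*
*   var n = tree.getNodeByProperty("docId", 42);
*   if (n && !n.focus()) {
*       // the node could not take the browser focus
*   }
*/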
/**
* Count of nodes in a branch
* @method getNodeCount
* @return {int} number of nodes in the branch
*/
getNodeCount: function() {
for (var i = 0, count = 0;i< this.children.length;i++) {
count += this.children[i].getNodeCount();
}
return count + 1;
},
/**
* Returns an object which could be used to build a tree out of this node and its children.
* It can be passed to the tree constructor to reproduce this node as a tree.
* It will return false if the node or any of its children load dynamically, regardless of whether they are loaded or not.
* @method getNodeDefinition
* @return {Object | false} definition of the tree or false if the node or any children is defined as dynamic
*/
getNodeDefinition: function() {
if (this.isDynamic()) { return false; }
var def, defs = Lang.merge(this.data), children = [];
if (this.expanded) {defs.expanded = this.expanded; }
if (!this.multiExpand) { defs.multiExpand = this.multiExpand; }
if (!this.renderHidden) { defs.renderHidden = this.renderHidden; }
if (!this.hasIcon) { defs.hasIcon = this.hasIcon; }
if (this.nowrap) { defs.nowrap = this.nowrap; }
if (this.className) { defs.className = this.className; }
if (this.editable) { defs.editable = this.editable; }
if (this.enableHighlight) { defs.enableHighlight = this.enableHighlight; }
if (this.highlightState) { defs.highlightState = this.highlightState; }
if (this.propagateHighlightUp) { defs.propagateHighlightUp = this.propagateHighlightUp; }
if (this.propagateHighlightDown) { defs.propagateHighlightDown = this.propagateHighlightDown; }
defs.type = this._type;
for (var i = 0; i < this.children.length;i++) {
def = this.children[i].getNodeDefinition();
if (def === false) { return false;}
children.push(def);
}
if (children.length) { defs.children = children; }
return defs;
},
/**
* Generates the link that will invoke this node's toggle method
* @method getToggleLink
* @return {string} the javascript url for toggling this node
*/
getToggleLink: function() {
return 'return false;';
},
/**
* Sets the value of property for this node and all loaded descendants.
* Only public and defined properties can be set, not methods.
* Values for unknown properties will be assigned to the noderef.data object
* @method setNodesProperty
* @param name {string} Name of the property to be set
* @param value {any} value to be set
* @param refresh {boolean} if present and true, it does a refresh
*/
setNodesProperty: function(name, value, refresh) {
if (name.charAt(0) != '_' && !Lang.isUndefined(this[name]) && !Lang.isFunction(this[name]) ) {
this[name] = value;
} else {
this.data[name] = value;
}
for (var i = 0; i < this.children.length;i++) {
this.children[i].setNodesProperty(name,value);
}
if (refresh) {
this.refresh();
}
},
/**
* Toggles the highlighted state of a Node
* @method toggleHighlight
*/
toggleHighlight: function() {
if (this.enableHighlight) {
// unhighlights only if fully highlighted; if not highlighted or only partially highlighted, it will highlight
if (this.highlightState == 1) {
this.unhighlight();
} else {
this.highlight();
}
}
},
/**
* Turns highlighting on node.
* @method highlight
* @param _silent {boolean} optional, don't fire the highlightEvent
*/
highlight: function(_silent) {
if (this.enableHighlight) {
if (this.tree.singleNodeHighlight) {
if (this.tree._currentlyHighlighted) {
this.tree._currentlyHighlighted.unhighlight(_silent);
}
this.tree._currentlyHighlighted = this;
}
this.highlightState = 1;
this._setHighlightClassName();
if (!this.tree.singleNodeHighlight) {
if (this.propagateHighlightDown) {
for (var i = 0;i < this.children.length;i++) {
this.children[i].highlight(true);
}
}
if (this.propagateHighlightUp) {
if (this.parent) {
this.parent._childrenHighlighted();
}
}
}
if (!_silent) {
this.tree.fireEvent('highlightEvent',this);
}
}
},
/**
* Turns highlighting off a node.
* @method unhighlight
* @param _silent {boolean} optional, don't fire the highlightEvent
*/
unhighlight: function(_silent) {
if (this.enableHighlight) {
// might have checked singleNodeHighlight but it wouldn't really matter either way
this.tree._currentlyHighlighted = null;
this.highlightState = 0;
this._setHighlightClassName();
if (!this.tree.singleNodeHighlight) {
if (this.propagateHighlightDown) {
for (var i = 0;i < this.children.length;i++) {
this.children[i].unhighlight(true);
}
}
if (this.propagateHighlightUp) {
if (this.parent) {
this.parent._childrenHighlighted();
}
}
}
if (!_silent) {
this.tree.fireEvent('highlightEvent',this);
}
}
},
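/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree`, a common highlighting setup toggles the highlight on click and
* lets the state propagate down to children.
*
*   tree.setNodesProperty("propagateHighlightDown", true);
*   tree.subscribe("clickEvent", tree.onEventToggleHighlight, tree, true);
*   tree.render();
*/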
/**
* Checks whether all or part of the children of a node are highlighted and
* sets the node highlight to full, none or partial highlight.
* If set to propagate it will further call the parent
* @method _childrenHighlighted
* @private
*/
_childrenHighlighted: function() {
var yes = false, no = false;
if (this.enableHighlight) {
for (var i = 0;i < this.children.length;i++) {
switch(this.children[i].highlightState) {
case 0:
no = true;
break;
case 1:
yes = true;
break;
case 2:
yes = no = true;
break;
}
}
if (yes && no) {
this.highlightState = 2;
} else if (yes) {
this.highlightState = 1;
} else {
this.highlightState = 0;
}
this._setHighlightClassName();
if (this.propagateHighlightUp) {
if (this.parent) {
this.parent._childrenHighlighted();
}
}
}
},
/**
* Changes the classNames on the toggle and content containers to reflect the current highlighting
* @method _setHighlightClassName
* @private
*/
_setHighlightClassName: function() {
var el = Dom.get('ygtvtableel' + this.index);
if (el) {
el.className = el.className.replace(/\bygtv-highlight\d\b/gi,'ygtv-highlight' + this.highlightState);
}
}
};
YAHOO.augment(YAHOO.widget.Node, YAHOO.util.EventProvider);
})();
/**
* A custom YAHOO.widget.Node that handles the unique nature of
* the virtual, presentationless root node.
* @namespace YAHOO.widget
* @class RootNode
* @extends YAHOO.widget.Node
* @param oTree {YAHOO.widget.TreeView} The tree instance this node belongs to
* @constructor
*/
YAHOO.widget.RootNode = function(oTree) {
// Initialize the node with null params. The root node is a
// special case where the node has no presentation. So we have
// to alter the standard properties a bit.
this.init(null, null, true);
/*
* For the root node, we get the tree reference as a param
* to the constructor instead of from the parent element.
*/
this.tree = oTree;
};
YAHOO.extend(YAHOO.widget.RootNode, YAHOO.widget.Node, {
/**
* The node type
* @property _type
* @type string
* @private
* @default "RootNode"
*/
_type: "RootNode",
// overrides YAHOO.widget.Node
getNodeHtml: function() {
return "";
},
toString: function() {
return this._type;
},
loadComplete: function() {
this.tree.draw();
},
/**
* Count of nodes in tree.
* It overrides Nodes.getNodeCount because the root node should not be counted.
* @method getNodeCount
* @return {int} number of nodes in the tree
*/
getNodeCount: function() {
for (var i = 0, count = 0;i< this.children.length;i++) {
count += this.children[i].getNodeCount();
}
return count;
},
/**
* Returns an object which could be used to build a tree out of this node and its children.
* It can be passed to the tree constructor to reproduce this node as a tree.
* Since the RootNode is automatically created by treeView,
* its own definition is excluded from the returned node definition
* which only contains its children.
* @method getNodeDefinition
* @return {Object | false} definition of the tree or false if any child node is defined as dynamic
*/
getNodeDefinition: function() {
for (var def, defs = [], i = 0; i < this.children.length;i++) {
def = this.children[i].getNodeDefinition();
if (def === false) { return false;}
defs.push(def);
}
return defs;
},
collapse: function() {},
expand: function() {},
getSiblings: function() { return null; },
focus: function () {}
});
(function () {
var Dom = YAHOO.util.Dom,
Lang = YAHOO.lang,
Event = YAHOO.util.Event;
/**
* The default node presentation. The first parameter should be
* either a string that will be used as the node's label, or an object
* that has at least a string property called label. By default, clicking the
* label will toggle the expanded/collapsed state of the node. By
* setting the href property of the instance, this behavior can be
* changed so that the label will go to the specified href.
* @namespace YAHOO.widget
* @class TextNode
* @extends YAHOO.widget.Node
* @constructor
* @param oData {object} a string or object containing the data that will
* be used to render this node.
* Providing a string is the same as providing an object with a single property named label.
* All values in the oData will be used to set equally named properties in the node,
* as long as the node has such properties and they are not undefined, private, or functions.
* All attributes are made available in noderef.data, which
* can be used to store custom attributes. TreeView.getNode(s)ByProperty
* can be used to retrieve a node by one of the attributes.
* @param oParent {YAHOO.widget.Node} this node's parent node
* @param expanded {boolean} the initial expanded/collapsed state (deprecated; use oData.expanded)
*/
YAHOO.widget.TextNode = function(oData, oParent, expanded) {
if (oData) {
if (Lang.isString(oData)) {
oData = { label: oData };
}
this.init(oData, oParent, expanded);
this.setUpLabel(oData);
}
this.logger = new YAHOO.widget.LogWriter(this.toString());
};
YAHOO.extend(YAHOO.widget.TextNode, YAHOO.widget.Node, {
/**
* The CSS class for the label href. Defaults to ygtvlabel, but can be
* overridden to provide a custom presentation for a specific node.
* @property labelStyle
* @type string
*/
labelStyle: "ygtvlabel",
/**
* The derived element id of the label for this node
* @property labelElId
* @type string
*/
labelElId: null,
/**
* The text for the label. It is assumed that the oData parameter will
* either be a string that will be used as the label, or an object that
* has a property called "label" that we will use.
* @property label
* @type string
*/
label: null,
/**
* The text for the title (tooltip) for the label element
* @property title
* @type string
*/
title: null,
/**
* The href for the node's label. If one is not specified, the href will
* be set so that it toggles the node.
* @property href
* @type string
*/
href: null,
/**
* The label href target, defaults to current window
* @property target
* @type string
*/
target: "_self",
/**
* The node type
* @property _type
* @private
* @type string
* @default "TextNode"
*/
_type: "TextNode",
/**
* Sets up the node label
* @method setUpLabel
* @param oData string containing the label, or an object with a label property
*/
setUpLabel: function(oData) {
if (Lang.isString(oData)) {
oData = {
label: oData
};
} else {
if (oData.style) {
this.labelStyle = oData.style;
}
}
this.label = oData.label;
this.labelElId = "ygtvlabelel" + this.index;
},
/**
* Returns the label element
* @for YAHOO.widget.TextNode
* @method getLabelEl
* @return {object} the element
*/
getLabelEl: function() {
return Dom.get(this.labelElId);
},
// overrides YAHOO.widget.Node
getContentHtml: function() {
var sb = [];
sb[sb.length] = this.href?'<a':'<span';
sb[sb.length] = ' id="' + this.labelElId + '"';
sb[sb.length] = ' class="' + this.labelStyle + '"';
if (this.href) {
sb[sb.length] = ' href="' + this.href + '"';
sb[sb.length] = ' target="' + this.target + '"';
}
if (this.title) {
sb[sb.length] = ' title="' + this.title + '"';
}
sb[sb.length] = ' >';
sb[sb.length] = this.label;
sb[sb.length] = this.href?'</a>':'</span>';
return sb.join("");
},
/**
* Returns an object which could be used to build a tree out of this node and its children.
* It can be passed to the tree constructor to reproduce this node as a tree.
* It will return false if the node or any descendant loads dynamically, regardless of whether it is loaded or not.
* @method getNodeDefinition
* @return {Object | false} definition of the tree or false if this node or any descendant is defined as dynamic
*/
getNodeDefinition: function() {
var def = YAHOO.widget.TextNode.superclass.getNodeDefinition.call(this);
if (def === false) { return false; }
// Node specific properties
def.label = this.label;
if (this.labelStyle != 'ygtvlabel') { def.style = this.labelStyle; }
if (this.title) { def.title = this.title; }
if (this.href) { def.href = this.href; }
if (this.target != '_self') { def.target = this.target; }
return def;
},
toString: function() {
return YAHOO.widget.TextNode.superclass.toString.call(this) + ": " + this.label;
},
// deprecated
onLabelClick: function() {
return false;
},
refresh: function() {
YAHOO.widget.TextNode.superclass.refresh.call(this);
var label = this.getLabelEl();
label.innerHTML = this.label;
if (label.tagName.toUpperCase() == 'A') {
label.href = this.href;
label.target = this.target;
}
}
});
})();
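/*
* Usage sketch (illustrative): building a small tree of TextNodes; the
* element id "treeDiv" and the labels are hypothetical.
*
*   var tree = new YAHOO.widget.TreeView("treeDiv");
*   var root = tree.getRoot();
*   var reports = new YAHOO.widget.TextNode({ label: "Reports", expanded: true }, root);
*   new YAHOO.widget.TextNode({ label: "Q1", href: "q1.html", target: "_blank" }, reports);
*   new YAHOO.widget.TextNode("Q2", reports); // a plain string becomes the label
*   tree.render();
*/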
/**
* A menu-specific implementation that differs from TextNode in that only
* one sibling can be expanded at a time.
* @namespace YAHOO.widget
* @class MenuNode
* @extends YAHOO.widget.TextNode
* @param oData {object} a string or object containing the data that will
* be used to render this node.
* Providing a string is the same as providing an object with a single property named label.
* All values in the oData will be used to set equally named properties in the node,
* as long as the node has such properties and they are not undefined, private, or functions.
* All attributes are made available in noderef.data, which
* can be used to store custom attributes. TreeView.getNode(s)ByProperty
* can be used to retrieve a node by one of the attributes.
* @param oParent {YAHOO.widget.Node} this node's parent node
* @param expanded {boolean} the initial expanded/collapsed state (deprecated; use oData.expanded)
* @constructor
*/
YAHOO.widget.MenuNode = function(oData, oParent, expanded) {
YAHOO.widget.MenuNode.superclass.constructor.call(this,oData,oParent,expanded);
/*
* Menus usually allow only one branch to be open at a time.
*/
this.multiExpand = false;
};
YAHOO.extend(YAHOO.widget.MenuNode, YAHOO.widget.TextNode, {
/**
* The node type
* @property _type
* @private
* @default "MenuNode"
*/
_type: "MenuNode"
});
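/*
* Usage sketch (illustrative): assuming an existing TreeView instance
* `tree`, MenuNode is used exactly like TextNode; the only behavioral
* difference is that expanding one branch collapses its siblings. The
* labels below are hypothetical.
*
*   var fileMenu = new YAHOO.widget.MenuNode("File", tree.getRoot());
*   new YAHOO.widget.MenuNode("Edit", tree.getRoot());
*   new YAHOO.widget.TextNode("Open...", fileMenu);
*/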
(function () {
var Dom = YAHOO.util.Dom,
Lang = YAHOO.lang,
Event = YAHOO.util.Event;
/**
* This implementation takes either a string or object for the
* oData argument. If it is a string, it will be used for the display
* of this node (and it can contain any html code). If the parameter
* is an object, it looks for a property called "html" that will be
* used for this node's display.
* @namespace YAHOO.widget
* @class HTMLNode
* @extends YAHOO.widget.Node
* @constructor
* @param oData {object} a string or object containing the data that will
* be used to render this node.
* Providing a string is the same as providing an object with a single property named html.
* All values in the oData will be used to set equally named properties in the node,
* as long as the node has such properties and they are not undefined, private, or functions.
* All other attributes are made available in noderef.data, which
* can be used to store custom attributes. TreeView.getNode(s)ByProperty
* can be used to retrieve a node by one of the attributes.
* @param oParent {YAHOO.widget.Node} this node's parent node
* @param expanded {boolean} the initial expanded/collapsed state (deprecated; use oData.expanded)
* @param hasIcon {boolean} specifies whether or not leaf nodes should
* be rendered with or without a horizontal line and/or toggle icon. If the icon
* is not displayed, the content fills the space it would have occupied.
* This option operates independently of the leaf node presentation logic
* for dynamic nodes.
* (deprecated; use oData.hasIcon)
*/
YAHOO.widget.HTMLNode = function(oData, oParent, expanded, hasIcon) {
if (oData) {
this.init(oData, oParent, expanded);
this.initContent(oData, hasIcon);
}
};
YAHOO.extend(YAHOO.widget.HTMLNode, YAHOO.widget.Node, {
/**
* The CSS class for the html content container. Defaults to ygtvhtml, but
* can be overridden to provide a custom presentation for a specific node.
* @property contentStyle
* @type string
*/
contentStyle: "ygtvhtml",
/**
* The HTML content to use for this node's display
* @property html
* @type string
*/
html: null,
/**
* The node type
* @property _type
* @private
* @type string
* @default "HTMLNode"
*/
_type: "HTMLNode",
/**
* Sets up the node label
 * @method initContent
* @param oData {object} An html string or object containing an html property
* @param hasIcon {boolean} determines if the node will be rendered with an
* icon or not
*/
initContent: function(oData, hasIcon) {
this.setHtml(oData);
this.contentElId = "ygtvcontentel" + this.index;
if (!Lang.isUndefined(hasIcon)) { this.hasIcon = hasIcon; }
this.logger = new YAHOO.widget.LogWriter(this.toString());
},
/**
 * Synchronizes node.html and the node's content
 * @method setHtml
* @param o {object} An html string or object containing an html property
*/
setHtml: function(o) {
this.html = (typeof o === "string") ? o : o.html;
var el = this.getContentEl();
if (el) {
el.innerHTML = this.html;
}
},
// overrides YAHOO.widget.Node
getContentHtml: function() {
return this.html;
},
/**
* Returns an object which could be used to build a tree out of this node and its children.
* It can be passed to the tree constructor to reproduce this node as a tree.
* It will return false if any node loads dynamically, regardless of whether it is loaded or not.
* @method getNodeDefinition
* @return {Object | false} definition of the tree or false if any node is defined as dynamic
*/
getNodeDefinition: function() {
var def = YAHOO.widget.HTMLNode.superclass.getNodeDefinition.call(this);
if (def === false) { return false; }
def.html = this.html;
return def;
}
});
})();
(function () {
var Dom = YAHOO.util.Dom,
Lang = YAHOO.lang,
Event = YAHOO.util.Event,
Calendar = YAHOO.widget.Calendar;
/**
* A Date-specific implementation that differs from TextNode in that it uses
* YAHOO.widget.Calendar as an in-line editor, if available
* If Calendar is not available, it behaves as a plain TextNode.
* @namespace YAHOO.widget
* @class DateNode
* @extends YAHOO.widget.TextNode
* @param oData {object} a string or object containing the data that will
* be used to render this node.
* Providing a string is the same as providing an object with a single property named label.
* All values in the oData will be used to set equally named properties in the node
 * as long as the node has such properties and they are not undefined, private or functions.
* All attributes are made available in noderef.data, which
* can be used to store custom attributes. TreeView.getNode(s)ByProperty
* can be used to retrieve a node by one of the attributes.
* @param oParent {YAHOO.widget.Node} this node's parent node
* @param expanded {boolean} the initial expanded/collapsed state (deprecated; use oData.expanded)
* @constructor
*/
YAHOO.widget.DateNode = function(oData, oParent, expanded) {
YAHOO.widget.DateNode.superclass.constructor.call(this,oData, oParent, expanded);
};
YAHOO.extend(YAHOO.widget.DateNode, YAHOO.widget.TextNode, {
/**
* The node type
* @property _type
* @type string
* @private
* @default "DateNode"
*/
_type: "DateNode",
/**
* Configuration object for the Calendar editor, if used.
* See <a href="http://developer.yahoo.com/yui/calendar/#internationalization">http://developer.yahoo.com/yui/calendar/#internationalization</a>
* @property calendarConfig
*/
calendarConfig: null,
/**
* If YAHOO.widget.Calendar is available, it will pop up a Calendar to enter a new date. Otherwise, it falls back to a plain <input> textbox
* @method fillEditorContainer
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @return void
*/
fillEditorContainer: function (editorData) {
var cal, container = editorData.inputContainer;
if (Lang.isUndefined(Calendar)) {
Dom.replaceClass(editorData.editorPanel,'ygtv-edit-DateNode','ygtv-edit-TextNode');
YAHOO.widget.DateNode.superclass.fillEditorContainer.call(this, editorData);
return;
}
if (editorData.nodeType != this._type) {
editorData.nodeType = this._type;
editorData.saveOnEnter = false;
editorData.node.destroyEditorContents(editorData);
editorData.inputObject = cal = new Calendar(container.appendChild(document.createElement('div')));
if (this.calendarConfig) {
cal.cfg.applyConfig(this.calendarConfig,true);
cal.cfg.fireQueue();
}
cal.selectEvent.subscribe(function () {
this.tree._closeEditor(true);
},this,true);
} else {
cal = editorData.inputObject;
}
editorData.oldValue = this.label;
cal.cfg.setProperty("selected",this.label, false);
var delim = cal.cfg.getProperty('DATE_FIELD_DELIMITER');
var pageDate = this.label.split(delim);
cal.cfg.setProperty('pagedate',pageDate[cal.cfg.getProperty('MDY_MONTH_POSITION') -1] + delim + pageDate[cal.cfg.getProperty('MDY_YEAR_POSITION') -1]);
cal.cfg.fireQueue();
cal.render();
cal.oDomContainer.focus();
},
/**
* Returns the value from the input element.
* Overrides Node.getEditorValue.
* @method getEditorValue
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @return {string} date entered
*/
getEditorValue: function (editorData) {
if (Lang.isUndefined(Calendar)) {
return editorData.inputElement.value;
} else {
var cal = editorData.inputObject,
date = cal.getSelectedDates()[0],
dd = [];
dd[cal.cfg.getProperty('MDY_DAY_POSITION') -1] = date.getDate();
dd[cal.cfg.getProperty('MDY_MONTH_POSITION') -1] = date.getMonth() + 1;
dd[cal.cfg.getProperty('MDY_YEAR_POSITION') -1] = date.getFullYear();
return dd.join(cal.cfg.getProperty('DATE_FIELD_DELIMITER'));
}
},
/**
* Finally displays the newly entered date in the tree.
* Overrides Node.displayEditedValue.
* @method displayEditedValue
* @param value {string} date to be displayed and stored in the node
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
*/
displayEditedValue: function (value,editorData) {
var node = editorData.node;
node.label = value;
node.getLabelEl().innerHTML = value;
},
/**
* Returns an object which could be used to build a tree out of this node and its children.
* It can be passed to the tree constructor to reproduce this node as a tree.
* It will return false if the node or any descendant loads dynamically, regardless of whether it is loaded or not.
* @method getNodeDefinition
* @return {Object | false} definition of the node or false if this node or any descendant is defined as dynamic
*/
getNodeDefinition: function() {
var def = YAHOO.widget.DateNode.superclass.getNodeDefinition.call(this);
if (def === false) { return false; }
if (this.calendarConfig) { def.calendarConfig = this.calendarConfig; }
return def;
}
});
})();
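/*
 * Usage sketch: a DateNode renders like a TextNode; when edited, any
 * calendarConfig given is applied to the pop-up YAHOO.widget.Calendar.
 * The label format shown assumes Calendar's default mm/dd/yyyy delimiters.
 *
 *   var d = new YAHOO.widget.DateNode(
 *       {label: "1/28/2010", editable: true, calendarConfig: {navigator: true}},
 *       tree.getRoot()
 *   );
 */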
(function () {
var Dom = YAHOO.util.Dom,
Lang = YAHOO.lang,
Event = YAHOO.util.Event,
TV = YAHOO.widget.TreeView,
TVproto = TV.prototype;
/**
* An object to store information used for in-line editing
* for all Nodes of all TreeViews. It contains:
* <ul>
* <li>active {boolean}, whether there is an active cell editor </li>
* <li>whoHasIt {YAHOO.widget.TreeView} TreeView instance that is currently using the editor</li>
* <li>nodeType {string} value of static Node._type property, allows reuse of input element if node is of the same type.</li>
* <li>editorPanel {HTMLelement (<div>)} element holding the in-line editor</li>
* <li>inputContainer {HTMLelement (<div>)} element which will hold the type-specific input element(s) to be filled by the fillEditorContainer method</li>
* <li>buttonsContainer {HTMLelement (<div>)} element which holds the <button> elements for Ok/Cancel. If you don't want any of the buttons, hide it via CSS styles, don't destroy it</li>
* <li>node {YAHOO.widget.Node} reference to the Node being edited</li>
* <li>saveOnEnter {boolean}, whether the Enter key should be accepted as a Save command (Esc. is always taken as Cancel), disable for multi-line input elements </li>
* <li>oldValue {any} value before editing</li>
* </ul>
* Editors are free to use this object to store additional data.
* @property editorData
* @static
* @for YAHOO.widget.TreeView
*/
TV.editorData = {
active:false,
whoHasIt:null, // which TreeView has it
nodeType:null,
editorPanel:null,
inputContainer:null,
buttonsContainer:null,
node:null, // which Node is being edited
saveOnEnter:true,
oldValue:undefined
// Each node type is free to add its own properties to this as it sees fit.
};
/**
* Validator function for edited data, called from the TreeView instance scope,
* receives the arguments (newValue, oldValue, nodeInstance)
* and returns either the validated (or type-converted) value or undefined.
* An undefined return will prevent the editor from closing
* @property validator
* @type function
* @default null
* @for YAHOO.widget.TreeView
*/
TVproto.validator = null;
/**
* Entry point for initializing the editing plug-in.
* TreeView will call this method on initializing if it exists
* @method _initEditor
* @for YAHOO.widget.TreeView
* @private
*/
TVproto._initEditor = function () {
/**
* Fires when the user clicks on the ok button of a node editor
* @event editorSaveEvent
* @type CustomEvent
* @param oArgs.newValue {mixed} the new value just entered
* @param oArgs.oldValue {mixed} the value originally in the tree
* @param oArgs.node {YAHOO.widget.Node} the node that has the focus
* @for YAHOO.widget.TreeView
*/
this.createEvent("editorSaveEvent", this);
/**
* Fires when the user clicks on the cancel button of a node editor
* @event editorCancelEvent
* @type CustomEvent
* @param {YAHOO.widget.Node} node the node that has the focus
* @for YAHOO.widget.TreeView
*/
this.createEvent("editorCancelEvent", this);
};
/**
* Entry point of the editing plug-in.
* TreeView will call this method if it exists when a node label is clicked
* @method _nodeEditing
* @param node {YAHOO.widget.Node} the node to be edited
* @return {Boolean} true to indicate that the node is editable and prevent any further bubbling of the click.
* @for YAHOO.widget.TreeView
* @private
*/
TVproto._nodeEditing = function (node) {
if (node.fillEditorContainer && node.editable) {
var ed, topLeft, buttons, button, editorData = TV.editorData;
editorData.active = true;
editorData.whoHasIt = this;
if (!editorData.nodeType) {
editorData.editorPanel = ed = document.body.appendChild(document.createElement('div'));
Dom.addClass(ed,'ygtv-label-editor');
buttons = editorData.buttonsContainer = ed.appendChild(document.createElement('div'));
Dom.addClass(buttons,'ygtv-button-container');
button = buttons.appendChild(document.createElement('button'));
Dom.addClass(button,'ygtvok');
button.innerHTML = ' ';
button = buttons.appendChild(document.createElement('button'));
Dom.addClass(button,'ygtvcancel');
button.innerHTML = ' ';
Event.on(buttons, 'click', function (ev) {
this.logger.log('click on editor');
var target = Event.getTarget(ev);
var node = TV.editorData.node;
if (Dom.hasClass(target,'ygtvok')) {
node.logger.log('ygtvok');
Event.stopEvent(ev);
this._closeEditor(true);
}
if (Dom.hasClass(target,'ygtvcancel')) {
node.logger.log('ygtvcancel');
Event.stopEvent(ev);
this._closeEditor(false);
}
}, this, true);
editorData.inputContainer = ed.appendChild(document.createElement('div'));
Dom.addClass(editorData.inputContainer,'ygtv-input');
Event.on(ed,'keydown',function (ev) {
var editorData = TV.editorData,
KEY = YAHOO.util.KeyListener.KEY;
switch (ev.keyCode) {
case KEY.ENTER:
this.logger.log('ENTER');
Event.stopEvent(ev);
if (editorData.saveOnEnter) {
this._closeEditor(true);
}
break;
case KEY.ESCAPE:
this.logger.log('ESC');
Event.stopEvent(ev);
this._closeEditor(false);
break;
}
},this,true);
} else {
ed = editorData.editorPanel;
}
editorData.node = node;
if (editorData.nodeType) {
Dom.removeClass(ed,'ygtv-edit-' + editorData.nodeType);
}
Dom.addClass(ed,'ygtv-edit-' + node._type);
topLeft = Dom.getXY(node.getContentEl());
Dom.setStyle(ed,'left',topLeft[0] + 'px');
Dom.setStyle(ed,'top',topLeft[1] + 'px');
Dom.setStyle(ed,'display','block');
ed.focus();
node.fillEditorContainer(editorData);
return true; // If inline editor available, don't do anything else.
}
};
/**
* Method to be associated with an event (clickEvent, dblClickEvent or enterKeyPressed) to pop up the contents editor
* It calls the corresponding node editNode method.
* @method onEventEditNode
* @param oArgs {object} Object passed as arguments to TreeView event listeners
* @for YAHOO.widget.TreeView
*/
TVproto.onEventEditNode = function (oArgs) {
if (oArgs instanceof YAHOO.widget.Node) {
oArgs.editNode();
} else if (oArgs.node instanceof YAHOO.widget.Node) {
oArgs.node.editNode();
}
};
/**
* Method to be called when the inline editing is finished and the editor is to be closed
* @method _closeEditor
* @param save {Boolean} true if the edited value is to be saved, false if discarded
* @private
* @for YAHOO.widget.TreeView
*/
TVproto._closeEditor = function (save) {
var ed = TV.editorData,
node = ed.node,
close = true;
if (save) {
close = ed.node.saveEditorValue(ed) !== false;
} else {
this.fireEvent( 'editorCancelEvent', node);
}
if (close) {
Dom.setStyle(ed.editorPanel,'display','none');
ed.active = false;
node.focus();
}
};
/**
* Entry point for TreeView's destroy method to destroy whatever the editing plug-in has created
* @method _destroyEditor
* @private
* @for YAHOO.widget.TreeView
*/
TVproto._destroyEditor = function() {
var ed = TV.editorData;
if (ed && ed.nodeType && (!ed.active || ed.whoHasIt === this)) {
Event.removeListener(ed.editorPanel,'keydown');
Event.removeListener(ed.buttonsContainer,'click');
ed.node.destroyEditorContents(ed);
document.body.removeChild(ed.editorPanel);
ed.nodeType = ed.editorPanel = ed.inputContainer = ed.buttonsContainer = ed.whoHasIt = ed.node = null;
ed.active = false;
}
};
var Nproto = YAHOO.widget.Node.prototype;
/**
* Signals if the label is editable. (Ignored on TextNodes with href set.)
* @property editable
* @type boolean
* @for YAHOO.widget.Node
*/
Nproto.editable = false;
/**
* pops up the contents editor, if there is one and the node is declared editable
* @method editNode
* @for YAHOO.widget.Node
*/
Nproto.editNode = function () {
this.tree._nodeEditing(this);
};
/** Placeholder for a function that should provide the inline node label editor.
* Leaving it set to null will indicate that this node type is not editable.
* It should be overridden by nodes that provide inline editing.
* The Node-specific editing element (input box, textarea or whatever) should be inserted into editorData.inputContainer.
* @method fillEditorContainer
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @return void
* @for YAHOO.widget.Node
*/
Nproto.fillEditorContainer = null;
/**
* Node-specific destroy function to empty the contents of the inline editor panel.
* This function is the worst case alternative that will purge all possible events and remove the editor contents.
 * Method Event.purgeElement is somewhat costly so if it can be replaced by specific Event.removeListener calls, it is better to do so.
* @method destroyEditorContents
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @for YAHOO.widget.Node
*/
Nproto.destroyEditorContents = function (editorData) {
// In the worst case, if the input editor (such as the Calendar) has no destroy method
// we can only try to remove all possible events on it.
Event.purgeElement(editorData.inputContainer,true);
editorData.inputContainer.innerHTML = '';
};
/**
* Saves the value entered into the editor.
* @method saveEditorValue
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @return {false or none} a return of exactly false will prevent the editor from closing
* @for YAHOO.widget.Node
*/
Nproto.saveEditorValue = function (editorData) {
var node = editorData.node,
value,
validator = node.tree.validator;
value = this.getEditorValue(editorData);
if (Lang.isFunction(validator)) {
value = validator(value,editorData.oldValue,node);
if (Lang.isUndefined(value)) {
return false;
}
}
if (this.tree.fireEvent( 'editorSaveEvent', {
newValue:value,
oldValue:editorData.oldValue,
node:node
}) !== false) {
this.displayEditedValue(value,editorData);
}
};
/**
* Returns the value(s) from the input element(s) .
* Should be overridden by each node type.
* @method getEditorValue
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @return {any} value entered
* @for YAHOO.widget.Node
*/
Nproto.getEditorValue = function (editorData) {
};
/**
* Finally displays the newly edited value(s) in the tree.
* Should be overridden by each node type.
* @method displayEditedValue
* @param value {any} value to be displayed and stored in the node
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @for YAHOO.widget.Node
*/
Nproto.displayEditedValue = function (value,editorData) {
};
var TNproto = YAHOO.widget.TextNode.prototype;
/**
* Places an <input> textbox in the input container and loads the label text into it.
* @method fillEditorContainer
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @return void
* @for YAHOO.widget.TextNode
*/
TNproto.fillEditorContainer = function (editorData) {
var input;
// If last node edited is not of the same type as this one, delete it and fill it with our editor
if (editorData.nodeType != this._type) {
editorData.nodeType = this._type;
editorData.saveOnEnter = true;
editorData.node.destroyEditorContents(editorData);
editorData.inputElement = input = editorData.inputContainer.appendChild(document.createElement('input'));
} else {
// if the last node edited was of the same type, reuse the input element.
input = editorData.inputElement;
}
editorData.oldValue = this.label;
input.value = this.label;
input.focus();
input.select();
};
/**
* Returns the value from the input element.
* Overrides Node.getEditorValue.
* @method getEditorValue
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @return {string} value entered
* @for YAHOO.widget.TextNode
*/
TNproto.getEditorValue = function (editorData) {
return editorData.inputElement.value;
};
/**
* Finally displays the newly edited value in the tree.
* Overrides Node.displayEditedValue.
* @method displayEditedValue
* @param value {string} value to be displayed and stored in the node
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @for YAHOO.widget.TextNode
*/
TNproto.displayEditedValue = function (value,editorData) {
var node = editorData.node;
node.label = value;
node.getLabelEl().innerHTML = value;
};
/**
* Destroys the contents of the inline editor panel.
* Overrides Node.destroyEditorContent.
* Since we didn't set any event listeners on this inline editor, it is more efficient to avoid the generic method in Node.
* @method destroyEditorContents
* @param editorData {YAHOO.widget.TreeView.editorData} a shortcut to the static object holding editing information
* @for YAHOO.widget.TextNode
*/
TNproto.destroyEditorContents = function (editorData) {
editorData.inputContainer.innerHTML = '';
};
})();
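/*
 * Usage sketch: wiring up the inline editor defined above. Nodes must be
 * flagged editable and one of the TreeView events routed to
 * onEventEditNode; the validator shown (rejecting empty labels by
 * returning undefined) is an assumption, not part of the library.
 *
 *   tree.subscribe("dblClickEvent", tree.onEventEditNode);
 *   someNode.editable = true;
 *   tree.validator = function (newValue, oldValue, node) {
 *       if (YAHOO.lang.trim(newValue)) { return newValue; }
 *       // returning undefined keeps the editor open
 *   };
 */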
/**
* A static factory class for tree view expand/collapse animations
* @class TVAnim
* @static
*/
YAHOO.widget.TVAnim = function() {
return {
/**
* Constant for the fade in animation
* @property FADE_IN
* @type string
* @static
*/
FADE_IN: "TVFadeIn",
/**
* Constant for the fade out animation
* @property FADE_OUT
* @type string
* @static
*/
FADE_OUT: "TVFadeOut",
/**
* Returns a ygAnim instance of the given type
* @method getAnim
* @param type {string} the type of animation
 * @param el {HTMLElement} the element to animate (probably the children div)
* @param callback {function} function to invoke when the animation is done.
* @return {YAHOO.util.Animation} the animation instance
* @static
*/
getAnim: function(type, el, callback) {
if (YAHOO.widget[type]) {
return new YAHOO.widget[type](el, callback);
} else {
return null;
}
},
/**
* Returns true if the specified animation class is available
* @method isValid
* @param type {string} the type of animation
* @return {boolean} true if valid, false if not
* @static
*/
isValid: function(type) {
return (YAHOO.widget[type]);
}
};
} ();
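/*
 * Usage sketch: the factory is normally consulted by TreeView itself, but
 * it can also be driven directly through the getAnim/isValid methods
 * defined above; "childrenDiv" is an assumed element reference.
 *
 *   if (YAHOO.widget.TVAnim.isValid(YAHOO.widget.TVAnim.FADE_IN)) {
 *       var anim = YAHOO.widget.TVAnim.getAnim(
 *           YAHOO.widget.TVAnim.FADE_IN, childrenDiv,
 *           function () { }); // callback runs when the animation completes
 *       anim.animate();
 *   }
 */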
/**
 * A 0.4 second fade-in animation.
* @class TVFadeIn
* @constructor
* @param el {HTMLElement} the element to animate
* @param callback {function} function to invoke when the animation is finished
*/
YAHOO.widget.TVFadeIn = function(el, callback) {
/**
* The element to animate
* @property el
* @type HTMLElement
*/
this.el = el;
/**
* the callback to invoke when the animation is complete
* @property callback
* @type function
*/
this.callback = callback;
this.logger = new YAHOO.widget.LogWriter(this.toString());
};
YAHOO.widget.TVFadeIn.prototype = {
/**
* Performs the animation
* @method animate
*/
animate: function() {
var tvanim = this;
var s = this.el.style;
s.opacity = 0.1;
s.filter = "alpha(opacity=10)";
s.display = "";
var dur = 0.4;
var a = new YAHOO.util.Anim(this.el, {opacity: {from: 0.1, to: 1, unit:""}}, dur);
a.onComplete.subscribe( function() { tvanim.onComplete(); } );
a.animate();
},
/**
* Clean up and invoke callback
* @method onComplete
*/
onComplete: function() {
this.callback();
},
/**
* toString
* @method toString
* @return {string} the string representation of the instance
*/
toString: function() {
return "TVFadeIn";
}
};
/**
 * A 0.4 second fade out animation.
* @class TVFadeOut
* @constructor
* @param el {HTMLElement} the element to animate
* @param callback {Function} function to invoke when the animation is finished
*/
YAHOO.widget.TVFadeOut = function(el, callback) {
/**
* The element to animate
* @property el
* @type HTMLElement
*/
this.el = el;
/**
* the callback to invoke when the animation is complete
* @property callback
* @type function
*/
this.callback = callback;
this.logger = new YAHOO.widget.LogWriter(this.toString());
};
YAHOO.widget.TVFadeOut.prototype = {
/**
* Performs the animation
* @method animate
*/
animate: function() {
var tvanim = this;
var dur = 0.4;
var a = new YAHOO.util.Anim(this.el, {opacity: {from: 1, to: 0.1, unit:""}}, dur);
a.onComplete.subscribe( function() { tvanim.onComplete(); } );
a.animate();
},
/**
* Clean up and invoke callback
* @method onComplete
*/
onComplete: function() {
var s = this.el.style;
s.display = "none";
s.opacity = 1;
s.filter = "alpha(opacity=100)";
this.callback();
},
/**
* toString
* @method toString
* @return {string} the string representation of the instance
*/
toString: function() {
return "TVFadeOut";
}
};
YAHOO.register("treeview", YAHOO.widget.TreeView, {version: "2.8.0r4", build: "2449"});
|
PypiClean
|
/classroom_voter_harrismcc-0.0.3b1-py3-none-any.whl/classroom_voter/login.py
|
import socket
import os
import sys
import json
import getpass
from classroom_voter.shared.pollTypes import Poll, FreeResponseQuestion
import classroom_voter.professor as professor
import classroom_voter.client as client
class LoginTools(object):
def __init__(self, ip, port, cli=False):
self.ip = ip
self.port = port
self.cli = cli
self.clientSocket = socket.socket()
self.clientSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self.clientSocket.connect((self.ip, self.port))
if self.cli: print('Successful Connection')
self.connected = True
except socket.error as e:
self.connected = False
if self.cli: print('Failed Connection: ' + str(e))
return
if cli:
self.main()
def send_msg(self, msg):
try:
self.clientSocket.send(str.encode(json.dumps(msg)))
except socket.error as e:
if self.cli: print('Failed to send message: ' + str(e))
def attempt_login(self, username, password):
msg = {
"endpoint": "Login",
"Arguments": {
"username": username,
"password": password
}
}
self.send_msg(msg)
response = json.loads(self.clientSocket.recv(2048).decode())
return response
def reset_password(self, username, password, new_password):
msg = {
"endpoint": "Reset_password",
"Arguments": {
"username": username,
"old_password": password,
"new_password": new_password
}
}
self.send_msg(msg)
result = json.loads(self.clientSocket.recv(2048).decode())
return result
def recover_password(self, username):
msg = {
"endpoint": "Recover_password",
"Arguments" : {
"username" : username
}
}
self.send_msg(msg)
response = json.loads(self.clientSocket.recv(2048).decode())
return response
def get_new_password(self):
"""prompts the user for a new password that satisfies comprehensive8 requirements
Returns:
password: string password that satisfies comprehensive8"""
valid = False
password = ""
while not valid:
password = self.safe_prompt_for_password("please enter your new password: \n(note: Password must have atleast 8 characters including an uppercase and lowercase letter, a symbol, and a digit.\n Password: ")
valid = True
if len(password) < 8:
valid = False
print("password must be at least eight characters! \n")
if not any(x.isupper() for x in password):
valid = False
print("password must contain at least one uppercase character! \n")
if not any(x.islower() for x in password):
valid = False
print("password must contain at least one lowercase character! \n")
if not any(x.isnumeric() for x in password):
valid = False
print("password must contain at least one digit! \n")
symbols = '!@#$%^&*()-_+=`~[]{},./<>?|'
if not any(x in symbols for x in password):
valid = False
print("password must contain at least one symbol (!@#$%^&*()-_+=`~[]{},./<>?|) \n")
if valid:
confirm = self.safe_prompt_for_password("please enter the password again to confirm:")
if confirm != password:
valid = False
print("Passwords don't match! Try again")
return password
def safe_prompt_for_password(self, prompt='Enter Password: '):
if os.isatty(sys.stdin.fileno()):
os.system("stty -echo")
password = input(prompt)
os.system("stty echo")
print("")
else:
password = input(prompt)
return password
def main(self):
while True:
login_action = input("Login or Forgot Password: ")
if login_action == "Login":
username = input("Enter username: ")
password = self.safe_prompt_for_password()
login_result = self.attempt_login(username, password)
if login_result['Arguments']['result'] == 'success' or login_result['Arguments']['result'] == 'must reset':
break
else:
print('Invalid credentials. Try again.')
if login_action == "Forgot Password":
username = input("Enter username: ")
login_result = self.recover_password(username)
if login_result['Arguments']['result'] == 'success':
print('Password Recovery Succeeded')
else:
print('Password Recovery Failed. Try again.')
if login_result['Arguments']['result'] == 'must reset':
new_password = self.get_new_password()
reset_result = self.reset_password(username, password, new_password)
if reset_result['Arguments']['result'] == 'success':
pass
else:
print("Password Reset Failed")
quit()
if login_result['Arguments']['account_type'] == 'students':
client.main(self.clientSocket, login_result['Arguments']['username'])
elif login_result['Arguments']['account_type'] == 'professors':
professor.main(self.clientSocket)
def prompt_for_ip():
ip = input("Enter the IP address of the server (eg 192.168.61.1): ")
port = int(input("Enter the port of the server (eg 1500): "))
return (ip, port)
def main():
if len(sys.argv) != 1 and len(sys.argv) != 3: # either need no args or both ip and port
print("usage: python3 %s or python3 %s <server-ip> <server-port>" % (sys.argv[0], sys.argv[0]))
quit(1)
ip = None
port = None
print("#"*80)
print('\t\t\tLog in to classroom voter')
print("#"*80)
if len(sys.argv) == 3:
ip = sys.argv[1]
port = int(sys.argv[2])
else:
ip, port = prompt_for_ip()
login = LoginTools(ip, port, cli="True")
if __name__ == "__main__":
main()
|
PypiClean
|
/nlglib-0.2.1.tar.gz/nlglib-0.2.1/examples/logic.py
|
import logging
from nlglib.realisation.simplenlg.realisation import Realiser
from nlglib.lexicalisation import Lexicaliser
from nlglib.macroplanning import *
from nlglib.microplanning import *
from nlglib.features import TENSE
def run():
realise = Realiser(host='nlg.kutlak.info')
lex = Lexicaliser(templates={
'x': String('X'),
'arthur': Male('Arthur'),
'shrubbery': Clause(Var(0), VP('find', NP('a', 'shrubbery'), features=[TENSE.future])),
'knight': Clause(Var(0), VP('is', NP('a', 'knight'))),
'say_ni': Clause(Var(0), VP('say', Interjection('"Ni!"'))),
})
print(realise(lex(formula_to_rst(expr(r'x')))))
print(realise(lex(formula_to_rst(expr(r'-x')))))
print(realise(lex(formula_to_rst(expr(r'x = 5')))))
print(realise(lex(formula_to_rst(expr(r'x != 5')))))
print(realise(lex(formula_to_rst(expr(r'knight(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'-knight(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'say_ni(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'-say_ni(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'shrubbery(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'-shrubbery(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'knight(arthur) & say_ni(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'say_ni(arthur) | knight(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'say_ni(arthur) -> knight(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'knight(arthur) <-> say_ni(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'say_ni(arthur) & -knight(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'say_ni(arthur) | -knight(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'say_ni(arthur) -> -knight(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'-knight(arthur) <-> say_ni(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'-knight(arthur) <-> -say_ni(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'-(knight(arthur) <-> say_ni(arthur))')))))
print(realise(lex(formula_to_rst(expr(r'say_ni(arthur) & knight(arthur) & shrubbery(arthur)')))))
print(realise(lex(formula_to_rst(expr(r'say_ni(arthur) | knight(arthur) | shrubbery(arthur)')))))
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
run()
|
PypiClean
|
/haiqv-profiling-0.0.1.tar.gz/haiqv-profiling-0.0.1/src/pandas_profiling/report/structure/variables/render_boolean.py
|
from pandas_profiling.config import config
from pandas_profiling.report.presentation.core import (
Container,
FrequencyTable,
FrequencyTableSmall,
Image,
Table,
VariableInfo,
)
from pandas_profiling.report.presentation.frequency_table_utils import freq_table
from pandas_profiling.report.structure.variables.render_common import render_common
from pandas_profiling.visualisation.plot import pie_plot
def render_boolean(summary):
varid = summary["varid"]
n_obs_bool = config["vars"]["bool"]["n_obs"].get(int)
image_format = config["plot"]["image_format"].get(str)
# Prepare variables
template_variables = render_common(summary)
# Element composition
info = VariableInfo(
anchor_id=summary["varid"],
warnings=summary["warnings"],
var_type="Boolean",
var_name=summary["varname"],
description=summary["description"],
)
table = Table(
[
{
"name": "Distinct",
"value": summary["n_distinct"],
"fmt": "fmt",
"alert": "n_distinct" in summary["warn_fields"],
},
{
"name": "Distinct (%)",
"value": summary["p_distinct"],
"fmt": "fmt_percent",
"alert": "p_distinct" in summary["warn_fields"],
},
{
"name": "Missing",
"value": summary["n_missing"],
"fmt": "fmt",
"alert": "n_missing" in summary["warn_fields"],
},
{
"name": "Missing (%)",
"value": summary["p_missing"],
"fmt": "fmt_percent",
"alert": "p_missing" in summary["warn_fields"],
},
{
"name": "Memory size",
"value": summary["memory_size"],
"fmt": "fmt_bytesize",
"alert": False,
},
]
)
fqm = FrequencyTableSmall(
freq_table(
freqtable=summary["value_counts"],
n=summary["n"],
max_number_to_print=n_obs_bool,
),
redact=False,
)
template_variables["top"] = Container([info, table, fqm], sequence_type="grid")
items = [
FrequencyTable(
template_variables["freq_table_rows"],
name="Common Values",
anchor_id=f"{varid}frequency_table",
redact=False,
)
]
max_unique = config["plot"]["pie"]["max_unique"].get(int)
if max_unique > 0:
items.append(
Image(
pie_plot(summary["value_counts"], legend_kws={"loc": "upper right"}),
image_format=image_format,
alt="Chart",
name="Chart",
anchor_id=f"{varid}pie_chart",
)
)
template_variables["bottom"] = Container(
items, sequence_type="tabs", anchor_id=f"{varid}bottom"
)
return template_variables
|
PypiClean
|
/keystone-23.0.0.0rc1.tar.gz/keystone-23.0.0.0rc1/doc/source/admin/multi-factor-authentication.rst
|
.. _multi_factor_authentication:
===========================
Multi-Factor Authentication
===========================
Configuring MFA
===============
MFA is configured on a per user basis via the user options
:ref:`multi_factor_auth_rules` and :ref:`multi_factor_auth_enabled`. Until
these are set the user can authenticate with any one of the enabled auth
methods.
MFA rules
---------
The MFA rules allow an admin to force a user to use specific forms of
authentication or combinations of forms of authentication to get a token.
The rules are specified as follows via the user option
:ref:`multi_factor_auth_rules`::
[["password", "totp"], ["password", "custom-auth-method"]]
They are a list of lists. The elements of the sub-lists must be strings and are
intended to mirror the required authentication method names (e.g. ``password``,
``totp``, etc) as defined in the ``keystone.conf`` file in the
``[auth] methods`` option. Each list of methods specifies a rule.
If the auth methods provided by a user match (or exceed) the auth methods in
the list, that rule is used. The first matching rule found will be used
(rules are not processed in any specific order). If a user has the ruleset
defined as ``[["password", "totp"]]`` the user must provide both password and
totp auth methods (and both methods must succeed) to receive a token. However,
if a user has a ruleset defined as ``[["password"], ["password", "totp"]]``
the user may use the ``password`` method on its own but would be required
to use both ``password`` and ``totp`` if ``totp`` is specified at all.
Any auth methods that are not defined in ``keystone.conf`` in the
``[auth] methods`` option are ignored when the rules are processed. Empty
rules are not allowed. If a rule is empty because no valid auth methods
existing within it, the rule is discarded at authentication time. If there
are no rules or no valid rules for the user, authentication occurs in the
default manner: any single configured auth method is sufficient to receive
a token.
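As an illustrative sketch (the payload shape follows the usual user-options
update in the identity API; consult the API reference for the exact endpoint
in your deployment), the rules above could be stored with a user update
request body like::

    {
        "user": {
            "options": {
                "multi_factor_auth_rules": [["password", "totp"],
                                            ["password", "custom-auth-method"]]
            }
        }
    }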
.. note::
The ``token`` auth method typically should not be specified in any MFA
Rules. The ``token`` auth method will include all previous auth methods
for the original auth request and will match the appropriate ruleset. This
is intentional, as the ``token`` method is used for rescoping/changing
active projects.
Enabling MFA
------------
Before the MFA rules take effect on a user, MFA has to be enabled for that user
via the user option :ref:`multi_factor_auth_enabled`. By default this is unset,
and the rules will not take effect until configured.
If a user should be exempt from the MFA rules, regardless of whether they are
set, the user option may be set to ``False``.
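For example, a sketch of enabling MFA for a user via the same user-options
mechanism (again, see the API reference for the exact call)::

    {
        "user": {
            "options": {
                "multi_factor_auth_enabled": true
            }
        }
    }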
Using MFA
=========
See :ref:`multi_factor_authentication_user_guide` in the user guide for some
examples.
Supported multi-factor authentication methods
=============================================
TOTP is the only suggested second factor along with password for now, but there
are plans to include more in future.
TOTP
----
This is a simple 6 digit passcode generated by both the server and client from
a known shared secret.
This used in a multi-step fashion is the most common 2-factor method used these
days.
See: :ref:`auth_totp`
|
PypiClean
|
/nr-documents-records-1.0.0.tar.gz/nr-documents-records-1.0.0/README.md
|
# nr-common-metadata
[](https://travis-ci.org/Narodni-repozitar/nr-common)
[](https://coveralls.io/github/Narodni-repozitar/nr-common)
Disclaimer: The library is part of the Czech National Repository, and therefore the README is written in Czech.
General libraries extending [Invenio](https://github.com/inveniosoftware) are concentrated under the [Oarepo
namespace](https://github.com/oarepo).
## Installation
This is not a standalone library, so it requires a running Invenio instance and the Oarepo dependencies.
The library is installed in the usual way via pip:
```bash
pip install techlib-nr-common-metadata
```
For testing and/or standalone use of the library, the tests extras must be installed:
```bash
pip install -e .[tests]
```
## Purpose
The library contains the general metadata model of the Czech National Repository (Marshmallow, JSON Schema, and Elasticsearch mapping).
All of these parts can be
"inherited" in other metadata models.
The library is not a standalone model for "generic" things - that one lives in nr-generic.
|
PypiClean
|
/odoo13_addon_l10n_es_aeat_mod303-13.0.4.3.0-py3-none-any.whl/odoo/addons/l10n_es_aeat_mod303/models/mod303.py
|
from odoo import _, api, exceptions, fields, models
from odoo.tools import float_compare
_ACCOUNT_PATTERN_MAP = {
"C": "4700",
"D": "4700",
"V": "4700",
"X": "4700",
"N": "4700",
"I": "4750",
"G": "4750",
"U": "4750",
}
NON_EDITABLE_ON_DONE = {"done": [("readonly", True)]}
NON_EDITABLE_EXCEPT_DRAFT = {
"done": [("readonly", True)],
"calculated": [("readonly", True)],
"posted": [("readonly", True)],
"cancelled": [("readonly", True)],
}
EDITABLE_ON_DRAFT = {"draft": [("readonly", False)]}
ACTIVITY_CODE_DOMAIN = (
"["
" '|',"
" ('period_type', '=', False), ('period_type', '=', period_type),"
" '&',"
" '|', ('date_start', '=', False), ('date_start', '<=', date_start),"
" '|', ('date_end', '=', False), ('date_end', '>=', date_end),"
"]"
)
class L10nEsAeatMod303Report(models.Model):
_inherit = "l10n.es.aeat.report.tax.mapping"
_name = "l10n.es.aeat.mod303.report"
_description = "AEAT 303 Report"
_aeat_number = "303"
devolucion_mensual = fields.Boolean(
string="Montly Return",
states=NON_EDITABLE_ON_DONE,
help="Registered in the Register of Monthly Return",
)
total_devengado = fields.Float(
string="[27] VAT payable",
readonly=True,
compute_sudo=True,
compute="_compute_total_devengado",
store=True,
)
total_deducir = fields.Float(
string="[45] VAT receivable",
readonly=True,
compute_sudo=True,
compute="_compute_total_deducir",
store=True,
)
casilla_46 = fields.Float(
string="[46] General scheme result",
readonly=True,
store=True,
help="(VAT payable - VAT receivable)",
compute="_compute_casilla_46",
)
porcentaje_atribuible_estado = fields.Float(
string="[65] % attributable to State",
default=100,
states=NON_EDITABLE_ON_DONE,
help="Taxpayers who pay jointly to the Central Government and "
"the Provincial Councils of the Basque Country or the "
"Autonomous Community of Navarra, will enter in this box the "
"percentage of volume operations in the common territory. "
"Other taxpayers will enter in this box 100%",
)
atribuible_estado = fields.Float(
string="[66] Attributable to the Administration",
readonly=True,
compute="_compute_atribuible_estado",
store=True,
)
potential_cuota_compensar = fields.Float(
string="[110] Pending fees to compensate",
default=0,
states=NON_EDITABLE_ON_DONE,
)
cuota_compensar = fields.Float(
string="[78] Applied fees to compensate (old [67])",
default=0,
states=NON_EDITABLE_ON_DONE,
help="Fee to compensate for prior periods, in which his statement "
"was to return and compensation back option was chosen",
)
remaining_cuota_compensar = fields.Float(
string="[87] Remaining fees to compensate",
compute="_compute_remaining_cuota_compensar",
)
regularizacion_anual = fields.Float(
string="[68] Annual regularization",
states=NON_EDITABLE_ON_DONE,
compute="_compute_regularizacion_anual",
readonly=False,
store=True,
default=0.0,
help="In the last auto settlement of the year, shall be recorded "
"(the fourth period or 12th month), with the appropriate sign, "
"the result of the annual adjustment as have the laws by the "
"Economic Agreement approved between the State and the "
"Autonomous Community the Basque Country and the "
"Economic Agreement between the State and Navarre.",
)
casilla_69 = fields.Float(
string="[69] Result",
readonly=True,
compute="_compute_casilla_69",
help="[66] Attributable to the Administration - "
"[67] Fees to compensate + "
"[68] Annual regularization",
store=True,
)
casilla_77 = fields.Float(
string="[77] VAT deferred (Settle by customs)",
help="Contributions of import tax included in the documents "
"evidencing the payment made by the Administration and received "
"in the settlement period. You can only complete this box "
"when the requirements of Article 74.1 of the Tax Regulations "
"Value Added are met.",
)
previous_result = fields.Float(
string="[70] To be deducted",
help="Result of the previous or prior statements of the same concept, "
"exercise and period",
states=NON_EDITABLE_ON_DONE,
)
resultado_liquidacion = fields.Float(
string="[71] Settlement result",
readonly=True,
compute="_compute_resultado_liquidacion",
store=True,
)
result_type = fields.Selection(
selection=[
("I", "To enter"),
("G", "To enter - AEAT account"),
("U", "To enter - Bank account debit"),
("D", "To return"),
("V", "To return - AEAT account"),
("X", "To return - Foreign bank account"),
("C", "To compensate"),
("N", "No activity/Zero result"),
],
string="Result type",
compute="_compute_result_type",
)
counterpart_account_id = fields.Many2one(
comodel_name="account.account",
string="Counterpart account",
compute="_compute_counterpart_account_id",
domain="[('company_id', '=', company_id)]",
store=True,
readonly=False,
)
allow_posting = fields.Boolean(string="Allow posting", default=True)
exonerated_390 = fields.Selection(
selection=[("1", u"Exonerado"), ("2", u"No exonerado")],
default="2",
required=True,
states=NON_EDITABLE_EXCEPT_DRAFT,
compute="_compute_exonerated_390",
store=True,
readonly=False,
string=u"Exonerado mod. 390",
help=u"Exonerado de la Declaración-resumen anual del IVA, modelo 390: "
u"Volumen de operaciones (art. 121 LIVA)",
)
has_operation_volume = fields.Boolean(
string=u"¿Volumen de operaciones?",
default=True,
readonly=True,
states=EDITABLE_ON_DRAFT,
help=u"¿Existe volumen de operaciones (art. 121 LIVA)?",
)
has_347 = fields.Boolean(
string=u"¿Obligación del 347?",
default=True,
states=NON_EDITABLE_ON_DONE,
help=u"Marque la casilla si el sujeto pasivo ha efectuado con alguna "
u"persona o entidad operaciones por las que tenga obligación de "
u"presentar la declaración anual de operaciones con terceras "
u"personas (modelo 347).",
)
is_voluntary_sii = fields.Boolean(
string=u"¿SII voluntario?",
states=NON_EDITABLE_ON_DONE,
help=u"¿Ha llevado voluntariamente los Libros registro del IVA a "
u"través de la Sede electrónica de la AEAT durante el ejercicio?",
)
main_activity_code = fields.Many2one(
comodel_name="l10n.es.aeat.mod303.report.activity.code",
domain=ACTIVITY_CODE_DOMAIN,
states=NON_EDITABLE_ON_DONE,
string=u"Código actividad principal",
)
main_activity_iae = fields.Char(
states=NON_EDITABLE_ON_DONE,
string=u"Epígrafe I.A.E. actividad principal",
size=4,
)
other_first_activity_code = fields.Many2one(
comodel_name="l10n.es.aeat.mod303.report.activity.code",
domain=ACTIVITY_CODE_DOMAIN,
states=NON_EDITABLE_ON_DONE,
string=u"Código 1ª actividad",
)
other_first_activity_iae = fields.Char(
string=u"Epígrafe I.A.E. 1ª actividad", states=NON_EDITABLE_ON_DONE, size=4,
)
other_second_activity_code = fields.Many2one(
comodel_name="l10n.es.aeat.mod303.report.activity.code",
domain=ACTIVITY_CODE_DOMAIN,
states=NON_EDITABLE_ON_DONE,
string=u"Código 2ª actividad",
)
other_second_activity_iae = fields.Char(
string=u"Epígrafe I.A.E. 2ª actividad", states=NON_EDITABLE_ON_DONE, size=4,
)
other_third_activity_code = fields.Many2one(
comodel_name="l10n.es.aeat.mod303.report.activity.code",
domain=ACTIVITY_CODE_DOMAIN,
states=NON_EDITABLE_ON_DONE,
string=u"Código 3ª actividad",
)
other_third_activity_iae = fields.Char(
string=u"Epígrafe I.A.E. 3ª actividad", states=NON_EDITABLE_ON_DONE, size=4,
)
other_fourth_activity_code = fields.Many2one(
comodel_name="l10n.es.aeat.mod303.report.activity.code",
domain=ACTIVITY_CODE_DOMAIN,
states=NON_EDITABLE_ON_DONE,
string=u"Código 4ª actividad",
)
other_fourth_activity_iae = fields.Char(
string=u"Epígrafe I.A.E. 4ª actividad", states=NON_EDITABLE_ON_DONE, size=4,
)
other_fifth_activity_code = fields.Many2one(
comodel_name="l10n.es.aeat.mod303.report.activity.code",
domain=ACTIVITY_CODE_DOMAIN,
states=NON_EDITABLE_ON_DONE,
string=u"Código 5ª actividad",
)
other_fifth_activity_iae = fields.Char(
string=u"Epígrafe I.A.E. 5ª actividad", states=NON_EDITABLE_ON_DONE, size=4,
)
casilla_88 = fields.Float(
string=u"[88] Total volumen operaciones",
compute="_compute_casilla_88",
help=u"Información adicional - Operaciones realizadas en el ejercicio"
u" - Total volumen de operaciones ([80]+[81]+[93]+[94]+[83]+[84]"
u"+[125]+[126]+[127]+[128]+[86]+[95]+[96]+[97]+[98]-[79]-[99])",
store=True,
)
marca_sepa = fields.Selection(
selection=[
("0", "0 Vacía"),
("1", "1 Cuenta España"),
("2", "2 Unión Europea SEPA"),
("3", "3 Resto Países"),
],
compute="_compute_marca_sepa",
)
@api.depends("partner_bank_id")
def _compute_marca_sepa(self):
for record in self:
if record.partner_bank_id.bank_id.country == self.env.ref("base.es"):
record.marca_sepa = "1"
elif (
record.partner_bank_id.bank_id.country
in self.env.ref("base.europe").country_ids
):
record.marca_sepa = "2"
elif record.partner_bank_id.bank_id.country:
record.marca_sepa = "3"
else:
record.marca_sepa = "0"
@api.depends("date_start", "cuota_compensar")
def _compute_exception_msg(self):
super(L10nEsAeatMod303Report, self)._compute_exception_msg()
for mod303 in self.filtered(lambda x: x.state != "draft"):
# Get result from previous declarations, in order to identify if
# there is an amount to compensate.
prev_reports = mod303._get_previous_fiscalyear_reports(
mod303.date_start
).filtered(lambda x: x.state not in ["draft", "cancelled"])
if not prev_reports:
continue
prev_report = min(
prev_reports,
key=lambda x: abs(
fields.Date.to_date(x.date_end)
- fields.Date.to_date(mod303.date_start)
),
)
if prev_report.result_type == "C" and not mod303.cuota_compensar:
if mod303.exception_msg:
mod303.exception_msg += "\n"
else:
mod303.exception_msg = ""
mod303.exception_msg += _(
"In previous declarations this year you reported a "
"Result Type 'To Compensate'. You might need to fill "
"field '[67] Fees to compensate' in this declaration."
)
@api.depends("company_id", "result_type")
def _compute_counterpart_account_id(self):
for record in self:
code = "%s%%" % _ACCOUNT_PATTERN_MAP.get(record.result_type, "4750")
record.counterpart_account_id = self.env["account.account"].search(
[("code", "=like", code), ("company_id", "=", record.company_id.id)],
limit=1,
)
@api.depends("period_type")
def _compute_regularizacion_anual(self):
for record in self:
if record.period_type not in ("4T", "12"):
record.regularizacion_anual = 0
@api.depends("period_type")
def _compute_exonerated_390(self):
for record in self:
if record.period_type not in ("4T", "12"):
record.exonerated_390 = "2"
@api.depends("tax_line_ids", "tax_line_ids.amount")
def _compute_total_devengado(self):
casillas_devengado = (152, 3, 155, 6, 9, 11, 13, 15, 158, 18, 21, 24, 26)
for report in self:
tax_lines = report.tax_line_ids.filtered(
lambda x: x.field_number in casillas_devengado
)
report.total_devengado = report.currency_id.round(
sum(tax_lines.mapped("amount"))
)
@api.depends("tax_line_ids", "tax_line_ids.amount")
def _compute_total_deducir(self):
casillas_deducir = (29, 31, 33, 35, 37, 39, 41, 42, 43, 44)
for report in self:
tax_lines = report.tax_line_ids.filtered(
lambda x: x.field_number in casillas_deducir
)
report.total_deducir = report.currency_id.round(
sum(tax_lines.mapped("amount"))
)
@api.depends("total_devengado", "total_deducir")
def _compute_casilla_46(self):
for report in self:
report.casilla_46 = report.currency_id.round(
report.total_devengado - report.total_deducir
)
@api.depends("porcentaje_atribuible_estado", "casilla_46")
def _compute_atribuible_estado(self):
for report in self:
report.atribuible_estado = report.currency_id.round(
report.casilla_46 * report.porcentaje_atribuible_estado / 100.0
)
@api.depends("potential_cuota_compensar", "cuota_compensar")
def _compute_remaining_cuota_compensar(self):
for report in self:
report.remaining_cuota_compensar = report.currency_id.round(
report.potential_cuota_compensar - report.cuota_compensar
)
@api.depends(
"atribuible_estado", "cuota_compensar", "regularizacion_anual", "casilla_77"
)
def _compute_casilla_69(self):
for report in self:
report.casilla_69 = report.currency_id.round(
report.atribuible_estado
+ report.casilla_77
- report.cuota_compensar
+ report.regularizacion_anual
)
@api.depends("casilla_69", "previous_result")
def _compute_resultado_liquidacion(self):
# TODO: Add field 109
for report in self:
report.resultado_liquidacion = report.currency_id.round(
report.casilla_69 - report.previous_result
)
@api.depends("tax_line_ids", "tax_line_ids.amount")
def _compute_casilla_88(self):
taxes_88 = (80, 81, 83, 84, 85, 86, 93, 94, 95, 96, 97, 98, 125, 126, 127, 128)
for report in self:
report.casilla_88 = report.currency_id.round(
sum(
report.tax_line_ids.filtered(
lambda x: x.field_number in taxes_88
).mapped("amount")
)
- sum(
report.tax_line_ids.filtered(
lambda x: x.field_number in (79, 99,)
).mapped("amount")
)
)
def _compute_allow_posting(self):
for report in self:
report.allow_posting = True
@api.depends(
"resultado_liquidacion", "period_type", "devolucion_mensual", "marca_sepa"
)
def _compute_result_type(self):
for report in self:
result = float_compare(
report.resultado_liquidacion,
0,
precision_digits=report.currency_id.decimal_places,
)
if result == 0:
report.result_type = "N"
elif result == 1:
report.result_type = "I"
else:
if report.devolucion_mensual or report.period_type in ("4T", "12"):
report.result_type = "D" if report.marca_sepa == "1" else "X"
else:
report.result_type = "C"
@api.onchange("statement_type")
def onchange_type(self):
if self.statement_type != "C":
self.previous_result = 0
def calculate(self):
res = super(L10nEsAeatMod303Report, self).calculate()
for mod303 in self:
prev_reports = mod303._get_previous_fiscalyear_reports(
mod303.date_start
).filtered(lambda x: x.state not in ["draft", "cancelled"])
if not prev_reports:
continue
prev_report = min(
prev_reports,
key=lambda x: abs(
fields.Date.to_date(x.date_end)
- fields.Date.to_date(mod303.date_start)
),
)
if prev_report.result_type == "C":
amount = abs(prev_report.resultado_liquidacion)
mod303.write(
{"cuota_compensar": amount, "potential_cuota_compensar": amount}
)
return res
def button_confirm(self):
"""Check records"""
msg = ""
for mod303 in self:
if mod303.result_type == "D" and not mod303.partner_bank_id:
msg = _("Select an account for receiving the money")
if msg:
raise exceptions.Warning(msg)
return super(L10nEsAeatMod303Report, self).button_confirm()
@api.constrains("potential_cuota_compensar", "cuota_compensar")
def check_qty(self):
if self.filtered(
lambda x: (
x.cuota_compensar < 0
or x.remaining_cuota_compensar < 0
or (x.potential_cuota_compensar - x.cuota_compensar) < 0
)
):
raise exceptions.ValidationError(
_("The fee to compensate must be indicated as a positive number.")
)
def _get_tax_lines(self, date_start, date_end, map_line):
"""Don't populate results for fields 79-99 for reports different from
last of the year one or when not exonerated of presenting model 390.
"""
if 79 <= map_line.field_number <= 99 or map_line.field_number == 125:
if (
self.exonerated_390 == "2"
or not self.has_operation_volume
or self.period_type not in ("4T", "12")
):
return self.env["account.move.line"]
return super(L10nEsAeatMod303Report, self)._get_tax_lines(
date_start, date_end, map_line,
)
def _get_move_line_domain(self, date_start, date_end, map_line):
"""Changes dates to full year when the summary on last report of the
year for the corresponding fields. Only field number is checked as
the complete check for not bringing results is done on
`_get_tax_lines`.
"""
if 79 <= map_line.field_number <= 99 or map_line.field_number == 125:
date_start = date_start.replace(day=1, month=1)
date_end = date_end.replace(day=31, month=12)
return super(L10nEsAeatMod303Report, self)._get_move_line_domain(
date_start, date_end, map_line,
)
class L10nEsAeatMod303ReportActivityCode(models.Model):
_name = "l10n.es.aeat.mod303.report.activity.code"
_order = "period_type,code,id"
_description = "AEAT 303 Report Activities Codes"
period_type = fields.Selection(selection=[("4T", "4T"), ("12", "December")])
code = fields.Char(string="Activity code", required=True)
name = fields.Char(string="Activity name", translate=True, required=True,)
date_start = fields.Date(string="Starting date")
date_end = fields.Date(string="Ending date")
|
PypiClean
|
/tensorflow_fedora28-1.9.0rc01-cp27-cp27mu-manylinux1_x86_64.whl/tensorflow_fedora28-1.9.0rc0.data/purelib/tensorflow/contrib/learn/python/learn/utils/gc.py
|
r"""System for specifying garbage collection (GC) of path based data (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# Create the directories.
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# Create a simple parser that pulls the export_version from the directory.
path_regex = "^" + re.escape(base_dir) + "/(\\d+)$"
def parser(path):
match = re.match(path_regex, path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
all_paths = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print(every_fifth(all_paths)) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print(largest_three(all_paths)) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print(both(all_paths)) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# Delete everything not in 'both'.
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
Path = collections.namedtuple('Path', 'path export_version')
@deprecated(None, 'Please implement your own file management or use Saver.')
def largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
@deprecated(None, 'Please implement your own file management or use Saver.')
def one_of_every_n_export_versions(n):
"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
[0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
interval the largest is kept.
"""
def keep(paths):
"""A filter function that keeps exactly one out of every n paths."""
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
# Find the interval (with a special case to map export_version = 0 to
# interval 0).
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
@deprecated(None, 'Please implement your own file management or use Saver.')
def mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
@deprecated(None, 'Please implement your own file management or use Saver.')
def union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
lf: first filter
rf: second filter
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
@deprecated(None, 'Please implement your own file management or use Saver.')
def negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
A filter function that returns the negation of f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
@deprecated(None, 'Please implement your own file name management.')
def get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
p = parser(Path(os.path.join(compat.as_str_any(base_dir),
compat.as_str_any(r)),
None))
if p:
paths.append(p)
return sorted(paths)
|
PypiClean
|