| id (string, lengths 1-8) | text (string, lengths 6-1.05M) | dataset_id (1 class) |
|---|---|---|
/ase_koopmans-0.1.4-py3-none-any.whl/ase/io/lammpsrun.py
|
import gzip
import struct
from os.path import splitext
from collections import deque
import numpy as np
from ase.atoms import Atoms
from ase.quaternions import Quaternions
from ase.calculators.singlepoint import SinglePointCalculator
from ase.parallel import paropen
from ase.calculators.lammps import convert
def read_lammps_dump(infileobj, **kwargs):
"""Method which reads a LAMMPS dump file.
LAMMPS chooses output method depending on the given suffix:
- .bin : binary file
- .gz : output piped through gzip
- .mpiio: using mpiio (should be like cleartext,
with different ordering)
- else : normal clear-text format
:param infileobj: path to the file, an opened file or a file-like stream
"""
# !TODO: add support for lammps-regex naming schemes (output per
# processor and timestep wildcards)
opened = False
if isinstance(infileobj, str):
opened = True
suffix = splitext(infileobj)[-1]
if suffix == ".bin":
fileobj = paropen(infileobj, "rb")
elif suffix == ".gz":
# !TODO: save for parallel execution?
fileobj = gzip.open(infileobj, "rb")
else:
fileobj = paropen(infileobj)
else:
suffix = splitext(infileobj.name)[-1]
fileobj = infileobj
if suffix == ".bin":
out = read_lammps_dump_binary(fileobj, **kwargs)
if opened:
fileobj.close()
return out
out = read_lammps_dump_text(fileobj, **kwargs)
if opened:
fileobj.close()
return out
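# Usage sketch (illustrative, not part of the original module): the file name
# below is a placeholder. The helper dispatches to the text or binary reader
# based on the file suffix.
#
#     last_snapshot = read_lammps_dump("dump.lammpstrj")          # index=-1
#     all_snapshots = read_lammps_dump("dump.lammpstrj", index=slice(None))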
def lammps_data_to_ase_atoms(
data,
colnames,
cell,
celldisp,
pbc=False,
atomsobj=Atoms,
order=True,
specorder=None,
prismobj=None,
units="metal",
):
"""Extract positions and other per-atom parameters and create Atoms
:param data: per atom data
:param colnames: index for data
:param cell: cell dimensions
:param celldisp: origin shift
:param pbc: periodic boundaries
:param atomsobj: function to create ase-Atoms object
:param order: sort atoms by id. Might be faster to turn off
:param specorder: list of species to map lammps types to ase-species
(.dump files usually do not contain a type-to-species mapping)
:param prismobj: Coordinate transformation between lammps and ase
:type prismobj: Prism
:param units: lammps units for unit transformation between lammps and ase
:returns: Atoms object
:rtype: Atoms
"""
# data array of doubles
ids = data[:, colnames.index("id")].astype(int)
types = data[:, colnames.index("type")].astype(int)
if order:
sort_order = np.argsort(ids)
ids = ids[sort_order]
data = data[sort_order, :]
types = types[sort_order]
# reconstruct types from given specorder
if specorder:
types = [specorder[t - 1] for t in types]
def get_quantity(labels, quantity=None):
try:
cols = [colnames.index(label) for label in labels]
if quantity:
return convert(data[:, cols], quantity, units, "ASE")
return data[:, cols]
except ValueError:
return None
# slice data block into columns
# + perform necessary conversions to ASE units
positions = get_quantity(["x", "y", "z"], "distance")
scaled_positions = get_quantity(["xs", "ys", "zs"])
velocities = get_quantity(["vx", "vy", "vz"], "velocity")
charges = get_quantity(["q"], "charge")
forces = get_quantity(["fx", "fy", "fz"], "force")
# !TODO: how need quaternions be converted?
quaternions = get_quantity(["c_q[1]", "c_q[2]", "c_q[3]", "c_q[4]"])
# convert cell
cell = convert(cell, "distance", units, "ASE")
celldisp = convert(celldisp, "distance", units, "ASE")
if prismobj:
celldisp = prismobj.vector_to_ase(celldisp)
cell = prismobj.update_cell(cell)
if quaternions is not None:
out_atoms = Quaternions(
symbols=types,
positions=positions,
cell=cell,
celldisp=celldisp,
pbc=pbc,
quaternions=quaternions,
)
elif positions is not None:
# transform vectors from the lammps coordinate system back to ASE
# (applies to positions, velocities and forces)
if prismobj:
positions = prismobj.vector_to_ase(positions, wrap=True)
out_atoms = atomsobj(
symbols=types,
positions=positions,
pbc=pbc,
celldisp=celldisp,
cell=cell
)
elif scaled_positions is not None:
out_atoms = atomsobj(
symbols=types,
scaled_positions=scaled_positions,
pbc=pbc,
celldisp=celldisp,
cell=cell,
)
if velocities is not None:
if prismobj:
velocities = prismobj.vector_to_ase(velocities)
out_atoms.set_velocities(velocities)
if charges is not None:
out_atoms.set_initial_charges(charges)
if forces is not None:
if prismobj:
forces = prismobj.vector_to_ase(forces)
# !TODO: use another calculator if available (or move forces
# to atoms.property) (other problem: synchronizing
# parallel runs)
calculator = SinglePointCalculator(out_atoms, energy=0.0, forces=forces)
out_atoms.calc = calculator
# process the extra columns of fixes, variables and computes
# that can be dumped, add as additional arrays to atoms object
for colname in colnames:
# determine if it is a fix, variable or compute (but not the quaternion)
if (colname.startswith('f_') or colname.startswith('v_') or
(colname.startswith('c_') and not colname.startswith('c_q['))):
out_atoms.new_array(colname, get_quantity([colname]), dtype='float')
return out_atoms
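# Usage sketch (illustrative, not part of the original module): two atoms with
# an "id type x y z" column layout in a 10 Angstrom cubic box; specorder maps
# lammps type 1 to Cu.
#
#     data = np.array([[1, 1, 0.0, 0.0, 0.0],
#                      [2, 1, 2.5, 0.0, 0.0]])
#     atoms = lammps_data_to_ase_atoms(
#         data, ["id", "type", "x", "y", "z"],
#         cell=np.eye(3) * 10.0, celldisp=np.zeros(3),
#         specorder=["Cu"])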
def construct_cell(diagdisp, offdiag):
"""Help function to create an ASE-cell with displacement vector from
the lammps coordination system parameters.
:param diagdisp: cell dimension convoluted with the displacement vector
:param offdiag: off-diagonal cell elements
:returns: cell and cell displacement vector
:rtype: tuple
"""
xlo, xhi, ylo, yhi, zlo, zhi = diagdisp
xy, xz, yz = offdiag
# create ase-cell from lammps-box
xhilo = (xhi - xlo) - abs(xy) - abs(xz)
yhilo = (yhi - ylo) - abs(yz)
zhilo = zhi - zlo
celldispx = xlo - min(0, xy) - min(0, xz)
celldispy = ylo - min(0, yz)
celldispz = zlo
cell = np.array([[xhilo, 0, 0], [xy, yhilo, 0], [xz, yz, zhilo]])
celldisp = np.array([celldispx, celldispy, celldispz])
return cell, celldisp
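# Worked example (illustrative): an orthogonal 10 x 10 x 10 box starting at the
# origin has no tilt, so the cell is diagonal and the displacement is zero.
#
#     cell, celldisp = construct_cell((0, 10, 0, 10, 0, 10), (0, 0, 0))
#     # cell     -> [[10, 0, 0], [0, 10, 0], [0, 0, 10]]
#     # celldisp -> [0, 0, 0]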
def get_max_index(index):
if np.isscalar(index):
return index
elif isinstance(index, slice):
return index.stop if (index.stop is not None) else float("inf")
def read_lammps_dump_text(fileobj, index=-1, **kwargs):
"""Process cleartext lammps dumpfiles
:param fileobj: filestream providing the trajectory data
:param index: integer or slice object (default: get the last timestep)
:returns: list of Atoms objects
:rtype: list
"""
# Load all dumped timesteps into memory simultaneously
lines = deque(fileobj.readlines())
index_end = get_max_index(index)
n_atoms = 0
images = []
while len(lines) > n_atoms:
line = lines.popleft()
if "ITEM: TIMESTEP" in line:
n_atoms = 0
line = lines.popleft()
# !TODO: pyflakes complains about this line -> do something
# ntimestep = int(line.split()[0]) # NOQA
if "ITEM: NUMBER OF ATOMS" in line:
line = lines.popleft()
n_atoms = int(line.split()[0])
if "ITEM: BOX BOUNDS" in line:
# save labels behind "ITEM: BOX BOUNDS" in triclinic case
# (>=lammps-7Jul09)
# !TODO: handle periodic boundary conditions in tilt_items
tilt_items = line.split()[3:]
celldatarows = [lines.popleft() for _ in range(3)]
celldata = np.loadtxt(celldatarows)
diagdisp = celldata[:, :2].reshape(6, 1).flatten()
# determine cell tilt (triclinic case!)
if len(celldata[0]) > 2:
# for >=lammps-7Jul09 use labels behind "ITEM: BOX BOUNDS"
# to assign tilt (vector) elements ...
offdiag = celldata[:, 2]
# ... otherwise assume default order in 3rd column
# (if the latter was present)
if len(tilt_items) >= 3:
sort_index = [tilt_items.index(i)
for i in ["xy", "xz", "yz"]]
offdiag = offdiag[sort_index]
else:
offdiag = (0.0,) * 3
cell, celldisp = construct_cell(diagdisp, offdiag)
# Handle pbc conditions
if len(tilt_items) > 3:
pbc = ["p" in d.lower() for d in tilt_items[3:]]
else:
pbc = (False,) * 3
if "ITEM: ATOMS" in line:
colnames = line.split()[2:]
datarows = [lines.popleft() for _ in range(n_atoms)]
data = np.loadtxt(datarows)
out_atoms = lammps_data_to_ase_atoms(
data=data,
colnames=colnames,
cell=cell,
celldisp=celldisp,
atomsobj=Atoms,
pbc=pbc,
**kwargs
)
images.append(out_atoms)
if len(images) > index_end >= 0:
break
return images[index]
def read_lammps_dump_binary(
fileobj, index=-1, colnames=None, intformat="SMALLBIG", **kwargs
):
"""Read binary dump-files (after binary2txt.cpp from lammps/tools)
:param fileobj: file-stream containing the binary lammps data
:param index: integer or slice object (default: get the last timestep)
:param colnames: column names of the per-atom data; binary dumps carry no
header, so a default layout is assumed unless specified
:param intformat: lammps supports different integer sizes. This is set at
compile time and unfortunately cannot be derived from the data file
:returns: list of Atoms-objects
:rtype: list
"""
# depending on the chosen compilation flag lammps uses either normal
# integers or long long for its id or timestep numbering
# !TODO: tags are cast to double -> missing/double ids (add check?)
tagformat, bigformat = dict(
SMALLSMALL=("i", "i"), SMALLBIG=("i", "q"), BIGBIG=("q", "q")
)[intformat]
index_end = get_max_index(index)
# Standard columns layout from lammpsrun
if not colnames:
colnames = ["id", "type", "x", "y", "z",
"vx", "vy", "vz", "fx", "fy", "fz"]
images = []
# wrap struct.unpack to raise EOFError
def read_variables(string):
obj_len = struct.calcsize(string)
data_obj = fileobj.read(obj_len)
if obj_len != len(data_obj):
raise EOFError
return struct.unpack(string, data_obj)
while True:
try:
# read header
ntimestep, = read_variables("=" + bigformat)
n_atoms, triclinic = read_variables("=" + bigformat + "i")
boundary = read_variables("=6i")
diagdisp = read_variables("=6d")
if triclinic != 0:
offdiag = read_variables("=3d")
else:
offdiag = (0.0,) * 3
size_one, nchunk = read_variables("=2i")
if len(colnames) != size_one:
raise ValueError("Provided columns do not match binary file")
# lammps cells/boxes can have different boundary conditions on each
# side (this mainly makes sense for different non-periodic conditions,
# e.g. [f]ixed and [s]hrink for an irradiation simulation)
# periodic case: boundary code 0 = 'p'
# non-periodic cases: 1 = 'f', 2 = 's', 3 = 'm'
pbc = np.sum(np.array(boundary).reshape((3, 2)), axis=1) == 0
cell, celldisp = construct_cell(diagdisp, offdiag)
data = []
for _ in range(nchunk):
# number-of-data-entries
n_data, = read_variables("=i")
# retrieve per atom data
data += read_variables("=" + str(n_data) + "d")
data = np.array(data).reshape((-1, size_one))
# map data-chunk to ase atoms
out_atoms = lammps_data_to_ase_atoms(
data=data,
colnames=colnames,
cell=cell,
celldisp=celldisp,
pbc=pbc,
**kwargs
)
images.append(out_atoms)
# stop if requested index has been found
if len(images) > index_end >= 0:
break
except EOFError:
break
return images[index]
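# Usage sketch (illustrative, not part of the original module): binary dumps
# carry no column header, so pass the layout explicitly when it differs from
# the default. The file name is a placeholder.
#
#     with open("dump.bin", "rb") as fd:
#         images = read_lammps_dump_binary(
#             fd, index=slice(None),
#             colnames=["id", "type", "x", "y", "z"])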
|
PypiClean
|
/lmql-0.7b3.tar.gz/lmql-0.7b3/docs/source/blog/release-0.0.5.md
|
---
release: 2023-04-17 13:30:00 +0000
authors: team
---
# LMQL Release 0.0.5
Today we are releasing version 0.0.5 of LMQL. This release focuses on stability and performance improvements. For a detailed list of changes, please see below. We are particularly excited about the first community contributions that have been merged as part of this release, with many more in the works.
`lmql==0.0.5` has been published on [PyPI](https://pypi.org/project/lmql/), based on the current `main` branch of the [GitHub repository](https://github.com/eth-sri/lmql). The updated version has also been deployed to the browser-based [lmql.ai/playground](http://lmql.ai/playground).
### Changelog
* **Decoder Performance** The `argmax` and `sample` decoders have undergone some optimizations, allowing them to run faster. This results in a *20-30% speed-up* on common query workloads. [#24](https://github.com/eth-sri/lmql/pull/24).
* **Postprocessing Semantics** Internally, LMQL now allows constraints to implement postprocessing semantics. This is used to convert variable values after they have been completed, to a more normalized form in the prompt, and to a semantically meaningful data type in the context of the query code. [#24](https://github.com/eth-sri/lmql/pull/24).
For example, when using an `INT(<var>)` constraint on a generated number, the model will be restricted to only generate valid integers, and now, the resulting `NUM` value will additionally be converted to an `int` value:
<div class="highlight lmql"><button href="" onclick="openPlaygroundSnippet(this, 'doc-snippets/releases-release-0-0-5-md-postprocessing-int-value')">Open In Playground</button><pre><span></span><span class="kp">argmax</span>
<span class="s2">"My favorite number is: [NUM]</span><span class="se">\n</span><span class="s2">"</span>
<span class="nb">print</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">NUM</span><span class="p">),</span> <span class="n">NUM</span> <span class="o">*</span> <span class="mi">2</span><span class="p">)</span> <span class="c1"># <class 'int'> 4</span>
<span class="s2">"Number times two is {NUM * 2}"</span>
<span class="kn">from</span>
<span class="s1">'openai/text-ada-001'</span>
<span class="kp">where</span>
<span class="n">INT</span><span class="p">(</span><span class="n">NUM</span><span class="p">)</span> </pre></div>
* **Core Interpreter** The LMQL core interpreter has been completely reimplemented. This fixes a couple of minor issues and, overall, improves reliability and performance when dealing with *branching* decoding algorithms. [#24](https://github.com/eth-sri/lmql/pull/24).
* **Playground** Locally and when used in-browser, the [LMQL Playground](http://lmql.ai/playground) now *streams debugger information* from the LMQL interpreter incrementally. This leads to speed-ups when running in the Playground, especially with longer outputs. [#27f9a8ad](https://github.com/eth-sri/lmql/commit/27f9a8adb819f732608ef61c9aca9dca579dc536).
* **Other Fixes**:
- When used from within Python (as decorated function), LMQL code no longer has to be doubly-escaped, e.g. you can now write `STOPS_AT(VAR, "\n")` instead of `STOPS_AT(VAR, "\\n")`
- The LMQL inference API buffers requests that come in during startup, to avoid errors when the server is not yet ready. [#15](https://github.com/eth-sri/lmql/pull/15), thanks to [@chrispan](https://github.com/chrispan).
- OpenAI request parallelization no longer leads to a worker-process-related error on Linux systems. [#6](https://github.com/eth-sri/lmql/issues/6).
### Preview
Apart from the changes above, we are also working on a number of other features, including:
* **llama.cpp support** as started in [this PR](https://github.com/eth-sri/lmql/pull/18), thanks to [@CircArgs](https://github.com/CircArgs).
* Support for **Type Constraints**, e.g. `type(VAR) is DataClass`, that automatically force the model to produce a value that structurally conforms to the given type. See this [Twitter thread](https://twitter.com/lbeurerkellner/status/1646187597901733889) for more details.
* Support for using **Antlr parsers** during query execution, to force the model to produce a value that conforms to a given grammar.
* **Extending Logit Masking to OpenAI Chat Models**. This will enable full support for LMQL constraints with e.g. `chatgpt` and `gpt-4` models. See [#25](https://github.com/eth-sri/lmql/pull/25), thanks to [@kharvd](https://github.com/kharvd).
|
PypiClean
|
/solver/learning/rl_base/buffer.py
|
import torch
class RolloutBuffer:
def __init__(self, max_size=None):
self.curr_idx = 0
self.max_size = max_size
self.basic_items = ['observations', 'actions', 'rewards', 'dones', 'next_observations', 'logprobs', 'values']
self.calc_items = ['advantages', 'returns']
self.extend_items = ['hidden_states', 'cell_states', 'action_mask', 'entropies']
self.safe_rl_items = ['costs', 'cost_returns']
self.other_items = ['baseline_cost_returns']
self.all_items = self.basic_items + self.calc_items + self.extend_items + self.safe_rl_items + self.other_items
for item in self.all_items:
setattr(self, item, [])
def reset(self):
self.curr_idx = 0
for item in self.all_items:
item_list = getattr(self, item)
del item_list[:]
def clear(self):
self.reset()
def size(self):
return len(self.logprobs)
def is_full(self):
if self.max_size is None:
return False
return self.curr_idx == self.max_size
def add(self, obs, action, reward, done, logprob, value=None):
self.observations.append(obs)
self.actions.append(action)
self.rewards.append(reward)
self.dones.append(done)
self.logprobs.append(logprob)
self.values.append(value)
self.curr_idx += 1
def merge(self, buffer):
for item in self.all_items:
main_item_list = getattr(self, item)
sub_item_list = getattr(buffer, item)
main_item_list += sub_item_list
# self.observations += copy.deepcopy(buffer.observation)
# self.actions += copy.deepcopy(buffer.actions)
# self.rewards += copy.deepcopy(buffer.rewards)
# self.dones += copy.deepcopy(buffer.dones)
# self.logprobs += copy.deepcopy(buffer.logprobs)
# self.values += copy.deepcopy(buffer.values)
# self.advantages += copy.deepcopy(buffer.advantages)
# self.returns += copy.deepcopy(buffer.returns)
# self.hidden_states += copy.deepcopy(buffer.hidden_states)
def compute_returns_and_advantages(self, last_value, gamma=0.99, gae_lambda=0.98, method='gae') -> None:
# calculate expected returns and advantages (Generalized Advantage Estimation)
if isinstance(last_value, torch.Tensor):
last_value = last_value.item()
buffer_size = self.size()
self.returns = [0] * buffer_size
self.advantages = [0] * buffer_size
if method == 'gae':
last_gae_lam = 0
for step in reversed(range(buffer_size)):
if step == buffer_size - 1:
next_values = last_value
else:
next_values = self.values[step + 1]
next_non_terminal = 1.0 - self.dones[step]
delta = self.rewards[step] + gamma * next_values * next_non_terminal - self.values[step]
last_gae_lam = delta + gamma * gae_lambda * next_non_terminal * last_gae_lam
self.advantages[step] = last_gae_lam
self.returns[step] = self.advantages[step] + self.values[step]
elif method == 'ar_td':
self.dones[-1] = False
mean_reward = sum(self.rewards) / len(self.rewards)
for step in reversed(range(buffer_size)):
if step == buffer_size - 1:
next_values = last_value
else:
next_values = self.values[step + 1]
next_non_terminal = 1.0 - self.dones[step]
self.advantages[step] = self.rewards[step] - mean_reward + next_values * next_non_terminal - self.values[step]
self.returns[step] = self.advantages[step] + self.values[step]
elif method == 'ar_gae':
self.dones[-1] = False
last_gae_lam = 0
mean_reward = sum(self.rewards) / len(self.rewards)
for i in range(len(self.rewards)):
self.rewards[i] = self.rewards[i] - mean_reward
# for step in reversed(range(buffer_size)):
# if step == buffer_size - 1:
# next_values = last_value
# else:
# next_values = self.values[step + 1]
# next_non_terminal = 1.0 - self.dones[step]
# delta = self.rewards[step] + next_values * next_non_terminal - self.values[step] - mean_reward
# last_gae_lam = delta + gae_lambda * next_non_terminal * last_gae_lam
# self.advantages[step] = last_gae_lam
# self.returns[step] = self.advantages[step] + self.values[step]
# self.returns[step] = self.rewards[step] + next_values - mean_reward
# print(self.rewards[step], mean_reward, last_gae_lam, delta)
for step in reversed(range(buffer_size)):
if step == buffer_size - 1:
next_values = last_value
else:
next_values = self.values[step + 1]
next_non_terminal = 1.0 - self.dones[step]
delta = self.rewards[step] + gamma * next_values * next_non_terminal - self.values[step]
last_gae_lam = delta + gamma * gae_lambda * next_non_terminal * last_gae_lam
self.advantages[step] = last_gae_lam
self.returns[step] = self.advantages[step] + self.values[step]
elif method == 'mc':
self.returns = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(self.rewards), reversed(self.dones)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (gamma * discounted_reward)
self.returns.insert(0, discounted_reward)
if len(self.costs) != len(self.rewards):
return
discounted_cost = 0
for cost, is_terminal in zip(reversed(self.costs), reversed(self.dones)):
if is_terminal:
discounted_cost = 0
discounted_cost = cost + (gamma * discounted_cost)
self.cost_returns.insert(0, discounted_cost)
def compute_mc_returns(self, gamma=0.99):
discounted_reward = 0
for reward, is_terminal in zip(reversed(self.rewards), reversed(self.dones)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (gamma * discounted_reward)
self.returns.insert(0, discounted_reward)
if len(self.costs) != len(self.rewards):
return
discounted_cost = 0
for cost, is_terminal in zip(reversed(self.costs), reversed(self.dones)):
if is_terminal:
discounted_cost = 0
discounted_cost = cost + (gamma * discounted_cost)
self.cost_returns.insert(0, discounted_cost)
if __name__ == '__main__':
buffer = RolloutBuffer(max_size=1)
temp = [1, 2, 3]
for i in range(10):
buffer.temp = temp
buffer.observations.append(buffer.temp)
temp.append(i)
print(buffer.observations)
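# Usage sketch (illustrative, not part of the original module): fill the buffer
# with one short rollout and compute GAE advantages; the numbers are arbitrary
# placeholders.
#
#     buffer = RolloutBuffer()
#     for step in range(5):
#         buffer.add(obs=step, action=0, reward=1.0, done=(step == 4),
#                    logprob=0.0, value=0.5)
#     buffer.compute_returns_and_advantages(last_value=0.0, gamma=0.99,
#                                           gae_lambda=0.98, method='gae')
#     print(buffer.advantages, buffer.returns)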
|
PypiClean
|
/django-admin-star-1.0.4.tar.gz/django-admin-star-1.0.4/admin_star/static/vendors/ace-builds/src-min/mode-kotlin.js
|
define("ace/mode/kotlin_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text_highlight_rules").TextHighlightRules,s=function(){this.$rules={start:[{include:"#comments"},{token:["text","keyword.other.kotlin","text","entity.name.package.kotlin","text"],regex:/^(\s*)(package)\b(?:(\s*)([^ ;$]+)(\s*))?/},{include:"#imports"},{include:"#statements"}],"#classes":[{token:"text",regex:/(?=\s*(?:companion|class|object|interface))/,push:[{token:"text",regex:/}|(?=$)/,next:"pop"},{token:["keyword.other.kotlin","text"],regex:/\b((?:companion\s*)?)(class|object|interface)\b/,push:[{token:"text",regex:/(?=<|{|\(|:)/,next:"pop"},{token:"keyword.other.kotlin",regex:/\bobject\b/},{token:"entity.name.type.class.kotlin",regex:/\w+/}]},{token:"text",regex:/</,push:[{token:"text",regex:/>/,next:"pop"},{include:"#generics"}]},{token:"text",regex:/\(/,push:[{token:"text",regex:/\)/,next:"pop"},{include:"#parameters"}]},{token:"keyword.operator.declaration.kotlin",regex:/:/,push:[{token:"text",regex:/(?={|$)/,next:"pop"},{token:"entity.other.inherited-class.kotlin",regex:/\w+/},{token:"text",regex:/\(/,push:[{token:"text",regex:/\)/,next:"pop"},{include:"#expressions"}]}]},{token:"text",regex:/\{/,push:[{token:"text",regex:/\}/,next:"pop"},{include:"#statements"}]}]}],"#comments":[{token:"punctuation.definition.comment.kotlin",regex:/\/\*/,push:[{token:"punctuation.definition.comment.kotlin",regex:/\*\//,next:"pop"},{defaultToken:"comment.block.kotlin"}]},{token:["text","punctuation.definition.comment.kotlin","comment.line.double-slash.kotlin"],regex:/(\s*)(\/\/)(.*$)/}],"#constants":[{token:"constant.language.kotlin",regex:/\b(?:true|false|null|this|super)\b/},{token:"constant.numeric.kotlin",regex:/\b(?:0(?:x|X)[0-9a-fA-F]*|(?:[0-9]+\.?[0-9]*|\.[0-9]+)(?:(?:e|E)(?:\+|-)?[0-9]+)?)(?:[LlFfUuDd]|UL|ul)?\b/},{token:"constant.other.kotlin",regex:/\b[A-Z][A-Z0-9_]+\b/}],"#expressions":[{token:"text",regex:/\(/,push:[{token:"text",regex:/\)/,next:"pop"},{include:"#expressions"}]},{include:"#types"},{include:"#strings"},{include:"#constants"},{include:"#comments"},{include:"#keywords"}],"#functions":[{token:"text",regex:/(?=\s*fun)/,push:[{token:"text",regex:/}|(?=$)/,next:"pop"},{token:"keyword.other.kotlin",regex:/\bfun\b/,push:[{token:"text",regex:/(?=\()/,next:"pop"},{token:"text",regex:/</,push:[{token:"text",regex:/>/,next:"pop"},{include:"#generics"}]},{token:["text","entity.name.function.kotlin"],regex:/((?:[\.<\?>\w]+\.)?)(\w+)/}]},{token:"text",regex:/\(/,push:[{token:"text",regex:/\)/,next:"pop"},{include:"#parameters"}]},{token:"keyword.operator.declaration.kotlin",regex:/:/,push:[{token:"text",regex:/(?={|=|$)/,next:"pop"},{include:"#types"}]},{token:"text",regex:/\{/,push:[{token:"text",regex:/(?=\})/,next:"pop"},{include:"#statements"}]},{token:"keyword.operator.assignment.kotlin",regex:/=/,push:[{token:"text",regex:/(?=$)/,next:"pop"},{include:"#expressions"}]}]}],"#generics":[{token:"keyword.operator.declaration.kotlin",regex:/:/,push:[{token:"text",regex:/(?=,|>)/,next:"pop"},{include:"#types"}]},{include:"#keywords"},{token:"storage.type.generic.kotlin",regex:/\w+/}],"#getters-and-setters":[{token:["entity.name.function.kotlin","text"],regex:/\b(get)\b(\s*\(\s*\))/,push:[{token:"text",regex:/\}|(?=\bset\b)|$/,next:"pop"},{token:"keyword.operator.assignment.kotlin",regex:/=/,push:[{token:"text",regex:/(?=$|\bset\b)/,next:"pop"},{include:"#expressions"}]},{token:"text",regex:/\{/,push:[{token:"text",r
egex:/\}/,next:"pop"},{include:"#expressions"}]}]},{token:["entity.name.function.kotlin","text"],regex:/\b(set)\b(\s*)(?=\()/,push:[{token:"text",regex:/\}|(?=\bget\b)|$/,next:"pop"},{token:"text",regex:/\(/,push:[{token:"text",regex:/\)/,next:"pop"},{include:"#parameters"}]},{token:"keyword.operator.assignment.kotlin",regex:/=/,push:[{token:"text",regex:/(?=$|\bset\b)/,next:"pop"},{include:"#expressions"}]},{token:"text",regex:/\{/,push:[{token:"text",regex:/\}/,next:"pop"},{include:"#expressions"}]}]}],"#imports":[{token:["text","keyword.other.kotlin","text","keyword.other.kotlin"],regex:/^(\s*)(import)(\s+[^ $]+\s+)((?:as)?)/}],"#keywords":[{token:"storage.modifier.kotlin",regex:/\b(?:var|val|public|private|protected|abstract|final|enum|open|attribute|annotation|override|inline|var|val|vararg|lazy|in|out|internal|data|tailrec|operator|infix|const|yield|typealias|typeof)\b/},{token:"keyword.control.catch-exception.kotlin",regex:/\b(?:try|catch|finally|throw)\b/},{token:"keyword.control.kotlin",regex:/\b(?:if|else|while|for|do|return|when|where|break|continue)\b/},{token:"keyword.operator.kotlin",regex:/\b(?:in|is|as|assert)\b/},{token:"keyword.operator.comparison.kotlin",regex:/==|!=|===|!==|<=|>=|<|>/},{token:"keyword.operator.assignment.kotlin",regex:/=/},{token:"keyword.operator.declaration.kotlin",regex:/:/},{token:"keyword.operator.dot.kotlin",regex:/\./},{token:"keyword.operator.increment-decrement.kotlin",regex:/\-\-|\+\+/},{token:"keyword.operator.arithmetic.kotlin",regex:/\-|\+|\*|\/|%/},{token:"keyword.operator.arithmetic.assign.kotlin",regex:/\+=|\-=|\*=|\/=/},{token:"keyword.operator.logical.kotlin",regex:/!|&&|\|\|/},{token:"keyword.operator.range.kotlin",regex:/\.\./},{token:"punctuation.terminator.kotlin",regex:/;/}],"#namespaces":[{token:"keyword.other.kotlin",regex:/\bnamespace\b/},{token:"text",regex:/\{/,push:[{token:"text",regex:/\}/,next:"pop"},{include:"#statements"}]}],"#parameters":[{token:"keyword.operator.declaration.kotlin",regex:/:/,push:[{token:"text",regex:/(?=,|\)|=)/,next:"pop"},{include:"#types"}]},{token:"keyword.operator.declaration.kotlin",regex:/=/,push:[{token:"text",regex:/(?=,|\))/,next:"pop"},{include:"#expressions"}]},{include:"#keywords"},{token:"variable.parameter.function.kotlin",regex:/\w+/}],"#statements":[{include:"#namespaces"},{include:"#typedefs"},{include:"#classes"},{include:"#functions"},{include:"#variables"},{include:"#getters-and-setters"},{include:"#expressions"}],"#strings":[{token:"punctuation.definition.string.begin.kotlin",regex:/"""/,push:[{token:"punctuation.definition.string.end.kotlin",regex:/"""/,next:"pop"},{token:"variable.parameter.template.kotlin",regex:/\$\w+|\$\{[^\}]+\}/},{token:"constant.character.escape.kotlin",regex:/\\./},{defaultToken:"string.quoted.third.kotlin"}]},{token:"punctuation.definition.string.begin.kotlin",regex:/"/,push:[{token:"punctuation.definition.string.end.kotlin",regex:/"/,next:"pop"},{token:"variable.parameter.template.kotlin",regex:/\$\w+|\$\{[^\}]+\}/},{token:"constant.character.escape.kotlin",regex:/\\./},{defaultToken:"string.quoted.double.kotlin"}]},{token:"punctuation.definition.string.begin.kotlin",regex:/'/,push:[{token:"punctuation.definition.string.end.kotlin",regex:/'/,next:"pop"},{token:"constant.character.escape.kotlin",regex:/\\./},{defaultToken:"string.quoted.single.kotlin"}]},{token:"punctuation.definition.string.begin.kotlin",regex:/`/,push:[{token:"punctuation.definition.string.end.kotlin",regex:/`/,next:"pop"},{defaultToken:"string.quoted.single.kotlin"}]}],"#typedefs":[{t
oken:"text",regex:/(?=\s*type)/,push:[{token:"text",regex:/(?=$)/,next:"pop"},{token:"keyword.other.kotlin",regex:/\btype\b/},{token:"text",regex:/</,push:[{token:"text",regex:/>/,next:"pop"},{include:"#generics"}]},{include:"#expressions"}]}],"#types":[{token:"storage.type.buildin.kotlin",regex:/\b(?:Any|Unit|String|Int|Boolean|Char|Long|Double|Float|Short|Byte|dynamic)\b/},{token:"storage.type.buildin.array.kotlin",regex:/\b(?:IntArray|BooleanArray|CharArray|LongArray|DoubleArray|FloatArray|ShortArray|ByteArray)\b/},{token:["storage.type.buildin.collection.kotlin","text"],regex:/\b(Array|List|Map)(<\b)/,push:[{token:"text",regex:/>/,next:"pop"},{include:"#types"},{include:"#keywords"}]},{token:"text",regex:/\w+</,push:[{token:"text",regex:/>/,next:"pop"},{include:"#types"},{include:"#keywords"}]},{token:["keyword.operator.tuple.kotlin","text"],regex:/(#)(\()/,push:[{token:"text",regex:/\)/,next:"pop"},{include:"#expressions"}]},{token:"text",regex:/\{/,push:[{token:"text",regex:/\}/,next:"pop"},{include:"#statements"}]},{token:"text",regex:/\(/,push:[{token:"text",regex:/\)/,next:"pop"},{include:"#types"}]},{token:"keyword.operator.declaration.kotlin",regex:/->/}],"#variables":[{token:"text",regex:/(?=\s*(?:var|val))/,push:[{token:"text",regex:/(?=:|=|$)/,next:"pop"},{token:"keyword.other.kotlin",regex:/\b(?:var|val)\b/,push:[{token:"text",regex:/(?=:|=|$)/,next:"pop"},{token:"text",regex:/</,push:[{token:"text",regex:/>/,next:"pop"},{include:"#generics"}]},{token:["text","entity.name.variable.kotlin"],regex:/((?:[\.<\?>\w]+\.)?)(\w+)/}]},{token:"keyword.operator.declaration.kotlin",regex:/:/,push:[{token:"text",regex:/(?==|$)/,next:"pop"},{include:"#types"},{include:"#getters-and-setters"}]},{token:"keyword.operator.assignment.kotlin",regex:/=/,push:[{token:"text",regex:/(?=$)/,next:"pop"},{include:"#expressions"},{include:"#getters-and-setters"}]}]}]},this.normalizeRules()};s.metaData={fileTypes:["kt","kts"],name:"Kotlin",scopeName:"source.Kotlin"},r.inherits(s,i),t.KotlinHighlightRules=s}),define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"],function(e,t,n){"use strict";var r=e("../../lib/oop"),i=e("../../range").Range,s=e("./fold_mode").FoldMode,o=t.FoldMode=function(e){e&&(this.foldingStartMarker=new RegExp(this.foldingStartMarker.source.replace(/\|[^|]*?$/,"|"+e.start)),this.foldingStopMarker=new RegExp(this.foldingStopMarker.source.replace(/\|[^|]*?$/,"|"+e.end)))};r.inherits(o,s),function(){this.foldingStartMarker=/([\{\[\(])[^\}\]\)]*$|^\s*(\/\*)/,this.foldingStopMarker=/^[^\[\{\(]*([\}\]\)])|^[\s\*]*(\*\/)/,this.singleLineBlockCommentRe=/^\s*(\/\*).*\*\/\s*$/,this.tripleStarBlockCommentRe=/^\s*(\/\*\*\*).*\*\/\s*$/,this.startRegionRe=/^\s*(\/\*|\/\/)#?region\b/,this._getFoldWidgetBase=this.getFoldWidget,this.getFoldWidget=function(e,t,n){var r=e.getLine(n);if(this.singleLineBlockCommentRe.test(r)&&!this.startRegionRe.test(r)&&!this.tripleStarBlockCommentRe.test(r))return"";var i=this._getFoldWidgetBase(e,t,n);return!i&&this.startRegionRe.test(r)?"start":i},this.getFoldWidgetRange=function(e,t,n,r){var i=e.getLine(n);if(this.startRegionRe.test(i))return this.getCommentRegionBlock(e,i,n);var s=i.match(this.foldingStartMarker);if(s){var o=s.index;if(s[1])return this.openingBracketBlock(e,s[1],n,o);var u=e.getCommentFoldRange(n,o+s[0].length,1);return u&&!u.isMultiLine()&&(r?u=this.getSectionRange(e,n):t!="all"&&(u=null)),u}if(t==="markbegin")return;var s=i.match(this.foldingStopMarker);if(s){var 
o=s.index+s[0].length;return s[1]?this.closingBracketBlock(e,s[1],n,o):e.getCommentFoldRange(n,o,-1)}},this.getSectionRange=function(e,t){var n=e.getLine(t),r=n.search(/\S/),s=t,o=n.length;t+=1;var u=t,a=e.getLength();while(++t<a){n=e.getLine(t);var f=n.search(/\S/);if(f===-1)continue;if(r>f)break;var l=this.getFoldWidgetRange(e,"all",t);if(l){if(l.start.row<=s)break;if(l.isMultiLine())t=l.end.row;else if(r==f)break}u=t}return new i(s,o,u,e.getLine(u).length)},this.getCommentRegionBlock=function(e,t,n){var r=t.search(/\s*$/),s=e.getLength(),o=n,u=/^\s*(?:\/\*|\/\/|--)#?(end)?region\b/,a=1;while(++n<s){t=e.getLine(n);var f=u.exec(t);if(!f)continue;f[1]?a--:a++;if(!a)break}var l=n;if(l>o)return new i(o,r,l,t.length)}}.call(o.prototype)}),define("ace/mode/kotlin",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/kotlin_highlight_rules","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text").Mode,s=e("./kotlin_highlight_rules").KotlinHighlightRules,o=e("./behaviour/cstyle").CstyleBehaviour,u=e("./folding/cstyle").FoldMode,a=function(){this.HighlightRules=s,this.foldingRules=new u,this.$behaviour=new o};r.inherits(a,i),function(){this.lineCommentStart="//",this.blockComment={start:"/*",end:"*/"},this.$id="ace/mode/kotlin"}.call(a.prototype),t.Mode=a}); (function() {
window.require(["ace/mode/kotlin"], function(m) {
if (typeof module == "object" && typeof exports == "object" && module) {
module.exports = m;
}
});
})();
|
PypiClean
|
/agavedb-0.4.2.tar.gz/agavedb-0.4.2/README.rst
|
=======
AgaveDB
=======
.. image:: https://badge.fury.io/py/agavedb.svg
:target: http://badge.fury.io/py/agavedb
.. image:: https://travis-ci.org/TACC/agavedb.svg?branch=master
:target: https://travis-ci.org/TACC/agavedb
.. image:: https://readthedocs.org/projects/agavedb/badge/?version=latest
:target: https://readthedocs.org/projects/agavedb/?badge=latest
.. image:: https://img.shields.io/pypi/l/Django.svg
:target: https://raw.githubusercontent.com/TACC/agavedb/master/LICENSE
**Multiuser-aware key/value store built using the Agave metadata API**
- Documentation: https://agavedb.readthedocs.io/en/latest/
- GitHub: https://github.com/TACC/agavedb
- PyPI: https://pypi.python.org/pypi/agavedb
- Free software: 3-Clause BSD License
Installation
============
Install from PyPI_ ::
pip install agavedb
Install from GitHub checkout::
cd agavedb
python setup.py install
Tests
=====
Tests are implemented using tox_. To run them, just type ``tox``
.. _PyPI: https://pypi.python.org/pypi/agavedb
.. _tox: https://tox.readthedocs.io/en/latest
|
PypiClean
|
/widgyts-0.5.1.tar.gz/widgyts-0.5.1/README.md
|
widgyts
===============================
[](https://widgyts.readthedocs.io/en/latest/?badge=latest)
[](https://codecov.io/gh/yt-project/widgyts)
[](https://joss.theoj.org/papers/f86e07ce58fe8bb24d928943663d2751)
[](https://zenodo.org/badge/latestdoi/124116100)
A fully client-side pan-and-zoom widget, using WebAssembly, for variable mesh
datasets from yt. It runs in the browser, so once the data hits your notebook,
it's super fast and responsive!
If you'd like to dig into the Rust and WebAssembly portion of the code, you can
find it at https://github.com/data-exp-lab/rust-yt-tools/ and in the npm
package `@data-exp-lab/yt-tools`.
Check out our [SciPy 2018 talk](https://www.youtube.com/watch?v=5dl_m_6T2bU)
and the [associated slides](https://munkm.github.io/2018-07-13-scipy/) for more info!
Documentation
-------------
Our documentation is hosted at readthedocs. Take a look
[here](https://widgyts.readthedocs.io/en/latest/).
Installation
------------
To install using pip from the most recent released version:
$ pip install widgyts
To install using pip from this directory:
$ git clone https://github.com/yt-project/widgyts.git
$ cd widgyts
$ pip install .
For a development installation (requires npm),
$ git clone https://github.com/yt-project/widgyts.git
$ cd widgyts
$ pip install -e .
$ jupyter serverextension enable --py --sys-prefix widgyts
$ jupyter nbextension install --py --symlink --sys-prefix widgyts
$ jupyter nbextension enable --py --sys-prefix widgyts
Note that in previous versions, serverextension was not provided and you were
required to set up your own mimetype in your local configuration. This is no
longer the case and you are now able to use this server extension to set up the
correct wasm mimetype.
To install the jupyterlab extension, you will need to make sure you are on a
recent enough version of Jupyterlab, preferably 0.35 or above. For a
development installation, do:
$ jupyter labextension install js
To install the latest released version,
$ jupyter labextension install @yt-project/yt-widgets
Using
-----
To use this, you will need to have yt installed. Importing it monkeypatches
the Slice and Projection objects, so you are now able to do:
```
#!python
import yt
import widgyts
ds = yt.load("data/IsolatedGalaxy/galaxy0030/galaxy0030")
s = ds.r[:,:,0.5]
s.display("density")
```
and for a projection:
```
#!python
ds = yt.load("data/IsolatedGalaxy/galaxy0030/galaxy0030")
p = ds.r[:].integrate("density", axis="x")
p.display()
```
There are a number of traits you can set on the resultant objects, as well.
|
PypiClean
|
/onegov.winterthur-0.7.5.tar.gz/onegov.winterthur-0.7.5/onegov/winterthur/roadwork.py
|
import isodate
import pycurl
import sedate
from cached_property import cached_property
from datetime import datetime, timedelta
from io import BytesIO
from onegov.core.custom import json
from pathlib import Path
from purl import URL
class RoadworkError(Exception):
pass
class RoadworkConnectionError(RoadworkError):
pass
class RoadworkConfig(object):
""" Looks at ~/.pdb.secret and /etc/pdb.secret (in this order), to extract
the configuration used for the RoadworkClient class.
The configuration is as follows::
HOSTNAME: pdb.example.org
ENDPOINT: 127.0.0.1:6004
USERNAME: username
PASSWORD: password
* The HOSTNAME is the address of the PDB service.
* The ENDPOINT is the optional address of the tcp-proxy used.
* The USERNAME is the NTLM username.
* The PASSWORD is the NTLM password.
"""
def __init__(self, hostname, endpoint, username, password):
self.hostname = hostname
self.endpoint = endpoint
self.username = username
self.password = password
@classmethod
def lookup_paths(self):
yield Path('~/.pdb.secret').expanduser()
yield Path('/etc/pdb.secret')
@classmethod
def lookup(cls):
for path in cls.lookup_paths():
if path.exists():
return cls(**cls.parse(path))
paths = ', '.join(str(p) for p in cls.lookup_paths())
raise RoadworkError(
f"No pdb configuration found in {paths}")
@classmethod
def parse(cls, path):
result = {
'hostname': None,
'endpoint': None,
'username': None,
'password': None,
}
with path.open('r') as file:
for line in file:
line = line.strip()
if not line:
continue
if ':' not in line:
continue
if line.startswith('#'):
continue
k, v = line.split(':', maxsplit=1)
k = k.strip().lower()
v = v.strip()
if k in result:
result[k] = v
return result
class RoadworkClient(object):
""" A proxy to Winterthur's internal roadworks service. Uses redis as
a caching mechanism to ensure performance and reliability.
Since the roadworks service can only be reached inside Winterthur's
network, we rely on a proxy connection during development/testing.
To not expose any information unwittingly to the public, the description
of how to connect to that proxy is kept at docs.seantis.ch.
"""
def __init__(self, cache, hostname, username, password, endpoint=None):
self.cache = cache
self.hostname = hostname
self.username = username
self.password = password
self.endpoint = endpoint or hostname
@cached_property
def curl(self):
curl = pycurl.Curl()
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_NTLM)
curl.setopt(pycurl.USERPWD, f"{self.username}:{self.password}")
curl.setopt(pycurl.HTTPHEADER, [f'HOST: {self.hostname}'])
return curl
def url(self, path):
return f'http://{self.endpoint}/{path}'
def get(self, path, lifetime=5 * 60, downtime=60 * 60):
""" Requests the given path, returning the resulting json if
successful.
A cache is used in two stages:
* At the lifetime stage, the cache is returned unconditionally.
* At the end of the lifetime, the cache is refreshed if possible.
* At the end of the downtime stage, the cache is forcefully refreshed.
During its lifetime the object is basically up to 5 minutes out of
date. But since the backend may not be available when that time
expires we operate with a downtime that is higher (1 hour).
This means that a downtime in the backend will not result in evicted
caches, even if the lifetime is up. Once the downtime limit is up we
do however evict the cache forcefully, raising an error if we cannot
connect to the backend.
"""
path = path.lstrip('/')
cached = self.cache.get(path)
def refresh():
try:
status, body = self.get_uncached(path)
except pycurl.error:
raise RoadworkConnectionError(
f"Could not connect to {self.hostname}")
if status == 200:
self.cache.set(path, {
'created': datetime.utcnow(),
'status': status,
'body': body
})
return body
raise RoadworkError(f"{path} returned {status}")
# no cache yet, return result and cache it
if not cached:
return refresh()
now = datetime.utcnow()
lifetime_horizon = cached['created'] + timedelta(seconds=lifetime)
downtime_horizon = cached['created'] + timedelta(seconds=downtime)
# within cache lifetime, return cached value
if now <= lifetime_horizon:
return cached['body']
# outside cache lifetime, but still in downtime horizon, try to
# refresh the value but ignore errors
if lifetime_horizon < now < downtime_horizon:
try:
return refresh()
except RoadworkConnectionError:
return cached['body']
# outside the downtime lifetime, force refresh and raise errors
return refresh()
def get_uncached(self, path):
body = BytesIO()
self.curl.setopt(pycurl.URL, self.url(path))
self.curl.setopt(pycurl.WRITEFUNCTION, body.write)
self.curl.perform()
status = self.curl.getinfo(pycurl.RESPONSE_CODE)
body = body.getvalue().decode('utf-8')
if status == 200:
body = json.loads(body)
return status, body
def is_cacheable(self, response):
return response[0] == 200
class RoadworkCollection(object):
def __init__(self, client, letter=None, query=None):
self.client = client
self.query = None
self.letter = None
if query:
self.query = query.lower()
elif letter:
self.letter = letter.lower()
@property
def letters(self):
letters = set()
for roadwork in self.by_letter(None).roadwork:
for letter in roadwork.letters:
letters.add(letter)
letters = list(letters)
letters.sort()
return letters
def by_filter(self, filter):
# note: addGisLink doesn't work here
url = URL('odata/Baustellen')\
.query_param('addGisLink', 'False')\
.query_param('$filter', filter)
records = self.client.get(url.as_string()).get('value', ())
records = (r for r in records if r['Internet'])
work = [Roadwork(r) for r in records]
work.sort(key=lambda r: r.title)
return work
@property
def roadwork(self):
date = datetime.today()
roadwork = self.by_filter(filter=' and '.join((
f'DauerVon le {date.strftime("%Y-%m-%d")}',
f'DauerBis ge {date.strftime("%Y-%m-%d")}',
)))
# The backend supports searches/filters, but the used dataset is
# so small that it makes little sense to use that feature, since it
# would lead to a lot more cache-misses on our end.
#
# Instead we simply loop through the results and filter them out.
if self.query:
roadwork = [
r for r in roadwork if self.query in r.title.lower()
]
elif self.letter:
roadwork = [
r for r in roadwork if self.letter in r.letters
]
return roadwork
def by_id(self, id):
url = URL(f'odata/Baustellen({int(id)})')\
.query_param('addGisLink', 'True')
work = tuple(
Roadwork(r) for r in self.client.get(
url.as_string()).get('value', ()))
if work:
return work[0]
# secondary lookup is against the subsections.. this probably calls
# for an index eventually
for r in self.roadwork:
for section in r.sections:
if section.id == id:
return section
def by_letter(self, letter):
return self.__class__(self.client, letter=letter, query=None)
class Roadwork(object):
def __init__(self, data):
self.data = data
self.convertors = {
'DauerVon': lambda v: v and isodate.parse_datetime(v),
'DauerBis': lambda v: v and isodate.parse_datetime(v),
}
@property
def id(self):
return self['Id']
@property
def letters(self):
for key in ('ProjektBezeichnung', 'ProjektBereich'):
if self[key]:
letter = self[key][0].lower()
if letter in 'abcdefghijklmnopqrstuvwxyz':
yield letter
@property
def title(self):
parts = (self[key] for key in ('ProjektBezeichnung', 'ProjektBereich'))
parts = (p.strip() for p in parts if p)
return ' '.join(parts)
@property
def sections(self):
now = sedate.utcnow()
sections = (
self.__class__({
'Id': r['TeilbaustelleId'],
'Teilbaustellen': [],
**r
}) for r in self['Teilbaustellen']
)
sections = (s for s in sections if s['DauerVon'])
sections = (s for s in sections if s['DauerVon'] <= now)
sections = (s for s in sections if now <= (s['DauerBis'] or now))
return list(sections)
def __getitem__(self, key):
value = self.data[key]
if key in self.convertors:
return self.convertors[key](value)
return value
def __contains__(self, key):
return key in self.data
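# Usage sketch (illustrative, not part of the original module): the cache only
# needs dict-like get/set, so a plain in-memory stand-in works for local
# experiments.
#
#     class DictCache:
#         def __init__(self):
#             self.data = {}
#         def get(self, key):
#             return self.data.get(key)
#         def set(self, key, value):
#             self.data[key] = value
#
#     config = RoadworkConfig.lookup()
#     client = RoadworkClient(DictCache(), config.hostname, config.username,
#                             config.password, endpoint=config.endpoint)
#     for roadwork in RoadworkCollection(client).roadwork:
#         print(roadwork.id, roadwork.title)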
|
PypiClean
|
/e2eAIOK_denas-1.1.1b2023042803-py3-none-any.whl/e2eAIOK/DeNas/cv/model_builder_denas_cv.py
|
from e2eAIOK.DeNas.utils import decode_arch_tuple
from e2eAIOK.DeNas.cv.third_party.ZenNet import DeMainNet
from e2eAIOK.common.trainer.model.model_builder_cv import ModelBuilderCV
from e2eAIOK.DeNas.cv.supernet_transformer import Vision_TransformerSuper
class ModelBuilderCVDeNas(ModelBuilderCV):
def __init__(self, cfg):
super().__init__(cfg)
def _init_model(self):
if self.cfg.best_model_structure is not None:
with open(self.cfg.best_model_structure, 'r') as f:
arch = f.readlines()[-1]
else:
raise RuntimeError(f"model structure string not found")
if self.cfg.domain == 'cnn':
model = DeMainNet(num_classes=self.cfg.num_classes, plainnet_struct=arch, no_create=False)
elif self.cfg.domain == 'vit':
model = Vision_TransformerSuper(img_size=self.cfg.input_size,
patch_size=self.cfg.patch_size,
embed_dim=self.cfg['SUPERNET']['EMBED_DIM'], depth=self.cfg['SUPERNET']['DEPTH'],
num_heads=self.cfg['SUPERNET']['NUM_HEADS'],mlp_ratio=self.cfg['SUPERNET']['MLP_RATIO'],
qkv_bias=True, drop_rate=self.cfg.drop,
drop_path_rate=self.cfg.drop_path,
gp=self.cfg.gp,
num_classes=self.cfg.num_classes,
max_relative_position=self.cfg.max_relative_position,
relative_position=self.cfg.relative_position,
change_qkv=self.cfg.change_qkv, abs_pos=not self.cfg.no_abs_pos)
depth, mlp_ratio, num_heads, embed_dim = decode_arch_tuple(arch)
model_config = {}
model_config['layer_num'] = depth
model_config['mlp_ratio'] = mlp_ratio
model_config['num_heads'] = num_heads
model_config['embed_dim'] = [embed_dim]*depth
n_parameters = model.get_sampled_params_numel(model_config)
print(f"model parameters size: {n_parameters}")
return model
|
PypiClean
|
/escapepod_sdk-1.0.0.tar.gz/escapepod_sdk-1.0.0/escapepod_sdk/messaging/cybervector_proxy_grpc.py
|
import grpc
from . import cybervector_proxy_pb2
class CyberVectorProxyServiceStub(object):
"""The grpc-defined connection between the SDK and Cyb3rVector EscapePod Extension Proxy.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetStatus = channel.unary_unary(
'/cybervector.CyberVectorProxyService/GetStatus',
request_serializer=cybervector_proxy_pb2.StatusRequest.SerializeToString,
response_deserializer=cybervector_proxy_pb2.StatusResponse.FromString,
)
self.Subscribe = channel.unary_stream(
'/cybervector.CyberVectorProxyService/Subscribe',
request_serializer=cybervector_proxy_pb2.SubscribeRequest.SerializeToString,
response_deserializer=cybervector_proxy_pb2.ProxyMessaage.FromString,
)
self.UnSubscribe = channel.unary_unary(
'/cybervector.CyberVectorProxyService/UnSubscribe',
request_serializer=cybervector_proxy_pb2.UnsubscribeRequest.SerializeToString,
response_deserializer=cybervector_proxy_pb2.ProxyMessaage.FromString,
)
self.InsertIntent = channel.unary_unary(
'/cybervector.CyberVectorProxyService/InsertIntent',
request_serializer=cybervector_proxy_pb2.InsertIntentRequest.SerializeToString,
response_deserializer=cybervector_proxy_pb2.InsertIntentResponse.FromString,
)
self.SelectIntents = channel.unary_unary(
'/cybervector.CyberVectorProxyService/SelectIntents',
request_serializer=cybervector_proxy_pb2.SelectIntentRequest.SerializeToString,
response_deserializer=cybervector_proxy_pb2.SelectIntentResponse.FromString,
)
self.DeleteIntent = channel.unary_unary(
'/cybervector.CyberVectorProxyService/DeleteIntent',
request_serializer=cybervector_proxy_pb2.DeleteIntentRequest.SerializeToString,
response_deserializer=cybervector_proxy_pb2.DeleteIntentResponse.FromString,
)
class CyberVectorProxyService(object):
"""The grpc-defined connection between the SDK and Cyb3rVector EscapePod Extension Proxy.
"""
def GetStatus(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Subscribe(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UnSubscribe(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InsertIntent(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SelectIntents(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteIntent(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CyberVectorProxyService_to_server(servicer, server):
rpc_method_handlers = {
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=cybervector_proxy_pb2.StatusRequest.FromString,
response_serializer=cybervector_proxy_pb2.StatusResponse.SerializeToString,
),
'Subscribe': grpc.unary_stream_rpc_method_handler(
servicer.Subscribe,
request_deserializer=cybervector_proxy_pb2.SubscribeRequest.FromString,
response_serializer=cybervector_proxy_pb2.ProxyMessaage.SerializeToString,
),
'UnSubscribe': grpc.unary_unary_rpc_method_handler(
servicer.UnSubscribe,
request_deserializer=cybervector_proxy_pb2.UnsubscribeRequest.FromString,
response_serializer=cybervector_proxy_pb2.ProxyMessaage.SerializeToString,
),
'InsertIntent': grpc.unary_unary_rpc_method_handler(
servicer.InsertIntent,
request_deserializer=cybervector_proxy_pb2.InsertIntentRequest.FromString,
response_serializer=cybervector_proxy_pb2.InsertIntentResponse.SerializeToString,
),
'SelectIntents': grpc.unary_unary_rpc_method_handler(
servicer.SelectIntents,
request_deserializer=cybervector_proxy_pb2.SelectIntentRequest.FromString,
response_serializer=cybervector_proxy_pb2.SelectIntentResponse.SerializeToString,
),
'DeleteIntent': grpc.unary_unary_rpc_method_handler(
servicer.DeleteIntent,
request_deserializer=cybervector_proxy_pb2.DeleteIntentRequest.FromString,
response_serializer=cybervector_proxy_pb2.DeleteIntentResponse.SerializeToString,
)
}
generic_handler = grpc.method_handlers_generic_handler(
'cybervector.CyberVectorProxyService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
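# Usage sketch (illustrative, not part of the generated module): connect a
# client stub to a running proxy and query its status. The address below is a
# placeholder.
#
#     import grpc
#     from . import cybervector_proxy_pb2
#
#     channel = grpc.insecure_channel("localhost:8089")
#     stub = CyberVectorProxyServiceStub(channel)
#     status = stub.GetStatus(cybervector_proxy_pb2.StatusRequest())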
|
PypiClean
|
/weixinpy-0.0.11.tar.gz/weixinpy-0.0.11/wechatpy/pay/api/coupon.py
|
from __future__ import absolute_import, unicode_literals
import random
from datetime import datetime
from wechatpy.pay.base import BaseWeChatPayAPI
class WeChatCoupon(BaseWeChatPayAPI):
def send(self, user_id, stock_id, op_user_id=None, device_info=None,
out_trade_no=None):
"""
Send a coupon
:param user_id: the user's openid under the official account
:param stock_id: coupon stock (batch) ID
:param op_user_id: optional, operator account, defaults to the merchant ID
:param device_info: optional, terminal device ID assigned by WeChat Pay
:param out_trade_no: optional, merchant order number, must be unique,
defaults to an auto-generated value
:return: the result data returned by the API
"""
if not out_trade_no:
now = datetime.now()
out_trade_no = '{0}{1}{2}'.format(
self.mch_id,
now.strftime('%Y%m%d%H%M%S'),
random.randint(1000, 10000)
)
data = {
'appid': self.appid,
'coupon_stock_id': stock_id,
'openid': user_id,
'openid_count': 1,
'partner_trade_no': out_trade_no,
'op_user_id': op_user_id,
'device_info': device_info,
'version': '1.0',
'type': 'XML',
}
return self._post('mmpaymkttransfers/send_coupon', data=data)
def query_stock(self, stock_id, op_user_id=None, device_info=None):
"""
Query a coupon stock (batch)
:param stock_id: coupon stock (batch) ID
:param op_user_id: optional, operator account, defaults to the merchant ID
:param device_info: optional, terminal device ID assigned by WeChat Pay
:return: the result data returned by the API
"""
data = {
'appid': self.appid,
'coupon_stock_id': stock_id,
'op_user_id': op_user_id,
'device_info': device_info,
'version': '1.0',
'type': 'XML',
}
return self._post('mmpaymkttransfers/query_coupon_stock', data=data)
def query_coupon(self, coupon_id, user_id,
op_user_id=None, device_info=None):
"""
Query coupon information
:param coupon_id: coupon ID
:param user_id: the user's openid under the official account
:param op_user_id: optional, operator account, defaults to the merchant ID
:param device_info: optional, terminal device ID assigned by WeChat Pay
:return: the result data returned by the API
"""
data = {
'coupon_id': coupon_id,
'openid': user_id,
'appid': self.appid,
'op_user_id': op_user_id,
'device_info': device_info,
'version': '1.0',
'type': 'XML',
}
return self._post('promotion/query_coupon', data=data)
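# Usage sketch (illustrative; assumes the usual wechatpy entry point, where the
# WeChatPay client exposes this API as its ``coupon`` attribute - adjust to the
# actual client object in your setup):
#
#     from wechatpy.pay import WeChatPay
#
#     pay = WeChatPay(appid="wx...", api_key="...", mch_id="...")
#     result = pay.coupon.send(user_id="user-openid", stock_id="123456")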
|
PypiClean
|
/aws-sam-cli-1.96.0.tar.gz/aws-sam-cli-1.96.0/samcli/lib/utils/version_checker.py
|
import logging
from datetime import datetime, timedelta
from functools import wraps
import click
from requests import get
from samcli import __version__ as installed_version
from samcli.cli.global_config import GlobalConfig
LOG = logging.getLogger(__name__)
AWS_SAM_CLI_PYPI_ENDPOINT = "https://pypi.org/pypi/aws-sam-cli/json"
AWS_SAM_CLI_INSTALL_DOCS = (
"https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html"
)
PYPI_CALL_TIMEOUT_IN_SECONDS = 5
DELTA_DAYS = 7
def check_newer_version(func):
"""
This function returns a wrapped function definition, which checks whether a newer version of SAM CLI is available
Parameters
----------
func: function reference
Actual function (command) which will be executed
Returns
-------
function reference:
A wrapped function reference which executes original function and checks newer version of SAM CLI
"""
@wraps(func)
def wrapped(*args, **kwargs):
# execute actual command first
actual_result = func(*args, **kwargs)
# check and inform newer version if it is available
_inform_newer_version()
return actual_result
return wrapped
def _inform_newer_version(force_check=False) -> None:
"""
Compares the installed SAM CLI version with the latest version on PyPI,
and prints a message if the latest version differs from the installed one.
The time of the last check is stored in GlobalConfig so that the check does
not run on every invocation; currently it runs at most once a week.
Parameters
----------
force_check: bool
When it is True, it will trigger checking new version of SAM CLI. Default value is False
"""
# run everything else in try-except block
global_config = None
need_to_update_last_check_time = True
try:
global_config = GlobalConfig()
last_version_check = global_config.last_version_check
if force_check or is_version_check_overdue(last_version_check):
fetch_and_compare_versions()
else:
need_to_update_last_check_time = False
except Exception as e:
LOG.debug("New version check failed", exc_info=e)
finally:
if need_to_update_last_check_time:
update_last_check_time()
def fetch_and_compare_versions() -> None:
"""
Compare the latest available version with the installed one, and inform the user if a newer version is available
"""
response = get(AWS_SAM_CLI_PYPI_ENDPOINT, timeout=PYPI_CALL_TIMEOUT_IN_SECONDS)
result = response.json()
latest_version = result.get("info", {}).get("version", None)
LOG.debug("Installed version %s, current version %s", installed_version, latest_version)
if latest_version and installed_version != latest_version:
click.secho(
f"\nSAM CLI update available ({latest_version}); ({installed_version} installed)", fg="green", err=True
)
click.echo(f"To download: {AWS_SAM_CLI_INSTALL_DOCS}", err=True)
def update_last_check_time() -> None:
"""
Update last_check_time in GlobalConfig
"""
try:
gc = GlobalConfig()
gc.last_version_check = datetime.utcnow().timestamp()
except Exception as e:
LOG.debug("Updating last version check time was failed", exc_info=e)
def is_version_check_overdue(last_version_check) -> bool:
"""
Check whether the last version check was made more than a week ago
Parameters
----------
last_version_check: epoch time
last_version_check epoch time read from GlobalConfig
Returns
-------
bool:
True if last_version_check is None or older than a week, False otherwise
"""
if last_version_check is None or type(last_version_check) not in [int, float]:
return True
epoch_week_ago = datetime.utcnow() - timedelta(days=DELTA_DAYS)
return datetime.utcfromtimestamp(last_version_check) < epoch_week_ago
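# Usage sketch (illustrative, not part of the original module): wrap a click
# command so the version check runs right after the command finishes.
#
#     import click
#
#     @click.command()
#     @check_newer_version
#     def hello():
#         click.echo("hello")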
|
PypiClean
|
/ressources/lib/node_modules/highcharts/indicators/cmf.src.js
|
'use strict';
(function (factory) {
if (typeof module === 'object' && module.exports) {
module.exports = factory;
} else if (typeof define === 'function' && define.amd) {
define(function () {
return factory;
});
} else {
factory(Highcharts);
}
}(function (Highcharts) {
(function (H) {
/**
* (c) 2010-2017 Highsoft AS
* Author: Sebastian Domas
*
* Chaikin Money Flow indicator for Highstock
*
* License: www.highcharts.com/license
*/
H.seriesType('cmf', 'sma',
/**
* Chaikin Money Flow indicator (cmf).
*
* @type {Object}
* @extends plotOptions.sma
* @product highstock
* @sample {highstock} stock/indicators/cmf/
* Chaikin Money Flow indicator
* @since 6.0.0
* @excluding animationLimit
* @optionparent plotOptions.cmf
*/
{
params: {
period: 14,
/**
* The id of another series to use its data as volume data for the
                 * indicator calculation.
*/
volumeSeriesID: 'volume'
}
}, {
nameBase: 'Chaikin Money Flow',
/**
             * Checks if the series and volumeSeries are accessible, the number of
             * x points is at least the period, and the series has OHLC data
* @returns {Boolean}
* true if series is valid and can be computed, otherwise false
**/
isValid: function () {
var chart = this.chart,
options = this.options,
series = this.linkedParent,
volumeSeries = (
this.volumeSeries ||
(
this.volumeSeries =
chart.get(options.params.volumeSeriesID)
)
),
isSeriesOHLC = (
series &&
series.yData &&
series.yData[0].length === 4
);
function isLengthValid(serie) {
return serie.xData &&
serie.xData.length >= options.params.period;
}
return !!(
series &&
volumeSeries &&
isLengthValid(series) &&
isLengthValid(volumeSeries) && isSeriesOHLC
);
},
/**
* @typedef {Object} Values
* @property {Number[][]} values
* Combined xData and yData values into a tuple
* @property {Number[]} xData
* Values represent x timestamp values
* @property {Number[]} yData
* Values represent y values
**/
/**
* Returns indicator's data
* @returns {False | Values}
* Returns false if the indicator is not valid, otherwise
* returns Values object
**/
getValues: function (series, params) {
if (!this.isValid()) {
return false;
}
return this.getMoneyFlow(
series.xData,
series.yData,
this.volumeSeries.yData,
params.period
);
},
/**
* @static
* @param {Number[]} xData x timestamp values
* @param {Number[]} seriesYData yData of basic series
* @param {Number[]} volumeSeriesYData yData of volume series
* @param {Number} period indicator's param
* @returns {Values} object containing computed money flow data
**/
getMoneyFlow: function (xData, seriesYData, volumeSeriesYData, period) {
var len = seriesYData.length,
moneyFlowVolume = [],
sumVolume = 0,
sumMoneyFlowVolume = 0,
moneyFlowXData = [],
moneyFlowYData = [],
values = [],
i,
point,
nullIndex = -1;
/**
                 * Calculates money flow volume; uses i and mutates nullIndex from
                 * the enclosing scope.
* @private
* @param {Number[]} ohlc OHLC point
* @param {Number} volume Volume point's y value
* @returns {Number} volume * moneyFlowMultiplier
**/
function getMoneyFlowVolume(ohlc, volume) {
var high = ohlc[1],
low = ohlc[2],
close = ohlc[3],
isValid =
volume !== null &&
high !== null &&
low !== null &&
close !== null &&
high !== low;
/**
* @private
* @param {Number} h High value
* @param {Number} l Low value
* @param {Number} c Close value
* @returns {Number} calculated multiplier for the point
**/
function getMoneyFlowMultiplier(h, l, c) {
return ((c - l) - (h - c)) / (h - l);
}
return isValid ?
getMoneyFlowMultiplier(high, low, close) * volume :
((nullIndex = i), null);
}
if (period > 0 && period <= len) {
for (i = 0; i < period; i++) {
moneyFlowVolume[i] = getMoneyFlowVolume(
seriesYData[i],
volumeSeriesYData[i]
);
sumVolume += volumeSeriesYData[i];
sumMoneyFlowVolume += moneyFlowVolume[i];
}
moneyFlowXData.push(xData[i - 1]);
moneyFlowYData.push(
i - nullIndex >= period && sumVolume !== 0 ?
sumMoneyFlowVolume / sumVolume :
null
);
values.push([moneyFlowXData[0], moneyFlowYData[0]]);
for (; i < len; i++) {
moneyFlowVolume[i] = getMoneyFlowVolume(
seriesYData[i],
volumeSeriesYData[i]
);
sumVolume -= volumeSeriesYData[i - period];
sumVolume += volumeSeriesYData[i];
sumMoneyFlowVolume -= moneyFlowVolume[i - period];
sumMoneyFlowVolume += moneyFlowVolume[i];
point = [
xData[i],
i - nullIndex >= period ?
sumMoneyFlowVolume / sumVolume :
null
];
moneyFlowXData.push(point[0]);
moneyFlowYData.push(point[1]);
values.push([point[0], point[1]]);
}
}
return {
values: values,
xData: moneyFlowXData,
yData: moneyFlowYData
};
}
});
/**
* A `CMF` series. If the [type](#series.cmf.type) option is not
* specified, it is inherited from [chart.type](#chart.type).
*
* @type {Object}
* @since 6.0.0
* @extends series,plotOptions.cmf
* @excluding data,dataParser,dataURL
* @product highstock
* @apioption series.cmf
*/
/**
* An array of data points for the series. For the `CMF` series type,
* points are calculated dynamically.
*
* @type {Array<Object|Array>}
* @since 6.0.0
* @extends series.line.data
* @product highstock
* @apioption series.cmf.data
*/
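        /**
         * Illustrative usage sketch (not part of the original source): a minimal
         * Highstock configuration wiring the indicator to an OHLC series and a
         * volume series. The ids 'ohlc-series' and 'volume-series' are assumed.
         *
         *     Highcharts.stockChart('container', {
         *         series: [
         *             { type: 'ohlc', id: 'ohlc-series', data: ohlcData },
         *             { type: 'column', id: 'volume-series', data: volumeData },
         *             { type: 'cmf', linkedTo: 'ohlc-series',
         *               params: { period: 14, volumeSeriesID: 'volume-series' } }
         *         ]
         *     });
         */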
}(Highcharts));
return (function () {
}());
}));
|
PypiClean
|
/metinanaliz-1.1.6.tar.gz/metinanaliz-1.1.6/analiz/analiz.py
|
import re
import math
import sys
import os
sesliHarfler = 'AaÂâEeIıİiÎîOoÖöUuÜü'
heceGroupları = {}
def analiz(dosyaAdi, icerik):
icerik = icerik.replace('’','')
toplamCumleSayısı = 0
toplamKelimeSayısı = 0
toplamHeceSayısı = 0
for cumle in re.split(r'[(.+)…\?!—][\s\n]', icerik):
if(len(cumle) > 0):
toplamCumleSayısı += 1
print('================================================')
print(toplamCumleSayısı, ':')
print('')
print(cumle)
for kelime in re.findall(r'\w+', cumle):
toplamKelimeSayısı+=1
heceSayısı = heceSayisiHesapla(kelime)
print('kelime ', toplamKelimeSayısı,' (', heceSayısı, ' hece)', ': ', kelime)
toplamHeceSayısı += heceSayısı
if heceGroupları.get(heceSayısı) is None:
heceGroupları[heceSayısı] = 0
heceGroupları[heceSayısı] += 1
print('\nMetin Analizi: ' , dosyaAdi)
print('-------------------')
print('Toplam Cümle Sayısı: ', toplamCumleSayısı)
print('Toplam Kelime: ', toplamKelimeSayısı)
print('toplamHeceSayısı: ', toplamHeceSayısı)
H3 = 0
H4 = 0
H5 = 0
H6 = 0
OKS = toplamKelimeSayısı / toplamCumleSayısı
for heceGrubu in sorted(heceGroupları.keys()):
        print(heceGrubu, ' heceli kelime sayısı: ', heceGroupları[heceGrubu])
if heceGrubu == 3:
H3 = heceGroupları[heceGrubu] / toplamCumleSayısı
elif heceGrubu == 4:
H4 = heceGroupları[heceGrubu] / toplamCumleSayısı
elif heceGrubu == 5:
H5 = heceGroupları[heceGrubu] / toplamCumleSayısı
elif heceGrubu == 6:
H6 = heceGroupları[heceGrubu] / toplamCumleSayısı
YOD = math.sqrt(OKS * ((H3 * 0.84) + (H4 * 1.5) + (H5 * 3.5) + (H6 * 26.25)))
print('OKS: ', OKS)
print('H3: ', H3)
print('H4: ', H4)
print('H5: ', H5)
print('H6: ', H6)
print('YOD: ', YOD)
return
def heceSayisiHesapla(kelime):
hece = 0
for harf in kelime:
        if sesliHarfler.find(harf) > -1:
hece+=1
return hece
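# Illustrative sketch: heceSayisiHesapla approximates the syllable count of a
# Turkish word by counting its vowels.
#
#     heceSayisiHesapla('kelime')  # -> 3 (e, i, e)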
def main():
dosyaYolu = str(sys.argv[1])
    # Read the file as UTF-8 so Turkish characters decode correctly on any platform.
    with open(dosyaYolu, 'r', encoding='utf-8') as dosya:
        dosyaİceriği = dosya.read()
analiz(os.path.splitext(dosyaYolu)[0], dosyaİceriği)
# Run main() when executed as a script
if __name__=="__main__":
main()
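# Usage sketch (assumes a UTF-8 text file path as the first command-line argument):
#
#     python analiz.py metin.txt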
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/models/self_signed_certificate.py
|
from __future__ import annotations
from datetime import datetime
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
from uuid import UUID
class SelfSignedCertificate(AdditionalDataHolder, Parsable):
def __init__(self,) -> None:
"""
Instantiates a new SelfSignedCertificate and sets the default values.
"""
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
self._additional_data: Dict[str, Any] = {}
# Custom key identifier.
self._custom_key_identifier: Optional[bytes] = None
# The friendly name for the key.
self._display_name: Optional[str] = None
# The date and time at which the credential expires. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
self._end_date_time: Optional[datetime] = None
# The value for the key credential. Should be a base-64 encoded value.
self._key: Optional[bytes] = None
# The unique identifier (GUID) for the key.
self._key_id: Optional[UUID] = None
# The OdataType property
self._odata_type: Optional[str] = None
# The date and time at which the credential becomes valid. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
self._start_date_time: Optional[datetime] = None
# The thumbprint value for the key.
self._thumbprint: Optional[str] = None
# The type of key credential. 'AsymmetricX509Cert'.
self._type: Optional[str] = None
# A string that describes the purpose for which the key can be used. For example, 'Verify'.
self._usage: Optional[str] = None
@property
def additional_data(self,) -> Dict[str, Any]:
"""
Gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Returns: Dict[str, Any]
"""
return self._additional_data
@additional_data.setter
def additional_data(self,value: Dict[str, Any]) -> None:
"""
Sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Args:
value: Value to set for the AdditionalData property.
"""
self._additional_data = value
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> SelfSignedCertificate:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: SelfSignedCertificate
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return SelfSignedCertificate()
@property
def custom_key_identifier(self,) -> Optional[bytes]:
"""
Gets the customKeyIdentifier property value. Custom key identifier.
Returns: Optional[bytes]
"""
return self._custom_key_identifier
@custom_key_identifier.setter
def custom_key_identifier(self,value: Optional[bytes] = None) -> None:
"""
Sets the customKeyIdentifier property value. Custom key identifier.
Args:
value: Value to set for the custom_key_identifier property.
"""
self._custom_key_identifier = value
@property
def display_name(self,) -> Optional[str]:
"""
Gets the displayName property value. The friendly name for the key.
Returns: Optional[str]
"""
return self._display_name
@display_name.setter
def display_name(self,value: Optional[str] = None) -> None:
"""
Sets the displayName property value. The friendly name for the key.
Args:
value: Value to set for the display_name property.
"""
self._display_name = value
@property
def end_date_time(self,) -> Optional[datetime]:
"""
Gets the endDateTime property value. The date and time at which the credential expires. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
Returns: Optional[datetime]
"""
return self._end_date_time
@end_date_time.setter
def end_date_time(self,value: Optional[datetime] = None) -> None:
"""
Sets the endDateTime property value. The date and time at which the credential expires. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
Args:
value: Value to set for the end_date_time property.
"""
self._end_date_time = value
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
fields: Dict[str, Callable[[Any], None]] = {
"customKeyIdentifier": lambda n : setattr(self, 'custom_key_identifier', n.get_bytes_value()),
"displayName": lambda n : setattr(self, 'display_name', n.get_str_value()),
"endDateTime": lambda n : setattr(self, 'end_date_time', n.get_datetime_value()),
"key": lambda n : setattr(self, 'key', n.get_bytes_value()),
"keyId": lambda n : setattr(self, 'key_id', n.get_uuid_value()),
"@odata.type": lambda n : setattr(self, 'odata_type', n.get_str_value()),
"startDateTime": lambda n : setattr(self, 'start_date_time', n.get_datetime_value()),
"thumbprint": lambda n : setattr(self, 'thumbprint', n.get_str_value()),
"type": lambda n : setattr(self, 'type', n.get_str_value()),
"usage": lambda n : setattr(self, 'usage', n.get_str_value()),
}
return fields
@property
def key(self,) -> Optional[bytes]:
"""
Gets the key property value. The value for the key credential. Should be a base-64 encoded value.
Returns: Optional[bytes]
"""
return self._key
@key.setter
def key(self,value: Optional[bytes] = None) -> None:
"""
Sets the key property value. The value for the key credential. Should be a base-64 encoded value.
Args:
value: Value to set for the key property.
"""
self._key = value
@property
def key_id(self,) -> Optional[UUID]:
"""
Gets the keyId property value. The unique identifier (GUID) for the key.
Returns: Optional[UUID]
"""
return self._key_id
@key_id.setter
def key_id(self,value: Optional[UUID] = None) -> None:
"""
Sets the keyId property value. The unique identifier (GUID) for the key.
Args:
value: Value to set for the key_id property.
"""
self._key_id = value
@property
def odata_type(self,) -> Optional[str]:
"""
Gets the @odata.type property value. The OdataType property
Returns: Optional[str]
"""
return self._odata_type
@odata_type.setter
def odata_type(self,value: Optional[str] = None) -> None:
"""
Sets the @odata.type property value. The OdataType property
Args:
value: Value to set for the odata_type property.
"""
self._odata_type = value
def serialize(self,writer: SerializationWriter) -> None:
"""
        Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
writer.write_object_value("customKeyIdentifier", self.custom_key_identifier)
writer.write_str_value("displayName", self.display_name)
writer.write_datetime_value("endDateTime", self.end_date_time)
writer.write_object_value("key", self.key)
writer.write_uuid_value("keyId", self.key_id)
writer.write_str_value("@odata.type", self.odata_type)
writer.write_datetime_value("startDateTime", self.start_date_time)
writer.write_str_value("thumbprint", self.thumbprint)
writer.write_str_value("type", self.type)
writer.write_str_value("usage", self.usage)
writer.write_additional_data_value(self.additional_data)
@property
def start_date_time(self,) -> Optional[datetime]:
"""
Gets the startDateTime property value. The date and time at which the credential becomes valid. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
Returns: Optional[datetime]
"""
return self._start_date_time
@start_date_time.setter
def start_date_time(self,value: Optional[datetime] = None) -> None:
"""
Sets the startDateTime property value. The date and time at which the credential becomes valid. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z.
Args:
value: Value to set for the start_date_time property.
"""
self._start_date_time = value
@property
def thumbprint(self,) -> Optional[str]:
"""
Gets the thumbprint property value. The thumbprint value for the key.
Returns: Optional[str]
"""
return self._thumbprint
@thumbprint.setter
def thumbprint(self,value: Optional[str] = None) -> None:
"""
Sets the thumbprint property value. The thumbprint value for the key.
Args:
value: Value to set for the thumbprint property.
"""
self._thumbprint = value
@property
def type(self,) -> Optional[str]:
"""
Gets the type property value. The type of key credential. 'AsymmetricX509Cert'.
Returns: Optional[str]
"""
return self._type
@type.setter
def type(self,value: Optional[str] = None) -> None:
"""
Sets the type property value. The type of key credential. 'AsymmetricX509Cert'.
Args:
value: Value to set for the type property.
"""
self._type = value
@property
def usage(self,) -> Optional[str]:
"""
Gets the usage property value. A string that describes the purpose for which the key can be used. For example, 'Verify'.
Returns: Optional[str]
"""
return self._usage
@usage.setter
def usage(self,value: Optional[str] = None) -> None:
"""
Sets the usage property value. A string that describes the purpose for which the key can be used. For example, 'Verify'.
Args:
value: Value to set for the usage property.
"""
self._usage = value
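# Illustrative sketch (not part of the generated model): populating an instance
# and reading it back. All values below are arbitrary examples.
#
#     cert = SelfSignedCertificate()
#     cert.display_name = "CN=example"
#     cert.start_date_time = datetime(2014, 1, 1)
#     cert.end_date_time = datetime(2015, 1, 1)
#     cert.type = "AsymmetricX509Cert"
#     cert.usage = "Verify"
#     print(cert.display_name, cert.key_id)  # -> CN=example None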
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_8/models/resource_performance.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_8 import models
class ResourcePerformance(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_per_mirrored_write': 'int',
'bytes_per_op': 'int',
'bytes_per_read': 'int',
'bytes_per_write': 'int',
'mirrored_write_bytes_per_sec': 'int',
'mirrored_writes_per_sec': 'int',
'qos_rate_limit_usec_per_mirrored_write_op': 'int',
'qos_rate_limit_usec_per_read_op': 'int',
'qos_rate_limit_usec_per_write_op': 'int',
'queue_usec_per_mirrored_write_op': 'int',
'queue_usec_per_read_op': 'int',
'queue_usec_per_write_op': 'int',
'read_bytes_per_sec': 'int',
'reads_per_sec': 'int',
'san_usec_per_mirrored_write_op': 'int',
'san_usec_per_read_op': 'int',
'san_usec_per_write_op': 'int',
'service_usec_per_mirrored_write_op': 'int',
'service_usec_per_read_op': 'int',
'service_usec_per_write_op': 'int',
'time': 'int',
'usec_per_mirrored_write_op': 'int',
'usec_per_read_op': 'int',
'usec_per_write_op': 'int',
'write_bytes_per_sec': 'int',
'writes_per_sec': 'int',
'service_usec_per_read_op_cache_reduction': 'float',
'id': 'str',
'name': 'str'
}
attribute_map = {
'bytes_per_mirrored_write': 'bytes_per_mirrored_write',
'bytes_per_op': 'bytes_per_op',
'bytes_per_read': 'bytes_per_read',
'bytes_per_write': 'bytes_per_write',
'mirrored_write_bytes_per_sec': 'mirrored_write_bytes_per_sec',
'mirrored_writes_per_sec': 'mirrored_writes_per_sec',
'qos_rate_limit_usec_per_mirrored_write_op': 'qos_rate_limit_usec_per_mirrored_write_op',
'qos_rate_limit_usec_per_read_op': 'qos_rate_limit_usec_per_read_op',
'qos_rate_limit_usec_per_write_op': 'qos_rate_limit_usec_per_write_op',
'queue_usec_per_mirrored_write_op': 'queue_usec_per_mirrored_write_op',
'queue_usec_per_read_op': 'queue_usec_per_read_op',
'queue_usec_per_write_op': 'queue_usec_per_write_op',
'read_bytes_per_sec': 'read_bytes_per_sec',
'reads_per_sec': 'reads_per_sec',
'san_usec_per_mirrored_write_op': 'san_usec_per_mirrored_write_op',
'san_usec_per_read_op': 'san_usec_per_read_op',
'san_usec_per_write_op': 'san_usec_per_write_op',
'service_usec_per_mirrored_write_op': 'service_usec_per_mirrored_write_op',
'service_usec_per_read_op': 'service_usec_per_read_op',
'service_usec_per_write_op': 'service_usec_per_write_op',
'time': 'time',
'usec_per_mirrored_write_op': 'usec_per_mirrored_write_op',
'usec_per_read_op': 'usec_per_read_op',
'usec_per_write_op': 'usec_per_write_op',
'write_bytes_per_sec': 'write_bytes_per_sec',
'writes_per_sec': 'writes_per_sec',
'service_usec_per_read_op_cache_reduction': 'service_usec_per_read_op_cache_reduction',
'id': 'id',
'name': 'name'
}
required_args = {
}
def __init__(
self,
bytes_per_mirrored_write=None, # type: int
bytes_per_op=None, # type: int
bytes_per_read=None, # type: int
bytes_per_write=None, # type: int
mirrored_write_bytes_per_sec=None, # type: int
mirrored_writes_per_sec=None, # type: int
qos_rate_limit_usec_per_mirrored_write_op=None, # type: int
qos_rate_limit_usec_per_read_op=None, # type: int
qos_rate_limit_usec_per_write_op=None, # type: int
queue_usec_per_mirrored_write_op=None, # type: int
queue_usec_per_read_op=None, # type: int
queue_usec_per_write_op=None, # type: int
read_bytes_per_sec=None, # type: int
reads_per_sec=None, # type: int
san_usec_per_mirrored_write_op=None, # type: int
san_usec_per_read_op=None, # type: int
san_usec_per_write_op=None, # type: int
service_usec_per_mirrored_write_op=None, # type: int
service_usec_per_read_op=None, # type: int
service_usec_per_write_op=None, # type: int
time=None, # type: int
usec_per_mirrored_write_op=None, # type: int
usec_per_read_op=None, # type: int
usec_per_write_op=None, # type: int
write_bytes_per_sec=None, # type: int
writes_per_sec=None, # type: int
service_usec_per_read_op_cache_reduction=None, # type: float
id=None, # type: str
name=None, # type: str
):
"""
Keyword args:
bytes_per_mirrored_write (int): The average I/O size per mirrored write. Measured in bytes.
bytes_per_op (int): The average I/O size for both read and write (all) operations.
bytes_per_read (int): The average I/O size per read. Measured in bytes.
bytes_per_write (int): The average I/O size per write. Measured in bytes.
mirrored_write_bytes_per_sec (int): The number of mirrored bytes written per second.
mirrored_writes_per_sec (int): The number of mirrored writes per second.
qos_rate_limit_usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds.
qos_rate_limit_usec_per_read_op (int): The average time spent waiting due to QoS rate limiting for a read request. Measured in microseconds.
qos_rate_limit_usec_per_write_op (int): The average time that a write I/O request spends waiting as a result of the volume reaching its QoS bandwidth limit. Measured in microseconds.
queue_usec_per_mirrored_write_op (int): The average time that a mirrored write I/O request spends in the array waiting to be served. Measured in microseconds.
queue_usec_per_read_op (int): The average time that a read I/O request spends in the array waiting to be served. Measured in microseconds.
queue_usec_per_write_op (int): The average time that a write I/O request spends in the array waiting to be served. Measured in microseconds.
read_bytes_per_sec (int): The number of bytes read per second.
reads_per_sec (int): The number of read requests processed per second.
san_usec_per_mirrored_write_op (int): The average time required to transfer data from the initiator to the array for a mirrored write request. Measured in microseconds.
san_usec_per_read_op (int): The average time required to transfer data from the array to the initiator for a read request. Measured in microseconds.
san_usec_per_write_op (int): The average time required to transfer data from the initiator to the array for a write request. Measured in microseconds.
service_usec_per_mirrored_write_op (int): The average time required for the array to service a mirrored write request. Measured in microseconds.
service_usec_per_read_op (int): The average time required for the array to service a read request. Measured in microseconds.
service_usec_per_write_op (int): The average time required for the array to service a write request. Measured in microseconds.
time (int): The time when the sample performance data was taken. Measured in milliseconds since the UNIX epoch.
usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
usec_per_read_op (int): The average time it takes the array to process an I/O read request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
usec_per_write_op (int): The average time it takes the array to process an I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
write_bytes_per_sec (int): The number of bytes written per second.
writes_per_sec (int): The number of write requests processed per second.
service_usec_per_read_op_cache_reduction (float): The percentage reduction in `service_usec_per_read_op` due to data cache hits. For example, a value of 0.25 indicates that the value of `service_usec_per_read_op` is 25% lower than it would have been without any data cache hits.
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A user-specified name. The name must be locally unique and can be changed.
"""
if bytes_per_mirrored_write is not None:
self.bytes_per_mirrored_write = bytes_per_mirrored_write
if bytes_per_op is not None:
self.bytes_per_op = bytes_per_op
if bytes_per_read is not None:
self.bytes_per_read = bytes_per_read
if bytes_per_write is not None:
self.bytes_per_write = bytes_per_write
if mirrored_write_bytes_per_sec is not None:
self.mirrored_write_bytes_per_sec = mirrored_write_bytes_per_sec
if mirrored_writes_per_sec is not None:
self.mirrored_writes_per_sec = mirrored_writes_per_sec
if qos_rate_limit_usec_per_mirrored_write_op is not None:
self.qos_rate_limit_usec_per_mirrored_write_op = qos_rate_limit_usec_per_mirrored_write_op
if qos_rate_limit_usec_per_read_op is not None:
self.qos_rate_limit_usec_per_read_op = qos_rate_limit_usec_per_read_op
if qos_rate_limit_usec_per_write_op is not None:
self.qos_rate_limit_usec_per_write_op = qos_rate_limit_usec_per_write_op
if queue_usec_per_mirrored_write_op is not None:
self.queue_usec_per_mirrored_write_op = queue_usec_per_mirrored_write_op
if queue_usec_per_read_op is not None:
self.queue_usec_per_read_op = queue_usec_per_read_op
if queue_usec_per_write_op is not None:
self.queue_usec_per_write_op = queue_usec_per_write_op
if read_bytes_per_sec is not None:
self.read_bytes_per_sec = read_bytes_per_sec
if reads_per_sec is not None:
self.reads_per_sec = reads_per_sec
if san_usec_per_mirrored_write_op is not None:
self.san_usec_per_mirrored_write_op = san_usec_per_mirrored_write_op
if san_usec_per_read_op is not None:
self.san_usec_per_read_op = san_usec_per_read_op
if san_usec_per_write_op is not None:
self.san_usec_per_write_op = san_usec_per_write_op
if service_usec_per_mirrored_write_op is not None:
self.service_usec_per_mirrored_write_op = service_usec_per_mirrored_write_op
if service_usec_per_read_op is not None:
self.service_usec_per_read_op = service_usec_per_read_op
if service_usec_per_write_op is not None:
self.service_usec_per_write_op = service_usec_per_write_op
if time is not None:
self.time = time
if usec_per_mirrored_write_op is not None:
self.usec_per_mirrored_write_op = usec_per_mirrored_write_op
if usec_per_read_op is not None:
self.usec_per_read_op = usec_per_read_op
if usec_per_write_op is not None:
self.usec_per_write_op = usec_per_write_op
if write_bytes_per_sec is not None:
self.write_bytes_per_sec = write_bytes_per_sec
if writes_per_sec is not None:
self.writes_per_sec = writes_per_sec
if service_usec_per_read_op_cache_reduction is not None:
self.service_usec_per_read_op_cache_reduction = service_usec_per_read_op_cache_reduction
if id is not None:
self.id = id
if name is not None:
self.name = name
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourcePerformance`".format(key))
if key == "bytes_per_mirrored_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_mirrored_write`, must be a value greater than or equal to `0`")
if key == "bytes_per_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_op`, must be a value greater than or equal to `0`")
if key == "bytes_per_read" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_read`, must be a value greater than or equal to `0`")
if key == "bytes_per_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_write`, must be a value greater than or equal to `0`")
if key == "mirrored_write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "mirrored_writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_writes_per_sec`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "read_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `read_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "reads_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `reads_per_sec`, must be a value greater than or equal to `0`")
if key == "san_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `writes_per_sec`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op_cache_reduction" and value is not None:
if value > 1.0:
raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, value must be less than or equal to `1.0`")
if value < 0.0:
raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, must be a value greater than or equal to `0.0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourcePerformance`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourcePerformance`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourcePerformance`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourcePerformance, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourcePerformance):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
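# Illustrative sketch (not part of the generated model): constructing an instance
# with a few arbitrary example values and dumping it as a dict. Negative values
# would raise ValueError in __setattr__.
#
#     perf = ResourcePerformance(reads_per_sec=1200, writes_per_sec=300,
#                                usec_per_read_op=450, name="vol01")
#     perf.to_dict()  # -> {'reads_per_sec': 1200, 'writes_per_sec': 300, ...}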
|
PypiClean
|
/django-geoprisma-0.0.1.tar.gz/django-geoprisma-0.0.1/geoprisma/static/geoprisma/lib/extjs/src/widgets/TabPanel.js
|
/**
 * @class Ext.TabPanel
* <p>A basic tab container. TabPanels can be used exactly like a standard {@link Ext.Panel}
* for layout purposes, but also have special support for containing child Components
* (<tt>{@link Ext.Container#items items}</tt>) that are managed using a
* {@link Ext.layout.CardLayout CardLayout layout manager}, and displayed as separate tabs.</p>
*
* <b>Note:</b> By default, a tab's close tool <i>destroys</i> the child tab Component
* and all its descendants. This makes the child tab Component, and all its descendants <b>unusable</b>. To enable
* re-use of a tab, configure the TabPanel with <b><code>{@link #autoDestroy autoDestroy: false}</code></b>.
*
* <p><b><u>TabPanel header/footer elements</u></b></p>
* <p>TabPanels use their {@link Ext.Panel#header header} or {@link Ext.Panel#footer footer} element
* (depending on the {@link #tabPosition} configuration) to accommodate the tab selector buttons.
* This means that a TabPanel will not display any configured title, and will not display any
* configured header {@link Ext.Panel#tools tools}.</p>
* <p>To display a header, embed the TabPanel in a {@link Ext.Panel Panel} which uses
* <b><tt>{@link Ext.Container#layout layout:'fit'}</tt></b>.</p>
*
* <p><b><u>Tab Events</u></b></p>
* <p>There is no actual tab class — each tab is simply a {@link Ext.BoxComponent Component}
* such as a {@link Ext.Panel Panel}. However, when rendered in a TabPanel, each child Component
* can fire additional events that only exist for tabs and are not available from other Components.
* These events are:</p>
* <div><ul class="mdetail-params">
* <li><tt><b>{@link Ext.Panel#activate activate}</b></tt> : Fires when this Component becomes
* the active tab.</li>
* <li><tt><b>{@link Ext.Panel#deactivate deactivate}</b></tt> : Fires when the Component that
* was the active tab becomes deactivated.</li>
* <li><tt><b>{@link Ext.Panel#beforeclose beforeclose}</b></tt> : Fires when the user clicks on the close tool of a closeable tab.
* May be vetoed by returning <code>false</code> from a handler.</li>
 * <li><tt><b>{@link Ext.Panel#close close}</b></tt> : Fires after a closeable tab has been closed by the user.</li>
* </ul></div>
* <p><b><u>Creating TabPanels from Code</u></b></p>
* <p>TabPanels can be created and rendered completely in code, as in this example:</p>
* <pre><code>
var tabs = new Ext.TabPanel({
renderTo: Ext.getBody(),
activeTab: 0,
items: [{
title: 'Tab 1',
html: 'A simple tab'
},{
title: 'Tab 2',
html: 'Another one'
}]
});
</code></pre>
* <p><b><u>Creating TabPanels from Existing Markup</u></b></p>
* <p>TabPanels can also be rendered from pre-existing markup in a couple of ways.</p>
* <div><ul class="mdetail-params">
*
* <li>Pre-Structured Markup</li>
* <div class="sub-desc">
* <p>A container div with one or more nested tab divs with class <tt>'x-tab'</tt> can be rendered entirely
* from existing markup (See the {@link #autoTabs} example).</p>
* </div>
*
* <li>Un-Structured Markup</li>
* <div class="sub-desc">
* <p>A TabPanel can also be rendered from markup that is not strictly structured by simply specifying by id
* which elements should be the container and the tabs. Using this method tab content can be pulled from different
* elements within the page by id regardless of page structure. For example:</p>
* <pre><code>
var tabs = new Ext.TabPanel({
renderTo: 'my-tabs',
activeTab: 0,
items:[
{contentEl:'tab1', title:'Tab 1'},
{contentEl:'tab2', title:'Tab 2'}
]
});
// Note that the tabs do not have to be nested within the container (although they can be)
<div id="my-tabs"></div>
<div id="tab1" class="x-hide-display">A simple tab</div>
<div id="tab2" class="x-hide-display">Another one</div>
</code></pre>
* Note that the tab divs in this example contain the class <tt>'x-hide-display'</tt> so that they can be rendered
* deferred without displaying outside the tabs. You could alternately set <tt>{@link #deferredRender} = false </tt>
* to render all content tabs on page load.
* </div>
*
* </ul></div>
*
* @extends Ext.Panel
* @constructor
* @param {Object} config The configuration options
* @xtype tabpanel
*/
Ext.TabPanel = Ext.extend(Ext.Panel, {
/**
* @cfg {Boolean} layoutOnTabChange
* Set to true to force a layout of the active tab when the tab is changed. Defaults to false.
* See {@link Ext.layout.CardLayout}.<code>{@link Ext.layout.CardLayout#layoutOnCardChange layoutOnCardChange}</code>.
*/
/**
     * @cfg {String} tabCls <b>This config option is used on <u>child Components</u> of this TabPanel.</b> A CSS
* class name applied to the tab strip item representing the child Component, allowing special
* styling to be applied.
*/
/**
* @cfg {Boolean} deferredRender
* <p><tt>true</tt> by default to defer the rendering of child <tt>{@link Ext.Container#items items}</tt>
* to the browsers DOM until a tab is activated. <tt>false</tt> will render all contained
* <tt>{@link Ext.Container#items items}</tt> as soon as the {@link Ext.layout.CardLayout layout}
* is rendered. If there is a significant amount of content or a lot of heavy controls being
* rendered into panels that are not displayed by default, setting this to <tt>true</tt> might
* improve performance.</p>
* <br><p>The <tt>deferredRender</tt> property is internally passed to the layout manager for
* TabPanels ({@link Ext.layout.CardLayout}) as its {@link Ext.layout.CardLayout#deferredRender}
* configuration value.</p>
* <br><p><b>Note</b>: leaving <tt>deferredRender</tt> as <tt>true</tt> means that the content
* within an unactivated tab will not be available. For example, this means that if the TabPanel
* is within a {@link Ext.form.FormPanel form}, then until a tab is activated, any Fields within
* unactivated tabs will not be rendered, and will therefore not be submitted and will not be
* available to either {@link Ext.form.BasicForm#getValues getValues} or
* {@link Ext.form.BasicForm#setValues setValues}.</p>
*/
deferredRender : true,
/**
* @cfg {Number} tabWidth The initial width in pixels of each new tab (defaults to 120).
*/
tabWidth : 120,
/**
* @cfg {Number} minTabWidth The minimum width in pixels for each tab when {@link #resizeTabs} = true (defaults to 30).
*/
minTabWidth : 30,
/**
* @cfg {Boolean} resizeTabs True to automatically resize each tab so that the tabs will completely fill the
* tab strip (defaults to false). Setting this to true may cause specific widths that might be set per tab to
* be overridden in order to fit them all into view (although {@link #minTabWidth} will always be honored).
*/
resizeTabs : false,
/**
* @cfg {Boolean} enableTabScroll True to enable scrolling to tabs that may be invisible due to overflowing the
* overall TabPanel width. Only available with tabPosition:'top' (defaults to false).
*/
enableTabScroll : false,
/**
* @cfg {Number} scrollIncrement The number of pixels to scroll each time a tab scroll button is pressed
* (defaults to <tt>100</tt>, or if <tt>{@link #resizeTabs} = true</tt>, the calculated tab width). Only
* applies when <tt>{@link #enableTabScroll} = true</tt>.
*/
scrollIncrement : 0,
/**
* @cfg {Number} scrollRepeatInterval Number of milliseconds between each scroll while a tab scroll button is
* continuously pressed (defaults to <tt>400</tt>).
*/
scrollRepeatInterval : 400,
/**
* @cfg {Float} scrollDuration The number of milliseconds that each scroll animation should last (defaults
* to <tt>.35</tt>). Only applies when <tt>{@link #animScroll} = true</tt>.
*/
scrollDuration : 0.35,
/**
* @cfg {Boolean} animScroll True to animate tab scrolling so that hidden tabs slide smoothly into view (defaults
* to <tt>true</tt>). Only applies when <tt>{@link #enableTabScroll} = true</tt>.
*/
animScroll : true,
/**
* @cfg {String} tabPosition The position where the tab strip should be rendered (defaults to <tt>'top'</tt>).
* The only other supported value is <tt>'bottom'</tt>. <b>Note</b>: tab scrolling is only supported for
* <tt>tabPosition: 'top'</tt>.
*/
tabPosition : 'top',
/**
* @cfg {String} baseCls The base CSS class applied to the panel (defaults to <tt>'x-tab-panel'</tt>).
*/
baseCls : 'x-tab-panel',
/**
* @cfg {Boolean} autoTabs
* <p><tt>true</tt> to query the DOM for any divs with a class of 'x-tab' to be automatically converted
* to tabs and added to this panel (defaults to <tt>false</tt>). Note that the query will be executed within
* the scope of the container element only (so that multiple tab panels from markup can be supported via this
* method).</p>
* <p>This method is only possible when the markup is structured correctly as a container with nested divs
* containing the class <tt>'x-tab'</tt>. To create TabPanels without these limitations, or to pull tab content
* from other elements on the page, see the example at the top of the class for generating tabs from markup.</p>
* <p>There are a couple of things to note when using this method:<ul>
* <li>When using the <tt>autoTabs</tt> config (as opposed to passing individual tab configs in the TabPanel's
* {@link #items} collection), you must use <tt>{@link #applyTo}</tt> to correctly use the specified <tt>id</tt>
* as the tab container. The <tt>autoTabs</tt> method <em>replaces</em> existing content with the TabPanel
* components.</li>
* <li>Make sure that you set <tt>{@link #deferredRender}: false</tt> so that the content elements for each
* tab will be rendered into the TabPanel immediately upon page load, otherwise they will not be transformed
* until each tab is activated and will be visible outside the TabPanel.</li>
* </ul>Example usage:</p>
* <pre><code>
var tabs = new Ext.TabPanel({
applyTo: 'my-tabs',
activeTab: 0,
deferredRender: false,
autoTabs: true
});
// This markup will be converted to a TabPanel from the code above
<div id="my-tabs">
<div class="x-tab" title="Tab 1">A simple tab</div>
<div class="x-tab" title="Tab 2">Another one</div>
</div>
</code></pre>
*/
autoTabs : false,
/**
* @cfg {String} autoTabSelector The CSS selector used to search for tabs in existing markup when
* <tt>{@link #autoTabs} = true</tt> (defaults to <tt>'div.x-tab'</tt>). This can be any valid selector
* supported by {@link Ext.DomQuery#select}. Note that the query will be executed within the scope of this
* tab panel only (so that multiple tab panels from markup can be supported on a page).
*/
autoTabSelector : 'div.x-tab',
/**
* @cfg {String/Number} activeTab A string id or the numeric index of the tab that should be initially
* activated on render (defaults to undefined).
*/
activeTab : undefined,
/**
* @cfg {Number} tabMargin The number of pixels of space to calculate into the sizing and scrolling of
* tabs. If you change the margin in CSS, you will need to update this value so calculations are correct
* with either <tt>{@link #resizeTabs}</tt> or scrolling tabs. (defaults to <tt>2</tt>)
*/
tabMargin : 2,
/**
     * @cfg {Boolean} plain <tt>true</tt> to render the tab strip without a background container image
* (defaults to <tt>false</tt>).
*/
plain : false,
/**
* @cfg {Number} wheelIncrement For scrolling tabs, the number of pixels to increment on mouse wheel
* scrolling (defaults to <tt>20</tt>).
*/
wheelIncrement : 20,
/*
* This is a protected property used when concatenating tab ids to the TabPanel id for internal uniqueness.
* It does not generally need to be changed, but can be if external code also uses an id scheme that can
* potentially clash with this one.
*/
idDelimiter : '__',
// private
itemCls : 'x-tab-item',
// private config overrides
elements : 'body',
headerAsText : false,
frame : false,
hideBorders :true,
// private
initComponent : function(){
this.frame = false;
Ext.TabPanel.superclass.initComponent.call(this);
this.addEvents(
/**
* @event beforetabchange
* Fires before the active tab changes. Handlers can <tt>return false</tt> to cancel the tab change.
* @param {TabPanel} this
* @param {Panel} newTab The tab being activated
* @param {Panel} currentTab The current active tab
*/
'beforetabchange',
/**
* @event tabchange
* Fires after the active tab has changed.
* @param {TabPanel} this
* @param {Panel} tab The new active tab
*/
'tabchange',
/**
* @event contextmenu
* Relays the contextmenu event from a tab selector element in the tab strip.
* @param {TabPanel} this
* @param {Panel} tab The target tab
* @param {EventObject} e
*/
'contextmenu'
);
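        // Illustrative sketch (not part of the original source): a listener on a
        // TabPanel instance `tabs` can veto a tab change by returning false.
        //
        //     tabs.on('beforetabchange', function(tp, newTab, currentTab) {
        //         return newTab.title !== 'Locked tab';
        //     });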
/**
* @cfg {Object} layoutConfig
* TabPanel implicitly uses {@link Ext.layout.CardLayout} as its layout manager.
* <code>layoutConfig</code> may be used to configure this layout manager.
* <code>{@link #deferredRender}</code> and <code>{@link #layoutOnTabChange}</code>
* configured on the TabPanel will be applied as configs to the layout manager.
*/
this.setLayout(new Ext.layout.CardLayout(Ext.apply({
layoutOnCardChange: this.layoutOnTabChange,
deferredRender: this.deferredRender
}, this.layoutConfig)));
if(this.tabPosition == 'top'){
this.elements += ',header';
this.stripTarget = 'header';
}else {
this.elements += ',footer';
this.stripTarget = 'footer';
}
if(!this.stack){
this.stack = Ext.TabPanel.AccessStack();
}
this.initItems();
},
// private
onRender : function(ct, position){
Ext.TabPanel.superclass.onRender.call(this, ct, position);
if(this.plain){
var pos = this.tabPosition == 'top' ? 'header' : 'footer';
this[pos].addClass('x-tab-panel-'+pos+'-plain');
}
var st = this[this.stripTarget];
this.stripWrap = st.createChild({cls:'x-tab-strip-wrap', cn:{
tag:'ul', cls:'x-tab-strip x-tab-strip-'+this.tabPosition}});
var beforeEl = (this.tabPosition=='bottom' ? this.stripWrap : null);
st.createChild({cls:'x-tab-strip-spacer'}, beforeEl);
this.strip = new Ext.Element(this.stripWrap.dom.firstChild);
        // create an empty span with class x-tab-strip-text to force the height of the header element when there are no tabs.
this.edge = this.strip.createChild({tag:'li', cls:'x-tab-edge', cn: [{tag: 'span', cls: 'x-tab-strip-text', cn: ' '}]});
this.strip.createChild({cls:'x-clear'});
this.body.addClass('x-tab-panel-body-'+this.tabPosition);
/**
* @cfg {Template/XTemplate} itemTpl <p>(Optional) A {@link Ext.Template Template} or
* {@link Ext.XTemplate XTemplate} which may be provided to process the data object returned from
* <tt>{@link #getTemplateArgs}</tt> to produce a clickable selector element in the tab strip.</p>
* <p>The main element created should be a <tt><li></tt> element. In order for a click event on
* a selector element to be connected to its item, it must take its <i>id</i> from the TabPanel's
* native <tt>{@link #getTemplateArgs}</tt>.</p>
* <p>The child element which contains the title text must be marked by the CSS class
* <tt>x-tab-strip-inner</tt>.</p>
* <p>To enable closability, the created element should contain an element marked by the CSS class
* <tt>x-tab-strip-close</tt>.</p>
* <p>If a custom <tt>itemTpl</tt> is supplied, it is the developer's responsibility to create CSS
* style rules to create the desired appearance.</p>
* Below is an example of how to create customized tab selector items:<pre><code>
new Ext.TabPanel({
renderTo: document.body,
minTabWidth: 115,
tabWidth: 135,
enableTabScroll: true,
width: 600,
height: 250,
defaults: {autoScroll:true},
itemTpl: new Ext.XTemplate(
'<li class="{cls}" id="{id}" style="overflow:hidden">',
'<tpl if="closable">',
'<a class="x-tab-strip-close"></a>',
'</tpl>',
'<a class="x-tab-right" href="#" style="padding-left:6px">',
'<em class="x-tab-left">',
'<span class="x-tab-strip-inner">',
'<img src="{src}" style="float:left;margin:3px 3px 0 0">',
'<span style="margin-left:20px" class="x-tab-strip-text {iconCls}">{text} {extra}</span>',
'</span>',
'</em>',
'</a>',
'</li>'
),
getTemplateArgs: function(item) {
// Call the native method to collect the base data. Like the ID!
var result = Ext.TabPanel.prototype.getTemplateArgs.call(this, item);
// Add stuff used in our template
return Ext.apply(result, {
closable: item.closable,
src: item.iconSrc,
extra: item.extraText || ''
});
},
items: [{
title: 'New Tab 1',
iconSrc: '../shared/icons/fam/grid.png',
html: 'Tab Body 1',
closable: true
}, {
title: 'New Tab 2',
iconSrc: '../shared/icons/fam/grid.png',
html: 'Tab Body 2',
extraText: 'Extra stuff in the tab button'
}]
});
</code></pre>
*/
if(!this.itemTpl){
var tt = new Ext.Template(
'<li class="{cls}" id="{id}"><a class="x-tab-strip-close"></a>',
'<a class="x-tab-right" href="#"><em class="x-tab-left">',
'<span class="x-tab-strip-inner"><span class="x-tab-strip-text {iconCls}">{text}</span></span>',
'</em></a></li>'
);
tt.disableFormats = true;
tt.compile();
Ext.TabPanel.prototype.itemTpl = tt;
}
this.items.each(this.initTab, this);
},
// private
afterRender : function(){
Ext.TabPanel.superclass.afterRender.call(this);
if(this.autoTabs){
this.readTabs(false);
}
if(this.activeTab !== undefined){
var item = Ext.isObject(this.activeTab) ? this.activeTab : this.items.get(this.activeTab);
delete this.activeTab;
this.setActiveTab(item);
}
},
// private
initEvents : function(){
Ext.TabPanel.superclass.initEvents.call(this);
this.mon(this.strip, {
scope: this,
mousedown: this.onStripMouseDown,
contextmenu: this.onStripContextMenu
});
if(this.enableTabScroll){
this.mon(this.strip, 'mousewheel', this.onWheel, this);
}
},
// private
findTargets : function(e){
var item = null,
itemEl = e.getTarget('li:not(.x-tab-edge)', this.strip);
if(itemEl){
item = this.getComponent(itemEl.id.split(this.idDelimiter)[1]);
if(item.disabled){
return {
close : null,
item : null,
el : null
};
}
}
return {
close : e.getTarget('.x-tab-strip-close', this.strip),
item : item,
el : itemEl
};
},
// private
onStripMouseDown : function(e){
if(e.button !== 0){
return;
}
e.preventDefault();
var t = this.findTargets(e);
if(t.close){
if (t.item.fireEvent('beforeclose', t.item) !== false) {
t.item.fireEvent('close', t.item);
this.remove(t.item);
}
return;
}
if(t.item && t.item != this.activeTab){
this.setActiveTab(t.item);
}
},
// private
onStripContextMenu : function(e){
e.preventDefault();
var t = this.findTargets(e);
if(t.item){
this.fireEvent('contextmenu', this, t.item, e);
}
},
/**
* True to scan the markup in this tab panel for <tt>{@link #autoTabs}</tt> using the
* <tt>{@link #autoTabSelector}</tt>
* @param {Boolean} removeExisting True to remove existing tabs
*/
readTabs : function(removeExisting){
if(removeExisting === true){
this.items.each(function(item){
this.remove(item);
}, this);
}
var tabs = this.el.query(this.autoTabSelector);
for(var i = 0, len = tabs.length; i < len; i++){
var tab = tabs[i],
title = tab.getAttribute('title');
tab.removeAttribute('title');
this.add({
title: title,
contentEl: tab
});
}
},
// private
initTab : function(item, index){
var before = this.strip.dom.childNodes[index],
p = this.getTemplateArgs(item),
el = before ?
this.itemTpl.insertBefore(before, p) :
this.itemTpl.append(this.strip, p),
cls = 'x-tab-strip-over',
tabEl = Ext.get(el);
tabEl.hover(function(){
if(!item.disabled){
tabEl.addClass(cls);
}
}, function(){
tabEl.removeClass(cls);
});
if(item.tabTip){
tabEl.child('span.x-tab-strip-text', true).qtip = item.tabTip;
}
item.tabEl = el;
// Route *keyboard triggered* click events to the tab strip mouse handler.
tabEl.select('a').on('click', function(e){
if(!e.getPageX()){
this.onStripMouseDown(e);
}
}, this, {preventDefault: true});
item.on({
scope: this,
disable: this.onItemDisabled,
enable: this.onItemEnabled,
titlechange: this.onItemTitleChanged,
iconchange: this.onItemIconChanged,
beforeshow: this.onBeforeShowItem
});
},
/**
* <p>Provides template arguments for rendering a tab selector item in the tab strip.</p>
* <p>This method returns an object hash containing properties used by the TabPanel's <tt>{@link #itemTpl}</tt>
* to create a formatted, clickable tab selector element. The properties which must be returned
* are:</p><div class="mdetail-params"><ul>
* <li><b>id</b> : String<div class="sub-desc">A unique identifier which links to the item</div></li>
* <li><b>text</b> : String<div class="sub-desc">The text to display</div></li>
* <li><b>cls</b> : String<div class="sub-desc">The CSS class name</div></li>
* <li><b>iconCls</b> : String<div class="sub-desc">A CSS class to provide appearance for an icon.</div></li>
* </ul></div>
* @param {Ext.BoxComponent} item The {@link Ext.BoxComponent BoxComponent} for which to create a selector element in the tab strip.
* @return {Object} An object hash containing the properties required to render the selector element.
*/
getTemplateArgs : function(item) {
var cls = item.closable ? 'x-tab-strip-closable' : '';
if(item.disabled){
cls += ' x-item-disabled';
}
if(item.iconCls){
cls += ' x-tab-with-icon';
}
if(item.tabCls){
cls += ' ' + item.tabCls;
}
return {
id: this.id + this.idDelimiter + item.getItemId(),
text: item.title,
cls: cls,
iconCls: item.iconCls || ''
};
},
// private
onAdd : function(c){
Ext.TabPanel.superclass.onAdd.call(this, c);
if(this.rendered){
var items = this.items;
this.initTab(c, items.indexOf(c));
this.delegateUpdates();
}
},
// private
onBeforeAdd : function(item){
var existing = item.events ? (this.items.containsKey(item.getItemId()) ? item : null) : this.items.get(item);
if(existing){
this.setActiveTab(item);
return false;
}
Ext.TabPanel.superclass.onBeforeAdd.apply(this, arguments);
var es = item.elements;
item.elements = es ? es.replace(',header', '') : es;
item.border = (item.border === true);
},
// private
onRemove : function(c){
var te = Ext.get(c.tabEl);
// check if the tabEl exists, it won't if the tab isn't rendered
if(te){
te.select('a').removeAllListeners();
Ext.destroy(te);
}
Ext.TabPanel.superclass.onRemove.call(this, c);
this.stack.remove(c);
delete c.tabEl;
c.un('disable', this.onItemDisabled, this);
c.un('enable', this.onItemEnabled, this);
c.un('titlechange', this.onItemTitleChanged, this);
c.un('iconchange', this.onItemIconChanged, this);
c.un('beforeshow', this.onBeforeShowItem, this);
if(c == this.activeTab){
var next = this.stack.next();
if(next){
this.setActiveTab(next);
}else if(this.items.getCount() > 0){
this.setActiveTab(0);
}else{
this.setActiveTab(null);
}
}
if(!this.destroying){
this.delegateUpdates();
}
},
// private
onBeforeShowItem : function(item){
if(item != this.activeTab){
this.setActiveTab(item);
return false;
}
},
// private
onItemDisabled : function(item){
var el = this.getTabEl(item);
if(el){
Ext.fly(el).addClass('x-item-disabled');
}
this.stack.remove(item);
},
// private
onItemEnabled : function(item){
var el = this.getTabEl(item);
if(el){
Ext.fly(el).removeClass('x-item-disabled');
}
},
// private
onItemTitleChanged : function(item){
var el = this.getTabEl(item);
if(el){
Ext.fly(el).child('span.x-tab-strip-text', true).innerHTML = item.title;
}
},
//private
onItemIconChanged : function(item, iconCls, oldCls){
var el = this.getTabEl(item);
if(el){
el = Ext.get(el);
el.child('span.x-tab-strip-text').replaceClass(oldCls, iconCls);
el[Ext.isEmpty(iconCls) ? 'removeClass' : 'addClass']('x-tab-with-icon');
}
},
/**
* Gets the DOM element for the tab strip item which activates the child panel with the specified
* ID. Access this to change the visual treatment of the item, for example by changing the CSS class name.
     * @param {Panel/Number/String} tab The tab component, or the tab's index, or the tab's id or itemId.
* @return {HTMLElement} The DOM node
*/
getTabEl : function(item){
var c = this.getComponent(item);
return c ? c.tabEl : null;
},
// private
onResize : function(){
Ext.TabPanel.superclass.onResize.apply(this, arguments);
this.delegateUpdates();
},
/**
* Suspends any internal calculations or scrolling while doing a bulk operation. See {@link #endUpdate}
*/
beginUpdate : function(){
this.suspendUpdates = true;
},
/**
* Resumes calculations and scrolling at the end of a bulk operation. See {@link #beginUpdate}
*/
endUpdate : function(){
this.suspendUpdates = false;
this.delegateUpdates();
},
/**
* Hides the tab strip item for the passed tab
* @param {Number/String/Panel} item The tab index, id or item
*/
hideTabStripItem : function(item){
item = this.getComponent(item);
var el = this.getTabEl(item);
if(el){
el.style.display = 'none';
this.delegateUpdates();
}
this.stack.remove(item);
},
/**
* Unhides the tab strip item for the passed tab
* @param {Number/String/Panel} item The tab index, id or item
*/
unhideTabStripItem : function(item){
item = this.getComponent(item);
var el = this.getTabEl(item);
if(el){
el.style.display = '';
this.delegateUpdates();
}
},
// private
delegateUpdates : function(){
var rendered = this.rendered;
if(this.suspendUpdates){
return;
}
if(this.resizeTabs && rendered){
this.autoSizeTabs();
}
if(this.enableTabScroll && rendered){
this.autoScrollTabs();
}
},
// private
autoSizeTabs : function(){
var count = this.items.length,
ce = this.tabPosition != 'bottom' ? 'header' : 'footer',
ow = this[ce].dom.offsetWidth,
aw = this[ce].dom.clientWidth;
if(!this.resizeTabs || count < 1 || !aw){ // !aw for display:none
return;
}
var each = Math.max(Math.min(Math.floor((aw-4) / count) - this.tabMargin, this.tabWidth), this.minTabWidth); // -4 for float errors in IE
this.lastTabWidth = each;
var lis = this.strip.query('li:not(.x-tab-edge)');
for(var i = 0, len = lis.length; i < len; i++) {
var li = lis[i],
inner = Ext.fly(li).child('.x-tab-strip-inner', true),
tw = li.offsetWidth,
iw = inner.offsetWidth;
inner.style.width = (each - (tw-iw)) + 'px';
}
},
// private
adjustBodyWidth : function(w){
if(this.header){
this.header.setWidth(w);
}
if(this.footer){
this.footer.setWidth(w);
}
return w;
},
/**
* Sets the specified tab as the active tab. This method fires the {@link #beforetabchange} event which
* can <tt>return false</tt> to cancel the tab change.
     * @param {String/Number/Panel} item
* The id or tab Panel to activate. This parameter may be any of the following:
* <div><ul class="mdetail-params">
* <li>a <b><tt>String</tt></b> : representing the <code>{@link Ext.Component#itemId itemId}</code>
* or <code>{@link Ext.Component#id id}</code> of the child component </li>
* <li>a <b><tt>Number</tt></b> : representing the position of the child component
* within the <code>{@link Ext.Container#items items}</code> <b>property</b></li>
* </ul></div>
     * <p>For additional information see {@link Ext.util.MixedCollection#get}.</p>
*/
setActiveTab : function(item){
item = this.getComponent(item);
if(this.fireEvent('beforetabchange', this, item, this.activeTab) === false){
return;
}
if(!this.rendered){
this.activeTab = item;
return;
}
if(this.activeTab != item){
if(this.activeTab){
var oldEl = this.getTabEl(this.activeTab);
if(oldEl){
Ext.fly(oldEl).removeClass('x-tab-strip-active');
}
}
this.activeTab = item;
if(item){
var el = this.getTabEl(item);
Ext.fly(el).addClass('x-tab-strip-active');
this.stack.add(item);
this.layout.setActiveItem(item);
// Need to do this here, since setting the active tab slightly changes the size
this.delegateUpdates();
if(this.scrolling){
this.scrollToTab(item, this.animScroll);
}
}
this.fireEvent('tabchange', this, item);
}
},
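    // Illustrative usage (not framework source): tabs can be activated by itemId/id,
    // by index, or by passing the child Panel itself, and a beforetabchange listener
    // can veto the change by returning false:
    //
    //     tabPanel.setActiveTab('settings');  // itemId or id
    //     tabPanel.setActiveTab(2);           // index within items
    //     tabPanel.on('beforetabchange', function(tp, newTab, oldTab){
    //         return newTab && !newTab.disabled;
    //     });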
/**
* Returns the Component which is the currently active tab. <b>Note that before the TabPanel
* first activates a child Component, this method will return whatever was configured in the
* {@link #activeTab} config option.</b>
* @return {BoxComponent} The currently active child Component if one <i>is</i> active, or the {@link #activeTab} config value.
*/
getActiveTab : function(){
return this.activeTab || null;
},
/**
* Gets the specified tab by id.
* @param {String} id The tab id
* @return {Panel} The tab
*/
getItem : function(item){
return this.getComponent(item);
},
// private
autoScrollTabs : function(){
this.pos = this.tabPosition=='bottom' ? this.footer : this.header;
var count = this.items.length,
ow = this.pos.dom.offsetWidth,
tw = this.pos.dom.clientWidth,
wrap = this.stripWrap,
wd = wrap.dom,
cw = wd.offsetWidth,
pos = this.getScrollPos(),
l = this.edge.getOffsetsTo(this.stripWrap)[0] + pos;
if(!this.enableTabScroll || cw < 20){ // 20 to prevent display:none issues
return;
}
if(count == 0 || l <= tw){
            // ensure the width is set if there are no tabs
wd.scrollLeft = 0;
wrap.setWidth(tw);
if(this.scrolling){
this.scrolling = false;
this.pos.removeClass('x-tab-scrolling');
this.scrollLeft.hide();
this.scrollRight.hide();
// See here: http://extjs.com/forum/showthread.php?t=49308&highlight=isSafari
if(Ext.isAir || Ext.isWebKit){
wd.style.marginLeft = '';
wd.style.marginRight = '';
}
}
}else{
if(!this.scrolling){
this.pos.addClass('x-tab-scrolling');
// See here: http://extjs.com/forum/showthread.php?t=49308&highlight=isSafari
if(Ext.isAir || Ext.isWebKit){
wd.style.marginLeft = '18px';
wd.style.marginRight = '18px';
}
}
tw -= wrap.getMargins('lr');
wrap.setWidth(tw > 20 ? tw : 20);
if(!this.scrolling){
if(!this.scrollLeft){
this.createScrollers();
}else{
this.scrollLeft.show();
this.scrollRight.show();
}
}
this.scrolling = true;
if(pos > (l-tw)){ // ensure it stays within bounds
wd.scrollLeft = l-tw;
}else{ // otherwise, make sure the active tab is still visible
this.scrollToTab(this.activeTab, false);
}
this.updateScrollButtons();
}
},
// private
createScrollers : function(){
this.pos.addClass('x-tab-scrolling-' + this.tabPosition);
var h = this.stripWrap.dom.offsetHeight;
// left
var sl = this.pos.insertFirst({
cls:'x-tab-scroller-left'
});
sl.setHeight(h);
sl.addClassOnOver('x-tab-scroller-left-over');
this.leftRepeater = new Ext.util.ClickRepeater(sl, {
interval : this.scrollRepeatInterval,
handler: this.onScrollLeft,
scope: this
});
this.scrollLeft = sl;
// right
var sr = this.pos.insertFirst({
cls:'x-tab-scroller-right'
});
sr.setHeight(h);
sr.addClassOnOver('x-tab-scroller-right-over');
this.rightRepeater = new Ext.util.ClickRepeater(sr, {
interval : this.scrollRepeatInterval,
handler: this.onScrollRight,
scope: this
});
this.scrollRight = sr;
},
// private
getScrollWidth : function(){
return this.edge.getOffsetsTo(this.stripWrap)[0] + this.getScrollPos();
},
// private
getScrollPos : function(){
return parseInt(this.stripWrap.dom.scrollLeft, 10) || 0;
},
// private
getScrollArea : function(){
return parseInt(this.stripWrap.dom.clientWidth, 10) || 0;
},
// private
getScrollAnim : function(){
return {duration:this.scrollDuration, callback: this.updateScrollButtons, scope: this};
},
// private
getScrollIncrement : function(){
return this.scrollIncrement || (this.resizeTabs ? this.lastTabWidth+2 : 100);
},
/**
* Scrolls to a particular tab if tab scrolling is enabled
* @param {Panel} item The item to scroll to
* @param {Boolean} animate True to enable animations
*/
scrollToTab : function(item, animate){
if(!item){
return;
}
var el = this.getTabEl(item),
pos = this.getScrollPos(),
area = this.getScrollArea(),
left = Ext.fly(el).getOffsetsTo(this.stripWrap)[0] + pos,
right = left + el.offsetWidth;
if(left < pos){
this.scrollTo(left, animate);
}else if(right > (pos + area)){
this.scrollTo(right - area, animate);
}
},
// private
scrollTo : function(pos, animate){
this.stripWrap.scrollTo('left', pos, animate ? this.getScrollAnim() : false);
if(!animate){
this.updateScrollButtons();
}
},
onWheel : function(e){
var d = e.getWheelDelta()*this.wheelIncrement*-1;
e.stopEvent();
var pos = this.getScrollPos(),
newpos = pos + d,
sw = this.getScrollWidth()-this.getScrollArea();
var s = Math.max(0, Math.min(sw, newpos));
if(s != pos){
this.scrollTo(s, false);
}
},
// private
onScrollRight : function(){
var sw = this.getScrollWidth()-this.getScrollArea(),
pos = this.getScrollPos(),
s = Math.min(sw, pos + this.getScrollIncrement());
if(s != pos){
this.scrollTo(s, this.animScroll);
}
},
// private
onScrollLeft : function(){
var pos = this.getScrollPos(),
s = Math.max(0, pos - this.getScrollIncrement());
if(s != pos){
this.scrollTo(s, this.animScroll);
}
},
// private
updateScrollButtons : function(){
var pos = this.getScrollPos();
this.scrollLeft[pos === 0 ? 'addClass' : 'removeClass']('x-tab-scroller-left-disabled');
this.scrollRight[pos >= (this.getScrollWidth()-this.getScrollArea()) ? 'addClass' : 'removeClass']('x-tab-scroller-right-disabled');
},
// private
beforeDestroy : function() {
Ext.destroy(this.leftRepeater, this.rightRepeater);
this.deleteMembers('strip', 'edge', 'scrollLeft', 'scrollRight', 'stripWrap');
this.activeTab = null;
Ext.TabPanel.superclass.beforeDestroy.apply(this);
}
/**
* @cfg {Boolean} collapsible
* @hide
*/
/**
* @cfg {String} header
* @hide
*/
/**
* @cfg {Boolean} headerAsText
* @hide
*/
/**
* @property header
* @hide
*/
/**
* @cfg title
* @hide
*/
/**
* @cfg {Array} tools
* @hide
*/
/**
* @cfg {Array} toolTemplate
* @hide
*/
/**
* @cfg {Boolean} hideCollapseTool
* @hide
*/
/**
* @cfg {Boolean} titleCollapse
* @hide
*/
/**
* @cfg {Boolean} collapsed
* @hide
*/
/**
* @cfg {String} layout
* @hide
*/
/**
* @cfg {Boolean} preventBodyReset
* @hide
*/
});
Ext.reg('tabpanel', Ext.TabPanel);
/**
* See {@link #setActiveTab}. Sets the specified tab as the active tab. This method fires
* the {@link #beforetabchange} event which can <tt>return false</tt> to cancel the tab change.
* @param {String/Panel} tab The id or tab Panel to activate
* @method activate
*/
Ext.TabPanel.prototype.activate = Ext.TabPanel.prototype.setActiveTab;
// private utility class used by TabPanel
Ext.TabPanel.AccessStack = function(){
var items = [];
return {
add : function(item){
items.push(item);
if(items.length > 10){
items.shift();
}
},
remove : function(item){
var s = [];
for(var i = 0, len = items.length; i < len; i++) {
if(items[i] != item){
s.push(items[i]);
}
}
items = s;
},
next : function(){
return items.pop();
}
};
};
/ladybug-core-0.42.2.tar.gz/ladybug-core-0.42.2/ladybug/psychchart.py
"""Object for calculating PMV comfort from DataCollections."""
from __future__ import division
from ladybug_geometry.geometry2d.pointvector import Point2D, Vector2D
from ladybug_geometry.geometry2d.line import LineSegment2D
from ladybug_geometry.geometry2d.polyline import Polyline2D
from ladybug_geometry.geometry2d.mesh import Mesh2D
from ladybug_geometry.geometry3d.pointvector import Point3D
from .epw import EPW
from .datacollection import DailyCollection, HourlyContinuousCollection, \
HourlyDiscontinuousCollection
from .psychrometrics import humid_ratio_from_db_rh, db_temp_from_enth_hr, \
db_temp_from_rh_hr, db_temp_and_hr_from_wb_rh
from .legend import LegendParameters
from .graphic import GraphicContainer
from .datatype.time import Time
from .datatype.temperature import Temperature
from .datatype.fraction import Fraction
from .datatype.specificenergy import Enthalpy
class PsychrometricChart(object):
"""Class for constructing psychrometric charts and plotting data on them.
Args:
temperature: Hourly, daily, or sub-hourly data collection of temperature
values in Celsius or a single temperature value to be used for the
whole analysis.
relative_humidity: Hourly, daily, or sub-hourly data collection of relative
humidity values in % or a single relative humidity value to be used
for the whole analysis.
average_pressure: Number for the average air pressure across the data
plotted on the chart (Pa). (Default: 101325 Pa; pressure at sea level).
legend_parameters: An optional LegendParameter object to change the display
of the PsychrometricChart. (Default: None).
base_point: A Point2D to be used as a starting point to generate the geometry
of the plot. (Default: (0, 0)).
x_dim: A number to set the X dimension of each degree of temperature on the
chart. (Default: 1).
y_dim: A number to set the Y dimension of a unity humidity ratio on the chart.
Note that most maximum humidity ratios are around 0.03. (Default: 1500).
min_temperature: An integer for the minimum temperature on the chart in
degrees. This should be celsius if use_ip is False and fahrenheit if
use_ip is True. (Default: -20; suitable for celsius).
max_temperature: An integer for the maximum temperature on the chart in
degrees. This should be celsius if use_ip is False and fahrenheit if
use_ip is True. (Default: 50; suitable for celsius).
max_humidity_ratio: A value for the maximum humidity ratio in kg water / kg
air. (Default: 0.03).
use_ip: Boolean to note whether temperature values should be plotted in
Fahrenheit instead of Celsius. (Default: False).
Properties:
* temperature
* relative_humidity
* average_pressure
* legend_parameters
* base_point
* x_dim
* y_dim
* min_temperature
* max_temperature
* max_humidity_ratio
* use_ip
* saturation_line
* chart_border
* temperature_labels
* temperature_label_points
* temperature_lines
* rh_labels
* rh_label_points
* rh_lines
* hr_labels
* hr_label_points
* hr_lines
* enthalpy_labels
* enthalpy_label_points
* enthalpy_lines
* wb_labels
* wb_label_points
* wb_lines
* title_text
* title_location
* x_axis_text
* x_axis_location
* y_axis_text
* y_axis_location
* data_points
* time_matrix
* hour_values
* colored_mesh
* legend
* container
"""
ACCEPTABLE_COLLECTIONS = (DailyCollection, HourlyContinuousCollection,
HourlyDiscontinuousCollection)
TEMP_TYPE = Temperature()
ENTH_TYPE = Enthalpy()
def __init__(self, temperature, relative_humidity, average_pressure=101325,
legend_parameters=None, base_point=Point2D(), x_dim=1, y_dim=1500,
min_temperature=-20, max_temperature=50, max_humidity_ratio=0.03,
use_ip=False):
"""Initialize Psychrometric Chart."""
# check and assign the temperature and humidity
self._use_ip = bool(use_ip)
self._calc_length = 1
self._time_multiplier = 1
self._temperature = temperature
self._relative_humidity = relative_humidity
self._t_values = self._t_values_c = self._check_input(
temperature, Temperature, 'C', 'temperature')
self._rh_values = self._check_input(
relative_humidity, Fraction, '%', 'relative_humidity')
if len(self._t_values) == 1:
self._t_values = self._t_values_c = self._t_values * self._calc_length
if self._use_ip: # convert everything to Fahrenheit
self._t_values = self.TEMP_TYPE.to_unit(self._t_values, 'F', 'C')
assert len(self._t_values) == len(self._rh_values), \
'Number of temperature and humidity values must match.'
# assign the inputs as properties of the chart
self._average_pressure = self._check_number(average_pressure, 'average_pressure')
assert isinstance(base_point, Point2D), 'Expected Point2D for ' \
'PsychrometricChart base point. Got {}.'.format(type(base_point))
self._base_point = base_point
self._x_dim = self._check_number(x_dim, 'x_dim')
self._y_dim = self._check_number(y_dim, 'y_dim')
assert max_temperature - min_temperature >= 10, 'Psychrometric chart ' \
'max_temperature and min_temperature difference must be at least 10.'
self._max_temperature = int(max_temperature)
self._min_temperature = int(min_temperature)
self._max_humidity_ratio = float(max_humidity_ratio)
assert self._max_humidity_ratio >= 0.005, 'Psychrometric chart ' \
'max_humidity_ratio must be at least 0.005.'
# create the graphic container
if self._use_ip: # categorize based on every 1.66 fahrenheit
self._t_category = []
current_t, max_t = self._min_temperature, self._max_temperature + 1.75
while current_t < max_t:
current_t += (5 / 3)
self._t_category.append(current_t)
else: # categorize based on every degree celsius
self._t_category = list(range(self._min_temperature + 1,
self._max_temperature + 1))
self._rh_category = list(range(5, 105, 5))
self._time_matrix, self._hour_values, self._remove_pattern = \
self._compute_hour_values()
assert len(self._hour_values) > 0, \
'No data was found to lie on the psychrometric chart.'
max_x = base_point.x + (self._max_temperature - self._min_temperature + 5) \
* self._x_dim
max_pt = Point3D(max_x, self.hr_y_value(self.max_humidity_ratio), 0)
min_pt = Point3D(base_point.x, base_point.y, 0)
self._container = GraphicContainer(
self._hour_values, min_pt, max_pt, legend_parameters, Time(), 'hr')
self._process_legend_default(self._container.legend_parameters)
# create global attributes used by several of the geometry properties
self._temp_range = list(range(self._min_temperature, self._max_temperature, 5)) \
+ [self._max_temperature]
self._x_range = [self.t_x_value(t) for t in self._temp_range]
if use_ip: # ensure that _temp_range is always in celsius
self._temp_range = self.TEMP_TYPE.to_unit(self._temp_range, 'C', 'F')
rh_range = range(10, 110, 10)
self._rh_lines = tuple(self.relative_humidity_polyline(rh) for rh in rh_range)
self._saturation_line = self.relative_humidity_polyline(100, 2)
max_hr_thnd = int(self._max_humidity_ratio * 1000)
base_hr_range = list(range(5, max_hr_thnd, 5)) + [max_hr_thnd]
max_db_hr = 1000 * humid_ratio_from_db_rh(
self._temp_range[-1], 100, self._average_pressure)
base_hr_range = [val for val in base_hr_range if val <= max_db_hr]
self._hr_range = tuple(round(val / 1000, 3) for val in base_hr_range)
self._y_range = [self._y_dim * hr + self._base_point.y for hr in self._hr_range]
# set null values for properties that are optional
self._chart_border = None
self._enth_range = None
self._enth_lines = None
self._wb_range = None
self._wb_lines = None
self._data_points = None
self._colored_mesh = None
# check to be sure we don't have conditions above the boiling point
assert self._temp_range[-1] < 100, \
'Temperatures above the boiling point of water are not plot-able.'
@classmethod
def from_epw(cls, epw_file, legend_parameters=None, base_point=Point2D(),
x_dim=1, y_dim=1500, min_temperature=-20, max_temperature=50,
max_humidity_ratio=0.03, use_ip=False):
"""Create a psychrometric chart object using the data in an epw file.
Args:
epw_file: Full path to epw weather file.
legend_parameters: An optional LegendParameter object to change the display
of the PsychrometricChart. (Default: None).
base_point: A Point2D to be used as a starting point to generate the geometry
of the plot. (Default: (0, 0)).
x_dim: A number to set the X dimension of each degree of temperature on the
chart. (Default: 1).
y_dim: A number to set the Y dimension of unity humidity ratio on the chart.
Note that most maximum humidity ratios are around 0.03. (Default: 1500).
min_temperature: An integer for the minimum temperature on the chart in
degrees. This should be celsius if use_ip is False and fahrenheit if
use_ip is True. (Default: -20; suitable for celsius).
max_temperature: An integer for the maximum temperature on the chart in
degrees. This should be celsius if use_ip is False and fahrenheit if
use_ip is True. (Default: 50; suitable for celsius).
max_humidity_ratio: A value for the maximum humidity ratio in kg water / kg
air. (Default: 0.03).
use_ip: Boolean to note whether temperature values should be plotted in
Fahrenheit instead of Celsius. (Default: False).
"""
epw = EPW(epw_file)
pressure = epw.atmospheric_station_pressure.average
return cls(
epw.dry_bulb_temperature, epw.relative_humidity, pressure, legend_parameters,
base_point, x_dim, y_dim, min_temperature, max_temperature,
max_humidity_ratio, use_ip)
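    # Illustrative usage of from_epw (not part of the original module); the file
    # path is hypothetical and must point to a readable EPW weather file:
    #
    #     chart = PsychrometricChart.from_epw('./weather/example_city.epw')
    #     mesh = chart.colored_mesh  # Mesh2D colored by hours in each t/RH bin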
@classmethod
def from_dict(cls, data):
""" Create PsychrometricChart from a dictionary
Args:
data: A python dictionary in the following format
.. code-block:: python
{
'type': 'PsychrometricChart',
'temperature': {}, # data collection or value for temperature [C]
'relative_humidity': {}, # data collection or value for humidity [%]
'average_pressure': 101325, # average atmospheric pressure [Pa]
'legend_parameters': {}, # legend parameters dictionary
'base_point': {}, # Point2D dictionary
'x_dim': 1.0, # value for X dimension per degree
'y_dim': 1500.0, # value for Y dimension for unity humidity ratio
'min_temperature': -20.0, # value for minimum temperature
'max_temperature': 50.0, # value for maximum temperature
'max_humidity_ratio': 0.03, # value for maximum humidity ratio
'use_ip': False, # boolean for whether to use IP values
}
"""
# process the optional inputs
p = data['average_pressure'] if 'average_pressure' in data else 101325
lp = LegendParameters.from_dict(data['legend_parameters']) \
if 'legend_parameters' in data else None
bpt = Point2D.from_dict(data['base_point']) if 'base_point' in data \
else Point2D()
xd = data['x_dim'] if 'x_dim' in data else 1
yd = data['y_dim'] if 'y_dim' in data else 1500
tmin = data['min_temperature'] if 'min_temperature' in data else -20
tmax = data['max_temperature'] if 'max_temperature' in data else 50
hrmax = data['max_humidity_ratio'] if 'max_humidity_ratio' in data else 0.03
ip = data['use_ip'] if 'use_ip' in data else False
# process the data collections
class_mapper = {
'DailyCollection': DailyCollection,
'HourlyContinuousCollection': HourlyContinuousCollection,
'HourlyDiscontinuousCollection': HourlyDiscontinuousCollection}
t_data, rh_data = data['temperature'], data['relative_humidity']
temp = class_mapper[t_data['type']].from_dict(t_data) \
if isinstance(t_data, dict) else t_data
rh = class_mapper[rh_data['type']].from_dict(rh_data) \
if isinstance(rh_data, dict) else rh_data
return cls(temp, rh, p, lp, bpt, xd, yd, tmin, tmax, hrmax, ip)
@property
def temperature(self):
"""The temperature assigned to this psychrometric chart [C]."""
return self._temperature
@property
def relative_humidity(self):
"""The relative humidity assigned to this psychrometric chart."""
return self._relative_humidity
@property
def average_pressure(self):
"""the average air pressure across the data plotted on the chart (Pa)."""
return self._average_pressure
@property
def legend_parameters(self):
"""The legend parameters customizing this psychrometric chart."""
return self._container.legend_parameters
@property
def base_point(self):
"""Point3D for the base point of this psychrometric chart."""
return self._base_point
@property
def x_dim(self):
"""The X dimension of each degree of temperature on the chart."""
return self._x_dim
@property
def y_dim(self):
"""The Y dimension of a unity humidity ratio on the chart."""
return self._y_dim
@property
def min_temperature(self):
"""An integer for the minimum temperature on the chart.
Will be in celsius if use_ip is False and fahrenheit if use_ip is True.
"""
return self._min_temperature
@property
def max_temperature(self):
"""An integer for the maximum temperature on the chart.
Will be in celsius if use_ip is False and fahrenheit if use_ip is True.
"""
return self._max_temperature
@property
def max_humidity_ratio(self):
"""A value for the maximum humidity ratio in kg water / kg air."""
return self._max_humidity_ratio
@property
def use_ip(self):
"""Boolean for whether temperature should be in Fahrenheit or Celsius."""
return self._use_ip
@property
def saturation_line(self):
"""Get a Polyline2D for the saturation line of the chart."""
return self._saturation_line
@property
def chart_border(self):
"""Get a Polyline2D for the border of the chart (excluding saturation line)."""
if self._chart_border is None:
self._chart_border = self._compute_border()
return self._chart_border
@property
def temperature_labels(self):
"""Get a tuple of text for the temperature labels on the chart."""
if self.use_ip:
temp_range = tuple(range(self._min_temperature, self._max_temperature, 5)) \
+ (self._max_temperature,)
return tuple(str(val) for val in temp_range)
return tuple(str(val) for val in self._temp_range)
@property
def temperature_label_points(self):
"""Get a tuple of Point2Ds for the temperature labels on the chart."""
y_val = self._base_point.y - self.legend_parameters.text_height * 0.5
return tuple(Point2D(x_val, y_val) for x_val in self._x_range)
@property
def temperature_lines(self):
"""Get a tuple of LineSegment2Ds for the temperature labels on the chart."""
# get the Y-values for the top of the temperature lines
hr_vals = (humid_ratio_from_db_rh(t, 100, self.average_pressure)
for t in self._temp_range)
top_y = []
for hr in hr_vals:
y_val = self.hr_y_value(hr) if hr < self._max_humidity_ratio \
else self.hr_y_value(self._max_humidity_ratio)
top_y.append(y_val)
t_lines = [] # create the array of line segments
for x_val, y_val in zip(self._x_range, top_y):
l_seg = LineSegment2D.from_end_points(
Point2D(x_val, self._base_point.y), Point2D(x_val, y_val))
t_lines.append(l_seg)
return t_lines
@property
def rh_labels(self):
"""Get a tuple of text for the relative humidity labels on the chart."""
return tuple('{}%'.format(val) for val in range(10, 110, 10))
@property
def rh_label_points(self):
"""Get a tuple of Point2Ds for the relative humidity labels on the chart."""
last_sgs = (LineSegment2D.from_end_points(p[-2], p[-1]) for p in self._rh_lines)
last_dirs = (seg.v.reverse().normalize() * (self._x_dim * 2) for seg in last_sgs)
move_vec = (Vector2D(vec.x - (self._x_dim * 0.4), vec.y) for vec in last_dirs)
return tuple(pl[-1].move(vec) for pl, vec in zip(self._rh_lines, move_vec))
@property
def rh_lines(self):
"""Get a tuple of Polyline2Ds for the relative humidity labels on the chart."""
return self._rh_lines
@property
def hr_labels(self):
"""Get a tuple of text for the humidity ratio labels on the chart."""
return tuple(str(val) for val in self._hr_range)
@property
def hr_label_points(self):
"""Get a tuple of Point2Ds for the humidity ratio labels on the chart."""
x_val = self._x_range[-1] + self.legend_parameters.text_height * 0.5
return tuple(Point2D(x_val, y_val) for y_val in self._y_range)
@property
def hr_lines(self):
"""Get a tuple of LineSegment2Ds for the humidity ratio labels on the chart."""
hr_lines, xmax = [], self._x_range[-1]
for hr, y in zip(self._hr_range, self._y_range):
tmin = db_temp_from_rh_hr(100, hr, self.average_pressure)
tmin = self.TEMP_TYPE.to_unit([tmin], 'F', 'C')[0] if self.use_ip else tmin
xmin = self.t_x_value(tmin)
xmin = xmin if xmin > self.base_point.x else self.base_point.x
l_seg = LineSegment2D.from_end_points(Point2D(xmax, y), Point2D(xmin, y))
hr_lines.append(l_seg)
return hr_lines
@property
def enthalpy_labels(self):
"""Get a tuple of text for the enthalpy labels on the chart."""
if self._enth_range is None:
self._compute_enthalpy_range()
return tuple('{} kJ/kg'.format(val) for val in self._enth_range) if not \
self.use_ip else tuple('{} Btu/lb'.format(val) for val in self._enth_range)
@property
def enthalpy_label_points(self):
"""Get a tuple of Point2Ds for the humidity ratio labels on the chart."""
if self._enth_lines is None:
self._compute_enthalpy_range()
return self._labels_points_from_lines(self._enth_lines)
@property
def enthalpy_lines(self):
"""Get a tuple of LineSegment2Ds for the humidity ratio labels on the chart."""
if self._enth_lines is None:
self._compute_enthalpy_range()
return self._enth_lines
@property
def wb_labels(self):
"""Get a tuple of text for the wet bulb labels on the chart."""
if self._wb_range is None:
self._compute_wb_range()
return tuple('{} C'.format(val) for val in self._wb_range) if not \
self.use_ip else tuple('{} F'.format(val) for val in self._wb_range)
@property
def wb_label_points(self):
"""Get a tuple of Point2Ds for the wet bulb labels on the chart."""
if self._wb_lines is None:
self._compute_wb_range()
return self._labels_points_from_lines(self._wb_lines)
@property
def wb_lines(self):
"""Get a tuple of LineSegment2Ds for the wet bulb temp labels on the chart."""
if self._wb_lines is None:
self._compute_wb_range()
return self._wb_lines
@property
def title_text(self):
"""Get text for the title of the chart."""
title_items = ['Time [hr]']
extra_data = []
if isinstance(self.temperature, self.ACCEPTABLE_COLLECTIONS):
extra_data = self.temperature.header.metadata.items()
elif isinstance(self.relative_humidity, self.ACCEPTABLE_COLLECTIONS):
extra_data = self.relative_humidity.header.metadata.items()
return '\n'.join(title_items + ['{}: {}'.format(k, v) for k, v in extra_data])
@property
def title_location(self):
"""Get a Point2D for the title of the chart."""
origin = self.container.upper_title_location.o
return Point2D(origin.x, origin.y)
@property
def x_axis_text(self):
"""Get text for the X-axis label of the chart."""
unit = 'C' if not self.use_ip else 'F'
if isinstance(self.temperature, self.ACCEPTABLE_COLLECTIONS):
if 'type' in self.temperature.header.metadata:
return '{} [{}]'.format(self.temperature.header.metadata['type'], unit)
else:
return '{} [{}]'.format(self.temperature.header.data_type, unit)
return 'Temperature [{}]'.format(unit)
@property
def x_axis_location(self):
"""Get a Point2D for the X-axis label of the chart."""
y_val = self._base_point.y - self.legend_parameters.text_height * 2.5
return Point2D(self.base_point.x, y_val)
@property
def y_axis_text(self):
"""Get text for the Y-axis label of the chart."""
unit = 'kg' if not self.use_ip else 'lb'
return 'Humidity Ratio\n[{0} water / {0} air]'.format(unit)
@property
def y_axis_location(self):
"""Get a Point2D for the Y-axis label of the chart."""
x_val = self._container.max_point.x + self.legend_parameters.text_height * 1.5
return Point2D(x_val, self._container.max_point.y)
@property
def data_points(self):
"""Get a tuple of Point2Ds for each of the temperature and humidity values."""
if self._data_points is None:
p = self._average_pressure
self._data_points = tuple(
Point2D(
self.t_x_value(t), self.hr_y_value(humid_ratio_from_db_rh(c, r, p)))
for t, c, r in zip(self._t_values, self._t_values_c, self._rh_values))
return self._data_points
@property
def time_matrix(self):
"""Get a tuple of of tuples where each sub-tuple is a row of the mesh.
Each value in the resulting matrix corresponds to the number of temperature/
humidity points in a given cell of the mesh.
"""
return tuple(tuple(row) for row in self._time_matrix)
@property
def hour_values(self):
"""Get a tuple for the number of hours associated with each colored_mesh face."""
return self._hour_values
@property
def colored_mesh(self):
"""Get a colored mesh for the number of hours for each part of the chart."""
if self._colored_mesh is None:
self._colored_mesh = self._generate_mesh()
return self._colored_mesh
@property
def legend(self):
"""The legend assigned to this graphic."""
return self._container._legend
@property
def container(self):
"""Get the GraphicContainer for the colored mesh."""
return self._container
def plot_point(self, temperature, relative_humidity):
"""Get a Point2D for a given temperature and relative humidity on the chart.
Args:
temperature: A temperature value, which should be in Celsius if use_ip
                is False and Fahrenheit if use_ip is True.
relative_humidity: A relative humidity value in % (from 0 to 100).
"""
tc = temperature if not self.use_ip else \
self.TEMP_TYPE.to_unit([temperature], 'C', 'F')[0]
hr = humid_ratio_from_db_rh(tc, relative_humidity, self.average_pressure)
return Point2D(self.t_x_value(temperature), self.hr_y_value(hr))
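    # Worked example (illustrative, with the default chart dimensions): at 20 C and
    # 50% RH the humidity ratio is roughly 0.0073 kg water / kg air, so with
    # base_point=(0, 0), x_dim=1, y_dim=1500 and min_temperature=-20 the point lands
    # near Point2D(40, 11) on the chart.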
def data_mesh(self, data_collection, legend_parameters=None):
"""Get a colored mesh for a data_collection aligned with the chart's data.
Args:
data_collection: A data collection that is aligned with the temperature
and humidity values of the chart.
legend_parameters: Optional legend parameters to customize the legend
and look of the resulting mesh.
Returns:
A tuple with two values.
- mesh: A colored Mesh2D similar to the chart's colored_mesh property
but where each face is colored with the average value of the input
data_collection.
- container: A GraphicContainer object for the mesh, which possesses
a legend that corresponds to the mesh.
"""
# check to be sure the data collection aligns
data_vals = data_collection.values
assert len(data_vals) == self._calc_length, 'Number of data collection values ' \
            'must match those of the psychrometric chart temperature and humidity.'
# create a matrix with a tally of the hours for all the data
base_mtx = [[[] for val in self._t_category] for rh in self._rh_category]
for t, rh, val in zip(self._t_values, self._rh_values, data_vals):
if t < self._min_temperature or t > self._max_temperature:
continue # temperature value does not currently fit on the chart
for y, rh_cat in enumerate(self._rh_category):
if rh < rh_cat:
break
for x, t_cat in enumerate(self._t_category):
if t < t_cat:
break
base_mtx[y][x].append(val)
# compute average values
avg_values = [sum(val_list) / len(val_list) for rh_l in base_mtx
for val_list in rh_l if len(val_list) != 0]
# create the colored mesh and graphic container
base_contain = self.container
container = GraphicContainer(
avg_values, base_contain.min_point, base_contain.max_point,
legend_parameters, data_collection.header.data_type,
data_collection.header.unit)
self._process_legend_default(container.legend_parameters)
mesh = self.colored_mesh.duplicate() # start with hour mesh as a base
mesh.colors = container.value_colors
return mesh, container
def relative_humidity_polyline(self, rh, subdivisions=1):
"""Get a Polyline2D for a given relative humidity value.
Args:
rh: A number between 0 and 100 for the relative humidity line to draw.
subdivisions: Integer for the number of subdivisions for every 5
degrees. (Default: 1).
"""
# get the HR values and temperatures
prs = self.average_pressure
if subdivisions == 1:
hr_vals = [humid_ratio_from_db_rh(t, rh, prs) for t in self._temp_range]
x_vals = self._x_range
else: # build up custom temperatures and HRs
hr_vals = [humid_ratio_from_db_rh(self._temp_range[0], rh, prs)]
x_vals = [self._x_range[0]]
t_diff = (self._temp_range[1] - self._temp_range[0]) / subdivisions
x_diff = (self._x_range[1] - self._x_range[0]) / subdivisions
for i in range(len(self._temp_range) - 1):
st_t, st_x = self._temp_range[i], self._x_range[i]
for j in range(subdivisions):
t = st_t + (j + 1) * t_diff
hr_vals.append(humid_ratio_from_db_rh(t, rh, prs))
x_vals.append(st_x + (j + 1) * x_diff)
# loop through the values and create the points
pts = []
for i, (x, hr) in enumerate(zip(x_vals, hr_vals)):
if hr < self._max_humidity_ratio:
pts.append(Point2D(x, self.hr_y_value(hr)))
else: # we're at the top of the chart; cut it off
if abs(self._max_humidity_ratio - hr_vals[i - 1]) < 0.001:
del pts[-1] # avoid the case of a bad interpolation
last_db = db_temp_from_rh_hr(
rh, self._max_humidity_ratio, self.average_pressure)
last_db = self.TEMP_TYPE.to_unit([last_db], 'F', 'C')[0] \
if self.use_ip else last_db
x_val = self.t_x_value(last_db)
pts.append(Point2D(x_val, self.hr_y_value(self._max_humidity_ratio)))
break
return Polyline2D(pts, interpolated=True)
def hr_y_value(self, humidity_ratio):
"""Get the Y-coordinate associated with a certain HR on the chart.
Args:
humidity_ratio: A humidity ratio value in kg water / kg air.
"""
return self.base_point.y + humidity_ratio * self._y_dim
def t_x_value(self, temperature):
"""Get the X-coordinate associated with a certain temperature on the chart.
Args:
temperature: A temperature value, which should be in Celsius if use_ip
                is False and Fahrenheit if use_ip is True.
"""
return self._base_point.x + self._x_dim * (temperature - self._min_temperature)
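    # For example (with the defaults base_point=(0, 0), x_dim=1, min_temperature=-20):
    # t_x_value(25) returns 0 + 1 * (25 - (-20)) = 45, and hr_y_value(0.01) with
    # y_dim=1500 returns 0 + 0.01 * 1500 = 15.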
def to_dict(self):
"""Get psychrometric chart as a dictionary."""
temp = self.temperature
temp = temp.to_dict() if isinstance(temp, self.ACCEPTABLE_COLLECTIONS) else temp
rh = self.relative_humidity
rh = rh.to_dict() if isinstance(rh, self.ACCEPTABLE_COLLECTIONS) else rh
return {
'temperature': temp,
'relative_humidity': rh,
'average_pressure': self.average_pressure,
'legend_parameters': self.legend_parameters.to_dict(),
'base_point': self.base_point.to_dict(),
'x_dim': self.x_dim,
'y_dim': self.y_dim,
'min_temperature': self.min_temperature,
'max_temperature': self.max_temperature,
'max_humidity_ratio': self.max_humidity_ratio,
'use_ip': self.use_ip,
'type': 'PsychrometricChart'
}
def _compute_hour_values(self):
"""Compute the matrix of binned time values based on the chart inputs.
Returns:
A tuple with three values.
- base_mtx: A full matrix with counts of values for each degree
temperature and 5% RH of the chart.
- mesh_values: A list of numbers for the values of the mesh.
- remove_pattern: A list of booleans for which faces of the full mesh
should be removed.
"""
# create a matrix with a tally of the hours for all the data
base_mtx = [[0 for val in self._t_category] for rh in self._rh_category]
for t, rh in zip(self._t_values, self._rh_values):
if t < self._min_temperature or t > self._max_temperature:
continue # temperature value does not currently fit on the chart
for y, rh_cat in enumerate(self._rh_category):
if rh < rh_cat:
break
for x, t_cat in enumerate(self._t_category):
if t < t_cat:
break
base_mtx[y][x] += 1
# flatten the matrix and create a pattern to remove faces
flat_values = [tc * self._time_multiplier for rh_l in base_mtx for tc in rh_l]
remove_pattern = [val != 0 for val in flat_values]
mesh_values = tuple(val for val in flat_values if val != 0)
return base_mtx, mesh_values, remove_pattern
def _generate_mesh(self):
"""Get the colored mesh from this object's hour values."""
# global properties used in the generation of the mesh
prs = self.average_pressure
t_per_row = [self._min_temperature] + self._t_category
x_per_row = [self.t_x_value(t) for t in t_per_row]
temp_in_c = self.TEMP_TYPE.to_unit(t_per_row, 'C', 'F') \
if self.use_ip else t_per_row
# loop through RH rows and create mesh vertices and faces
vertices = [Point2D(x, self._base_point.y) for x in x_per_row]
faces, vert_count, row_len = [], 0, len(t_per_row)
for rh in self._rh_category:
vert_count += row_len
y1 = self.hr_y_value(humid_ratio_from_db_rh(temp_in_c[0], rh, prs))
vertices.append(Point2D(x_per_row[0], y1))
for i, t in enumerate(temp_in_c[1:]):
y = self.hr_y_value(humid_ratio_from_db_rh(t, rh, prs))
vertices.append(Point2D(x_per_row[i + 1], y))
v1 = vert_count - row_len + i
v2 = v1 + 1
v3 = vert_count + i + 1
v4 = v3 - 1
faces.append((v1, v2, v3, v4))
# create the Mesh2D, remove unused faces, and assign the colors
mesh = Mesh2D(vertices, faces)
mesh = mesh.remove_faces_only(self._remove_pattern)
mesh.colors = self._container.value_colors
return mesh
def _compute_border(self):
"""Compute a Polyline2D for the outer border of the chart."""
# get properties used to establish the border of the chart
prs, bpt, hmax = self.average_pressure, self.base_point, self.max_humidity_ratio
max_hr = humid_ratio_from_db_rh(self._temp_range[-1], 100, prs)
y_left = self.hr_y_value(humid_ratio_from_db_rh(self._temp_range[0], 100, prs))
y_right = self.hr_y_value(hmax) if max_hr > hmax else self.hr_y_value(max_hr)
x_max = bpt.x + (self.max_temperature - self.min_temperature) * self._x_dim
# get the points and build the polyline
pt1, pt2, pt3, pt4 = \
Point2D(bpt.x, y_left), bpt, Point2D(x_max, bpt.y), Point2D(x_max, y_right)
if max_hr > hmax:
return Polyline2D((pt1, pt2, pt3, pt4, self._saturation_line[-1]))
return Polyline2D((pt1, pt2, pt3, pt4))
def _compute_enthalpy_range(self):
"""Compute the values for enthalpy range and lines."""
# constants used throughout the calculation
low_y = self.base_point.y + 1e-6
up_y = self.hr_y_value(self._max_humidity_ratio)
border, sat_line = self.chart_border, self._saturation_line
all_enthalpies, ref_temp = tuple(range(0, 160, 10)), 0
enth_lbl = all_enthalpies
if self.use_ip:
enth_lbl = tuple(range(0, 65, 5))
all_enthalpies = self.ENTH_TYPE.to_unit(enth_lbl, 'kJ/kg', 'Btu/lb')
ref_temp = self.TEMP_TYPE.to_unit([0], 'C', 'F')[0]
# loop through the enthalpies and compute the lines of constant enthalpy
enth_range, enth_lines = [], []
for i, enthalpy in enumerate(all_enthalpies):
st_db = db_temp_from_enth_hr(enthalpy, 0.0, ref_temp)
end_db = db_temp_from_enth_hr(enthalpy, 0.03, ref_temp)
if self.use_ip:
st_db, end_db = self.TEMP_TYPE.to_unit((st_db, end_db), 'F', 'C')
enth_line = LineSegment2D.from_end_points(
Point2D(self.t_x_value(st_db), low_y),
Point2D(self.t_x_value(end_db), up_y))
border_ints = border.intersect_line_ray(enth_line)
if len(border_ints) == 2:
enth_range.append(enth_lbl[i])
seg = LineSegment2D.from_end_points(border_ints[0], border_ints[1])
enth_lines.append(seg)
else:
sat_ints = sat_line.intersect_line_ray(enth_line)
if len(sat_ints) != 0:
enth_range.append(enth_lbl[i])
if len(border_ints) == 1:
seg = LineSegment2D.from_end_points(border_ints[0], sat_ints[0])
else:
seg = LineSegment2D.from_end_points(enth_line.p, sat_ints[0])
enth_lines.append(seg)
# set the properties on this class
self._enth_range = enth_range
self._enth_lines = enth_lines
def _compute_wb_range(self):
"""Compute the values for wet bulb range and lines."""
# constants used throughout the calculation
low_y, border = self.base_point.y - 1e-6, self.chart_border
all_wbs = wb_c = tuple(range(self._min_temperature, self._max_temperature, 5))
if self.use_ip:
wb_c = self.TEMP_TYPE.to_unit(wb_c, 'C', 'F')
# loop through the wet bulb and compute the lines of constant wet bulb
wb_range, wb_lines = [], []
for i, wb in enumerate(wb_c):
st_db = db_temp_and_hr_from_wb_rh(wb, 0, self._average_pressure)[0]
end_db, end_hr = db_temp_and_hr_from_wb_rh(wb, 100, self._average_pressure)
if self.use_ip:
st_db, end_db = self.TEMP_TYPE.to_unit((st_db, end_db), 'F', 'C')
enth_line = LineSegment2D.from_end_points(
Point2D(self.t_x_value(st_db), low_y),
Point2D(self.t_x_value(end_db), self.hr_y_value(end_hr)))
border_ints = border.intersect_line_ray(enth_line)
if len(border_ints) == 2:
wb_range.append(all_wbs[i])
seg = LineSegment2D.from_end_points(border_ints[0], border_ints[1])
wb_lines.append(seg)
elif len(border_ints) == 1:
wb_range.append(all_wbs[i])
seg = LineSegment2D.from_end_points(border_ints[0], enth_line.p2)
wb_lines.append(seg)
# set the properties on this class
self._wb_range = wb_range
self._wb_lines = wb_lines
def _labels_points_from_lines(self, label_lines):
"""Extract label points from lines."""
move_vec = []
base_pts = []
for seg in label_lines:
if seg.v.y < 0:
move_vec.append(seg.v.reverse().normalize() * self._x_dim * 1.5)
base_pts.append(seg.p1)
else:
move_vec.append(seg.v.normalize() * self._x_dim * 1.5)
base_pts.append(seg.p2)
return tuple(pt.move(vec) for pt, vec in zip(base_pts, move_vec))
def _process_legend_default(self, l_par):
"""Override the dimensions of the legend to ensure it fits the chart."""
min_pt, max_pt = self.container.min_point, self.container.max_point
if l_par.vertical and l_par.is_segment_height_default:
l_par.properties_3d.segment_height = (max_pt.y - min_pt.y) / 20
l_par.properties_3d._is_segment_height_default = True
        elif not l_par.vertical and l_par.is_segment_width_default:
l_par.properties_3d.segment_width = (max_pt.x - min_pt.x) / 20
l_par.properties_3d._is_segment_width_default = True
def _check_input(self, data_coll, dat_type, unit, name):
"""Check an input that can be either a number or a Data Collection."""
if isinstance(data_coll, self.ACCEPTABLE_COLLECTIONS):
self._check_datacoll(data_coll, dat_type, unit, name)
return data_coll.values
else:
try: # assume that it's a single number
value = float(data_coll)
return [value] * self._calc_length
except (ValueError, TypeError):
raise TypeError('{} must be either a number or a hourly/daily data '
'collection. Got {}'.format(name, type(data_coll)))
def _check_datacoll(self, data_coll, dat_type, unit, name):
"""Check the data type and units of a Data Collection."""
assert isinstance(data_coll.header.data_type, dat_type) and \
data_coll.header.unit == unit, '{} must be {} in {}. ' \
'Got {} in {}'.format(name, dat_type().name, unit,
data_coll.header.data_type.name,
data_coll.header.unit)
if isinstance(data_coll, DailyCollection):
self._time_multiplier = 24
else: # it's an hourly or sub-hourly collection
self._time_multiplier = 1 / data_coll.header.analysis_period.timestep
self._calc_length = len(data_coll)
@staticmethod
def _check_number(value, value_name):
"""Check a given value for a dimension input."""
try:
value = float(value)
except (ValueError, TypeError):
raise TypeError('Expected number for Psychrometric Chart {}. '
'Got {}.'.format(value_name, type(value)))
assert value > 0, 'Psychrometric Chart {} must be greater than 0. ' \
'Got {}.'.format(value_name, value)
return value
def __len__(self):
"""Return length of values on the object."""
return len(self._t_values)
def __getitem__(self, key):
"""Return a tuple of temperature and humidity."""
return self._t_values[key], self._rh_values[key]
def __iter__(self):
"""Iterate through the values."""
return zip(self._t_values, self._rh_values)
def ToString(self):
"""Overwrite .NET ToString."""
return self.__repr__()
def __repr__(self):
"""Psychrometric Chart representation."""
return 'Psychrometric Chart: {} values'.format(len(self._t_values))
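# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It builds a chart
# from single temperature/relative-humidity values and queries a few of the
# documented properties; exact numbers depend on the installed ladybug version.
if __name__ == '__main__':
    _chart = PsychrometricChart(24.0, 50.0)
    print(_chart)                          # Psychrometric Chart: 1 values
    print(_chart.plot_point(24.0, 50.0))   # Point2D for the single data point
    print(len(_chart.data_points))         # 1
    print(_chart.title_text)               # 'Time [hr]'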
/ILAMB-2.7.tar.gz/ILAMB-2.7/LICENSE.rst
Copyright 2023, The ILAMB Consortium
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/metoffice-afterburner-1.3.3.tar.gz/metoffice-afterburner-1.3.3/bin/apprun.sh
set -e
has_module=$(type -t module || echo '')
cmdname=$(basename $0)
usage="SYNOPSIS
$cmdname -h|--help
$cmdname <app_name> [app-options] [app-arguments]
$cmdname [script-options] -- <app-name> [app-options] [app-arguments]
ARGUMENTS
app-name - The name of the Afterburner app to run. This should either be the
leaf name or the full dotted pathname of the app's Python class.
SCRIPT-OPTIONS
--ab-module=<afterburner-module> - Load the named afterburner module
--debug - Print diagnostic/debug information
--dry-run,-n - Enable dry-run mode (doesn't run the app)
--py=<python-version> - Run app using the specified Python version
    --reset-pypath - Set PYTHONPATH variable from clean state
--sci-module=<sci-module> - Load the named scitools module
"
#-------------------------------------------------------------------------------
# Function definitions
#-------------------------------------------------------------------------------
# Augment PYTHONPATH using the colon-delimited directory paths passed in via
# parameter $1. Paths are added to the front of PYTHONPATH.
augment_pythonpath () {
# Read directory paths from $1 into array variable PATHLIST.
IFS=':' read -ra PATHLIST <<< "$1"
# Loop over the paths specified in parameter $1 in reverse order.
for (( idx=${#PATHLIST[@]}-1 ; idx>=0 ; idx-- )); do
path=${PATHLIST[idx]}
if [[ -z "$PYTHONPATH" ]]; then
PYTHONPATH=$path
elif [[ ! $PYTHONPATH =~ (^|.+:)$path(:.+|$) ]]; then
PYTHONPATH=$path:$PYTHONPATH
fi
done
export PYTHONPATH
}
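# For example, with PYTHONPATH=/opt/lib, `augment_pythonpath /a:/b` leaves
# PYTHONPATH=/a:/b:/opt/lib; paths that are already present are skipped rather
# than duplicated.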
# Attempt to load the module specified via parameter $1.
load_module () {
# Test for presence of the module function. Return immediately if not found.
if [ -z "$has_module" ]; then
return 0
fi
# Determine the target module. If it's set to 'none' then return.
target_mod=$1
if [ "$target_mod" == "none" ]; then
return 0
fi
# Try to unload the target module using just its base name. This will complete
# silently if the module is not loaded. Then load the target module using its
# full name. This approach should be more robust than trying to determine if
# the target module is loaded and then calling module swap or module load.
target_base=${target_mod%%/*}
module unload $target_base || true
module load $target_mod
if ! module -t list 2>&1 | grep -q $target_base ; then
echo "WARNING: Unable to load module $target_mod"
return 1
fi
}
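# For example, `load_module scitools/default` (module name illustrative) first runs
# `module unload scitools` and then `module load scitools/default`; the function is
# a no-op when the `module` command is unavailable or the argument is "none".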
# Print script usage.
print_usage () {
echo "$usage" | while IFS= read -r line ; do echo "$line"; done
}
#-------------------------------------------------------------------------------
# Main script
#-------------------------------------------------------------------------------
if [ $# -eq 0 ]; then
echo "WARNING: No command-line arguments specified."
print_usage
exit 1
fi
if [ "$*" == "-h" -o "$*" == "--help" ]; then
print_usage
exit 0
fi
# Set option defaults.
abargs=()
ab_module=${AFTERBURNER_MODULE:-none}
debug=0
dryrun=0
python_cmd=${PYTHON_EXEC:-python}
reset_pypath=0
sci_module=${SCITOOLS_MODULE:-scitools/default}
# Configure command-line options.
shortopts="hn"
longopts="debug,dry-run,help,py:,reset-pypath,ab-module:,sci-module:"
if [[ "$*" != *--\ * ]]; then
cmdargs=("--" "$@")
else
cmdargs=("$@")
fi
# Process command-line options.
#echo "i/p cmd args: ${cmdargs[@]}"
cmdargs=$(getopt -o "$shortopts" --long "$longopts" -n "$cmdname" -- "${cmdargs[@]}")
eval set -- "$cmdargs"
#echo "o/p cmd args: ${cmdargs}"
while true; do
case "$1" in
-h|--help)
print_usage
exit 0 ;;
-n|--dry-run)
dryrun=1
shift ;;
--ab-module)
ab_module=$2
shift 2 ;;
--debug)
debug=1
shift ;;
--py)
python_cmd="python$2"
shift 2 ;;
--reset-pypath)
reset_pypath=1
shift ;;
--sci-module)
sci_module=$2
shift 2 ;;
--)
shift
abargs=("$@")
break ;;
esac
done
# Reset the PYTHONPATH variable if required.
if [ $reset_pypath -eq 1 ]; then
export PYTHONPATH=""
fi
# Add location of Rose package to PYTHONPATH.
rose_cmd=$(which rose 2>/dev/null || echo '')
if [[ -n $rose_cmd ]]; then
rose_pkg_dir=$(rose --version | sed 's/^.*(\(.*\))$/\1/')/lib/python
if [[ -d $rose_pkg_dir ]]; then
augment_pythonpath $rose_pkg_dir
fi
else
echo "WARNING: Unable to determine location of Rose python package."
fi
# Add a "scitools/" prefix to the scitools module name if required.
if [[ "$sci_module" != "none" ]] && [[ ! "$sci_module" =~ ^scitools/.* ]]; then
sci_module="scitools/$sci_module"
fi
# Add an "afterburner/" prefix to the afterburner module name if required.
if [[ "$ab_module" != "none" ]] && [[ ! "$ab_module" =~ ^afterburner/.* ]]; then
ab_module="afterburner/$ab_module"
fi
# Load a scitools module if one has been specified via the sci-module option or
# the SCITOOLS_MODULE variable.
load_module $sci_module || exit 1
# Load an afterburner module if one has been specified via the --ab-module option
# or the AFTERBURNER_MODULE variable.
# NB: a scitools module is a prerequisite for loading an afterburner module.
load_module $ab_module || exit 1
# Check to see if the required python command is on the command search path.
has_py=$(which $python_cmd 2>/dev/null || echo '')
if [[ -z $has_py ]]; then
echo "WARNING: $python_cmd command not found; reverting to plain 'python' command."
python_cmd=python
fi
# Obtain the path of the Afterburner bin directory. If the AFTERBURNER_HOME_DIR
# environment variable has been set, then use that. Otherwise set it to the
# directory containing this script.
if [[ -n $AFTERBURNER_HOME_DIR ]]; then
bindir=${AFTERBURNER_HOME_DIR}/bin
else
bindir=$(dirname $($python_cmd -c 'import sys, os; print(os.path.realpath(sys.argv[1]))' $0))
export AFTERBURNER_HOME_DIR=$(dirname $bindir)
fi
# Add location of Afterburner package to PYTHONPATH if an afterburner module
# has NOT been loaded, either by this script or by the calling environment.
loaded_mod=$(module -t list 2>&1 | grep afterburner || echo '')
if [[ -z $loaded_mod ]]; then
augment_pythonpath $($python_cmd ${bindir}/abconfig --pythonpath)
fi
# Removing any trailing ':' character from PYTHONPATH.
export PYTHONPATH=${PYTHONPATH/%:/}
# Extract the Afterburner application class name.
app_name=${abargs[0]}
# If debug mode is on then print some useful diagnostic information.
if [ $debug -eq 1 ]; then
hdr=$(printf '=%.0s' {1..30})
echo
echo "$hdr DEBUG INFO $hdr"
echo "AFTERBURNER_HOME_DIR: $AFTERBURNER_HOME_DIR"
echo "PYTHONPATH: ${PYTHONPATH:-not defined}"
echo "Python command: $(which $python_cmd)"
echo "SciTools module: $sci_module"
echo "Rose package location: ${rose_pkg_dir:-not defined}"
echo "Afterburner module: $ab_module"
echo "Afterburner app: $app_name"
echo "App arguments: ${abargs[*]:1}"
echo "$hdr============$hdr"
echo
fi
# Invoke the abrun.py script, passing through any options and arguments
if [ $dryrun -eq 1 ]; then
echo "App invocation: $python_cmd ${bindir}/abrun.py ${app_name} ${abargs[*]:1}"
else
$python_cmd ${bindir}/abrun.py ${app_name} ${abargs[*]:1}
fi
/dgl_cu110-0.6.1-cp37-cp37m-manylinux1_x86_64.whl/dgl/nn/pytorch/conv/atomicconv.py
# pylint: disable= no-member, arguments-differ, invalid-name
import numpy as np
import torch as th
import torch.nn as nn
class RadialPooling(nn.Module):
r"""
Description
-----------
Radial pooling from paper `Atomic Convolutional Networks for
Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__.
We denote the distance between atom :math:`i` and :math:`j` by :math:`r_{ij}`.
A radial pooling layer transforms distances with radial filters. For radial filter
indexed by :math:`k`, it projects edge distances with
.. math::
h_{ij}^{k} = \exp(-\gamma_{k}|r_{ij}-r_{k}|^2)
If :math:`r_{ij} < c_k`,
.. math::
        f_{ij}^{k} = 0.5 * (\cos(\frac{\pi r_{ij}}{c_k}) + 1),
else,
.. math::
f_{ij}^{k} = 0.
Finally,
.. math::
e_{ij}^{k} = h_{ij}^{k} * f_{ij}^{k}
Parameters
----------
interaction_cutoffs : float32 tensor of shape (K)
:math:`c_k` in the equations above. Roughly they can be considered as learnable cutoffs
and two atoms are considered as connected if the distance between them is smaller than
the cutoffs. K for the number of radial filters.
rbf_kernel_means : float32 tensor of shape (K)
:math:`r_k` in the equations above. K for the number of radial filters.
rbf_kernel_scaling : float32 tensor of shape (K)
:math:`\gamma_k` in the equations above. K for the number of radial filters.
"""
def __init__(self, interaction_cutoffs, rbf_kernel_means, rbf_kernel_scaling):
super(RadialPooling, self).__init__()
self.interaction_cutoffs = nn.Parameter(
interaction_cutoffs.reshape(-1, 1, 1), requires_grad=True)
self.rbf_kernel_means = nn.Parameter(
rbf_kernel_means.reshape(-1, 1, 1), requires_grad=True)
self.rbf_kernel_scaling = nn.Parameter(
rbf_kernel_scaling.reshape(-1, 1, 1), requires_grad=True)
def forward(self, distances):
"""
Description
-----------
Apply the layer to transform edge distances.
Parameters
----------
distances : Float32 tensor of shape (E, 1)
Distance between end nodes of edges. E for the number of edges.
Returns
-------
Float32 tensor of shape (K, E, 1)
Transformed edge distances. K for the number of radial filters.
"""
scaled_euclidean_distance = - self.rbf_kernel_scaling * \
(distances - self.rbf_kernel_means) ** 2 # (K, E, 1)
rbf_kernel_results = th.exp(scaled_euclidean_distance) # (K, E, 1)
cos_values = 0.5 * (th.cos(np.pi * distances / self.interaction_cutoffs) + 1) # (K, E, 1)
cutoff_values = th.where(
distances <= self.interaction_cutoffs,
cos_values, th.zeros_like(cos_values)) # (K, E, 1)
# Note that there appears to be an inconsistency between the paper and
# DeepChem's implementation. In the paper, the scaled_euclidean_distance first
# gets multiplied by cutoff_values, followed by exponentiation. Here we follow
# the practice of DeepChem.
return rbf_kernel_results * cutoff_values
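# Illustrative shapes for RadialPooling (not part of the original module): the
# (K,) parameters are reshaped to (K, 1, 1), so an (E, 1) distance tensor
# broadcasts to a (K, E, 1) output.
#
#     pool = RadialPooling(interaction_cutoffs=th.ones(2) * 4.0,
#                          rbf_kernel_means=th.ones(2),
#                          rbf_kernel_scaling=th.ones(2))
#     out = pool(th.rand(3, 1) * 5.0)  # out.shape == (2, 3, 1)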
def msg_func(edges):
"""
Description
-----------
Send messages along edges.
Parameters
----------
edges : EdgeBatch
A batch of edges.
Returns
-------
dict mapping 'm' to Float32 tensor of shape (E, K * T)
Messages computed. E for the number of edges, K for the number of
radial filters and T for the number of features to use
(types of atomic number in the paper).
"""
return {'m': th.einsum(
'ij,ik->ijk', edges.src['hv'], edges.data['he']).view(len(edges), -1)}
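# Shape sketch for the message computed above (E, K and T are as in the docstrings;
# the concrete sizes are assumptions):
#   edges.src['hv']  : (E, T)  one-hot atom types
#   edges.data['he'] : (E, K)  radial filter responses
#   einsum('ij,ik->ijk', hv, he) : (E, T, K), then flattened to (E, T * K)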
def reduce_func(nodes):
"""
Description
-----------
Collect messages and update node representations.
Parameters
----------
nodes : NodeBatch
A batch of nodes.
Returns
-------
dict mapping 'hv_new' to Float32 tensor of shape (V, K * T)
Updated node representations. V for the number of nodes, K for the number of
radial filters and T for the number of features to use
(types of atomic number in the paper).
"""
return {'hv_new': nodes.mailbox['m'].sum(1)}
class AtomicConv(nn.Module):
r"""
Description
-----------
Atomic Convolution Layer from paper `Atomic Convolutional Networks for
Predicting Protein-Ligand Binding Affinity <https://arxiv.org/abs/1703.10603>`__.
We denote the type of atom :math:`i` by :math:`z_i` and the distance between atoms
:math:`i` and :math:`j` by :math:`r_{ij}`.
**Distance Transformation**
An atomic convolution layer first transforms distances with radial filters and
then performs a pooling operation.
For radial filter indexed by :math:`k`, it projects edge distances with
.. math::
h_{ij}^{k} = \exp(-\gamma_{k}|r_{ij}-r_{k}|^2)
If :math:`r_{ij} < c_k`,
.. math::
f_{ij}^{k} = 0.5 * (\cos(\frac{\pi r_{ij}}{c_k}) + 1),
else,
.. math::
f_{ij}^{k} = 0.
Finally,
.. math::
e_{ij}^{k} = h_{ij}^{k} * f_{ij}^{k}
**Aggregation**
For each type :math:`t`, each atom collects distance information from all neighbor atoms
of type :math:`t`:
.. math::
p_{i, t}^{k} = \sum_{j\in N(i)} e_{ij}^{k} * 1(z_j == t)
Then concatenate the results for all RBF kernels and atom types.
Parameters
----------
interaction_cutoffs : float32 tensor of shape (K)
:math:`c_k` in the equations above. Roughly speaking, these are learnable cutoffs:
two atoms are considered connected if the distance between them is smaller than
the corresponding cutoff. K for the number of radial filters.
rbf_kernel_means : float32 tensor of shape (K)
:math:`r_k` in the equations above. K for the number of radial filters.
rbf_kernel_scaling : float32 tensor of shape (K)
:math:`\gamma_k` in the equations above. K for the number of radial filters.
features_to_use : None or float tensor of shape (T)
In the original paper, these are atomic numbers to consider, representing the types
of atoms. T for the number of types of atomic numbers. Default to None.
Note
----
* This convolution operation is designed for molecular graphs in Chemistry, but it might
be possible to extend it to more general graphs.
* There seems to be an inconsistency about the definition of :math:`e_{ij}^{k}` in the
paper and the author's implementation. We follow the author's implementation. In the
paper, :math:`e_{ij}^{k}` was defined as
:math:`\exp(-\gamma_{k}|r_{ij}-r_{k}|^2 * f_{ij}^{k})`.
* :math:`\gamma_{k}`, :math:`r_k` and :math:`c_k` are all learnable.
Example
-------
>>> import dgl
>>> import numpy as np
>>> import torch as th
>>> from dgl.nn import AtomicConv
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> feat = th.ones(6, 1)
>>> edist = th.ones(6, 1)
>>> interaction_cutoffs = th.ones(3).float() * 2
>>> rbf_kernel_means = th.ones(3).float()
>>> rbf_kernel_scaling = th.ones(3).float()
>>> conv = AtomicConv(interaction_cutoffs, rbf_kernel_means, rbf_kernel_scaling)
>>> res = conv(g, feat, edist)
>>> res
tensor([[0.5000, 0.5000, 0.5000],
[0.5000, 0.5000, 0.5000],
[0.5000, 0.5000, 0.5000],
[1.0000, 1.0000, 1.0000],
[0.5000, 0.5000, 0.5000],
[0.0000, 0.0000, 0.0000]], grad_fn=<ViewBackward>)
"""
def __init__(self, interaction_cutoffs, rbf_kernel_means,
rbf_kernel_scaling, features_to_use=None):
super(AtomicConv, self).__init__()
self.radial_pooling = RadialPooling(interaction_cutoffs=interaction_cutoffs,
rbf_kernel_means=rbf_kernel_means,
rbf_kernel_scaling=rbf_kernel_scaling)
if features_to_use is None:
self.num_channels = 1
self.features_to_use = None
else:
self.num_channels = len(features_to_use)
self.features_to_use = nn.Parameter(features_to_use, requires_grad=False)
def forward(self, graph, feat, distances):
"""
Description
-----------
Apply the atomic convolution layer.
Parameters
----------
graph : DGLGraph
Topology based on which message passing is performed.
feat : Float32 tensor of shape :math:`(V, 1)`
Initial node features, which are atomic numbers in the paper.
:math:`V` for the number of nodes.
distances : Float32 tensor of shape :math:`(E, 1)`
Distance between end nodes of edges. E for the number of edges.
Returns
-------
Float32 tensor of shape :math:`(V, K * T)`
Updated node representations. :math:`V` for the number of nodes, :math:`K` for the
number of radial filters, and :math:`T` for the number of types of atomic numbers.
"""
with graph.local_scope():
radial_pooled_values = self.radial_pooling(distances) # (K, E, 1)
if self.features_to_use is not None:
feat = (feat == self.features_to_use).float() # (V, T)
graph.ndata['hv'] = feat
graph.edata['he'] = radial_pooled_values.transpose(1, 0).squeeze(-1) # (E, K)
graph.update_all(msg_func, reduce_func)
return graph.ndata['hv_new'].view(graph.number_of_nodes(), -1) # (V, K * T)
|
PypiClean
|
/object_detection_by_ovi-0.0.1-py3-none-any.whl/object_detection/model_tpu_main.py
|
r"""Creates and runs `Estimator` for object detection model on TPUs.
This uses the TPUEstimator API to define and run a model in TRAIN/EVAL modes.
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from object_detection import model_lib
tf.flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs')
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located. If not specified, we '
'will attempt to automatically detect the zone from metadata.')
flags.DEFINE_string(
'tpu_name',
default=None,
help='Name of the Cloud TPU for Cluster Resolvers.')
flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop.')
# For mode=train_and_eval, evaluation occurs after training is finished.
# Note: independently of steps_per_checkpoint, estimator will save the most
# recent checkpoint every 10 minutes by default for train_and_eval
flags.DEFINE_string('mode', 'train',
'Mode to run: train, eval')
flags.DEFINE_integer('train_batch_size', None, 'Batch size for training. If '
'this is not provided, batch size is read from training '
'config.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_boolean('eval_training_data', False,
'If training data should be evaluated for this job.')
flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of '
'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
'one of every n train input examples for evaluation, '
'where n is provided. This is only used if '
'`eval_training_data` is True.')
flags.DEFINE_string(
'model_dir', None, 'Path to output model directory '
'where event and checkpoint files will be written.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
'file.')
flags.DEFINE_integer(
'max_eval_retries', 0, 'If running continuous eval, the maximum number of '
'retries upon encountering tf.errors.InvalidArgumentError. If negative, '
'will always retry the evaluation.'
)
FLAGS = tf.flags.FLAGS
def main(unused_argv):
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_config_path')
tpu_cluster_resolver = (
tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=[FLAGS.tpu_name], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project))
tpu_grpc_url = tpu_cluster_resolver.get_master()
config = tf_estimator.tpu.RunConfig(
master=tpu_grpc_url,
evaluation_master=tpu_grpc_url,
model_dir=FLAGS.model_dir,
tpu_config=tf_estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_shards))
kwargs = {}
if FLAGS.train_batch_size:
kwargs['batch_size'] = FLAGS.train_batch_size
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config=config,
pipeline_config_path=FLAGS.pipeline_config_path,
train_steps=FLAGS.num_train_steps,
sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
sample_1_of_n_eval_on_train_examples=(
FLAGS.sample_1_of_n_eval_on_train_examples),
use_tpu_estimator=True,
use_tpu=FLAGS.use_tpu,
num_shards=FLAGS.num_shards,
save_final_config=FLAGS.mode == 'train',
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
train_steps = train_and_eval_dict['train_steps']
if FLAGS.mode == 'train':
estimator.train(input_fn=train_input_fn, max_steps=train_steps)
# Continuously evaluating.
if FLAGS.mode == 'eval':
if FLAGS.eval_training_data:
name = 'training_data'
input_fn = eval_on_train_input_fn
else:
name = 'validation_data'
# Currently only a single eval input is allowed.
input_fn = eval_input_fns[0]
model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps,
name, FLAGS.max_eval_retries)
if __name__ == '__main__':
tf.app.run()
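# Example invocation (all paths, resource names and flag values below are
# illustrative assumptions, not part of the original script):
#   python model_tpu_main.py \
#     --model_dir=gs://my-bucket/model \
#     --pipeline_config_path=gs://my-bucket/pipeline.config \
#     --tpu_name=my-tpu --tpu_zone=us-central1-b \
#     --mode=train --num_train_steps=100000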
|
PypiClean
|
/bottle-toolbelt-0.0.2.tar.gz/bottle-toolbelt-0.0.2/toolbelt/middleware.py
|
import time
import os
import re
import bottle
def safe_bottle(callback):
'''
from toolbelt.middleware import safe_bottle
bottle.install(safe_bottle)
'''
def wrapper(*args, **kwargs):
body = callback(*args, **kwargs)
bottle.response.headers['Server'] = ')'
bottle.response.headers['X-Frame-Options'] = 'SAMEORIGIN'
bottle.response.headers['X-Content-Type-Options'] = 'nosniff'
bottle.response.headers['X-XSS-Protection'] = '1; mode=block'
bottle.response.headers['Content-Language'] = 'en'
return body
return wrapper
def benchmark(callback):
'''
from toolbelt.middleware import benchmark
bottle.install(benchmark)
'''
def wrapper(*args, **kwargs):
start = time.time()
body = callback(*args, **kwargs)
end = time.time()
bottle.response.headers['X-Exec-Time'] = str(end - start)
return body
return wrapper
def redirect_http_to_https(callback):
'''
from toolbelt.middleware import redirect_http_to_https
bottle.install(redirect_http_to_https)
'''
def wrapper(*args, **kwargs):
scheme = bottle.request.urlparts[0]
if scheme == 'http':
bottle.redirect(bottle.request.url.replace('http', 'https', 1))
else:
return callback(*args, **kwargs)
return wrapper
def cors(callback):
'''
from toolbelt.middleware import cors
from toolbelt.middleware import cors_options
bottle.install(cors)
'''
def wrapper(*args, **kwargs):
if bottle.request.method == 'OPTIONS':
bottle.response.headers['Access-Control-Allow-Origin'] = '*'
bottle.response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
bottle.response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, Authorization, X-Requested-With, X-CSRF-Token, X-XSRF-Token'
else:
# CORS
bottle.response.headers['Access-Control-Allow-Origin'] = '*'
bottle.response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
bottle.response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, Authorization, X-Requested-With, X-CSRF-Token, X-XSRF-Token'
if callback:
body = callback(*args, **kwargs)
return body
return wrapper
def parse_AcceptLanguage(acceptLanguage):
"""Parse an Accept-Language header value, e.g. 'en-US,en;q=0.8,pt;q=0.6'."""
if not acceptLanguage:
return [('en', '1')]
languages_with_weigths = acceptLanguage.strip().split(',')
languages_with_weigths = [s.strip() for s in languages_with_weigths]
locale_q_pairs = []
for language in languages_with_weigths:
try:
if language.split(';')[0].strip() == language:
# no q => q = 1
locale_q_pairs.append((language.strip(), '1'))
else:
locale = language.split(';')[0].strip()
q = language.split(';')[1].split('=')[1]
locale_q_pairs.append((locale, q))
except IndexError:
pass
if locale_q_pairs:
return sorted(locale_q_pairs, key=lambda v: float(v[1]), reverse=True)
else:
return [('en', '1')]
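# Illustrative behaviour of parse_AcceptLanguage (the header value is an assumption):
#   parse_AcceptLanguage('en-US,en;q=0.8,pt;q=0.6')
#   -> [('en-US', '1'), ('en', '0.8'), ('pt', '0.6')]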
def simple_langs(acceptLanguage):
"""Return the deduplicated two-letter language codes from an Accept-Language header."""
langs = parse_AcceptLanguage(acceptLanguage)
langs = [l[0][0:2] for l in langs]
langs = [l for l in langs if l]
if langs:
return list(set(langs))
else:
return ['en']
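# Illustrative result (the header value is an assumption; set ordering may vary):
#   simple_langs('pt-BR,pt;q=0.9,en;q=0.8')   ->  ['pt', 'en'] or ['en', 'pt']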
def lang(callback):
'''
from toolbelt.middleware import lang
bottle.install(lang)
'''
def wrapper(*args, **kwargs):
# Language headers/cookies
language = simple_langs(bottle.request.headers.get('Accept-Language', None))[0]
if not bottle.request.get_cookie('language', None):
bottle.response.set_cookie('language', language)
bottle.response.headers['Content-Language'] = language
if callback:
body = callback(*args, **kwargs)
return body
return wrapper
def https_safe_api_cors_lang_bench(callback):
'''
from toolbelt.middleware import https_safe_api_cors_lang_bench
from toolbelt.middleware import cors_options
bottle.install(https_safe_api_cors_lang_bench)
'''
def wrapper(*args, **kwargs):
if bottle.request.method == 'OPTIONS':
bottle.response.headers['Access-Control-Allow-Origin'] = '*'
bottle.response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
bottle.response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, Authorization, X-Requested-With, X-CSRF-Token, X-XSRF-Token'
else:
scheme = bottle.request.urlparts[0]
if scheme == 'http' and False:
return bottle.redirect(bottle.request.url.replace('http', 'https', 1))
else:
bottle.response.headers['Server'] = ')'
bottle.response.headers['X-Frame-Options'] = 'SAMEORIGIN'
bottle.response.headers['X-Content-Type-Options'] = 'nosniff'
bottle.response.headers['X-XSS-Protection'] = '1; mode=block'
# CORS
bottle.response.headers['Access-Control-Allow-Origin'] = '*'
bottle.response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
bottle.response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, Authorization, X-Requested-With, X-CSRF-Token, X-XSRF-Token'
# API
if bottle.request.urlparts[2].startswith('/api'):
bottle.response.content_type = 'application/json'
# First visit
if not bottle.request.get_cookie('visited', None):
bottle.response.set_cookie('visited', 'yes')
# Language headers/cookies
language = simple_langs(bottle.request.headers.get('Accept-Language', None))[0]
if not bottle.request.get_cookie('language', None):
bottle.response.set_cookie('language', language)
bottle.response.headers['Content-Language'] = language
if callback:
start = time.time()
body = callback(*args, **kwargs)
end = time.time()
bottle.response.headers['X-Exec-Time'] = str(end - start)
return body
return wrapper
# Enable OPTIONS for all routes
@bottle.route('/<:re:.*>', method='OPTIONS')
def cors_options(*args, **kwargs):
pass
|
PypiClean
|
/qcg_pilotjob-0.14.0-py3-none-any.whl/qcg/pilotjob/joblist.py
|
import json
import re
import logging
from datetime import datetime, timedelta
from enum import Enum
from qcg.pilotjob.resources import CRType
from qcg.pilotjob.errors import JobAlreadyExist, IllegalResourceRequirements, IllegalJobDescription
_logger = logging.getLogger(__name__)
class JobState(Enum):
"""The job state."""
QUEUED = 1
SCHEDULED = 2
EXECUTING = 3
SUCCEED = 4
FAILED = 5
CANCELED = 6
OMITTED = 7
def is_finished(self):
"""Check if job state is finished (final)."""
return self in [JobState.SUCCEED, JobState.FAILED, JobState.CANCELED,
JobState.OMITTED]
def stats(self, stats):
if self in [JobState.QUEUED, JobState.SCHEDULED]:
stats['scheduling'] = stats.get('scheduling', 0) + 1
elif self in [JobState.EXECUTING]:
stats['executing'] = stats.get('executing', 0) + 1
elif self in [JobState.FAILED, JobState.OMITTED]:
stats['failed'] = stats.get('failed', 0) + 1
elif self in [JobState.CANCELED, JobState.SUCCEED]:
stats['finished'] = stats.get('finished', 0) + 1
class JobExecution:
"""The execution element of job description.
Attributes:
exec (str, optional): path to the executable
args (list(str), optional): list of arguments
env (dict(str, str), optional): list of environment variables
stdin (str, optional): path to the standard input file
stdout (str, optional): path to the standard output file
stderr (str, optional): path to the standard error file
modules (list(str), optional): list of modules to load before job start
venv (str, optional): path to the virtual environment to initialize before job start
wd (str, optional): path to the job's working directory
model (str, optional): model of execution
model_opts (dict(str, str), optional): model options
"""
def __init__(self, exec=None, args=None, env=None, script=None, stdin=None, stdout=None, stderr=None,
modules=None, venv=None, wd=None, model=None, model_opts=None):
"""Initialize execution element of job description.
Args:
exec (str, optional): path to the executable
args (list(str), optional): list of arguments
env (dict(str, str), optional): list of environment variables
stdin (str, optional): path to the standard input file
stdout (str, optional): path to the standard output file
stderr (str, optional): path to the standard error file
modules (list(str), optional): list of modules to load before job start
venv (str, optional): path to the virtual environment to initialize before job start
wd (str, optional): path to the job's working directory
model (str, optional): model of execution
model_opts (dict(str, str), optional): model options
Raises:
IllegalJobDescription: when:
* neither ``exec`` nor ``script`` is defined,
* ``script`` is defined together with any of ``exec``, ``args`` or ``env``
* ``args`` is not a list
* ``env`` is not a dictionary
"""
if all((not exec, not script)):
raise IllegalJobDescription("Job execution (exec or script) not defined")
if script and (exec or args or env):
raise IllegalJobDescription("Job script and exec or args or env defined")
self.exec = exec
self.script = script
self.args = []
self.env = {}
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
if modules and not isinstance(modules, list):
self.modules = [modules]
else:
self.modules = modules
self.venv = venv
if model and isinstance(model, str):
self.model = model.lower()
else:
self.model = model
if model_opts is not None:
if not isinstance(model_opts, dict):
raise IllegalJobDescription("Execution model options must be an dictionary")
self.model_opts = model_opts
else:
self.model_opts = {}
if args is not None:
if not isinstance(args, list):
raise IllegalJobDescription("Execution arguments must be an array")
self.args = [str(arg) for arg in args]
if env is not None:
if not isinstance(env, dict):
raise IllegalJobDescription("Execution environment must be an dictionary")
self.env = env
self.wd = wd
def to_dict(self):
"""Serialize ``execution`` element to dictionary.
Returns:
dict(str): dictionary with ``execution`` element values
"""
result = {'exec': self.exec, 'args': self.args, 'env': self.env, 'script': self.script}
if self.wd is not None:
result['wd'] = self.wd
if self.stdin is not None:
result['stdin'] = self.stdin
if self.stdout is not None:
result['stdout'] = self.stdout
if self.stderr is not None:
result['stderr'] = self.stderr
if self.modules is not None:
result['modules'] = self.modules
if self.venv is not None:
result['venv'] = self.venv
if self.model is not None:
result['model'] = self.model
if self.model_opts is not None:
result['model_opts'] = self.model_opts
return result
def to_json(self):
"""Serialize ``execution`` element to JSON description.
Returns:
JSON description of ``execution`` element.
"""
return json.dumps(self.to_dict())
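# Illustrative construction of a JobExecution element (the paths are assumptions):
#   exe = JobExecution(exec='/bin/date', stdout='date.out', wd='/tmp')
#   exe.to_dict()
#   -> {'exec': '/bin/date', 'args': [], 'env': {}, 'script': None,
#       'wd': '/tmp', 'stdout': 'date.out', 'model_opts': {}}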
class ResourceSize:
"""The resources size element used in job description when specified the number of required cores or nodes."""
def __init__(self, exact=None, min=None, max=None, scheduler=None):
"""Initialize resource size.
Args:
exact (int, optional): exact number of resources
min (int, optional): minimum number of resources
max (int, optional): maximum number of resources
scheduler (dict, optional): the iteration resources scheduler, the ``name`` and ``params`` (optional) keys
Raises:
IllegalResourceRequirements raised when:
* ``exact`` is specified together with ``min``, ``max`` or ``scheduler``
* none of ``exact``, ``min`` or ``max`` is specified
* both ``max`` and ``min`` are specified and ``min`` > ``max``
"""
if exact is not None and (min is not None or max is not None or scheduler is not None):
raise IllegalResourceRequirements("Exact number of resources defined with min/max/scheduler")
if max is not None and min is not None and min > max:
raise IllegalResourceRequirements("Maximum number greater than minimal")
if exact is None and min is None and max is None:
raise IllegalResourceRequirements("No resources defined")
if (exact is not None and exact < 0) or (min is not None and min < 0) or (max is not None and max < 0):
raise IllegalResourceRequirements("Neative number of resources")
self._exact = exact
self._min = min
self._max = max
self._scheduler = scheduler
@property
def exact(self):
""" int: exact number of resources."""
return self._exact
@property
def min(self):
"""int: minimum number of resources"""
return self._min
@property
def max(self):
"""int: maximum number of resources"""
return self._max
@property
def scheduler(self):
"""str: iteration resource scheduler name"""
return self._scheduler
@property
def range(self):
"""(int, int): tuple with resources range"""
return self._min, self._max
def is_exact(self):
"""Check if resource size is defined as exact number.
Returns:
True: if resource size is defined as exact number
False: if resource size is defined as a range
"""
return self._exact is not None
def to_dict(self):
"""Serialize resource size to dictionary
Returns:
dict(str): dictionary with resource size
"""
result = {}
if self._exact is not None:
result['exact'] = self._exact
if self._min is not None:
result['min'] = self._min
if self._max is not None:
result['max'] = self._max
if self._scheduler is not None:
result['scheduler'] = self._scheduler
return result
def to_json(self):
"""Serialize resource size to JSON description.
Returns:
JSON description of resource size element.
"""
return json.dumps(self.to_dict())
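# Two illustrative ResourceSize definitions (the numbers are assumptions):
#   ResourceSize(exact=4).is_exact()   # True
#   ResourceSize(min=2, max=8).range   # (2, 8)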
class JobResources:
"""The ```resources``` element of job description."""
_wt_regex = re.compile(r'((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?')
def _parse_wt(self, wt):
"""Parse wall time description into timedelta structure.
Args:
wt (str): the wall time description as a string
Returns:
timedelta: parsed wall time description
Raises:
IllegalResourceRequirements: when wall time description has wrong format.
"""
parts = self._wt_regex.match(wt)
if not parts:
raise IllegalResourceRequirements("Wrong wall time format")
try:
parts = parts.groupdict()
time_params = {}
for name, param in parts.items():
if param:
time_params[name] = int(param)
td = timedelta(**time_params)
if td.total_seconds() == 0:
raise IllegalResourceRequirements("Wall time must be greater than 0")
return td
except IllegalResourceRequirements:
raise
except Exception:
raise IllegalResourceRequirements("Wrong wall time format")
@staticmethod
def _validate_crs(crs):
"""Validate consumable resources.
Check if crs are known and well defined.
Args:
crs (dict(string, int)): map with consumable resources.
Returns:
dict: final map of crs
Raises:
IllegalResourceRequirements: when
* unknown consumable resources
* double definition of consumable resources
* number of consumable resources is not defined as integer
* number of consumable resources is less than 1
"""
if crs:
# already used crs
result = dict()
for name, count in crs.items():
if not name.upper() in CRType.__members__:
raise IllegalResourceRequirements("Unknown consumable resource {}".format(name))
cr_type = CRType[name.upper()]
if cr_type in result:
raise IllegalResourceRequirements("Consumable resource {} already defined".format(name))
if not isinstance(count, int):
raise IllegalResourceRequirements("Consumable resource {} count not a number {}".format(
name, type(count).__name__))
if count < 1:
raise IllegalResourceRequirements("Number of consumable resource {} must be greater than 0".format(
name))
result[cr_type] = count
return result
def __init__(self, numCores=None, numNodes=None, wt=None, nodeCrs=None):
"""Initialize ``resources`` element of job description.
* if numNodes > 1, then numCores relates to each of the nodes, so the total number of
required cores will be the product of numNodes and numCores
* nodeCrs relates to each node available consumable resources
Args:
numCores (int, dict or ResourceSize, optional): number of cores, either as an exact number or as a range
numNodes (int, dict or ResourceSize, optional): number of nodes, either as an exact number or as a range
wt (str, optional): wall time
nodeCrs (dict(string, int), optional): per-node consumable resources
Raises:
IllegalResourceRequirements: raised when:
* ``numCores`` and ``numNodes`` not defined
* ``numCores`` or ``numNodes`` not instance of either ``int``, ``dict`` or ResourceSize
* wrong consumable resources definition
"""
if numCores is None and numNodes is None:
raise IllegalResourceRequirements("No resources defined")
if numCores is not None:
if isinstance(numCores, int):
numCores = ResourceSize(numCores)
elif isinstance(numCores, dict):
numCores = ResourceSize(**numCores)
elif not isinstance(numCores, ResourceSize):
raise IllegalJobDescription("Wrong definition of number of cores (%s)" % (type(numCores).__name__))
if numNodes is not None:
if isinstance(numNodes, int):
numNodes = ResourceSize(numNodes)
elif isinstance(numNodes, dict):
numNodes = ResourceSize(**numNodes)
elif not isinstance(numNodes, ResourceSize):
raise IllegalJobDescription("Wrong definition of number of nodes (%s)" % (type(numNodes).__name__))
if wt is not None:
self.wt = self._parse_wt(wt)
else:
self.wt = None
self._crs = None
if nodeCrs is not None:
if not isinstance(nodeCrs, dict):
raise IllegalJobDescription("Wrong definition of Consumable Resources {} (must be a dictionary)".format(
type(nodeCrs).__name__))
self._crs = JobResources._validate_crs(nodeCrs)
self._cores = numCores
self._nodes = numNodes
@property
def has_nodes(self):
"""bool: true if ``resources`` element of job description contains number of nodes definition"""
return self._nodes is not None
@property
def has_cores(self):
"""bool: true if ``resources`` element of job description contains number of cores definition"""
return self._cores is not None
@property
def has_crs(self):
"""bool: true if ``resources`` element of job description contains consumable resources definition"""
return self._crs is not None
@property
def cores(self):
"""ResourceSize: return ``numCores`` definition of ``resources`` element."""
return self._cores
@property
def nodes(self):
"""ResourceSize: return ``numNodes`` definition of ``resources`` element."""
return self._nodes
@property
def crs(self):
"""ResourceSize: return ``nodeCrs`` definition of ``resources`` element."""
return self._crs
def get_min_num_cores(self):
"""Return minimum number of cores the job can be run.
Returns:
int: minimum number of required cores for the job.
"""
min_cores = 1
if self.has_cores:
if self._cores.is_exact():
min_cores = self._cores.exact
else:
min_cores = self._cores.range[0]
if self.has_nodes:
if self._nodes.is_exact():
min_cores = min_cores * self._nodes.exact
else:
min_cores = min_cores * self._nodes.range[0]
return min_cores
def to_dict(self):
"""Serialize ``resource`` element of job description to dictionary
Returns:
dict(str): dictionary with ``resources`` element of job description
"""
result = {}
if self.has_cores:
result['numCores'] = self._cores.to_dict()
if self.has_nodes:
result['numNodes'] = self._nodes.to_dict()
if self.has_crs:
result['nodeCrs'] = {crtype.name: value for (crtype, value) in self._crs.items()}
return result
def to_json(self):
"""Serialize ``resource`` element of job description to JSON description.
Returns:
JSON description of ``resource`` element of job description.
"""
return json.dumps(self.to_dict())
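# Illustrative JobResources usage (all numbers and the wall-time string are assumptions):
#   res = JobResources(numCores=4, numNodes={'min': 2, 'max': 3}, wt='1h30m')
#   res.get_min_num_cores()   # 4 * 2 = 8
#   res.wt                    # timedelta(hours=1, minutes=30)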
class JobDependencies:
"""Runtime dependencies of job."""
@staticmethod
def _validate_job_list(job_list, err_msg):
"""Validate list of job names.
Validate if given argument is a list of strings.
Args:
job_list (list(str)): job names list
err_msg (str): error message to be places in IllegalJobDescription
Raises:
IllegalJobDescription: if ``job_list`` is not a list of strings
"""
if not isinstance(job_list, list):
raise IllegalJobDescription(err_msg)
for jobname in job_list:
if not isinstance(jobname, str):
raise IllegalJobDescription(err_msg)
def __init__(self, after=None):
"""Initialize runtime dependencies of a job.
Args:
after - list of jobs that must finish before job can be started
Raises:
IllegalJobDescription: when list of jobs has a wrong format.
"""
self.after = []
if after is not None:
JobDependencies._validate_job_list(after, "Dependency task's list must be an array of job names")
self.after = after
@property
def has_dependencies(self):
"""bool: true if job contains runtime dependencies"""
return len(self.after) > 0
def to_dict(self):
"""Serialize job's runtime dependencies
Returns:
dict(str): dictionary with job's runtime dependencies
"""
return self.__dict__
def to_json(self):
"""Serialize job's runtime dependencies to JSON description.
Returns:
JSON description of job's runtime dependencies
"""
return json.dumps(self.to_dict())
class JobIteration:
"""The ``iteration`` element of job description."""
def __init__(self, start=None, stop=None, values=None):
"""Initialize ``iteration`` element of job description.
The iteration can be defined as a range with ``start`` and ``stop`` parameters, or as a ``values`` set.
If ``start`` is not defined but ``stop`` is, the value 0 is assumed as a ``start``.
Args:
start (int): starting index of an iteration
stop (int): stop index of an iteration - the last value of job's iteration will be ``stop`` - 1
values (list(str)): the enumerated list of iteration values
Raises:
IllegalJobDescription: raised when:
* neither ``stop`` nor ``values`` is defined
* both ``stop`` and ``values`` are defined
* ``start`` is greater than or equal to ``stop``
"""
if stop is None and values is None:
raise IllegalJobDescription("Missing stop or values iteration value")
if stop is not None and values is not None:
raise IllegalJobDescription("Stop and values iteration are excluding")
if values is not None:
start = 0
stop = len(values)
else:
if start is None:
start = 0
if start >= stop:
raise IllegalJobDescription("Job iteration stop greater or equal than start")
self.start = start
self.stop = stop
self.values = values
def in_range(self, index):
"""Check if given index is in range of job's iterations.
Args:
index (int): index to check
Returns:
bool: true if index is in range
"""
return self.stop > index >= self.start
def iterations_gen(self):
"""Iterations generator.
Returns:
int: the iteration indexes
"""
return range(self.start, self.stop)
def iterations(self):
"""Return number of iterations of a job.
Returns:
int: number of iterations
"""
return self.stop - self.start
def iteration_value(self, index):
"""Return value related with the iteration index.
Returns:
str: if the iteration has been defined with the ``values`` argument, the value at the position given by
the iteration index is returned, otherwise the string representation of the index is returned
"""
if self.values:
return self.values[index]
else:
return str(index)
def to_dict(self):
"""Serialize ``iteration`` element of job description
Returns:
dict(str): dictionary with ``iteration`` element of job description
"""
return self.__dict__
def to_json(self):
"""Serialize ``iteration`` element of job description to JSON description.
Returns:
JSON description of ``iteration`` element of job description
"""
return json.dumps(self.to_dict())
def __str__(self):
"""Return string representation of ``iteration`` element of job description.
Returns:
str: string representation of ``iteration`` element of job description
"""
if self.values:
return ','.join(self.values)
else:
return "{}-{}".format(self.start, self.stop)
class SubJobState:
"""Represent state of execution of single job's iteration."""
def __init__(self):
"""Initialize state of execution of single job's iteration.
The initial state is set to QUEUED and all other attributes are initialized as empty elements.
"""
self._state = JobState.QUEUED
self._history = []
self._messages = None
self._runtime = {}
def state(self):
"""Return current status of job's iteration.
Returns:
JobState: current status of job's iteration
"""
return self._state
def set_state(self, state, err_msg=None):
"""Set current state of job's iteration.
Args:
state (JobState): the new state of job's iteration
err_msg (str, optional): the error message to record
"""
assert isinstance(state, JobState), "Wrong state type"
self._state = state
self._history.append((state, datetime.now()))
if err_msg:
self.append_message(err_msg)
def append_runtime(self, data):
"""Record job's iteration runtime statistics.
Args:
data (dict): the data to append to job's iteration runtime statistics
"""
self._runtime.update(data)
def history(self):
"""Return job's iteration state change history.
Returns:
list(JobState, DateTime): job's iteration state change history.
"""
return self._history
def messages(self):
"""Return job's iteration recorded error messages.
Returns:
list(str): recorded job's iteration error messages.
"""
return self._messages
def runtime(self):
"""Return job's iteration runtime statistics.
Returns:
dict: job's iteration runtime statistics
"""
return self._runtime
def append_message(self, msg):
"""Record job's iteration error message.
Args:
msg (str): error message to record
"""
if self._messages is None:
self._messages = msg
else:
self._messages = '\n'.join([self._messages, msg])
class Job:
"""Job description and state.
Attributes:
_name (str): job name
_execution (JobExecution): execution description
_resources (JobResources): resources description
_iteration (JobIteration): iteration description
dependencies (JobDependencies): runtime dependencies description
attributes (dict): additional attributes
_subjobs (list(SubJobState)): list of job's iteration states - only if ``iteration`` element defined, the
element at position ``iteration index - iteration start``
_subjobs_not_finished (int): number of not finished already iterations - only if ``iteration`` element defined
_subjobs_failed (int): number of already failed iterations - only if ``iteration`` element defined
_history (list(JobState, DateTime)): state change history
_state (JobState): current state
_messages: recorded error messages
_runtime (dict): runtime information
_queue_pos: current job's position in scheduling queue
"""
@staticmethod
def validate_jobname(jobname):
"""Check if given name is valid for job's name.
Args:
jobname (str): name to validate
Returns:
bool: true if name is valid
"""
return ':' not in jobname
def __init__(self, name, execution, resources, iteration=None, dependencies=None, attributes=None):
"""Initialize job.
Args:
name (str): job name
execution (JobExecution or dict): ``execution`` element of job's description
resources (JobResources or dict): ``resources`` element of job's description
iteration (JobIteration or dict, optional): ``iteration`` element of job's description
dependencies (JobDependencies or dict, optional): ``dependencies`` element of job's description
attributes (dict, optional): additional job's attributes used by partition manager
Raises:
IllegalJobDescription: raised in case of wrong elements of job's description
"""
if name is None:
raise IllegalJobDescription("Job name not defined")
if not Job.validate_jobname(name):
raise IllegalJobDescription("Invalid job name {}".format(name))
self._name = name
if isinstance(execution, JobExecution):
self._execution = execution
elif isinstance(execution, dict):
self._execution = JobExecution(**execution)
else:
raise IllegalJobDescription("Job execution not defined or wrong type")
if isinstance(resources, JobResources):
self._resources = resources
elif isinstance(resources, dict):
self._resources = JobResources(**resources)
else:
raise IllegalJobDescription("Job resources not defined or wrong type")
if isinstance(iteration, JobIteration) or iteration is None:
self._iteration = iteration
elif isinstance(iteration, dict):
try:
self._iteration = JobIteration(**iteration)
except IllegalJobDescription:
raise
except Exception:
raise IllegalJobDescription("Job iteration wrong specification")
else:
raise IllegalJobDescription("Job iteration wrong type")
if isinstance(dependencies, JobDependencies) or dependencies is None:
self.dependencies = dependencies
elif isinstance(dependencies, dict):
try:
self.dependencies = JobDependencies(**dependencies)
except IllegalJobDescription:
raise
except Exception:
raise IllegalJobDescription("Job dependencies wrong specification")
else:
raise IllegalJobDescription("Job dependencies wrong type")
if attributes is not None and not isinstance(attributes, dict):
raise IllegalJobDescription("Job attributes must be dictionary")
self.attributes = attributes
if self._iteration:
self._subjobs = [SubJobState() for i in range(self._iteration.start, self._iteration.stop)]
self._subjobs_not_finished = self._iteration.iterations()
self._subjobs_failed = 0
self._history = []
self._runtime = {}
self.canceled = False
# history must be initialized before
self._state = None
self.set_state(JobState.QUEUED)
self._messages = None
# position in scheduling queue - None if not set
self._queue_pos = None
@property
def name(self):
"""str: job's name"""
return self._name
def get_name(self, iteration=None):
"""Return job's or job's iteration name.
Args:
iteration (int, optional): if defined the iteration's name is returned
Returns:
str: job's or job's iteration's name
"""
return self._name if iteration is None else '{}:{}'.format(self._name, iteration)
@property
def execution(self):
"""JobExecution: the ``execution`` element of job description"""
return self._execution
@property
def resources(self):
"""JobExecution: the ``resources`` element of job description"""
return self._resources
def get_not_finished_iterations(self):
"""Return number of currently not finished iterations.
This method is valid only for iteration jobs.
Returns:
int: number of not finished iterations
"""
return self._subjobs_not_finished
def get_failed_iterations(self):
"""Return number of already failed iterations.
This method is valid only for iteration jobs.
Returns:
int: number of failed iterations
"""
return self._subjobs_failed
def history(self, iteration=None):
"""Return job's or job's iteration state change history.
Args:
iteration (int, optional): if defined the iteration's state change history is returned
Returns:
list(JobState, DateTime): job's or job's iteration state change history.
"""
if iteration is None:
return self._history
return self._get_subjob(iteration).history()
def messages(self, iteration=None):
"""Return job's or job's iteration recorded error messages.
Args:
iteration (int, optional): if defined the iteration's recorded error messages is returned
Returns:
list(str): recorded job's or job's iteration error messages.
"""
if iteration is None:
return self._messages
return self._get_subjob(iteration).messages()
def runtime(self, iteration=None):
"""Return job's or job's iteration runtime statistics.
Args:
iteration (int, optional): if defined the iteration's runtime statistics is returned
Returns:
dict: job's or job's iteration runtime statistics
"""
if iteration is None:
return self._runtime
return self._get_subjob(iteration).runtime()
@property
def has_iterations(self):
"""bool: true if job has iterations"""
return self._iteration is not None
@property
def iteration(self):
"""JobIteration: ``iteration`` element of job description"""
return self._iteration
def state(self, iteration=None):
"""Return job's or job's iteration current state.
Args:
iteration (int, optional): if defined the iteration's state is returned
Returns:
JobState: job's or job's iteration current state
"""
if iteration is None:
return self._state
return self._get_subjob(iteration).state()
def str_state(self, iteration=None):
"""Return job's or job's iteration current state as string.
Args:
iteration (int, optional): if defined the iteration's state is returned
Returns:
JobState: job's or job's iteration current state as string
"""
return self.state(iteration).name
def _get_subjob(self, iteration):
"""Return job's iteration state object for given iteration.
Args:
iteration (int): the iteration index
Returns:
SubJobState: job's iteration state object
"""
return self._subjobs[iteration - self._iteration.start]
@property
def iteration_states(self):
"""list(SubJobState): list of iteration states"""
return self._subjobs
def set_state(self, state, iteration=None, err_msg=None):
"""Set current job's or job's iteration state.
Args:
state (JobState): new job's or job's iteration state
iteration (int, optional): job's iteration index if the iteration state should be set
err_msg (str, optional): the optional error message to record
Returns:
JobState: the job's finish state if job's iteration status change triggered job status change (for example
the last iteration finished, so the whole job also finished), or None if the job as a whole has not
finished yet
"""
assert isinstance(state, JobState), "Wrong state type"
_logger.debug(f'job {self._name} iteration {iteration} status changed to {state.name} '
f'(final ? {state.is_finished()})')
if iteration is not None:
self._get_subjob(iteration).set_state(state, err_msg)
if state.is_finished():
self._subjobs_not_finished = self._subjobs_not_finished - 1
if state in [JobState.FAILED, JobState.OMITTED, JobState.CANCELED]:
self._subjobs_failed += 1
_logger.debug(f'for job {self._name} currently not finished subjobs {self._subjobs_not_finished}, '
f'failed {self._subjobs_failed}')
if self._subjobs_not_finished == 0 and not self._state.is_finished():
# all subjobs finished - change whole job state
if self.canceled:
final_state = JobState.CANCELED
else:
final_state = JobState.SUCCEED if self._subjobs_failed == 0 else JobState.FAILED
self.set_state(final_state)
return final_state
else:
self._state = state
self._history.append((state, datetime.now()))
if err_msg:
self.append_message(err_msg)
return None
@property
def has_dependencies(self):
"""bool: true if job has runtime dependencies"""
return self.dependencies is not None and self.dependencies.has_dependencies
def append_message(self, msg):
"""Record job's error message.
Args:
msg (str): error message to record
"""
if self._messages is None:
self._messages = msg
else:
self._messages = '\n'.join([self._messages, msg])
def queue_pos(self):
"""Return current position of a job in scheduling queue
Returns:
int: current position of a job in scheduling queue
"""
return self._queue_pos
def set_queue_pos(self, pos):
"""Set current position of a job in scheduling queue.
Args:
pos (int): current position of a job in scheduling queue
"""
self._queue_pos = pos
def clear_queue_pos(self):
"""Reset current position of a job in scheduling queue."""
self._queue_pos = None
def append_runtime(self, data, iteration):
"""Record job's or job's iteration runtime statistics.
Args:
data (dict): the data to append to job's or job's iteration runtime statistics
iteration (int, optional): if defined the iteration's runtime statistics will be updated
"""
if iteration is not None:
self._get_subjob(iteration).append_runtime(data)
else:
self._runtime.update(data)
def to_dict(self):
"""Serialize job's description to dictionary.
Returns:
dict(str): dictionary with job description
"""
result = {
'name': self._name,
'execution': self._execution.to_dict(),
'resources': self._resources.to_dict()}
if self._iteration is not None:
result['iteration'] = self._iteration.to_dict()
if self.dependencies is not None:
result['dependencies'] = self.dependencies.to_dict()
if self.attributes is not None:
result['attributes'] = self.attributes
return result
def to_json(self):
"""Serialize job description to JSON format.
Returns:
JSON of job's description
"""
return json.dumps(self.to_dict())
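# A minimal construction sketch for Job (the name, paths and sizes are illustrative assumptions):
#   job = Job(
#       name='hostname_job',
#       execution={'exec': '/bin/hostname', 'stdout': 'out.txt'},
#       resources={'numCores': {'exact': 1}},
#       iteration={'start': 0, 'stop': 4},
#   )
#   job.has_iterations      # True
#   job.get_name(2)         # 'hostname_job:2'
#   job.str_state()         # 'QUEUED'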
class JobList:
"""The list of all submited jobs.
Attributes:
_jmap (dict(str,Job)): dictionary with all submited jobs with name as key
"""
def __init__(self):
"""Initialize the list."""
self._jmap = {}
@staticmethod
def parse_jobname(jobname):
"""Split given name into job name and iteration.
Args:
jobname (str): the name to parse
Returns:
name, iteration: tuple with job name and iteration, if given job name didn't contain iteration index, the
second value will be None
"""
parts = jobname.split(':', 1)
return parts[0], int(parts[1]) if len(parts) > 1 else None
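# Illustrative splits (the job names are assumptions):
#   JobList.parse_jobname('preproc:7')   ->  ('preproc', 7)
#   JobList.parse_jobname('preproc')     ->  ('preproc', None)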
def add(self, job):
"""Add a new job.
Args:
job (Job): job to add to the list
Raises:
JobAlreadyExist: when job with given name already exists in list
"""
assert isinstance(job, Job), "Wrong job type '%s'" % (type(job).__name__)
if self.exist(job.get_name()):
raise JobAlreadyExist(job.get_name())
self._jmap[job.get_name()] = job
def exist(self, jobname):
"""Check if job with given name is in list.
Args:
jobname (str): job name to check
Returns:
bool: true if job with given name is already in list
"""
return jobname in self._jmap
def get(self, jobname):
"""Return job with given name.
Args:
jobname (str): job name
Returns:
Job: job from the list or None if no such job has been added.
"""
return self._jmap.get(jobname, None)
def jobs(self):
"""Return all job names in the list.
Returns:
set-like object with job names
"""
return self._jmap.keys()
def remove(self, jobname):
"""Remove job with given name from list.
Args:
jobname (str): job's name to remove from list
"""
del self._jmap[jobname]
|
PypiClean
|
/verizon-api-sdk-1.0.0.tar.gz/verizon-api-sdk-1.0.0/verizon/models/delete_target_request.py
|
from verizon.api_helper import APIHelper
from verizon.models.account_identifier import AccountIdentifier
from verizon.models.resource_identifier import ResourceIdentifier
class DeleteTargetRequest(object):
"""Implementation of the 'DeleteTargetRequest' model.
Target to delete.
Attributes:
accountidentifier (AccountIdentifier): The ID of the authenticating
billing account, in the format
`{"billingaccountid":"1234567890-12345"}`.
resourceidentifier (ResourceIdentifier): The ID of the target to
delete, in the format {"id":
"dd1682d3-2d80-cefc-f3ee-25154800beff"}.
"""
# Create a mapping from Model property names to API property names
_names = {
"accountidentifier": 'accountidentifier',
"resourceidentifier": 'resourceidentifier'
}
_optionals = [
'accountidentifier',
'resourceidentifier',
]
def __init__(self,
accountidentifier=APIHelper.SKIP,
resourceidentifier=APIHelper.SKIP):
"""Constructor for the DeleteTargetRequest class"""
# Initialize members of the class
if accountidentifier is not APIHelper.SKIP:
self.accountidentifier = accountidentifier
if resourceidentifier is not APIHelper.SKIP:
self.resourceidentifier = resourceidentifier
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
accountidentifier = AccountIdentifier.from_dictionary(dictionary.get('accountidentifier')) if 'accountidentifier' in dictionary.keys() else APIHelper.SKIP
resourceidentifier = ResourceIdentifier.from_dictionary(dictionary.get('resourceidentifier')) if 'resourceidentifier' in dictionary.keys() else APIHelper.SKIP
# Return an object of this model
return cls(accountidentifier,
resourceidentifier)
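# Illustrative deserialization (the identifier values are assumptions; the nested
# dictionaries are passed straight to AccountIdentifier/ResourceIdentifier.from_dictionary,
# whose expected keys are assumed from the docstring above):
#   req = DeleteTargetRequest.from_dictionary({
#       'accountidentifier': {'billingaccountid': '1234567890-12345'},
#       'resourceidentifier': {'id': 'dd1682d3-2d80-cefc-f3ee-25154800beff'},
#   })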
|
PypiClean
|
/mystreamlit-1.7.0.tar.gz/mystreamlit-1.7.0/streamlit/legacy_caching/hashing.py
|
import collections
import dis
import enum
import functools
import hashlib
import importlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import textwrap
import threading
import typing
import weakref
from typing import Any, List, Pattern, Optional, Dict, Callable, Union
import unittest.mock
from streamlit import config
from streamlit import file_util
from streamlit import type_util
from streamlit import util
from streamlit.errors import StreamlitAPIException, MarkdownFormattedException
from streamlit.folder_black_list import FolderBlackList
from streamlit.logger import get_logger
from streamlit.uploaded_file_manager import UploadedFile
_LOGGER = get_logger(__name__)
# If a dataframe has more than this many rows, we consider it large and hash a sample.
_PANDAS_ROWS_LARGE = 100000
_PANDAS_SAMPLE_SIZE = 10000
# Similar to dataframes, we also sample large numpy arrays.
_NP_SIZE_LARGE = 1000000
_NP_SAMPLE_SIZE = 100000
# Arbitrary item to denote where we found a cycle in a hashed object.
# This allows us to hash self-referencing lists, dictionaries, etc.
_CYCLE_PLACEHOLDER = b"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE"
# This needs to be initialized lazily to avoid calling config.get_option() and
# thus initializing config options when this file is first imported.
_FOLDER_BLACK_LIST = None
# FFI objects (objects that interface with C libraries) can be any of these types:
_FFI_TYPE_NAMES = [
"_cffi_backend.FFI",
"builtins.CompiledFFI",
]
# KERAS objects can be any of these types:
_KERAS_TYPE_NAMES = [
"keras.engine.training.Model",
"tensorflow.python.keras.engine.training.Model",
"tensorflow.python.keras.engine.functional.Functional",
]
Context = collections.namedtuple("Context", ["globals", "cells", "varnames"])
# Mapping of types or fully qualified names to hash functions. This is used to
# override the behavior of the hasher inside Streamlit's caching mechanism:
# when the hasher encounters an object, it will first check to see if its type
# matches a key in this dict and, if so, will use the provided function to
# generate a hash for it.
HashFuncsDict = Dict[Union[str, typing.Type[Any]], Callable[[Any], Any]]
class HashReason(enum.Enum):
CACHING_FUNC_ARGS = 0
CACHING_FUNC_BODY = 1
CACHING_FUNC_OUTPUT = 2
CACHING_BLOCK = 3
def update_hash(
val: Any,
hasher,
hash_reason: HashReason,
hash_source: Callable[..., Any],
context: Optional[Context] = None,
hash_funcs: Optional[HashFuncsDict] = None,
) -> None:
"""Updates a hashlib hasher with the hash of val.
This is the main entrypoint to hashing.py.
"""
hash_stacks.current.hash_reason = hash_reason
hash_stacks.current.hash_source = hash_source
ch = _CodeHasher(hash_funcs)
ch.update(hasher, val, context)
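# A minimal usage sketch (the value being hashed and the hash_source callable are
# illustrative assumptions):
#   hasher = hashlib.new("md5")
#   update_hash(
#       {"a": 1, "b": [1, 2, 3]},
#       hasher,
#       hash_reason=HashReason.CACHING_FUNC_ARGS,
#       hash_source=lambda: None,
#   )
#   digest = hasher.hexdigest()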
class _HashStack:
"""Stack of what has been hashed, for debug and circular reference detection.
This internally keeps 1 stack per thread.
Internally, this stores the ID of pushed objects rather than the objects
themselves because otherwise the "in" operator inside __contains__ would
fail for objects that don't return a boolean for "==" operator. For
example, arr == 10 where arr is a NumPy array returns another NumPy array.
This causes the "in" to crash since it expects a boolean.
"""
def __init__(self):
self._stack: collections.OrderedDict[int, List[Any]] = collections.OrderedDict()
# The reason why we're doing this hashing, for debug purposes.
self.hash_reason: Optional[HashReason] = None
# Either a function or a code block, depending on whether the reason is
# due to hashing part of a function (i.e. body, args, output) or an
# st.Cache codeblock.
self.hash_source: Optional[Callable[..., Any]] = None
def __repr__(self) -> str:
return util.repr_(self)
def push(self, val: Any):
self._stack[id(val)] = val
def pop(self):
self._stack.popitem()
def __contains__(self, val: Any):
return id(val) in self._stack
def pretty_print(self):
def to_str(v):
try:
return "Object of type %s: %s" % (type_util.get_fqn_type(v), str(v))
except:
return "<Unable to convert item to string>"
# IDEA: Maybe we should remove our internal "hash_funcs" from the
# stack. I'm not removing those now because even though those aren't
# useful to users I think they might be useful when we're debugging an
# issue sent by a user. So let's wait a few months and see if they're
# indeed useful...
return "\n".join(to_str(x) for x in reversed(self._stack.values()))
class _HashStacks:
"""Stacks of what has been hashed, with at most 1 stack per thread."""
def __init__(self):
self._stacks: weakref.WeakKeyDictionary[
threading.Thread, _HashStack
] = weakref.WeakKeyDictionary()
def __repr__(self) -> str:
return util.repr_(self)
@property
def current(self) -> _HashStack:
current_thread = threading.current_thread()
stack = self._stacks.get(current_thread, None)
if stack is None:
stack = _HashStack()
self._stacks[current_thread] = stack
return stack
hash_stacks = _HashStacks()
class _Cells:
"""
This is basically a dict that allows us to push/pop frames of data.
Python code objects are nested. In the following function:
@st.cache()
def func():
production = [[x + y for x in range(3)] for y in range(5)]
return production
func.__code__ is a code object, and contains (inside
func.__code__.co_consts) additional code objects for the list
comprehensions. Those objects have their own co_freevars and co_cellvars.
What we need to do as we're traversing this "tree" of code objects is to
save each code object's vars, hash it, and then restore the original vars.
"""
_cell_delete_obj = object()
def __init__(self):
self.values = {}
self.stack = []
self.frames = []
def __repr__(self) -> str:
return util.repr_(self)
def _set(self, key, value):
"""
Sets a value and saves the old value so it can be restored when
we pop the frame. A sentinel object, _cell_delete_obj, indicates that
the key was previously empty and should just be deleted.
"""
# save the old value (or mark that it didn't exist)
self.stack.append((key, self.values.get(key, self._cell_delete_obj)))
# write the new value
self.values[key] = value
def pop(self):
"""Pop off the last frame we created, and restore all the old values."""
idx = self.frames.pop()
for key, val in self.stack[idx:]:
if val is self._cell_delete_obj:
del self.values[key]
else:
self.values[key] = val
self.stack = self.stack[:idx]
def push(self, code, func=None):
"""Create a new frame, and save all of `code`'s vars into it."""
self.frames.append(len(self.stack))
for var in code.co_cellvars:
self._set(var, var)
if code.co_freevars:
if func is not None:
assert len(code.co_freevars) == len(func.__closure__)
for var, cell in zip(code.co_freevars, func.__closure__):
self._set(var, cell.cell_contents)
else:
# List comprehension code objects also have freevars, but they
# don't have a surrounding closure. In these cases we just use the name.
for var in code.co_freevars:
self._set(var, var)
def _get_context(func) -> Context:
varnames = {}
if inspect.ismethod(func):
varnames = {"self": func.__self__}
return Context(globals=func.__globals__, cells=_Cells(), varnames=varnames)
def _int_to_bytes(i: int) -> bytes:
num_bytes = (i.bit_length() + 8) // 8
return i.to_bytes(num_bytes, "little", signed=True)
def _key(obj: Optional[Any]) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if (
type_util.is_type(obj, "pandas.core.frame.DataFrame")
or type_util.is_type(obj, "numpy.ndarray")
or inspect.isbuiltin(obj)
or inspect.isroutine(obj)
or inspect.iscode(obj)
):
return id(obj)
return NoResult
class _CodeHasher:
"""A hasher that can hash code objects including dependencies."""
def __init__(self, hash_funcs: Optional[HashFuncsDict] = None):
# Can't use types as the keys in the internal _hash_funcs because
# we always remove user-written modules from memory when rerunning a
# script in order to reload it and grab the latest code changes.
# (See LocalSourcesWatcher.py:on_file_changed) This causes
# the type object to refer to different underlying class instances each run,
# so type-based comparisons fail. To solve this, we use the types converted
# to fully-qualified strings as keys in our internal dict.
self._hash_funcs: HashFuncsDict
if hash_funcs:
self._hash_funcs = {
k if isinstance(k, str) else type_util.get_fqn(k): v
for k, v in hash_funcs.items()
}
else:
self._hash_funcs = {}
self._hashes: Dict[Any, bytes] = {}
# The number of the bytes in the hash.
self.size = 0
def __repr__(self) -> str:
return util.repr_(self)
def to_bytes(self, obj: Any, context: Optional[Context] = None) -> bytes:
"""Add memoization to _to_bytes and protect against cycles in data structures."""
tname = type(obj).__qualname__.encode()
key = (tname, _key(obj))
# Memoize if possible.
if key[1] is not NoResult:
if key in self._hashes:
return self._hashes[key]
# Break recursive cycles.
if obj in hash_stacks.current:
return _CYCLE_PLACEHOLDER
hash_stacks.current.push(obj)
try:
# Hash the input
b = b"%s:%s" % (tname, self._to_bytes(obj, context))
# Note: this size calculation can over-count, because to_bytes is called
# recursively from inside _to_bytes, so nested values get double-counted.
self.size += sys.getsizeof(b)
if key[1] is not NoResult:
self._hashes[key] = b
except (UnhashableTypeError, UserHashError, InternalHashError):
# Re-raise exceptions we hand-raise internally.
raise
except BaseException as e:
raise InternalHashError(e, obj)
finally:
# In case an UnhashableTypeError (or other) error is thrown, clean up the
# stack so we don't get false positives in future hashing calls
hash_stacks.current.pop()
return b
def update(self, hasher, obj: Any, context: Optional[Context] = None) -> None:
"""Update the provided hasher with the hash of an object."""
b = self.to_bytes(obj, context)
hasher.update(b)
def _file_should_be_hashed(self, filename: str) -> bool:
global _FOLDER_BLACK_LIST
if not _FOLDER_BLACK_LIST:
_FOLDER_BLACK_LIST = FolderBlackList(
config.get_option("server.folderWatchBlacklist")
)
filepath = os.path.abspath(filename)
file_is_blacklisted = _FOLDER_BLACK_LIST.is_blacklisted(filepath)
# Short circuiting for performance.
if file_is_blacklisted:
return False
return file_util.file_is_in_folder_glob(
filepath, self._get_main_script_directory()
) or file_util.file_in_pythonpath(filepath)
def _to_bytes(self, obj: Any, context: Optional[Context]) -> bytes:
"""Hash objects to bytes, including code with dependencies.
Python's built in `hash` does not produce consistent results across
runs.
"""
if isinstance(obj, unittest.mock.Mock):
# Mock objects can appear to be infinitely
# deep, so we don't try to hash them at all.
return self.to_bytes(id(obj))
elif isinstance(obj, bytes) or isinstance(obj, bytearray):
return obj
elif type_util.get_fqn_type(obj) in self._hash_funcs:
# Escape hatch for unsupported objects
hash_func = self._hash_funcs[type_util.get_fqn_type(obj)]
try:
output = hash_func(obj)
except BaseException as e:
raise UserHashError(e, obj, hash_func=hash_func)
return self.to_bytes(output)
elif isinstance(obj, str):
return obj.encode()
elif isinstance(obj, float):
return self.to_bytes(hash(obj))
elif isinstance(obj, int):
return _int_to_bytes(obj)
elif isinstance(obj, (list, tuple)):
h = hashlib.new("md5")
for item in obj:
self.update(h, item, context)
return h.digest()
elif isinstance(obj, dict):
h = hashlib.new("md5")
for item in obj.items():
self.update(h, item, context)
return h.digest()
elif obj is None:
return b"0"
elif obj is True:
return b"1"
elif obj is False:
return b"0"
elif type_util.is_type(obj, "pandas.core.frame.DataFrame") or type_util.is_type(
obj, "pandas.core.series.Series"
):
import pandas as pd
if len(obj) >= _PANDAS_ROWS_LARGE:
obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)
try:
return b"%s" % pd.util.hash_pandas_object(obj).sum()
except TypeError:
# Use pickle if pandas cannot hash the object, for example if
# it contains unhashable objects.
return b"%s" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
elif type_util.is_type(obj, "numpy.ndarray"):
h = hashlib.new("md5")
self.update(h, obj.shape)
if obj.size >= _NP_SIZE_LARGE:
import numpy as np
state = np.random.RandomState(0)
obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)
self.update(h, obj.tobytes())
return h.digest()
elif inspect.isbuiltin(obj):
return bytes(obj.__name__.encode())
elif any(type_util.is_type(obj, typename) for typename in _FFI_TYPE_NAMES):
return self.to_bytes(None)
elif type_util.is_type(obj, "builtins.mappingproxy") or type_util.is_type(
obj, "builtins.dict_items"
):
return self.to_bytes(dict(obj))
elif type_util.is_type(obj, "builtins.getset_descriptor"):
return bytes(obj.__qualname__.encode())
elif isinstance(obj, UploadedFile):
# UploadedFile is a BytesIO (thus IOBase) but has a name.
# It does not have a timestamp, so this check must come before
# the one for temporary files.
h = hashlib.new("md5")
self.update(h, obj.name)
self.update(h, obj.tell())
self.update(h, obj.getvalue())
return h.digest()
elif hasattr(obj, "name") and (
isinstance(obj, io.IOBase)
# Handle temporary files used during testing
or isinstance(obj, tempfile._TemporaryFileWrapper)
):
# Hash files as name + last modification date + offset.
# NB: we're using hasattr("name") to differentiate between
# on-disk and in-memory StringIO/BytesIO file representations.
# That means that this condition must come *before* the next
# condition, which just checks for StringIO/BytesIO.
h = hashlib.new("md5")
obj_name = getattr(obj, "name", "wonthappen") # Just to appease MyPy.
self.update(h, obj_name)
self.update(h, os.path.getmtime(obj_name))
self.update(h, obj.tell())
return h.digest()
elif isinstance(obj, Pattern):
return self.to_bytes([obj.pattern, obj.flags])
elif isinstance(obj, io.StringIO) or isinstance(obj, io.BytesIO):
# Hash in-memory StringIO/BytesIO by their full contents
# and seek position.
h = hashlib.new("md5")
self.update(h, obj.tell())
self.update(h, obj.getvalue())
return h.digest()
elif any(
type_util.get_fqn(x) == "sqlalchemy.pool.base.Pool"
for x in type(obj).__bases__
):
# Get connect_args from the closure of the creator function. It includes
# arguments parsed from the URL and those passed in via `connect_args`.
# However if a custom `creator` function is passed in then we don't
# expect to get this data.
cargs = obj._creator.__closure__
cargs = [cargs[0].cell_contents, cargs[1].cell_contents] if cargs else None
# Sort kwargs since hashing dicts is sensitive to key order
if cargs:
cargs[1] = dict(
collections.OrderedDict(
sorted(cargs[1].items(), key=lambda t: t[0]) # type: ignore
)
)
reduce_data = obj.__reduce__()
# Remove thread related objects
for attr in [
"_overflow_lock",
"_pool",
"_conn",
"_fairy",
"_threadconns",
"logger",
]:
reduce_data[2].pop(attr, None)
return self.to_bytes([reduce_data, cargs])
elif type_util.is_type(obj, "sqlalchemy.engine.base.Engine"):
# Remove the url because it's overwritten by creator and connect_args
reduce_data = obj.__reduce__()
reduce_data[2].pop("url", None)
reduce_data[2].pop("logger", None)
return self.to_bytes(reduce_data)
elif type_util.is_type(obj, "numpy.ufunc"):
# For numpy.remainder, this returns remainder.
return bytes(obj.__name__.encode())
elif type_util.is_type(obj, "socket.socket"):
return self.to_bytes(id(obj))
elif any(
type_util.get_fqn(x) == "torch.nn.modules.module.Module"
for x in type(obj).__bases__
):
return self.to_bytes(id(obj))
elif type_util.is_type(obj, "tensorflow.python.client.session.Session"):
return self.to_bytes(id(obj))
elif type_util.is_type(obj, "torch.Tensor") or type_util.is_type(
obj, "torch._C._TensorBase"
):
return self.to_bytes([obj.detach().numpy(), obj.grad])
elif any(type_util.is_type(obj, typename) for typename in _KERAS_TYPE_NAMES):
return self.to_bytes(id(obj))
elif type_util.is_type(
obj,
"tensorflow.python.saved_model.load.Loader._recreate_base_user_object.<locals>._UserObject",
):
return self.to_bytes(id(obj))
elif inspect.isroutine(obj):
wrapped = getattr(obj, "__wrapped__", None)
if wrapped is not None:
# Ignore the wrapper of wrapped functions.
return self.to_bytes(wrapped)
if obj.__module__.startswith("streamlit"):
# Ignore streamlit modules even if they are in the CWD
# (e.g. during development).
return self.to_bytes("%s.%s" % (obj.__module__, obj.__name__))
h = hashlib.new("md5")
code = getattr(obj, "__code__", None)
assert code is not None
if self._file_should_be_hashed(code.co_filename):
context = _get_context(obj)
defaults = getattr(obj, "__defaults__", None)
if defaults is not None:
self.update(h, defaults, context)
h.update(self._code_to_bytes(code, context, func=obj))
else:
# Don't hash code from files outside the main script's directory
# (or the python path).
self.update(h, obj.__module__)
self.update(h, obj.__name__)
return h.digest()
elif inspect.iscode(obj):
if context is None:
raise RuntimeError("context must be defined when hashing code")
return self._code_to_bytes(obj, context)
elif inspect.ismodule(obj):
# TODO: Figure out how to best show this kind of warning to the
# user. In the meantime, show nothing. This scenario is too common,
# so the current warning is quite annoying...
# st.warning(('Streamlit does not support hashing modules. '
# 'We did not hash `%s`.') % obj.__name__)
# TODO: Hash more than just the name for internal modules.
return self.to_bytes(obj.__name__)
elif inspect.isclass(obj):
# TODO: Figure out how to best show this kind of warning to the
# user. In the meantime, show nothing. This scenario is too common,
# (e.g. in every "except" statement) so the current warning is
# quite annoying...
# st.warning(('Streamlit does not support hashing classes. '
# 'We did not hash `%s`.') % obj.__name__)
# TODO: Hash more than just the name of classes.
return self.to_bytes(obj.__name__)
elif isinstance(obj, functools.partial):
# The return value of functools.partial is not a plain function:
# it's a callable object that remembers the original function plus
# the values you pickled into it. So here we need to special-case it.
h = hashlib.new("md5")
self.update(h, obj.args)
self.update(h, obj.func)
self.update(h, obj.keywords)
return h.digest()
else:
# As a last resort, hash the output of the object's __reduce__ method
h = hashlib.new("md5")
try:
reduce_data = obj.__reduce__()
except BaseException as e:
raise UnhashableTypeError(e, obj)
for item in reduce_data:
self.update(h, item, context)
return h.digest()
def _code_to_bytes(self, code, context: Context, func=None) -> bytes:
h = hashlib.new("md5")
# Hash the bytecode.
self.update(h, code.co_code)
# Hash constants that are referenced by the bytecode but ignore names of lambdas.
consts = [
n
for n in code.co_consts
if not isinstance(n, str) or not n.endswith(".<lambda>")
]
self.update(h, consts, context)
context.cells.push(code, func=func)
for ref in get_referenced_objects(code, context):
self.update(h, ref, context)
context.cells.pop()
return h.digest()
@staticmethod
def _get_main_script_directory() -> str:
"""Get the directory of the main script."""
import __main__
import os
# This works because we set __main__.__file__ to the
# script path in ScriptRunner.
main_path = __main__.__file__
return str(os.path.dirname(main_path))
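# Editor's note: a hedged usage sketch (not part of this module). A _CodeHasher
# feeds an ordinary hashlib hasher; md5 is only what this module itself uses:
#
#     import hashlib
#     digest = hashlib.new("md5")
#     _CodeHasher(hash_funcs=None).update(digest, some_cached_function)
#     cache_key = digest.hexdigest()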
def get_referenced_objects(code, context: Context) -> List[Any]:
# Top of the stack
tos: Any = None
lineno = None
refs: List[Any] = []
def set_tos(t):
nonlocal tos
if tos is not None:
# Hash tos so we support reading multiple objects
refs.append(tos)
tos = t
# Our goal is to find referenced objects. The problem is that co_names
# does not have fully qualified names in it. So if you access `foo.bar`,
# co_names has `foo` and `bar` in it but it doesn't tell us that the
# code reads `bar` of `foo`. We are going over the bytecode to resolve
# from which object an attribute is requested (a commented illustration follows this function).
# Read more about bytecode at https://docs.python.org/3/library/dis.html
for op in dis.get_instructions(code):
try:
# Sometimes starts_line is None, in which case let's just remember the
# previous starts_line (if any). This way when there's an exception we at
# least can point users somewhat near the line where the error stems from.
if op.starts_line is not None:
lineno = op.starts_line
if op.opname in ["LOAD_GLOBAL", "LOAD_NAME"]:
if op.argval in context.globals:
set_tos(context.globals[op.argval])
else:
set_tos(op.argval)
elif op.opname in ["LOAD_DEREF", "LOAD_CLOSURE"]:
set_tos(context.cells.values[op.argval])
elif op.opname == "IMPORT_NAME":
try:
set_tos(importlib.import_module(op.argval))
except ImportError:
set_tos(op.argval)
elif op.opname in ["LOAD_METHOD", "LOAD_ATTR", "IMPORT_FROM"]:
if tos is None:
refs.append(op.argval)
elif isinstance(tos, str):
tos += "." + op.argval
else:
tos = getattr(tos, op.argval)
elif op.opname == "DELETE_FAST" and tos:
del context.varnames[op.argval]
tos = None
elif op.opname == "STORE_FAST" and tos:
context.varnames[op.argval] = tos
tos = None
elif op.opname == "LOAD_FAST" and op.argval in context.varnames:
set_tos(context.varnames[op.argval])
else:
# For all other instructions, hash the current TOS.
if tos is not None:
refs.append(tos)
tos = None
except Exception as e:
raise UserHashError(e, code, lineno=lineno)
return refs
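# Editor's note: a short illustration (sketch, not part of this module) of the
# bytecode walk above, using made-up names. Given
#
#     def cached():
#         return helpers.load_data()
#
# the bytecode contains LOAD_GLOBAL "helpers" followed by LOAD_METHOD
# "load_data". If `helpers` is present in context.globals the walker sets it as
# TOS, and the LOAD_METHOD step replaces TOS with getattr(helpers, "load_data"),
# so the resolved function object ends up in the returned refs; if `helpers`
# is not in globals, the string "helpers.load_data" is recorded instead.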
class NoResult:
"""Placeholder class for return values when None is meaningful."""
pass
class UnhashableTypeError(StreamlitAPIException):
def __init__(self, orig_exc, failed_obj):
msg = self._get_message(orig_exc, failed_obj)
super(UnhashableTypeError, self).__init__(msg)
self.with_traceback(orig_exc.__traceback__)
def _get_message(self, orig_exc, failed_obj):
args = _get_error_message_args(orig_exc, failed_obj)
# This needs to have zero indentation otherwise %(hash_stack)s will
# render incorrectly in Markdown.
return (
"""
Cannot hash object of type `%(failed_obj_type_str)s`, found in %(object_part)s
%(object_desc)s.
While caching %(object_part)s %(object_desc)s, Streamlit encountered an
object of type `%(failed_obj_type_str)s`, which it does not know how to hash.
To address this, please try helping Streamlit understand how to hash that type
by passing the `hash_funcs` argument into `@st.cache`. For example:
```
@st.cache(hash_funcs={%(failed_obj_type_str)s: my_hash_func})
def my_func(...):
...
```
If you don't know where the object of type `%(failed_obj_type_str)s` is coming
from, try looking at the hash chain below for an object that you do recognize,
then pass that to `hash_funcs` instead:
```
%(hash_stack)s
```
Please see the `hash_funcs` [documentation]
(https://docs.streamlit.io/library/advanced-features/caching#the-hash_funcs-parameter)
for more details.
"""
% args
).strip("\n")
class UserHashError(StreamlitAPIException):
def __init__(self, orig_exc, cached_func_or_code, hash_func=None, lineno=None):
self.alternate_name = type(orig_exc).__name__
if hash_func:
msg = self._get_message_from_func(orig_exc, cached_func_or_code, hash_func)
else:
msg = self._get_message_from_code(orig_exc, cached_func_or_code, lineno)
super(UserHashError, self).__init__(msg)
self.with_traceback(orig_exc.__traceback__)
def _get_message_from_func(self, orig_exc, cached_func, hash_func):
args = _get_error_message_args(orig_exc, cached_func)
if hasattr(hash_func, "__name__"):
args["hash_func_name"] = "`%s()`" % hash_func.__name__
else:
args["hash_func_name"] = "a function"
return (
"""
%(orig_exception_desc)s
This error is likely due to a bug in %(hash_func_name)s, which is a
user-defined hash function that was passed into the `@st.cache` decorator of
%(object_desc)s.
%(hash_func_name)s failed when hashing an object of type
`%(failed_obj_type_str)s`. If you don't know where that object is coming from,
try looking at the hash chain below for an object that you do recognize, then
pass that to `hash_funcs` instead:
```
%(hash_stack)s
```
If you think this is actually a Streamlit bug, please [file a bug report here.]
(https://github.com/streamlit/streamlit/issues/new/choose)
"""
% args
).strip("\n")
def _get_message_from_code(self, orig_exc: BaseException, cached_code, lineno: int):
args = _get_error_message_args(orig_exc, cached_code)
failing_lines = _get_failing_lines(cached_code, lineno)
failing_lines_str = "".join(failing_lines)
failing_lines_str = textwrap.dedent(failing_lines_str).strip("\n")
args["failing_lines_str"] = failing_lines_str
args["filename"] = cached_code.co_filename
args["lineno"] = lineno
# This needs to have zero indentation otherwise %(lines_str)s will
# render incorrectly in Markdown.
return (
"""
%(orig_exception_desc)s
Streamlit encountered an error while caching %(object_part)s %(object_desc)s.
This is likely due to a bug in `%(filename)s` near line `%(lineno)s`:
```
%(failing_lines_str)s
```
Please modify the code above to address this.
If you think this is actually a Streamlit bug, you may [file a bug report
here.] (https://github.com/streamlit/streamlit/issues/new/choose)
"""
% args
).strip("\n")
class InternalHashError(MarkdownFormattedException):
"""Exception in Streamlit hashing code (i.e. not a user error)"""
def __init__(self, orig_exc: BaseException, failed_obj: Any):
msg = self._get_message(orig_exc, failed_obj)
super(InternalHashError, self).__init__(msg)
self.with_traceback(orig_exc.__traceback__)
def _get_message(self, orig_exc: BaseException, failed_obj: Any) -> str:
args = _get_error_message_args(orig_exc, failed_obj)
# This needs to have zero indentation otherwise %(hash_stack)s will
# render incorrectly in Markdown.
return (
"""
%(orig_exception_desc)s
While caching %(object_part)s %(object_desc)s, Streamlit encountered an
object of type `%(failed_obj_type_str)s`, which it does not know how to hash.
**In this specific case, it's very likely you found a Streamlit bug so please
[file a bug report here.]
(https://github.com/streamlit/streamlit/issues/new/choose)**
In the meantime, you can try bypassing this error by registering a custom
hash function via the `hash_funcs` keyword in @st.cache(). For example:
```
@st.cache(hash_funcs={%(failed_obj_type_str)s: my_hash_func})
def my_func(...):
...
```
If you don't know where the object of type `%(failed_obj_type_str)s` is coming
from, try looking at the hash chain below for an object that you do recognize,
then pass that to `hash_funcs` instead:
```
%(hash_stack)s
```
Please see the `hash_funcs` [documentation]
(https://docs.streamlit.io/library/advanced-features/caching#the-hash_funcs-parameter)
for more details.
"""
% args
).strip("\n")
def _get_error_message_args(orig_exc: BaseException, failed_obj: Any) -> Dict[str, Any]:
hash_reason = hash_stacks.current.hash_reason
hash_source = hash_stacks.current.hash_source
failed_obj_type_str = type_util.get_fqn_type(failed_obj)
if hash_source is None or hash_reason is None:
object_desc = "something"
object_part = ""
additional_explanation = ""
elif hash_reason is HashReason.CACHING_BLOCK:
object_desc = "a code block"
object_part = ""
additional_explanation = ""
else:
if hasattr(hash_source, "__name__"):
object_desc = "`%s()`" % hash_source.__name__
object_desc_specific = object_desc
else:
object_desc = "a function"
object_desc_specific = "that function"
if hash_reason is HashReason.CACHING_FUNC_ARGS:
object_part = "the arguments of"
elif hash_reason is HashReason.CACHING_FUNC_BODY:
object_part = "the body of"
elif hash_reason is HashReason.CACHING_FUNC_OUTPUT:
object_part = "the return value of"
return {
"orig_exception_desc": str(orig_exc),
"failed_obj_type_str": failed_obj_type_str,
"hash_stack": hash_stacks.current.pretty_print(),
"object_desc": object_desc,
"object_part": object_part,
}
def _get_failing_lines(code, lineno: int) -> List[str]:
"""Get list of strings (lines of code) from lineno to lineno+3.
Ideally we'd return the exact line where the error took place, but there
are reasons why this is not possible without a lot of work, including
playing with the AST. So for now we're returning 3 lines near where
the error took place.
"""
source_lines, source_lineno = inspect.getsourcelines(code)
start = lineno - source_lineno
end = min(start + 3, len(source_lines))
lines = source_lines[start:end]
return lines
|
PypiClean
|
/ansible-base-2.10.17.tar.gz/ansible-base-2.10.17/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import runpy
import json
import os
import subprocess
import sys
from contextlib import contextmanager
from ansible.executor.powershell.module_manifest import PSModuleDepFinder
from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
from ansible.module_utils.six import reraise
from ansible.module_utils._text import to_bytes, to_text
from .utils import CaptureStd, find_executable, get_module_name_from_filename
class AnsibleModuleCallError(RuntimeError):
pass
class AnsibleModuleImportError(ImportError):
pass
class AnsibleModuleNotInitialized(Exception):
pass
class _FakeAnsibleModuleInit:
def __init__(self):
self.args = tuple()
self.kwargs = {}
self.called = False
def __call__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.called = True
raise AnsibleModuleCallError('AnsibleModuleCallError')
def _fake_load_params():
pass
@contextmanager
def setup_env(filename):
# Used to clean up imports later
pre_sys_modules = list(sys.modules.keys())
fake = _FakeAnsibleModuleInit()
module = __import__('ansible.module_utils.basic').module_utils.basic
_original_init = module.AnsibleModule.__init__
_original_load_params = module._load_params
setattr(module.AnsibleModule, '__init__', fake)
setattr(module, '_load_params', _fake_load_params)
try:
yield fake
finally:
setattr(module.AnsibleModule, '__init__', _original_init)
setattr(module, '_load_params', _original_load_params)
# Clean up imports to prevent issues with mutable data being used in modules
for k in list(sys.modules.keys()):
# It's faster if we limit to items in ansible.module_utils
# But if this causes problems later, we should remove it
if k not in pre_sys_modules and k.startswith('ansible.module_utils.'):
del sys.modules[k]
def get_ps_argument_spec(filename, collection):
fqc_name = get_module_name_from_filename(filename, collection)
pwsh = find_executable('pwsh')
if not pwsh:
raise FileNotFoundError('Required program for PowerShell arg spec inspection "pwsh" not found.')
module_path = os.path.join(os.getcwd(), filename)
b_module_path = to_bytes(module_path, errors='surrogate_or_strict')
with open(b_module_path, mode='rb') as module_fd:
b_module_data = module_fd.read()
ps_dep_finder = PSModuleDepFinder()
ps_dep_finder.scan_module(b_module_data, fqn=fqc_name)
# For ps_argspec.ps1 to compile Ansible.Basic it also needs the AddType module_util.
ps_dep_finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1", None), wrapper=False)
util_manifest = json.dumps({
'module_path': to_text(module_path, errors='surrogate_or_strict'),
'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'],
'ps_utils': dict([(name, info['path']) for name, info in ps_dep_finder.ps_modules.items()]),
})
script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1')
proc = subprocess.Popen([script_path, util_manifest], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (stdout.decode('utf-8'), stderr.decode('utf-8')))
kwargs = json.loads(stdout)
# the validate-modules code expects the options spec to be under the argument_spec key not options as set in PS
kwargs['argument_spec'] = kwargs.pop('options', {})
return kwargs['argument_spec'], (), kwargs
def get_py_argument_spec(filename, collection):
name = get_module_name_from_filename(filename, collection)
with setup_env(filename) as fake:
try:
with CaptureStd():
runpy.run_module(name, run_name='__main__', alter_sys=True)
except AnsibleModuleCallError:
pass
except BaseException as e:
# we want to catch all exceptions here, including sys.exit
reraise(AnsibleModuleImportError, AnsibleModuleImportError('%s' % e), sys.exc_info()[2])
if not fake.called:
raise AnsibleModuleNotInitialized()
try:
# for ping kwargs == {'argument_spec':{'data':{'type':'str','default':'pong'}}, 'supports_check_mode':True}
if 'argument_spec' in fake.kwargs:
argument_spec = fake.kwargs['argument_spec']
else:
argument_spec = fake.args[0]
# If add_file_common_args is truthy, add options from FILE_COMMON_ARGUMENTS when not present.
# This is the only modification to argument_spec done by AnsibleModule itself, and it is
# not caught by setup_env's AnsibleModule replacement
if fake.kwargs.get('add_file_common_args'):
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in argument_spec:
argument_spec[k] = v
return argument_spec, fake.args, fake.kwargs
except (TypeError, IndexError):
return {}, (), {}
def get_argument_spec(filename, collection):
if filename.endswith('.py'):
return get_py_argument_spec(filename, collection)
else:
return get_ps_argument_spec(filename, collection)
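# Editor's note: a hedged usage sketch (the module path below is hypothetical).
# For a Python module the helper returns the argument spec plus the raw call
# arguments; e.g. for ping (per the comment in get_py_argument_spec above):
#
#     spec, args, kwargs = get_argument_spec('lib/ansible/modules/ping.py', collection=None)
#     # spec == {'data': {'type': 'str', 'default': 'pong'}}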
|
PypiClean
|
/get_cover_art-1.8.0-py3-none-any.whl/get_cover_art/__main__.py
|
import argparse, os
from .cover_finder import CoverFinder, DEFAULTS
# This script searches Apple Music for artwork that is missing from your library.
# It saves the artwork alongside the audio and embeds the artwork into the meta tags.
# By default it will scan from the current working directory; you can override this
# with command-line parameters or arguments passed into scan_folder().
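# Illustrative invocations (editor's sketch; paths and values are made up):
#
#     python -m get_cover_art                           # scan the current folder recursively
#     python -m get_cover_art --path ~/Music --art-size 600 --art-dest ~/Covers
#     python -m get_cover_art --test --verbose          # scan and download only, don't embed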
def check_art_size(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("art-size must be a positive integer")
return ivalue
def check_art_quality(value):
ivalue = int(value)
if ivalue < 1 or ivalue > 100:
raise argparse.ArgumentTypeError("art-quality must be between 1 and 100")
return ivalue
def check_throttle(value):
fvalue = float(value)
if fvalue < 0:
raise argparse.ArgumentTypeError("throtte cannot be negative")
return fvalue
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--path', help="audio file, or folder of audio files (recursive)", default=".")
parser_art = parser.add_argument_group('artwork options')
parser_art.add_argument('--art-size', type=check_art_size, help="square dimensions of artwork (default: 500)", default=DEFAULTS.get('art_size'))
parser_art.add_argument('--art-quality', type=check_art_quality, help="jpeg compression quality (1-100, default: auto)", default=DEFAULTS.get('art_quality'))
parser_art.add_argument('--art-dest', '--dest', help="set artwork destination folder", default=DEFAULTS.get('cover_art'))
parser_art.add_argument('--art-dest-inline', '--inline', help="put artwork in same folders as audio files", action='store_true')
parser_art.add_argument('--art-dest-filename', default=DEFAULTS.get('art_dest_filename'), help="set artwork destination filename format. Accepts {artist}, {album}, {album_or_title}, {filename}, and {title}. Default is '{artist} - {album_or_title}.jpg'")
parser_art.add_argument('--external-art-mode', choices=['before', 'after', 'none'], default=DEFAULTS.get('external_art_mode'), help='Use image from local folder; "before" prevents downloads, "after" uses as a fallback. Default is none.')
parser_art.add_argument('--external-art-filename', default=DEFAULTS.get('external_art_filename'), help="Filename(s) of folder art to use for preexisting external art. Accepts {artist}, {album}, and {title} for replacement: e.g. cover.jpg or {album}-{artist}.jpg ; this does not affect the filename for art that must be fetched (use --art-dest-filename for that).", nargs="+")
parser_behavior = parser.add_argument_group('behavior options')
parser_behavior.add_argument('--test', '--no-embed', '--no_embed', help="scan and download only, don't embed artwork", action='store_true')
parser_behavior.add_argument('--clear', help="clear artwork from audio file (regardless of finding art)", action='store_true')
parser_behavior.add_argument('--cleanup', help="remove downloaded artwork files afterward", action='store_true')
parser_behavior.add_argument('--no-download', '--no_download', help="embed only previously-downloaded artwork", action='store_true')
parser_behavior.add_argument('--force', help="overwrite existing artwork", action='store_true')
parser_behavior.add_argument('--verbose', help="print verbose logging", action='store_true')
parser_behavior.add_argument('--no-skip', '--no_skip', help="don't skip previously-scanned files", action='store_true')
parser_behavior.add_argument('--throttle', type=check_throttle, help="number of seconds to wait", default=0)
parser_filters = parser.add_argument_group('filter options')
parser_filters.add_argument('--skip-artwork', '--skip_artwork', help="(maintained between runs) file listing destination art files to skip", default=DEFAULTS.get('skip_artwork'))
parser_filters.add_argument('--skip-artists', '--skip_artists', help="file listing artists to skip", default=DEFAULTS.get('skip_artists'))
parser_filters.add_argument('--skip-albums', '--skip_albums', help="file listing albums to skip", default=DEFAULTS.get('skip_albums'))
return parser.parse_args()
def main():
args = get_args()
finder = CoverFinder(vars(args))
if os.path.isfile(args.path):
finder.scan_file(args.path)
else:
finder.scan_folder(args.path)
print()
num_processed = len(finder.files_processed)
num_skipped = len(finder.files_skipped)
num_failed = len(finder.files_failed)
print(f"Done! Processed: {num_processed}, Skipped: {num_skipped}, Failed: {num_failed}")
if not args.cleanup:
if finder.art_folder_override:
print(f"Artwork folder: {finder.art_folder_override}")
else:
print("Artwork files are alongside audio files.")
print()
if __name__ == '__main__':
main()
|
PypiClean
|
/funkiio-1.0.1-py3-none-any.whl/homeassistant/components/ios/__init__.py
|
import datetime
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import HTTP_BAD_REQUEST, HTTP_INTERNAL_SERVER_ERROR
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.util.json import load_json, save_json
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'ios'
CONF_PUSH = 'push'
CONF_PUSH_CATEGORIES = 'categories'
CONF_PUSH_CATEGORIES_NAME = 'name'
CONF_PUSH_CATEGORIES_IDENTIFIER = 'identifier'
CONF_PUSH_CATEGORIES_ACTIONS = 'actions'
CONF_PUSH_ACTIONS_IDENTIFIER = 'identifier'
CONF_PUSH_ACTIONS_TITLE = 'title'
CONF_PUSH_ACTIONS_ACTIVATION_MODE = 'activationMode'
CONF_PUSH_ACTIONS_AUTHENTICATION_REQUIRED = 'authenticationRequired'
CONF_PUSH_ACTIONS_DESTRUCTIVE = 'destructive'
CONF_PUSH_ACTIONS_BEHAVIOR = 'behavior'
CONF_PUSH_ACTIONS_CONTEXT = 'context'
CONF_PUSH_ACTIONS_TEXT_INPUT_BUTTON_TITLE = 'textInputButtonTitle'
CONF_PUSH_ACTIONS_TEXT_INPUT_PLACEHOLDER = 'textInputPlaceholder'
ATTR_FOREGROUND = 'foreground'
ATTR_BACKGROUND = 'background'
ACTIVATION_MODES = [ATTR_FOREGROUND, ATTR_BACKGROUND]
ATTR_DEFAULT_BEHAVIOR = 'default'
ATTR_TEXT_INPUT_BEHAVIOR = 'textInput'
BEHAVIORS = [ATTR_DEFAULT_BEHAVIOR, ATTR_TEXT_INPUT_BEHAVIOR]
ATTR_LAST_SEEN_AT = 'lastSeenAt'
ATTR_DEVICE = 'device'
ATTR_PUSH_TOKEN = 'pushToken'
ATTR_APP = 'app'
ATTR_PERMISSIONS = 'permissions'
ATTR_PUSH_ID = 'pushId'
ATTR_DEVICE_ID = 'deviceId'
ATTR_PUSH_SOUNDS = 'pushSounds'
ATTR_BATTERY = 'battery'
ATTR_DEVICE_NAME = 'name'
ATTR_DEVICE_LOCALIZED_MODEL = 'localizedModel'
ATTR_DEVICE_MODEL = 'model'
ATTR_DEVICE_PERMANENT_ID = 'permanentID'
ATTR_DEVICE_SYSTEM_VERSION = 'systemVersion'
ATTR_DEVICE_TYPE = 'type'
ATTR_DEVICE_SYSTEM_NAME = 'systemName'
ATTR_APP_BUNDLE_IDENTIFIER = 'bundleIdentifier'
ATTR_APP_BUILD_NUMBER = 'buildNumber'
ATTR_APP_VERSION_NUMBER = 'versionNumber'
ATTR_LOCATION_PERMISSION = 'location'
ATTR_NOTIFICATIONS_PERMISSION = 'notifications'
PERMISSIONS = [ATTR_LOCATION_PERMISSION, ATTR_NOTIFICATIONS_PERMISSION]
ATTR_BATTERY_STATE = 'state'
ATTR_BATTERY_LEVEL = 'level'
ATTR_BATTERY_STATE_UNPLUGGED = 'Not Charging'
ATTR_BATTERY_STATE_CHARGING = 'Charging'
ATTR_BATTERY_STATE_FULL = 'Full'
ATTR_BATTERY_STATE_UNKNOWN = 'Unknown'
BATTERY_STATES = [ATTR_BATTERY_STATE_UNPLUGGED, ATTR_BATTERY_STATE_CHARGING,
ATTR_BATTERY_STATE_FULL, ATTR_BATTERY_STATE_UNKNOWN]
ATTR_DEVICES = 'devices'
ACTION_SCHEMA = vol.Schema({
vol.Required(CONF_PUSH_ACTIONS_IDENTIFIER): vol.Upper,
vol.Required(CONF_PUSH_ACTIONS_TITLE): cv.string,
vol.Optional(CONF_PUSH_ACTIONS_ACTIVATION_MODE,
default=ATTR_BACKGROUND): vol.In(ACTIVATION_MODES),
vol.Optional(CONF_PUSH_ACTIONS_AUTHENTICATION_REQUIRED,
default=False): cv.boolean,
vol.Optional(CONF_PUSH_ACTIONS_DESTRUCTIVE,
default=False): cv.boolean,
vol.Optional(CONF_PUSH_ACTIONS_BEHAVIOR,
default=ATTR_DEFAULT_BEHAVIOR): vol.In(BEHAVIORS),
vol.Optional(CONF_PUSH_ACTIONS_TEXT_INPUT_BUTTON_TITLE): cv.string,
vol.Optional(CONF_PUSH_ACTIONS_TEXT_INPUT_PLACEHOLDER): cv.string,
}, extra=vol.ALLOW_EXTRA)
ACTION_SCHEMA_LIST = vol.All(cv.ensure_list, [ACTION_SCHEMA])
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {
CONF_PUSH: {
CONF_PUSH_CATEGORIES: vol.All(cv.ensure_list, [{
vol.Required(CONF_PUSH_CATEGORIES_NAME): cv.string,
vol.Required(CONF_PUSH_CATEGORIES_IDENTIFIER): vol.Lower,
vol.Required(CONF_PUSH_CATEGORIES_ACTIONS): ACTION_SCHEMA_LIST
}])
}
}
}, extra=vol.ALLOW_EXTRA)
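# Editor's note: an illustrative configuration.yaml snippet (category and
# action names are made up) that CONFIG_SCHEMA above would accept:
#
#     ios:
#       push:
#         categories:
#           - name: Alarm
#             identifier: alarm
#             actions:
#               - identifier: SOUND_ALARM
#                 title: Sound alarm
#                 activationMode: background
#                 authenticationRequired: true
#                 destructive: true
#                 behavior: default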
IDENTIFY_DEVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_DEVICE_LOCALIZED_MODEL): cv.string,
vol.Required(ATTR_DEVICE_MODEL): cv.string,
vol.Required(ATTR_DEVICE_PERMANENT_ID): cv.string,
vol.Required(ATTR_DEVICE_SYSTEM_VERSION): cv.string,
vol.Required(ATTR_DEVICE_TYPE): cv.string,
vol.Required(ATTR_DEVICE_SYSTEM_NAME): cv.string,
}, extra=vol.ALLOW_EXTRA)
IDENTIFY_DEVICE_SCHEMA_CONTAINER = vol.All(dict, IDENTIFY_DEVICE_SCHEMA)
IDENTIFY_APP_SCHEMA = vol.Schema({
vol.Required(ATTR_APP_BUNDLE_IDENTIFIER): cv.string,
vol.Required(ATTR_APP_BUILD_NUMBER): cv.positive_int,
vol.Optional(ATTR_APP_VERSION_NUMBER): cv.string
}, extra=vol.ALLOW_EXTRA)
IDENTIFY_APP_SCHEMA_CONTAINER = vol.All(dict, IDENTIFY_APP_SCHEMA)
IDENTIFY_BATTERY_SCHEMA = vol.Schema({
vol.Required(ATTR_BATTERY_LEVEL): cv.positive_int,
vol.Required(ATTR_BATTERY_STATE): vol.In(BATTERY_STATES)
}, extra=vol.ALLOW_EXTRA)
IDENTIFY_BATTERY_SCHEMA_CONTAINER = vol.All(dict, IDENTIFY_BATTERY_SCHEMA)
IDENTIFY_SCHEMA = vol.Schema({
vol.Required(ATTR_DEVICE): IDENTIFY_DEVICE_SCHEMA_CONTAINER,
vol.Required(ATTR_BATTERY): IDENTIFY_BATTERY_SCHEMA_CONTAINER,
vol.Required(ATTR_PUSH_TOKEN): cv.string,
vol.Required(ATTR_APP): IDENTIFY_APP_SCHEMA_CONTAINER,
vol.Required(ATTR_PERMISSIONS): vol.All(cv.ensure_list,
[vol.In(PERMISSIONS)]),
vol.Required(ATTR_PUSH_ID): cv.string,
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Optional(ATTR_PUSH_SOUNDS): list
}, extra=vol.ALLOW_EXTRA)
CONFIGURATION_FILE = '.ios.conf'
def devices_with_push(hass):
"""Return a dictionary of push enabled targets."""
targets = {}
for device_name, device in hass.data[DOMAIN][ATTR_DEVICES].items():
if device.get(ATTR_PUSH_ID) is not None:
targets[device_name] = device.get(ATTR_PUSH_ID)
return targets
def enabled_push_ids(hass):
"""Return a list of push enabled target push IDs."""
push_ids = list()
for device in hass.data[DOMAIN][ATTR_DEVICES].values():
if device.get(ATTR_PUSH_ID) is not None:
push_ids.append(device.get(ATTR_PUSH_ID))
return push_ids
def devices(hass):
"""Return a dictionary of all identified devices."""
return hass.data[DOMAIN][ATTR_DEVICES]
def device_name_for_push_id(hass, push_id):
"""Return the device name for the push ID."""
for device_name, device in hass.data[DOMAIN][ATTR_DEVICES].items():
if device.get(ATTR_PUSH_ID) is push_id:
return device_name
return None
async def async_setup(hass, config):
"""Set up the iOS component."""
conf = config.get(DOMAIN)
ios_config = await hass.async_add_executor_job(
load_json, hass.config.path(CONFIGURATION_FILE))
if ios_config == {}:
ios_config[ATTR_DEVICES] = {}
ios_config[CONF_PUSH] = (conf or {}).get(CONF_PUSH, {})
hass.data[DOMAIN] = ios_config
# No entry support for notify component yet
discovery.load_platform(hass, 'notify', DOMAIN, {}, config)
if conf is not None:
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT}))
return True
async def async_setup_entry(hass, entry):
"""Set up an iOS entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, 'sensor'))
hass.http.register_view(
iOSIdentifyDeviceView(hass.config.path(CONFIGURATION_FILE)))
hass.http.register_view(iOSPushConfigView(hass.data[DOMAIN][CONF_PUSH]))
return True
# pylint: disable=invalid-name
class iOSPushConfigView(HomeAssistantView):
"""A view that provides the push categories configuration."""
url = '/api/ios/push'
name = 'api:ios:push'
def __init__(self, push_config):
"""Init the view."""
self.push_config = push_config
@callback
def get(self, request):
"""Handle the GET request for the push configuration."""
return self.json(self.push_config)
class iOSIdentifyDeviceView(HomeAssistantView):
"""A view that accepts device identification requests."""
url = '/api/ios/identify'
name = 'api:ios:identify'
def __init__(self, config_path):
"""Initiliaze the view."""
self._config_path = config_path
async def post(self, request):
"""Handle the POST request for device identification."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
hass = request.app['hass']
# Commented for now while iOS app is getting frequent updates
# try:
# data = IDENTIFY_SCHEMA(req_data)
# except vol.Invalid as ex:
# return self.json_message(
# vol.humanize.humanize_error(request.json, ex),
# HTTP_BAD_REQUEST)
data[ATTR_LAST_SEEN_AT] = datetime.datetime.now().isoformat()
name = data.get(ATTR_DEVICE_ID)
hass.data[DOMAIN][ATTR_DEVICES][name] = data
try:
save_json(self._config_path, hass.data[DOMAIN])
except HomeAssistantError:
return self.json_message("Error saving device.",
HTTP_INTERNAL_SERVER_ERROR)
return self.json({"status": "registered"})
|
PypiClean
|
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/drawing/util/common.js
|
if(!dojo._hasResource["dojox.drawing.util.common"]){
dojo._hasResource["dojox.drawing.util.common"]=true;
dojo.provide("dojox.drawing.util.common");
dojo.require("dojox.math.round");
(function(){
var _1={};
dojox.drawing.util.common={radToDeg:function(n){
return (n*180)/Math.PI;
},degToRad:function(n){
return (n*Math.PI)/180;
},angle:function(_2,_3){
if(_3){
_3=_3/180;
var _4=this.radians(_2),_5=this.length(_2),_6=Math.PI*_3,_7=dojox.math.round(_4/_6),_8=_7*_6;
return dojox.math.round(this.radToDeg(_8));
}else{
return this.radToDeg(this.radians(_2));
}
},radians:function(o){
return Math.atan2(o.start.y-o.y,o.start.x-o.x);
},length:function(o){
return Math.sqrt(Math.pow(o.start.x-o.x,2)+Math.pow(o.start.y-o.y,2));
},lineSub:function(x1,y1,x2,y2,_9){
var _a=this.distance(this.argsToObj.apply(this,arguments));
_a=_a<_9?_9:_a;
var pc=(_a-_9)/_a;
var x=x1-(x1-x2)*pc;
var y=y1-(y1-y2)*pc;
return {x:x,y:y};
},argsToObj:function(){
var a=arguments;
if(a.length<4){
return a[0];
}
return {start:{x:a[0],y:a[1]},x:a[2],y:a[3]};
},distance:function(){
var o=this.argsToObj.apply(this,arguments);
return Math.abs(Math.sqrt(Math.pow(o.start.x-o.x,2)+Math.pow(o.start.y-o.y,2)));
},slope:function(p1,p2){
if(!(p1.x-p2.x)){
return 0;
}
return ((p1.y-p2.y)/(p1.x-p2.x));
},pointOnCircle:function(cx,cy,_b,_c){
var radians=_c*Math.PI/180;
var x=_b*Math.cos(radians)*-1;
var y=_b*Math.sin(radians)*-1;
return {x:cx+x,y:cy+y};
},constrainAngle:function(_d,_e,_f){
var _10=this.angle(_d);
if(_10>=_e&&_10<=_f){
return _d;
}
var _11=this.length(_d);
var _12=_e-((360-(_f-_e))/2);
var _13=_10>_f?_f:_e-_10<100?_e:_f;
return this.pointOnCircle(_d.start.x,_d.start.y,_11,_13);
},snapAngle:function(obj,ca){
var _14=this.radians(obj),_15=this.angle(obj),_16=this.length(obj),seg=Math.PI*ca,rnd=Math.round(_14/seg),_17=rnd*seg,_18=this.radToDeg(_17),pt=this.pointOnCircle(obj.start.x,obj.start.y,_16,_18);
return pt;
},uid:function(str){
str=str||"shape";
_1[str]=_1[str]===undefined?0:_1[str]+1;
return str+_1[str];
},abbr:function(_19){
return _19.substring(_19.lastIndexOf(".")+1).charAt(0).toLowerCase()+_19.substring(_19.lastIndexOf(".")+2);
},mixin:function(o1,o2){
},objects:{},register:function(obj){
this.objects[obj.id]=obj;
},byId:function(id){
return this.objects[id];
},attr:function(_1a,_1b,_1c,_1d){
if(!_1a){
return false;
}
try{
if(_1a.shape&&_1a.util){
_1a=_1a.shape;
}
if(!_1c&&_1b=="id"&&_1a.target){
var n=_1a.target;
while(!dojo.attr(n,"id")){
n=n.parentNode;
}
return dojo.attr(n,"id");
}
if(_1a.rawNode||_1a.target){
var _1e=Array.prototype.slice.call(arguments);
_1e[0]=_1a.rawNode||_1a.target;
return dojo.attr.apply(dojo,_1e);
}
return dojo.attr(_1a,"id");
}
catch(e){
if(!_1d){
}
return false;
}
}};
})();
}
|
PypiClean
|
/invenio-explicit-acls-4.5.0.tar.gz/invenio-explicit-acls-4.5.0/invenio_explicit_acls/models.py
|
"""A module implementing base ACL classes."""
import datetime
import json
import os
import uuid
from abc import abstractmethod
from typing import Dict, Iterable, Union
import elasticsearch
from elasticsearch_dsl import Q
from flask_security import AnonymousUser
from invenio_accounts.models import User
from invenio_db import db
from invenio_records import Record
from invenio_search import current_search_client
from sqlalchemy import func
from sqlalchemy.util import classproperty
from sqlalchemy_utils import Timestamp
from invenio_explicit_acls.utils import schema_to_index
from .es import add_doc_type
try:
from psycopg2 import apilevel
from sqlalchemy.dialects.postgresql import ARRAY
from .utils import ArrayType as fallback_array
fallback_StringArray = fallback_array(db.String(length=1024))
StringArray = ARRAY(db.String).with_variant(fallback_StringArray, 'sqlite')
except ImportError:
# array represented in String field
from .utils import ArrayType as ARRAY
StringArray = ARRAY(db.String(length=1024))
def gen_uuid_key():
return str(uuid.uuid4())
class ACL(db.Model, Timestamp):
"""
An abstract class for ACLs.
It defines:
1. The priority of the ACL.
Only the ACLs with the highest priority are taken into account when record ACLs are created
2. The schemas for the resources that are handled by the ACL
3. Operation that the ACL handles. It might be any string; to use the helpers in
invenio_explicit_acls.permissions.py the operation needs to be one of 'get', 'update' or 'delete'
Subclasses of this class define the selection process which Records are handled by the given ACL - for example,
"only the record with the given UUID", "all records in the schema", "records whose metadata in elasticsearch
are matched by ES query" etc.
"""
__tablename__ = 'explicit_acls_acl'
id = db.Column(
db.String(36),
default=gen_uuid_key,
primary_key=True
)
"""Primary key."""
name = db.Column(
db.String(64),
nullable=False
)
"""Human readable name/description"""
priority = db.Column(
db.Integer,
default=0)
"""Priority of the acl rule. Only the applicable rules with the highest priority
within a group get applied to the resource"""
priority_group = db.Column(
db.String(32),
default='default'
)
"""ACL Priority group"""
schemas = db.Column(StringArray)
"""
Set of record schemas that this ACL handles.
Note that the schemas must be relative, for example records/record-v1.0.0.json.
"""
originator_id = db.Column(db.ForeignKey(User.id, ondelete='CASCADE', ),
nullable=False, index=True)
originator = db.relationship(
User,
backref=db.backref("authored_acls"))
"""The originator (person that last time modified the ACL)"""
type = db.Column(db.String(50))
"""Type for polymorphism"""
operation = db.Column(db.String(50))
"""An operation that actors can make"""
actors = db.relationship("Actor", back_populates="acl")
"""A set of actors for this ACL (who have rights to perform an operation this ACL references)"""
__mapper_args__ = {
'polymorphic_identity': 'acl',
'polymorphic_on': type
}
@classmethod
@abstractmethod
def get_record_acls(clz, record: Record) -> Iterable['ACL']:
"""
Returns a list of ACL objects applicable for the given record.
:param record: Invenio record
"""
raise NotImplementedError('Must be implemented')
@classmethod
@abstractmethod
def prepare_schema_acls(self, schema):
"""
Prepare ACLs for the given index.
:param schema: schema for which to prepare the ACLs
"""
raise NotImplementedError('Must be implemented')
@abstractmethod
def get_matching_resources(self) -> Iterable[str]:
"""
Get resources that match the ACL.
:return: iterable of resource ids
"""
raise NotImplementedError('Must be implemented')
@abstractmethod
def update(self):
"""Update any internal representation / index for the acl."""
raise NotImplementedError('Must be implemented')
@abstractmethod
def delete(self):
"""If the ACL writes itself to any other representation (such as ES index), delete it from there."""
raise NotImplementedError('Must be implemented')
@classmethod
def enabled_schemas(clz) -> Iterable[str]:
"""Returns all schemas that have at least one ACL defined on them."""
schemas = set()
if db.engine.dialect.name == 'postgresql':
# postgresql has array field, so return it from the array
for acl_schemas in db.session.query(func.unnest(ACL.schemas)).distinct().all():
schemas.update(acl_schemas)
else:
# otherwise iterate over all the ACLs and hope there are not too many of them
for acl in ACL.query.all():
for schema in acl.schemas:
schemas.add(schema)
return schemas
def used_in_records(self, older_than_timestamp=None):
"""
Returns IDs of all records that reference the ACL in cached acls in elasticsearch.
:param older_than_timestamp: only restrict to records where
the cached ACLs are older than the timestamp
:return: An iterable of Record IDs
"""
for schema in self.schemas:
index, doc_type = schema_to_index(schema)
query = [
{
"term": {
"_invenio_explicit_acls.id": str(self.id)
}
}
]
if older_than_timestamp:
if isinstance(older_than_timestamp, datetime.datetime):
older_than_timestamp = older_than_timestamp.isoformat()
query.append(
{
"range": {
"_invenio_explicit_acls.timestamp": {
"lt": older_than_timestamp
}
}
}
)
query = {
"nested": {
"path": "_invenio_explicit_acls",
"score_mode": "min",
"query": {
"bool": {
"must": query
}
}
}
}
for doc in elasticsearch.helpers.scan(
current_search_client,
query={
"query": query,
"_source": False,
},
index=index,
**add_doc_type(doc_type)
):
yield doc['_id']
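# Editor's note: a sketch (ACL id and timestamp are invented) of the nested
# query that used_in_records() above sends to Elasticsearch when
# older_than_timestamp is given:
#
#     {
#       "nested": {
#         "path": "_invenio_explicit_acls",
#         "score_mode": "min",
#         "query": {
#           "bool": {
#             "must": [
#               {"term": {"_invenio_explicit_acls.id": "<acl id>"}},
#               {"range": {"_invenio_explicit_acls.timestamp": {"lt": "2020-01-01T00:00:00"}}}
#             ]
#           }
#         }
#       }
#     }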
class Actor(db.Model, Timestamp):
"""
An abstract class for ACL actors.
An Actor defines which users are given a permission to a record
matched by ACL class.
"""
__tablename__ = 'explicit_acls_actor'
id = db.Column(
db.String(36),
default=gen_uuid_key,
primary_key=True
)
name = db.Column(
db.String(64)
)
type = db.Column(db.String(50))
acl_id = db.Column(db.ForeignKey('explicit_acls_acl.id'))
acl = db.relationship("ACL", back_populates="actors")
originator_id = db.Column(db.ForeignKey(User.id, ondelete='CASCADE', ),
nullable=False, index=True)
originator = db.relationship(
User,
backref=db.backref("authored_actors"))
"""The originator (person that last time modified the ACL)"""
__mapper_args__ = {
'polymorphic_identity': 'operation',
'polymorphic_on': type
}
@classmethod
@abstractmethod
def get_elasticsearch_schema(clz, es_version):
"""
Returns the elasticsearch schema for the _invenio_explicit_acls property.
The property looks like::
_invenio_explicit_acls [{
"timestamp": "...when the ACL has been applied to the resource",
"acl": <id of the acl>,
"operation": name of the operation
self.type: <the returned schema>
}]
:return:
"""
raise NotImplementedError("Must be implemented")
@abstractmethod
def get_elasticsearch_representation(self, another=None, record=None, **kwargs):
"""
Returns ES representation of this Actor.
For a resource that matches ACL all the actors are serialized into _invenio_explicit_acls property::
_invenio_explicit_acls [{
"timestamp": "...when the ACL has been applied to the resource",
"acl": <id of the acl>,
"operation": name of the operation
self.type: self.get_elasticsearch_representation()
}]
:param another: A serialized representation of the previous Actor of the same type.
The implementation should merge it with its own ES representation
:return: The elasticsearch representation of the property on Record
"""
raise NotImplementedError("Must be implemented")
@classmethod
@abstractmethod
def get_elasticsearch_query(clz, user: Union[User, AnonymousUser], context: Dict) -> Q or None:
"""
Returns elasticsearch query (elasticsearch_dls.Q) for the ACL.
This is the counterpart of get_elasticsearch_representation and will be placed inside "nested" query
_invenio_explicit_acls
:param user: the user to be checked
:param context: any extra context carrying information about the user
:return: elasticsearch query that enforces the user
"""
raise NotImplementedError("Must be implemented")
@abstractmethod
def user_matches(self, user: Union[User, AnonymousUser], context: Dict, record: Record = None) -> bool:
"""
Checks if a user is allowed to perform any operation according to the ACL.
:param user: user being checked against the ACL
:param context: any extra context carrying information about the user
"""
raise NotImplementedError('Must be implemented')
@abstractmethod
def get_matching_users(self, record: Record = None) -> Iterable[int]:
"""
Returns a list of users matching this Actor.
:return: Iterable of a user ids
"""
raise NotImplementedError('Must be implemented')
__all__ = [
'ACL',
'Actor'
]
|
PypiClean
|
/ansible-8.3.0-py3-none-any.whl/ansible_collections/vmware/vmware_rest/plugins/modules/vcenter_vm_hardware_ethernet.py
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: vcenter_vm_hardware_ethernet
short_description: Adds a virtual Ethernet adapter to the virtual machine.
description: Adds a virtual Ethernet adapter to the virtual machine.
options:
allow_guest_control:
description:
- Flag indicating whether the guest can connect and disconnect the device.
type: bool
backing:
description:
- Physical resource backing for the virtual Ethernet adapter. Required with
I(state=['present'])
- 'Valid attributes are:'
- ' - C(type) (str): The C(backing_type) defines the valid backing types for
a virtual Ethernet adapter. ([''present''])'
- ' This key is required with [''present''].'
- ' - Accepted values:'
- ' - DISTRIBUTED_PORTGROUP'
- ' - HOST_DEVICE'
- ' - OPAQUE_NETWORK'
- ' - STANDARD_PORTGROUP'
- ' - C(network) (str): Identifier of the network that backs the virtual Ethernet
adapter. ([''present''])'
- ' - C(distributed_port) (str): Key of the distributed virtual port that
backs the virtual Ethernet adapter. Depending on the type of the Portgroup,
the port may be specified using this field. If the portgroup type is early-binding
(also known as static), a port is assigned when the Ethernet adapter is
configured to use the port. The port may be either automatically or specifically
assigned based on the value of this field. If the portgroup type is ephemeral,
the port is created and assigned to a virtual machine when it is powered
on and the Ethernet adapter is connected. This field cannot be specified
as no free ports exist before use. ([''present''])'
type: dict
label:
description:
- The name of the item
type: str
mac_address:
description:
- MAC address. This field may be modified at any time, and changes will be
applied the next time the virtual machine is powered on.
type: str
mac_type:
choices:
- ASSIGNED
- GENERATED
- MANUAL
description:
- The C(mac_address_type) defines the valid MAC address origins for a virtual
Ethernet adapter.
type: str
nic:
description:
- Virtual Ethernet adapter identifier. Required with I(state=['absent', 'connect',
'disconnect', 'present'])
type: str
pci_slot_number:
description:
- Address of the virtual Ethernet adapter on the PCI bus. If the PCI address
is invalid, the server will change it when the VM is started or as the
device is hot added.
type: int
session_timeout:
description:
- 'Timeout settings for client session. '
- 'The maximal number of seconds for the whole operation including connection
establishment, request sending and response. '
- The default value is 300s.
type: float
version_added: 2.1.0
start_connected:
description:
- Flag indicating whether the virtual device should be connected whenever
the virtual machine is powered on.
type: bool
state:
choices:
- absent
- connect
- disconnect
- present
default: present
description: []
type: str
type:
choices:
- E1000
- E1000E
- PCNET32
- VMXNET
- VMXNET2
- VMXNET3
description:
- The C(emulation_type) defines the valid emulation types for a virtual Ethernet
adapter.
type: str
upt_compatibility_enabled:
description:
- Flag indicating whether Universal Pass-Through (UPT) compatibility should
be enabled on this virtual Ethernet adapter. This field may be modified
at any time, and changes will be applied the next time the virtual machine
is powered on.
type: bool
vcenter_hostname:
description:
- The hostname or IP address of the vSphere vCenter
- If the value is not specified in the task, the value of environment variable
C(VMWARE_HOST) will be used instead.
required: true
type: str
vcenter_password:
description:
- The vSphere vCenter password
- If the value is not specified in the task, the value of environment variable
C(VMWARE_PASSWORD) will be used instead.
required: true
type: str
vcenter_rest_log_file:
description:
- 'You can use this optional parameter to set the location of a log file. '
- 'This file will be used to record the HTTP REST interaction. '
- 'The file will be stored on the host that runs the module. '
- 'If the value is not specified in the task, the value of '
- environment variable C(VMWARE_REST_LOG_FILE) will be used instead.
type: str
vcenter_username:
description:
- The vSphere vCenter username
- If the value is not specified in the task, the value of environment variable
C(VMWARE_USER) will be used instead.
required: true
type: str
vcenter_validate_certs:
default: true
description:
- Allows connection when SSL certificates are not valid. Set to C(false) when
certificates are not trusted.
- If the value is not specified in the task, the value of environment variable
C(VMWARE_VALIDATE_CERTS) will be used instead.
type: bool
vm:
description:
- Virtual machine identifier. This parameter is mandatory.
required: true
type: str
wake_on_lan_enabled:
description:
- Flag indicating whether wake-on-LAN should be enabled on this virtual Ethernet
adapter. This field may be modified at any time, and changes will be applied
the next time the virtual machine is powered on.
type: bool
author:
- Ansible Cloud Team (@ansible-collections)
version_added: 0.1.0
requirements:
- vSphere 7.0.2 or greater
- python >= 3.6
- aiohttp
notes:
- Tested on vSphere 7.0.2
"""
EXAMPLES = r"""
- name: Get the dvswitch called my-portgroup
vmware.vmware_rest.vcenter_network_info:
filter_types: DISTRIBUTED_PORTGROUP
filter_names: my-portgroup
register: my_portgroup
- name: Look up the VM called test_vm1 in the inventory
register: search_result
vmware.vmware_rest.vcenter_vm_info:
filter_names:
- test_vm1
- name: Collect information about a specific VM
vmware.vmware_rest.vcenter_vm_info:
vm: '{{ search_result.value[0].vm }}'
register: test_vm1_info
- name: Attach a VM to a dvswitch
vmware.vmware_rest.vcenter_vm_hardware_ethernet:
vm: '{{ test_vm1_info.id }}'
pci_slot_number: 4
backing:
type: DISTRIBUTED_PORTGROUP
network: '{{ my_portgroup.value[0].network }}'
start_connected: false
register: vm_hardware_ethernet_1
- name: Turn the NIC's start_connected flag on
vmware.vmware_rest.vcenter_vm_hardware_ethernet:
nic: '{{ vm_hardware_ethernet_1.id }}'
start_connected: true
vm: '{{ test_vm1_info.id }}'
- name: Attach the VM to a standard portgroup
vmware.vmware_rest.vcenter_vm_hardware_ethernet:
vm: '{{ test_vm1_info.id }}'
pci_slot_number: 4
backing:
type: STANDARD_PORTGROUP
network: "{{ lookup('vmware.vmware_rest.network_moid', '/my_dc/network/VM Network') }}"
register: _result
- name: Attach the VM to a standard portgroup (again)
vmware.vmware_rest.vcenter_vm_hardware_ethernet:
vm: '{{ test_vm1_info.id }}'
pci_slot_number: 4
backing:
type: STANDARD_PORTGROUP
network: "{{ lookup('vmware.vmware_rest.network_moid', '/my_dc/network/VM Network') }}"
register: _result
- name: Collect a list of the NIC for a given VM
vmware.vmware_rest.vcenter_vm_hardware_ethernet_info:
vm: '{{ test_vm1_info.id }}'
register: vm_nic
- name: Attach the VM to a standard portgroup (again) using the nic ID
vmware.vmware_rest.vcenter_vm_hardware_ethernet:
vm: '{{ test_vm1_info.id }}'
nic: '{{ vm_nic.value[0].nic }}'
backing:
type: STANDARD_PORTGROUP
network: "{{ lookup('vmware.vmware_rest.network_moid', '/my_dc/network/VM Network') }}"
register: _result
- name: Attach to another standard portgroup
vmware.vmware_rest.vcenter_vm_hardware_ethernet:
vm: '{{ test_vm1_info.id }}'
nic: '{{ vm_nic.value[0].nic }}'
backing:
type: STANDARD_PORTGROUP
network: "{{ lookup('vmware.vmware_rest.network_moid', '/my_dc/network/second_vswitch') }}"
register: _result
"""
RETURN = r"""
# content generated by the update_return_section callback
# task: Attach a VM to a dvswitch
id:
description: moid of the resource
returned: On success
sample: '4000'
type: str
value:
description: Attach a VM to a dvswitch
returned: On success
sample:
allow_guest_control: 0
backing:
connection_cookie: 632732945
distributed_port: '2'
distributed_switch_uuid: 50 31 d3 c4 2d 09 4f e3-0f d6 7f 30 3d fe d4 a0
network: dvportgroup-1022
type: DISTRIBUTED_PORTGROUP
label: Network adapter 1
mac_address: 00:50:56:b1:33:76
mac_type: ASSIGNED
pci_slot_number: 4
start_connected: 0
state: NOT_CONNECTED
type: VMXNET3
upt_compatibility_enabled: 0
wake_on_lan_enabled: 0
type: dict
"""
# This structure describes the format of the data expected by the end-points
PAYLOAD_FORMAT = {
"update": {
"query": {},
"body": {
"allow_guest_control": "allow_guest_control",
"backing": "backing",
"mac_address": "mac_address",
"mac_type": "mac_type",
"start_connected": "start_connected",
"upt_compatibility_enabled": "upt_compatibility_enabled",
"wake_on_lan_enabled": "wake_on_lan_enabled",
},
"path": {"nic": "nic", "vm": "vm"},
},
"create": {
"query": {},
"body": {
"allow_guest_control": "allow_guest_control",
"backing": "backing",
"mac_address": "mac_address",
"mac_type": "mac_type",
"pci_slot_number": "pci_slot_number",
"start_connected": "start_connected",
"type": "type",
"upt_compatibility_enabled": "upt_compatibility_enabled",
"wake_on_lan_enabled": "wake_on_lan_enabled",
},
"path": {"vm": "vm"},
},
"delete": {"query": {}, "body": {}, "path": {"nic": "nic", "vm": "vm"}},
"disconnect": {"query": {}, "body": {}, "path": {"nic": "nic", "vm": "vm"}},
"connect": {"query": {}, "body": {}, "path": {"nic": "nic", "vm": "vm"}},
} # pylint: disable=line-too-long
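# Editorial note (not generated code): judging from the helpers used below,
# each operation copies the parameters listed under "body" into the JSON
# payload, substitutes the "path" keys into the request URL, and appends any
# "query" keys as URL arguments. A hypothetical create call would therefore
# look roughly like:
#   POST https://{vcenter_hostname}/api/vcenter/vm/{vm}/hardware/ethernet
#   {"type": "VMXNET3", "start_connected": false, "backing": {...}}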
import json
import socket
from ansible.module_utils.basic import env_fallback
try:
from ansible_collections.cloud.common.plugins.module_utils.turbo.exceptions import (
EmbeddedModuleFailure,
)
from ansible_collections.cloud.common.plugins.module_utils.turbo.module import (
AnsibleTurboModule as AnsibleModule,
)
AnsibleModule.collection_name = "vmware.vmware_rest"
except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vmware.vmware_rest.plugins.module_utils.vmware_rest import (
build_full_device_list,
exists,
gen_args,
get_device_info,
get_subdevice_type,
list_devices,
open_session,
prepare_payload,
update_changed_flag,
session_timeout,
)
def prepare_argument_spec():
argument_spec = {
"vcenter_hostname": dict(
type="str", required=True, fallback=(env_fallback, ["VMWARE_HOST"]),
),
"vcenter_username": dict(
type="str", required=True, fallback=(env_fallback, ["VMWARE_USER"]),
),
"vcenter_password": dict(
type="str",
required=True,
no_log=True,
fallback=(env_fallback, ["VMWARE_PASSWORD"]),
),
"vcenter_validate_certs": dict(
type="bool",
required=False,
default=True,
fallback=(env_fallback, ["VMWARE_VALIDATE_CERTS"]),
),
"vcenter_rest_log_file": dict(
type="str",
required=False,
fallback=(env_fallback, ["VMWARE_REST_LOG_FILE"]),
),
"session_timeout": dict(
type="float",
required=False,
fallback=(env_fallback, ["VMWARE_SESSION_TIMEOUT"]),
),
}
argument_spec["allow_guest_control"] = {"type": "bool"}
argument_spec["backing"] = {"type": "dict"}
argument_spec["label"] = {"type": "str"}
argument_spec["mac_address"] = {"type": "str"}
argument_spec["mac_type"] = {
"type": "str",
"choices": ["ASSIGNED", "GENERATED", "MANUAL"],
}
argument_spec["nic"] = {"type": "str"}
argument_spec["pci_slot_number"] = {"type": "int"}
argument_spec["start_connected"] = {"type": "bool"}
argument_spec["state"] = {
"type": "str",
"choices": ["absent", "connect", "disconnect", "present"],
"default": "present",
}
argument_spec["type"] = {
"type": "str",
"choices": ["E1000", "E1000E", "PCNET32", "VMXNET", "VMXNET2", "VMXNET3"],
}
argument_spec["upt_compatibility_enabled"] = {"type": "bool"}
argument_spec["vm"] = {"required": True, "type": "str"}
argument_spec["wake_on_lan_enabled"] = {"type": "bool"}
return argument_spec
async def main():
required_if = list([])
module_args = prepare_argument_spec()
module = AnsibleModule(
argument_spec=module_args, required_if=required_if, supports_check_mode=True
)
if not module.params["vcenter_hostname"]:
module.fail_json("vcenter_hostname cannot be empty")
if not module.params["vcenter_username"]:
module.fail_json("vcenter_username cannot be empty")
if not module.params["vcenter_password"]:
module.fail_json("vcenter_password cannot be empty")
try:
session = await open_session(
vcenter_hostname=module.params["vcenter_hostname"],
vcenter_username=module.params["vcenter_username"],
vcenter_password=module.params["vcenter_password"],
validate_certs=module.params["vcenter_validate_certs"],
log_file=module.params["vcenter_rest_log_file"],
)
except EmbeddedModuleFailure as err:
module.fail_json(err.get_message())
result = await entry_point(module, session)
module.exit_json(**result)
# template: default_module.j2
def build_url(params):
return (
"https://{vcenter_hostname}" "/api/vcenter/vm/{vm}/hardware/ethernet"
).format(**params)
async def entry_point(module, session):
if module.params["state"] == "present":
if "_create" in globals():
operation = "create"
else:
operation = "update"
elif module.params["state"] == "absent":
operation = "delete"
else:
operation = module.params["state"]
func = globals()["_" + operation]
return await func(module.params, session)
async def _connect(params, session):
_in_query_parameters = PAYLOAD_FORMAT["connect"]["query"].keys()
payload = prepare_payload(params, PAYLOAD_FORMAT["connect"])
subdevice_type = get_subdevice_type(
"/api/vcenter/vm/{vm}/hardware/ethernet/{nic}?action=connect"
)
if subdevice_type and not params[subdevice_type]:
_json = await exists(params, session, build_url(params))
if _json:
params[subdevice_type] = _json["id"]
_url = (
"https://{vcenter_hostname}"
"/api/vcenter/vm/{vm}/hardware/ethernet/{nic}?action=connect"
).format(**params) + gen_args(params, _in_query_parameters)
async with session.post(_url, json=payload, **session_timeout(params)) as resp:
try:
if resp.headers["Content-Type"] == "application/json":
_json = await resp.json()
except KeyError:
_json = {}
if "value" not in _json: # 7.0.2
_json = {"value": _json}
return await update_changed_flag(_json, resp.status, "connect")
async def _create(params, session):
lookup_url = per_id_url = build_url(params)
uniquity_keys = ["nic"]
comp_func = None
async def lookup_with_filters(params, session, url):
# e.g: for the datacenter resources
if "folder" not in params:
return
if "name" not in params:
return
async with session.get(
f"{url}?names={params['name']}&folders={params['folder']}"
) as resp:
_json = await resp.json()
if isinstance(_json, list) and len(_json) == 1:
return await get_device_info(session, url, _json[0]["nic"])
_json = None
if params["nic"]:
_json = await get_device_info(session, build_url(params), params["nic"])
if not _json and (uniquity_keys or comp_func):
_json = await exists(
params,
session,
url=lookup_url,
uniquity_keys=uniquity_keys,
per_id_url=per_id_url,
comp_func=comp_func,
)
if not _json:
_json = await lookup_with_filters(params, session, build_url(params))
if _json:
if "value" not in _json: # 7.0.2+
_json = {"value": _json}
if "_update" in globals():
params["nic"] = _json["id"]
return await globals()["_update"](params, session)
return await update_changed_flag(_json, 200, "get")
payload = prepare_payload(params, PAYLOAD_FORMAT["create"])
_url = (
"https://{vcenter_hostname}" "/api/vcenter/vm/{vm}/hardware/ethernet"
).format(**params)
async with session.post(_url, json=payload, **session_timeout(params)) as resp:
if resp.status == 500:
text = await resp.text()
raise EmbeddedModuleFailure(
f"Request has failed: status={resp.status}, {text}"
)
try:
if resp.headers["Content-Type"] == "application/json":
_json = await resp.json()
except KeyError:
_json = {}
if (resp.status in [200, 201]) and "error" not in _json:
if isinstance(_json, str): # 7.0.2 and greater
_id = _json # TODO: fetch the object
elif isinstance(_json, dict) and "value" not in _json:
                _id = list(_json.values())[0]  # no "value" wrapper: take the first entry
elif isinstance(_json, dict) and "value" in _json:
_id = _json["value"]
_json_device_info = await get_device_info(session, _url, _id)
if _json_device_info:
_json = _json_device_info
return await update_changed_flag(_json, resp.status, "create")
async def _delete(params, session):
_in_query_parameters = PAYLOAD_FORMAT["delete"]["query"].keys()
payload = prepare_payload(params, PAYLOAD_FORMAT["delete"])
subdevice_type = get_subdevice_type("/api/vcenter/vm/{vm}/hardware/ethernet/{nic}")
if subdevice_type and not params[subdevice_type]:
_json = await exists(params, session, build_url(params))
if _json:
params[subdevice_type] = _json["id"]
_url = (
"https://{vcenter_hostname}" "/api/vcenter/vm/{vm}/hardware/ethernet/{nic}"
).format(**params) + gen_args(params, _in_query_parameters)
async with session.delete(_url, json=payload, **session_timeout(params)) as resp:
try:
if resp.headers["Content-Type"] == "application/json":
_json = await resp.json()
except KeyError:
_json = {}
return await update_changed_flag(_json, resp.status, "delete")
async def _disconnect(params, session):
_in_query_parameters = PAYLOAD_FORMAT["disconnect"]["query"].keys()
payload = prepare_payload(params, PAYLOAD_FORMAT["disconnect"])
subdevice_type = get_subdevice_type(
"/api/vcenter/vm/{vm}/hardware/ethernet/{nic}?action=disconnect"
)
if subdevice_type and not params[subdevice_type]:
_json = await exists(params, session, build_url(params))
if _json:
params[subdevice_type] = _json["id"]
_url = (
"https://{vcenter_hostname}"
"/api/vcenter/vm/{vm}/hardware/ethernet/{nic}?action=disconnect"
).format(**params) + gen_args(params, _in_query_parameters)
async with session.post(_url, json=payload, **session_timeout(params)) as resp:
try:
if resp.headers["Content-Type"] == "application/json":
_json = await resp.json()
except KeyError:
_json = {}
if "value" not in _json: # 7.0.2
_json = {"value": _json}
return await update_changed_flag(_json, resp.status, "disconnect")
async def _update(params, session):
payload = prepare_payload(params, PAYLOAD_FORMAT["update"])
_url = (
"https://{vcenter_hostname}" "/api/vcenter/vm/{vm}/hardware/ethernet/{nic}"
).format(**params)
async with session.get(_url, **session_timeout(params)) as resp:
_json = await resp.json()
if "value" in _json:
value = _json["value"]
else: # 7.0.2 and greater
value = _json
for k, v in value.items():
if k in payload:
if isinstance(payload[k], dict) and isinstance(v, dict):
to_delete = True
for _k in list(payload[k].keys()):
if payload[k][_k] != v.get(_k):
to_delete = False
if to_delete:
del payload[k]
elif payload[k] == v:
del payload[k]
elif payload[k] == {}:
del payload[k]
if payload == {} or payload == {"spec": {}}:
# Nothing has changed
if "value" not in _json: # 7.0.2
_json = {"value": _json}
_json["id"] = params.get("nic")
return await update_changed_flag(_json, resp.status, "get")
async with session.patch(_url, json=payload, **session_timeout(params)) as resp:
try:
if resp.headers["Content-Type"] == "application/json":
_json = await resp.json()
except KeyError:
_json = {}
if "value" not in _json: # 7.0.2
_json = {"value": _json}
# e.g: content_configuration
if not _json and resp.status == 204:
async with session.get(_url, **session_timeout(params)) as resp_get:
_json_get = await resp_get.json()
if _json_get:
_json = _json_get
_json["id"] = params.get("nic")
return await update_changed_flag(_json, resp.status, "update")
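# Editorial note: _update() first reads the current NIC state and drops every
# payload key whose value already matches, so the PATCH request (and the
# resulting "changed" flag) is only issued when something would actually change.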
if __name__ == "__main__":
import asyncio
current_loop = asyncio.get_event_loop_policy().get_event_loop()
current_loop.run_until_complete(main())
|
PypiClean
|
/commonn_abs_lyr-0.0.1.tar.gz/commonn_abs_lyr-0.0.1/connlayer/validate_config.py
|
import constants
error_msg = dict()
class ValidateConfig(object):
@staticmethod
    def mysql_config(config: dict):
try:
print("in mysql", config)
global error_msg
if type(config) is dict:
if constants.host not in config:
error_msg["error"] = "host name not valid"
return error_msg
if constants.user not in config:
error_msg["error"] = "please check user name"
return error_msg
if constants.password not in config:
error_msg["error"] = "please check password"
return error_msg
if constants.database not in config:
error_msg["error"] = "please check database name"
return error_msg
except Exception as err:
error_msg["error"] = "error while reading mysql config:-" + err
return error_msg
return error_msg
@staticmethod
    def redis_config(config: dict):
try:
global error_msg
if type(config) is dict:
                if constants.host not in config or not config[constants.host]:
error_msg["error"] = "host name not valid"
return error_msg
if constants.port not in config:
error_msg["error"] = "please check port"
return error_msg
if constants.database not in config:
error_msg["error"] = "please check database"
return error_msg
except Exception as err:
error_msg["error"] = "error while reading redis config:-" + str(err)
return error_msg
return error_msg
@staticmethod
    def cosmos_config(config: dict):
try:
global error_msg
if type(config) is dict:
                if constants.host not in config or not config[constants.host]:
error_msg["error"] = "host name not valid"
return error_msg
if constants.master_key not in config:
error_msg["error"] = "please check master key"
return error_msg
except Exception as err:
error_msg["error"] = "error while reading cosmos config:-" + err
return error_msg
return error_msg
@staticmethod
    def postgres_config(config: dict):
try:
global error_msg
if type(config) is dict:
if constants.host not in config:
error_msg["error"] = "host name not valid"
return error_msg
if constants.user not in config:
error_msg["error"] = "please check user name"
return error_msg
if constants.password not in config:
error_msg["error"] = "please check password"
return error_msg
if constants.database not in config:
error_msg["error"] = "please check database name"
return error_msg
except Exception as err:
error_msg["error"] = "error while reading postgres config:-" + err
return error_msg
return error_msg
@staticmethod
    def dwh_config(config: dict):
try:
global error_msg
if type(config) is dict:
if constants.host not in config:
error_msg["error"] = "host name not valid"
return error_msg
if constants.port not in config:
error_msg["error"] = "please check port"
return error_msg
if constants.database not in config:
error_msg["error"] = "please check database"
return error_msg
if constants.user not in config:
error_msg["error"] = "please check user name"
return error_msg
if constants.password not in config:
error_msg["error"] = "please check password"
return error_msg
if constants.driver not in config:
error_msg["error"] = "please check driver"
return error_msg
except Exception as err:
error_msg["error"] = "error while reading dwh config:-" + err
return error_msg
return error_msg
@staticmethod
    def clickhouse_config(config: dict):
try:
global error_msg
if type(config) is dict:
if constants.host not in config:
error_msg["error"] = "host name not valid"
return error_msg
if constants.user not in config:
error_msg["error"] = "please check user name"
return error_msg
if constants.password not in config:
error_msg["error"] = "please check password"
return error_msg
if constants.database not in config:
error_msg["error"] = "please check database name"
return error_msg
except Exception as err:
error_msg["error"] = "error while reading clickhouse config:-" + err
return error_msg
return error_msg
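# Minimal usage sketch (editorial addition, not part of the original package).
# It assumes the ``constants`` module resolves the attributes used above to the
# literal dict keys, e.g. ``constants.host == "host"``; adjust to the real values.
if __name__ == "__main__":
    sample = {
        "host": "localhost",
        "user": "root",
        "password": "secret",
        "database": "demo",
    }
    # An empty dict means every required key was present; otherwise the
    # returned dict carries an "error" message describing the first missing key.
    print(ValidateConfig.mysql_config(sample))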
|
PypiClean
|
/dropbox_sign-1.1.1-py3-none-any.whl/dropbox_sign/model/bulk_send_job_get_response_signature_requests.py
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, List, Dict, Union
import json # noqa: F401
import re # noqa: F401
import sys # noqa: F401
from dropbox_sign import ApiClient
from dropbox_sign.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from dropbox_sign.exceptions import ApiAttributeError
if TYPE_CHECKING:
from dropbox_sign.model.signature_request_response import SignatureRequestResponse
from dropbox_sign.model.signature_request_response_attachment import SignatureRequestResponseAttachment
from dropbox_sign.model.signature_request_response_custom_field_base import SignatureRequestResponseCustomFieldBase
from dropbox_sign.model.signature_request_response_data_base import SignatureRequestResponseDataBase
from dropbox_sign.model.signature_request_response_signatures import SignatureRequestResponseSignatures
def lazy_import():
from dropbox_sign.model.signature_request_response import SignatureRequestResponse
from dropbox_sign.model.signature_request_response_attachment import SignatureRequestResponseAttachment
from dropbox_sign.model.signature_request_response_custom_field_base import SignatureRequestResponseCustomFieldBase
from dropbox_sign.model.signature_request_response_data_base import SignatureRequestResponseDataBase
from dropbox_sign.model.signature_request_response_signatures import SignatureRequestResponseSignatures
globals()['SignatureRequestResponse'] = SignatureRequestResponse
globals()['SignatureRequestResponseAttachment'] = SignatureRequestResponseAttachment
globals()['SignatureRequestResponseCustomFieldBase'] = SignatureRequestResponseCustomFieldBase
globals()['SignatureRequestResponseDataBase'] = SignatureRequestResponseDataBase
globals()['SignatureRequestResponseSignatures'] = SignatureRequestResponseSignatures
class BulkSendJobGetResponseSignatureRequests(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'test_mode': (bool, none_type,), # noqa: E501
'signature_request_id': (str,), # noqa: E501
'requester_email_address': (str,), # noqa: E501
'title': (str,), # noqa: E501
'original_title': (str,), # noqa: E501
'subject': (str, none_type,), # noqa: E501
'message': (str, none_type,), # noqa: E501
'metadata': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501
'created_at': (int,), # noqa: E501
'expires_at': (int,), # noqa: E501
'is_complete': (bool,), # noqa: E501
'is_declined': (bool,), # noqa: E501
'has_error': (bool,), # noqa: E501
'files_url': (str,), # noqa: E501
'signing_url': (str, none_type,), # noqa: E501
'details_url': (str,), # noqa: E501
'cc_email_addresses': ([str],), # noqa: E501
'signing_redirect_url': (str, none_type,), # noqa: E501
'template_ids': ([str], none_type,), # noqa: E501
'custom_fields': ([SignatureRequestResponseCustomFieldBase], none_type,), # noqa: E501
'attachments': ([SignatureRequestResponseAttachment], none_type,), # noqa: E501
'response_data': ([SignatureRequestResponseDataBase], none_type,), # noqa: E501
'signatures': ([SignatureRequestResponseSignatures],), # noqa: E501
'bulk_send_job_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
@staticmethod
def init(data: any) -> BulkSendJobGetResponseSignatureRequests:
"""
Attempt to instantiate and hydrate a new instance of this class
"""
try:
obj_data = json.dumps(data)
except TypeError:
obj_data = data
return ApiClient().deserialize(
response=type('obj_dict', (object,), {'data': obj_data}),
response_type=[BulkSendJobGetResponseSignatureRequests],
_check_type=True,
)
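    # Editorial example (assumption, not generated code): ``init`` can hydrate
    # an instance from a plain dict, e.g.
    #   sr = BulkSendJobGetResponseSignatureRequests.init(
    #       {"signature_request_id": "abc123", "title": "NDA"})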
attribute_map = {
'test_mode': 'test_mode', # noqa: E501
'signature_request_id': 'signature_request_id', # noqa: E501
'requester_email_address': 'requester_email_address', # noqa: E501
'title': 'title', # noqa: E501
'original_title': 'original_title', # noqa: E501
'subject': 'subject', # noqa: E501
'message': 'message', # noqa: E501
'metadata': 'metadata', # noqa: E501
'created_at': 'created_at', # noqa: E501
'expires_at': 'expires_at', # noqa: E501
'is_complete': 'is_complete', # noqa: E501
'is_declined': 'is_declined', # noqa: E501
'has_error': 'has_error', # noqa: E501
'files_url': 'files_url', # noqa: E501
'signing_url': 'signing_url', # noqa: E501
'details_url': 'details_url', # noqa: E501
'cc_email_addresses': 'cc_email_addresses', # noqa: E501
'signing_redirect_url': 'signing_redirect_url', # noqa: E501
'template_ids': 'template_ids', # noqa: E501
'custom_fields': 'custom_fields', # noqa: E501
'attachments': 'attachments', # noqa: E501
'response_data': 'response_data', # noqa: E501
'signatures': 'signatures', # noqa: E501
'bulk_send_job_id': 'bulk_send_job_id', # noqa: E501
}
read_only_vars = {
}
@property
def test_mode(self) -> Optional[bool]:
return self.get("test_mode")
@test_mode.setter
def test_mode(self, value: Optional[bool]):
setattr(self, "test_mode", value)
@property
def signature_request_id(self) -> str:
return self.get("signature_request_id")
@signature_request_id.setter
def signature_request_id(self, value: str):
setattr(self, "signature_request_id", value)
@property
def requester_email_address(self) -> str:
return self.get("requester_email_address")
@requester_email_address.setter
def requester_email_address(self, value: str):
setattr(self, "requester_email_address", value)
@property
def title(self) -> str:
return self.get("title")
@title.setter
def title(self, value: str):
setattr(self, "title", value)
@property
def original_title(self) -> str:
return self.get("original_title")
@original_title.setter
def original_title(self, value: str):
setattr(self, "original_title", value)
@property
def subject(self) -> Optional[str]:
return self.get("subject")
@subject.setter
def subject(self, value: Optional[str]):
setattr(self, "subject", value)
@property
def message(self) -> Optional[str]:
return self.get("message")
@message.setter
def message(self, value: Optional[str]):
setattr(self, "message", value)
@property
def metadata(self) -> Dict[str, Union[bool, date, datetime, dict, float, int, list, str, none_type]]:
return self.get("metadata")
@metadata.setter
def metadata(self, value: Dict[str, Union[bool, date, datetime, dict, float, int, list, str, none_type]]):
setattr(self, "metadata", value)
@property
def created_at(self) -> int:
return self.get("created_at")
@created_at.setter
def created_at(self, value: int):
setattr(self, "created_at", value)
@property
def expires_at(self) -> int:
return self.get("expires_at")
@expires_at.setter
def expires_at(self, value: int):
setattr(self, "expires_at", value)
@property
def is_complete(self) -> bool:
return self.get("is_complete")
@is_complete.setter
def is_complete(self, value: bool):
setattr(self, "is_complete", value)
@property
def is_declined(self) -> bool:
return self.get("is_declined")
@is_declined.setter
def is_declined(self, value: bool):
setattr(self, "is_declined", value)
@property
def has_error(self) -> bool:
return self.get("has_error")
@has_error.setter
def has_error(self, value: bool):
setattr(self, "has_error", value)
@property
def files_url(self) -> str:
return self.get("files_url")
@files_url.setter
def files_url(self, value: str):
setattr(self, "files_url", value)
@property
def signing_url(self) -> Optional[str]:
return self.get("signing_url")
@signing_url.setter
def signing_url(self, value: Optional[str]):
setattr(self, "signing_url", value)
@property
def details_url(self) -> str:
return self.get("details_url")
@details_url.setter
def details_url(self, value: str):
setattr(self, "details_url", value)
@property
def cc_email_addresses(self) -> List[str]:
return self.get("cc_email_addresses")
@cc_email_addresses.setter
def cc_email_addresses(self, value: List[str]):
setattr(self, "cc_email_addresses", value)
@property
def signing_redirect_url(self) -> Optional[str]:
return self.get("signing_redirect_url")
@signing_redirect_url.setter
def signing_redirect_url(self, value: Optional[str]):
setattr(self, "signing_redirect_url", value)
@property
def template_ids(self) -> Optional[List[str]]:
return self.get("template_ids")
@template_ids.setter
def template_ids(self, value: Optional[List[str]]):
setattr(self, "template_ids", value)
@property
def custom_fields(self) -> Optional[List[SignatureRequestResponseCustomFieldBase]]:
return self.get("custom_fields")
@custom_fields.setter
def custom_fields(self, value: Optional[List[SignatureRequestResponseCustomFieldBase]]):
setattr(self, "custom_fields", value)
@property
def attachments(self) -> Optional[List[SignatureRequestResponseAttachment]]:
return self.get("attachments")
@attachments.setter
def attachments(self, value: Optional[List[SignatureRequestResponseAttachment]]):
setattr(self, "attachments", value)
@property
def response_data(self) -> Optional[List[SignatureRequestResponseDataBase]]:
return self.get("response_data")
@response_data.setter
def response_data(self, value: Optional[List[SignatureRequestResponseDataBase]]):
setattr(self, "response_data", value)
@property
def signatures(self) -> List[SignatureRequestResponseSignatures]:
return self.get("signatures")
@signatures.setter
def signatures(self, value: List[SignatureRequestResponseSignatures]):
setattr(self, "signatures", value)
@property
def bulk_send_job_id(self) -> str:
return self.get("bulk_send_job_id")
@bulk_send_job_id.setter
def bulk_send_job_id(self, value: str):
setattr(self, "bulk_send_job_id", value)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BulkSendJobGetResponseSignatureRequests - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
test_mode (bool, none_type): Whether this is a test signature request. Test requests have no legal value. Defaults to `false`.. [optional] if omitted the server will use the default value of False # noqa: E501
signature_request_id (str): The id of the SignatureRequest.. [optional] # noqa: E501
requester_email_address (str): The email address of the initiator of the SignatureRequest.. [optional] # noqa: E501
title (str): The title the specified Account uses for the SignatureRequest.. [optional] # noqa: E501
original_title (str): Default Label for account.. [optional] # noqa: E501
subject (str, none_type): The subject in the email that was initially sent to the signers.. [optional] # noqa: E501
message (str, none_type): The custom message in the email that was initially sent to the signers.. [optional] # noqa: E501
metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The metadata attached to the signature request.. [optional] # noqa: E501
created_at (int): Time the signature request was created.. [optional] # noqa: E501
expires_at (int): The time when the signature request will expire unsigned signatures. See [Signature Request Expiration Date](https://developers.hellosign.com/docs/signature-request/expiration/) for details.. [optional] # noqa: E501
is_complete (bool): Whether or not the SignatureRequest has been fully executed by all signers.. [optional] # noqa: E501
is_declined (bool): Whether or not the SignatureRequest has been declined by a signer.. [optional] # noqa: E501
has_error (bool): Whether or not an error occurred (either during the creation of the SignatureRequest or during one of the signings).. [optional] # noqa: E501
files_url (str): The URL where a copy of the request's documents can be downloaded.. [optional] # noqa: E501
signing_url (str, none_type): The URL where a signer, after authenticating, can sign the documents. This should only be used by users with existing Dropbox Sign accounts as they will be required to log in before signing.. [optional] # noqa: E501
details_url (str): The URL where the requester and the signers can view the current status of the SignatureRequest.. [optional] # noqa: E501
cc_email_addresses ([str]): A list of email addresses that were CCed on the SignatureRequest. They will receive a copy of the final PDF once all the signers have signed.. [optional] # noqa: E501
signing_redirect_url (str, none_type): The URL you want the signer redirected to after they successfully sign.. [optional] # noqa: E501
template_ids ([str], none_type): Templates IDs used in this SignatureRequest (if any).. [optional] # noqa: E501
custom_fields ([SignatureRequestResponseCustomFieldBase], none_type): An array of Custom Field objects containing the name and type of each custom field. * Text Field uses `SignatureRequestResponseCustomFieldText` * Checkbox Field uses `SignatureRequestResponseCustomFieldCheckbox`. [optional] # noqa: E501
attachments ([SignatureRequestResponseAttachment], none_type): Signer attachments.. [optional] # noqa: E501
response_data ([SignatureRequestResponseDataBase], none_type): An array of form field objects containing the name, value, and type of each textbox or checkmark field filled in by the signers.. [optional] # noqa: E501
signatures ([SignatureRequestResponseSignatures]): An array of signature objects, 1 for each signer.. [optional] # noqa: E501
bulk_send_job_id (str): The id of the BulkSendJob.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BulkSendJobGetResponseSignatureRequests - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
test_mode (bool, none_type): Whether this is a test signature request. Test requests have no legal value. Defaults to `false`.. [optional] if omitted the server will use the default value of False # noqa: E501
signature_request_id (str): The id of the SignatureRequest.. [optional] # noqa: E501
requester_email_address (str): The email address of the initiator of the SignatureRequest.. [optional] # noqa: E501
title (str): The title the specified Account uses for the SignatureRequest.. [optional] # noqa: E501
original_title (str): Default Label for account.. [optional] # noqa: E501
subject (str, none_type): The subject in the email that was initially sent to the signers.. [optional] # noqa: E501
message (str, none_type): The custom message in the email that was initially sent to the signers.. [optional] # noqa: E501
metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The metadata attached to the signature request.. [optional] # noqa: E501
created_at (int): Time the signature request was created.. [optional] # noqa: E501
expires_at (int): The time when the signature request will expire unsigned signatures. See [Signature Request Expiration Date](https://developers.hellosign.com/docs/signature-request/expiration/) for details.. [optional] # noqa: E501
is_complete (bool): Whether or not the SignatureRequest has been fully executed by all signers.. [optional] # noqa: E501
is_declined (bool): Whether or not the SignatureRequest has been declined by a signer.. [optional] # noqa: E501
has_error (bool): Whether or not an error occurred (either during the creation of the SignatureRequest or during one of the signings).. [optional] # noqa: E501
files_url (str): The URL where a copy of the request's documents can be downloaded.. [optional] # noqa: E501
signing_url (str, none_type): The URL where a signer, after authenticating, can sign the documents. This should only be used by users with existing Dropbox Sign accounts as they will be required to log in before signing.. [optional] # noqa: E501
details_url (str): The URL where the requester and the signers can view the current status of the SignatureRequest.. [optional] # noqa: E501
cc_email_addresses ([str]): A list of email addresses that were CCed on the SignatureRequest. They will receive a copy of the final PDF once all the signers have signed.. [optional] # noqa: E501
signing_redirect_url (str, none_type): The URL you want the signer redirected to after they successfully sign.. [optional] # noqa: E501
template_ids ([str], none_type): Templates IDs used in this SignatureRequest (if any).. [optional] # noqa: E501
custom_fields ([SignatureRequestResponseCustomFieldBase], none_type): An array of Custom Field objects containing the name and type of each custom field. * Text Field uses `SignatureRequestResponseCustomFieldText` * Checkbox Field uses `SignatureRequestResponseCustomFieldCheckbox`. [optional] # noqa: E501
attachments ([SignatureRequestResponseAttachment], none_type): Signer attachments.. [optional] # noqa: E501
response_data ([SignatureRequestResponseDataBase], none_type): An array of form field objects containing the name, value, and type of each textbox or checkmark field filled in by the signers.. [optional] # noqa: E501
signatures ([SignatureRequestResponseSignatures]): An array of signature objects, 1 for each signer.. [optional] # noqa: E501
bulk_send_job_id (str): The id of the BulkSendJob.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'oneOf': [
],
}
|
PypiClean
|
/alipay-python-3.3.17.tar.gz/alipay-python-3.3.17/alipay/aop/api/request/ZhimaCreditPeContractUserstatusQueryRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ZhimaCreditPeContractUserstatusQueryModel import ZhimaCreditPeContractUserstatusQueryModel
class ZhimaCreditPeContractUserstatusQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, ZhimaCreditPeContractUserstatusQueryModel):
self._biz_content = value
else:
self._biz_content = ZhimaCreditPeContractUserstatusQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'zhima.credit.pe.contract.userstatus.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
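if __name__ == "__main__":
    # Minimal usage sketch (editorial addition, not part of the SDK): build a
    # request, set a hypothetical notify URL and an extra text parameter, then
    # inspect the assembled form parameters.
    demo = ZhimaCreditPeContractUserstatusQueryRequest()
    demo.notify_url = "https://example.com/alipay/notify"  # hypothetical URL
    demo.add_other_text_param("out_trace_no", "demo-001")  # hypothetical key
    print(demo.get_params())  # always includes the method name and version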
|
PypiClean
|
/discord.py-unofficial-0.18.0b1.tar.gz/discord.py-unofficial-0.18.0b1/discord_unofficial/ext/commands/converter.py
|
import discord
import asyncio
import re
import inspect
from .errors import BadArgument, NoPrivateMessage
__all__ = [ 'Converter', 'MemberConverter', 'UserConverter',
'ChannelConverter', 'InviteConverter', 'RoleConverter',
'GameConverter', 'ColourConverter' ]
def _get_from_servers(bot, getter, argument):
result = None
for server in bot.servers:
result = getattr(server, getter)(argument)
if result:
return result
return result
class Converter:
"""The base class of custom converters that require the :class:`Context`
to be passed to be useful.
This allows you to implement converters that function similar to the
special cased ``discord`` classes.
Classes that derive from this should override the :meth:`convert` method
to do its conversion logic. This method could be a coroutine or a regular
function.
Attributes
-----------
ctx: :class:`Context`
The invocation context that the argument is being used in.
argument: str
The argument that is being converted.
"""
def __init__(self, ctx, argument):
self.ctx = ctx
self.argument = argument
def convert(self):
raise NotImplementedError('Derived classes need to implement this.')
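# Editorial sketch (not part of discord.py): a minimal custom converter that
# follows the contract described above -- read ``self.argument`` (and, if
# needed, ``self.ctx``) inside ``convert`` and return the converted value.
class _ShoutConverter(Converter):
    def convert(self):
        # Upper-case the raw argument string supplied by the user.
        return self.argument.upper()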
class IDConverter(Converter):
def __init__(self, ctx, argument):
super().__init__(ctx, argument)
self._id_regex = re.compile(r'([0-9]{15,21})$')
def _get_id_match(self):
return self._id_regex.match(self.argument)
class MemberConverter(IDConverter):
def convert(self):
message = self.ctx.message
bot = self.ctx.bot
match = self._get_id_match() or re.match(r'<@!?([0-9]+)>$', self.argument)
server = message.server
result = None
if match is None:
# not a mention...
if server:
result = server.get_member_named(self.argument)
else:
result = _get_from_servers(bot, 'get_member_named', self.argument)
else:
user_id = match.group(1)
if server:
result = server.get_member(user_id)
else:
result = _get_from_servers(bot, 'get_member', user_id)
if result is None:
raise BadArgument('Member "{}" not found'.format(self.argument))
return result
UserConverter = MemberConverter
class ChannelConverter(IDConverter):
def convert(self):
message = self.ctx.message
bot = self.ctx.bot
match = self._get_id_match() or re.match(r'<#([0-9]+)>$', self.argument)
result = None
server = message.server
if match is None:
# not a mention
if server:
result = discord.utils.get(server.channels, name=self.argument)
else:
result = discord.utils.get(bot.get_all_channels(), name=self.argument)
else:
channel_id = match.group(1)
if server:
result = server.get_channel(channel_id)
else:
result = _get_from_servers(bot, 'get_channel', channel_id)
if result is None:
raise BadArgument('Channel "{}" not found.'.format(self.argument))
return result
class ColourConverter(Converter):
def convert(self):
arg = self.argument.replace('0x', '').lower()
if arg[0] == '#':
arg = arg[1:]
try:
value = int(arg, base=16)
return discord.Colour(value=value)
except ValueError:
method = getattr(discord.Colour, arg, None)
if method is None or not inspect.ismethod(method):
raise BadArgument('Colour "{}" is invalid.'.format(arg))
return method()
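# Editorial note: the converter above accepts hex forms such as "0x1abc9c",
# "#1abc9c" or "1abc9c", as well as the name of a discord.Colour classmethod
# such as "red" or "dark_blue", and raises BadArgument for anything else.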
class RoleConverter(IDConverter):
def convert(self):
server = self.ctx.message.server
if not server:
raise NoPrivateMessage()
match = self._get_id_match() or re.match(r'<@&([0-9]+)>$', self.argument)
params = dict(id=match.group(1)) if match else dict(name=self.argument)
result = discord.utils.get(server.roles, **params)
if result is None:
raise BadArgument('Role "{}" not found.'.format(self.argument))
return result
class GameConverter(Converter):
def convert(self):
return discord.Game(name=self.argument)
class InviteConverter(Converter):
@asyncio.coroutine
def convert(self):
try:
invite = yield from self.ctx.bot.get_invite(self.argument)
return invite
except Exception as e:
raise BadArgument('Invite is invalid or expired') from e
class EmojiConverter(IDConverter):
@asyncio.coroutine
def convert(self):
message = self.ctx.message
bot = self.ctx.bot
match = self._get_id_match() or re.match(r'<:[a-zA-Z0-9]+:([0-9]+)>$', self.argument)
result = None
server = message.server
if match is None:
# Try to get the emoji by name. Try local server first.
if server:
result = discord.utils.get(server.emojis, name=self.argument)
if result is None:
result = discord.utils.get(bot.get_all_emojis(), name=self.argument)
else:
emoji_id = match.group(1)
# Try to look up emoji by id.
if server:
result = discord.utils.get(server.emojis, id=emoji_id)
if result is None:
result = discord.utils.get(bot.get_all_emojis(), id=emoji_id)
if result is None:
raise BadArgument('Emoji "{}" not found.'.format(self.argument))
return result
|
PypiClean
|
/drypatrick-frontend-20210603.0.tar.gz/drypatrick-frontend-20210603.0/hass_frontend/frontend_es5/chunk.b82ffa3c7bd1cd5b9df9.js
|
(self.webpackChunkhome_assistant_frontend=self.webpackChunkhome_assistant_frontend||[]).push([[242],{21157:function(n,e,t){"use strict";var l;t(65233);var i,a,s=(0,t(50856).d)(l||(i=['\n/* Most common used flex styles*/\n<dom-module id="iron-flex">\n <template>\n <style>\n .layout.horizontal,\n .layout.vertical {\n display: -ms-flexbox;\n display: -webkit-flex;\n display: flex;\n }\n\n .layout.inline {\n display: -ms-inline-flexbox;\n display: -webkit-inline-flex;\n display: inline-flex;\n }\n\n .layout.horizontal {\n -ms-flex-direction: row;\n -webkit-flex-direction: row;\n flex-direction: row;\n }\n\n .layout.vertical {\n -ms-flex-direction: column;\n -webkit-flex-direction: column;\n flex-direction: column;\n }\n\n .layout.wrap {\n -ms-flex-wrap: wrap;\n -webkit-flex-wrap: wrap;\n flex-wrap: wrap;\n }\n\n .layout.no-wrap {\n -ms-flex-wrap: nowrap;\n -webkit-flex-wrap: nowrap;\n flex-wrap: nowrap;\n }\n\n .layout.center,\n .layout.center-center {\n -ms-flex-align: center;\n -webkit-align-items: center;\n align-items: center;\n }\n\n .layout.center-justified,\n .layout.center-center {\n -ms-flex-pack: center;\n -webkit-justify-content: center;\n justify-content: center;\n }\n\n .flex {\n -ms-flex: 1 1 0.000000001px;\n -webkit-flex: 1;\n flex: 1;\n -webkit-flex-basis: 0.000000001px;\n flex-basis: 0.000000001px;\n }\n\n .flex-auto {\n -ms-flex: 1 1 auto;\n -webkit-flex: 1 1 auto;\n flex: 1 1 auto;\n }\n\n .flex-none {\n -ms-flex: none;\n -webkit-flex: none;\n flex: none;\n }\n </style>\n </template>\n</dom-module>\n/* Basic flexbox reverse styles */\n<dom-module id="iron-flex-reverse">\n <template>\n <style>\n .layout.horizontal-reverse,\n .layout.vertical-reverse {\n display: -ms-flexbox;\n display: -webkit-flex;\n display: flex;\n }\n\n .layout.horizontal-reverse {\n -ms-flex-direction: row-reverse;\n -webkit-flex-direction: row-reverse;\n flex-direction: row-reverse;\n }\n\n .layout.vertical-reverse {\n -ms-flex-direction: column-reverse;\n -webkit-flex-direction: column-reverse;\n flex-direction: column-reverse;\n }\n\n .layout.wrap-reverse {\n -ms-flex-wrap: wrap-reverse;\n -webkit-flex-wrap: wrap-reverse;\n flex-wrap: wrap-reverse;\n }\n </style>\n </template>\n</dom-module>\n/* Flexbox alignment */\n<dom-module id="iron-flex-alignment">\n <template>\n <style>\n /**\n * Alignment in cross axis.\n */\n .layout.start {\n -ms-flex-align: start;\n -webkit-align-items: flex-start;\n align-items: flex-start;\n }\n\n .layout.center,\n .layout.center-center {\n -ms-flex-align: center;\n -webkit-align-items: center;\n align-items: center;\n }\n\n .layout.end {\n -ms-flex-align: end;\n -webkit-align-items: flex-end;\n align-items: flex-end;\n }\n\n .layout.baseline {\n -ms-flex-align: baseline;\n -webkit-align-items: baseline;\n align-items: baseline;\n }\n\n /**\n * Alignment in main axis.\n */\n .layout.start-justified {\n -ms-flex-pack: start;\n -webkit-justify-content: flex-start;\n justify-content: flex-start;\n }\n\n .layout.center-justified,\n .layout.center-center {\n -ms-flex-pack: center;\n -webkit-justify-content: center;\n justify-content: center;\n }\n\n .layout.end-justified {\n -ms-flex-pack: end;\n -webkit-justify-content: flex-end;\n justify-content: flex-end;\n }\n\n .layout.around-justified {\n -ms-flex-pack: distribute;\n -webkit-justify-content: space-around;\n justify-content: space-around;\n }\n\n .layout.justified {\n -ms-flex-pack: justify;\n -webkit-justify-content: space-between;\n justify-content: space-between;\n }\n\n /**\n * Self alignment.\n */\n .self-start {\n 
-ms-align-self: flex-start;\n -webkit-align-self: flex-start;\n align-self: flex-start;\n }\n\n .self-center {\n -ms-align-self: center;\n -webkit-align-self: center;\n align-self: center;\n }\n\n .self-end {\n -ms-align-self: flex-end;\n -webkit-align-self: flex-end;\n align-self: flex-end;\n }\n\n .self-stretch {\n -ms-align-self: stretch;\n -webkit-align-self: stretch;\n align-self: stretch;\n }\n\n .self-baseline {\n -ms-align-self: baseline;\n -webkit-align-self: baseline;\n align-self: baseline;\n }\n\n /**\n * multi-line alignment in main axis.\n */\n .layout.start-aligned {\n -ms-flex-line-pack: start; /* IE10 */\n -ms-align-content: flex-start;\n -webkit-align-content: flex-start;\n align-content: flex-start;\n }\n\n .layout.end-aligned {\n -ms-flex-line-pack: end; /* IE10 */\n -ms-align-content: flex-end;\n -webkit-align-content: flex-end;\n align-content: flex-end;\n }\n\n .layout.center-aligned {\n -ms-flex-line-pack: center; /* IE10 */\n -ms-align-content: center;\n -webkit-align-content: center;\n align-content: center;\n }\n\n .layout.between-aligned {\n -ms-flex-line-pack: justify; /* IE10 */\n -ms-align-content: space-between;\n -webkit-align-content: space-between;\n align-content: space-between;\n }\n\n .layout.around-aligned {\n -ms-flex-line-pack: distribute; /* IE10 */\n -ms-align-content: space-around;\n -webkit-align-content: space-around;\n align-content: space-around;\n }\n </style>\n </template>\n</dom-module>\n/* Non-flexbox positioning helper styles */\n<dom-module id="iron-flex-factors">\n <template>\n <style>\n .flex,\n .flex-1 {\n -ms-flex: 1 1 0.000000001px;\n -webkit-flex: 1;\n flex: 1;\n -webkit-flex-basis: 0.000000001px;\n flex-basis: 0.000000001px;\n }\n\n .flex-2 {\n -ms-flex: 2;\n -webkit-flex: 2;\n flex: 2;\n }\n\n .flex-3 {\n -ms-flex: 3;\n -webkit-flex: 3;\n flex: 3;\n }\n\n .flex-4 {\n -ms-flex: 4;\n -webkit-flex: 4;\n flex: 4;\n }\n\n .flex-5 {\n -ms-flex: 5;\n -webkit-flex: 5;\n flex: 5;\n }\n\n .flex-6 {\n -ms-flex: 6;\n -webkit-flex: 6;\n flex: 6;\n }\n\n .flex-7 {\n -ms-flex: 7;\n -webkit-flex: 7;\n flex: 7;\n }\n\n .flex-8 {\n -ms-flex: 8;\n -webkit-flex: 8;\n flex: 8;\n }\n\n .flex-9 {\n -ms-flex: 9;\n -webkit-flex: 9;\n flex: 9;\n }\n\n .flex-10 {\n -ms-flex: 10;\n -webkit-flex: 10;\n flex: 10;\n }\n\n .flex-11 {\n -ms-flex: 11;\n -webkit-flex: 11;\n flex: 11;\n }\n\n .flex-12 {\n -ms-flex: 12;\n -webkit-flex: 12;\n flex: 12;\n }\n </style>\n </template>\n</dom-module>\n<dom-module id="iron-positioning">\n <template>\n <style>\n .block {\n display: block;\n }\n\n [hidden] {\n display: none !important;\n }\n\n .invisible {\n visibility: hidden !important;\n }\n\n .relative {\n position: relative;\n }\n\n .fit {\n position: absolute;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n }\n\n body.fullbleed {\n margin: 0;\n height: 100vh;\n }\n\n .scroll {\n -webkit-overflow-scrolling: touch;\n overflow: auto;\n }\n\n /* fixed position */\n .fixed-bottom,\n .fixed-left,\n .fixed-right,\n .fixed-top {\n position: fixed;\n }\n\n .fixed-top {\n top: 0;\n left: 0;\n right: 0;\n }\n\n .fixed-right {\n top: 0;\n right: 0;\n bottom: 0;\n }\n\n .fixed-bottom {\n right: 0;\n bottom: 0;\n left: 0;\n }\n\n .fixed-left {\n top: 0;\n bottom: 0;\n left: 0;\n }\n </style>\n </template>\n</dom-module>\n'],a||(a=i.slice(0)),l=Object.freeze(Object.defineProperties(i,{raw:{value:Object.freeze(a)}}))));s.setAttribute("style","display: none;"),document.head.appendChild(s.content)},33760:function(n,e,t){"use strict";t.d(e,{U:function(){return a}});t(65233);var 
l=t(51644),i=t(26110),a=[l.P,i.a,{hostAttributes:{role:"option",tabindex:"0"}}]},97968:function(n,e,t){"use strict";t(65660),t(70019);var l=document.createElement("template");l.setAttribute("style","display: none;"),l.innerHTML="<dom-module id=\"paper-item-shared-styles\">\n <template>\n <style>\n :host, .paper-item {\n display: block;\n position: relative;\n min-height: var(--paper-item-min-height, 48px);\n padding: 0px 16px;\n }\n\n .paper-item {\n @apply --paper-font-subhead;\n border:none;\n outline: none;\n background: white;\n width: 100%;\n text-align: left;\n }\n\n :host([hidden]), .paper-item[hidden] {\n display: none !important;\n }\n\n :host(.iron-selected), .paper-item.iron-selected {\n font-weight: var(--paper-item-selected-weight, bold);\n\n @apply --paper-item-selected;\n }\n\n :host([disabled]), .paper-item[disabled] {\n color: var(--paper-item-disabled-color, var(--disabled-text-color));\n\n @apply --paper-item-disabled;\n }\n\n :host(:focus), .paper-item:focus {\n position: relative;\n outline: 0;\n\n @apply --paper-item-focused;\n }\n\n :host(:focus):before, .paper-item:focus:before {\n @apply --layout-fit;\n\n background: currentColor;\n content: '';\n opacity: var(--dark-divider-opacity);\n pointer-events: none;\n\n @apply --paper-item-focused-before;\n }\n </style>\n </template>\n</dom-module>",document.head.appendChild(l.content)},53973:function(n,e,t){"use strict";t(65233),t(65660),t(97968);var l,i,a,s=t(9672),o=t(50856),r=t(33760);(0,s.k)({_template:(0,o.d)(l||(i=['\n <style include="paper-item-shared-styles">\n :host {\n @apply --layout-horizontal;\n @apply --layout-center;\n @apply --paper-font-subhead;\n\n @apply --paper-item;\n }\n </style>\n <slot></slot>\n'],a||(a=i.slice(0)),l=Object.freeze(Object.defineProperties(i,{raw:{value:Object.freeze(a)}})))),is:"paper-item",behaviors:[r.U]})},51095:function(n,e,t){"use strict";t(65233);var l,i,a,s=t(78161),o=t(9672),r=t(50856);(0,o.k)({_template:(0,r.d)(l||(i=["\n <style>\n :host {\n display: block;\n padding: 8px 0;\n\n background: var(--paper-listbox-background-color, var(--primary-background-color));\n color: var(--paper-listbox-color, var(--primary-text-color));\n\n @apply --paper-listbox;\n }\n </style>\n\n <slot></slot>\n"],a||(a=i.slice(0)),l=Object.freeze(Object.defineProperties(i,{raw:{value:Object.freeze(a)}})))),is:"paper-listbox",behaviors:[s.i],hostAttributes:{role:"listbox"}})}}]);
//# sourceMappingURL=chunk.b82ffa3c7bd1cd5b9df9.js.map
|
PypiClean
|
/baiduads_sdk_auto-2023.1.0-py3-none-any.whl/baiduads/shieldfunction/model/get_hit_black_ip_policy_response_wrapper.py
|
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.common.model.api_response_header import ApiResponseHeader
from baiduads.shieldfunction.model.get_hit_black_ip_policy_response_wrapper_body import GetHitBlackIPPolicyResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
globals()['GetHitBlackIPPolicyResponseWrapperBody'] = GetHitBlackIPPolicyResponseWrapperBody
class GetHitBlackIPPolicyResponseWrapper(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'header': (ApiResponseHeader,), # noqa: E501
'body': (GetHitBlackIPPolicyResponseWrapperBody,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'header': 'header', # noqa: E501
'body': 'body', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GetHitBlackIPPolicyResponseWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                      composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiResponseHeader): [optional] # noqa: E501
body (GetHitBlackIPPolicyResponseWrapperBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GetHitBlackIPPolicyResponseWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                      composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiResponseHeader): [optional] # noqa: E501
body (GetHitBlackIPPolicyResponseWrapperBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/rain_python-0.4.0-py3-none-any.whl/rain/client/input.py
|
from .data import DataObject, DataType, to_dataobj
from .task import Task
class InputBase:
dataobj = None
label = None
path = None
load = None
data_type = None
content_type = None
def __init__(self,
label=None,
path=None,
dataobj=None,
load=None,
content_type=None,
write=False):
assert self.data_type is not None
if label is not None and not isinstance(label, str):
raise Exception("Label has to be string, not {!r}".format(label))
self.label = label
if path is None:
if label:
path = label
else:
path = "input_{}".format(id(self))
self.path = path
if dataobj is not None:
dataobj = to_dataobj(dataobj)
if dataobj.spec.data_type != self.data_type:
raise Exception(
"Input exects data type {}, but provided data object has type {}"
.format(self.data_type, dataobj.spec.data_type))
self.dataobj = dataobj
self.load = load
self.content_type = content_type
self.write = write
def __repr__(self):
args = []
if self.path:
args.append("path={}".format(self.path))
if self.dataobj:
args.append("data={}".format(self.dataobj))
return "<Input '{}'>".format(self.label, " ".join(args))
@classmethod
def _for_data_object(cls, do):
assert isinstance(do, DataObject)
if do.spec.data_type == DataType.BLOB:
c = Input
else:
assert do.spec.data_type == DataType.DIRECTORY
c = InputDir
return c(label=do.spec.label, dataobj=do, content_type=do.content_type)
@classmethod
def _for_program(cls, inp, label=None, execute=False, label_as_path=False):
"""
Create `Input` from `Input`, `DataObject`, `Task` (single output)
or `str` for `Program` or `execute`.
"""
inp0 = inp
if isinstance(inp, str):
inp = Input(inp)
if isinstance(inp, Task):
inp = inp.output
if isinstance(inp, DataObject):
inp = Input._for_data_object(inp)
if not isinstance(inp, InputBase):
raise TypeError("Object {!r} cannot be used as input".format(inp0))
if inp.label is None:
inp.label = label
if inp.label is None:
raise ValueError("Program/execute Inputs need `label`")
if inp.load is not None:
raise ValueError("Program/execute Inputs do not accept `load`.")
        if execute and inp.dataobj is None:
            raise ValueError("`execute` Inputs need `dataobj`")
        if not execute and inp.dataobj is not None:
            raise ValueError("`Program` Inputs can't have `dataobj`")
if execute and inp.path is None:
if label_as_path:
inp.path = inp.label
else:
inp.path = "in_{}_{}".format(inp.label, inp.dataobj.id[1])
return inp
class Input(InputBase):
data_type = DataType.BLOB
class InputDir(InputBase):
data_type = DataType.DIRECTORY
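# --- Usage sketch (illustration only, not part of the library) ---
# Shows how a plain string becomes an Input for Program/execute-style tasks:
# the string is used both as the label and as the in-task path.
if __name__ == '__main__':
    inp = Input("config", content_type="text/plain")
    print(inp)                              # <Input 'config' path=config>
    prog_inp = Input._for_program("data.txt")
    print(prog_inp.label, prog_inp.path)    # data.txt data.txt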
|
PypiClean
|
/pulsar_client-3.3.0-cp39-cp39-macosx_10_15_universal2.whl/pulsar/schema/definition.py
|
import copy
from abc import abstractmethod
from collections import OrderedDict
from enum import Enum, EnumMeta
def _string_representation(x):
if hasattr(x, "__name__"):
return x.__name__
else:
return str(x)
def _check_record_or_field(x):
if (type(x) is type and not issubclass(x, Record)) \
and not isinstance(x, Field):
raise Exception('Argument ' + _string_representation(x) + ' is not a Record or a Field')
class RecordMeta(type):
def __new__(metacls, name, parents, dct):
if name != 'Record':
# Do not apply this logic to the base class itself
dct['_fields'] = RecordMeta._get_fields(dct)
dct['_required'] = False
return type.__new__(metacls, name, parents, dct)
@classmethod
def _get_fields(cls, dct):
# Build a set of valid fields for this record
fields = OrderedDict()
for name, value in dct.items():
if issubclass(type(value), EnumMeta):
value = CustomEnum(value)
elif type(value) == RecordMeta:
# We expect an instance of a record rather than the class itself
value = value()
if isinstance(value, Record) or isinstance(value, Field):
fields[name] = value
return fields
class Record(metaclass=RecordMeta):
# This field is used to set namespace for Avro Record schema.
_avro_namespace = None
# Generate a schema where fields are sorted alphabetically
_sorted_fields = False
def __init__(self, default=None, required_default=False, required=False, *args, **kwargs):
self._required_default = required_default
self._default = default
self._required = required
for k, value in self._fields.items():
if k in kwargs:
if isinstance(value, Record) and isinstance(kwargs[k], dict):
# Use dict init Record object
copied = copy.copy(value)
copied.__init__(**kwargs[k])
self.__setattr__(k, copied)
elif isinstance(value, Array) and isinstance(kwargs[k], list) and len(kwargs[k]) > 0 \
and isinstance(value.array_type, Record) and isinstance(kwargs[k][0], dict):
arr = []
for item in kwargs[k]:
copied = copy.copy(value.array_type)
copied.__init__(**item)
arr.append(copied)
self.__setattr__(k, arr)
elif isinstance(value, Map) and isinstance(kwargs[k], dict) and len(kwargs[k]) > 0 \
and isinstance(value.value_type, Record) and isinstance(list(kwargs[k].values())[0], dict):
dic = {}
for mapKey, mapValue in kwargs[k].items():
copied = copy.copy(value.value_type)
copied.__init__(**mapValue)
dic[mapKey] = copied
self.__setattr__(k, dic)
else:
# Value was overridden at constructor
self.__setattr__(k, kwargs[k])
elif isinstance(value, Record):
# Value is a subrecord
self.__setattr__(k, value)
else:
# Set field to default value, without revalidating the default value type
super(Record, self).__setattr__(k, value.default())
@classmethod
def schema(cls):
return cls.schema_info(set())
@classmethod
def schema_info(cls, defined_names):
namespace_prefix = ''
if cls._avro_namespace is not None:
namespace_prefix = cls._avro_namespace + '.'
namespace_name = namespace_prefix + cls.__name__
if namespace_name in defined_names:
return namespace_name
defined_names.add(namespace_name)
schema = {
'type': 'record',
'name': str(cls.__name__)
}
if cls._avro_namespace is not None:
schema['namespace'] = cls._avro_namespace
schema['fields'] = []
def get_filed_default_value(value):
if isinstance(value, Enum):
return value.name
else:
return value
if cls._sorted_fields:
fields = sorted(cls._fields.keys())
else:
fields = cls._fields.keys()
for name in fields:
field = cls._fields[name]
field_type = field.schema_info(defined_names) \
if field._required else ['null', field.schema_info(defined_names)]
            if field.required_default():
                schema['fields'].append({
                    'name': name,
                    'default': get_filed_default_value(field.default()),
                    'type': field_type
                })
            else:
                schema['fields'].append({
                    'name': name,
                    'type': field_type,
                })
return schema
def __setattr__(self, key, value):
if key == '_default':
super(Record, self).__setattr__(key, value)
elif key == '_required_default':
super(Record, self).__setattr__(key, value)
elif key == '_required':
super(Record, self).__setattr__(key, value)
else:
if key not in self._fields:
raise AttributeError('Cannot set undeclared field ' + key + ' on record')
# Check that type of value matches the field type
field = self._fields[key]
value = field.validate_type(key, value)
super(Record, self).__setattr__(key, value)
def __eq__(self, other):
for field in self._fields:
if self.__getattribute__(field) != other.__getattribute__(field):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return str(self.__dict__)
def type(self):
return str(self.__class__.__name__)
def python_type(self):
return self.__class__
def validate_type(self, name, val):
if val is None and not self._required:
return self.default()
if not isinstance(val, self.__class__):
raise TypeError("Invalid type '%s' for sub-record field '%s'. Expected: %s" % (
type(val), name, _string_representation(self.__class__)))
return val
def default(self):
if self._default is not None:
return self._default
else:
return None
def required_default(self):
return self._required_default
class Field(object):
def __init__(self, default=None, required=False, required_default=False):
if default is not None:
default = self.validate_type('default', default)
self._default = default
self._required_default = required_default
self._required = required
@abstractmethod
def type(self):
pass
@abstractmethod
def python_type(self):
pass
def validate_type(self, name, val):
if val is None and not self._required:
return self.default()
if type(val) != self.python_type():
raise TypeError("Invalid type '%s' for field '%s'. Expected: %s" % (type(val), name, _string_representation(self.python_type())))
return val
def schema(self):
# For primitive types, the schema would just be the type itself
return self.type()
def schema_info(self, defined_names):
return self.type()
def default(self):
return self._default
def required_default(self):
return self._required_default
# All types
class Null(Field):
def type(self):
return 'null'
def python_type(self):
return type(None)
def validate_type(self, name, val):
if val is not None:
            raise TypeError('Field ' + name + ' must be None')
return val
class Boolean(Field):
def type(self):
return 'boolean'
def python_type(self):
return bool
def default(self):
if self._default is not None:
return self._default
else:
return False
class Integer(Field):
def type(self):
return 'int'
def python_type(self):
return int
def default(self):
if self._default is not None:
return self._default
else:
return None
class Long(Field):
def type(self):
return 'long'
def python_type(self):
return int
def default(self):
if self._default is not None:
return self._default
else:
return None
class Float(Field):
def type(self):
return 'float'
def python_type(self):
return float
def default(self):
if self._default is not None:
return self._default
else:
return None
class Double(Field):
def type(self):
return 'double'
def python_type(self):
return float
def default(self):
if self._default is not None:
return self._default
else:
return None
class Bytes(Field):
def type(self):
return 'bytes'
def python_type(self):
return bytes
def default(self):
if self._default is not None:
return self._default
else:
return None
class String(Field):
def type(self):
return 'string'
def python_type(self):
return str
def validate_type(self, name, val):
t = type(val)
if val is None and not self._required:
return self.default()
if not (t is str or t.__name__ == 'unicode'):
raise TypeError("Invalid type '%s' for field '%s'. Expected a string" % (t, name))
return val
def default(self):
if self._default is not None:
return self._default
else:
return None
# Complex types
class CustomEnum(Field):
def __init__(self, enum_type, default=None, required=False, required_default=False):
if not issubclass(enum_type, Enum):
raise Exception(_string_representation(enum_type) + " is not a valid Enum type")
self.enum_type = enum_type
self.values = {}
for x in enum_type.__members__.values():
self.values[x.value] = x
super(CustomEnum, self).__init__(default, required, required_default)
def type(self):
return 'enum'
def python_type(self):
return self.enum_type
def validate_type(self, name, val):
if val is None:
return None
if type(val) is str:
# The enum was passed as a string, we need to check it against the possible values
if val in self.enum_type.__members__:
return self.enum_type.__members__[val]
else:
raise TypeError(
"Invalid enum value '%s' for field '%s'. Expected: %s" % (val, name, self.enum_type.__members__.keys()))
elif type(val) is int:
# The enum was passed as an int, we need to check it against the possible values
if val in self.values:
return self.values[val]
else:
raise TypeError(
"Invalid enum value '%s' for field '%s'. Expected: %s" % (val, name, self.values.keys()))
elif type(val) != self.python_type():
raise TypeError("Invalid type '%s' for field '%s'. Expected: %s" % (type(val), name, _string_representation(self.python_type())))
else:
return val
def schema(self):
return self.schema_info(set())
def schema_info(self, defined_names):
if self.enum_type.__name__ in defined_names:
return self.enum_type.__name__
defined_names.add(self.enum_type.__name__)
return {
'type': self.type(),
'name': self.enum_type.__name__,
'symbols': [x.name for x in self.enum_type]
}
def default(self):
if self._default is not None:
return self._default
else:
return None
class Array(Field):
def __init__(self, array_type, default=None, required=False, required_default=False):
_check_record_or_field(array_type)
self.array_type = array_type
super(Array, self).__init__(default=default, required=required, required_default=required_default)
def type(self):
return 'array'
def python_type(self):
return list
def validate_type(self, name, val):
if val is None:
return None
super(Array, self).validate_type(name, val)
for x in val:
if type(x) != self.array_type.python_type():
raise TypeError('Array field ' + name + ' items should all be of type ' +
_string_representation(self.array_type.type()))
return val
def schema(self):
return self.schema_info(set())
def schema_info(self, defined_names):
return {
'type': self.type(),
'items': self.array_type.schema_info(defined_names) if isinstance(self.array_type, (Array, Map, Record))
else self.array_type.type()
}
def default(self):
if self._default is not None:
return self._default
else:
return None
class Map(Field):
def __init__(self, value_type, default=None, required=False, required_default=False):
_check_record_or_field(value_type)
self.value_type = value_type
super(Map, self).__init__(default=default, required=required, required_default=required_default)
def type(self):
return 'map'
def python_type(self):
return dict
def validate_type(self, name, val):
if val is None:
return None
super(Map, self).validate_type(name, val)
for k, v in val.items():
if type(k) != str and not is_unicode(k):
raise TypeError('Map keys for field ' + name + ' should all be strings')
if type(v) != self.value_type.python_type():
raise TypeError('Map values for field ' + name + ' should all be of type '
+ _string_representation(self.value_type.python_type()))
return val
def schema(self):
return self.schema_info(set())
def schema_info(self, defined_names):
return {
'type': self.type(),
'values': self.value_type.schema_info(defined_names) if isinstance(self.value_type, (Array, Map, Record))
else self.value_type.type()
}
def default(self):
if self._default is not None:
return self._default
else:
return None
# Python3 has no `unicode` type, so here we use a tricky way to check if the type of `x` is `unicode` in Python2
# and also make it work well with Python3.
def is_unicode(x):
return 'encode' in dir(x) and type(x.encode()) == str
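# --- Usage sketch (illustration only, not part of the library) ---
# Declaring a Record subclass and generating its Avro schema; the field names
# and defaults below are arbitrary examples.
if __name__ == '__main__':
    class ExampleRecord(Record):
        name = String(required=True)
        age = Integer(default=0, required_default=True)

    rec = ExampleRecord(name='alice', age=3)
    print(rec.name, rec.age)          # alice 3
    print(ExampleRecord.schema())     # {'type': 'record', 'name': 'ExampleRecord', 'fields': [...]}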
|
PypiClean
|
/azure-ai-translation-document-1.0.0b5.zip/azure-ai-translation-document-1.0.0b5/samples/async_samples/sample_translation_with_glossaries_async.py
|
import asyncio
async def sample_translation_with_glossaries_async():
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.translation.document.aio import DocumentTranslationClient
from azure.ai.translation.document import (
TranslationGlossary
)
endpoint = os.environ["AZURE_DOCUMENT_TRANSLATION_ENDPOINT"]
key = os.environ["AZURE_DOCUMENT_TRANSLATION_KEY"]
source_container_url = os.environ["AZURE_SOURCE_CONTAINER_URL"]
target_container_url = os.environ["AZURE_TARGET_CONTAINER_URL"]
glossary_url = os.environ["AZURE_TRANSLATION_GLOSSARY_URL"]
client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
async with client:
poller = await client.begin_translation(
source_container_url,
target_container_url,
"es",
glossaries=[TranslationGlossary(glossary_url=glossary_url, file_format="TSV")]
)
result = await poller.result()
print("Status: {}".format(poller.status()))
print("Created on: {}".format(poller.details.created_on))
print("Last updated on: {}".format(poller.details.last_updated_on))
print("Total number of translations on documents: {}".format(poller.details.documents_total_count))
print("\nOf total documents...")
print("{} failed".format(poller.details.documents_failed_count))
print("{} succeeded".format(poller.details.documents_succeeded_count))
async for document in result:
print("Document ID: {}".format(document.id))
print("Document status: {}".format(document.status))
if document.status == "Succeeded":
print("Source document location: {}".format(document.source_document_url))
print("Translated document location: {}".format(document.translated_document_url))
print("Translated to language: {}\n".format(document.translated_to))
else:
print("Error Code: {}, Message: {}\n".format(document.error.code, document.error.message))
async def main():
await sample_translation_with_glossaries_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
PypiClean
|
/FairDynamicRec-0.0.123-py3-none-any.whl/fair_dynamic_rec/core/rankers/factor_ucb.py
|
import numpy as np
from .abstract_ranker import AbstractRanker
import sys
from sklearn.metrics.pairwise import cosine_similarity
from fair_dynamic_rec.core.util.utils import get_param_config_name
from fair_dynamic_rec.core.util.outputs import make_output_dir
# Objective function: min 1/2 (X - U . V_T) + 1/2 * \lambda (U)^2 + 1/2 * \lambda (V)^2
class FactorUCB(AbstractRanker):
def __init__(self, config, dataObj, parameters=None):
super(FactorUCB, self).__init__(config, dataObj)
self.prng = np.random.RandomState(seed=config.seed)
self.sigma = float(parameters["sigma"]["value"]) if "sigma" in parameters else 1.0
self.l = int(parameters["latent_dim"]["value"]) if "latent_dim" in parameters else 0
self.lambda_1 = float(parameters["lambda1"]["value"]) if "lambda1" in parameters else 1.0
self.lambda_2 = float(parameters["labmda2"]["value"]) if "lambda2" in parameters else 1.0
self.alpha_a = float(parameters["alpha_a"]["value"]) if "alpha_a" in parameters else 1.0
self.alpha_u = float(parameters["alpha_u"]["value"]) if "alpha_u" in parameters else 1.0
self.w_type = parameters["w_type"]["value"] if "w_type" in parameters else ""
self.k = 0
self.contextual_var = bool(parameters["contextual_variable"]["value"]) if "contextual_variable" in parameters else False
if self.contextual_var:
self.X = self.dataObj.feature_data['train_item_topical_features']
self.k = self.X.shape[1]
# self.XV = np.concatenate((self.X, self.V), axis=1)
# self.XV_optimal = np.concatenate((self.X, self.dataObj.feature_data['test_item_latent_features']), axis=1)
# else:
# self.XV = self.V
# self.XV_optimal = self.dataObj.feature_data['test_item_latent_features']
if config.load_model and self.__class__.__name__ == 'FactorUCB':
self.load_parameters(config, parameters)
else:
self.n_samples = np.zeros(dataObj.n_users)
self.n_clicks = np.zeros(dataObj.n_users)
self.V = np.zeros((self.dataObj.n_items, self.l))
self.Theta = np.zeros((self.dataObj.n_users, self.k + self.l))
self.W = np.identity(n=self.dataObj.n_users)
if self.w_type == "user-user_sim":
self.W = cosine_similarity(dataObj.train_data)
# self.A = self.lambda_1 * np.identity(n=(self.k+self.l)*self.dataObj.n_users)
# self.AInv = np.zeros(((self.k+self.l)*self.dataObj.n_users, (self.k+self.l)*self.dataObj.n_users))
self.AInv = np.eye((self.k+self.l)*self.dataObj.n_users)#np.linalg.inv(self.A)
print("AInv dimensions: " + str(self.AInv.shape))
sys.stdout.flush()
self.b = np.zeros((self.k+self.l)*self.dataObj.n_users)
self.C = np.zeros((self.dataObj.n_items, self.l, self.l))
self.CInv = np.zeros((self.dataObj.n_items, self.l, self.l))
for i in range(self.C.shape[0]):
self.C[i] = self.lambda_2 * np.identity(n=self.l)
self.CInv[i] = np.linalg.inv(self.C[i]) # np.zeros((self.k+self.l,self.k+self.l))
print("C dimensions: " + str(self.C.shape) + ", " + str(self.CInv.shape))
sys.stdout.flush()
self.d = np.zeros((self.dataObj.n_items, self.l))
self.ill_matrix_counter = 0
# for ill inverse
# self.AInv_tmp = np.zeros((self.dataObj.n_users, (self.k+self.l)*self.dataObj.n_users, (self.k+self.l)*self.dataObj.n_users))
# self.b_tmp = np.zeros((self.dataObj.n_users, (self.k+self.l)*self.dataObj.n_users))
# self.CInv_tmp = np.zeros((self.dataObj.n_items, self.l, self.l))
# self.d_tmp = np.zeros((self.dataObj.n_items, self.l))
if self.contextual_var:
self.XV = np.concatenate((self.X, self.V), axis=1)
else:
self.XV = self.V
self.Theta_x = self.Theta[:, :self.k]
self.Theta_v = self.Theta[:, self.k:]
def save_parameters(self, config, ranker_config):
pre_path = make_output_dir(config, get_param_config_name(ranker_config))
np.savetxt(pre_path/'n_samples', self.n_samples, fmt='%i')
np.savetxt(pre_path / 'n_clicks', self.n_clicks, fmt='%i')
text_file = open(pre_path / 'ill_matrix_counter', "w")
text_file.write(str(self.ill_matrix_counter))
text_file.close()
np.savetxt(pre_path / 'V', self.V, fmt='%f')
np.savetxt(pre_path / 'Theta', self.Theta, fmt='%f')
np.savetxt(pre_path / 'W', self.W, fmt='%f')
np.savetxt(pre_path / 'AInv', self.AInv, fmt='%f')
np.savetxt(pre_path / 'b', self.b, fmt='%f')
self.save_3d_array(pre_path / 'C', self.C, '%f')
self.save_3d_array(pre_path / 'CInv', self.CInv, '%f')
np.savetxt(pre_path / 'd', self.d, fmt='%f')
# np.savetxt(pre_path / 'click_history', self.click_history, fmt='%f')
# np.savetxt(pre_path / 'item_coef', self.item_coef, fmt='%f')
# np.savetxt(pre_path / 'exp_recommended', self.exp_recommended, fmt='%f')
# np.savetxt(pre_path / 'exp_examined', self.exp_examined, fmt='%f')
def load_parameters(self, config, ranker_config):
pre_path = make_output_dir(config, get_param_config_name(ranker_config))
self.n_samples = np.loadtxt(pre_path/'n_samples', dtype='int')
self.n_clicks = np.loadtxt(pre_path / 'n_clicks', dtype='int')
with open(pre_path / 'ill_matrix_counter') as file:
line = file.readline().rstrip()
self.ill_matrix_counter = int(line)
self.V = np.loadtxt(pre_path / 'V')
self.Theta = np.loadtxt(pre_path / 'Theta')
self.W = np.loadtxt(pre_path / 'W')
self.AInv = np.loadtxt(pre_path / 'AInv')
self.b = np.loadtxt(pre_path / 'b')
self.C = self.load_3d_array(pre_path / 'C', (self.dataObj.n_items, self.l, self.l))
self.CInv = self.load_3d_array(pre_path / 'CInv', (self.dataObj.n_items, self.l, self.l))
self.d = np.loadtxt(pre_path / 'd')
# self.click_history = np.loadtxt(pre_path / 'click_history')
# self.item_coef = np.loadtxt(pre_path / 'item_coef')
# self.exp_recommended = np.loadtxt(pre_path / 'exp_recommended')
# self.exp_examined = np.loadtxt(pre_path / 'exp_examined')
def save_3d_array(self, fn_out, arr, frmt):
# Write the array to disk
with open(fn_out, 'w') as outfile:
# I'm writing a header here just for the sake of readability
# Any line starting with "#" will be ignored by numpy.loadtxt
outfile.write('# Array shape: {0}\n'.format(arr.shape))
# Iterating through a ndimensional array produces slices along
# the last axis. This is equivalent to data[i,:,:] in this case
for data_slice in arr:
# The formatting string indicates that I'm writing out
# the values in left-justified columns 7 characters in width
# with 2 decimal places.
np.savetxt(outfile, data_slice, fmt=frmt)
# Writing out a break to indicate different slices...
outfile.write('# New slice\n')
def load_3d_array(self, fn_in, shp):
# Read the array from disk
arr = np.loadtxt(fn_in)
# However, going back to 3D is easy if we know the
# original shape of the array
return arr.reshape(shp)
def get_ranking(self, batch_users, sampled_items=None, round=None):
"""
:param x: features
:param k: number of positions
:return: ranking: the ranked item id.
"""
# assert x.shape[0] >= k
rankings = np.zeros((len(batch_users), self.config.list_size), dtype=int)
# self.batch_features = np.zeros((len(batch_users), self.config.list_size, self.dim))
tie_breaker = self.prng.rand(len(sampled_items))
for i in range(len(batch_users)):
user = batch_users[i]
# compute vec((X_a_t,V_a_t)W^T) -> N * (k+l)N
XVW = self.create_vectorized_matrix(self.W.shape[0], self.W.shape[0]*(self.k+self.l), self.XV, self.W, range(len(sampled_items)))
# compute line 9 of Algorithm 1
score = np.dot(self.XV[sampled_items], np.dot(self.Theta.T, self.W[user].T))
cb1 = np.sqrt(np.sum(np.multiply(np.dot(XVW,self.AInv),XVW),axis=1))
Theta_v_W = np.dot(self.Theta_v.T,self.W[user].T)
var2 = [(np.dot(np.dot(Theta_v_W.T, self.CInv[item]),Theta_v_W)) for item in sampled_items]
cb2 = np.sqrt(var2)
ucb = score + self.alpha_u * cb1 + self.alpha_a * cb2
selected_list = np.lexsort((tie_breaker, -ucb))[:self.config.list_size]
rankings[i] = sampled_items[selected_list]
return rankings
def update(self, batch_users, rankings, clicks, round=None, user_round=None):
for i in range(len(batch_users)):
user = batch_users[i]
# compute vec((X_a_t,V_a_t)W^T) -> list_size * (k+l)N
XVW = self.create_vectorized_matrix(self.W.shape[0], self.W.shape[0] * (self.k + self.l), self.XV, self.W, range(len(rankings[i])))
# XVW_optimal = self.create_vectorized_matrix(self.W.shape[0], self.W.shape[0] * (self.k + self.l), self.XV_optimal[sampled_items], self.W, rankings[i])
_clicks, _ranking, _XVW = self.__collect_feedback(clicks[i], rankings[i], XVW)
# discount_coef = [1 / (math.log(1 + j)) for j in range(1, len(rankings[0]) + 1)]
# discount_coef_reward = [math.log(1 + j) for j in range(1, len(_clicks) + 1)]
# discount_coef_penalization = [self.gamma * 1 / (math.log(1 + j)) for j in range(1, len(_clicks) + 1)]
# if self.processing_type == 'recommended_discountfactor':
# self.exp_recommended[user][np.array(rankings[0])] += discount_coef
# elif self.processing_type == 'examined_discountfactor':
# if len(clicks) == 0:
# self.exp_examined[user][np.array(rankings[0])] += discount_coef
# else:
# self.exp_examined[user][np.array(rankings[0][:len(clicks)])] += discount_coef[:len(clicks)]
#
# if self.processing_type == 'item_weight':
# _batch_features = self.update_item_weight(rankings[0], _batch_features, _clicks, discount_coef_penalization, discount_coef_reward, user, user_round)
"""
This is for computing self.theta (Line 3-5 of Alogirthm 1 of NIPS 11)
For fast matrix inverse, we use Woodbury matrix identity (https://en.wikipedia.org/wiki/Woodbury_matrix_identity)
Return: self.theta is updated.
"""
# for the inverse of M, feature matrix
# XW * A^-1 * XW^T
xAx = np.dot(_XVW, np.dot(self.AInv, _XVW.T))
# (1/sigma I + xAx)^-1
try:
tmp_inv = np.linalg.inv(1 / self.sigma * np.eye(len(_XVW)) + xAx)
except np.linalg.LinAlgError:
# for the ill matrix. if the matrix is not invertible, we ignore this update
self.ill_matrix_counter += 1
return
# A^-1*x^T
AInv_xT = self.AInv.dot(_XVW.T)
# AInv_xT*tmp_inv*AInv_xT^T
self.AInv_tmp = np.dot(np.dot(AInv_xT, tmp_inv), AInv_xT.T)
# MInv - the new part
self.AInv -= self.AInv_tmp
# self.A[user] += self.sigma * _XVW.T.dot(_XVW)
# for b: feedback
# if self.processing_type == 'feature_weight':
# self.update_feature_weight(_batch_features, _clicks, discount_coef_penalization, discount_coef_reward,
# user, user_round)
# else:
self.b += np.dot(_clicks, _XVW)
# for parameter Theta
self.Theta = self.devectorize(np.dot(self.AInv, self.b), self.k+self.l)
# self.theta[self.theta < 0] = 0
self.Theta_x = self.Theta[:, :self.k]
self.Theta_v = self.Theta[:, self.k:]
# ranking = rankings[i][:len(_clicks)]
Theta_v_W = np.dot(self.Theta_v.T, self.W[user].T)
xx = np.dot(Theta_v_W.reshape(self.Theta_v.shape[1],1), Theta_v_W.reshape(self.Theta_v.shape[1],1).T)
for i in range(len(_ranking)):
item = _ranking[i]
self.C[item] += xx
self.CInv[item] = np.linalg.inv(self.C[item])
if self.contextual_var:
# print('a='+str(self.d.shape)+', b='+str(Theta_v_W.shape)+', c='+str(self.X[ranking].shape)+', d='+str(self.Theta_x.T.shape)+', e='+str(self.W[user])+', f='+str((_clicks[i] - np.dot(self.X[ranking],np.dot(self.Theta_x.T, self.W[user]))).shape))
# sys.stdout.flush()
# clicked_items_index = _clicks[i].nonzero()[0]
self.d[item] += Theta_v_W * (_clicks[i] - np.dot(self.X[item],np.dot(self.Theta_x.T, self.W[user])))
else:
self.d[item] += Theta_v_W * _clicks[i]
self.V[item] = np.dot(self.CInv[item], self.d[item])
self.XV[:, self.k:] = self.V
self.n_samples[user] += len(_clicks)
self.n_clicks[user] += sum(_clicks)
def __collect_feedback(self, click, ranking, batch_X):
"""
:param y:
:return: the last observed position.
"""
# With Cascade assumption, only the first click counts.
if self.config.feedback_model == 'cascade':
if np.sum(click) == 0:
return click, ranking, batch_X
first_click = np.where(click)[0][0]
# batch_X[ranking[:first_click+1]] = np.zeros((ranking[:first_click+1][0], batch_X.shape[1]))
return click[:first_click + 1], ranking[:first_click + 1], batch_X[:first_click + 1]
elif self.config.feedback_model == 'dcm':
if np.sum(click) == 0:
                return click, ranking, batch_X
last_click = np.where(click)[0][-1]
return click[:last_click + 1], ranking[:last_click + 1], batch_X[:last_click + 1]
# all items are observed
else:
return click, ranking, batch_X
def create_vectorized_matrix(self, n_rows, n_cols, first_matrix, second_matrix, selected_rows):
mat = np.zeros((n_rows, n_cols))
mat = [self.vectorize(np.dot(self.matrixize(second_matrix.shape[0], first_matrix.shape[1], first_matrix[row], row).T,second_matrix)) for row in selected_rows]
return np.array(mat)
def matrixize(self, n_rows, n_cols, vec, target_row):
mat = np.zeros((n_rows, n_cols))
mat[target_row] = vec
return mat
def vectorize(self, M):
# temp = []
# for i in range(M.shape[0]*M.shape[1]):
# temp.append(M.T.item(i))
# V = np.asarray(temp)
# return V
return np.reshape(M.T, M.shape[0] * M.shape[1])
def devectorize(self, Vec, dimension):
# temp = np.zeros(shape = (C_dimension, len(V)/C_dimension))
# for i in range(len(V)/C_dimension):
# temp.T[i] = V[i*C_dimension : (i+1)*C_dimension]
# W = temp
# return W
# To-do: use numpy built-in function reshape.
return np.reshape(Vec, (int(len(Vec) / dimension), dimension))
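# --- Verification sketch (illustration only, not part of the ranker) ---
# The fast inverse update in `update` relies on the Woodbury matrix identity:
#   (A + sigma * X^T X)^-1 = A^-1 - A^-1 X^T (sigma^-1 I + X A^-1 X^T)^-1 X A^-1
# The standalone check below uses small random matrices to confirm the algebra;
# the dimensions and values are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    d, m, lam, sigma = 5, 3, 1.0, 1.0
    A = lam * np.eye(d)
    AInv = np.linalg.inv(A)
    X = rng.rand(m, d)
    xAx = X.dot(AInv).dot(X.T)
    tmp_inv = np.linalg.inv(np.eye(m) / sigma + xAx)
    AInv_updated = AInv - AInv.dot(X.T).dot(tmp_inv).dot(X).dot(AInv)
    direct = np.linalg.inv(A + sigma * X.T.dot(X))
    print(np.allclose(AInv_updated, direct))  # True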
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/network/v20200401/get_route.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRouteResult',
'AwaitableGetRouteResult',
'get_route',
]
@pulumi.output_type
class GetRouteResult:
"""
Route resource.
"""
def __init__(__self__, address_prefix=None, etag=None, id=None, name=None, next_hop_ip_address=None, next_hop_type=None, provisioning_state=None):
if address_prefix and not isinstance(address_prefix, str):
raise TypeError("Expected argument 'address_prefix' to be a str")
pulumi.set(__self__, "address_prefix", address_prefix)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if next_hop_ip_address and not isinstance(next_hop_ip_address, str):
raise TypeError("Expected argument 'next_hop_ip_address' to be a str")
pulumi.set(__self__, "next_hop_ip_address", next_hop_ip_address)
if next_hop_type and not isinstance(next_hop_type, str):
raise TypeError("Expected argument 'next_hop_type' to be a str")
pulumi.set(__self__, "next_hop_type", next_hop_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[str]:
"""
The destination CIDR to which the route applies.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nextHopIpAddress")
def next_hop_ip_address(self) -> Optional[str]:
"""
The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
"""
return pulumi.get(self, "next_hop_ip_address")
@property
@pulumi.getter(name="nextHopType")
def next_hop_type(self) -> str:
"""
The type of Azure hop the packet should be sent to.
"""
return pulumi.get(self, "next_hop_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the route resource.
"""
return pulumi.get(self, "provisioning_state")
class AwaitableGetRouteResult(GetRouteResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRouteResult(
address_prefix=self.address_prefix,
etag=self.etag,
id=self.id,
name=self.name,
next_hop_ip_address=self.next_hop_ip_address,
next_hop_type=self.next_hop_type,
provisioning_state=self.provisioning_state)
def get_route(resource_group_name: Optional[str] = None,
route_name: Optional[str] = None,
route_table_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str route_name: The name of the route.
:param str route_table_name: The name of the route table.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['routeName'] = route_name
__args__['routeTableName'] = route_table_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200401:getRoute', __args__, opts=opts, typ=GetRouteResult).value
return AwaitableGetRouteResult(
address_prefix=__ret__.address_prefix,
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
next_hop_ip_address=__ret__.next_hop_ip_address,
next_hop_type=__ret__.next_hop_type,
provisioning_state=__ret__.provisioning_state)
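# --- Local construction sketch (illustration only; no Azure call is made) ---
# `get_route` itself must run inside a Pulumi program, but the result type can be
# built directly with hypothetical values to show which fields a caller receives.
if __name__ == '__main__':
    example = GetRouteResult(
        address_prefix='10.0.0.0/24',
        etag='W/"0"',
        id=None,
        name='example-route',
        next_hop_ip_address='10.0.0.4',
        next_hop_type='VirtualAppliance',
        provisioning_state='Succeeded')
    print(example.name, example.next_hop_type, example.address_prefix)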
|
PypiClean
|
/e6data-python-connector-1.1.5.tar.gz/e6data-python-connector-1.1.5/e6data_python_connector/datainputstream.py
|
import struct
import logging
from e6xdb.date_time_utils import FORMATS, floor_div, floor_mod
from datetime import datetime, timedelta
from e6xdb.constants import ZONE
_logger = logging.getLogger(__name__)
class DataInputStream:
def __init__(self, stream):
self.stream = stream
def read_boolean(self):
return struct.unpack('?', self.stream.read(1))[0]
def read_bytes(self, byte_array):
for i in range(len(byte_array)):
byte_array[i] = struct.unpack('B', self.stream.read(1))[0]
return byte_array
    def read_int_96(self):
        # 12 raw bytes (e.g. Parquet INT96); returned as a tuple of unsigned bytes
        return struct.unpack('12B', self.stream.read(12))
def read_byte(self):
return struct.unpack('b', self.stream.read(1))[0]
def read_unsigned_byte(self):
return struct.unpack('B', self.stream.read(1))[0]
def read_char(self):
return chr(struct.unpack('>H', self.stream.read(2))[0])
def read_double(self):
return struct.unpack('>d', self.stream.read(8))[0]
def read_float(self):
return struct.unpack('>f', self.stream.read(4))[0]
def read_short(self):
return struct.unpack('>h', self.stream.read(2))[0]
def read_unsigned_short(self):
return struct.unpack('>H', self.stream.read(2))[0]
def read_long(self):
return struct.unpack('>q', self.stream.read(8))[0]
def read_utf(self):
utf_length = struct.unpack('>H', self.stream.read(2))[0]
return self.stream.read(utf_length)
def read_int(self):
return struct.unpack('>i', self.stream.read(4))[0]
def read_unsigned_int(self):
return struct.unpack('>I', self.stream.read(4))[0]
class FieldInfo:
def __init__(self, name, field_type, date_format, zone):
self.name = name
self.field_type = field_type
self.date_format = date_format
self.zone = zone
def get_zone(self):
if self.field_type == 'DATE' or self.field_type == 'DATETIME':
return self.zone
return None
def get_format(self):
if self.field_type == 'DATE' or self.field_type == 'DATETIME':
return self.date_format
return None
def get_field_type(self):
return self.field_type
def get_name(self):
return self.name
def get_query_columns_info(buffer):
result_meta_bytes = DataInputStream(buffer)
rowcount = result_meta_bytes.read_long()
field_count = result_meta_bytes.read_int()
columns_description = list()
for i in range(field_count):
name = result_meta_bytes.read_utf().decode()
field_type = result_meta_bytes.read_utf().decode()
zone = result_meta_bytes.read_utf().decode()
date_format = result_meta_bytes.read_utf().decode()
field_info = FieldInfo(name, field_type, date_format, zone)
columns_description.append(field_info)
return rowcount, columns_description
def read_values_from_array(query_columns_description: list, dis: DataInputStream) -> list:
value_array = list()
for i in query_columns_description:
dtype = i.get_field_type()
isPresent = dis.read_byte()
date_format = i.get_format()
if isPresent == 0:
value_array.append(None)
continue
try:
if dtype == "LONG":
value_array.append(dis.read_long())
elif dtype == "DATE":
epoch_seconds = floor_div(dis.read_long(), 1000_000)
date = datetime.fromtimestamp(epoch_seconds, ZONE)
value_array.append(date.strftime("%Y-%m-%d"))
elif dtype == "DATETIME":
epoch_micros = dis.read_long()
epoch_seconds = floor_div(epoch_micros, 1000_000)
micros_of_the_day = floor_mod(epoch_micros, 1000_000)
date_time = datetime.fromtimestamp(epoch_seconds, ZONE)
date_time = date_time + timedelta(microseconds=micros_of_the_day)
value_array.append(date_time.strftime("%Y-%m-%d %H:%M:%S"))
elif dtype == "STRING" or dtype == "ARRAY" or dtype == "MAP" or dtype == "STRUCT":
value_array.append(dis.read_utf().decode())
elif dtype == "INT":
value_array.append(dis.read_int())
elif dtype == "DOUBLE":
value_array.append(dis.read_double())
elif dtype == "BINARY":
value_array.append(dis.read_utf())
elif dtype == "FLOAT":
value_array.append(dis.read_float())
elif dtype == "CHAR":
value_array.append(dis.read_char())
elif dtype == "BOOLEAN":
value_array.append(dis.read_boolean())
elif dtype == "SHORT":
value_array.append(dis.read_short())
elif dtype == "BYTE":
value_array.append(dis.read_byte())
elif dtype == "INT96":
julian_day = dis.read_int()
time = dis.read_long()
date_time = datetime.fromtimestamp((julian_day - 2440588) * 86400)
date_time_with_nanos = date_time + timedelta(microseconds=(time / 1000))
value_array.append(date_time_with_nanos)
elif dtype == "INTEGER":
value_array.append(dis.read_int())
except Exception as e:
_logger.error(e)
value_array.append('Failed to parse.')
return value_array
def read_rows_from_batch(query_columns_description: list, dis: DataInputStream):
is_row_present = dis.read_byte()
if not is_row_present:
return None
rows = list()
while is_row_present == 1:
if is_row_present:
row = read_values_from_array(query_columns_description, dis)
rows.append(row)
# if rows become 1000, break it
# if len(rows) == 1000:
# _logger.info("Read Batch - Breaking the loop after 1000 records")
# break
is_row_present = dis.read_byte()
return rows
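# --- Usage sketch (illustration only, not part of the connector) ---
# DataInputStream wraps any binary file-like object and decodes big-endian
# primitives; the payload below is built locally with struct for demonstration.
if __name__ == '__main__':
    import io
    payload = struct.pack('>i', 42) + struct.pack('>d', 3.5) + struct.pack('>H', 2) + b'ok'
    dis = DataInputStream(io.BytesIO(payload))
    print(dis.read_int())            # 42
    print(dis.read_double())         # 3.5
    print(dis.read_utf().decode())   # ok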
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/me/calendars/item/calendar_view/item/calendar/calendar_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from .......models import calendar
from .......models.o_data_errors import o_data_error
class CalendarRequestBuilder():
"""
Provides operations to manage the calendar property of the microsoft.graph.event entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new CalendarRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/me/calendars/{calendar%2Did}/calendarView/{event%2Did}/calendar{?%24select}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_get_request_information(self,request_configuration: Optional[CalendarRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
The calendar that contains the event. Navigation property. Read-only.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = "application/json"
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
async def get(self,request_configuration: Optional[CalendarRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[calendar.Calendar]:
"""
The calendar that contains the event. Navigation property. Read-only.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: Optional[calendar.Calendar]
"""
request_info = self.create_get_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_async(request_info, calendar.Calendar, response_handler, error_mapping)
@dataclass
class CalendarRequestBuilderGetQueryParameters():
"""
The calendar that contains the event. Navigation property. Read-only.
"""
# Select properties to be returned
select: Optional[List[str]] = None
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "select":
return "%24select"
return original_name
@dataclass
class CalendarRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[CalendarRequestBuilder.CalendarRequestBuilderGetQueryParameters] = None
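# --- Local sketch (illustration only; no Microsoft Graph request is sent) ---
# The builder only needs path parameters and an adapter to assemble request
# information; the adapter below is a bare placeholder and the IDs are hypothetical.
if __name__ == '__main__':
    builder = CalendarRequestBuilder(
        request_adapter=object(),
        path_parameters={"calendar%2Did": "calendar-id", "event%2Did": "event-id"})
    request_info = builder.create_get_request_information()
    print(request_info.http_method, builder.url_template)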
|
PypiClean
|
/client-sdk-python-1.1.1.tar.gz/client-sdk-python-1.1.1/client_sdk_python/datastructures.py
|
from collections import OrderedDict
from collections.abc import (
    Hashable,
    Mapping,
    MutableMapping,
    Sequence,
)
from client_sdk_python.packages.eth_utils import (
is_integer,
)
from client_sdk_python.utils.formatters import (
recursive_map,
)
# Hashable must be immutable:
# "the implementation of hashable collections requires that a key's hash value is immutable"
# https://docs.python.org/3/reference/datamodel.html#object.__hash__
class ReadableAttributeDict(Mapping):
"""
The read attributes for the AttributeDict types
"""
def __init__(self, dictionary, *args, **kwargs):
self.__dict__ = dict(dictionary)
self.__dict__.update(dict(*args, **kwargs))
def __getitem__(self, key):
return self.__dict__[key]
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
def __repr__(self):
return self.__class__.__name__ + "(%r)" % self.__dict__
def _repr_pretty_(self, builder, cycle):
"""
Custom pretty output for the IPython console
"""
builder.text(self.__class__.__name__ + "(")
if cycle:
builder.text("<cycle>")
else:
builder.pretty(self.__dict__)
builder.text(")")
@classmethod
def _apply_if_mapping(cls, value):
if isinstance(value, Mapping):
return cls(value)
else:
return value
@classmethod
def recursive(cls, value):
return recursive_map(cls._apply_if_mapping, value)
class MutableAttributeDict(MutableMapping, ReadableAttributeDict):
def __setitem__(self, key, val):
self.__dict__[key] = val
def __delitem__(self, key):
del self.__dict__[key]
class AttributeDict(ReadableAttributeDict, Hashable):
"""
This provides superficial immutability, someone could hack around it
"""
def __setattr__(self, attr, val):
if attr == '__dict__':
super().__setattr__(attr, val)
else:
raise TypeError('This data is immutable -- create a copy instead of modifying')
def __delattr__(self, key):
raise TypeError('This data is immutable -- create a copy instead of modifying')
def __hash__(self):
return hash(tuple(sorted(self.items())))
def __eq__(self, other):
if isinstance(other, Mapping):
return self.__dict__ == dict(other)
else:
return False
class NamedElementOnion(Mapping):
'''
Add layers to an onion-shaped structure. Optionally, inject to a specific layer.
This structure is iterable, where the outermost layer is first, and innermost is last.
'''
def __init__(self, init_elements, valid_element=callable):
self._queue = OrderedDict()
for element in reversed(init_elements):
if valid_element(element):
self.add(element)
else:
self.add(*element)
def add(self, element, name=None):
if name is None:
name = element
if name in self._queue:
if name is element:
raise ValueError("You can't add the same un-named instance twice")
else:
raise ValueError("You can't add the same name again, use replace instead")
self._queue[name] = element
def inject(self, element, name=None, layer=None):
'''
Inject a named element to an arbitrary layer in the onion.
The current implementation only supports insertion at the innermost layer,
or at the outermost layer. Note that inserting to the outermost is equivalent
to calling :meth:`add` .
'''
if not is_integer(layer):
raise TypeError("The layer for insertion must be an int.")
elif layer != 0 and layer != len(self._queue):
raise NotImplementedError(
"You can only insert to the beginning or end of a %s, currently. "
"You tried to insert to %d, but only 0 and %d are permitted. " % (
type(self),
layer,
len(self._queue),
)
)
self.add(element, name=name)
if layer == 0:
if name is None:
name = element
self._queue.move_to_end(name, last=False)
elif layer == len(self._queue):
return
else:
raise AssertionError("Impossible to reach: earlier validation raises an error")
def clear(self):
self._queue.clear()
def replace(self, old, new):
if old not in self._queue:
raise ValueError("You can't replace unless one already exists, use add instead")
to_be_replaced = self._queue[old]
if to_be_replaced is old:
# re-insert with new name in old slot
self._replace_with_new_name(old, new)
else:
self._queue[old] = new
return to_be_replaced
def remove(self, old):
if old not in self._queue:
raise ValueError("You can only remove something that has been added")
del self._queue[old]
def _replace_with_new_name(self, old, new):
self._queue[new] = new
found_old = False
for key in list(self._queue.keys()):
if not found_old:
if key == old:
found_old = True
continue
elif key != new:
self._queue.move_to_end(key)
del self._queue[old]
def __iter__(self):
elements = self._queue.values()
if not isinstance(elements, Sequence):
elements = list(elements)
return iter(reversed(elements))
def __add__(self, other):
if not isinstance(other, NamedElementOnion):
raise NotImplementedError("You can only combine with another NamedElementOnion")
combined = self._queue.copy()
combined.update(other._queue)
return NamedElementOnion(combined.items())
def __contains__(self, element):
return element in self._queue
def __getitem__(self, element):
return self._queue[element]
def __len__(self):
return len(self._queue)
def __reversed__(self):
elements = self._queue.values()
if not isinstance(elements, Sequence):
elements = list(elements)
return iter(elements)
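# --- Usage sketch (illustrative, not part of this module) --------------------
# init_elements is reversed before adding, so the first element supplied ends
# up as the outermost layer and iteration walks outermost -> innermost.
#
#   def outer(request): return request
#   def inner(request): return request
#
#   onion = NamedElementOnion([(outer, 'outer'), (inner, 'inner')])
#   list(onion)                                      # [outer, inner]
#   onion.inject(lambda r: r, name='new', layer=0)   # insert at the innermost layer
#   onion.replace('inner', lambda r: r)              # swap an element by name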
|
PypiClean
|
/py_di-1.1.tar.gz/py_di-1.1/README.md
|
# Py_DI - A Python Dependency Injection (IoC) container implementation
## Authors
- [Moises P. Sena](http://moisespsena.com)
## Issues
[Issues on GitHub](https://github.com/moisespsena/py_di/issues).
## Installation
### By pip:
```bash
sudo pip install py_di
```
### Manual:
For default python executable:
```bash
git clone https://github.com/moisespsena/py_di py_di
cd py_di
sudo python setup.py install
```
For another python executable:
```bash
sudo python3 setup.py install
```
## Example:
See [tests](https://github.com/moisespsena/py_di/blob/master/tests/p/test_all.py) for more details.
|
PypiClean
|
/ZoomFoundry-6.21.0-py3-none-any.whl/zoom/_assets/standard_apps/admin/jobs.py
|
import zoom
def timespan(time1, time2):
if time1 and time2:
return time1 - time2
return ''
class BackgroundController(zoom.Controller):
def index(self):
"""Returns a list of background jobs"""
actions = []
if zoom.system.user.is_admin:
actions.append(('Clear Placeholders', 'jobs/clear'))
jobs = zoom.sites.Site(zoom.system.site.path).background_jobs
lookup = {
job.qualified_name: job
for job in zoom.store_of(zoom.background.BackgroundJobPlaceholder)
}
when = zoom.helpers.when
labels = (
'Name', 'Status', 'Trigger', 'Next Run', 'Last Run',
'Elapsed', 'Last Run Status'
)
content = zoom.browse(
(
(
job.name,
job.status,
job.trigger,
when(job.next_run),
when(
lookup.get(job.qualified_name) and
lookup.get(job.qualified_name).last_run
) or 'never',
timespan(
lookup.get(job.qualified_name).last_finished
if job.qualified_name in lookup else None,
lookup[job.qualified_name].last_run
if job.qualified_name in lookup else None,
),
lookup[job.qualified_name].last_run_status
if job.qualified_name in lookup and
lookup[job.qualified_name].last_run_status
else '-'
)
for job in jobs
),
labels=labels,
)
title = 'Jobs'
return zoom.page(content, title=title, actions=actions)
@zoom.authorize('administrators')
def clear(self):
zoom.store_of(zoom.background.BackgroundJobPlaceholder).zap()
return zoom.home('jobs')
main = zoom.dispatch(BackgroundController)
|
PypiClean
|
/ixnetwork_restpy-1.1.10.tar.gz/ixnetwork_restpy-1.1.10/uhd_restpy/testplatform/sessions/ixnetwork/topology/bgpexportroutetargetlist_ce93ce056c01eaf7643c31a7fd67768c.py
|
import sys
from uhd_restpy.base import Base
from uhd_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class BgpExportRouteTargetList(Base):
"""Export RouteTarget
The BgpExportRouteTargetList class encapsulates a list of bgpExportRouteTargetList resources that are managed by the system.
A list of resources can be retrieved from the server using the BgpExportRouteTargetList.find() method.
"""
__slots__ = ()
_SDM_NAME = 'bgpExportRouteTargetList'
_SDM_ATT_MAP = {
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
'TargetAs4Number': 'targetAs4Number',
'TargetAsNumber': 'targetAsNumber',
'TargetAssignedNumber': 'targetAssignedNumber',
'TargetIpAddress': 'targetIpAddress',
'TargetType': 'targetType',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(BgpExportRouteTargetList, self).__init__(parent, list_op)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def TargetAs4Number(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Export Route Target AS4 Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetAs4Number']))
@property
def TargetAsNumber(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Export Route Target AS Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetAsNumber']))
@property
def TargetAssignedNumber(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Export Route Target Assigned Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetAssignedNumber']))
@property
def TargetIpAddress(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Export Route Target IP Address
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetIpAddress']))
@property
def TargetType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Export Route Target Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetType']))
def update(self, Name=None):
# type: (str) -> BgpExportRouteTargetList
"""Updates bgpExportRouteTargetList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Name=None):
# type: (str) -> BgpExportRouteTargetList
"""Adds a new bgpExportRouteTargetList resource on the json, only valid with batch add utility
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with all currently retrieved bgpExportRouteTargetList resources using find and the newly added bgpExportRouteTargetList resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
# type: (int, str, str) -> BgpExportRouteTargetList
"""Finds and retrieves bgpExportRouteTargetList resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bgpExportRouteTargetList resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bgpExportRouteTargetList resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching bgpExportRouteTargetList resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bgpExportRouteTargetList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bgpExportRouteTargetList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, TargetAs4Number=None, TargetAsNumber=None, TargetAssignedNumber=None, TargetIpAddress=None, TargetType=None):
"""Base class infrastructure that gets a list of bgpExportRouteTargetList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- TargetAs4Number (str): optional regex of targetAs4Number
- TargetAsNumber (str): optional regex of targetAsNumber
- TargetAssignedNumber (str): optional regex of targetAssignedNumber
- TargetIpAddress (str): optional regex of targetIpAddress
- TargetType (str): optional regex of targetType
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
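# --- Usage sketch (illustrative, not part of the generated module) -----------
# `bgp_peer` below is a hypothetical parent NGPF object obtained from an
# established ixnetwork_restpy session; the names and values used are made up.
#
#   rt_list = bgp_peer.BgpExportRouteTargetList.find(Name='^export-rt-1$')
#   rt_list.update(Name='export-rt-renamed')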
|
PypiClean
|
/lightning-grid-0.8.81.tar.gz/lightning-grid-0.8.81/grid/sdk/utils/tar.py
|
from dataclasses import dataclass
import math
import os
import subprocess
import tarfile
from typing import Optional, Tuple
import click
MAX_SPLIT_COUNT = 999
def get_dir_size_and_count(source_dir: str, prefix: Optional[str] = None) -> Tuple[int, int]:
"""
Get size and file count of a directory
Parameters
----------
source_dir: str
Directory path
prefix: Optional[str]
If given, only files whose names start with this prefix are counted
Returns
-------
Tuple[int, int]
Size in bytes and file count
"""
size = 0
count = 0
for root, _, files in os.walk(source_dir, topdown=True):
for f in files:
if prefix and not f.startswith(prefix):
continue
full_path = os.path.join(root, f)
size += os.path.getsize(full_path)
count += 1
return (size, count)
@dataclass
class TarResults:
"""
This class holds the results of running tar_path.
Attributes
----------
before_size: int
The total size of the original directory files in bytes
after_size: int
The total size of the compressed and tarred split files in bytes
"""
before_size: int
after_size: int
def get_split_size(
total_size: int, minimum_split_size: int = 1024 * 1000 * 20, max_split_count: int = MAX_SPLIT_COUNT
) -> int:
"""
Calculate the split size to use for the multipart upload of an object to a bucket. We are limited
to at most 1000 parts because of the way we use ListMultipartUploads.
More info
https://github.com/gridai/grid/pull/5267
https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpu-process
https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
https://github.com/psf/requests/issues/2717#issuecomment-724725392 Python or requests has a limit of 2**31 bytes for a single file upload.
Parameters
----------
minimum_split_size: int
The minimum split size to use
max_split_count: int
The maximum split count
total_size: int
Total size of the file to split
Returns
-------
int
Split size
"""
max_size = max_split_count * (1 << 31) # max size per part limited by Requests or urllib as shown in ref above
if total_size > max_size:
raise click.ClickException(
f"The size of the datastore to be uploaded is bigger than our {max_size/(1 << 40):.2f} TBytes limit"
)
split_size = minimum_split_size
split_count = math.ceil(total_size / split_size)
if split_count > max_split_count:
# Adjust the split size based on max split count
split_size = math.ceil(total_size / max_split_count)
return split_size
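# Worked example (illustrative): for a 100 GB datastore the default ~20 MB
# split would need ceil(100e9 / 20_480_000) = 4883 parts, which exceeds
# MAX_SPLIT_COUNT (999), so the split size is raised to
# ceil(100e9 / 999) ~= 100.1 MB per part.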
def tar_path(source_path: str, target_file: str, compression: bool = False) -> TarResults:
"""
Create tar from directory using `tar`
Parameters
----------
source_path: str
Source directory or file
target_file
Target tar file
compression: bool, default False
Enable compression, which is disabled by default.
Returns
-------
TarResults
Results that holds file counts and sizes
"""
if os.path.isdir(source_path):
before_size, _ = get_dir_size_and_count(source_path)
else:
before_size = os.path.getsize(source_path)
try:
_tar_path_subprocess(source_path, target_file, compression)
except subprocess.CalledProcessError:
_tar_path_python(source_path, target_file, compression)
after_size = os.stat(target_file).st_size
return TarResults(before_size=before_size, after_size=after_size)
def _tar_path_python(source_path: str, target_file: str, compression: bool = False) -> None:
"""
Create tar from directory using `python`
Parameters
----------
source_path: str
Source directory or file
target_file
Target tar file
compression: bool, default False
Enable compression, which is disabled by default.
"""
file_mode = "w:gz" if compression else "w:"
with tarfile.open(target_file, file_mode) as tar:
if os.path.isdir(source_path):
tar.add(str(source_path), arcname=".")
elif os.path.isfile(source_path):
file_info = tarfile.TarInfo(os.path.basename(str(source_path)))
# addfile() copies exactly file_info.size bytes, so the size must be set
# and the file opened in binary mode for the member to be written correctly.
file_info.size = os.path.getsize(source_path)
with open(source_path, "rb") as f:
tar.addfile(file_info, f)
def _tar_path_subprocess(source_path: str, target_file: str, compression: bool = False) -> None:
"""
Create tar from directory using `tar`
Parameters
----------
source_path: str
Source directory or file
target_file
Target tar file
compression: bool, default False
Enable compression, which is disabled by default.
"""
# Only add compression when users explicitly request it.
# We do this because it takes too long to compress
# large datastores.
tar_flags = '-cvf'
if compression:
tar_flags = '-zcvf'
if os.path.isdir(source_path):
command = f"tar -C {source_path} {tar_flags} {target_file} ./"
else:
abs_path = os.path.abspath(source_path)
parent_dir = os.path.dirname(abs_path)
base_name = os.path.basename(abs_path)
command = f"tar -C {parent_dir} {tar_flags} {target_file} {base_name}"
subprocess.check_call(
command,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True,
env={
"GZIP": "-9",
"COPYFILE_DISABLE": "1"
}
)
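# --- Usage sketch (illustrative, not part of this module) --------------------
#   results = tar_path("/tmp/my_datastore", "/tmp/my_datastore.tar")
#   part_size = get_split_size(results.after_size)
#   print(f"{results.before_size} -> {results.after_size} bytes, "
#         f"upload parts of {part_size} bytes")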
|
PypiClean
|
/bicleaner-0.16.tar.gz/bicleaner-0.16/README.md
|
# bicleaner

Bicleaner (`bicleaner-classify`) is a tool in Python that aims at detecting noisy sentence pairs in a parallel corpus. It
indicates the likelihood of a pair of sentences being mutual translations (with a value near to 1) or not (with a value near to 0). Sentence pairs considered very noisy are scored with 0.
Although a training tool (`bicleaner-train`) is provided, you may want to use the available ready-to-use language packages.
Please, visit https://github.com/bitextor/bicleaner-data/releases/latest or use `./utils/download-pack.sh` to download the latest language packages.
Visit our [Wiki](https://github.com/bitextor/bicleaner/wiki/How-to-train-your-Bicleaner) for a detailed example on Bicleaner training.
## Citation
If you find Bicleaner useful, please consider citing the following papers:
> V. M. Sánchez-Cartagena, M. Bañón, S. Ortiz-Rojas and G. Ramírez-Sánchez,\
> "[Prompsit's submission to WMT 2018 Parallel Corpus Filtering shared task](http://www.statmt.org/wmt18/pdf/WMT116.pdf)",\
> in *Proceedings of the Third Conference on Machine Translation, Volume 2: Shared Task Papers*.\
> Brussels, Belgium: Association for Computational Linguistics, October 2018
```latex
@InProceedings{prompsit:2018:WMT,
author = { V\'{i}ctor M. S\'{a}nchez-Cartagena and Marta Ba{\~n}\'{o}n and Sergio Ortiz-Rojas and Gema Ram\'{i}rez-S\'{a}nchez},
title = {Prompsit's submission to WMT 2018 Parallel Corpus Filtering shared task},
booktitle = {Proceedings of the Third Conference on Machine Translation, Volume 2: Shared Task Papers},
month = {October},
address = {Brussels, Belgium},
publisher = {Association for Computational Linguistics}
}
```
> Gema Ramírez-Sánchez, Jaume Zaragoza-Bernabeu, Marta Bañón and Sergio Ortiz Rojas \
> "[Bifixer and Bicleaner: two open-source tools to clean your parallel data.](https://eamt2020.inesc-id.pt/proceedings-eamt2020.pdf#page=311)",\
> in *Proceedings of the 22nd Annual Conference of the European Association for Machine Translation*.\
> Lisboa, Portugal: European Association for Machine Translation, November 2020
```latex
@InProceedings{prompsit:2020:EAMT,
author = {Gema Ram\'{i}rez-S\'{a}nchez and Jaume Zaragoza-Bernabeu and Marta Ba{\~n}\'{o}n and Sergio Ortiz-Rojas},
title = {Bifixer and Bicleaner: two open-source tools to clean your parallel data.},
booktitle = {Proceedings of the 22nd Annual Conference of the European Association for Machine Translation},
pages = {291--298},
isbn = {978-989-33-0589-8},
year = {2020},
month = {November},
address = {Lisboa, Portugal},
publisher = {European Association for Machine Translation}
}
```
## Installation & Requirements
Bicleaner is written in Python and can be installed using pip. It also requires the KenLM Python bindings with support for 7-gram language models. You can easily install it by running the following commands:
```bash
pip install bicleaner
pip install https://github.com/kpu/kenlm/archive/master.zip --install-option="--max_order 7"
```
The remaining extra modules required by Bicleaner will be automatically downloaded and installed/upgraded (if required) with the first command.
Also, you can install the conda package (KenLM is already included):
```bash
conda install -c conda-forge -c bitextor bicleaner
```
After installation, three binary files (`bicleaner-train`, `bicleaner-classify` and `bicleaner-classify-lite`) will be located in your `python/installation/prefix/bin` directory. This is usually `$HOME/.local/bin` or `/usr/local/bin/`.
## Cleaning
`bicleaner-classify` aims at detecting noisy sentence pairs in a parallel corpus. It
indicates the likelihood of a pair of sentences being mutual translations (with a value near to 1) or not (with a value near to 0). Sentence pairs considered very noisy are scored with 0.
By default, the input file (the parallel corpus to be classified) must contain at least four columns, being:
* col1: URL 1
* col2: URL 2
* col3: Source sentence
* col4: Target sentence
but the source and target sentence column indices can be customized by using the `--scol` and `--tcol` flags.
The generated output file will contain the same lines and columns that the original input file had, adding an extra column containing the Bicleaner classifier score.
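For illustration, a made-up input line and the corresponding classified output line could look like this (tabs shown as `<tab>`; the URLs, sentences and score are invented):
```
http://ex.com/en.html<tab>http://ex.com/es.html<tab>This is a house.<tab>Esto es una casa.
http://ex.com/en.html<tab>http://ex.com/es.html<tab>This is a house.<tab>Esto es una casa.<tab>0.872
```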
This tool can be run with
```bash
bicleaner-classify [-h]
[-S SOURCE_TOKENIZER_COMMAND]
[-T TARGET_TOKENIZER_COMMAND]
[--header]
[--scol SCOL]
[--tcol TCOL]
[--tmp_dir TMP_DIR]
[-b BLOCK_SIZE]
[-p PROCESSES]
[-d DISCARDED_TUS]
[--lm_threshold LM_THRESHOLD]
[--score_only]
[--disable_hardrules]
[--disable_lm_filter]
[--disable_porn_removal]
[--disable_minimal_length]
[-q]
[--debug]
[--logfile LOGFILE]
[-v]
input
[output]
metadata
```
### Parameters
* positional arguments:
* `input`: Tab-separated files to be classified (default line format: `URL1 URL2 SOURCE_SENTENCE TARGET_SENTENCE [EXTRA_COLUMNS]`, tab-separated). When input is -, reads standard input.
* `output`: Output of the classification (default: standard output). When output is -, writes standard output.
* `metadata`: Training metadata (YAML file), generated by `bicleaner-train` or [downloaded](https://github.com/bitextor/bicleaner-data/releases/latest) as a part of a language pack. You just need to `untar` the language pack for the pair of languages of the file you want to clean. The tar file contains the YAML metadata file.
There's a script that can download and unpack it for you, use:
```bash
$ ./utils/download-pack.sh en cs ./models
```
to download English-Czech language pack to the ./models directory and unpack it.
* optional arguments:
* `-h, --help`: show this help message and exit
* Optional:
* `-S SOURCE_TOKENIZER_COMMAND`: Source language tokenizer full command (including flags if needed). If not given, Sacremoses tokenizer is used (with `escape=False` option).
* `-T TARGET_TOKENIZER_COMMAND`: Target language tokenizer full command (including flags if needed). If not given, Sacremoses tokenizer is used (with `escape=False` option).
* `--header`: Treats the first sentence of the input file as the header row. If set, the output will contain a header as well
* `--scol SCOL`: Source sentence column (starting in 1). If `--header` is set, the expected value will be the name of the field (default: 3 if `--header` is not set else src_text)
* `--tcol TCOL`: Target sentence column (starting in 1). If `--header` is set, the expected value will be the name of the field (default: 4 if `--header` is not set else trg_text)
* `--tmp_dir TMP_DIR`: Temporary directory where the temporary files of this program are created (default: system temp dir, defined by the environment variable TMPDIR in Unix)
* `-b BLOCK_SIZE, --block_size BLOCK_SIZE`: Sentence pairs per block (default: 10000)
* `-p PROCESSES, --processes PROCESSES`: Number of processes to use (default: all CPUs minus one)
* `-d DISCARDED_TUS, --discarded_tus DISCARDED_TUS`: TSV file with discarded TUs. TUs discarded by the classifier are written to this file in TSV format. (default: None)
* `--lm_threshold LM_THRESHOLD`: Threshold for language model fluency scoring. All sentence pairs whose LM fluency score falls below the threshold are removed (classifier score set to 0), unless the option --keep_lm_result is set. (default: 0.5)
* `--score_only`: Only output one column which is the bicleaner score (default: False)
* `--disable_hardrules`: Disables the bicleaner_hardrules filtering (only bicleaner_classify is applied) (default: False)
* `--disable_lm_filter`: Disables LM filtering.
* `--disable_porn_removal`: Disables porn removal.
* `--disable_minimal_length` : Don't apply minimal length rule (default: False).
* Logging:
* `-q, --quiet`: Silent logging mode (default: False)
* `--debug`: Debug logging mode (default: False)
* `--logfile LOGFILE`: Store log to a file (default: <_io.TextIOWrapper name='<stderr>' mode='w' encoding='UTF-8'>)
* `-v, --version`: show version of this script and exit
### Example
```bash
bicleaner-classify \
corpus.en-es.raw \
corpus.en-es.classified \
training.en-es.yaml
```
This will read the "`corpus.en-es.raw`" file,
classify it with the classifier indicated in the "`training.en-es.yaml`" metadata file,
writing the result of the classification in the "`corpus.en-es.classified`" file.
Each line of the new file will contain the same content as the input file, adding a column with the score given by the Bicleaner classifier.
### Automatic test
We included a small test corpus and a script to check that your Bicleaner classifier is working as expected.
In order to use it, just run:
```bash
python3.7 -m pytest -s tests/bicleaner_test.py
```
This will download the required language pack, classify the provided test corpus, and check the resulting classification scores. If everything went as expected, the output will be "1 passed in XX.XX seconds". All downloaded data will be removed at the end of the testing session.
## Training classifiers
In case you need to train a new classifier (i.e. because it is not available in the language packs provided at [bicleaner-data](https://github.com/bitextor/bicleaner-data/releases/latest)), you can use `bicleaner-train` .
`bicleaner-train` is a Python3 tool that allows you to train a classifier which predicts
whether a pair of sentences are mutual translations or not and discards too noisy sentence pairs. Visit our [Wiki](https://github.com/bitextor/bicleaner/wiki/How-to-train-your-Bicleaner) for a detailed example on Bicleaner training.
### Requirements
In order to train a new classifier, you must provide:
* A clean parallel corpus (100k pairs of sentences is the recommended size).
* SL-to-TL and TL-to-SL gzipped probabilistic bilingual dictionaries. You can check their format by downloading any of the available language packs; illustrative entries are also shown after this list.
* The SL-to-TL probabilistic bilingual dictionary must contain one entry per line. Each entry must contain the following 3 fields, split by space, in this order: TL word, SL word, probability.
* The TL-to-SL probabilistic bilingual dictionary must contain one entry per line. Each entry must contain the following 3 fields, split by space, in this order: SL word, TL word, probability.
* We recommend filtering out entries with a very low probability: removing those with a probability 10 times lower than the maximum translation probability for each word speeds up the process and does not decrease accuracy.
* Prior to inferring the probabilistic dictionaries, sentences must be tokenized with the Moses tokenizer (with the `-a` flag) and lowercased.
* You can use Moses and MGIZA++ to obtain probabilistic dictionaries from a parallel corpus.
* Please note that both target and source words in probabilistic bilingual dictionaries must be single words.
* Gzipped lists of monolingual word frequencies. You can check their format by downloading any of the available language packs.
* The SL list of word frequencies with one entry per line. Each entry must contain the following 2 fields, split by space, in this order: word frequency (number of times a word appears in text), SL word.
* The TL list of word frequencies with one entry per line. Each entry must contain the following 2 fields, split by space, in this order: word frequency (number of times a word appears in text), TL word.
* These lists can easily be obtained from a monolingual corpus (i.e. newscrawl or the same text used to train probabilistic bilingual dictionaries) and a command line in bash:
```bash
$ cat monolingual.SL \
| sacremoses -l SL tokenize -x \
| awk '{print tolower($0)}' \
| tr ' ' '\n' \
| LC_ALL=C sort | uniq -c \
| LC_ALL=C sort -nr \
| grep -v '[[:space:]]*1' \
| gzip > wordfreq-SL.gz
$ cat monolingual.TL \
| sacremoses -l TL tokenize -x \
| awk '{print tolower($0)}' \
| tr ' ' '\n' \
| LC_ALL=C sort | uniq -c \
| LC_ALL=C sort -nr \
| grep -v '[[:space:]]*1' \
| gzip > wordfreq-TL.gz
```
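For reference, a couple of made-up entries matching the formats described in the list above (taking English as SL and Spanish as TL; the probabilities and counts are purely illustrative):
```
# SL-to-TL dictionary entry: TL word, SL word, probability
casa house 0.47
# SL word frequency entry: frequency, SL word
10325 house
```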
Optionally, if you want the classifier to include a porn filter, you must also provide:
* File with training dataset for porn removal classifier. Each sentence must contain at the beginning the `__label__negative` or `__label__positive` according to FastText convention. It should be lowercased and tokenized.
Optionally, if you want the classifier to include an improved fluency filter based on language models, you must also provide:
* A monolingual corpus made ONLY of noisy sentences in the SL (100k sentences is the recommended size)
* A monolingual corpus made ONLY of noisy sentences in the TL (100k sentences is the recommended size)
If not provided, since Bicleaner `0.13`, noisy corpora are produced synthetically from the training corpus.
Moreover, **`lmplz`, the command to train a KenLM language model must be in `PATH`**. See https://github.com/kpu/kenlm for instructions about its compilation and installation.
In principle, if you want to use Bicleaner to clean a partially noisy corpus, it could be difficult to find a corpus made solely of noisy sentences. Fortunately, there are two options available with Bicleaner:
### Extracting noisy sentences from an existing corpus with heuristic rules
Given a parallel corpus, you use `bicleaner-hardrules` to extract some of its noisiest sentences using heuristic rules by running the following command:
```bash
bicleaner-hardrules [-h]
[--annotated_output]
-s SOURCE_LANG
-t TARGET_LANG
[--tmp_dir TMP_DIR]
[-b BLOCK_SIZE]
[-p PROCESSES]
[--disable_lang_ident]
[--disable_minimal_length]
[--header]
[--scol SCOL]
[--tcol TCOL]
[--disable_lm_filter]
[--disable_porn_removal]
[--metadata METADATA]
[--lm_threshold LM_THRESHOLD]
[-q]
[--debug]
[--logfile LOGFILE]
[input]
[output]
```
where `INPUT_FILE` contains a sentence-aligned parallel corpus, with a sentence pair per line. Sentences are split by tab. `OUTPUT_FILE` will contain all the input sentences, with an extra score column with `0` (if the sentence is noisy and should be discarded) or `1` (if the sentence is ok). When the `--annotated_output` flag is in use, `OUTPUT_FILE` will contain another extra column, specifying the heuristic rule applied when deciding to discard each sentence (or `keep`, if the sentence is ok and should not be discarded). If the `--disable_lang_ident` flag is in use, rules that require language identification are not used. `--scol` and `--tcol` allow you to indicate which columns contain the source and target sentences in the input file (default: `1` and `2`, respectively).
In order to use the LM filtering and/or porn removal, you must provide the `--metadata` parameter (that is, the .yaml file generated by Bicleaner training).
To disable LM filtering and/or porn removal, just use the `--disable_lm_filter` and/or `--disable_porn_removal` flags.
You can then obtain the monolingual noisy corpora by "cutting" the appropriate columns (after running `bicleaner-hardrules` with the `--annotated_output` flag). Assuming scol=1 and tcol=2, and no more columns in the input corpus (so the hardrules score is the 3rd column in the output):
```bash
cat OUTPUT_FILE | awk -F'\t' '{if ($3 == 0) print $1 }' > MONOLINGUAL_NOISY.SOURCE_LANG
cat OUTPUT_FILE | awk -F'\t' '{if ($3 == 0) print $2 }' > MONOLINGUAL_NOISY.TARGET_LANG
```
### Building synthetic noisy sentences
```bash
cat TRAINING_CORPUS | cut -f1 | python3.7 bicleaner/utils/shuffle.py - > MONOLINGUAL_NOISY.SOURCE_LANG
cat TRAINING_CORPUS | cut -f2 | python3.7 bicleaner/utils/shuffle.py - > MONOLINGUAL_NOISY.TARGET_LANG
```
Since `0.13`, if no noisy corpora are provided, they are produced by Bicleaner training itself, so this parameter has become optional.
### Parameters
It can be used as follows. Note that the parameters `--noisy_examples_file_sl`, `--noisy_examples_file_tl`, `--lm_file_sl`, `--lm_file_tl`, are mandatory if you want to enable improved fluency filter based on language models (recommended).
```bash
bicleaner-train [-h]
-m METADATA
-c CLASSIFIER
-s SOURCE_LANG
-t TARGET_LANG
-d SOURCE_DICTIONARY
-D TARGET_DICTIONARY
-f SOURCE_WORD_FREQS
-F TARGET_WORD_FREQS
[-S SOURCE_TOKENIZER_COMMAND]
[-T TARGET_TOKENIZER_COMMAND]
[--normalize_by_length]
[--treat_oovs]
[--qmax_limit QMAX_LIMIT]
[--disable_features_quest]
[--classifier_type {mlp,svm,nn,nn1,adaboost,random_forest,extra_trees}]
[--dump_features DUMP_FEATURES]
[-b BLOCK_SIZE]
[-p PROCESSES]
[--wrong_examples_file WRONG_EXAMPLES_FILE]
[--features_version FEATURES_VERSION]
[--disable_lang_ident]
[--seed SEED]
[--relative_paths]
[--noisy_examples_file_sl NOISY_EXAMPLES_FILE_SL]
[--noisy_examples_file_tl NOISY_EXAMPLES_FILE_TL]
[--lm_dev_size LM_DEV_SIZE]
[--lm_file_sl LM_FILE_SL]
[--lm_file_tl LM_FILE_TL]
[--lm_training_file_sl LM_TRAINING_FILE_SL]
[--lm_training_file_tl LM_TRAINING_FILE_TL]
[--lm_clean_examples_file_sl LM_CLEAN_EXAMPLES_FILE_SL]
[--lm_clean_examples_file_tl LM_CLEAN_EXAMPLES_FILE_TL]
[--porn_removal_train PORN_REMOVAL_TRAIN]
[--porn_removal_test PORN_REMOVAL_TEST]
[--porn_removal_file PORN_REMOVAL_FILE]
[--porn_removal_side {sl,tl}]
[-q] [--debug] [--logfile LOGFILE]
[input]
```
* positional arguments:
* `input`: Tab-separated bilingual input file (default: Standard input)(line format: SOURCE_SENTENCE TARGET_SENTENCE, tab-separated)
* optional arguments:
* `-h, --help`: show this help message and exit
* Mandatory:
* `-m METADATA, --metadata METADATA`: Output training metadata (YAML file) that will be created after training.
* `-c CLASSIFIER, --classifier CLASSIFIER`: Classifier data file that will be created after training.
* `-s SOURCE_LANG, --source_lang SOURCE_LANG`: Source language code
* `-t TARGET_LANG, --target_lang TARGET_LANG`: Target language code
* `-d SOURCE_TO_TARGET_DICTIONARY, --source_dictionary SOURCE_TO_TARGET_DICTIONARY`: SL-to-TL gzipped probabilistic dictionary
* `-D TARGET_TO_SOURCE_DICTIONARY, --target_dictionary TARGET_TO_SOURCE_DICTIONARY`: TL-to-SL gzipped probabilistic dictionary
* `-f SOURCE_WORD_FREQ_DICTIONARY, --source_word_freqs SOURCE_WORD_FREQ_DICTIONARY`: SL gzipped word frequencies dictionary
* `-F TARGET_WORD_FREQ_DICTIONARY, --target_word_freqs TARGET_WORD_FREQ_DICTIONARY`: TL gzipped word frequencies dictionary
* Options:
* `-S SOURCE_TOKENIZER_COMMAND`: Source language tokenizer full command (including flags if needed). If not given, Sacremoses tokenizer is used (with `escape=False` option).
* `-T TARGET_TOKENIZER_COMMAND`: Target language tokenizer full command (including flags if needed). If not given, Sacremoses tokenizer is used (with `escape=False` option).
* `--normalize_by_length`: Normalize by length in qmax dict feature
* `--treat_oovs`: Special treatment for OOVs in qmax dict feature
* `--qmax_limit`: Number of max target words to be taken into account, sorted by length (default: 20)
* `--disable_features_quest`: Disable less important features
* `--classifier_type {svm,nn,nn1,adaboost,random_forest,extra_trees}`: Classifier type (default: extra_trees)
* `--dump_features DUMP_FEATURES`: Dump training features to file (default: None)
* `-b BLOCK_SIZE, --block_size BLOCK_SIZE`: Sentence pairs per block (default: 10000)
* `-p PROCESSES, --processes PROCESSES`: Number of processes to use (default: all CPUs minus one)
* `--wrong_examples_file WRONG_EXAMPLES_FILE`: File with wrong examples extracted to replace the synthetic examples from method used by default (default: None)
* `--features_version FEATURES_VERSION`: Version of the feature (default: extracted from the features.py file)
* `--disable_lang_ident`: Don't apply features that use language detection (default: False). Useful when the language in use is too similar to other languages, making automatic language identification unreliable.
* `--relative_paths`: Ask training to save model files by relative path if they are in the same directory as metadata. Useful if you are going to train distributable models. (default: False)
* `--noisy_examples_file_sl NOISY_EXAMPLES_FILE_SL`: File with noisy text in the SL. These are used to estimate the perplexity of noisy text. (Optional)
* `--noisy_examples_file_tl NOISY_EXAMPLES_FILE_TL`: File with noisy text in the TL. These are used to estimate the perplexity of noisy text. (Optional)
* `--lm_dev_size SIZE`: Number of sentences to be removed from clean text before training LMs. These are used to estimate the perplexity of clean text. (default: 2000)
* `--lm_file_sl LM_FILE_SL`: Output file with the created SL language model. This file should be placed in the same directory as the YAML training metadata, as they are usually distributed together.
* `--lm_file_tl LM_FILE_TL`: Output file with the created TL language model. This file should be placed in the same directory as the YAML training metadata, as they are usually distributed together.
* `--lm_training_file_sl LM_TRAINING_FILE_SL`: SL text from which the SL LM is trained. If this parameter is not specified, SL LM is trained from the SL side of the input file, after removing --lm_dev_size sentences.
* `--lm_training_file_tl LM_TRAINING_FILE_TL`: TL text from which the TL LM is trained. If this parameter is not specified, TL LM is trained from the TL side of the input file, after removing --lm_dev_size sentences.
* `--lm_clean_examples_file_sl LM_CLEAN_EXAMPLES_FILE_SL`: File with clean text in the SL. Used to estimate the perplexity of clean text. This option must be used together with --lm_training_file_sl and both files must not have common sentences. This option replaces --lm_dev_size.
* `--lm_clean_examples_file_tl LM_CLEAN_EXAMPLES_FILE_TL`: File with clean text in the TL. Used to estimate the perplexity of clean text. This option must be used together with --lm_training_file_tl and both files must not have common sentences. This option replaces --lm_dev_size.
* `--porn_removal_train PORN_REMOVAL_TRAIN`: File with training dataset for porn removal classifier. Each sentence must contain at the beginning the `'__label__negative'` or `'__label__positive'` according to FastText [convention](https://fasttext.cc/docs/en/supervised-tutorial.html#getting-and-preparing-the-data). It should be lowercased and tokenized.
* `--porn_removal_test PORN_REMOVAL_TEST`: Test set to compute precision and accuracy of the porn removal classifier.
* `--porn_removal_file PORN_REMOVAL_FILE`: Porn removal classifier output file.
* `--porn_removal_side {sl,tl}`: Whether the porn removal should be applied at the source or at the target language. (default: sl)
* Logging:
* `-q, --quiet`: Silent logging mode (default: False)
* `--debug`: Debug logging mode (default: False)
* `--logfile LOGFILE`: Store log to a file (default: <_io.TextIOWrapper name='<stderr>' mode='w' encoding='UTF-8'>)
### Example
```bash
bicleaner-train \
corpus.en-cs.train\
--normalize_by_length \
-s en \
-t cs \
-d dict-en-cs.gz \
-D dict-cs-en.gz \
-f wordfreqs-en.gz \
-F wordfreqs-cs.gz \
-c en-cs.classifier \
--lm_training_file_sl lmtrain.en-cs.en --lm_training_file_tl lmtrain.en-cs.cs \
--lm_file_sl model.en-cs.en --lm_file_tl model.en-cs.cs \
--porn_removal_train porn-removal.txt.en --porn_removal_file porn-model.en \
-m training.en-cs.yaml \
```
This will train an Extra Trees classifier for English-Czech using the corpus corpus.en-cs.train, the probabilistic dictionaries `dict-en-cs.gz` and `dict-cs-en.gz`, and the word frequency dictionaries `wordfreqs-en.gz` and `wordfreqs-cs.gz`.
This training will use 50000 good and 50000 bad examples.
The classifier data will be stored in `en-cs.classifier`, with the metadata in `training.en-cs.yaml`. The improved fluency language models will be `model.en-cs.en` and `model.en-cs.cs`, and the porn filter model will be `porn-model.en`.
The generated .yaml file provides the following information, that is useful to get a sense on how good or bad was the training (and is also a needed input file for classifying):
```yml
classifier: en-cs.classifier
classifier_type: extra_trees
source_lang: en
target_lang: cs
source_dictionary: dict-en-cs.gz
target_dictionary: dict-cs-en.gz
source_word_freqs: wordfreqs-en.gz
target_word_freqs: wordfreqs-cs.gz
normalize_by_length: True
qmax_limit: 40
disable_features_quest: True
good_test_histogram: [0, 7, 39, 45, 112, 172, 514, 2199, 6912, 0]
wrong_test_histogram: [14, 4548, 4551, 747, 118, 18, 3, 1, 0, 0]
precision_histogram: [0.5000000, 0.5003502, 0.6475925, 0.9181810, 0.9860683, 0.9977594, 0.9995846, 0.9998903, 1.0000000, nan]
recall_histogram: [1.0000000, 1.0000000, 0.9993000, 0.9954000, 0.9909000, 0.9797000, 0.9625000, 0.9111000, 0.6912000, 0.0000000]
accuracy_histogram: [0.5000000, 0.5007000, 0.7277500, 0.9533500, 0.9884500, 0.9887500, 0.9810500, 0.9555000, 0.8456000, 0.5000000]
length_ratio: 1.0111087
features_version: 4
source_lm: model.en-cs.en
target_lm: model.en-cs.cs
lm_type: CHARACTER
clean_mean_perp: -1.0744755342473238
clean_stddev_perp: 0.18368996884800565
noisy_mean_perp: -3.655791900929066
noisy_stddev_perp: 0.9989343799121657
disable_lang_ident: False
porn_removal_file: porn-model.en
porn_removal_side: sl
```
## Lite version
Although `bicleaner-train` and `bicleaner-classify` make use of parallelization by distributing workload to the available cores, some users might prefer to implement their own parallelization strategies. For that reason, a single-threaded version of the Bicleaner classifier script is provided: `bicleaner-classify-lite`. The usage is exactly the same as for the full version, but omitting the block size (`-b`) and processes (`-p`) parameters.
**Note**: `bicleaner-train-lite` was removed due to the lack of usage by the users and to avoid code duplication.
___

All documents and software contained in this repository reflect only the authors' view. The Innovation and Networks Executive Agency of the European Union is not responsible for any use that may be made of the information it contains.
|
PypiClean
|
/accretion_cli-0.1.0.tar.gz/accretion_cli-0.1.0/src/accretion_cli/_commands/raw/add/artifact_builder.py
|
import sys
import threading
import click
from accretion_common.constants import SOURCE_PREFIX
from ...._templates import artifact_builder as template_builder
from ...._util import Deployment, DeploymentFile
from ...._util.cloudformation import artifacts_bucket, deploy_stack
from ...._util.parameters import try_to_load_deployment_file, try_to_write_deployment_file
from ...._util.s3 import upload_artifact
from ...._util.workers_zip import build_worker_bytes
__all__ = ("add_artifact_builder", "deploy_all_regions")
def _deploy_in_region(*, region: str, deployment: Deployment, workers_zip_data: bytes):
"""Upload the workers data into a region and deploy the artifact builder stack."""
# Upload workers zip to core stack bucket
click.echo(f"Locating artifacts bucket in region {region}")
bucket = artifacts_bucket(region=region, regional_record=deployment)
click.echo(f"Uploading workers zip to {bucket} bucket in {region}")
key = upload_artifact(region=region, bucket=bucket, prefix=SOURCE_PREFIX, artifact_data=workers_zip_data)
click.echo(f"Workers zip uploaded in {region}")
# Deploy artifact builder in region
template = template_builder.build()
click.echo(f"Deploying Artifact Builder template in {region}")
stack_name = deploy_stack(
region=region, template=template.to_json(), allow_iam=True, ArtifactBucketName=bucket, WorkersS3Key=key
)
deployment.ArtifactBuilder = stack_name
click.echo(f"Artifact builder stack {stack_name} successfully deployed in {region}")
def deploy_all_regions(*, record: DeploymentFile, workers_zip_data: bytes):
"""Deploy artifact builder in all regions."""
calls = []
for region, regional_record in record.Deployments.items():
if regional_record.Core is None:
click.echo(f"Region {region} in deployment file is not initialized. Skipping.", file=sys.stderr)
continue
if regional_record.ArtifactBuilder is not None:
click.echo(f"Artifact builder is already deployed in {region}. Skipping.", file=sys.stdout)
continue
call = threading.Thread(
target=_deploy_in_region,
kwargs=dict(region=region, deployment=regional_record, workers_zip_data=workers_zip_data),
name=region,
)
calls.append(call)
call.start()
for call in calls:
call.join()
@click.command("artifact-builder")
@click.argument("deployment_file", required=True, type=click.STRING)
def add_artifact_builder(deployment_file: str):
"""Add the artifact builder to an existing deployment described in DEPLOYMENT_FILE."""
record = try_to_load_deployment_file(deployment_file_name=deployment_file)
workers_zip_data = build_worker_bytes()
deploy_all_regions(record=record, workers_zip_data=workers_zip_data)
try_to_write_deployment_file(deployment_filename=deployment_file, record=record)
|
PypiClean
|
/pensando_ent-1.28.1.tar.gz/pensando_ent-1.28.1/pensando_ent/psm/api/preferences_v1_api.py
|
import re # noqa: F401
import sys # noqa: F401
from pensando_ent.psm.api_client import ApiClient, Endpoint as _Endpoint
from pensando_ent.psm.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
import pensando_ent.psm as psm
from pensando_ent.psm.model.api_label import ApiLabel
from pensando_ent.psm.model.api_status import ApiStatus
from pensando_ent.psm.model.preferences_auto_msg_ui_global_settings_watch_helper import PreferencesAutoMsgUIGlobalSettingsWatchHelper
from pensando_ent.psm.model.preferences_ui_global_settings import PreferencesUIGlobalSettings
class PreferencesV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __get_ui_global_settings(
self,
o_tenant,
**kwargs
):
"""Get UIGlobalSettings object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ui_global_settings(o_tenant, async_req=True)
>>> result = thread.get()
Args:
o_tenant (str):
Keyword Args:
t_kind (str): Kind represents the type of the API object.. [optional]
t_api_version (str): APIVersion defines the version of the API object. This can only be set by the server.. [optional]
meta_name (str): Name of the object, unique within a Namespace for scoped objects. Must start and end with alpha numeric and can have alphanumeric, -, _, . Length of string should be between 2 and 64.. [optional]
meta_namespace (str): Namespace of the object, for scoped objects. Must start and end with alpha numeric and can have alphanumeric, -, _, . Length of string should be between 2 and 64.. [optional]
meta_generation_id (str): GenerationID is the generation Id for the object. This is incremented anytime there is an update to the user intent, including Spec update and any update to ObjectMeta. System generated and updated, not updatable by user.. [optional]
meta_resource_version (str): Resource version in the object store. This is updated anytime there is any change to the object. System generated and updated, not updatable by user.. [optional]
meta_uuid (str): UUID is the unique identifier for the object. This is generated on creation of the object. System generated, not updatable by user.. [optional]
meta_creation_time (datetime): CreationTime is the creation time of the object. System generated and updated, not updatable by user.. [optional]
meta_mod_time (datetime): ModTime is the Last Modification time of the object. System generated and updated, not updatable by user.. [optional]
meta_self_link (str): SelfLink is a link for accessing this object. When the object is served from the API-GW it is the URI path. Example: - \"/v1/tenants/tenants/tenant2\" System generated and updated, not updatable by user.. [optional]
spec_style_options (str): Can contain any UI style preferences. Provide typing through UI code.. [optional]
idle_timeout_duration (str): Time of inactivity after which user logout countdown warning pops up. Should be a valid time duration.. [optional]
idle_timeout_warning_time (str): Warning duration before logout and after system idle time. Should be a valid time duration of at most 5m0s.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PreferencesUIGlobalSettings
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['o_tenant'] = \
o_tenant
return self.call_with_http_info(**kwargs)
self.get_ui_global_settings = _Endpoint(
settings={
'response_type': (PreferencesUIGlobalSettings,),
'auth': [],
'endpoint_path': '/configs/preferences/v1/tenant/{O.Tenant}/uiglobalsettings',
'operation_id': 'get_ui_global_settings',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'o_tenant',
't_kind',
't_api_version',
'meta_name',
'meta_namespace',
'meta_generation_id',
'meta_resource_version',
'meta_uuid',
'meta_creation_time',
'meta_mod_time',
'meta_self_link',
'spec_style_options',
'idle_timeout_duration',
'idle_timeout_warning_time',
],
'required': [
'o_tenant',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'o_tenant':
(str,),
't_kind':
(str,),
't_api_version':
(str,),
'meta_name':
(str,),
'meta_namespace':
(str,),
'meta_generation_id':
(str,),
'meta_resource_version':
(str,),
'meta_uuid':
(str,),
'meta_creation_time':
(datetime,),
'meta_mod_time':
(datetime,),
'meta_self_link':
(str,),
'spec_style_options':
(str,),
'idle_timeout_duration':
(str,),
'idle_timeout_warning_time':
(str,),
},
'attribute_map': {
'o_tenant': 'O.Tenant',
't_kind': 'T.kind',
't_api_version': 'T.api-version',
'meta_name': 'meta.name',
'meta_namespace': 'meta.namespace',
'meta_generation_id': 'meta.generation-id',
'meta_resource_version': 'meta.resource-version',
'meta_uuid': 'meta.uuid',
'meta_creation_time': 'meta.creation-time',
'meta_mod_time': 'meta.mod-time',
'meta_self_link': 'meta.self-link',
'spec_style_options': 'spec.style-options',
'idle_timeout_duration': 'idle-timeout.duration',
'idle_timeout_warning_time': 'idle-timeout.warning-time',
},
'location_map': {
'o_tenant': 'path',
't_kind': 'query',
't_api_version': 'query',
'meta_name': 'query',
'meta_namespace': 'query',
'meta_generation_id': 'query',
'meta_resource_version': 'query',
'meta_uuid': 'query',
'meta_creation_time': 'query',
'meta_mod_time': 'query',
'meta_self_link': 'query',
'spec_style_options': 'query',
'idle_timeout_duration': 'query',
'idle_timeout_warning_time': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_ui_global_settings
)
def __get_ui_global_settings1(
self,
**kwargs
):
"""Get UIGlobalSettings object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ui_global_settings1(async_req=True)
>>> result = thread.get()
Keyword Args:
t_kind (str): Kind represents the type of the API object.. [optional]
t_api_version (str): APIVersion defines the version of the API object. This can only be set by the server.. [optional]
meta_name (str): Name of the object, unique within a Namespace for scoped objects. Must start and end with alpha numeric and can have alphanumeric, -, _, . Length of string should be between 2 and 64.. [optional]
meta_tenant (str): Tenant to which the object belongs to. This can be automatically filled in many cases based on the tenant the user, who created the object, belongs to. Must be alpha-numerics. Length of string should be between 1 and 48.. [optional]
meta_namespace (str): Namespace of the object, for scoped objects. Must start and end with alpha numeric and can have alphanumeric, -, _, . Length of string should be between 2 and 64.. [optional]
meta_generation_id (str): GenerationID is the generation Id for the object. This is incremented anytime there is an update to the user intent, including Spec update and any update to ObjectMeta. System generated and updated, not updatable by user.. [optional]
meta_resource_version (str): Resource version in the object store. This is updated anytime there is any change to the object. System generated and updated, not updatable by user.. [optional]
meta_uuid (str): UUID is the unique identifier for the object. This is generated on creation of the object. System generated, not updatable by user.. [optional]
meta_creation_time (datetime): CreationTime is the creation time of the object. System generated and updated, not updatable by user.. [optional]
meta_mod_time (datetime): ModTime is the Last Modification time of the object. System generated and updated, not updatable by user.. [optional]
meta_self_link (str): SelfLink is a link for accessing this object. When the object is served from the API-GW it is the URI path. Example: - \"/v1/tenants/tenants/tenant2\" System generated and updated, not updatable by user.. [optional]
spec_style_options (str): Can contain any UI style preferences. Provide typing through UI code.. [optional]
idle_timeout_duration (str): Time of inactivity after which user logout countdown warning pops up. Should be a valid time duration.. [optional]
idle_timeout_warning_time (str): Warning duration before logout and after system idle time. Should be a valid time duration of at most 5m0s.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PreferencesUIGlobalSettings
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_ui_global_settings1 = _Endpoint(
settings={
'response_type': (PreferencesUIGlobalSettings,),
'auth': [],
'endpoint_path': '/configs/preferences/v1/uiglobalsettings',
'operation_id': 'get_ui_global_settings1',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
't_kind',
't_api_version',
'meta_name',
'meta_tenant',
'meta_namespace',
'meta_generation_id',
'meta_resource_version',
'meta_uuid',
'meta_creation_time',
'meta_mod_time',
'meta_self_link',
'spec_style_options',
'idle_timeout_duration',
'idle_timeout_warning_time',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
't_kind':
(str,),
't_api_version':
(str,),
'meta_name':
(str,),
'meta_tenant':
(str,),
'meta_namespace':
(str,),
'meta_generation_id':
(str,),
'meta_resource_version':
(str,),
'meta_uuid':
(str,),
'meta_creation_time':
(datetime,),
'meta_mod_time':
(datetime,),
'meta_self_link':
(str,),
'spec_style_options':
(str,),
'idle_timeout_duration':
(str,),
'idle_timeout_warning_time':
(str,),
},
'attribute_map': {
't_kind': 'T.kind',
't_api_version': 'T.api-version',
'meta_name': 'meta.name',
'meta_tenant': 'meta.tenant',
'meta_namespace': 'meta.namespace',
'meta_generation_id': 'meta.generation-id',
'meta_resource_version': 'meta.resource-version',
'meta_uuid': 'meta.uuid',
'meta_creation_time': 'meta.creation-time',
'meta_mod_time': 'meta.mod-time',
'meta_self_link': 'meta.self-link',
'spec_style_options': 'spec.style-options',
'idle_timeout_duration': 'idle-timeout.duration',
'idle_timeout_warning_time': 'idle-timeout.warning-time',
},
'location_map': {
't_kind': 'query',
't_api_version': 'query',
'meta_name': 'query',
'meta_tenant': 'query',
'meta_namespace': 'query',
'meta_generation_id': 'query',
'meta_resource_version': 'query',
'meta_uuid': 'query',
'meta_creation_time': 'query',
'meta_mod_time': 'query',
'meta_self_link': 'query',
'spec_style_options': 'query',
'idle_timeout_duration': 'query',
'idle_timeout_warning_time': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_ui_global_settings1
)
def __label_ui_global_settings(
self,
o_tenant,
body,
**kwargs
):
"""Label UIGlobalSettings object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.label_ui_global_settings(o_tenant, body, async_req=True)
>>> result = thread.get()
Args:
o_tenant (str):
body (ApiLabel):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PreferencesUIGlobalSettings
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['o_tenant'] = \
o_tenant
kwargs['body'] = \
body
return self.call_with_http_info(**kwargs)
self.label_ui_global_settings = _Endpoint(
settings={
'response_type': (PreferencesUIGlobalSettings,),
'auth': [],
'endpoint_path': '/configs/preferences/v1/tenant/{O.Tenant}/uiglobalsettings/label',
'operation_id': 'label_ui_global_settings',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'o_tenant',
'body',
],
'required': [
'o_tenant',
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'o_tenant':
(str,),
'body':
(ApiLabel,),
},
'attribute_map': {
'o_tenant': 'O.Tenant',
},
'location_map': {
'o_tenant': 'path',
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__label_ui_global_settings
)
def __label_ui_global_settings1(
self,
body,
**kwargs
):
"""Label UIGlobalSettings object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.label_ui_global_settings1(body, async_req=True)
>>> result = thread.get()
Args:
body (ApiLabel):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PreferencesUIGlobalSettings
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['body'] = \
body
return self.call_with_http_info(**kwargs)
self.label_ui_global_settings1 = _Endpoint(
settings={
'response_type': (PreferencesUIGlobalSettings,),
'auth': [],
'endpoint_path': '/configs/preferences/v1/uiglobalsettings/label',
'operation_id': 'label_ui_global_settings1',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'body',
],
'required': [
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'body':
(ApiLabel,),
},
'attribute_map': {
},
'location_map': {
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__label_ui_global_settings1
)
def __update_ui_global_settings(
self,
o_tenant,
body,
**kwargs
):
"""Update UIGlobalSettings object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_ui_global_settings(o_tenant, body, async_req=True)
>>> result = thread.get()
Args:
o_tenant (str):
body (PreferencesUIGlobalSettings):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PreferencesUIGlobalSettings
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['o_tenant'] = \
o_tenant
kwargs['body'] = \
body
return self.call_with_http_info(**kwargs)
self.update_ui_global_settings = _Endpoint(
settings={
'response_type': (PreferencesUIGlobalSettings,),
'auth': [],
'endpoint_path': '/configs/preferences/v1/tenant/{O.Tenant}/uiglobalsettings',
'operation_id': 'update_ui_global_settings',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'o_tenant',
'body',
],
'required': [
'o_tenant',
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'o_tenant':
(str,),
'body':
(PreferencesUIGlobalSettings,),
},
'attribute_map': {
'o_tenant': 'O.Tenant',
},
'location_map': {
'o_tenant': 'path',
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__update_ui_global_settings
)
def __update_ui_global_settings1(
self,
body,
**kwargs
):
"""Update UIGlobalSettings object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_ui_global_settings1(body, async_req=True)
>>> result = thread.get()
Args:
body (PreferencesUIGlobalSettings):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PreferencesUIGlobalSettings
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['body'] = \
body
return self.call_with_http_info(**kwargs)
self.update_ui_global_settings1 = _Endpoint(
settings={
'response_type': (PreferencesUIGlobalSettings,),
'auth': [],
'endpoint_path': '/configs/preferences/v1/uiglobalsettings',
'operation_id': 'update_ui_global_settings1',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'body',
],
'required': [
'body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'body':
(PreferencesUIGlobalSettings,),
},
'attribute_map': {
},
'location_map': {
'body': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__update_ui_global_settings1
)
def __watch_ui_global_settings(
self,
o_tenant,
**kwargs
):
"""Watch UIGlobalSettings objects. Supports WebSockets or HTTP long poll # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.watch_ui_global_settings(o_tenant, async_req=True)
>>> result = thread.get()
Args:
o_tenant (str):
Keyword Args:
o_name (str): Name of the object, unique within a Namespace for scoped objects. Must start and end with alpha numeric and can have alphanumeric, -, _, . Length of string should be between 2 and 64.. [optional]
o_namespace (str): Namespace of the object, for scoped objects. Must start and end with alpha numeric and can have alphanumeric, -, _, . Length of string should be between 2 and 64.. [optional]
o_generation_id (str): GenerationID is the generation Id for the object. This is incremented anytime there is an update to the user intent, including Spec update and any update to ObjectMeta. System generated and updated, not updatable by user.. [optional]
o_resource_version (str): Resource version in the object store. This is updated anytime there is any change to the object. System generated and updated, not updatable by user.. [optional]
o_uuid (str): UUID is the unique identifier for the object. This is generated on creation of the object. System generated, not updatable by user.. [optional]
o_creation_time (datetime): CreationTime is the creation time of the object. System generated and updated, not updatable by user.. [optional]
o_mod_time (datetime): ModTime is the Last Modification time of the object. System generated and updated, not updatable by user.. [optional]
o_self_link (str): SelfLink is a link for accessing this object. When the object is served from the API-GW it is the URI path. Example: - \"/v1/tenants/tenants/tenant2\" System generated and updated, not updatable by user.. [optional]
label_selector (str): LabelSelector to select on labels in list or watch results.. [optional]
field_selector (str): FieldSelector to select on field values in list or watch results.. [optional]
field_change_selector ([str]): FieldChangeSelector specifies to generate a watch notification on change in field(s) specified.. [optional]
_from (int): From represents the start index number (1 based - first object starts from index 1), of the results list. The results returned would be in the range [from ... (from + (max-results - 1))]. If From = 0, the server will attempt to return all the results in the list without pagination.. [optional]
max_results (int): MaxResults is the maximum number of results to be returned as part of the response, per page If MaxResults is more than the maximum number of results per page supported by the server, the server will return an err If MaxResults is 0, the server will return all the results without pagination.. [optional]
sort_order (str): order to sort List results in.. [optional]
meta_only (bool): If MetaOnly is set to true, the watch event notification that matches the watch criteria will not contain the full object. It will only contain the information about the object that changed, i.e. which object and what changed. MetaOnly is not set by default.. [optional]
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PreferencesAutoMsgUIGlobalSettingsWatchHelper
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['o_tenant'] = \
o_tenant
return self.call_with_http_info(**kwargs)
self.watch_ui_global_settings = _Endpoint(
settings={
'response_type': (PreferencesAutoMsgUIGlobalSettingsWatchHelper,),
'auth': [],
'endpoint_path': '/configs/preferences/v1/watch/tenant/{O.Tenant}/uiglobalsettings',
'operation_id': 'watch_ui_global_settings',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'o_tenant',
'o_name',
'o_namespace',
'o_generation_id',
'o_resource_version',
'o_uuid',
'o_creation_time',
'o_mod_time',
'o_self_link',
'label_selector',
'field_selector',
'field_change_selector',
'_from',
'max_results',
'sort_order',
'meta_only',
],
'required': [
'o_tenant',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'o_tenant':
(str,),
'o_name':
(str,),
'o_namespace':
(str,),
'o_generation_id':
(str,),
'o_resource_version':
(str,),
'o_uuid':
(str,),
'o_creation_time':
(datetime,),
'o_mod_time':
(datetime,),
'o_self_link':
(str,),
'label_selector':
(str,),
'field_selector':
(str,),
'field_change_selector':
([str],),
'_from':
(int,),
'max_results':
(int,),
'sort_order':
(str,),
'meta_only':
(bool,),
},
'attribute_map': {
'o_tenant': 'O.Tenant',
'o_name': 'O.name',
'o_namespace': 'O.namespace',
'o_generation_id': 'O.generation-id',
'o_resource_version': 'O.resource-version',
'o_uuid': 'O.uuid',
'o_creation_time': 'O.creation-time',
'o_mod_time': 'O.mod-time',
'o_self_link': 'O.self-link',
'label_selector': 'label-selector',
'field_selector': 'field-selector',
'field_change_selector': 'field-change-selector',
'_from': 'from',
'max_results': 'max-results',
'sort_order': 'sort-order',
'meta_only': 'meta-only',
},
'location_map': {
'o_tenant': 'path',
'o_name': 'query',
'o_namespace': 'query',
'o_generation_id': 'query',
'o_resource_version': 'query',
'o_uuid': 'query',
'o_creation_time': 'query',
'o_mod_time': 'query',
'o_self_link': 'query',
'label_selector': 'query',
'field_selector': 'query',
'field_change_selector': 'query',
'_from': 'query',
'max_results': 'query',
'sort_order': 'query',
'meta_only': 'query',
},
'collection_format_map': {
'field_change_selector': 'csv',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__watch_ui_global_settings
)
def __watch_ui_global_settings1(
self,
**kwargs
):
"""Watch UIGlobalSettings objects. Supports WebSockets or HTTP long poll # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.watch_ui_global_settings1(async_req=True)
>>> result = thread.get()
Keyword Args:
o_name (str): Name of the object, unique within a Namespace for scoped objects. Must start and end with alpha numeric and can have alphanumeric, -, _, . Length of string should be between 2 and 64.. [optional]
o_tenant (str): Tenant to which the object belongs to. This can be automatically filled in many cases based on the tenant the user, who created the object, belongs to. Must be alpha-numerics. Length of string should be between 1 and 48.. [optional]
o_namespace (str): Namespace of the object, for scoped objects. Must start and end with alpha numeric and can have alphanumeric, -, _, . Length of string should be between 2 and 64.. [optional]
o_generation_id (str): GenerationID is the generation Id for the object. This is incremented anytime there is an update to the user intent, including Spec update and any update to ObjectMeta. System generated and updated, not updatable by user.. [optional]
o_resource_version (str): Resource version in the object store. This is updated anytime there is any change to the object. System generated and updated, not updatable by user.. [optional]
o_uuid (str): UUID is the unique identifier for the object. This is generated on creation of the object. System generated, not updatable by user.. [optional]
o_creation_time (datetime): CreationTime is the creation time of the object. System generated and updated, not updatable by user.. [optional]
o_mod_time (datetime): ModTime is the Last Modification time of the object. System generated and updated, not updatable by user.. [optional]
o_self_link (str): SelfLink is a link for accessing this object. When the object is served from the API-GW it is the URI path. Example: - \"/v1/tenants/tenants/tenant2\" System generated and updated, not updatable by user.. [optional]
label_selector (str): LabelSelector to select on labels in list or watch results.. [optional]
field_selector (str): FieldSelector to select on field values in list or watch results.. [optional]
field_change_selector ([str]): FieldChangeSelector specifies to generate a watch notification on change in field(s) specified.. [optional]
_from (int): From represents the start index number (1 based - first object starts from index 1), of the results list. The results returned would be in the range [from ... (from + (max-results - 1))]. If From = 0, the server will attempt to return all the results in the list without pagination.. [optional]
max_results (int): MaxResults is the maximum number of results to be returned as part of the response, per page If MaxResults is more than the maximum number of results per page supported by the server, the server will return an err If MaxResults is 0, the server will return all the results without pagination.. [optional]
sort_order (str): order to sort List results in.. [optional]
meta_only (bool): If MetaOnly is set to true, the watch event notification that matches the watch criteria will not contain the full object. It will only contain the information about the object that changed, i.e. which object and what changed. MetaOnly is not set by default.. [optional]
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PreferencesAutoMsgUIGlobalSettingsWatchHelper
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.watch_ui_global_settings1 = _Endpoint(
settings={
'response_type': (PreferencesAutoMsgUIGlobalSettingsWatchHelper,),
'auth': [],
'endpoint_path': '/configs/preferences/v1/watch/uiglobalsettings',
'operation_id': 'watch_ui_global_settings1',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'o_name',
'o_tenant',
'o_namespace',
'o_generation_id',
'o_resource_version',
'o_uuid',
'o_creation_time',
'o_mod_time',
'o_self_link',
'label_selector',
'field_selector',
'field_change_selector',
'_from',
'max_results',
'sort_order',
'meta_only',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'o_name':
(str,),
'o_tenant':
(str,),
'o_namespace':
(str,),
'o_generation_id':
(str,),
'o_resource_version':
(str,),
'o_uuid':
(str,),
'o_creation_time':
(datetime,),
'o_mod_time':
(datetime,),
'o_self_link':
(str,),
'label_selector':
(str,),
'field_selector':
(str,),
'field_change_selector':
([str],),
'_from':
(int,),
'max_results':
(int,),
'sort_order':
(str,),
'meta_only':
(bool,),
},
'attribute_map': {
'o_name': 'O.name',
'o_tenant': 'O.tenant',
'o_namespace': 'O.namespace',
'o_generation_id': 'O.generation-id',
'o_resource_version': 'O.resource-version',
'o_uuid': 'O.uuid',
'o_creation_time': 'O.creation-time',
'o_mod_time': 'O.mod-time',
'o_self_link': 'O.self-link',
'label_selector': 'label-selector',
'field_selector': 'field-selector',
'field_change_selector': 'field-change-selector',
'_from': 'from',
'max_results': 'max-results',
'sort_order': 'sort-order',
'meta_only': 'meta-only',
},
'location_map': {
'o_name': 'query',
'o_tenant': 'query',
'o_namespace': 'query',
'o_generation_id': 'query',
'o_resource_version': 'query',
'o_uuid': 'query',
'o_creation_time': 'query',
'o_mod_time': 'query',
'o_self_link': 'query',
'label_selector': 'query',
'field_selector': 'query',
'field_change_selector': 'query',
'_from': 'query',
'max_results': 'query',
'sort_order': 'query',
'meta_only': 'query',
},
'collection_format_map': {
'field_change_selector': 'csv',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__watch_ui_global_settings1
)
|
PypiClean
|
/sandstone-slurm-assist-0.12.0.tar.gz/sandstone-slurm-assist-0.12.0/sandstone_slurm/node_modules/bower/lib/node_modules/uuid/benchmark/benchmark.js
|
try {
var nodeuuid = require('../uuid');
} catch (e) {
console.error('node-uuid require failed - skipping tests');
}
try {
var uuid = require('uuid');
} catch (e) {
console.error('uuid require failed - skipping tests');
}
try {
var uuidjs = require('uuid-js');
} catch (e) {
console.error('uuid-js require failed - skipping tests');
}
var N = 5e5;
function rate(msg, t) {
console.log(msg + ': ' +
(N / (Date.now() - t) * 1e3 | 0) +
' uuids/second');
}
console.log('# v4');
// node-uuid - string form
if (nodeuuid) {
for (var i = 0, t = Date.now(); i < N; i++) nodeuuid.v4();
rate('nodeuuid.v4() - using node.js crypto RNG', t);
for (var i = 0, t = Date.now(); i < N; i++) nodeuuid.v4({rng: nodeuuid.mathRNG});
rate('nodeuuid.v4() - using Math.random() RNG', t);
for (var i = 0, t = Date.now(); i < N; i++) nodeuuid.v4('binary');
rate('nodeuuid.v4(\'binary\')', t);
var buffer = new nodeuuid.BufferClass(16);
for (var i = 0, t = Date.now(); i < N; i++) nodeuuid.v4('binary', buffer);
rate('nodeuuid.v4(\'binary\', buffer)', t);
}
// libuuid - string form
if (uuid) {
for (var i = 0, t = Date.now(); i < N; i++) uuid();
rate('uuid()', t);
for (var i = 0, t = Date.now(); i < N; i++) uuid('binary');
rate('uuid(\'binary\')', t);
}
// uuid-js - string form
if (uuidjs) {
for (var i = 0, t = Date.now(); i < N; i++) uuidjs.create(4);
rate('uuidjs.create(4)', t);
}
// 140byte.es
for (var i = 0, t = Date.now(); i < N; i++) 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g,function(s,r){r=Math.random()*16|0;return (s=='x'?r:r&0x3|0x8).toString(16)});
rate('140byte.es_v4', t);
console.log('');
console.log('# v1');
// node-uuid - v1 string form
if (nodeuuid) {
for (var i = 0, t = Date.now(); i < N; i++) nodeuuid.v1();
rate('nodeuuid.v1()', t);
for (var i = 0, t = Date.now(); i < N; i++) nodeuuid.v1('binary');
rate('nodeuuid.v1(\'binary\')', t);
var buffer = new nodeuuid.BufferClass(16);
for (var i = 0, t = Date.now(); i < N; i++) nodeuuid.v1('binary', buffer);
rate('nodeuuid.v1(\'binary\', buffer)', t);
}
// uuid-js - v1 string form
if (uuidjs) {
for (var i = 0, t = Date.now(); i < N; i++) uuidjs.create(1);
rate('uuidjs.create(1)', t);
}
|
PypiClean
|
/pulumi_aws-6.1.0a1693529760.tar.gz/pulumi_aws-6.1.0a1693529760/pulumi_aws/ses/identity_policy.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IdentityPolicyArgs', 'IdentityPolicy']
@pulumi.input_type
class IdentityPolicyArgs:
def __init__(__self__, *,
identity: pulumi.Input[str],
policy: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a IdentityPolicy resource.
:param pulumi.Input[str] identity: Name or Amazon Resource Name (ARN) of the SES Identity.
:param pulumi.Input[str] policy: JSON string of the policy.
:param pulumi.Input[str] name: Name of the policy.
"""
pulumi.set(__self__, "identity", identity)
pulumi.set(__self__, "policy", policy)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def identity(self) -> pulumi.Input[str]:
"""
Name or Amazon Resource Name (ARN) of the SES Identity.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: pulumi.Input[str]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def policy(self) -> pulumi.Input[str]:
"""
JSON string of the policy.
"""
return pulumi.get(self, "policy")
@policy.setter
def policy(self, value: pulumi.Input[str]):
pulumi.set(self, "policy", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the policy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _IdentityPolicyState:
def __init__(__self__, *,
identity: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
policy: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering IdentityPolicy resources.
:param pulumi.Input[str] identity: Name or Amazon Resource Name (ARN) of the SES Identity.
:param pulumi.Input[str] name: Name of the policy.
:param pulumi.Input[str] policy: JSON string of the policy.
"""
if identity is not None:
pulumi.set(__self__, "identity", identity)
if name is not None:
pulumi.set(__self__, "name", name)
if policy is not None:
pulumi.set(__self__, "policy", policy)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input[str]]:
"""
Name or Amazon Resource Name (ARN) of the SES Identity.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the policy.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def policy(self) -> Optional[pulumi.Input[str]]:
"""
JSON string of the policy.
"""
return pulumi.get(self, "policy")
@policy.setter
def policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy", value)
class IdentityPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
policy: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an SES Identity Policy. More information about SES Sending Authorization Policies can be found in the [SES Developer Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_domain_identity = aws.ses.DomainIdentity("exampleDomainIdentity", domain="example.com")
example_policy_document = aws.iam.get_policy_document_output(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=[
"SES:SendEmail",
"SES:SendRawEmail",
],
resources=[example_domain_identity.arn],
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
identifiers=["*"],
type="AWS",
)],
)])
example_identity_policy = aws.ses.IdentityPolicy("exampleIdentityPolicy",
identity=example_domain_identity.arn,
policy=example_policy_document.json)
```
## Import
Using `pulumi import`, import SES Identity Policies using the identity and policy name, separated by a pipe character (`|`). For example:
```sh
$ pulumi import aws:ses/identityPolicy:IdentityPolicy example 'example.com|example'
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] identity: Name or Amazon Resource Name (ARN) of the SES Identity.
:param pulumi.Input[str] name: Name of the policy.
:param pulumi.Input[str] policy: JSON string of the policy.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IdentityPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an SES Identity Policy. More information about SES Sending Authorization Policies can be found in the [SES Developer Guide](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_domain_identity = aws.ses.DomainIdentity("exampleDomainIdentity", domain="example.com")
example_policy_document = aws.iam.get_policy_document_output(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=[
"SES:SendEmail",
"SES:SendRawEmail",
],
resources=[example_domain_identity.arn],
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
identifiers=["*"],
type="AWS",
)],
)])
example_identity_policy = aws.ses.IdentityPolicy("exampleIdentityPolicy",
identity=example_domain_identity.arn,
policy=example_policy_document.json)
```
## Import
Using `pulumi import`, import SES Identity Policies using the identity and policy name, separated by a pipe character (`|`). For example:
```sh
$ pulumi import aws:ses/identityPolicy:IdentityPolicy example 'example.com|example'
```
:param str resource_name: The name of the resource.
:param IdentityPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IdentityPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
policy: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IdentityPolicyArgs.__new__(IdentityPolicyArgs)
if identity is None and not opts.urn:
raise TypeError("Missing required property 'identity'")
__props__.__dict__["identity"] = identity
__props__.__dict__["name"] = name
if policy is None and not opts.urn:
raise TypeError("Missing required property 'policy'")
__props__.__dict__["policy"] = policy
super(IdentityPolicy, __self__).__init__(
'aws:ses/identityPolicy:IdentityPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
policy: Optional[pulumi.Input[str]] = None) -> 'IdentityPolicy':
"""
Get an existing IdentityPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] identity: Name or Amazon Resource Name (ARN) of the SES Identity.
:param pulumi.Input[str] name: Name of the policy.
:param pulumi.Input[str] policy: JSON string of the policy.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IdentityPolicyState.__new__(_IdentityPolicyState)
__props__.__dict__["identity"] = identity
__props__.__dict__["name"] = name
__props__.__dict__["policy"] = policy
return IdentityPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[str]:
"""
Name or Amazon Resource Name (ARN) of the SES Identity.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the policy.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policy(self) -> pulumi.Output[str]:
"""
JSON string of the policy.
"""
return pulumi.get(self, "policy")
|
PypiClean
|
/ensmallen_graph-0.6.0-cp37-cp37m-manylinux2010_x86_64.whl/ensmallen_graph/datasets/networkrepository/jagmesh2.py
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def Jagmesh2(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the jagmesh2 graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of jagmesh2 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 08:12:42.592069
The undirected graph jagmesh2 has 1009 nodes and 3937 unweighted edges,
of which 1009 are self-loops. The graph is sparse as it has a density of
0.00675 and is connected, as it has a single component. The graph median
node degree is 7, the mean node degree is 6.80, and the node degree mode
is 7. The top 5 most central nodes are 1005 (degree 7), 1002 (degree 7),
1001 (degree 7), 998 (degree 7) and 997 (degree 7).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import Jagmesh2
# Then load the graph
graph = Jagmesh2()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="Jagmesh2",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
PypiClean
|
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/Tooltip.js
|
require({cache:{"url:dijit/templates/Tooltip.html":"<div class=\"dijitTooltip dijitTooltipLeft\" id=\"dojoTooltip\"\n\t><div class=\"dijitTooltipContainer dijitTooltipContents\" data-dojo-attach-point=\"containerNode\" role='alert'></div\n\t><div class=\"dijitTooltipConnector\" data-dojo-attach-point=\"connectorNode\"></div\n></div>\n"}});
define("dijit/Tooltip",["dojo/_base/array","dojo/_base/declare","dojo/_base/fx","dojo/dom","dojo/dom-class","dojo/dom-geometry","dojo/dom-style","dojo/_base/lang","dojo/_base/sniff","dojo/_base/window","./_base/manager","./place","./_Widget","./_TemplatedMixin","./BackgroundIframe","dojo/text!./templates/Tooltip.html","."],function(_1,_2,fx,_3,_4,_5,_6,_7,_8,_9,_a,_b,_c,_d,_e,_f,_10){
var _11=_2("dijit._MasterTooltip",[_c,_d],{duration:_a.defaultDuration,templateString:_f,postCreate:function(){
_9.body().appendChild(this.domNode);
this.bgIframe=new _e(this.domNode);
this.fadeIn=fx.fadeIn({node:this.domNode,duration:this.duration,onEnd:_7.hitch(this,"_onShow")});
this.fadeOut=fx.fadeOut({node:this.domNode,duration:this.duration,onEnd:_7.hitch(this,"_onHide")});
},show:function(_12,_13,_14,rtl,_15){
if(this.aroundNode&&this.aroundNode===_13&&this.containerNode.innerHTML==_12){
return;
}
this.domNode.width="auto";
if(this.fadeOut.status()=="playing"){
this._onDeck=arguments;
return;
}
this.containerNode.innerHTML=_12;
this.set("textDir",_15);
this.containerNode.align=rtl?"right":"left";
var pos=_b.around(this.domNode,_13,_14&&_14.length?_14:_16.defaultPosition,!rtl,_7.hitch(this,"orient"));
var _17=pos.aroundNodePos;
if(pos.corner.charAt(0)=="M"&&pos.aroundCorner.charAt(0)=="M"){
this.connectorNode.style.top=_17.y+((_17.h-this.connectorNode.offsetHeight)>>1)-pos.y+"px";
this.connectorNode.style.left="";
}else{
if(pos.corner.charAt(1)=="M"&&pos.aroundCorner.charAt(1)=="M"){
this.connectorNode.style.left=_17.x+((_17.w-this.connectorNode.offsetWidth)>>1)-pos.x+"px";
}
}
_6.set(this.domNode,"opacity",0);
this.fadeIn.play();
this.isShowingNow=true;
this.aroundNode=_13;
},orient:function(_18,_19,_1a,_1b,_1c){
this.connectorNode.style.top="";
var _1d=_1b.w-this.connectorNode.offsetWidth;
_18.className="dijitTooltip "+{"MR-ML":"dijitTooltipRight","ML-MR":"dijitTooltipLeft","TM-BM":"dijitTooltipAbove","BM-TM":"dijitTooltipBelow","BL-TL":"dijitTooltipBelow dijitTooltipABLeft","TL-BL":"dijitTooltipAbove dijitTooltipABLeft","BR-TR":"dijitTooltipBelow dijitTooltipABRight","TR-BR":"dijitTooltipAbove dijitTooltipABRight","BR-BL":"dijitTooltipRight","BL-BR":"dijitTooltipLeft"}[_19+"-"+_1a];
this.domNode.style.width="auto";
var _1e=_5.getContentBox(this.domNode);
var _1f=Math.min((Math.max(_1d,1)),_1e.w);
var _20=_1f<_1e.w;
this.domNode.style.width=_1f+"px";
if(_20){
this.containerNode.style.overflow="auto";
var _21=this.containerNode.scrollWidth;
this.containerNode.style.overflow="visible";
if(_21>_1f){
_21=_21+_6.get(this.domNode,"paddingLeft")+_6.get(this.domNode,"paddingRight");
this.domNode.style.width=_21+"px";
}
}
if(_1a.charAt(0)=="B"&&_19.charAt(0)=="B"){
var mb=_5.getMarginBox(_18);
var _22=this.connectorNode.offsetHeight;
if(mb.h>_1b.h){
var _23=_1b.h-((_1c.h+_22)>>1);
this.connectorNode.style.top=_23+"px";
this.connectorNode.style.bottom="";
}else{
this.connectorNode.style.bottom=Math.min(Math.max(_1c.h/2-_22/2,0),mb.h-_22)+"px";
this.connectorNode.style.top="";
}
}else{
this.connectorNode.style.top="";
this.connectorNode.style.bottom="";
}
return Math.max(0,_1e.w-_1d);
},_onShow:function(){
if(_8("ie")){
this.domNode.style.filter="";
}
},hide:function(_24){
if(this._onDeck&&this._onDeck[1]==_24){
this._onDeck=null;
}else{
if(this.aroundNode===_24){
this.fadeIn.stop();
this.isShowingNow=false;
this.aroundNode=null;
this.fadeOut.play();
}else{
}
}
},_onHide:function(){
this.domNode.style.cssText="";
this.containerNode.innerHTML="";
if(this._onDeck){
this.show.apply(this,this._onDeck);
this._onDeck=null;
}
},_setAutoTextDir:function(_25){
this.applyTextDir(_25,_8("ie")?_25.outerText:_25.textContent);
_1.forEach(_25.children,function(_26){
this._setAutoTextDir(_26);
},this);
},_setTextDirAttr:function(_27){
this._set("textDir",typeof _27!="undefined"?_27:"");
if(_27=="auto"){
this._setAutoTextDir(this.containerNode);
}else{
this.containerNode.dir=this.textDir;
}
}});
_10.showTooltip=function(_28,_29,_2a,rtl,_2b){
if(!_16._masterTT){
_10._masterTT=_16._masterTT=new _11();
}
return _16._masterTT.show(_28,_29,_2a,rtl,_2b);
};
_10.hideTooltip=function(_2c){
return _16._masterTT&&_16._masterTT.hide(_2c);
};
var _16=_2("dijit.Tooltip",_c,{label:"",showDelay:400,connectId:[],position:[],_setConnectIdAttr:function(_2d){
_1.forEach(this._connections||[],function(_2e){
_1.forEach(_2e,_7.hitch(this,"disconnect"));
},this);
this._connectIds=_1.filter(_7.isArrayLike(_2d)?_2d:(_2d?[_2d]:[]),function(id){
return _3.byId(id);
});
this._connections=_1.map(this._connectIds,function(id){
var _2f=_3.byId(id);
return [this.connect(_2f,"onmouseenter","_onHover"),this.connect(_2f,"onmouseleave","_onUnHover"),this.connect(_2f,"onfocus","_onHover"),this.connect(_2f,"onblur","_onUnHover")];
},this);
this._set("connectId",_2d);
},addTarget:function(_30){
var id=_30.id||_30;
if(_1.indexOf(this._connectIds,id)==-1){
this.set("connectId",this._connectIds.concat(id));
}
},removeTarget:function(_31){
var id=_31.id||_31,idx=_1.indexOf(this._connectIds,id);
if(idx>=0){
this._connectIds.splice(idx,1);
this.set("connectId",this._connectIds);
}
},buildRendering:function(){
this.inherited(arguments);
_4.add(this.domNode,"dijitTooltipData");
},startup:function(){
this.inherited(arguments);
var ids=this.connectId;
_1.forEach(_7.isArrayLike(ids)?ids:[ids],this.addTarget,this);
},_onHover:function(e){
if(!this._showTimer){
var _32=e.target;
this._showTimer=setTimeout(_7.hitch(this,function(){
this.open(_32);
}),this.showDelay);
}
},_onUnHover:function(){
if(this._focus){
return;
}
if(this._showTimer){
clearTimeout(this._showTimer);
delete this._showTimer;
}
this.close();
},open:function(_33){
if(this._showTimer){
clearTimeout(this._showTimer);
delete this._showTimer;
}
_16.show(this.label||this.domNode.innerHTML,_33,this.position,!this.isLeftToRight(),this.textDir);
this._connectNode=_33;
this.onShow(_33,this.position);
},close:function(){
if(this._connectNode){
_16.hide(this._connectNode);
delete this._connectNode;
this.onHide();
}
if(this._showTimer){
clearTimeout(this._showTimer);
delete this._showTimer;
}
},onShow:function(){
},onHide:function(){
},uninitialize:function(){
this.close();
this.inherited(arguments);
}});
_16._MasterTooltip=_11;
_16.show=_10.showTooltip;
_16.hide=_10.hideTooltip;
_16.defaultPosition=["after-centered","before-centered"];
return _16;
});
|
PypiClean
|
/pytransform3d-3.4.0.tar.gz/pytransform3d-3.4.0/doc/source/index.rst
|
.. pytransform3d documentation master file, created by
sphinx-quickstart on Thu Nov 20 21:01:30 2014.
.. raw:: html
<div class="text-right">
=============
pytransform3d
=============
.. raw:: html
</div>
<div class="container-fluid">
<div class="row">
<div class="col-md-4">
<div class="panel panel-default">
<div class="panel-heading">
<h3 class="panel-title">Contents</h3>
</div>
<div class="panel-body">
.. toctree::
:maxdepth: 1
install
introduction
rotations
transformations
transformation_ambiguities
euler_angles
transformation_modeling
transform_manager
transformation_over_time
camera
animations
api
_auto_examples/index
.. raw:: html
</div>
</div>
</div>
<div class="col-md-8">
This documentation explains how you can work with pytransform3d and with
3D transformations in general.
-----
Scope
-----
pytransform3d focuses on readability and debugging, not on computational
efficiency. If you want to have an efficient implementation of some function
from the library you can easily extract the relevant code and implement it
more efficiently in a language of your choice.
The library integrates well with the
`scientific Python ecosystem <https://scipy-lectures.org/>`_
with its core libraries NumPy, SciPy and Matplotlib.
We rely on `NumPy <https://numpy.org/>`_ for linear algebra and on
`Matplotlib <https://matplotlib.org/>`_ to offer plotting functionalities.
`SciPy <https://scipy.org/>`_ is used if you want to
automatically compute transformations from a graph of transformations.
pytransform3d offers...
* operations for most common representations of rotation / orientation and
translation / position
* conversions between those representations
* clear documentation of conventions
* tight coupling with matplotlib to quickly visualize (or animate)
transformations
* the TransformManager which organizes complex chains of transformations
* the TransformEditor which allows you to modify transformations graphically
* the UrdfTransformManager which is able to load transformations from
`URDF <https://wiki.ros.org/urdf>`_ files
* a matplotlib-like interface to Open3D's visualizer to display
geometries and transformations
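For example, the TransformManager can be set up and queried in a few lines. The
snippet below is a minimal sketch based on the documented API; check the function
names against the version you have installed:
.. code:: python
import numpy as np
from pytransform3d.rotations import matrix_from_axis_angle
from pytransform3d.transformations import transform_from
from pytransform3d.transform_manager import TransformManager
# Rotation of 45 degrees about the z-axis plus a translation along x.
R = matrix_from_axis_angle([0.0, 0.0, 1.0, np.pi / 4.0])
A2B = transform_from(R, [1.0, 0.0, 0.0])
# Register the transformation and query its inverse.
tm = TransformManager()
tm.add_transform("A", "B", A2B)
B2A = tm.get_transform("B", "A")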
--------
Citation
--------
If you use pytransform3d for a scientific publication, I would appreciate
citation of the following paper:
Fabisch, (2019). pytransform3d: 3D Transformations for Python.
Journal of Open Source Software, 4(33), 1159, |DOI|_
.. |DOI| image:: http://joss.theoj.org/papers/10.21105/joss.01159/status.svg
.. _DOI: https://doi.org/10.21105/joss.01159
.. raw:: html
</div>
</div>
</div>
|
PypiClean
|
/dnsadmin53-0.0.1.tar.gz/dnsadmin53-0.0.1/README.md
|
aws-dnsadmin53
==============
[](https://travis-ci.org/huit/python-dnsadmin53)
Manage access to zones in Route 53
There are a couple of limitations on IAM Objects
http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html
Based on this, it seems like roles will be the best way to go.
Both types of cross-account access work the same way: the external AWS account is given the ARN of the role, and a user in that account then calls AssumeRole with that ARN to obtain temporary credentials that allow access to the role (in our case, updating DNS entries within a zone).
The first type delegates access with just an Account ID, trusting the remote account to create a group with appropriate access to the Role ARN.
http://docs.aws.amazon.com/IAM/latest/UserGuide/cross-acct-access.html
The second type of delegation requires both an Account ID AND an External ID, which prevents the "Confused Deputy" problem (http://en.wikipedia.org/wiki/Confused_deputy_problem).
http://docs.aws.amazon.com/STS/latest/UsingSTS/sts-delegating-externalid.html
http://docs.aws.amazon.com/STS/latest/UsingSTS/Welcome.html
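For reference, the AssumeRole flow from the delegated account looks roughly like this (a minimal sketch using boto3; the role ARN, external ID, and hosted zone ID below are placeholders):
```python
import boto3

# Assume the cross-account role that was delegated to us (placeholder values).
sts = boto3.client("sts")
creds = sts.assume_role(
    RoleArn="arn:aws:iam::123456789012:role/dns-zone-editor",
    RoleSessionName="dnsadmin53-example",
    ExternalId="example-external-id",  # only required for the second delegation type
)["Credentials"]

# Use the temporary credentials to update a record in the delegated zone.
route53 = boto3.client(
    "route53",
    aws_access_key_id=creds["AccessKeyId"],
    aws_secret_access_key=creds["SecretAccessKey"],
    aws_session_token=creds["SessionToken"],
)
route53.change_resource_record_sets(
    HostedZoneId="Z0000000EXAMPLE",
    ChangeBatch={
        "Changes": [{
            "Action": "UPSERT",
            "ResourceRecordSet": {
                "Name": "www.example.com.",
                "Type": "A",
                "TTL": 300,
                "ResourceRecords": [{"Value": "192.0.2.10"}],
            },
        }]
    },
)
```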
|
PypiClean
|
/patchwork-websoccer-0.1.12.tar.gz/patchwork-websoccer-0.1.12/patchwork/websoccer/fastapi.py
|
import logging
from typing import Callable, Mapping, Union, Coroutine, Any
from fastapi import APIRouter, Depends
from starlette.websockets import WebSocket, WebSocketDisconnect
from patchwork.websoccer.client.base import BaseClient
from patchwork.websoccer.client.websock import WebsockClient
from patchwork.websoccer.court.base import BaseCourt
logger = logging.getLogger('patchwork.websoccer.fastapi')
def noop_authorizer():
return None
DataHandler = Callable[[Union[str, bytes], BaseClient], Coroutine[Any, Any, Union[str, bytes, None]]]
class StarletteWebsockClient(WebsockClient):
_sock: WebSocket
def __init__(self, handler: DataHandler, **kwargs):
super().__init__(**kwargs)
self._handler = handler
async def get(self) -> Union[bytes, str]:
try:
return await super().get()
except WebSocketDisconnect:
raise EOFError()
async def handle(self, data: Union[bytes, str]):
response = await self._handler(data, self)
if response is None:
return
await self.send(response)
def __str__(self):
return f"<{self.__class__.__name__}: {self._sock.client.host}:{self._sock.client.port}>"
def bind_fastapi(
court: BaseCourt,
handler: DataHandler,
authorizer: Callable = None,
binary_mode: bool = False
):
"""
Binds a court instance to FastAPI by returning a router that can be easily included
at the desired path.
The optional authorizer is a FastAPI dependency called to determine whether an incoming
connection is authorized. For unauthorized connections it must raise an exception.
:param court:
:param authorizer:
:param handler:
:param binary_mode:
:return:
"""
router = APIRouter()
if authorizer is None:
authorizer = noop_authorizer
@router.get('')
async def describe():
"""
Returns available transports with their locations
:return:
"""
return {
'endpoints': {
'websocket': '/ws'
}
}
@router.websocket('/ws')
async def websocket(websocket: WebSocket, auth: Mapping = Depends(authorizer)):
await websocket.accept()
logger.info(f"{websocket.client}: websocket client accepted")
client = StarletteWebsockClient(handler=handler, sock=websocket, binary=binary_mode)
if auth is not None:
client.session.update(auth)
await court.client(client)
logger.info(f"{websocket.client}: connection closed")
# TODO: add SSE endpoint
# TODO: add HTTP poll endpoint
@router.on_event('startup')
async def run_websoccer():
await court.run()
@router.on_event('shutdown')
async def stop_websoccer():
await court.terminate()
logger.info("Websoccer court initialized for FastAPI")
# include the returned router in your FastAPI application
return router
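# Usage sketch (illustrative only; ``my_court`` is a hypothetical BaseCourt instance
# and ``echo_handler`` a hypothetical DataHandler, neither is defined in this module):
#
#     from fastapi import FastAPI
#
#     async def echo_handler(data, client):
#         # Echo every incoming frame back to the sender.
#         return data
#
#     app = FastAPI()
#     app.include_router(bind_fastapi(my_court, echo_handler), prefix="/realtime")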
|
PypiClean
|
/smscx_client-0.1.13.tar.gz/smscx_client-0.1.13/smscx_client/model/group_details_response.py
|
import re # noqa: F401
import sys # noqa: F401
from smscx_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
ApiModel
)
from smscx_client.exceptions import ApiAttributeError
def lazy_import():
from smscx_client.model.data_groups_details import DataGroupsDetails
from smscx_client.model.info_groups import InfoGroups
from smscx_client.model.paging import Paging
globals()['DataGroupsDetails'] = DataGroupsDetails
globals()['InfoGroups'] = InfoGroups
globals()['Paging'] = Paging
class GroupDetailsResponse(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def api_types():
"""
Returns
api_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'info': (InfoGroups,), # noqa: E501
'data': ([DataGroupsDetails],), # noqa: E501
'paging': (Paging,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'info': 'info', # noqa: E501
'data': 'data', # noqa: E501
'paging': 'paging', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_api_data(cls, info, data, paging, *args, **kwargs): # noqa: E501
"""GroupDetailsResponse - a model
Args:
info (InfoGroups):
data ([DataGroupsDetails]):
paging (Paging):
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(ApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.info = info
self.data = data
self.paging = paging
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, info, data, paging, *args, **kwargs): # noqa: E501
"""GroupDetailsResponse - a model
Args:
info (InfoGroups):
data ([DataGroupsDetails]):
paging (Paging):
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.info = info
self.data = data
self.paging = paging
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_api_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/geezramlibs-0.2.0.tar.gz/geezramlibs-0.2.0/geezram/geez/database/pmpermitdb.py
|
from geezram.geez.database import db_x
db_y = db_x["PMPERMIT"]
PMPERMIT_MESSAGE = (
"**peringatan! tolong baca pesan ini dengan hati-hati..\n\n**"
"**Saya Geez-Pyro Assistant untuk melindungi tuan saya dari spam**"
"**jika Anda bukan spammer, harap tunggu!.\n\n**"
"**jangan spam atau Anda akan diblokir!**"
)
BLOCKED = "**Spammer, blocked!**"
LIMIT = 5
async def set_pm(value: bool):
doc = {"_id": 1, "pmpermit": value}
doc2 = {"_id": "Approved", "users": []}
r = await db_y.find_one({"_id": 1})
r2 = await db_y.find_one({"_id": "Approved"})
if r:
await db_y.update_one({"_id": 1}, {"$set": {"pmpermit": value}})
else:
await db_y.insert_one(doc)
if not r2:
await db_y.insert_one(doc2)
async def set_permit_message(text):
await db_y.update_one({"_id": 1}, {"$set": {"pmpermit_message": text}})
async def set_block_message(text):
await db_y.update_one({"_id": 1}, {"$set": {"block_message": text}})
async def set_limit(limit):
await db_y.update_one({"_id": 1}, {"$set": {"limit": limit}})
async def get_pm_settings():
result = await db_y.find_one({"_id": 1})
if not result:
return False
pmpermit = result["pmpermit"]
pm_message = result.get("pmpermit_message", PMPERMIT_MESSAGE)
block_message = result.get("block_message", BLOCKED)
limit = result.get("limit", LIMIT)
return pmpermit, pm_message, limit, block_message
async def approve_user(user_id):
cd = await db_y.find_one({"_id": "PmPermit"})
if cd:
await db_y.update_one({"_id": "PmPermit"}, {"$push": {"user_id": user_id}})
else:
user_idc = [user_id]
await db_y.insert_one({"_id": "PmPermit", "user_id": user_idc})
async def disapprove_user(user_id):
await db_y.update_one({"_id": "PmPermit"}, {"$pull": {"user_id": user_id}})
async def is_user_approved(user_id):
sm = await db_y.find_one({"_id": "PmPermit"})
if sm:
kek = list(sm.get("user_id"))
return user_id in kek
else:
return False
async def user_list():
sm = await db_y.find_one({"_id": "PmPermit"})
if sm:
return list(sm.get("user_id"))
else:
return False
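# --- usage sketch (not part of the original module) ---
# A minimal, hypothetical example of calling these helpers from an async
# handler. It assumes a reachable MongoDB behind the ``db_x`` client and an
# event loop supplied by the bot framework; the user id is made up.
#
#     await set_pm(True)                        # enable PM-permit protection
#     settings = await get_pm_settings()        # (pmpermit, pm_message, limit, block_message)
#     if settings and not await is_user_approved(123456):
#         await approve_user(123456)            # whitelist this user id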
|
PypiClean
|
/PyCIM-15.15.0.tar.gz/PyCIM-15.15.0/CIM14/CDPSM/Balanced/IEC61970/Core/VoltageLevel.py
|
from CIM14.CDPSM.Balanced.IEC61970.Core.EquipmentContainer import EquipmentContainer
class VoltageLevel(EquipmentContainer):
"""A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these.
"""
def __init__(self, lowVoltageLimit=0.0, highVoltageLimit=0.0, BaseVoltage=None, Bays=None, Substation=None, *args, **kw_args):
"""Initialises a new 'VoltageLevel' instance.
@param lowVoltageLimit: The bus bar's low voltage limit
@param highVoltageLimit: The bus bar's high voltage limit
@param BaseVoltage: The base voltage used for all equipment within the VoltageLevel.
@param Bays: The association is used in the naming hierarchy.
@param Substation: The association is used in the naming hierarchy.
"""
#: The bus bar's low voltage limit
self.lowVoltageLimit = lowVoltageLimit
#: The bus bar's high voltage limit
self.highVoltageLimit = highVoltageLimit
self._BaseVoltage = None
self.BaseVoltage = BaseVoltage
self._Bays = []
self.Bays = [] if Bays is None else Bays
self._Substation = None
self.Substation = Substation
super(VoltageLevel, self).__init__(*args, **kw_args)
_attrs = ["lowVoltageLimit", "highVoltageLimit"]
_attr_types = {"lowVoltageLimit": float, "highVoltageLimit": float}
_defaults = {"lowVoltageLimit": 0.0, "highVoltageLimit": 0.0}
_enums = {}
_refs = ["BaseVoltage", "Bays", "Substation"]
_many_refs = ["Bays"]
def getBaseVoltage(self):
"""The base voltage used for all equipment within the VoltageLevel.
"""
return self._BaseVoltage
def setBaseVoltage(self, value):
if self._BaseVoltage is not None:
filtered = [x for x in self.BaseVoltage.VoltageLevel if x != self]
self._BaseVoltage._VoltageLevel = filtered
self._BaseVoltage = value
if self._BaseVoltage is not None:
if self not in self._BaseVoltage._VoltageLevel:
self._BaseVoltage._VoltageLevel.append(self)
BaseVoltage = property(getBaseVoltage, setBaseVoltage)
def getBays(self):
"""The association is used in the naming hierarchy.
"""
return self._Bays
def setBays(self, value):
for x in self._Bays:
x.VoltageLevel = None
for y in value:
y._VoltageLevel = self
self._Bays = value
Bays = property(getBays, setBays)
def addBays(self, *Bays):
for obj in Bays:
obj.VoltageLevel = self
def removeBays(self, *Bays):
for obj in Bays:
obj.VoltageLevel = None
def getSubstation(self):
"""The association is used in the naming hierarchy.
"""
return self._Substation
def setSubstation(self, value):
if self._Substation is not None:
filtered = [x for x in self.Substation.VoltageLevels if x != self]
self._Substation._VoltageLevels = filtered
self._Substation = value
if self._Substation is not None:
if self not in self._Substation._VoltageLevels:
self._Substation._VoltageLevels.append(self)
Substation = property(getSubstation, setSubstation)
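# --- usage sketch (not part of the original module) ---
# A minimal example of constructing a VoltageLevel with explicit voltage
# limits; the numbers are illustrative and no BaseVoltage, Bays or Substation
# associations are wired up here.
#
#     vl = VoltageLevel(lowVoltageLimit=380.0, highVoltageLimit=420.0)
#     (vl.lowVoltageLimit, vl.highVoltageLimit)   # -> (380.0, 420.0)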
|
PypiClean
|
/dosage-3.0.tar.gz/dosage-3.0/dosagelib/plugins/creators.py
|
from ..scraper import ParserScraper
from ..helpers import indirectStarter
class Creators(ParserScraper):
imageSearch = '//a[contains(@class,"fancybox")]/img'
prevSearch = '//a[@id="nav_prev"]'
latestSearch = '//div[contains(@class,"caption")]/a'
starter = indirectStarter
def __init__(self, name, path, lang=None):
super(Creators, self).__init__('Creators/' + name)
self.url = 'https://www.creators.com/features/' + path
if lang:
self.lang = lang
@classmethod
def getmodules(cls):
return (
# Some comics are not listed on the "all" page (too old?)
cls('CafeconLeche', 'cafe-con-leche'),
cls('DonaldDuck', 'donald-duck'),
cls('Flare', 'flare'),
cls('FlightDeck', 'flight-deck'),
cls('GirlsAndSports', 'girls-and-sports'),
cls('HomeOffice', 'stay-at-home-dad'),
cls('HopeAndDeath', 'hope-and-death'),
cls('MickeyMouse', 'mickey-mouse'),
cls('NaturalSelection', 'natural-selection'),
cls('OffCenter', 'off-center'),
cls('Rugrats', 'rugrats'),
cls('TheQuigmans', 'the-quigmans'),
cls('WinnieThePooh', 'winnie-the-pooh'),
# do not edit anything below since these entries are generated from
# scripts/creators.py
# START AUTOUPDATE
# Agnes has a duplicate in GoComics/Agnes
# AndyCapp has a duplicate in GoComics/AndyCapp
cls('AndyMarlette', 'andy-marlette'),
cls('Archie', 'archie'),
cls('ArchieSpanish', 'archie-spanish', 'es'),
# AskShagg has a duplicate in GoComics/AskShagg
# BallardStreet has a duplicate in GoComics/BallardStreet
# BC has a duplicate in GoComics/BC
# BobGorrell has a duplicate in GoComics/BobGorrell
# ChipBok has a duplicate in GoComics/ChipBok
# ChrisBritt has a duplicate in GoComics/ChrisBritt
# ChuckleBros has a duplicate in GoComics/ChuckleBros
# DaddysHome has a duplicate in GoComics/DaddysHome
# DiamondLil has a duplicate in GoComics/DiamondLil
# DogEatDoug has a duplicate in GoComics/DogEatDoug
# DogsOfCKennel has a duplicate in GoComics/DogsOfCKennel
# FloAndFriends has a duplicate in GoComics/FloAndFriends
# ForHeavensSake has a duplicate in GoComics/ForHeavensSake
# FreeRange has a duplicate in GoComics/FreeRange
# GaryMarkstein has a duplicate in GoComics/GaryMarkstein
# GaryVarvel has a duplicate in GoComics/GaryVarvel
# Heathcliff has a duplicate in GoComics/Heathcliff
cls('HeathcliffSpanish', 'heathcliff-spanish', 'es'),
# HerbAndJamaal has a duplicate in GoComics/HerbAndJamaal
# JohnDeering has a duplicate in GoComics/JohnDeering
# KenCatalino has a duplicate in GoComics/KenCatalino
# LibertyMeadows has a duplicate in GoComics/LibertyMeadows
cls('LongStoryShort', 'long-story-short'),
# MarshallRamsey has a duplicate in GoComics/MarshallRamsey
# MichaelRamirez has a duplicate in GoComics/MichaelRamirez
# MikeLuckovich has a duplicate in GoComics/MikeLuckovich
# Momma has a duplicate in GoComics/Momma
cls('Mossprints', 'mossprints'),
# NestHeads has a duplicate in GoComics/NestHeads
# OneBigHappy has a duplicate in GoComics/OneBigHappy
# PaulSzep has a duplicate in GoComics/PaulSzep
# Rubes has a duplicate in GoComics/Rubes
# ScaryGary has a duplicate in GoComics/ScaryGary
# SpeedBump has a duplicate in GoComics/SpeedBump
# SteveBenson has a duplicate in GoComics/SteveBenson
# SteveBreen has a duplicate in GoComics/SteveBreen
# SteveKelley has a duplicate in GoComics/SteveKelley
# StrangeBrew has a duplicate in GoComics/StrangeBrew
# TheBarn has a duplicate in GoComics/TheBarn
# TheMeaningOfLila has a duplicate in GoComics/TheMeaningOfLila
# TheOtherCoast has a duplicate in GoComics/TheOtherCoast
cls('TomStiglich', 'tom-stiglich'),
# WeePals has a duplicate in GoComics/WeePals
# WizardOfId has a duplicate in GoComics/WizardOfId
cls('WizardOfIdSpanish', 'wizard-of-id-spanish', 'es'),
# WorkingItOut has a duplicate in GoComics/WorkingItOut
# ZackHill has a duplicate in GoComics/ZackHill
# END AUTOUPDATE
)
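# --- usage sketch (not part of the original module) ---
# getmodules() returns one configured scraper instance per comic; a REPL
# session might look like this (output is illustrative):
#
#     >>> scrapers = Creators.getmodules()
#     >>> scrapers[0].url
#     'https://www.creators.com/features/cafe-con-leche'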
|
PypiClean
|
/netbluemind4-4.9.2993.tar.gz/netbluemind4-4.9.2993/netbluemind/core/container/model/IdQuery.py
|
import requests
from netbluemind.python import serder
class IdQuery:
def __init__(self):
self.filter = None
self.limit = None
self.offset = None
self.knownContainerVersion = None
pass
class __IdQuerySerDer__:
def __init__(self):
pass
def parse(self, value):
if (value == None):
return None
instance = IdQuery()
self.parseInternal(value, instance)
return instance
def parseInternal(self, value, instance):
from netbluemind.core.container.model.ItemFlagFilter import ItemFlagFilter
from netbluemind.core.container.model.ItemFlagFilter import __ItemFlagFilterSerDer__
filterValue = value['filter']
instance.filter = __ItemFlagFilterSerDer__().parse(filterValue)
limitValue = value['limit']
instance.limit = serder.INT.parse(limitValue)
offsetValue = value['offset']
instance.offset = serder.INT.parse(offsetValue)
knownContainerVersionValue = value['knownContainerVersion']
instance.knownContainerVersion = serder.LONG.parse(
knownContainerVersionValue)
return instance
def encode(self, value):
if (value == None):
return None
instance = dict()
self.encodeInternal(value, instance)
return instance
def encodeInternal(self, value, instance):
from netbluemind.core.container.model.ItemFlagFilter import ItemFlagFilter
from netbluemind.core.container.model.ItemFlagFilter import __ItemFlagFilterSerDer__
filterValue = value.filter
instance["filter"] = __ItemFlagFilterSerDer__().encode(filterValue)
limitValue = value.limit
instance["limit"] = serder.INT.encode(limitValue)
offsetValue = value.offset
instance["offset"] = serder.INT.encode(offsetValue)
knownContainerVersionValue = value.knownContainerVersion
instance["knownContainerVersion"] = serder.LONG.encode(
knownContainerVersionValue)
return instance
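# --- usage sketch (not part of the original module) ---
# Parsing a query dict through the deserializer; this assumes
# __ItemFlagFilterSerDer__ tolerates a None filter the same way this class
# does (returning None), which matches the convention used here.
#
#     serder_ = __IdQuerySerDer__()
#     query = serder_.parse({'filter': None, 'limit': 50, 'offset': 0,
#                            'knownContainerVersion': 7})
#     (query.limit, query.knownContainerVersion)   # -> (50, 7)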
|
PypiClean
|
/glue-ginga-0.2.tar.gz/glue-ginga-0.2/glue_ginga/qt/layer_artist.py
|
from __future__ import absolute_import, division, print_function
from time import time
import numpy as np
from ginga.misc import Bunch
from ginga.util import wcsmod
from ginga import AstroImage, BaseImage
from glue.external.echo import keep_in_sync, CallbackProperty
from glue.core.exceptions import IncompatibleAttribute
from glue.core.layer_artist import LayerArtistBase
from glue.utils import color2rgb
from glue.viewers.image.state import ImageLayerState, ImageSubsetLayerState
wcsmod.use('astropy')
class GingaLayerArtist(LayerArtistBase):
zorder = CallbackProperty()
visible = CallbackProperty()
def __init__(self, viewer_state=None, layer=None, layer_state=None, canvas=None):
super(GingaLayerArtist, self).__init__(layer)
self._canvas = canvas
self.layer = layer or layer_state.layer
self.state = layer_state or self._layer_state_cls(viewer_state=viewer_state,
layer=self.layer)
self._viewer_state = viewer_state
# Should not be needed here? (i.e. should be in add_data/add_subset?)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
self.zorder = self.state.zorder
self.visible = self.state.visible
self._sync_zorder = keep_in_sync(self, 'zorder', self.state, 'zorder')
self._sync_visible = keep_in_sync(self, 'visible', self.state, 'visible')
self.state.add_global_callback(self.update)
self._viewer_state.add_global_callback(self.update)
def clear(self):
self._canvas.delete_objects_by_tag([self._tag], redraw=True)
def redraw(self, whence=0):
self._canvas.redraw(whence=whence)
def remove(self):
self.clear()
def __gluestate__(self, context):
return dict(state=context.id(self.state))
def update(self, **kwargs):
try:
canvas_img = self._canvas.get_object_by_tag(self._tag)
except KeyError:
pass
else:
canvas_img.set_zorder(self.state.zorder)
class GingaImageLayer(GingaLayerArtist):
_layer_state_cls = ImageLayerState
def __init__(self, viewer_state=None, layer=None, layer_state=None, canvas=None):
super(GingaImageLayer, self).__init__(viewer_state=viewer_state, layer=layer,
layer_state=layer_state, canvas=canvas)
self._tag = '_image'
self._img = DataImage(self.state)
def _ensure_added(self):
"""
Add artist to canvas if needed
"""
try:
self._canvas.get_object_by_tag(self._tag)
except KeyError:
self._canvas.set_image(self._img)
def update(self, **kwargs):
super(GingaImageLayer, self).update(**kwargs)
if self.state.visible and self._img:
self._ensure_added()
elif not self.state.visible:
self.clear()
return
self.redraw()
class GingaSubsetImageLayer(GingaLayerArtist):
_layer_state_cls = ImageSubsetLayerState
def __init__(self, viewer_state=None, layer=None, layer_state=None, canvas=None):
super(GingaSubsetImageLayer, self).__init__(viewer_state=viewer_state, layer=layer,
layer_state=layer_state, canvas=canvas)
self._tag = "layer%s_%s" % (layer.label, time())
self._img = SubsetImage(self.state)
# SubsetImages can't be added to canvases directly. Need
# to wrap into a ginga canvas type.
Image = self._canvas.get_draw_class('image')
self._cimg = Image(0, 0, self._img, alpha=0.5, flipy=False)
def _visible_changed(self, *args):
if self.state.visible and self._cimg:
self._canvas.add(self._cimg, tag=self._tag, redraw=True)
elif not self.state.visible:
self.clear()
def _check_enabled(self):
"""
Sync the enabled/disabled status, based on whether
mask is computable
"""
try:
# Just try computing the subset for the first pixel
view = tuple(0 for _ in self.layer.data.shape)
self.layer.to_mask(view)
except IncompatibleAttribute as exc:
self.disable_invalid_attributes(*exc.args)
return
self.enable()
def _ensure_added(self):
"""
Add artist to canvas if needed
"""
try:
self._canvas.get_object_by_tag(self._tag)
except KeyError:
self._canvas.add(self._cimg, tag=self._tag, redraw=False)
def update(self, **kwargs):
super(GingaSubsetImageLayer, self).update(**kwargs)
self._check_enabled()
if self.state.visible and self._img:
self._ensure_added()
elif not self.state.visible:
self.clear()
return
self.redraw(whence=0)
def forbidden(*args):
raise ValueError("Forbidden")
class DataImage(AstroImage.AstroImage):
"""
A Ginga image subclass to interface with Glue Data objects
"""
get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden
def __init__(self, layer_state, **kwargs):
"""
Parameters
----------
...
kwargs : dict
Extra kwargs are passed to the superclass
"""
self.layer_state = layer_state
super(DataImage, self).__init__(**kwargs)
@property
def shape(self):
"""
The shape of the 2D view into the data
"""
return self.layer_state.get_sliced_data_shape()
def _get_fast_data(self):
return self._slice((slice(None, None, 10), slice(None, None, 10)))
def _slice(self, view):
"""
Extract a view from the 2D image.
"""
return self.layer_state.get_sliced_data(view=view)
class SubsetImage(BaseImage.BaseImage):
"""
A Ginga image subclass to interface with Glue subset objects
"""
get_data = _get_data = copy_data = set_data = get_array = transfer = forbidden
def __init__(self, layer_state=None, **kwargs):
"""
Parameters
----------
...
kwargs : dict
Extra kwargs are passed to the ginga superclass
"""
self.layer_state = layer_state
# NOTE: BaseImage accesses shape property--we need above items
# defined because we override shape()
super(SubsetImage, self).__init__(**kwargs)
self.order = 'RGBA'
@property
def shape(self):
"""
Shape of the 2D view into the subset mask
"""
return self.layer_state.get_sliced_data_shape()
def _rgb_from_mask(self, mask):
"""
Turn a boolean mask into a 4-channel RGBA image
"""
r, g, b = color2rgb(self.layer_state.color)
ones = mask * 0 + 255
alpha = mask * 127
result = np.dstack((ones * r, ones * g, ones * b, alpha)).astype(np.uint8)
return result
def _get_fast_data(self):
return self._slice((slice(None, None, 10), slice(None, None, 10)))
def _calc_order(self, order):
# Override base class because it invokes a glue forbidden method to
# access the data type of the image--we can instead assume RGBA
self.order = 'RGBA'
def _slice(self, view):
"""
Extract a view from the 2D subset mask.
"""
try:
return self._rgb_from_mask(self.layer_state.get_sliced_data(view=view))
except IncompatibleAttribute:
return np.zeros(self.shape + (4,))
def _set_minmax(self):
# we already know the data bounds
self.minval = 0
self.maxval = 256
self.minval_noinf = self.minval
self.maxval_noinf = self.maxval
def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht):
doit = getattr(self, '_doit', False)
self._doit = not doit
# default implementation if downsampling
if doit or new_wd <= (x2 - x1 + 1) or new_ht <= (y2 - y1 + 1):
return super(SubsetImage, self).get_scaled_cutout_wdht(x1, y1, x2, y2,
new_wd, new_ht)
# if upsampling, prevent extra to_mask() computation
x1, x2 = np.clip([x1, x2], 0, self.width - 2).astype(np.int)
y1, y2 = np.clip([y1, y2], 0, self.height - 2).astype(np.int)
result = self._slice(np.s_[y1:y2 + 1, x1:x2 + 1])
yi = np.linspace(0, result.shape[0], new_ht).astype(np.int).reshape(-1, 1).clip(0, result.shape[0] - 1)
xi = np.linspace(0, result.shape[1], new_wd).astype(np.int).reshape(1, -1).clip(0, result.shape[1] - 1)
yi, xi = [np.array(a) for a in np.broadcast_arrays(yi, xi)]
result = result[yi, xi]
scale_x = 1.0 * result.shape[1] / (x2 - x1 + 1)
scale_y = 1.0 * result.shape[0] / (y2 - y1 + 1)
return Bunch.Bunch(data=result, scale_x=scale_x, scale_y=scale_y)
|
PypiClean
|
/pyctest-0.0.12.tar.gz/pyctest-0.0.12/source/kitware-cmake/Help/variable/CMAKE_MAKE_PROGRAM.rst
|
CMAKE_MAKE_PROGRAM
------------------
Tool that can launch the native build system.
The value may be the full path to an executable or just the tool
name if it is expected to be in the ``PATH``.
The tool selected depends on the :variable:`CMAKE_GENERATOR` used
to configure the project:
* The :ref:`Makefile Generators` set this to ``make``, ``gmake``, or
a generator-specific tool (e.g. ``nmake`` for :generator:`NMake Makefiles`).
These generators store ``CMAKE_MAKE_PROGRAM`` in the CMake cache
so that it may be edited by the user.
* The :generator:`Ninja` generator sets this to ``ninja``.
This generator stores ``CMAKE_MAKE_PROGRAM`` in the CMake cache
so that it may be edited by the user.
* The :generator:`Xcode` generator sets this to ``xcodebuild`` (or possibly an
otherwise undocumented ``cmakexbuild`` wrapper implementing some
workarounds).
This generator prefers to lookup the build tool at build time
rather than to store ``CMAKE_MAKE_PROGRAM`` in the CMake cache
ahead of time. This is because ``xcodebuild`` is easy to find,
the ``cmakexbuild`` wrapper is needed only for older Xcode versions,
and the path to ``cmakexbuild`` may be outdated if CMake itself moves.
For compatibility with versions of CMake prior to 3.2, if
a user or project explicitly adds ``CMAKE_MAKE_PROGRAM`` to
the CMake cache then CMake will use the specified value.
* The :ref:`Visual Studio Generators` set this to the full path to
``MSBuild.exe`` (VS >= 10), ``devenv.com`` (VS 7,8,9), or
``VCExpress.exe`` (VS Express 8,9).
(See also variables
:variable:`CMAKE_VS_MSBUILD_COMMAND` and
  :variable:`CMAKE_VS_DEVENV_COMMAND`.)
These generators prefer to lookup the build tool at build time
rather than to store ``CMAKE_MAKE_PROGRAM`` in the CMake cache
ahead of time. This is because the tools are version-specific
and can be located using the Windows Registry. It is also
necessary because the proper build tool may depend on the
project content (e.g. the Intel Fortran plugin to VS 10 and 11
requires ``devenv.com`` to build its ``.vfproj`` project files
even though ``MSBuild.exe`` is normally preferred to support
the :variable:`CMAKE_GENERATOR_TOOLSET`).
For compatibility with versions of CMake prior to 3.0, if
a user or project explicitly adds ``CMAKE_MAKE_PROGRAM`` to
the CMake cache then CMake will use the specified value if
possible.
* The :generator:`Green Hills MULTI` generator sets this to the full
path to ``gbuild.exe`` based upon the toolset being used.
Once the generator has initialized a particular value for this
variable, changing the value has undefined behavior.
The ``CMAKE_MAKE_PROGRAM`` variable is set for use by project code.
The value is also used by the :manual:`cmake(1)` ``--build`` and
:manual:`ctest(1)` ``--build-and-test`` tools to launch the native
build process.
|
PypiClean
|
/django-lfs-0.11.tar.gz/django-lfs-0.11/lfs/manage/product/categories.py
|
import json
# django imports
from django.contrib.auth.decorators import permission_required
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
# lfs imports
from lfs.caching.utils import lfs_get_object_or_404
from lfs.core.utils import LazyEncoder
from lfs.core.signals import category_changed
from lfs.catalog.models import Product
from lfs.catalog.models import Category
@permission_required("core.manage_shop")
def manage_categories(request, product_id, template_name="manage/product/categories.html"):
"""Displays the manage category view.
"""
product = lfs_get_object_or_404(Product, pk=product_id)
product_category_ids = [p.id for p in product.get_categories()]
categories = []
for category in Category.objects.filter(parent=None):
children = children_categories(request, category, product_category_ids)
categories.append({
"id": category.id,
"slug": category.slug,
"name": category.name,
"url": category.get_absolute_url(),
"checked": category.id in product_category_ids,
"children": children,
})
result = render_to_string(template_name, request=request, context={
"product": product,
"categories": categories
})
return HttpResponse(result)
@permission_required("core.manage_shop")
def children_categories(request, category, product_category_ids,
template_name="manage/product/categories_children.html"):
"""Renders the children categories of given category as HTML.
"""
categories = []
for category in category.category_set.all():
children = children_categories(request, category, product_category_ids)
categories.append({
"id": category.id,
"slug": category.slug,
"name": category.name,
"url": category.get_absolute_url(),
"checked": category.id in product_category_ids,
"children": children,
})
result = render_to_string(template_name, request=request, context={
"categories": categories
})
return result
# Actions
@permission_required("core.manage_shop")
def change_categories(request, product_id):
"""Changes categories by passed request body.
"""
product = lfs_get_object_or_404(Product, pk=product_id)
# Signal that the old categories of the product have been changed.
for category in product.categories.all():
category_changed.send(category)
if request.method == "POST":
product.categories = request.POST.getlist("categories")
product.save()
# Signal that the new categories of the product have been changed.
for category in product.categories.all():
category_changed.send(category)
return HttpResponse(json.dumps({
"message": _(u"Categories have been saved."),
}, cls=LazyEncoder), content_type='application/json')
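# --- usage sketch (not part of the original module) ---
# Hypothetical URL wiring for these views; the regexes and module paths below
# are illustrative only and not taken from django-lfs itself.
#
#     from django.conf.urls import url
#     from lfs.manage.product import categories
#
#     urlpatterns = [
#         url(r'^manage/product/(?P<product_id>\d+)/categories/$',
#             categories.manage_categories),
#         url(r'^manage/product/(?P<product_id>\d+)/categories/change/$',
#             categories.change_categories),
#     ]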
|
PypiClean
|
/client-python-tool-0.7.4.3.tar.gz/client-python-tool-0.7.4.3/client_python_tool/packages/eth_utils/applicators.py
|
from typing import Any, Callable, Dict, Generator, List, Tuple
import warnings
from .decorators import return_arg_type
from .functional import to_dict
from .toolz import compose, curry
Formatters = Callable[[List[Any]], List[Any]]
@return_arg_type(2)
def apply_formatter_at_index(
formatter: Callable[..., Any], at_index: int, value: List[Any]
) -> Generator[List[Any], None, None]:
if at_index + 1 > len(value):
raise IndexError(
"Not enough values in iterable to apply formatter. Got: {0}. "
"Need: {1}".format(len(value), at_index + 1)
)
for index, item in enumerate(value):
if index == at_index:
yield formatter(item)
else:
yield item
def combine_argument_formatters(*formatters: List[Callable[..., Any]]) -> Formatters:
warnings.warn(
DeprecationWarning(
"combine_argument_formatters(formatter1, formatter2)([item1, item2])"
"has been deprecated and will be removed in a subsequent major version "
"release of the eth-utils library. Update your calls to use "
"apply_formatters_to_sequence([formatter1, formatter2], [item1, item2]) "
"instead."
)
)
_formatter_at_index = curry(apply_formatter_at_index)
return compose(
*(
_formatter_at_index(formatter, index)
for index, formatter in enumerate(formatters)
)
)
@return_arg_type(1)
def apply_formatters_to_sequence(
formatters: List[Any], sequence: List[Any]
) -> Generator[List[Any], None, None]:
if len(formatters) > len(sequence):
raise IndexError(
"Too many formatters for sequence: {} formatters for {!r}".format(
len(formatters), sequence
)
)
elif len(formatters) < len(sequence):
raise IndexError(
"Too few formatters for sequence: {} formatters for {!r}".format(
len(formatters), sequence
)
)
else:
for formatter, item in zip(formatters, sequence):
yield formatter(item)
def apply_formatter_if(
condition: Callable[..., bool], formatter: Callable[..., Any], value: Any
) -> Any:
if condition(value):
return formatter(value)
else:
return value
@to_dict
def apply_formatters_to_dict(
formatters: Dict[Any, Any], value: Dict[Any, Any]
) -> Generator[Tuple[Any, Any], None, None]:
for key, item in value.items():
if key in formatters:
try:
yield key, formatters[key](item)
except (TypeError, ValueError) as exc:
raise type(exc)(
"Could not format value %r as field %r" % (item, key)
) from exc
else:
yield key, item
@return_arg_type(1)
def apply_formatter_to_array(
formatter: Callable[..., Any], value: List[Any]
) -> Generator[List[Any], None, None]:
for item in value:
yield formatter(item)
def apply_one_of_formatters(
formatter_condition_pairs: Tuple[Tuple[Callable[..., Any], Callable[..., Any]]],
value: Any,
) -> Any:
for condition, formatter in formatter_condition_pairs:
if condition(value):
return formatter(value)
else:
raise ValueError(
"The provided value did not satisfy any of the formatter conditions"
)
@to_dict
def apply_key_map(
key_mappings: Dict[Any, Any], value: Dict[Any, Any]
) -> Generator[Tuple[Any, Any], None, None]:
key_conflicts = (
set(value.keys())
.difference(key_mappings.keys())
.intersection(v for k, v in key_mappings.items() if v in value)
)
if key_conflicts:
raise KeyError(
"Could not apply key map due to conflicting key(s): {}".format(
key_conflicts
)
)
for key, item in value.items():
if key in key_mappings:
yield key_mappings[key], item
else:
yield key, item
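# --- usage sketch (not part of the original module) ---
# A few illustrative calls; return_arg_type re-wraps the generators in the
# type of the designated input argument, so lists come back as lists.
#
#     >>> apply_formatter_at_index(hex, 1, [1, 2, 3])
#     [1, '0x2', 3]
#     >>> apply_formatters_to_sequence([str, hex], [10, 255])
#     ['10', '0xff']
#     >>> apply_formatters_to_dict({"age": int}, {"age": "42", "name": "bob"})
#     {'age': 42, 'name': 'bob'}
#     >>> apply_key_map({"old": "new"}, {"old": 1, "other": 2})
#     {'new': 1, 'other': 2}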
|
PypiClean
|
/novalabs-1.2.75.tar.gz/novalabs-1.2.75/nova/utils/helpers.py
|
from datetime import datetime, timedelta
from typing import Optional, Dict
import time
import re
def convert_candle_to_timedelta(candle: str) -> timedelta:
multi = int(float(re.findall(r'\d+', candle)[0]))
if 'm' in candle:
candle_duration = timedelta(minutes=multi)
elif 'h' in candle:
candle_duration = timedelta(hours=multi)
elif 'd' in candle:
candle_duration = timedelta(days=multi)
else:
raise ValueError(f"Please enter a valid candle value. Must contain the letter m, h or d.")
return candle_duration
def convert_max_holding_to_candle_nb(candle: str, max_holding: timedelta) -> int:
"""
Return:
the number maximum of candle we can hold a position
"""
candle_duration = convert_candle_to_timedelta(candle=candle)
return int(max_holding.total_seconds() / candle_duration.total_seconds())
def get_timedelta_unit(interval: str) -> timedelta:
"""
    Returns: the interval expressed as a timedelta
"""
multi = int(float(re.findall(r'\d+', interval)[0]))
if 'm' in interval:
return timedelta(minutes=multi)
elif 'h' in interval:
return timedelta(hours=multi)
elif 'd' in interval:
return timedelta(days=multi)
def milliseconds_to_interval(interval_ms: int) -> str:
if interval_ms < 3600000:
return str(int(60 / (3600000 / interval_ms))) + 'T'
elif interval_ms < 86400000:
return str(int(24 / (86400000 / interval_ms))) + 'H'
else:
return str(int(interval_ms / 86400000)) + 'D'
def interval_to_minutes_str(interval: str) -> str:
"""Convert a Binance interval string to milliseconds
Args:
interval: interval string, e.g.: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w
Returns:
int value of interval in milliseconds
None if interval prefix is not a decimal integer
None if interval suffix is not one of m, h, d, w
"""
if 'm' in interval:
interval += 'in'
if 'h' in interval:
interval += 'our'
if 'd' in interval:
interval += 'ay'
if 'w' in interval:
interval += 'eek'
return interval
def interval_to_minutes(interval: str) -> Optional[int]:
"""Convert a Binance interval string to milliseconds
Args:
interval: interval string, e.g.: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w
Returns:
int value of interval in milliseconds
None if interval prefix is not a decimal integer
None if interval suffix is not one of m, h, d, w
"""
minutes_per_unit: Dict[str, int] = {
"m": 1,
"h": 60,
"d": 24 * 60,
"w": 7 * 24 * 60,
}
try:
return int(interval[:-1]) * minutes_per_unit[interval[-1]]
except (ValueError, KeyError):
return None
def interval_to_milliseconds(interval: str) -> Optional[int]:
"""Convert a Binance interval string to milliseconds
Args:
interval: interval string, e.g.: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w
Returns:
int value of interval in milliseconds
None if interval prefix is not a decimal integer
None if interval suffix is not one of m, h, d, w
"""
seconds_per_unit: Dict[str, int] = {
"m": 60,
"h": 60 * 60,
"d": 24 * 60 * 60,
"w": 7 * 24 * 60 * 60,
}
try:
return int(interval[:-1]) * seconds_per_unit[interval[-1]] * 1000
except (ValueError, KeyError):
return None
def limit_to_start_date(interval: str, nb_candles: int):
"""
    Note: the number of candles is determined from the "now" timestamp
Args:
interval: interval string, e.g.: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w
nb_candles: number of candles needed.
Returns:
the start_time timestamp in milliseconds for production data
"""
number_of_milliseconds = interval_to_milliseconds(interval)
now_timestamp = int(time.time() * 1000)
return now_timestamp - (nb_candles + 1) * number_of_milliseconds
def is_opening_candle(interval: str):
multi = int(float(re.findall(r'\d+', interval)[0]))
unit = interval[-1]
now = datetime.utcnow()
if multi == 1:
if unit == 'm':
return now.second == 0
elif unit == 'h':
return now.minute + now.second == 0
elif unit == 'd':
return now.hour + now.minute + now.second == 0
else:
if unit == 'm':
return now.minute % multi + now.second == 0
elif unit == 'h':
return now.hour % multi + now.minute + now.second == 0
def compute_time_difference(
start_time: Optional[int],
end_time: Optional[int],
unit: str
) -> Optional[float]:
"""
Args:
start_time: start time in timestamp millisecond
        end_time: end time in timestamp millisecond
        unit: can be 'second', 'minute', 'hour', 'day'
    Returns:
        the time difference expressed in the given unit
    """
start_time_s = int(start_time / 1000)
end_time_s = int(end_time / 1000)
if unit == 'second':
return end_time_s - start_time_s
elif unit == 'minute':
return (end_time_s - start_time_s) / 60
elif unit == 'hour':
return (end_time_s - start_time_s) / 3600
elif unit == 'day':
return (end_time_s - start_time_s) / (3600 * 24)
def interval_to_oanda_granularity(interval: str):
_number = interval[:-1]
_letter = interval[-1].upper()
return f"{_letter}{_number}" if _letter in ['M', 'H'] else f'{_letter}'
|
PypiClean
|
/dnacryptkey-1.0.2.tar.gz/dnacryptkey-1.0.2/README.md
|
# DNACRYPT-KEY
DNACRYPT-KEY is a Python package that uses DNA code-based encryption to hide messages.
## Getting Started
Just use pip to install it!
### Prerequisites
To use dnacrypt-key, the random and unidecode packages need to be installed.
```
pip install random
pip install unidecode
```
### Installing
Check that the random and unidecode packages have been installed.
Use this command in a terminal to verify:
```
pip freeze
```
*If necessary, install the random and unidecode packages!
Then install DNACRYPT-KEY using this command in a terminal:
```
pip install dnacrypt-key
```
If you want, install dnacrypt-key and use the status() command to verify the status of the package.
## Running the tests
dnacrypt-key relies on third-party packages to run.
### Test the status of dnacrypt-key
```
import dnacrypt-key
print(dnacrypt-key.dnacrypt-key.required)
print(dnacrypt-key.dnacrypt-key.status())
```
### See how to use it
```
import dnacrypt-key
print(dnacrypt-key.dnacrypt-key.use)
```
### See description
```
import dnacrypt-key
print(dnacrypt-key.dnacrypt-key.description)
```
### How to use it
## To encrypt the message, use this command:
```
dnacrypt-key.dnacrypt-key('encrypt','Esse script foi feito pensando melhorar minhas skills','chavesecreta')
```
## Output
UUUUUCUUAUUGUCUUCCUCAUCGUAUUACGCAUGUUGCGCGUGGCUUCUCCUACUGCCUCCCCCACCGCAUCACCAACAGCGUCGCCGAAUGUCUAGAAGACGGGAUAGACGCAGCACUAAGAGGGAUAUUAAAACUGAUAUUCGGACUAGGAAAGAUAAGCGGAACAGACAGAACCGAAAAGAUAAUCGGACGAUAAAAAGCCAGAGCGAUAAUACUAACAUACAGAGAGAUAGAACAACUACGACGAGAUAAUUUUUCUUAUUGUCUUCCUCAUCGUAUUACGCAUGUUGCGCGUGGCUUCUCCUACUGCCUCCCCCACCGCAU
## To decrypt the message, use this command:
```
dnacrypt-key.dnacrypt-key('decrypt','UUUUUCUUAUUGUCUUCCUCAUCGUAUUACGCAUGUUGCGCGUGGCUUCUCCUACUGCCUCCCCCACCGCAUCACCAACAGCGUCGCCGAAUGUCUAGAAGACGGGAUAGACGCAGCACUAAGAGGGAUAUUAAAACUGAUAUUCGGACUAGGAAAGAUAAGCGGAACAGACAGAACCGAAAAGAUAAUCGGACGAUAAAAAGCCAGAGCGAUAAUACUAACAUACAGAGAGAUAGAACAACUACGACGAGAUAAUUUUUCUUAUUGUCUUCCUCAUCGUAUUACGCAUGUUGCGCGUGGCUUCUCCUACUGCCUCCCCCACCGCAU','chavesecreta')
```
## Output
Esse script foi feito pensando melhorar minhas skills.
## Built With
* VisualCode - The editor used
* [CataLux](https://catalux.com.br/) - CataLux Labs
## Authors
* **Rodrigo Forti** - *Initial work* - [CataLux Python Labs](https://github.com/FortiHub)
See also the list of [contributors](https://github.com/catalux/contributors) who participated in this project.
## License
This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details
## Acknowledgments
* This code was created to improve learning skills.
* Enjoy yourself!
|
PypiClean
|
/noc-0.7(3).tar.gz/noc-0.7(3)/lib/daemon.py
|
## Python modules
from __future__ import with_statement
import ConfigParser
import sys
import logging
import os
import signal
import optparse
import logging.handlers
## NOC modules
from noc.lib.debug import error_report, frame_report, set_crashinfo_context
from noc.lib.validators import is_ipv4, is_int
from noc.lib.version import get_version
# Load netifaces to resolve interface addresses when possible
try:
import netifaces
USE_NETIFACES = True
except ImportError:
USE_NETIFACES = False
class Daemon(object):
"""
Daemon base class
"""
daemon_name = "daemon"
defaults_config_path = "etc/%(daemon_name)s.defaults"
config_path = "etc/%(daemon_name)s.conf"
create_piddir = False
LOG_LEVELS = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL
}
def __init__(self):
# Chdir to the root of project
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), ".."))
self.prefix = os.getcwd()
# Parse commandline
self.opt_parser = optparse.OptionParser()
self.opt_parser.add_option("-c", "--config", action="store",
type="string", dest="config",
help="Read config from CONFIG")
self.opt_parser.add_option("-i", "--instance", action="store",
type="string", dest="instance_id",
default="0",
help="Set instnace id")
self.opt_parser.add_option("-f", "--foreground", action="store_false",
dest="daemonize", default=True,
help="Do not daemonize. "
"Run at the foreground")
self.opt_parser.add_option("-V", "--version", action="store_true",
dest="show_version", default=False,
help="Show daemon version")
self.setup_opt_parser()
self.options, self.args = self.opt_parser.parse_args()
if self.options.show_version:
print get_version()
sys.exit(0)
if len(self.args) < 1 or self.args[0] not in ["start", "launch",
"stop", "refresh"]:
self.opt_parser.error(
"You must supply one of start|launch|stop|refresh commands")
# Read config
self.pidfile = None
self.config = None
self.instance_id = self.options.instance_id
self.load_config()
# Register signal handlers if any
for s in [s for s in dir(self) if s.startswith("SIG")]:
try:
sig = getattr(signal, s)
except AttributeError:
logging.error(
"Signal '%s' is not supported on this platform" % s)
continue
signal.signal(sig, getattr(self, s))
def load_config(self):
"""
Load and process configuration files
:return:
"""
first_run = True
if self.config:
logging.info("Loading config")
first_run = False
self.config = ConfigParser.SafeConfigParser()
if self.defaults_config_path:
self.config.read(
self.defaults_config_path % {"daemon_name": self.daemon_name})
if self.options.config:
self.config.read(self.options.config)
elif self.config_path:
self.config.read(
self.config_path % {"daemon_name": self.daemon_name})
if not first_run:
self.on_load_config()
if self.config.get("main", "logfile"):
set_crashinfo_context(self.daemon_name, os.path.dirname(
self.config.get("main", "logfile").replace("{{instance}}",
self.instance_id)))
else:
set_crashinfo_context(None, None)
# Set up logging
if self.config.get("main", "loglevel") not in self.LOG_LEVELS:
raise Exception(
"Invalid loglevel '%s'" % self.config.get("main", "loglevel"))
for h in logging.root.handlers:
logging.root.removeHandler(h) # Dirty hack for baseConfig
self.heartbeat_enable = (self.options.daemonize and
self.config.getboolean("main", "heartbeat"))
if self.options.daemonize:
# Set up logging
logfile = self.config.get("main", "logfile")
syslog_host = self.config.get("main", "syslog_host")
if logfile or syslog_host:
loglevel = self.LOG_LEVELS[self.config.get("main", "loglevel")]
logging.root.setLevel(loglevel)
if logfile:
# Log to file
rf_handler = logging.handlers.RotatingFileHandler(
filename=logfile.replace(
"{{instance}}", self.instance_id),
maxBytes=self.config.getint("main", "logsize"),
backupCount=self.config.getint("main", "logfiles")
)
# @todo: Configurable parameter
rf_handler.setFormatter(
logging.Formatter('%(asctime)s %(message)s', None))
logging.root.addHandler(rf_handler)
if syslog_host:
# Log to remote host
for host in syslog_host.split(","):
host = host.strip()
if not host:
continue
syslog_handler = logging.handlers.SysLogHandler(
address=(host, 514)
)
# @todo: Configurable parameter
syslog_handler.setFormatter(
logging.Formatter('%(asctime)s %(message)s', None))
logging.root.addHandler(syslog_handler)
self.pidfile = self.config.get("main", "pidfile").replace(
"{{instance}}", self.instance_id)
if self.pidfile and self.create_piddir:
piddir = os.path.dirname(self.pidfile)
if not os.path.exists(piddir):
try:
os.makedirs(piddir)
os.chmod(piddir, 01777)
except OSError, why:
logging.error("Cannot create PIDfile directory %s: %s" % (
piddir, why))
sys.exit(1)
elif not os.path.isdir(piddir):
logging.error("'%s' is not a directory" % piddir)
sys.exit(1)
elif not os.access(piddir, os.W_OK):
logging.error("'%s' is not writable" % piddir)
sys.exit(1)
else:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s')
def on_load_config(self):
"""
Called after config has been reloaded on SIGHUP
:return:
"""
pass
def run(self):
"""
        Main daemon loop. Must be overridden
:return:
"""
pass
def become_daemon(self):
"""
Daemonize process
:return:
"""
try:
if os.fork():
# Exit parent and return control to the shell immediately
sys.exit(0)
except OSError, e:
sys.stderr.write("Fork failed")
sys.exit(1)
os.setsid() # Become session leader
os.umask(022)
try:
pid = os.fork()
except OSError, e:
sys.stderr.write("Fork failed")
os._exit(1)
if pid:
if self.pidfile:
self.write_pidfile(pid)
os._exit(0)
# In daemon process, redirect stdin/stdout/stderr to /dev/null
i = open("/dev/null", "r")
o = open("/dev/null", "a+")
e = open("/dev/null", "a+")
os.dup2(i.fileno(), sys.stdin.fileno())
os.dup2(o.fileno(), sys.stdout.fileno())
os.dup2(e.fileno(), sys.stderr.fileno())
sys.stdout = o
sys.stderr = e
def resolve_address(self, s):
"""
Resolve interface names to IP addresses
:param s: Interface name or IPv4 address
:return:
"""
if is_ipv4(s):
return s
if USE_NETIFACES:
try:
a = netifaces.ifaddresses(s)
except ValueError:
raise Exception("Invalid interface '%s'" % s)
try:
return a[2][0]["addr"]
except (IndexError, KeyError):
raise Exception("No ip address for interface: '%s' found" % s)
raise Exception("Cannot resolve address '%s'" % s)
def resolve_addresses(self, addr_list, default_port):
"""
Parses string and returns a list of (ip,port)
        :param addr_list: Comma-separated list of addresses in form:
* ip
* ip:port
* interface
* interface:port
:param default_port:
:return:
"""
r = []
for x in addr_list.split(","):
x = x.strip()
if not x:
continue
if ":" in x: # Implicit port notation
x, port = x.split(":", 1)
if is_int(port):
port = int(port)
else:
import socket
try:
port = socket.getservbyname(port)
except socket.error:
raise Exception("Invalid port: %s" % port)
else:
port = int(default_port)
if port <= 0 or port > 65535:
raise Exception("Invalid port: %s" % port)
if is_ipv4(x):
r += [(x, port)]
continue
if USE_NETIFACES: # Can resolve interface names
try:
a = netifaces.ifaddresses(x)
except ValueError:
raise Exception("Invalid interface '%s'" % x)
try:
x = a[2][0]["addr"]
except (IndexError, KeyError):
raise Exception(
"No ip address for interface: '%s' found" % x)
r += [(x, port)]
continue
raise Exception("Cannot resolve address '%s'" % x)
return r
def write_pidfile(self, pid=None):
"""
Write pidfile
:return:
"""
if not self.pidfile:
return
if pid is None:
pid = os.getpid() # Process' pid
try:
with open(self.pidfile, "w") as f:
f.write(str(pid))
except IOError, why:
logging.error("Unable to write PIDfile '%s': %s" % (self.pidfile,
why))
sys.exit(1)
def heartbeat(self):
"""
Touch pidfile
:return:
"""
if self.pidfile and self.heartbeat_enable:
logging.debug("Touching pidfile: %s" % self.pidfile)
try:
os.utime(self.pidfile, None)
except OSError, why:
logging.error("Unable to touch pidfile %s: %s" % (self.pidfile,
why))
def setup_opt_parser(self):
"""
Add additional options to setup_opt_parser
:return:
"""
pass
def process_command(self):
"""
Process self.args[0] command
:return:
"""
getattr(self, self.args[0])()
def guarded_run(self):
"""
Run daemon and catch common exceptions
:return:
"""
try:
self.run()
except KeyboardInterrupt:
pass
except MemoryError:
logging.error("Out of memory. Exiting.")
except:
error_report()
def start(self):
"""
"start" command handler
:return:
"""
# Daemonize
if self.options.daemonize:
self.become_daemon()
self.guarded_run()
def stop(self):
"""
"stop" command handler
:return:
"""
pidfile = self.config.get("main", "pidfile")
if os.path.exists(pidfile):
f = open(pidfile)
pid = int(f.read().strip())
f.close()
logging.info("Stopping %s pid=%s" % (self.daemon_name, pid))
try:
os.kill(pid, signal.SIGTERM)
except:
pass
os.unlink(pidfile)
def launch(self):
"""
"launch" command handler
:return:
"""
# Write pidfile
self.write_pidfile()
# Close stdin/stdout/stderr
i = open("/dev/null", "r")
o = open("/dev/null", "a+")
e = open("/dev/null", "a+")
os.dup2(i.fileno(), sys.stdin.fileno())
os.dup2(o.fileno(), sys.stdout.fileno())
os.dup2(e.fileno(), sys.stderr.fileno())
sys.stdout = o
sys.stderr = e
self.guarded_run()
def refresh(self):
"""
"refresh" command handler
:return:
"""
self.stop()
self.start()
def SIGUSR2(self, signo, frame):
"""
Dump current execution frame trace on SIGUSR2
:param signo:
:param frame:
:return:
"""
frames = sys._current_frames().items()
if len(frames) == 1:
# Single thread
frame_report(frame)
else:
# Multi-threaded
import threading
for tid, frame in frames:
if tid in threading._active:
caption = "Thread: name=%s id=%s" % (
threading._active[tid].getName(), tid)
else:
caption = "Unknown thread: id=%s" % tid
frame_report(frame, caption)
def SIGHUP(self, signo, frame):
"""
Reload config on SIGHUP
:param signo:
:param frame:
:return:
"""
self.load_config()
def SIGINT(self, signo, frame):
"""
^C processing
:param signo:
:param frame:
:return:
"""
logging.info("SIGINT received. Exiting")
os._exit(0)
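# --- usage sketch (not part of the original module) ---
# A minimal, hypothetical daemon built on this base class; the name, config
# files and loop body are illustrative only.
#
#     from noc.lib.daemon import Daemon
#
#     class PingDaemon(Daemon):
#         daemon_name = "pingd"
#
#         def run(self):
#             import time
#             while True:
#                 self.heartbeat()
#                 time.sleep(10)
#
#     if __name__ == "__main__":
#         PingDaemon().process_command()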
|
PypiClean
|
/HydPy-5.0.1-cp38-cp38-win_amd64.whl/hydpy/models/conv/conv_derived.py
|
# import...
# ...from site-packages
import numpy
# ...from HydPy
from hydpy.core import parametertools
from hydpy.models.conv import conv_control
from hydpy.models.conv import conv_fluxes
class NmbInputs(parametertools.Parameter):
"""The number of inlet nodes [-]"""
NDIM, TYPE, TIME, SPAN = 0, int, None, (1, None)
CONTROLPARAMETERS = (conv_control.InputCoordinates,)
_DEPENDENT_SEQUENCES = (
conv_fluxes.Inputs,
conv_fluxes.InputPredictions,
conv_fluxes.InputResiduals,
)
def __call__(self, *args, **kwargs):
super().__call__(*args, **kwargs)
for sequence in self.subpars.pars.model.sequences.fluxes:
if isinstance(sequence, self._DEPENDENT_SEQUENCES):
sequence.shape = self
def update(self) -> None:
"""Determine the number of inlet nodes via inspecting control parameter
|InputCoordinates|.
Note that invoking method |NmbInputs.update| like calling the parameter
directly also sets the shape of flux sequence |conv_fluxes.Inputs|:
>>> from hydpy.models.conv import *
>>> parameterstep()
>>> inputcoordinates(
... in1=(0.0, 3.0),
... in2=(2.0, -1.0))
>>> derived.nmbinputs.update()
>>> derived.nmbinputs
nmbinputs(2)
>>> fluxes.inputs.shape
(2,)
>>> derived.nmbinputs(3)
>>> derived.nmbinputs
nmbinputs(3)
>>> fluxes.inputs.shape
(3,)
"""
self(self.subpars.pars.control.inputcoordinates.shape[0])
class NmbOutputs(parametertools.Parameter):
"""The number of outlet nodes [-]"""
NDIM, TYPE, TIME, SPAN = 0, int, None, (1, None)
CONTROLPARAMETERS = (conv_control.OutputCoordinates,)
_DEPENDENT_SEQUENCES = (
conv_fluxes.Outputs,
conv_fluxes.OutputPredictions,
conv_fluxes.OutputResiduals,
)
def __call__(self, *args, **kwargs):
super().__call__(*args, **kwargs)
for sequence in self.subpars.pars.model.sequences.fluxes:
if isinstance(sequence, self._DEPENDENT_SEQUENCES):
sequence.shape = self
def update(self) -> None:
"""Determine the number of inlet nodes via inspecting control parameter
|OutputCoordinates|.
Note that invoking method |NmbOutputs.update| like calling the parameter
directly also sets the shape of flux sequence |conv_fluxes.Outputs|:
>>> from hydpy.models.conv import *
>>> parameterstep()
>>> outputcoordinates(
... out1=(0.0, 3.0),
... out2=(2.0, -1.0))
>>> derived.nmboutputs.update()
>>> derived.nmboutputs
nmboutputs(2)
>>> fluxes.outputs.shape
(2,)
>>> derived.nmboutputs(3)
>>> derived.nmboutputs
nmboutputs(3)
>>> fluxes.outputs.shape
(3,)
"""
self(self.subpars.pars.control.outputcoordinates.shape[0])
class Distances(parametertools.Parameter):
"""Distances of the inlet nodes to each outlet node [?]."""
NDIM, TYPE, TIME, SPAN = 2, float, None, (None, None)
CONTROLPARAMETERS = (
conv_control.InputCoordinates,
conv_control.OutputCoordinates,
)
def update(self) -> None:
"""Determine the distances.
The individual rows of parameter |Distances| correspond to the
outlet nodes; the columns contain the inlet nodes' indices:
>>> from hydpy.models.conv import *
>>> parameterstep()
>>> inputcoordinates(
... in1=(0.0, 3.0),
... in2=(2.0, -1.0))
>>> outputcoordinates(
... out1=(0.0, 3.0),
... out2=(3.0, -2.0),
... out3=(1.0, 2.0))
>>> derived.distances.update()
>>> derived.distances
distances([[0.0, 4.472136],
[5.830952, 1.414214],
[1.414214, 3.162278]])
"""
control = self.subpars.pars.control
incoords = control.inputcoordinates.__hydpy__get_value__()
outcoords = control.outputcoordinates.__hydpy__get_value__()
distances = numpy.empty((len(outcoords), len(incoords)), dtype=float)
for idx, outcoord in enumerate(outcoords):
distances[idx, :] = numpy.sqrt(
numpy.sum((outcoord - incoords) ** 2, axis=1)
)
self.__hydpy__set_shape__(distances.shape)
self.__hydpy__set_value__(distances)
class ProximityOrder(parametertools.Parameter):
"""Indices of the inlet nodes in the order of their proximity to each
outlet node [-]."""
NDIM, TYPE, TIME, SPAN = 2, int, None, (None, None)
CONTROLPARAMETERS = (
conv_control.MaxNmbInputs,
conv_control.InputCoordinates,
conv_control.OutputCoordinates,
)
DERIVEDPARAMETERS = (Distances,)
def update(self) -> None:
"""Determine the proximity-order of the inlet and outlet nodes.
The individual rows of parameter |ProximityOrder| correspond to the
outlet nodes; the columns contain the inlet nodes' indices:
>>> from hydpy.models.conv import *
>>> parameterstep()
>>> inputcoordinates(
... in1=(0.0, 3.0),
... in2=(2.0, -1.0))
>>> outputcoordinates(
... out1=(0.0, 3.0),
... out2=(3.0, -2.0),
... out3=(1.0, 2.0))
>>> maxnmbinputs()
>>> derived.distances.update()
>>> derived.proximityorder.update()
>>> derived.proximityorder
proximityorder([[0, 1],
[1, 0],
[0, 1]])
        Set the value of parameter |MaxNmbInputs| to one if you want to
consider the respective nearest input node only:
>>> maxnmbinputs(1)
>>> derived.proximityorder.update()
>>> derived.proximityorder
proximityorder([[0],
[1],
[0]])
"""
control = self.subpars.pars.control
nmbinputs = control.maxnmbinputs.__hydpy__get_value__()
distances = self.subpars.distances.__hydpy__get_value__()
idxs = numpy.empty((len(distances), nmbinputs), dtype=int)
for idx, distances_ in enumerate(distances):
idxs[idx, :] = numpy.argsort(distances_)[:nmbinputs]
self.__hydpy__set_shape__(idxs.shape)
self.__hydpy__set_value__(idxs)
class Weights(parametertools.Parameter):
"""Weighting coefficients of the inlet nodes corresponding to their
proximity to each outlet node and parameter |Power| [-]."""
NDIM, TYPE, TIME, SPAN = 2, float, None, (None, None)
CONTROLPARAMETERS = (
conv_control.MaxNmbInputs,
conv_control.InputCoordinates,
conv_control.OutputCoordinates,
conv_control.Power,
)
DERIVEDPARAMETERS = (
Distances,
ProximityOrder,
)
def update(self) -> None:
"""Determine the weighting coefficients.
The individual rows of parameter |Weights| correspond to the
        outlet nodes; the columns contain the inlet nodes' weights:
>>> from hydpy.models.conv import *
>>> parameterstep()
>>> inputcoordinates(
... in1=(0.0, 3.0),
... in2=(2.0, -1.0),
... in3=(0.0, 3.0),
... in4=(99.0, 99.0))
>>> outputcoordinates(
... out1=(0.0, 3.0),
... out2=(3.0, -2.0),
... out3=(1.0, 2.0))
>>> maxnmbinputs()
>>> power(2.0)
>>> derived.distances.update()
>>> derived.proximityorder.update()
>>> derived.weights.update()
>>> derived.weights
weights([[inf, inf, 0.05, 0.000053],
[0.5, 0.029412, 0.029412, 0.000052],
[0.5, 0.5, 0.1, 0.000053]])
You can restrict the number of inlet nodes used for each outlet
node via parameter |MaxNmbInputs|. In the following example, it
seems reasonable to set its value to three to ignore the far-distant
inlet node `in4`:
>>> maxnmbinputs(3)
>>> derived.distances.update()
>>> derived.proximityorder.update()
>>> derived.weights.update()
>>> derived.weights
weights([[inf, inf, 0.05],
[0.5, 0.029412, 0.029412],
[0.5, 0.5, 0.1]])
"""
control = self.subpars.pars.control
nmbinputs = control.maxnmbinputs.__hydpy__get_value__()
power = control.power.__hydpy__get_value__()
distances = self.subpars.distances.__hydpy__get_value__()
proximityorder = self.subpars.proximityorder.__hydpy__get_value__()
weights = numpy.empty((len(distances), nmbinputs), dtype=float)
for idx, distances_ in enumerate(distances):
sorteddistances = distances_[proximityorder[idx, :]]
jdxs = sorteddistances > 0.0
weights[idx, jdxs] = 1.0 / sorteddistances[jdxs] ** power
weights[idx, ~jdxs] = numpy.inf
self.__hydpy__set_shape__(weights.shape)
self.__hydpy__set_value__(weights)
|
PypiClean
|
/pywellcad-0.3.0.tar.gz/pywellcad-0.3.0/wellcad/com/_font.py
|
from ._dispatch_wrapper import DispatchWrapper
class Font(DispatchWrapper):
""" This class encapsulates the properties of fonts used throughout WellCAD
For example, fonts can be specified in the following places:
* Comment log
* Comment partition
* Property partition
* Log title
In general, attributes of this class reflect those of the ``LOGFONT``
structure in the Win32 API. Documentation for this structure can be found
`here <https://docs.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-logfonta>`_.
Example
-------
>>> log = borehole.log("Comments")
>>> font = log.font
>>> font.name
'Arial Narrow'
>>> font.italic
False
"""
@property
def name(self):
"""str: The name of the font type used."""
return self._dispatch.Name
@name.setter
def name(self, new_name):
self._dispatch.Name = new_name
@property
def weight(self):
"""int: The weight (boldness) of the font used.
Values typically range from 100 to 900 where
400 is regular weight and 700 is bold.
"""
return self._dispatch.Weight
@weight.setter
def weight(self, new_weight):
self._dispatch.Weight = new_weight
@property
def italic(self):
"""bool: Whether the font is italicized."""
return self._dispatch.Italic
@italic.setter
def italic(self, flag):
self._dispatch.Italic = flag
@property
def underline(self):
"""bool: Whether the font is underlined."""
return self._dispatch.Underline
@underline.setter
def underline(self, flag):
self._dispatch.Underline = flag
@property
def bold(self):
"""bool: Whether the font is bold."""
return self._dispatch.Bold
@bold.setter
def bold(self, flag):
self._dispatch.Bold = flag
@property
    def strikethrough(self):
"""bool: Whether the font is struck through."""
return self._dispatch.Strikethrough
@strikethrough.setter
def strikethrough(self, flag):
self._dispatch.Strikethrough = flag
@property
def size(self):
"""int: The size of the font.
See the Win32 ``LOGFONT`` documentation for more of an
explanation.
"""
return self._dispatch.Size
@size.setter
def size(self, new_size):
self._dispatch.Size = new_size
@property
def charset(self):
"""int: The index of the character set used.
The character set can be selected from a list of available values
        documented in the Win32 ``LOGFONT`` documentation.
"""
return self._dispatch.Charset
@charset.setter
def charset(self, new_charset):
self._dispatch.Charset = new_charset
|
PypiClean
|
/Products.kupu-1.5.2.zip/Products.kupu-1.5.2/Products/kupu/plone/kupu_references/referencebrowser.js
|
function KupuRefDrawer(tool, xsluri, libsuri, searchuri, selecturi) {
/* a specific LibraryDrawer for selection of references */
this.init(tool, xsluri, libsuri, searchuri, 'krb_drawer_base', selecturi);
this.drawertitle = "Add reference";
this.drawertype = "reference";
this.xmldata = null;
this.createContent = function(e, fieldName, label, multiple, resourcetype) {
this.libsuri = libsuri + resourcetype;
this.searchuri = searchuri + resourcetype;
this.selecturi = selecturi + resourcetype;
this.fieldName = fieldName;
this.setTitle(label);
this.loadSelection(fieldName, multiple);
this.setPosition(e);
KupuRefDrawer.prototype.createContent.call(this);
};
this.loadSelection = function(fieldName, multiple) {
this.multiple = multiple;
this.field = document.getElementById(fieldName);
this.preview = document.getElementById(fieldName+'_preview');
this.currentSelection = [];
        var ids = this.field.value.split(/\n/);
for (var i = 0; i < ids.length; i++) {
var id = ids[i].strip();
if (!id) continue;
this.currentSelection.push(id);
}
this.selectedSrc = this.currentSelection.join(' ');
};
this.save = function() {
var sel = this.currentSelection;
var titles = [];
var el = newElement;
var xml = this.xmldata;
var preview = this.preview;
for (var i = sel.length; i > 0;) {
if (sel[--i]) break;
delete sel[i];
sel.length = i;
}
var emptymsg = preview.getElementsByTagName('em')[0];
emptymsg.style.display = (sel.length==0)?'':'none';
for (var node = preview.firstChild; node; node = nxt) {
var nxt = node.nextSibling;
if (node.nodeName.toLowerCase()=='div') {
preview.removeChild(node);
};
};
for (var i = 0; i < sel.length; i++) {
var id = sel[i];
var t = id;
var node = xml.selectSingleNode("//resource[@id='"+id+"']");
            var div = document.createElement("div");
div.className = i%2?'odd':'even';
var link = document.createElement('a');
link.href = node.selectSingleNode('uri/text()').nodeValue.strip()+'/view';
if (_SARISSA_IS_IE) {
/* IE won't take a node to transformToDocument */
var result = node.transformNode(this.shared.xsl);
link.innerHTML = result;
} else {
var result = this.shared.xsltproc.transformToDocument(node);
var imp = window.document.importNode(result.documentElement, true);
Sarissa.copyChildNodes(imp, link, true);
};
div.appendChild(link);
preview.appendChild(div);
}
var nvalue = this.currentSelection.join('\n');
if (nvalue != this.field.value) {
this.field.value = nvalue;
kupuFireEvent(this.field, 'change');
}
referencebrowse_showRemove(this.fieldName, this.currentSelection.length);
drawertool.closeDrawer();
};
this.setPosition = function(e){
// this function adapted from code in pdlib.js in CompositePage
var drawernode = this.element;
if (!e)
e = event;
var page_w = window.innerWidth || document.documentElement.clientWidth || document.body.clientWidth;
var page_h = window.innerHeight || document.documentElement.clientHeight || document.body.clientHeight;
// have to check documentElement in some IE6 releases
var page_x = window.pageXOffset || document.documentElement.scrollLeft || document.body.scrollLeft;
var page_y = window.pageYOffset || document.documentElement.scrollTop || document.body.scrollTop;
// Choose a location for the menu based on where the user clicked
var node_top, node_left;
var drawer_w = drawernode.offsetWidth + 20;
var drawer_h = drawernode.offsetHeight + 20;
if (drawer_h < 400) {
drawer_h = 400;
}
var drawer_half_w = Math.floor(drawer_w / 2);
var drawer_half_h = Math.floor(drawer_h / 2);
if (page_w - e.clientX < drawer_half_w) {
// Close to the right edge
node_left = page_x + page_w - drawer_w;
}
else {
node_left = page_x + e.clientX - drawer_half_w;
}
if (node_left < page_x) {
node_left = page_x;
}
if (page_h - e.clientY < drawer_half_h) {
// Close to the bottom
node_top = page_y + page_h - drawer_h;
}
else {
node_top = page_y + e.clientY - drawer_half_h;
}
if (node_top < page_y) {
node_top = page_y;
}
drawernode.style.left = '' + node_left + 'px';
drawernode.style.top = '' + node_top + 'px';
};
};
function kupuFireEvent(el, event) {
if (el.fireEvent) {
el.fireEvent('on'+event); //IE
} else {
var evt = document.createEvent("HTMLEvents");
evt.initEvent(event,true,true);
el.dispatchEvent( evt );
}
}
function fakeEditor() {
this.getBrowserName = function() {
if (_SARISSA_IS_MOZ) {
return "Mozilla";
} else if (_SARISSA_IS_IE) {
return "IE";
} else {
throw "Browser not supported!";
}
};
this.resumeEditing = function() {};
this.suspendEditing = function() {};
this.config = {};
this._saveSelection = function() {};
this.busy = function() {};
this.notbusy = function() {};
};
var drawertool;
function krb_initdrawer(link_xsl_uri, link_libraries_uri, search_links_uri, selecturi) {
// Delay init until the drawer is actually opened.
if (!KupuRefDrawer.init) {
KupuRefDrawer.prototype = new LibraryDrawer;
// drawertool must be set, but must not change if already set.
drawertool = window.drawertool || new DrawerTool;
}
var klass = KupuRefDrawer;
klass.linktool = new LinkTool();
klass.link_xsl_uri = link_xsl_uri;
klass.link_libraries_uri = link_libraries_uri;
klass.search_links_uri = search_links_uri;
klass.selecturi = selecturi;
var editor = new fakeEditor();
drawertool.initialize(editor);
};
function referencebrowser_draweropen(e, fieldName, label, multival, resource_type) {
var name = 'krbdrawer-'+fieldName;
var drawer = drawertool.drawers[name];
if (!drawer) {
var klass=KupuRefDrawer;
drawer = new klass(klass.linktool, klass.link_xsl_uri,
klass.link_libraries_uri, klass.search_links_uri, klass.selecturi);
drawertool.registerDrawer(name, drawer);
}
drawertool.openDrawer(name, [e, fieldName, label, multival,resource_type]);
};
function referencebrowse_showRemove(fieldName, items)
{
var btnRemove = document.getElementById(fieldName+'_remove');
if (btnRemove) btnRemove.style.display = items?'':'none';
}
// function to clear the reference field or remove items
// from the multivalued reference list.
function referencebrowser_removeReference(fieldName)
{
var field = document.getElementById(fieldName);
var preview = document.getElementById(fieldName + '_preview');
var emptymsg = preview.getElementsByTagName('em')[0];
emptymsg.style.display = '';
for (var node = preview.firstChild; node; node = nxt) {
var nxt = node.nextSibling;
if (node.nodeName.toLowerCase()=='div') {
preview.removeChild(node);
};
};
field.value = '';
kupuFireEvent(field, 'change');
referencebrowse_showRemove(fieldName, false);
};
/*------------------- Fallback code for non-kupu versions --------------*/
// function to open the popup window
function fallback_openBrowser(path, fieldName, at_url, fieldRealName)
{
atrefpopup = window.open(path + '/referencebrowser_popup?fieldName=' + fieldName + '&fieldRealName=' + fieldRealName +'&at_url=' + at_url,'referencebrowser_popup','toolbar=no,location=no,status=no,menubar=no,scrollbars=yes,resizable=yes,width=500,height=550');
}
// function to return a reference from the popup window back into the widget
function referencebrowser_setReference(widget_id, uid, label, multi)
{
// differentiate between the single and multiselect widget
// since the single widget has an extra label field.
if (multi==0) {
element=document.getElementById(widget_id)
label_element=document.getElementById(widget_id + '_label')
element.value=uid
label_element.value=label
} else {
list=document.getElementById(widget_id)
// check if the item isn't already in the list
for (var x=0; x < list.length; x++) {
if (list[x].value == uid) {
return false;
}
}
// now add the new item
theLength=list.length;
list[theLength] = new Option(label);
list[theLength].selected='selected';
list[theLength].value=uid
}
}
// function to clear the reference field or remove items
// from the multivalued reference list.
function fallback_removeReference(widget_id, multi)
{
if (multi) {
list=document.getElementById(widget_id)
for (var x=list.length-1; x >= 0; x--) {
if (list[x].selected) {
list[x]=null;
}
}
for (var x=0; x < list.length; x++) {
list[x].selected='selected';
}
} else {
element=document.getElementById(widget_id);
label_element=document.getElementById(widget_id + '_label');
label_element.value = "";
element.value="";
}
}
|
PypiClean
|
/pennaipy-0.17a0.tar.gz/pennaipy-0.17a0/ai/recommender/surprise_recommenders.py
|
import pandas as pd
from pandas.util import hash_pandas_object
import hashlib
import os
import gzip
import pickle
import copy
# import json
# import urllib.request, urllib.parse
from .base import BaseRecommender
#from ..metalearning import get_metafeatures
# from sklearn.preprocessing import RobustScaler
# from sklearn.pipeline import Pipeline
import numpy as np
from collections import defaultdict, OrderedDict
import pdb
from surprise import (Reader, Dataset, CoClustering, SlopeOne, KNNWithMeans,
KNNBasic, mySVD)
# import pyximport
# pyximport.install()
# from .svdedit import mySVD
from collections import defaultdict
import itertools as it
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(module)s: %(levelname)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class SurpriseRecommender(BaseRecommender):
"""Class to support generic recommenders from the Surprise library.
Not intended to be used as a standalone class.
Parameters
----------
ml_type: str, 'classifier' or 'regressor'
Recommending classifiers or regressors. Used to determine ML options.
metric: str (default: bal_accuracy for classifiers, mse for regressors)
The metric by which to assess performance on the datasets.
"""
def __init__(self,
ml_type='classifier',
metric=None,
ml_p=None,
random_state=None,
knowledgebase_results=None,
knowledgebase_metafeatures=None,
load_serialized_rec="if_exists",
serialized_rec_directory=None,
serialized_rec_filename=None):
""" set default recommender specific parameters; might be overwritten by loading serialized recommender"""
if self.__class__.__name__ == 'SurpriseRecommender':
raise RuntimeError('Do not instantiate the SurpriseRecommender class '
'directly; use one of the method-specific classes instead.')
self.set_algo()
self.random_state = random_state
if hasattr(self.algo, 'random_state'):
self.algo.random_state = self.random_state
# store results
self.results_df = pd.DataFrame()
self.first_fit = True
# reader for translating between PennAI results and the Surprise training set
self.reader = Reader()
self.ml_type = ml_type
if metric is None:
logger.debug('metric is None, setting...')
self.metric='bal_accuracy' if self.ml_type=='classifier' else 'mse'
else:
self.metric = metric
assert(self.metric is not None)
logger.debug('self.algo_name: '+self.algo_name)
logger.debug('ml_type: '+self.ml_type)
logger.debug('metric: '+self.metric)
self.min_epochs = 10
self.max_epochs = 100
"""Initialize recommendation system."""
super().__init__(
ml_type,
metric,
ml_p,
serialized_rec_directory=serialized_rec_directory,
serialized_rec_filename=serialized_rec_filename,
load_serialized_rec=load_serialized_rec,
knowledgebase_results=knowledgebase_results,
random_state=random_state)
@property
def algo_name(self):
if type(self.algo).__name__ is None:
return type(self).__name__
return type(self.algo).__name__
def _reconstruct_training_data(self, results_data, results_mf=None,
source='pennai'):
"""Used for loading pickled recomenders to set results_df
without training.
:param results_data: DataFrame with columns corresponding to:
'dataset'
'algorithm'
'parameters'
self.metric
:param results_mf: metafeatures for the datasets in results_data
:param source: str, optional (default: 'pennai')
if 'pennai', will update tally of trained dataset models
"""
# update trained dataset models and hash table
super().update(results_data, results_mf, source)
# updates self.results_df and self.trainset
self._update_training_data(results_data, shuffle=True)
# check whether the set train data matches the pickled recommender's
# training data.
rowHashes = hash_pandas_object(self.results_df).values
newHash = hashlib.sha256(rowHashes).hexdigest()
if hasattr(self, 'results_df_hash'):
if newHash == self.results_df_hash:
logger.info('results_df hashes match')
else:
error_msg = 'the results_df hash from the pickle is different'
logger.error(error_msg)
raise ValueError(error_msg)
del self.results_df_hash
def load(self, filename=None, knowledgebase = None):
"""Load a saved recommender state."""
if knowledgebase is None:
logger.warning('A knowledgebase needs to be provided to load '
'Surprise Recommenders from file. Not loading.')
return
loaded = super().load(filename=filename)
if loaded:
logger.info('setting training data...')
self._reconstruct_training_data(knowledgebase,
source='knowledgebase')
def update(self, results_data, results_mf=None, source='pennai'):
"""Update ML / Parameter recommendations based on overall performance in
results_data.
:param results_data: DataFrame with columns corresponding to:
'dataset'
'algorithm'
'parameters'
self.metric
:param results_mf: metafeatures for the datasets in results_data
"""
# update trained dataset models and hash table
super().update(results_data, results_mf, source)
# update internal model
self._update_model(results_data)
def _update_training_data(self, results_data, shuffle=False):
"""Appends results_data to self.results_df. Sets the trainset for
the surprise recommender.
:param results_data: DataFrame with columns corresponding to:
'dataset'
'algorithm'
'parameters'
self.metric
:param shuffle: boolean, optional (default: False)
If true, results_data is shuffled before it is added to
self.results_df or self.trainset.
"""
if shuffle:
# shuffle the results data
logger.debug('shuffling results_data')
results_data = results_data.sample(frac=1,
random_state=self.random_state)
results_data.loc[:, 'algorithm-parameters'] = (
results_data['algorithm'].values + '|' +
results_data['parameter_hash'].values)
results_data.rename(columns={self.metric:'score'},inplace=True)
logger.info('append and drop dupes')
self.results_df = self.results_df.append(
results_data[['algorithm-parameters','_id','score']]
).drop_duplicates()
logger.info('load_from_df')
data = Dataset.load_from_df(self.results_df[['_id',
'algorithm-parameters',
'score']],
self.reader, rating_scale=(0,1))
# build training set from the data
self.trainset = data.build_full_trainset()
logger.debug('self.trainset # of ML-P combos: ' +
str(self.trainset.n_items))
logger.debug('self.trainset # of datasets: '
+ str(self.trainset.n_users))
def _update_model(self,results_data):
"""Stores new results and updates algo."""
logger.debug('updating '+self.algo_name+' model')
self._update_training_data(results_data, self.first_fit)
self.first_fit=False
logger.debug('fitting self.algo...')
# set the number of training iterations proportionally to the amount of
# results_data
self.algo.fit(self.trainset)
logger.debug('done.')
logger.debug('model '+self.algo_name+' updated')
def recommend(self, dataset_id, n_recs=1, dataset_mf = None):
"""Return a model and parameter values expected to do best on dataset.
Parameters
----------
dataset_id: string
ID of the dataset for which the recommender is generating
recommendations.
n_recs: int (default: 1), optional
Return a list of length n_recs in order of estimators and
parameters expected to do
best.
"""
# dataset hash table
super().recommend(dataset_id, n_recs, dataset_mf)
# dataset_hash = self.dataset_id_to_hash[dataset_id]
try:
predictions = []
filtered =0
for alg_params in self.mlp_combos:
if (dataset_id+'|'+alg_params not in
self.trained_dataset_models):
predictions.append(self.algo.predict(dataset_id, alg_params,
clip=False))
else:
filtered +=1
logger.debug('filtered '+ str(filtered) + ' recommendations')
logger.debug('getting top n predictions')
ml_rec, phash_rec, score_rec = self._get_top_n(predictions, n_recs)
logger.debug('returning ml recs')
except Exception as e:
logger.error( 'error running self.best_model_prediction for'+
str(dataset_id))
raise e
# update the recommender's memory with the new algorithm-parameter combos
# that it recommended
self._update_trained_dataset_models_from_rec(dataset_id,
ml_rec, phash_rec)
p_rec = [self.hash_2_param[ph] for ph in phash_rec]
return ml_rec, p_rec, score_rec
def _get_top_n(self,predictions, n=10):
'''Return the top-N recommendations for each user from a set of predictions.
Args:
predictions(list of Prediction objects): The list of predictions, as
returned by the test method of an algorithm.
n(int): The number of recommendations to output for each user. Default
is 10.
Returns:
ml recs, parameter recs, and their scores in three lists
'''
# grabs the ml ids and their estimated scores for this dataset
top_n = []
ml_dist = {}
for uid, iid, true_r, est, _ in predictions:
top_n.append((iid, est))
ml = iid.split('|')[0]
if ml in ml_dist.keys():
ml_dist[ml] += 1.0
else:
ml_dist[ml] = 1.0
n_ml = len(ml_dist.keys())
######
# Shuffle top_n just to remove tied algorithm bias when sorting
# Make uniform random choices from the Algorithms, then uniform random
# choices from their parameters to shuffle top_n
# the probability for each ML method is 1/total_methods/(# instances of that
# method)
inv_ml_dist = {k:1/n_ml/v for k,v in ml_dist.items()}
top_n_dist = np.array([inv_ml_dist[tn[0].split('|')[0]]
for tn in top_n])
top_n_idx = np.arange(len(top_n))
top_n_idx_s = np.random.choice(top_n_idx, len(top_n), replace=False,
p=top_n_dist)
top_n = [top_n[i] for i in top_n_idx_s]
#####
# sort top_n
top_n = sorted(top_n, key=lambda x: x[1], reverse=True)
top_n = top_n[:n]
logger.debug('filtered top_n:'+str(top_n))
ml_rec = [n[0].split('|')[0] for n in top_n]
p_rec = [n[0].split('|')[1] for n in top_n]
score_rec = [n[1] for n in top_n]
return ml_rec, p_rec, score_rec
class CoClusteringRecommender(SurpriseRecommender):
"""Generates recommendations via CoClustering, see
https://surprise.readthedocs.io/en/stable/co_clustering.html
"""
def set_algo(self):
self.algo = CoClustering(n_cltr_u = 10)
# def __init__(self, ml_type='classifier', metric=None, ml_p=None,
# algo=None):
# super().__init__(ml_type, metric, ml_p, algo)
# # set n clusters for ML equal to # of ML methods
# self.
def _update_model(self,results_data):
"""Stores new results and updates algo."""
self.algo.n_cltr_i = self.ml_p.algorithm.nunique()
super()._update_model(results_data)
class KNNWithMeansRecommender(SurpriseRecommender):
"""Generates recommendations via KNNWithMeans, see
https://surprise.readthedocs.io/en/stable/knn_inspired.html
"""
def set_algo(self):
self.algo = KNNWithMeans()
class KNNDatasetRecommender(SurpriseRecommender):
"""Generates recommendations via KNN with clusters defined over datasets, see
https://surprise.readthedocs.io/en/stable/knn_inspired.html
"""
def set_algo(self):
self.algo = KNNBasic(sim_options={'user_based':True})
@property
def algo_name(self):
return 'KNN-Dataset'
class KNNMLRecommender(SurpriseRecommender):
"""Generates recommendations via KNN with clusters defined over algorithms, see
https://surprise.readthedocs.io/en/stable/knn_inspired.html
"""
def set_algo(self):
self.algo = KNNBasic(sim_options={'user_based':False})
@property
def algo_name(self):
return 'KNN-ML'
class SlopeOneRecommender(SurpriseRecommender):
"""Generates recommendations via SlopeOne, see
https://surprise.readthedocs.io/en/stable/slope_one.html
"""
def set_algo(self):
self.algo = SlopeOne()
class SVDRecommender(SurpriseRecommender):
"""SVD recommender.
see https://surprise.readthedocs.io/en/stable/matrix_factorization.html
Recommends machine learning algorithms and parameters using the SVD algorithm.
- stores ML + P and every dataset.
- learns a matrix factorization on the non-missing data.
- given a dataset, estimates the rankings of all ML+P and returns the top
n_recs.
Note that we use a custom online version of SVD found here:
https://github.com/lacava/surprise
"""
def set_algo(self, surprise_kwargs={}):
alg_kwargs = {'n_factors':20,
'biased':True,
'init_mean':0,
'init_std_dev':.2,
'lr_all':.01,
'reg_all':.02,
'verbose':False}
alg_kwargs.update(surprise_kwargs)
self.algo = mySVD(**alg_kwargs)
# def __init__(self, ml_type='classifier', metric=None, ml_p=None,
# filename=None, knowledgebase=None, random_state=None,
# surprise_kwargs={}):
# super().__init__(ml_type=ml_type, metric=metric, ml_p=ml_p,
# filename=filename, knowledgebase=knowledgebase,
# random_state=random_state)
def _update_model(self,results_data):
"""Stores new results and updates SVD."""
logger.info('updating SVD model')
# shuffle the results data the first time
if self.first_fit:
logger.debug('shuffling results_data')
results_data = results_data.sample(frac=1,
random_state=self.random_state)
self.first_fit=False
self._update_training_data(results_data)
# set the number of training iterations proportionally to the amount of
# results_data
logger.info('algo random_state: '+str(self.algo.random_state))
self.algo.n_epochs = min(len(results_data),self.max_epochs)
self.algo.n_epochs = max(self.algo.n_epochs,self.min_epochs)
logger.debug('fitting self.algo...')
self.algo.partial_fit(self.trainset)
logger.debug('done.')
logger.debug('model SVD updated')
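# The shuffle weighting in _get_top_n above gives each candidate a probability of
# 1 / (number of ML methods) / (number of candidates for that method), so every
# method carries equal total mass before the score sort breaks ties.
# A self-contained toy sketch of that weighting (illustrative only, not part of
# the recommender API):
if __name__ == "__main__":
    _top_n = [("svm|p1", 0.9), ("svm|p2", 0.9), ("svm|p3", 0.9), ("knn|p1", 0.9)]
    _counts = {}
    for _iid, _ in _top_n:
        _ml = _iid.split("|")[0]
        _counts[_ml] = _counts.get(_ml, 0.0) + 1.0
    _inv = {k: 1 / len(_counts) / v for k, v in _counts.items()}
    _probs = np.array([_inv[_iid.split("|")[0]] for _iid, _ in _top_n])
    # svm entries get 1/6 each, knn gets 1/2; the weights sum to 1.0
    print(_probs, _probs.sum())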
|
PypiClean
|
/olive_ai-0.3.1-py3-none-any.whl/olive/snpe/utils/local.py
|
import logging
import os
import platform
import time
from pathlib import Path
from typing import Tuple
from olive.common.utils import run_subprocess
logger = logging.getLogger(__name__)
def get_snpe_root() -> str:
"""
Get the SNPE root directory from the SNPE_ROOT environment variable.
"""
try:
snpe_root = os.environ["SNPE_ROOT"]
logger.debug(f"SNPE_ROOT is set to {snpe_root}")
except KeyError:
raise ValueError("SNPE_ROOT is not set")
return snpe_root
def get_snpe_target_arch(fail_on_unsupported: bool = True) -> str:
"""
Get the SNPE target architecture from the system and processor.
fail_on_unsupported: Whether to raise an exception if the system or processor is not supported
"""
system = platform.system()
snpe_target_arch = None
if system == "Linux":
machine = platform.machine()
if machine == "x86_64":
snpe_target_arch = "x64-Linux"
else:
if fail_on_unsupported:
raise ValueError(f"Unsupported machine {machine} on system {system}")
elif system == "Windows":
processor_identifier = os.environ.get("PROCESSOR_IDENTIFIER", "")
snpe_target_arch = "ARM64-Windows" if "ARM" in processor_identifier else "x64-Windows"
else:
if fail_on_unsupported:
raise ValueError(f"Unsupported system {system}")
logger.debug(f"SNPE target architecture: {snpe_target_arch}")
return snpe_target_arch
def get_snpe_win_arch_name(snpe_root: str, snpe_target_arch: str) -> str:
"""
Get the SNPE ARM64-Windows architecture name from the SNPE root directory.
snpe_root: The unzipped SNPE SDK directory
snpe_target_arch: The SNPE target architecture
"""
if not Path(snpe_root).exists():
raise FileNotFoundError(f"Path {snpe_root} does not exist")
prefix_map = {"x64-Windows": "x86_64-windows-", "ARM64-Windows": "aarch64-windows-"}
prefix = prefix_map[snpe_target_arch]
arm_windows_archs = list(Path(snpe_root).glob(f"lib/{prefix}*"))
if len(arm_windows_archs) == 0:
raise FileNotFoundError(f"SNPE_ROOT {snpe_root} missing {prefix}*")
arm_windows_arch = arm_windows_archs[0].name
logger.debug(f"SNPE {snpe_target_arch} arch name: {arm_windows_arch}")
return arm_windows_arch
def get_snpe_env(dev: bool = False) -> dict:
"""
Get the SNPE environment variables.
dev: Whether to use the SNPE development environment. Only supported on x64-Linux
"""
snpe_root = get_snpe_root()
target_arch = get_snpe_target_arch()
if "Linux" in target_arch:
target_arch_name = "x86_64-linux-clang"
else:
target_arch_name = get_snpe_win_arch_name(snpe_root, target_arch)
if dev and target_arch != "x64-Linux":
raise ValueError("SNPE development environment is only supported on x64-Linux")
bin_path = str(Path(f"{snpe_root}/bin/{target_arch_name}"))
lib_path = str(Path(f"{snpe_root}/lib/{target_arch_name}"))
env = {}
delimiter = os.path.pathsep
if platform.system() == "Linux":
env["LD_LIBRARY_PATH"] = lib_path
if dev:
python36_env_path = str(Path(f"{snpe_root}/python36-env/bin"))
if not Path(python36_env_path).exists():
raise FileNotFoundError(
f"Path {python36_env_path} does not exist. Please run 'python -m olive.snpe.configure' to add the"
" missing file"
)
bin_path += delimiter + python36_env_path
env["PYTHONPATH"] = str(Path(f"{snpe_root}/lib/python"))
bin_path += delimiter + "/usr/bin"
elif platform.system() == "Windows":
if target_arch == "ARM64-Windows":
bin_path = str(Path(f"{snpe_root}/olive-arm-win"))
if not Path(bin_path).exists():
raise FileNotFoundError(
f"Path {bin_path} does not exist. Please run 'python -m olive.snpe.configure' to add the"
" missing folder"
)
else:
bin_path += delimiter + lib_path
env["PATH"] = bin_path
for paths in env.values():
for path in paths.split(delimiter):
if not Path(path).exists():
raise FileNotFoundError(f"Path {str(Path(path))} does not exist")
return env
def run_snpe_command(
cmd: str, dev: bool = False, runs: int = 1, sleep: int = 0, log_error: bool = True
) -> Tuple[str, str]:
"""
Run a SNPE command.
cmd: The command to run
dev: Whether to use the SNPE development environment. Only supported on x64-Linux
runs: The number of times to run the command
sleep: The number of seconds to sleep between runs
log_error: Whether to log an error if the command fails
"""
env = get_snpe_env(dev)
full_cmd = cmd
for run in range(runs):
run_log_msg = "" if runs == 1 else f" (run {run + 1}/{runs})"
logger.debug(f"Running SNPE command{run_log_msg}: {full_cmd}")
returncode, stdout, stderr = run_subprocess(full_cmd, env)
logger.debug(f"Return code: {returncode} \n Stdout: {stdout} \n Stderr: {stderr}")
if returncode != 0:
break
if sleep > 0 and run < runs - 1:
time.sleep(sleep)
if returncode != 0:
error_msg = (
f"Error running SNPE command. \n Command: {full_cmd} \n Return code: {returncode} \n Stdout: {stdout} \n"
f" Stderr: {stderr}"
)
if log_error:
logger.error(error_msg)
raise RuntimeError(error_msg)
return stdout, stderr
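# Hedged usage sketch for the helpers above. get_snpe_target_arch only inspects
# the local platform, so it is safe to call anywhere; the commented command is an
# illustrative SNPE CLI invocation and needs a configured SDK (SNPE_ROOT set):
if __name__ == "__main__":
    print(get_snpe_target_arch(fail_on_unsupported=False))
    # stdout, _ = run_snpe_command("snpe-net-run --version", runs=2, sleep=1)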
|
PypiClean
|
/test_meraki-1.0-py3-none-any.whl/test_meraki/models/protected_networks.py
|
class ProtectedNetworks(object):
"""Implementation of the 'ProtectedNetworks' model.
Set the included/excluded networks from the intrusion engine (optional -
omitting will leave current config unchanged). This is available only in
'passthrough' mode
Attributes:
use_default (bool): true/false whether to use special IPv4 addresses:
https://tools.ietf.org/html/rfc5735 (required). Default value is
true if none currently saved
included_cidr (list of string): list of IP addresses or subnets being
protected (required if 'useDefault' is false)
excluded_cidr (list of string): list of IP addresses or subnets being
excluded from protection (required if 'useDefault' is false)
"""
# Create a mapping from Model property names to API property names
_names = {
"use_default":'useDefault',
"included_cidr":'includedCidr',
"excluded_cidr":'excludedCidr'
}
def __init__(self,
use_default=None,
included_cidr=None,
excluded_cidr=None):
"""Constructor for the ProtectedNetworks class"""
# Initialize members of the class
self.use_default = use_default
self.included_cidr = included_cidr
self.excluded_cidr = excluded_cidr
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
use_default = dictionary.get('useDefault')
included_cidr = dictionary.get('includedCidr')
excluded_cidr = dictionary.get('excludedCidr')
# Return an object of this model
return cls(use_default,
included_cidr,
excluded_cidr)
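# A short round-trip sketch for the model above; the CIDR values are illustrative:
if __name__ == "__main__":
    _payload = {
        "useDefault": False,
        "includedCidr": ["10.0.0.0/8"],
        "excludedCidr": ["10.0.5.0/24"],
    }
    _net = ProtectedNetworks.from_dictionary(_payload)
    # camelCase API keys map back onto the snake_case attributes listed in _names
    print(_net.use_default, _net.included_cidr, _net.excluded_cidr)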
|
PypiClean
|
/django-rte-0.4.0.tar.gz/django-rte-0.4.0/rte/static/rte/tiny_mce/utils/validate.js
|
// String validation:
if (!Validator.isEmail('myemail'))
alert('Invalid email.');
// Form validation:
var f = document.forms['myform'];
if (!Validator.isEmail(f.myemail))
alert('Invalid email.');
*/
var Validator = {
isEmail : function(s) {
return this.test(s, '^[-!#$%&\'*+\\./0-9=?A-Z^_`a-z{|}~]+@[-!#$%&\'*+\\/0-9=?A-Z^_`a-z{|}~]+\.[-!#$%&\'*+\\./0-9=?A-Z^_`a-z{|}~]+$');
},
isAbsUrl : function(s) {
return this.test(s, '^(news|telnet|nttp|file|http|ftp|https)://[-A-Za-z0-9\\.]+\\/?.*$');
},
isSize : function(s) {
return this.test(s, '^[0-9.]+(%|in|cm|mm|em|ex|pt|pc|px)?$');
},
isId : function(s) {
return this.test(s, '^[A-Za-z_]([A-Za-z0-9_])*$');
},
isEmpty : function(s) {
var nl, i;
if (s.nodeName == 'SELECT' && s.selectedIndex < 1)
return true;
if (s.type == 'checkbox' && !s.checked)
return true;
if (s.type == 'radio') {
for (i=0, nl = s.form.elements; i<nl.length; i++) {
if (nl[i].type == "radio" && nl[i].name == s.name && nl[i].checked)
return false;
}
return true;
}
return new RegExp('^\\s*$').test(s.nodeType == 1 ? s.value : s);
},
isNumber : function(s, d) {
return !isNaN(s.nodeType == 1 ? s.value : s) && (!d || !this.test(s, '^-?[0-9]*\\.[0-9]*$'));
},
test : function(s, p) {
s = s.nodeType == 1 ? s.value : s;
return s == '' || new RegExp(p).test(s);
}
};
var AutoValidator = {
settings : {
id_cls : 'id',
int_cls : 'int',
url_cls : 'url',
number_cls : 'number',
email_cls : 'email',
size_cls : 'size',
required_cls : 'required',
invalid_cls : 'invalid',
min_cls : 'min',
max_cls : 'max'
},
init : function(s) {
var n;
for (n in s)
this.settings[n] = s[n];
},
validate : function(f) {
var i, nl, s = this.settings, c = 0;
nl = this.tags(f, 'label');
for (i=0; i<nl.length; i++) {
this.removeClass(nl[i], s.invalid_cls);
nl[i].setAttribute('aria-invalid', false);
}
c += this.validateElms(f, 'input');
c += this.validateElms(f, 'select');
c += this.validateElms(f, 'textarea');
return c == 3;
},
invalidate : function(n) {
this.mark(n.form, n);
},
getErrorMessages : function(f) {
var nl, i, s = this.settings, field, message, values, messages = [], ed = tinyMCEPopup.editor;
nl = this.tags(f, "label");
for (i=0; i<nl.length; i++) {
if (this.hasClass(nl[i], s.invalid_cls)) {
field = document.getElementById(nl[i].getAttribute("for"));
values = { field: nl[i].textContent };
if (this.hasClass(field, s.min_cls, true)) {
message = ed.getLang('invalid_data_min');
values.min = this.getNum(field, s.min_cls);
} else if (this.hasClass(field, s.number_cls)) {
message = ed.getLang('invalid_data_number');
} else if (this.hasClass(field, s.size_cls)) {
message = ed.getLang('invalid_data_size');
} else {
message = ed.getLang('invalid_data');
}
message = message.replace(/{\#([^}]+)\}/g, function(a, b) {
return values[b] || '{#' + b + '}';
});
messages.push(message);
}
}
return messages;
},
reset : function(e) {
var t = ['label', 'input', 'select', 'textarea'];
var i, j, nl, s = this.settings;
if (e == null)
return;
for (i=0; i<t.length; i++) {
nl = this.tags(e.form ? e.form : e, t[i]);
for (j=0; j<nl.length; j++) {
this.removeClass(nl[j], s.invalid_cls);
nl[j].setAttribute('aria-invalid', false);
}
}
},
validateElms : function(f, e) {
var nl, i, n, s = this.settings, st = true, va = Validator, v;
nl = this.tags(f, e);
for (i=0; i<nl.length; i++) {
n = nl[i];
this.removeClass(n, s.invalid_cls);
if (this.hasClass(n, s.required_cls) && va.isEmpty(n))
st = this.mark(f, n);
if (this.hasClass(n, s.number_cls) && !va.isNumber(n))
st = this.mark(f, n);
if (this.hasClass(n, s.int_cls) && !va.isNumber(n, true))
st = this.mark(f, n);
if (this.hasClass(n, s.url_cls) && !va.isAbsUrl(n))
st = this.mark(f, n);
if (this.hasClass(n, s.email_cls) && !va.isEmail(n))
st = this.mark(f, n);
if (this.hasClass(n, s.size_cls) && !va.isSize(n))
st = this.mark(f, n);
if (this.hasClass(n, s.id_cls) && !va.isId(n))
st = this.mark(f, n);
if (this.hasClass(n, s.min_cls, true)) {
v = this.getNum(n, s.min_cls);
if (isNaN(v) || parseInt(n.value) < parseInt(v))
st = this.mark(f, n);
}
if (this.hasClass(n, s.max_cls, true)) {
v = this.getNum(n, s.max_cls);
if (isNaN(v) || parseInt(n.value) > parseInt(v))
st = this.mark(f, n);
}
}
return st;
},
hasClass : function(n, c, d) {
return new RegExp('\\b' + c + (d ? '[0-9]+' : '') + '\\b', 'g').test(n.className);
},
getNum : function(n, c) {
c = n.className.match(new RegExp('\\b' + c + '([0-9]+)\\b', 'g'))[0];
c = c.replace(/[^0-9]/g, '');
return c;
},
addClass : function(n, c, b) {
var o = this.removeClass(n, c);
n.className = b ? c + (o != '' ? (' ' + o) : '') : (o != '' ? (o + ' ') : '') + c;
},
removeClass : function(n, c) {
c = n.className.replace(new RegExp("(^|\\s+)" + c + "(\\s+|$)"), ' ');
return n.className = c != ' ' ? c : '';
},
tags : function(f, s) {
return f.getElementsByTagName(s);
},
mark : function(f, n) {
var s = this.settings;
this.addClass(n, s.invalid_cls);
n.setAttribute('aria-invalid', 'true');
this.markLabels(f, n, s.invalid_cls);
return false;
},
markLabels : function(f, n, ic) {
var nl, i;
nl = this.tags(f, "label");
for (i=0; i<nl.length; i++) {
if (nl[i].getAttribute("for") == n.id || nl[i].htmlFor == n.id)
this.addClass(nl[i], ic);
}
return null;
}
};
|
PypiClean
|
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_cgg-ug.js
|
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Sande",
"Orwokubanza",
"Orwakabiri",
"Orwakashatu",
"Orwakana",
"Orwakataano",
"Orwamukaaga"
],
"MONTH": [
"Okwokubanza",
"Okwakabiri",
"Okwakashatu",
"Okwakana",
"Okwakataana",
"Okwamukaaga",
"Okwamushanju",
"Okwamunaana",
"Okwamwenda",
"Okwaikumi",
"Okwaikumi na kumwe",
"Okwaikumi na ibiri"
],
"SHORTDAY": [
"SAN",
"ORK",
"OKB",
"OKS",
"OKN",
"OKT",
"OMK"
],
"SHORTMONTH": [
"KBZ",
"KBR",
"KST",
"KKN",
"KTN",
"KMK",
"KMS",
"KMN",
"KMW",
"KKM",
"KNK",
"KNB"
],
"fullDate": "EEEE, d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y h:mm:ss a",
"mediumDate": "d MMM y",
"mediumTime": "h:mm:ss a",
"short": "dd/MM/y h:mm a",
"shortDate": "dd/MM/y",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "UGX",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "cgg-ug",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
|
PypiClean
|
/nista_library-4.0.9-py3-none-any.whl/data_point_client/models/get_week_period_response.py
|
from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.gnista_unit_response import GnistaUnitResponse
from ..models.week_data_transfere import WeekDataTransfere
T = TypeVar("T", bound="GetWeekPeriodResponse")
@attr.s(auto_attribs=True)
class GetWeekPeriodResponse:
"""
Attributes:
discriminator (str):
week_data (Union[Unset, None, WeekDataTransfere]):
unit (Union[Unset, None, GnistaUnitResponse]):
"""
discriminator: str
week_data: Union[Unset, None, "WeekDataTransfere"] = UNSET
unit: Union[Unset, None, "GnistaUnitResponse"] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
discriminator = self.discriminator
week_data: Union[Unset, None, Dict[str, Any]] = UNSET
if not isinstance(self.week_data, Unset):
week_data = self.week_data.to_dict() if self.week_data else None
unit: Union[Unset, None, Dict[str, Any]] = UNSET
if not isinstance(self.unit, Unset):
unit = self.unit.to_dict() if self.unit else None
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"discriminator": discriminator,
}
)
if week_data is not UNSET:
field_dict["weekData"] = week_data
if unit is not UNSET:
field_dict["unit"] = unit
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
from ..models.gnista_unit_response import GnistaUnitResponse
from ..models.week_data_transfere import WeekDataTransfere
d = src_dict.copy()
discriminator = d.pop("discriminator")
_week_data = d.pop("weekData", UNSET)
week_data: Union[Unset, None, WeekDataTransfere]
if _week_data is None:
week_data = None
elif isinstance(_week_data, Unset):
week_data = UNSET
else:
week_data = WeekDataTransfere.from_dict(_week_data)
_unit = d.pop("unit", UNSET)
unit: Union[Unset, None, GnistaUnitResponse]
if _unit is None:
unit = None
elif isinstance(_unit, Unset):
unit = UNSET
else:
unit = GnistaUnitResponse.from_dict(_unit)
get_week_period_response = cls(
discriminator=discriminator,
week_data=week_data,
unit=unit,
)
get_week_period_response.additional_properties = d
return get_week_period_response
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
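# Minimal round-trip sketch for the model above. The discriminator string is a
# placeholder and the optional weekData/unit fields are left unset:
if __name__ == "__main__":
    _resp = GetWeekPeriodResponse(discriminator="GetWeekPeriodResponse")
    _resp["extra"] = 1  # unknown keys live in additional_properties
    _again = GetWeekPeriodResponse.from_dict(_resp.to_dict())
    print(_again.discriminator, _again["extra"])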
|
PypiClean
|
/aidenva-0.0.1-py3-none-any.whl/vaengine/falldown_va_2.py
|
import cv2
import numpy as np
from misc import labelmap_util
from models.classify import Classify
from service import constants as const
from service.va_service import VAService
from misc.estimator.condition_estimate import ConditionEstimator
# batch size (number of crops) per classification inference call
CLASSIFICATION_IMG_SIZE=20
class FalldownVA(VAService):
def __init__(self, config, va_type, min_threshold=0.5):
super(FalldownVA, self).__init__(config, va_type)
self.default_image_size = 299 # inception_resnet_v2 : 233
self.classify = Classify(self.enabled, config, 'falldown', (self.default_image_size, self.default_image_size))
self.min_score_threshold = config.getvalue(self.conf_prefix + 'min_score_threshold', min_threshold)
self.estimator = dict()
self.conf_prefix = 'va_engines.engines.%s.' % const.FALLDOWN_VA_NAME
def _set_lables(self, path_to_labels):
labels = labelmap_util.create_categories_from_labelmap(path_to_labels)
return dict([(item['id'] + -1, item['name']) for item in labels])
# override
def _execute(self, sc):
# image numpy arrays kept in channel order
# list -> [ch_idx, image]
images_by_ch = sc.get_in_by_vatype(self.is_support_vatype_fc)
# return early if there is nothing to process
if len(images_by_ch) == 0: return sc
# person detection results (detections stored in channel order)
detect_by_ch = sc.get_out_by_vatype(const.DETECT_VA)
crop_img_np = []
crop_box_np = []
inf_results = []
crop_per_channels = {}
for (seq, image_np, ch_uuid, cfg_json) in images_by_ch:
detect = detect_by_ch[seq]
if ch_uuid not in self.estimator:
self.estimator[ch_uuid] = ConditionEstimator(image_np.shape[:2],
self.config.getvalue(self.conf_prefix + 'queue_size'),
self.config.getvalue(self.conf_prefix + 'activation_per_queue'),
self.config.getvalue(self.conf_prefix + 'iou_threshold'))
boxes, fall_down_corp_np_list = self.expend_box_n_padding_zero(image_np, detect)
if len(fall_down_corp_np_list) > 0 :
crop_img_np.extend(fall_down_corp_np_list)
crop_box_np.extend(boxes)
crop_per_channels[ch_uuid] = len(fall_down_corp_np_list)
if len(crop_img_np) > 0 :
if len(crop_img_np) < CLASSIFICATION_IMG_SIZE:
inf_results.extend(self.classify._inference(crop_img_np))
else:
for i in range(0, len(crop_img_np), CLASSIFICATION_IMG_SIZE):
c = crop_img_np[i: i + CLASSIFICATION_IMG_SIZE]
inf_results.extend(self.classify._inference(c))
idx = 0
for (seq, image_np, ch_uuid, cfg_json) in images_by_ch:
num = crop_per_channels[ch_uuid]
if num == 0:
sc.set_out_by_ch(self.va_type, seq, [[], [], []])
continue
sc.set_out_by_ch(self.va_type, seq, self.aggregte_result(crop_box_np[idx:idx+num], inf_results[idx:idx+num], self.estimator[ch_uuid]))
idx = idx + num
return sc
'''
return : response format : [boxes, score, class]
'''
def aggregte_result(self, boxes, inf_result, e):
detects = list()
falldown = list()
res_boxes = []
res_scores = []
res_classes = []
for box, logits in zip(boxes, inf_result):
prob = logits[0:]
sorted_inds = [i[0] for i in sorted(enumerate(-prob), key=lambda x: x[1])]
if sorted_inds[0] == 2 and prob[sorted_inds[0]] > self.min_score_threshold: # 0: unknown, 1: person, 2: lie_down
# response format : boxes, score, class
res_boxes.append(box)
res_scores.append(prob[sorted_inds[0]])
res_classes.append(self.label_map[2])
falldown.append(box)
else:
detects.append(box)
find_falldown = e.estimate(detects, falldown)
f_boxes = []
f_scores = []
f_classes = []
for fd_box in find_falldown:
find_flag = False
for f_box, f_score, f_classe in zip(res_boxes, res_scores, res_classes):
if np.array_equal(fd_box,f_box):
f_boxes.append(f_box)
f_scores.append(f_score)
f_classes.append(f_classe)
find_flag = True
if not find_flag:
f_boxes.append(fd_box)
f_scores.append(0.7)
f_classes.append(self.label_map[2])
return [f_boxes, f_scores, f_classes]
def expend_box_n_padding_zero(self, image_np, person_detect):
fall_down_crop_list = list()
boxes, scores, classes = person_detect
for idx in range(len(boxes)):
box_t = tuple(boxes[idx])
crop, box = self.__crop_expend_ares(image_np, box_t, 0.1)
crop = self.__resize_and_padding_zero(crop, self.default_image_size)
fall_down_crop_list.append(crop)
return boxes, fall_down_crop_list
'''
Resize the bounding-rect crop to the target image size; remaining margins are zero-padded.
'''
def __resize_and_padding_zero(self, image, desired_size=233):
old_size = image.shape[:2] # old_size is in (height, width) format
ratio = float(desired_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
image = cv2.resize(image, (new_size[1], new_size[0]))
delta_w = desired_size - new_size[1]
delta_h = desired_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
return cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
'''
Expand or shrink the input rect according to the given ratio.
'''
def __crop_expend_ares(self, image_np, box, ratio=0.2, coordiante=True):
im_height, im_width = image_np.shape[:2]
ymin, xmin, ymax, xmax = box
if (coordiante):
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
vh_ratio = (bottom - top) / (right - left)
hv_ratio = (right - left) / (bottom - top)
if vh_ratio > 1: # the box is taller than it is wide
width_ratio = int(((right - left) * (ratio * vh_ratio)) / 2)
height_ratio = int(((bottom - top) * ratio) / 2)
else:
width_ratio = int(((right - left) * ratio) / 2)
height_ratio = int(((bottom - top) * (ratio * hv_ratio)) / 2)
top = (top - height_ratio) if 0 < (top - height_ratio) else 0
bottom = (bottom + height_ratio) if im_height > (bottom + height_ratio) else im_height
left = (left - width_ratio) if 0 < (left - width_ratio) else 0
right = (right + width_ratio) if im_width > (right + width_ratio) else im_width
return image_np[int(top):int(bottom), int(left):int(right), :], (left, right, top, bottom)
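# Standalone sketch of the resize-and-zero-pad step implemented by
# __resize_and_padding_zero above: scale the longer side to the target size, then
# pad the remainder with black so the crop keeps its aspect ratio.
# Illustrative only; it does not touch the FalldownVA class:
if __name__ == "__main__":
    _img = np.zeros((100, 50, 3), dtype=np.uint8)      # tall 100x50 crop
    _target = 299
    _ratio = _target / max(_img.shape[:2])
    _new_h, _new_w = [int(x * _ratio) for x in _img.shape[:2]]
    _resized = cv2.resize(_img, (_new_w, _new_h))
    _dh, _dw = _target - _new_h, _target - _new_w
    _padded = cv2.copyMakeBorder(_resized, _dh // 2, _dh - _dh // 2,
                                 _dw // 2, _dw - _dw // 2,
                                 cv2.BORDER_CONSTANT, value=[0, 0, 0])
    print(_padded.shape)                                # (299, 299, 3)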
|
PypiClean
|
/data-gate-cli-0.3.0.tar.gz/data-gate-cli-0.3.0/dg/commands/cpd3/install_assembly.py
|
from typing import Optional
import click
import semver
from click_option_group import optgroup
import dg.config.cluster_credentials_manager
import dg.lib.click.utils
from dg.lib.cloud_pak_for_data.cpd3_manager import (
AbstractCloudPakForDataManager,
CloudPakForDataAssemblyBuildType,
)
from dg.lib.cloud_pak_for_data.cpd3_manager_factory import (
CloudPakForDataManagerFactory,
)
from dg.lib.openshift.utils.click import (
openshift_server_command_optgroup_options,
)
from dg.utils.logging import loglevel_command
@loglevel_command(
context_settings=dg.lib.click.utils.create_default_map_from_dict(
dg.config.cluster_credentials_manager.cluster_credentials_manager.get_current_credentials()
)
)
@optgroup.group("Shared options")
@openshift_server_command_optgroup_options
@optgroup.option("--accept-all-licenses", help="Accept all licenses", is_flag=True)
@optgroup.option("--assembly-name", help="Name of the assembly to be installed", required=True)
@optgroup.option(
"--build-type",
default=f"{CloudPakForDataAssemblyBuildType.RELEASE.name}",
help=f"Build type (default: {CloudPakForDataAssemblyBuildType.RELEASE.name.lower()})",
type=click.Choice(
list(map(lambda x: x.name.lower(), CloudPakForDataAssemblyBuildType)),
case_sensitive=False,
),
)
@optgroup.option("--storage-class", help="Storage class used for installation", required=True)
@optgroup.option(
"--version",
default=AbstractCloudPakForDataManager.get_default_cloud_pak_for_data_version(),
help="Cloud Pak for Data version",
)
@optgroup.group("Release build options")
@optgroup.option(
"--ibm-cloud-pak-for-data-entitlement-key",
"-e",
help="IBM Cloud Pak for Data entitlement key (see https://myibm.ibm.com/products-services/containerlibrary)",
)
@optgroup.group("Development build options")
@optgroup.option("--artifactory-user-name", help="Artifactory user name")
@optgroup.option("--artifactory-api-key", help="Artifactory API key")
@click.pass_context
def install_assembly(
ctx: click.Context,
server: Optional[str],
username: Optional[str],
password: Optional[str],
token: Optional[str],
insecure_skip_tls_verify: Optional[bool],
accept_all_licenses: bool,
assembly_name: str,
build_type: str,
storage_class: str,
version: str,
ibm_cloud_pak_for_data_entitlement_key: Optional[str],
artifactory_user_name: str,
artifactory_api_key: str,
):
"""Install an IBM Cloud Pak for Data assembly"""
cloud_pak_for_data_assembly_build_type = CloudPakForDataAssemblyBuildType[build_type.upper()]
dg.lib.click.utils.check_cloud_pak_for_data_options(ctx, cloud_pak_for_data_assembly_build_type, locals().copy())
dg.lib.click.utils.log_in_to_openshift_cluster(ctx, locals().copy())
cloud_pak_for_data_manager = CloudPakForDataManagerFactory.get_cloud_pak_for_data_manager(
semver.VersionInfo.parse(version)
)(cloud_pak_for_data_assembly_build_type)
cloud_pak_for_data_manager.install_assembly_with_prerequisites(
artifactory_user_name,
artifactory_api_key,
ibm_cloud_pak_for_data_entitlement_key,
assembly_name,
accept_all_licenses,
storage_class,
)
|
PypiClean
|
/lcmap_pyccd-2021.7.19-py3-none-any.whl/ccd/__init__.py
|
import time
import logging
from ccd.procedures import fit_procedure as __determine_fit_procedure
import numpy as np
from ccd import app, math_utils, qa
import importlib
from .version import __version
from .version import __name
log = logging.getLogger(__name)
algorithm = ':'.join([__name, __version])
def attr_from_str(value):
"""Returns a reference to the full qualified function, attribute or class.
Args:
value = Fully qualified path (e.g. 'ccd.models.lasso.fitted_model')
Returns:
A reference to the target attribute (e.g. fitted_model)
"""
module, target = value.rsplit('.', 1)
try:
obj = importlib.import_module(module)
return getattr(obj, target)
except (ImportError, AttributeError) as e:
log.debug(e)
return None
def __attach_metadata(procedure_results, probs):
"""
Attach some information on the algorithm version, what procedure was used,
and which inputs were used
Returns:
A dict representing the change detection results
{algorithm: 'pyccd:x.x.x',
processing_mask: (bool, bool, ...),
snow_prob: float,
water_prob: float,
cloud_prob: float,
change_models: [
{start_day: int,
end_day: int,
break_day: int,
observation_count: int,
change_probability: float,
curve_qa: int,
blue: {magnitude: float,
rmse: float,
coefficients: (float, float, ...),
intercept: float},
green: {magnitude: float,
rmse: float,
coefficients: (float, float, ...),
intercept: float},
red: {magnitude: float,
rmse: float,
coefficients: (float, float, ...),
intercept: float},
nir: {magnitude: float,
rmse: float,
coefficients: (float, float, ...),
intercept: float},
swir1: {magnitude: float,
rmse: float,
coefficients: (float, float, ...),
intercept: float},
swir2: {magnitude: float,
rmse: float,
coefficients: (float, float, ...),
intercept: float},
thermal: {magnitude: float,
rmse: float,
coefficients: (float, float, ...),
intercept: float}}
]
}
"""
change_models, processing_mask = procedure_results
return {'algorithm': algorithm,
'processing_mask': [int(_) for _ in processing_mask],
'change_models': change_models,
'cloud_prob': probs[0],
'snow_prob': probs[1],
'water_prob': probs[2]}
def __split_dates_spectra(matrix):
""" Slice the dates and spectra from the matrix and return """
return matrix[0], matrix[1:7]
def __sort_dates(dates):
""" Sort the values chronologically """
return np.argsort(dates)
def __check_inputs(dates, quality, spectra):
"""
Make sure the inputs are correctly sized relative to each other.
Args:
dates: 1-d ndarray
quality: 1-d ndarray
spectra: 2-d ndarray
"""
# Make sure we only have one dimension
assert dates.ndim == 1
# Make sure we have data
assert dates.shape[0] > 0
# Make sure quality is the same
assert dates.shape == quality.shape
# Make sure there is spectral data for each date
assert dates.shape[0] == spectra.shape[1]
def detect(dates, blues, greens, reds, nirs,
swir1s, swir2s, thermals, qas,
prev_results=None, params=None):
"""Entry point call to detect change
No filtering up-front as different procedures may do things
differently
Args:
dates: 1d-array or list of ordinal date values
blues: 1d-array or list of blue band values
greens: 1d-array or list of green band values
reds: 1d-array or list of red band values
nirs: 1d-array or list of nir band values
swir1s: 1d-array or list of swir1 band values
swir2s: 1d-array or list of swir2 band values
thermals: 1d-array or list of thermal band values
qas: 1d-array or list of qa band values
prev_results: Previous set of results to be updated with
new observations
params: python dictionary to change module wide processing
parameters
Returns:
Tuple of ccd.detections namedtuples
"""
t1 = time.time()
proc_params = app.get_default_params()
if params:
proc_params.update(params)
dates = np.asarray(dates)
qas = np.asarray(qas)
spectra = np.stack((blues, greens,
reds, nirs, swir1s,
swir2s, thermals))
__check_inputs(dates, qas, spectra)
indices = __sort_dates(dates)
dates = dates[indices]
spectra = spectra[:, indices]
qas = qas[indices]
# load the fitter_fn
fitter_fn = attr_from_str(proc_params.FITTER_FN)
if proc_params.QA_BITPACKED is True:
qas = qa.unpackqa(qas, proc_params)
probs = qa.quality_probabilities(qas, proc_params)
# Determine which procedure to use for the detection
procedure = __determine_fit_procedure(dates, qas, prev_results, proc_params)
results = procedure(dates, spectra, fitter_fn, qas, prev_results, proc_params)
log.debug('Total time for algorithm: %s', time.time() - t1)
# call detect and return results as the detections namedtuple
return __attach_metadata(results, probs)
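# Illustrative only: the input shapes that __check_inputs and __sort_dates expect.
# This is not a real change-detection run; that requires genuine observations:
if __name__ == "__main__":
    _dates = np.array([736000, 735990, 736010])   # ordinal days, unsorted
    _qas = np.zeros(3)
    _spectra = np.zeros((7, 3))                   # 7 bands x 3 observations
    __check_inputs(_dates, _qas, _spectra)
    print(__sort_dates(_dates))                   # [1 0 2] -> chronological order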
|
PypiClean
|
/library_analyzer-0.1.1.tar.gz/library_analyzer-0.1.1/src/library_analyzer/processing/migration/annotations/_migrate_expert_annotation.py
|
from copy import deepcopy
from library_analyzer.processing.annotations.model import (
AbstractAnnotation,
EnumReviewResult,
ExpertAnnotation,
TodoAnnotation,
)
from library_analyzer.processing.api.model import Attribute, Result
from library_analyzer.processing.migration.model import (
ManyToOneMapping,
Mapping,
OneToOneMapping,
)
from ._constants import migration_author
from ._get_annotated_api_element import get_annotated_api_element
from ._get_migration_text import get_migration_text
def migrate_expert_annotation(
expert_annotation: ExpertAnnotation, mapping: Mapping
) -> list[AbstractAnnotation]:
expert_annotation = deepcopy(expert_annotation)
authors = expert_annotation.authors
authors.append(migration_author)
expert_annotation.authors = authors
if isinstance(mapping, (ManyToOneMapping, OneToOneMapping)):
element = mapping.get_apiv2_elements()[0]
if isinstance(element, (Attribute, Result)):
return []
expert_annotation.target = element.id
return [expert_annotation]
annotated_apiv1_element = get_annotated_api_element(
expert_annotation, mapping.get_apiv1_elements()
)
if annotated_apiv1_element is None:
return []
expert_annotations: list[AbstractAnnotation] = []
for element in mapping.get_apiv2_elements():
if isinstance(element, type(annotated_apiv1_element)) and not isinstance(
element, (Attribute, Result)
):
expert_annotations.append(
ExpertAnnotation(
element.id,
authors,
expert_annotation.reviewers,
expert_annotation.comment,
EnumReviewResult.NONE,
)
)
elif not isinstance(element, (Attribute, Result)):
expert_annotations.append(
TodoAnnotation(
element.id,
authors,
expert_annotation.reviewers,
expert_annotation.comment,
EnumReviewResult.NONE,
get_migration_text(
expert_annotation, mapping, for_todo_annotation=True
),
)
)
return expert_annotations
|
PypiClean
|
/kforge-0.20.tar.gz/kforge-0.20/doc/source/introduction.rst
|
Introduction
============
.. include:: ../../README
Python Package
--------------
The `KForge Python package <http://pypi.python.org/pypi/kforge>`_ can be downloaded from the `Python Package Index <http://pypi.python.org>`_. To build a KForge site, you will also need to install other software. The `KForge Installer <http://pypi.python.org/pypi/kforgeinstall>`_ has been created for this reason. See the `KForge Install Guide <install-guide.html>`_ for more information.
If you are interested in KForge, please join the `Mailing list <http://lists.appropriatesoftware.org/listinfo/kforge-discuss>`_.
|
PypiClean
|
/Tailbone-0.9.45.tar.gz/Tailbone-0.9.45/tailbone/views/tempmon/readings.py
|
from __future__ import unicode_literals, absolute_import
import six
from sqlalchemy import orm
from rattail_tempmon.db import model as tempmon
from webhelpers2.html import tags
from tailbone.views.tempmon import MasterView
class TempmonReadingView(MasterView):
"""
Master view for tempmon readings.
"""
model_class = tempmon.Reading
model_title = "TempMon Reading"
model_title_plural = "TempMon Readings"
route_prefix = 'tempmon.readings'
url_prefix = '/tempmon/readings'
creatable = False
editable = False
bulk_deletable = True
grid_columns = [
'client_key',
'client_host',
'probe',
'taken',
'degrees_f',
]
form_fields = [
'client',
'probe',
'taken',
'degrees_f',
]
def query(self, session):
return session.query(tempmon.Reading)\
.join(tempmon.Client)\
.options(orm.joinedload(tempmon.Reading.client))
def configure_grid(self, g):
super(TempmonReadingView, self).configure_grid(g)
g.sorters['client_key'] = g.make_sorter(tempmon.Client.config_key)
g.filters['client_key'] = g.make_filter('client_key', tempmon.Client.config_key)
g.sorters['client_host'] = g.make_sorter(tempmon.Client.hostname)
g.filters['client_host'] = g.make_filter('client_host', tempmon.Client.hostname)
g.joiners['probe'] = lambda q: q.join(tempmon.Probe, tempmon.Probe.uuid == tempmon.Reading.probe_uuid)
g.sorters['probe'] = g.make_sorter(tempmon.Probe.description)
g.filters['probe'] = g.make_filter('probe', tempmon.Probe.description)
g.set_sort_defaults('taken', 'desc')
g.set_type('taken', 'datetime')
g.set_renderer('client_key', self.render_client_key)
g.set_renderer('client_host', self.render_client_host)
g.set_link('probe')
g.set_link('taken')
def render_client_key(self, reading, column):
return reading.client.config_key
def render_client_host(self, reading, column):
return reading.client.hostname
def configure_form(self, f):
super(TempmonReadingView, self).configure_form(f)
# client
f.set_renderer('client', self.render_client)
f.set_label('client', "Tempmon Client")
# probe
f.set_renderer('probe', self.render_probe)
f.set_label('probe', "Tempmon Probe")
def render_client(self, reading, field):
client = reading.client
if not client:
return ""
text = six.text_type(client)
url = self.request.route_url('tempmon.clients.view', uuid=client.uuid)
return tags.link_to(text, url)
def render_probe(self, reading, field):
probe = reading.probe
if not probe:
return ""
text = six.text_type(probe)
url = self.request.route_url('tempmon.probes.view', uuid=probe.uuid)
return tags.link_to(text, url)
def defaults(config, **kwargs):
base = globals()
TempmonReadingView = kwargs.get('TempmonReadingView', base['TempmonReadingView'])
TempmonReadingView.defaults(config)
def includeme(config):
defaults(config)
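# The defaults()/includeme() pair above is the usual Tailbone override hook: a
# downstream package can substitute its own view class without re-registering the
# routes. A hedged sketch (MyReadingView is hypothetical):
#
#     from tailbone.views.tempmon import readings as base
#
#     class MyReadingView(base.TempmonReadingView):
#         pass
#
#     def includeme(config):
#         base.defaults(config, TempmonReadingView=MyReadingView)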
|
PypiClean
|
/idasen_controller-2.0.2.tar.gz/idasen_controller-2.0.2/idasen_controller/main.py
|
import os
import sys
import traceback
import shutil
import struct
import argparse
import yaml
import asyncio
import aiohttp
from aiohttp import web
from bleak import BleakClient, BleakError, BleakScanner
from bleak.exc import BleakDBusError
import json
from functools import partial
from appdirs import user_config_dir
IS_WINDOWS = sys.platform == "win32"
# HELPER FUNCTIONS
def mmToRaw(mm):
return (mm - BASE_HEIGHT) * 10
def rawToMM(raw):
return (raw / 10) + BASE_HEIGHT
def rawToSpeed(raw):
return raw / 100
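# Worked example of the helpers above, assuming the default base height of 620 mm
# (DEFAULT_BASE_HEIGHT defined further down):
#   mmToRaw(620)    == (620 - 620) * 10  == 0      (desk fully lowered)
#   mmToRaw(1120)   == (1120 - 620) * 10 == 5000
#   rawToMM(5000)   == 5000 / 10 + 620   == 1120
#   rawToSpeed(100) == 1.0  # mm/s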
# GATT CHARACTERISTIC AND COMMAND DEFINITIONS
UUID_HEIGHT = "99fa0021-338a-1024-8a49-009c0215f78a" # Read height and speed
UUID_COMMAND = "99fa0002-338a-1024-8a49-009c0215f78a" # Write commands
UUID_DPG = "99fa0011-338a-1024-8a49-009c0215f78a" # Write ?
UUID_REFERENCE_INPUT = "99fa0031-338a-1024-8a49-009c0215f78a" # Write ?
COMMAND_STOP = bytearray(struct.pack("<H", 255))
COMMAND_WAKEUP = bytearray(struct.pack("<H", 254))
# OTHER DEFINITIONS
DEFAULT_CONFIG_DIR = user_config_dir("idasen-controller")
DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_CONFIG_DIR, "config.yaml")
# CONFIGURATION SETUP
# Default config
if not os.path.isfile(DEFAULT_CONFIG_PATH):
os.makedirs(os.path.dirname(DEFAULT_CONFIG_PATH), exist_ok=True)
shutil.copyfile(
os.path.join(os.path.dirname(__file__), "example", "config.yaml"),
DEFAULT_CONFIG_PATH,
)
# Height of the desk at its lowest (in mm)
DEFAULT_BASE_HEIGHT = 620
# And how high it can rise above that (same for all desks)
DEFAULT_MOVEMENT_RANGE = 650
config = {
"mac_address": None,
"base_height": DEFAULT_BASE_HEIGHT,
"movement_range": DEFAULT_MOVEMENT_RANGE,
"adapter_name": "hci0",
"scan_timeout": 5,
"connection_timeout": 10,
"movement_timeout": 30,
"server_address": "127.0.0.1",
"server_port": 9123,
"favourites": {},
}
parser = argparse.ArgumentParser(description="")
# Config via command line options
parser.add_argument(
"--mac-address", dest="mac_address", type=str, help="Mac address of the Idasen desk"
)
parser.add_argument(
"--base-height",
dest="base_height",
type=int,
help="The height of tabletop above ground at lowest position (mm)",
)
parser.add_argument(
"--movement-range",
dest="movement_range",
type=int,
help="How far above base-height the desk can extend (mm)",
)
parser.add_argument(
"--adapter", dest="adapter_name", type=str, help="The bluetooth adapter device name"
)
parser.add_argument(
"--scan-timeout",
dest="scan_timeout",
type=int,
help="The timeout for bluetooth scan (seconds)",
)
parser.add_argument(
"--connection-timeout",
dest="connection_timeout",
type=int,
help="The timeout for bluetooth connection (seconds)",
)
parser.add_argument(
"--movement-timeout",
dest="movement_timeout",
type=int,
help="The timeout for waiting for the desk to reach the specified height (seconds)",
)
parser.add_argument(
"--forward",
dest="forward",
action="store_true",
help="Forward any commands to a server",
)
parser.add_argument(
"--server-address",
dest="server_address",
type=str,
help="The address the server should run at",
)
parser.add_argument(
"--server_port",
dest="server_port",
type=int,
help="The port the server should run on",
)
parser.add_argument(
"--config",
dest="config",
type=str,
help="File path to the config file (Default: {})".format(DEFAULT_CONFIG_PATH),
default=DEFAULT_CONFIG_PATH,
)
# Command to run
cmd = parser.add_mutually_exclusive_group()
cmd.add_argument(
"--watch",
dest="watch",
action="store_true",
help="Watch for changes to desk height and speed and print them",
)
cmd.add_argument(
"--move-to",
dest="move_to",
help="Move desk to specified height (mm) or to a favourite position",
)
cmd.add_argument(
"--scan",
dest="scan_adapter",
action="store_true",
help="Scan for devices using the configured adapter",
)
cmd.add_argument(
"--server",
dest="server",
action="store_true",
help="Run as a server to accept forwarded commands",
)
cmd.add_argument(
"--tcp-server",
dest="tcp_server",
action="store_true",
help="Run as a simple TCP server to accept forwarded commands",
)
cmd.add_argument(
"--print-exceptions",
dest="print_exceptions",
action="store_true",
help="Print normally harmless exceptions that are hidden",
)
args = {k: v for k, v in vars(parser.parse_args()).items() if v is not None}
# Overwrite config from config.yaml
config_file = {}
config_file_path = args["config"]
if config_file_path and os.path.isfile(config_file_path):
with open(config_file_path, "r") as stream:
try:
config_file = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print("Reading config.yaml failed")
exit(1)
else:
print("No config file found")
config.update(config_file)
# Overwrite config from command line args
config.update(args)
# recompute base and max height
BASE_HEIGHT = config["base_height"]
MAX_HEIGHT = BASE_HEIGHT + config["movement_range"]
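# Editor's note: with the defaults above this yields BASE_HEIGHT = 620 mm and
# MAX_HEIGHT = 1270 mm, i.e. a raw height range of 0..6500 (mmToRaw(1270) == 6500).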
if not config["mac_address"]:
parser.error("Mac address must be provided")
if "sit_height_offset" in config:
if not (0 <= config["sit_height_offset"] <= config["movement_range"]):
parser.error(
"Sit height offset must be within [0, {}]".format(config["movement_range"])
)
config["sit_height"] = BASE_HEIGHT + config["sit_height_offset"]
if "stand_height_offset" in config:
if not (0 <= config["stand_height_offset"] <= config["movement_range"]):
parser.error(
"Stand height offset must be within [0, {}]".format(
config["movement_range"]
)
)
config["stand_height"] = BASE_HEIGHT + config["stand_height_offset"]
config["mac_address"] = config["mac_address"].upper()
if IS_WINDOWS:
    # Windows doesn't use this parameter, so rename it to look nicer in the logs
config["adapter_name"] = "default adapter"
# MAIN PROGRAM
def handle_exception(e):
if config["print_exceptions"]:
print(traceback.format_exc())
async def get_height_speed(client):
return struct.unpack("<Hh", await client.read_gatt_char(UUID_HEIGHT))
def get_height_data_from_notification(sender, data, log=print):
height, speed = struct.unpack("<Hh", data)
    log(
"Height: {:4.0f}mm Speed: {:2.0f}mm/s".format(
rawToMM(height), rawToSpeed(speed)
)
)
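# Editor's note: a sample payload for the handler above, assuming the default
# base height of 620 mm:
#   struct.unpack("<Hh", b"\x88\x13\x64\x00") == (5000, 100)
#   -> printed as "Height: 1120mm Speed:  1mm/s"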
async def wakeUp(client):
await client.write_gatt_char(UUID_COMMAND, COMMAND_WAKEUP)
async def move_to_target(client, target):
encoded_target = bytearray(struct.pack("<H", int(target)))
await client.write_gatt_char(UUID_REFERENCE_INPUT, encoded_target)
async def stop(client):
try:
await client.write_gatt_char(UUID_COMMAND, COMMAND_STOP)
except BleakDBusError as e:
# Harmless exception that happens on Raspberry Pis
# bleak.exc.BleakDBusError: [org.bluez.Error.NotPermitted] Write acquired
handle_exception(e)
async def subscribe(client, uuid, callback):
"""Listen for notifications on a characteristic"""
await client.start_notify(uuid, callback)
async def unsubscribe(client, uuid):
"""Stop listenening for notifications on a characteristic"""
await client.stop_notify(uuid)
async def move_to(client, target, log=print):
"""Move the desk to a specified height"""
initial_height, speed = struct.unpack(
"<Hh", await client.read_gatt_char(UUID_HEIGHT)
)
if initial_height == target:
return
await wakeUp(client)
await stop(client)
while True:
await move_to_target(client, target)
await asyncio.sleep(0.5)
height, speed = await get_height_speed(client)
log(
"Height: {:4.0f}mm Speed: {:2.0f}mm/s".format(
rawToMM(height), rawToSpeed(speed)
)
)
if speed == 0:
break
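# Editor's note: the loop above re-sends the target every 0.5 s and polls the
# current height; it exits once the desk reports zero speed, which happens both
# when the target is reached and when movement stops for any other reason.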
async def scan():
"""Scan for a bluetooth device with the configured address and return it or return all devices if no address specified"""
print("Scanning\r", end="")
devices = await BleakScanner().discover(
device=config["adapter_name"], timeout=config["scan_timeout"]
)
print("Found {} devices using {}".format(len(devices), config["adapter_name"]))
for device in devices:
print(device)
return devices
async def connect(client=None, attempt=0):
"""Attempt to connect to the desk"""
try:
print("Connecting\r", end="")
if not client:
client = BleakClient(config["mac_address"], device=config["adapter_name"])
await client.connect(timeout=config["connection_timeout"])
print("Connected {}".format(config["mac_address"]))
return client
except BleakError as e:
print("Connecting failed")
if ("was not found" in str(e)):
print(e)
else:
print(traceback.format_exc())
os._exit(1)
except asyncio.exceptions.TimeoutError as e:
print("Connecting failed - timed out")
os._exit(1)
async def disconnect(client):
"""Attempt to disconnect cleanly"""
if client.is_connected:
await client.disconnect()
async def run_command(client, config, log=print):
"""Begin the action specified by command line arguments and config"""
# Always print current height
initial_height, speed = struct.unpack(
"<Hh", await client.read_gatt_char(UUID_HEIGHT)
)
log("Height: {:4.0f}mm".format(rawToMM(initial_height)))
target = None
if config.get("watch"):
# Print changes to height data
log("Watching for changes to desk height and speed")
await subscribe(
client, UUID_HEIGHT, partial(get_height_data_from_notification, log=log)
)
wait = asyncio.get_event_loop().create_future()
await wait
elif config.get("move_to"):
# Move to custom height
favouriteValue = config.get("favourites", {}).get(config["move_to"])
if favouriteValue:
target = mmToRaw(favouriteValue)
log(f'Moving to favourite height: {config["move_to"]}')
else:
try:
target = mmToRaw(int(config["move_to"]))
log(f'Moving to height: {config["move_to"]}')
except ValueError:
log(f'Not a valid height or favourite position: {config["move_to"]}')
return
await move_to(client, target, log=log)
if target:
final_height, speed = struct.unpack(
"<Hh", await client.read_gatt_char(UUID_HEIGHT)
)
        # Report the actual final height reached after the move
log(
"Final height: {:4.0f}mm (Target: {:4.0f}mm)".format(
rawToMM(final_height), rawToMM(target)
)
)
async def run_tcp_server(client, config):
"""Start a simple tcp server to listen for commands"""
def disconnect_callback(client, _=None):
print("Lost connection with {}".format(client.address))
asyncio.create_task(connect(client))
client.set_disconnected_callback(disconnect_callback)
server = await asyncio.start_server(
partial(run_tcp_forwarded_command, client, config),
config["server_address"],
config["server_port"],
)
print("TCP Server listening")
await server.serve_forever()
async def run_tcp_forwarded_command(client, config, reader, writer):
"""Run commands received by the tcp server"""
print("Received command")
request = (await reader.read()).decode("utf8")
forwarded_config = json.loads(str(request))
merged_config = {**config, **forwarded_config}
await run_command(client, merged_config)
writer.close()
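# Editor's sketch (not part of the original module): one way a client could
# forward a command to the TCP server above using asyncio streams. The host,
# port and target height are placeholder values; the JSON payload mirrors the
# forwarded_config that run_tcp_forwarded_command expects, and write_eof() is
# needed because the server reads until EOF before acting on the request.
async def example_send_tcp_command(host="127.0.0.1", port=9123, height="1100"):
    _reader, writer = await asyncio.open_connection(host, port)
    writer.write(json.dumps({"move_to": height}).encode("utf8"))
    writer.write_eof()  # half-close so the server's reader.read() returns
    await writer.drain()
    writer.close()
    await writer.wait_closed()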
async def run_server(client, config):
"""Start a server to listen for commands via websocket connection"""
def disconnect_callback(client, _=None):
print("Lost connection with {}".format(client.address))
asyncio.create_task(connect(client))
client.set_disconnected_callback(disconnect_callback)
app = web.Application()
app.router.add_get("/", partial(run_forwarded_command, client, config))
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, config["server_address"], config["server_port"])
await site.start()
print("Server listening")
while True:
await asyncio.sleep(1000)
async def run_forwarded_command(client, config, request):
"""Run commands received by the server"""
print("Received command")
ws = web.WebSocketResponse()
def log(message, end="\n"):
print(message, end=end)
asyncio.create_task(ws.send_str(str(message)))
await ws.prepare(request)
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
forwarded_config = json.loads(msg.data)
merged_config = {**config, **forwarded_config}
await run_command(client, merged_config, log)
break
await asyncio.sleep(1) # Allows final messages to send on web socket
await ws.close()
return ws
async def forward_command(config):
"""Send commands to a server instance of this script"""
allowed_keys = ["move_to"]
forwarded_config = {key: config[key] for key in allowed_keys if key in config}
session = aiohttp.ClientSession()
ws = await session.ws_connect(
f'http://{config["server_address"]}:{config["server_port"]}'
)
await ws.send_str(json.dumps(forwarded_config))
while True:
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.text:
print(msg.data)
elif msg.type in [aiohttp.WSMsgType.closed, aiohttp.WSMsgType.error]:
break
await ws.close()
await session.close()
async def main():
"""Set up the async event loop and signal handlers"""
try:
client = None
# Forward and scan don't require a connection so run them and exit
if config["forward"]:
await forward_command(config)
elif config["scan_adapter"]:
await scan()
else:
# Server and other commands do require a connection so set one up
client = await connect()
if config["server"]:
await run_server(client, config)
elif config.get("tcp_server"):
await run_tcp_server(client, config)
else:
await run_command(client, config, print)
except Exception as e:
print("\nSomething unexpected went wrong:")
print(traceback.format_exc())
finally:
if client:
print("\rDisconnecting\r", end="")
await stop(client)
await disconnect(client)
print("Disconnected ")
def init():
try:
asyncio.run(main())
except KeyboardInterrupt:
pass
if __name__ == "__main__":
init()
|
PypiClean
|
/xmldirector.plonecore-2.1.1.zip/xmldirector.plonecore-2.1.1/xmldirector/plonecore/browser/resources/ace-builds/src-min/mode-vala.js
|
define("ace/mode/vala_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text_highlight_rules").TextHighlightRules,s=function(){this.$rules={start:[{token:["meta.using.vala","keyword.other.using.vala","meta.using.vala","storage.modifier.using.vala","meta.using.vala","punctuation.terminator.vala"],regex:"^(\\s*)(using)\\b(?:(\\s*)([^ ;$]+)(\\s*)((?:;)?))?"},{include:"#code"}],"#all-types":[{include:"#primitive-arrays"},{include:"#primitive-types"},{include:"#object-types"}],"#annotations":[{token:["storage.type.annotation.vala","punctuation.definition.annotation-arguments.begin.vala"],regex:"(@[^ (]+)(\\()",push:[{token:"punctuation.definition.annotation-arguments.end.vala",regex:"\\)",next:"pop"},{token:["constant.other.key.vala","text","keyword.operator.assignment.vala"],regex:"(\\w*)(\\s*)(=)"},{include:"#code"},{token:"punctuation.seperator.property.vala",regex:","},{defaultToken:"meta.declaration.annotation.vala"}]},{token:"storage.type.annotation.vala",regex:"@\\w*"}],"#anonymous-classes-and-new":[{token:"keyword.control.new.vala",regex:"\\bnew\\b",push_disabled:[{token:"text",regex:"(?<=\\)|\\])(?!\\s*{)|(?<=})|(?=;)",TODO:"FIXME: regexp doesn't have js equivalent",originalRegex:"(?<=\\)|\\])(?!\\s*{)|(?<=})|(?=;)",next:"pop"},{token:["storage.type.vala","text"],regex:"(\\w+)(\\s*)(?=\\[)",push:[{token:"text",regex:"}|(?=;|\\))",next:"pop"},{token:"text",regex:"\\[",push:[{token:"text",regex:"\\]",next:"pop"},{include:"#code"}]},{token:"text",regex:"{",push:[{token:"text",regex:"(?=})",next:"pop"},{include:"#code"}]}]},{token:"text",regex:"(?=\\w.*\\()",push:[{token:"text",regex:"(?<=\\))",TODO:"FIXME: regexp doesn't have js equivalent",originalRegex:"(?<=\\))",next:"pop"},{include:"#object-types"},{token:"text",regex:"\\(",push:[{token:"text",regex:"\\)",next:"pop"},{include:"#code"}]}]},{token:"meta.inner-class.vala",regex:"{",push:[{token:"meta.inner-class.vala",regex:"}",next:"pop"},{include:"#class-body"},{defaultToken:"meta.inner-class.vala"}]}]}],"#assertions":[{token:["keyword.control.assert.vala","meta.declaration.assertion.vala"],regex:"\\b(assert|requires|ensures)(\\s)",push:[{token:"meta.declaration.assertion.vala",regex:"$",next:"pop"},{token:"keyword.operator.assert.expression-seperator.vala",regex:":"},{include:"#code"},{defaultToken:"meta.declaration.assertion.vala"}]}],"#class":[{token:"meta.class.vala",regex:"(?=\\w?[\\w\\s]*(?:class|(?:@)?interface|enum|struct|namespace)\\s+\\w+)",push:[{token:"paren.vala",regex:"}",next:"pop"},{include:"#storage-modifiers"},{include:"#comments"},{token:["storage.modifier.vala","meta.class.identifier.vala","entity.name.type.class.vala"],regex:"(class|(?:@)?interface|enum|struct|namespace)(\\s+)([\\w\\.]+)"},{token:"storage.modifier.extends.vala",regex:":",push:[{token:"meta.definition.class.inherited.classes.vala",regex:"(?={|,)",next:"pop"},{include:"#object-types-inherited"},{include:"#comments"},{defaultToken:"meta.definition.class.inherited.classes.vala"}]},{token:["storage.modifier.implements.vala","meta.definition.class.implemented.interfaces.vala"],regex:"(,)(\\s)",push:[{token:"meta.definition.class.implemented.interfaces.vala",regex:"(?=\\{)",next:"pop"},{include:"#object-types-inherited"},{include:"#comments"},{defaultToken:"meta.definition.class.implemented.interfaces.vala"}]},{token:"paren.vala",regex:"{",push:[{token:"paren.vala",regex:"(?=})",next:"pop"},{include:"#class-body"},{defaultToken:"meta.class.body.vala"}]},{d
efaultToken:"meta.class.vala"}],comment:"attempting to put namespace in here."}],"#class-body":[{include:"#comments"},{include:"#class"},{include:"#enums"},{include:"#methods"},{include:"#annotations"},{include:"#storage-modifiers"},{include:"#code"}],"#code":[{include:"#comments"},{include:"#class"},{token:"text",regex:"{",push:[{token:"text",regex:"}",next:"pop"},{include:"#code"}]},{include:"#assertions"},{include:"#parens"},{include:"#constants-and-special-vars"},{include:"#anonymous-classes-and-new"},{include:"#keywords"},{include:"#storage-modifiers"},{include:"#strings"},{include:"#all-types"}],"#comments":[{token:"punctuation.definition.comment.vala",regex:"/\\*\\*/"},{include:"text.html.javadoc"},{include:"#comments-inline"}],"#comments-inline":[{token:"punctuation.definition.comment.vala",regex:"/\\*",push:[{token:"punctuation.definition.comment.vala",regex:"\\*/",next:"pop"},{defaultToken:"comment.block.vala"}]},{token:["text","punctuation.definition.comment.vala","comment.line.double-slash.vala"],regex:"(\\s*)(//)(.*$)"}],"#constants-and-special-vars":[{token:"constant.language.vala",regex:"\\b(?:true|false|null)\\b"},{token:"variable.language.vala",regex:"\\b(?:this|base)\\b"},{token:"constant.numeric.vala",regex:"\\b(?:0(?:x|X)[0-9a-fA-F]*|(?:[0-9]+\\.?[0-9]*|\\.[0-9]+)(?:(?:e|E)(?:\\+|-)?[0-9]+)?)(?:[LlFfUuDd]|UL|ul)?\\b"},{token:["keyword.operator.dereference.vala","constant.other.vala"],regex:"((?:\\.)?)\\b([A-Z][A-Z0-9_]+)(?!<|\\.class|\\s*\\w+\\s*=)\\b"}],"#enums":[{token:"text",regex:"^(?=\\s*[A-Z0-9_]+\\s*(?:{|\\(|,))",push:[{token:"text",regex:"(?=;|})",next:"pop"},{token:"constant.other.enum.vala",regex:"\\w+",push:[{token:"meta.enum.vala",regex:"(?=,|;|})",next:"pop"},{include:"#parens"},{token:"text",regex:"{",push:[{token:"text",regex:"}",next:"pop"},{include:"#class-body"}]},{defaultToken:"meta.enum.vala"}]}]}],"#keywords":[{token:"keyword.control.catch-exception.vala",regex:"\\b(?:try|catch|finally|throw)\\b"},{token:"keyword.control.vala",regex:"\\?|:|\\?\\?"},{token:"keyword.control.vala",regex:"\\b(?:return|break|case|continue|default|do|while|for|foreach|switch|if|else|in|yield|get|set|value)\\b"},{token:"keyword.operator.vala",regex:"\\b(?:typeof|is|as)\\b"},{token:"keyword.operator.comparison.vala",regex:"==|!=|<=|>=|<>|<|>"},{token:"keyword.operator.assignment.vala",regex:"="},{token:"keyword.operator.increment-decrement.vala",regex:"\\-\\-|\\+\\+"},{token:"keyword.operator.arithmetic.vala",regex:"\\-|\\+|\\*|\\/|%"},{token:"keyword.operator.logical.vala",regex:"!|&&|\\|\\|"},{token:"keyword.operator.dereference.vala",regex:"\\.(?=\\S)",originalRegex:"(?<=\\S)\\.(?=\\S)"},{token:"punctuation.terminator.vala",regex:";"},{token:"keyword.operator.ownership",regex:"owned|unowned"}],"#methods":[{token:"meta.method.vala",regex:"(?!new)(?=\\w.*\\s+)(?=[^=]+\\()",push:[{token:"paren.vala",regex:"}|(?=;)",next:"pop"},{include:"#storage-modifiers"},{token:["entity.name.function.vala","meta.method.identifier.vala"],regex:"([\\~\\w\\.]+)(\\s*\\()",push:[{token:"meta.method.identifier.vala",regex:"\\)",next:"pop"},{include:"#parameters"},{defaultToken:"meta.method.identifier.vala"}]},{token:"meta.method.return-type.vala",regex:"(?=\\w.*\\s+\\w+\\s*\\()",push:[{token:"meta.method.return-type.vala",regex:"(?=\\w+\\s*\\()",next:"pop"},{include:"#all-types"},{defaultToken:"meta.method.return-type.vala"}]},{include:"#throws"},{token:"paren.vala",regex:"{",push:[{token:"paren.vala",regex:"(?=})",next:"pop"},{include:"#code"},{defaultToken:"meta.method.body.vala"}]},{defaultT
oken:"meta.method.vala"}]}],"#namespace":[{token:"text",regex:"^(?=\\s*[A-Z0-9_]+\\s*(?:{|\\(|,))",push:[{token:"text",regex:"(?=;|})",next:"pop"},{token:"constant.other.namespace.vala",regex:"\\w+",push:[{token:"meta.namespace.vala",regex:"(?=,|;|})",next:"pop"},{include:"#parens"},{token:"text",regex:"{",push:[{token:"text",regex:"}",next:"pop"},{include:"#code"}]},{defaultToken:"meta.namespace.vala"}]}],comment:"This is not quite right. See the class grammar right now"}],"#object-types":[{token:"storage.type.generic.vala",regex:"\\b(?:[a-z]\\w*\\.)*[A-Z]+\\w*<",push:[{token:"storage.type.generic.vala",regex:">|[^\\w\\s,\\?<\\[()\\]]",TODO:"FIXME: regexp doesn't have js equivalent",originalRegex:">|[^\\w\\s,\\?<\\[(?:[,]+)\\]]",next:"pop"},{include:"#object-types"},{token:"storage.type.generic.vala",regex:"<",push:[{token:"storage.type.generic.vala",regex:">|[^\\w\\s,\\[\\]<]",next:"pop"},{defaultToken:"storage.type.generic.vala"}],comment:"This is just to support <>'s with no actual type prefix"},{defaultToken:"storage.type.generic.vala"}]},{token:"storage.type.object.array.vala",regex:"\\b(?:[a-z]\\w*\\.)*[A-Z]+\\w*(?=\\[)",push:[{token:"storage.type.object.array.vala",regex:"(?=[^\\]\\s])",next:"pop"},{token:"text",regex:"\\[",push:[{token:"text",regex:"\\]",next:"pop"},{include:"#code"}]},{defaultToken:"storage.type.object.array.vala"}]},{token:["storage.type.vala","keyword.operator.dereference.vala","storage.type.vala"],regex:"\\b(?:([a-z]\\w*)(\\.))*([A-Z]+\\w*\\b)"}],"#object-types-inherited":[{token:"entity.other.inherited-class.vala",regex:"\\b(?:[a-z]\\w*\\.)*[A-Z]+\\w*<",push:[{token:"entity.other.inherited-class.vala",regex:">|[^\\w\\s,<]",next:"pop"},{include:"#object-types"},{token:"storage.type.generic.vala",regex:"<",push:[{token:"storage.type.generic.vala",regex:">|[^\\w\\s,<]",next:"pop"},{defaultToken:"storage.type.generic.vala"}],comment:"This is just to support <>'s with no actual type prefix"},{defaultToken:"entity.other.inherited-class.vala"}]},{token:["entity.other.inherited-class.vala","keyword.operator.dereference.vala","entity.other.inherited-class.vala"],regex:"\\b(?:([a-z]\\w*)(\\.))*([A-Z]+\\w*)"}],"#parameters":[{token:"storage.modifier.vala",regex:"final"},{include:"#primitive-arrays"},{include:"#primitive-types"},{include:"#object-types"},{token:"variable.parameter.vala",regex:"\\w+"}],"#parens":[{token:"text",regex:"\\(",push:[{token:"text",regex:"\\)",next:"pop"},{include:"#code"}]}],"#primitive-arrays":[{token:"storage.type.primitive.array.vala",regex:"\\b(?:bool|byte|sbyte|char|decimal|double|float|int|uint|long|ulong|object|short|ushort|string|void|int8|int16|int32|int64|uint8|uint16|uint32|uint64)(?:\\[\\])*\\b"}],"#primitive-types":[{token:"storage.type.primitive.vala",regex:"\\b(?:var|bool|byte|sbyte|char|decimal|double|float|int|uint|long|ulong|object|short|ushort|string|void|signal|int8|int16|int32|int64|uint8|uint16|uint32|uint64)\\b",comment:"var is not really a primitive, but acts like one in most cases"}],"#storage-modifiers":[{token:"storage.modifier.vala",regex:"\\b(?:public|private|protected|internal|static|final|sealed|virtual|override|abstract|readonly|volatile|dynamic|async|unsafe|out|ref|weak|owned|unowned|const)\\b",comment:"Not sure about unsafe and 
readonly"}],"#strings":[{token:"punctuation.definition.string.begin.vala",regex:'@"',push:[{token:"punctuation.definition.string.end.vala",regex:'"',next:"pop"},{token:"constant.character.escape.vala",regex:"\\\\.|%[\\w\\.\\-]+|\\$(?:\\w+|\\([\\w\\s\\+\\-\\*\\/]+\\))"},{defaultToken:"string.quoted.interpolated.vala"}]},{token:"punctuation.definition.string.begin.vala",regex:'"',push:[{token:"punctuation.definition.string.end.vala",regex:'"',next:"pop"},{token:"constant.character.escape.vala",regex:"\\\\."},{token:"constant.character.escape.vala",regex:"%[\\w\\.\\-]+"},{defaultToken:"string.quoted.double.vala"}]},{token:"punctuation.definition.string.begin.vala",regex:"'",push:[{token:"punctuation.definition.string.end.vala",regex:"'",next:"pop"},{token:"constant.character.escape.vala",regex:"\\\\."},{defaultToken:"string.quoted.single.vala"}]},{token:"punctuation.definition.string.begin.vala",regex:'"""',push:[{token:"punctuation.definition.string.end.vala",regex:'"""',next:"pop"},{token:"constant.character.escape.vala",regex:"%[\\w\\.\\-]+"},{defaultToken:"string.quoted.triple.vala"}]}],"#throws":[{token:"storage.modifier.vala",regex:"throws",push:[{token:"meta.throwables.vala",regex:"(?={|;)",next:"pop"},{include:"#object-types"},{defaultToken:"meta.throwables.vala"}]}],"#values":[{include:"#strings"},{include:"#object-types"},{include:"#constants-and-special-vars"}]},this.normalizeRules()};s.metaData={comment:"Based heavily on the Java bundle's language syntax. TODO:\n* Closures\n* Delegates\n* Properties: Better support for properties.\n* Annotations\n* Error domains\n* Named arguments\n* Array slicing, negative indexes, multidimensional\n* construct blocks\n* lock blocks?\n* regex literals\n* DocBlock syntax highlighting. (Currently importing javadoc)\n* Folding rule for comments.\n",fileTypes:["vala"],foldingStartMarker:"(\\{\\s*(//.*)?$|^\\s*// \\{\\{\\{)",foldingStopMarker:"^\\s*(\\}|// \\}\\}\\}$)",name:"Vala",scopeName:"source.vala"},r.inherits(s,i),t.ValaHighlightRules=s}),define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"],function(e,t,n){"use strict";var r=e("../../lib/oop"),i=e("../../range").Range,s=e("./fold_mode").FoldMode,o=t.FoldMode=function(e){e&&(this.foldingStartMarker=new RegExp(this.foldingStartMarker.source.replace(/\|[^|]*?$/,"|"+e.start)),this.foldingStopMarker=new RegExp(this.foldingStopMarker.source.replace(/\|[^|]*?$/,"|"+e.end)))};r.inherits(o,s),function(){this.foldingStartMarker=/(\{|\[)[^\}\]]*$|^\s*(\/\*)/,this.foldingStopMarker=/^[^\[\{]*(\}|\])|^[\s\*]*(\*\/)/,this.singleLineBlockCommentRe=/^\s*(\/\*).*\*\/\s*$/,this.tripleStarBlockCommentRe=/^\s*(\/\*\*\*).*\*\/\s*$/,this.startRegionRe=/^\s*(\/\*|\/\/)#?region\b/,this._getFoldWidgetBase=this.getFoldWidget,this.getFoldWidget=function(e,t,n){var r=e.getLine(n);if(this.singleLineBlockCommentRe.test(r)&&!this.startRegionRe.test(r)&&!this.tripleStarBlockCommentRe.test(r))return"";var i=this._getFoldWidgetBase(e,t,n);return!i&&this.startRegionRe.test(r)?"start":i},this.getFoldWidgetRange=function(e,t,n,r){var i=e.getLine(n);if(this.startRegionRe.test(i))return this.getCommentRegionBlock(e,i,n);var s=i.match(this.foldingStartMarker);if(s){var o=s.index;if(s[1])return this.openingBracketBlock(e,s[1],n,o);var u=e.getCommentFoldRange(n,o+s[0].length,1);return u&&!u.isMultiLine()&&(r?u=this.getSectionRange(e,n):t!="all"&&(u=null)),u}if(t==="markbegin")return;var s=i.match(this.foldingStopMarker);if(s){var o=s.index+s[0].length;return 
s[1]?this.closingBracketBlock(e,s[1],n,o):e.getCommentFoldRange(n,o,-1)}},this.getSectionRange=function(e,t){var n=e.getLine(t),r=n.search(/\S/),s=t,o=n.length;t+=1;var u=t,a=e.getLength();while(++t<a){n=e.getLine(t);var f=n.search(/\S/);if(f===-1)continue;if(r>f)break;var l=this.getFoldWidgetRange(e,"all",t);if(l){if(l.start.row<=s)break;if(l.isMultiLine())t=l.end.row;else if(r==f)break}u=t}return new i(s,o,u,e.getLine(u).length)},this.getCommentRegionBlock=function(e,t,n){var r=t.search(/\s*$/),s=e.getLength(),o=n,u=/^\s*(?:\/\*|\/\/|--)#?(end)?region\b/,a=1;while(++n<s){t=e.getLine(n);var f=u.exec(t);if(!f)continue;f[1]?a--:a++;if(!a)break}var l=n;if(l>o)return new i(o,r,l,t.length)}}.call(o.prototype)}),define("ace/mode/behaviour/cstyle",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator","ace/lib/lang"],function(e,t,n){"use strict";var r=e("../../lib/oop"),i=e("../behaviour").Behaviour,s=e("../../token_iterator").TokenIterator,o=e("../../lib/lang"),u=["text","paren.rparen","punctuation.operator"],a=["text","paren.rparen","punctuation.operator","comment"],f,l={},c=function(e){var t=-1;e.multiSelect&&(t=e.selection.index,l.rangeCount!=e.multiSelect.rangeCount&&(l={rangeCount:e.multiSelect.rangeCount}));if(l[t])return f=l[t];f=l[t]={autoInsertedBrackets:0,autoInsertedRow:-1,autoInsertedLineEnd:"",maybeInsertedBrackets:0,maybeInsertedRow:-1,maybeInsertedLineStart:"",maybeInsertedLineEnd:""}},h=function(e,t,n,r){var i=e.end.row-e.start.row;return{text:n+t+r,selection:[0,e.start.column+1,i,e.end.column+(i?0:1)]}},p=function(){this.add("braces","insertion",function(e,t,n,r,i){var s=n.getCursorPosition(),u=r.doc.getLine(s.row);if(i=="{"){c(n);var a=n.getSelectionRange(),l=r.doc.getTextRange(a);if(l!==""&&l!=="{"&&n.getWrapBehavioursEnabled())return h(a,l,"{","}");if(p.isSaneInsertion(n,r))return/[\]\}\)]/.test(u[s.column])||n.inMultiSelectMode?(p.recordAutoInsert(n,r,"}"),{text:"{}",selection:[1,1]}):(p.recordMaybeInsert(n,r,"{"),{text:"{",selection:[1,1]})}else if(i=="}"){c(n);var d=u.substring(s.column,s.column+1);if(d=="}"){var v=r.$findOpeningBracket("}",{column:s.column+1,row:s.row});if(v!==null&&p.isAutoInsertedClosing(s,u,i))return p.popAutoInsertedClosing(),{text:"",selection:[1,1]}}}else{if(i=="\n"||i=="\r\n"){c(n);var m="";p.isMaybeInsertedClosing(s,u)&&(m=o.stringRepeat("}",f.maybeInsertedBrackets),p.clearMaybeInsertedClosing());var d=u.substring(s.column,s.column+1);if(d==="}"){var g=r.findMatchingBracket({row:s.row,column:s.column+1},"}");if(!g)return null;var y=this.$getIndent(r.getLine(g.row))}else{if(!m){p.clearMaybeInsertedClosing();return}var y=this.$getIndent(u)}var b=y+r.getTabString();return{text:"\n"+b+"\n"+y+m,selection:[1,b.length,1,b.length]}}p.clearMaybeInsertedClosing()}}),this.add("braces","deletion",function(e,t,n,r,i){var s=r.doc.getTextRange(i);if(!i.isMultiLine()&&s=="{"){c(n);var o=r.doc.getLine(i.start.row),u=o.substring(i.end.column,i.end.column+1);if(u=="}")return i.end.column++,i;f.maybeInsertedBrackets--}}),this.add("parens","insertion",function(e,t,n,r,i){if(i=="("){c(n);var s=n.getSelectionRange(),o=r.doc.getTextRange(s);if(o!==""&&n.getWrapBehavioursEnabled())return h(s,o,"(",")");if(p.isSaneInsertion(n,r))return p.recordAutoInsert(n,r,")"),{text:"()",selection:[1,1]}}else if(i==")"){c(n);var u=n.getCursorPosition(),a=r.doc.getLine(u.row),f=a.substring(u.column,u.column+1);if(f==")"){var l=r.$findOpeningBracket(")",{column:u.column+1,row:u.row});if(l!==null&&p.isAutoInsertedClosing(u,a,i))return 
p.popAutoInsertedClosing(),{text:"",selection:[1,1]}}}}),this.add("parens","deletion",function(e,t,n,r,i){var s=r.doc.getTextRange(i);if(!i.isMultiLine()&&s=="("){c(n);var o=r.doc.getLine(i.start.row),u=o.substring(i.start.column+1,i.start.column+2);if(u==")")return i.end.column++,i}}),this.add("brackets","insertion",function(e,t,n,r,i){if(i=="["){c(n);var s=n.getSelectionRange(),o=r.doc.getTextRange(s);if(o!==""&&n.getWrapBehavioursEnabled())return h(s,o,"[","]");if(p.isSaneInsertion(n,r))return p.recordAutoInsert(n,r,"]"),{text:"[]",selection:[1,1]}}else if(i=="]"){c(n);var u=n.getCursorPosition(),a=r.doc.getLine(u.row),f=a.substring(u.column,u.column+1);if(f=="]"){var l=r.$findOpeningBracket("]",{column:u.column+1,row:u.row});if(l!==null&&p.isAutoInsertedClosing(u,a,i))return p.popAutoInsertedClosing(),{text:"",selection:[1,1]}}}}),this.add("brackets","deletion",function(e,t,n,r,i){var s=r.doc.getTextRange(i);if(!i.isMultiLine()&&s=="["){c(n);var o=r.doc.getLine(i.start.row),u=o.substring(i.start.column+1,i.start.column+2);if(u=="]")return i.end.column++,i}}),this.add("string_dquotes","insertion",function(e,t,n,r,i){if(i=='"'||i=="'"){c(n);var s=i,o=n.getSelectionRange(),u=r.doc.getTextRange(o);if(u!==""&&u!=="'"&&u!='"'&&n.getWrapBehavioursEnabled())return h(o,u,s,s);if(!u){var a=n.getCursorPosition(),f=r.doc.getLine(a.row),l=f.substring(a.column-1,a.column),p=f.substring(a.column,a.column+1),d=r.getTokenAt(a.row,a.column),v=r.getTokenAt(a.row,a.column+1);if(l=="\\"&&d&&/escape/.test(d.type))return null;var m=d&&/string|escape/.test(d.type),g=!v||/string|escape/.test(v.type),y;if(p==s)y=m!==g;else{if(m&&!g)return null;if(m&&g)return null;var b=r.$mode.tokenRe;b.lastIndex=0;var w=b.test(l);b.lastIndex=0;var E=b.test(l);if(w||E)return null;if(p&&!/[\s;,.})\]\\]/.test(p))return null;y=!0}return{text:y?s+s:"",selection:[1,1]}}}}),this.add("string_dquotes","deletion",function(e,t,n,r,i){var s=r.doc.getTextRange(i);if(!i.isMultiLine()&&(s=='"'||s=="'")){c(n);var o=r.doc.getLine(i.start.row),u=o.substring(i.start.column+1,i.start.column+2);if(u==s)return i.end.column++,i}})};p.isSaneInsertion=function(e,t){var n=e.getCursorPosition(),r=new s(t,n.row,n.column);if(!this.$matchTokenType(r.getCurrentToken()||"text",u)){var i=new s(t,n.row,n.column+1);if(!this.$matchTokenType(i.getCurrentToken()||"text",u))return!1}return r.stepForward(),r.getCurrentTokenRow()!==n.row||this.$matchTokenType(r.getCurrentToken()||"text",a)},p.$matchTokenType=function(e,t){return t.indexOf(e.type||e)>-1},p.recordAutoInsert=function(e,t,n){var r=e.getCursorPosition(),i=t.doc.getLine(r.row);this.isAutoInsertedClosing(r,i,f.autoInsertedLineEnd[0])||(f.autoInsertedBrackets=0),f.autoInsertedRow=r.row,f.autoInsertedLineEnd=n+i.substr(r.column),f.autoInsertedBrackets++},p.recordMaybeInsert=function(e,t,n){var r=e.getCursorPosition(),i=t.doc.getLine(r.row);this.isMaybeInsertedClosing(r,i)||(f.maybeInsertedBrackets=0),f.maybeInsertedRow=r.row,f.maybeInsertedLineStart=i.substr(0,r.column)+n,f.maybeInsertedLineEnd=i.substr(r.column),f.maybeInsertedBrackets++},p.isAutoInsertedClosing=function(e,t,n){return f.autoInsertedBrackets>0&&e.row===f.autoInsertedRow&&n===f.autoInsertedLineEnd[0]&&t.substr(e.column)===f.autoInsertedLineEnd},p.isMaybeInsertedClosing=function(e,t){return 
f.maybeInsertedBrackets>0&&e.row===f.maybeInsertedRow&&t.substr(e.column)===f.maybeInsertedLineEnd&&t.substr(0,e.column)==f.maybeInsertedLineStart},p.popAutoInsertedClosing=function(){f.autoInsertedLineEnd=f.autoInsertedLineEnd.substr(1),f.autoInsertedBrackets--},p.clearMaybeInsertedClosing=function(){f&&(f.maybeInsertedBrackets=0,f.maybeInsertedRow=-1)},r.inherits(p,i),t.CstyleBehaviour=p}),define("ace/mode/matching_brace_outdent",["require","exports","module","ace/range"],function(e,t,n){"use strict";var r=e("../range").Range,i=function(){};(function(){this.checkOutdent=function(e,t){return/^\s+$/.test(e)?/^\s*\}/.test(t):!1},this.autoOutdent=function(e,t){var n=e.getLine(t),i=n.match(/^(\s*\})/);if(!i)return 0;var s=i[1].length,o=e.findMatchingBracket({row:t,column:s});if(!o||o.row==t)return 0;var u=this.$getIndent(e.getLine(o.row));e.replace(new r(t,0,t,s-1),u)},this.$getIndent=function(e){return e.match(/^\s*/)[0]}}).call(i.prototype),t.MatchingBraceOutdent=i}),define("ace/mode/vala",["require","exports","module","ace/lib/oop","ace/mode/text","ace/tokenizer","ace/mode/vala_highlight_rules","ace/mode/folding/cstyle","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle","ace/mode/matching_brace_outdent"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text").Mode,s=e("../tokenizer").Tokenizer,o=e("./vala_highlight_rules").ValaHighlightRules,u=e("./folding/cstyle").FoldMode,a=e("./behaviour/cstyle").CstyleBehaviour,f=e("./folding/cstyle").FoldMode,l=e("./matching_brace_outdent").MatchingBraceOutdent,c=function(){this.HighlightRules=o,this.$outdent=new l,this.$behaviour=new a,this.foldingRules=new f};r.inherits(c,i),function(){this.lineCommentStart="//",this.blockComment={start:"/*",end:"*/"},this.getNextLineIndent=function(e,t,n){var r=this.$getIndent(t),i=this.getTokenizer().getLineTokens(t,e),s=i.tokens,o=i.state;if(s.length&&s[s.length-1].type=="comment")return r;if(e=="start"||e=="no_regex"){var u=t.match(/^.*(?:\bcase\b.*\:|[\{\(\[])\s*$/);u&&(r+=n)}else if(e=="doc-start"){if(o=="start"||o=="no_regex")return"";var u=t.match(/^\s*(\/?)\*/);u&&(u[1]&&(r+=" "),r+="* ")}return r},this.checkOutdent=function(e,t,n){return this.$outdent.checkOutdent(t,n)},this.autoOutdent=function(e,t,n){this.$outdent.autoOutdent(t,n)},this.$id="ace/mode/vala"}.call(c.prototype),t.Mode=c})
|
PypiClean
|
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/diag_scripts/droughtindex/diag_cdd.py
|
import logging
import os
from copy import deepcopy
import cmocean.cm
import iris
import numpy as np
from esmvaltool.diag_scripts.shared import (
run_diagnostic,
save_data,
save_figure,
)
from esmvaltool.diag_scripts.shared.plot import global_pcolormesh
logger = logging.getLogger(os.path.basename(__file__))
def save_results(cfg, cube, basename, ancestor_files):
"""Create a provenance record describing the diagnostic data and plot."""
basename = basename + '_' + cube.var_name
provenance = {
'caption': cube.long_name.replace('\n', ' '),
'statistics': ['other'],
'domains': ['global'],
'authors': ['berg_peter'],
'references': ['acknow_project'],
'ancestors': ancestor_files,
}
save_data(basename, provenance, cfg, cube)
kwargs = dict(cfg.get('plot', {}))
cmap_name = kwargs.get('cmap', 'rain')
if cmap_name in cmocean.cm.cmap_d:
kwargs['cmap'] = cmocean.cm.cmap_d[cmap_name]
global_pcolormesh(cube, **kwargs)
save_figure(basename, provenance, cfg)
def main(cfg):
"""Calculate drought indices."""
for filename, attributes in cfg['input_data'].items():
logger.info("Processing variable %s from dataset %s",
attributes['standard_name'], attributes['dataset'])
logger.debug("Loading %s", filename)
cube = iris.load_cube(filename)
drymaxcube, fqthcube = droughtindex(cube, cfg)
basename = os.path.splitext(os.path.basename(filename))[0]
save_results(cfg, drymaxcube, basename, ancestor_files=[filename])
save_results(cfg, fqthcube, basename, ancestor_files=[filename])
def droughtindex(cube, cfg):
"""Calculate drought stats."""
if cfg['dryindex'] == 'cdd':
plim = float(cfg['plim']) / 86400. # units of kg m-2 s-1
frlim = float(cfg['frlim'])
precip = deepcopy(cube.data)
precip[cube.data < plim] = 1
precip[cube.data >= plim] = 0
cube.data[0, :, :] = precip[0, :, :]
for ttt in range(1, cube.data.shape[0]):
cube.data[ttt, :, :] = (
(precip[ttt, :, :] + cube.data[ttt - 1, :, :]) *
precip[ttt, :, :])
dif = cube.data[0:-1, :, :] - cube.data[1:cube.data.shape[0], :, :]
whh = np.where(dif != cube.data[0:-1])
cube.data[whh] = 0
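        # Editor's note: a worked example of the bookkeeping above for a single
        # grid cell. With dry days marked 1 and wet days 0, precip = [1, 1, 0, 1, 1, 1]
        # gives the running count [1, 2, 0, 1, 2, 3]; zeroing every position whose
        # successor is still dry leaves [0, 2, 0, 0, 0, 3], so only the last day of
        # each dry spell keeps the spell length.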
# Longest consecutive period
drymaxcube = cube.collapsed('time', iris.analysis.MAX)
drymaxcube.long_name = (
'The greatest number of consecutive days per time period\n'
'with daily precipitation amount below {plim} mm.').format(**cfg)
drymaxcube.var_name = 'drymax'
drymaxcube.standard_name = None
drymaxcube.units = 'days'
whth = np.where(cube.data > frlim)
cube.data = cube.data * 0
cube.data[whth] = 1
fqthcube = cube.collapsed('time', iris.analysis.SUM)
fqthcube.long_name = (
'The number of consecutive dry day periods of at least {frlim} '
'days\nwith precipitation below {plim} mm each day.').format(**cfg)
fqthcube.var_name = 'dryfreq'
fqthcube.standard_name = None
fqthcube.units = None
return drymaxcube, fqthcube
if __name__ == '__main__':
with run_diagnostic() as config:
main(config)
|
PypiClean
|
/yt-dlp-custom-0.0.1.tar.gz/yt-dlp-custom-0.0.1/yt_dlp/extractor/thisav.py
|
from .common import InfoExtractor
from ..utils import remove_end
class ThisAVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*'
_TESTS = [{
# jwplayer
'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html',
'md5': '0480f1ef3932d901f0e0e719f188f19b',
'info_dict': {
'id': '47734',
'ext': 'flv',
'title': '高樹マリア - Just fit',
'uploader': 'dj7970',
'uploader_id': 'dj7970'
}
}, {
# html5 media
'url': 'http://www.thisav.com/video/242352/nerdy-18yo-big-ass-tattoos-and-glasses.html',
'md5': 'ba90c076bd0f80203679e5b60bf523ee',
'info_dict': {
'id': '242352',
'ext': 'mp4',
'title': 'Nerdy 18yo Big Ass Tattoos and Glasses',
'uploader': 'cybersluts',
'uploader_id': 'cybersluts',
},
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = remove_end(self._html_extract_title(webpage), ' - 視頻 - ThisAV.com-世界第一中文成人娛樂網站')
video_url = self._html_search_regex(
r"addVariable\('file','([^']+)'\);", webpage, 'video url', default=None)
if video_url:
info_dict = {
'formats': [{
'url': video_url,
}],
}
else:
entries = self._parse_html5_media_entries(url, webpage, video_id)
if entries:
info_dict = entries[0]
else:
info_dict = self._extract_jwplayer_data(
webpage, video_id, require_title=False)
uploader = self._html_search_regex(
r': <a href="http://www\.thisav\.com/user/[0-9]+/(?:[^"]+)">([^<]+)</a>',
webpage, 'uploader name', fatal=False)
uploader_id = self._html_search_regex(
r': <a href="http://www\.thisav\.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>',
webpage, 'uploader id', fatal=False)
info_dict.update({
'id': video_id,
'uploader': uploader,
'uploader_id': uploader_id,
'title': title,
})
return info_dict
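# Editor's note: like other yt-dlp extractors, this class is not instantiated
# directly; a URL matching _VALID_URL is dispatched to it through the normal
# API, e.g. yt_dlp.YoutubeDL().extract_info(url, download=False), or the CLI.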
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/customerinsights/get_relationship_link.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetRelationshipLinkResult',
'AwaitableGetRelationshipLinkResult',
'get_relationship_link',
]
@pulumi.output_type
class GetRelationshipLinkResult:
"""
The relationship link resource format.
"""
def __init__(__self__, description=None, display_name=None, id=None, interaction_type=None, link_name=None, mappings=None, name=None, profile_property_references=None, provisioning_state=None, related_profile_property_references=None, relationship_guid_id=None, relationship_name=None, tenant_id=None, type=None):
if description and not isinstance(description, dict):
raise TypeError("Expected argument 'description' to be a dict")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, dict):
raise TypeError("Expected argument 'display_name' to be a dict")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if interaction_type and not isinstance(interaction_type, str):
raise TypeError("Expected argument 'interaction_type' to be a str")
pulumi.set(__self__, "interaction_type", interaction_type)
if link_name and not isinstance(link_name, str):
raise TypeError("Expected argument 'link_name' to be a str")
pulumi.set(__self__, "link_name", link_name)
if mappings and not isinstance(mappings, list):
raise TypeError("Expected argument 'mappings' to be a list")
pulumi.set(__self__, "mappings", mappings)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if profile_property_references and not isinstance(profile_property_references, list):
raise TypeError("Expected argument 'profile_property_references' to be a list")
pulumi.set(__self__, "profile_property_references", profile_property_references)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if related_profile_property_references and not isinstance(related_profile_property_references, list):
raise TypeError("Expected argument 'related_profile_property_references' to be a list")
pulumi.set(__self__, "related_profile_property_references", related_profile_property_references)
if relationship_guid_id and not isinstance(relationship_guid_id, str):
raise TypeError("Expected argument 'relationship_guid_id' to be a str")
pulumi.set(__self__, "relationship_guid_id", relationship_guid_id)
if relationship_name and not isinstance(relationship_name, str):
raise TypeError("Expected argument 'relationship_name' to be a str")
pulumi.set(__self__, "relationship_name", relationship_name)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[Mapping[str, str]]:
"""
Localized descriptions for the Relationship Link.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[Mapping[str, str]]:
"""
Localized display name for the Relationship Link.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="interactionType")
def interaction_type(self) -> str:
"""
The InteractionType associated with the Relationship Link.
"""
return pulumi.get(self, "interaction_type")
@property
@pulumi.getter(name="linkName")
def link_name(self) -> str:
"""
The name of the Relationship Link.
"""
return pulumi.get(self, "link_name")
@property
@pulumi.getter
def mappings(self) -> Optional[Sequence['outputs.RelationshipLinkFieldMappingResponse']]:
"""
The mappings between Interaction and Relationship fields.
"""
return pulumi.get(self, "mappings")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="profilePropertyReferences")
def profile_property_references(self) -> Sequence['outputs.ParticipantProfilePropertyReferenceResponse']:
"""
The property references for the Profile of the Relationship.
"""
return pulumi.get(self, "profile_property_references")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="relatedProfilePropertyReferences")
def related_profile_property_references(self) -> Sequence['outputs.ParticipantProfilePropertyReferenceResponse']:
"""
The property references for the Related Profile of the Relationship.
"""
return pulumi.get(self, "related_profile_property_references")
@property
@pulumi.getter(name="relationshipGuidId")
def relationship_guid_id(self) -> str:
"""
The relationship guid id.
"""
return pulumi.get(self, "relationship_guid_id")
@property
@pulumi.getter(name="relationshipName")
def relationship_name(self) -> str:
"""
The Relationship associated with the Link.
"""
return pulumi.get(self, "relationship_name")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The hub name.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetRelationshipLinkResult(GetRelationshipLinkResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRelationshipLinkResult(
description=self.description,
display_name=self.display_name,
id=self.id,
interaction_type=self.interaction_type,
link_name=self.link_name,
mappings=self.mappings,
name=self.name,
profile_property_references=self.profile_property_references,
provisioning_state=self.provisioning_state,
related_profile_property_references=self.related_profile_property_references,
relationship_guid_id=self.relationship_guid_id,
relationship_name=self.relationship_name,
tenant_id=self.tenant_id,
type=self.type)
def get_relationship_link(hub_name: Optional[str] = None,
relationship_link_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRelationshipLinkResult:
"""
Use this data source to access information about an existing resource.
:param str hub_name: The name of the hub.
:param str relationship_link_name: The name of the relationship link.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['hubName'] = hub_name
__args__['relationshipLinkName'] = relationship_link_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:customerinsights:getRelationshipLink', __args__, opts=opts, typ=GetRelationshipLinkResult).value
return AwaitableGetRelationshipLinkResult(
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
interaction_type=__ret__.interaction_type,
link_name=__ret__.link_name,
mappings=__ret__.mappings,
name=__ret__.name,
profile_property_references=__ret__.profile_property_references,
provisioning_state=__ret__.provisioning_state,
related_profile_property_references=__ret__.related_profile_property_references,
relationship_guid_id=__ret__.relationship_guid_id,
relationship_name=__ret__.relationship_name,
tenant_id=__ret__.tenant_id,
type=__ret__.type)
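# Editor's sketch (placeholder names, not part of the original module):
#     link = get_relationship_link(hub_name="my-hub",
#                                  relationship_link_name="my-link",
#                                  resource_group_name="my-rg")
#     pulumi.export("relationshipName", link.relationship_name)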
|
PypiClean
|
/paddle2coreml-6.2-cp38-none-macosx_11_0_arm64.whl/coremltools/proto/ArrayFeatureExtractor_pb2.py
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pb2
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='ArrayFeatureExtractor.proto',
package='CoreML.Specification',
syntax='proto3',
serialized_pb=_b('\n\x1b\x41rrayFeatureExtractor.proto\x12\x14\x43oreML.Specification\"-\n\x15\x41rrayFeatureExtractor\x12\x14\n\x0c\x65xtractIndex\x18\x01 \x03(\x04\x42\x02H\x03\x62\x06proto3')
)
_ARRAYFEATUREEXTRACTOR = _descriptor.Descriptor(
name='ArrayFeatureExtractor',
full_name='CoreML.Specification.ArrayFeatureExtractor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='extractIndex', full_name='CoreML.Specification.ArrayFeatureExtractor.extractIndex', index=0,
number=1, type=4, cpp_type=4, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=98,
)
DESCRIPTOR.message_types_by_name['ArrayFeatureExtractor'] = _ARRAYFEATUREEXTRACTOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ArrayFeatureExtractor = _reflection.GeneratedProtocolMessageType('ArrayFeatureExtractor', (_message.Message,), dict(
DESCRIPTOR = _ARRAYFEATUREEXTRACTOR,
__module__ = 'ArrayFeatureExtractor_pb2'
# @@protoc_insertion_point(class_scope:CoreML.Specification.ArrayFeatureExtractor)
))
_sym_db.RegisterMessage(ArrayFeatureExtractor)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003'))
# @@protoc_insertion_point(module_scope)
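# Editor's note: extractIndex is a repeated uint64 field, so a message is
# typically built like this (a sketch, not part of the generated file):
#     extractor = ArrayFeatureExtractor()
#     extractor.extractIndex.extend([0, 2, 5])
#     payload = extractor.SerializeToString()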
|
PypiClean
|
/pulumi_aws_native-0.75.1a1693503310.tar.gz/pulumi_aws_native-0.75.1a1693503310/pulumi_aws_native/apigatewayv2/api.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ApiArgs', 'Api']
@pulumi.input_type
class ApiArgs:
def __init__(__self__, *,
api_key_selection_expression: Optional[pulumi.Input[str]] = None,
base_path: Optional[pulumi.Input[str]] = None,
body: Optional[Any] = None,
body_s3_location: Optional[pulumi.Input['ApiBodyS3LocationArgs']] = None,
cors_configuration: Optional[pulumi.Input['ApiCorsArgs']] = None,
credentials_arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disable_execute_api_endpoint: Optional[pulumi.Input[bool]] = None,
disable_schema_validation: Optional[pulumi.Input[bool]] = None,
fail_on_warnings: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol_type: Optional[pulumi.Input[str]] = None,
route_key: Optional[pulumi.Input[str]] = None,
route_selection_expression: Optional[pulumi.Input[str]] = None,
tags: Optional[Any] = None,
target: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an Api resource.
        :param Any tags: This resource type uses a map for Tags; consider using a list of Tag objects instead.
"""
if api_key_selection_expression is not None:
pulumi.set(__self__, "api_key_selection_expression", api_key_selection_expression)
if base_path is not None:
pulumi.set(__self__, "base_path", base_path)
if body is not None:
pulumi.set(__self__, "body", body)
if body_s3_location is not None:
pulumi.set(__self__, "body_s3_location", body_s3_location)
if cors_configuration is not None:
pulumi.set(__self__, "cors_configuration", cors_configuration)
if credentials_arn is not None:
pulumi.set(__self__, "credentials_arn", credentials_arn)
if description is not None:
pulumi.set(__self__, "description", description)
if disable_execute_api_endpoint is not None:
pulumi.set(__self__, "disable_execute_api_endpoint", disable_execute_api_endpoint)
if disable_schema_validation is not None:
pulumi.set(__self__, "disable_schema_validation", disable_schema_validation)
if fail_on_warnings is not None:
pulumi.set(__self__, "fail_on_warnings", fail_on_warnings)
if name is not None:
pulumi.set(__self__, "name", name)
if protocol_type is not None:
pulumi.set(__self__, "protocol_type", protocol_type)
if route_key is not None:
pulumi.set(__self__, "route_key", route_key)
if route_selection_expression is not None:
pulumi.set(__self__, "route_selection_expression", route_selection_expression)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if target is not None:
pulumi.set(__self__, "target", target)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="apiKeySelectionExpression")
def api_key_selection_expression(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "api_key_selection_expression")
@api_key_selection_expression.setter
def api_key_selection_expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_key_selection_expression", value)
@property
@pulumi.getter(name="basePath")
def base_path(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "base_path")
@base_path.setter
def base_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "base_path", value)
@property
@pulumi.getter
def body(self) -> Optional[Any]:
return pulumi.get(self, "body")
@body.setter
def body(self, value: Optional[Any]):
pulumi.set(self, "body", value)
@property
@pulumi.getter(name="bodyS3Location")
def body_s3_location(self) -> Optional[pulumi.Input['ApiBodyS3LocationArgs']]:
return pulumi.get(self, "body_s3_location")
@body_s3_location.setter
def body_s3_location(self, value: Optional[pulumi.Input['ApiBodyS3LocationArgs']]):
pulumi.set(self, "body_s3_location", value)
@property
@pulumi.getter(name="corsConfiguration")
def cors_configuration(self) -> Optional[pulumi.Input['ApiCorsArgs']]:
return pulumi.get(self, "cors_configuration")
@cors_configuration.setter
def cors_configuration(self, value: Optional[pulumi.Input['ApiCorsArgs']]):
pulumi.set(self, "cors_configuration", value)
@property
@pulumi.getter(name="credentialsArn")
def credentials_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "credentials_arn")
@credentials_arn.setter
def credentials_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "credentials_arn", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="disableExecuteApiEndpoint")
def disable_execute_api_endpoint(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "disable_execute_api_endpoint")
@disable_execute_api_endpoint.setter
def disable_execute_api_endpoint(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_execute_api_endpoint", value)
@property
@pulumi.getter(name="disableSchemaValidation")
def disable_schema_validation(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "disable_schema_validation")
@disable_schema_validation.setter
def disable_schema_validation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_schema_validation", value)
@property
@pulumi.getter(name="failOnWarnings")
def fail_on_warnings(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "fail_on_warnings")
@fail_on_warnings.setter
def fail_on_warnings(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "fail_on_warnings", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="protocolType")
def protocol_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "protocol_type")
@protocol_type.setter
def protocol_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol_type", value)
@property
@pulumi.getter(name="routeKey")
def route_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "route_key")
@route_key.setter
def route_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "route_key", value)
@property
@pulumi.getter(name="routeSelectionExpression")
def route_selection_expression(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "route_selection_expression")
@route_selection_expression.setter
def route_selection_expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "route_selection_expression", value)
@property
@pulumi.getter
def tags(self) -> Optional[Any]:
"""
        This resource type uses a map for Tags; consider using a list of Tag objects instead.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[Any]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def target(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "target")
@target.setter
def target(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
class Api(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_key_selection_expression: Optional[pulumi.Input[str]] = None,
base_path: Optional[pulumi.Input[str]] = None,
body: Optional[Any] = None,
body_s3_location: Optional[pulumi.Input[pulumi.InputType['ApiBodyS3LocationArgs']]] = None,
cors_configuration: Optional[pulumi.Input[pulumi.InputType['ApiCorsArgs']]] = None,
credentials_arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disable_execute_api_endpoint: Optional[pulumi.Input[bool]] = None,
disable_schema_validation: Optional[pulumi.Input[bool]] = None,
fail_on_warnings: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol_type: Optional[pulumi.Input[str]] = None,
route_key: Optional[pulumi.Input[str]] = None,
route_selection_expression: Optional[pulumi.Input[str]] = None,
tags: Optional[Any] = None,
target: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Resource Type definition for AWS::ApiGatewayV2::Api
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param Any tags: This resource type uses a map for Tags; consider using a List of Tag instead.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ApiArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::ApiGatewayV2::Api
:param str resource_name: The name of the resource.
:param ApiArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApiArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_key_selection_expression: Optional[pulumi.Input[str]] = None,
base_path: Optional[pulumi.Input[str]] = None,
body: Optional[Any] = None,
body_s3_location: Optional[pulumi.Input[pulumi.InputType['ApiBodyS3LocationArgs']]] = None,
cors_configuration: Optional[pulumi.Input[pulumi.InputType['ApiCorsArgs']]] = None,
credentials_arn: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disable_execute_api_endpoint: Optional[pulumi.Input[bool]] = None,
disable_schema_validation: Optional[pulumi.Input[bool]] = None,
fail_on_warnings: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol_type: Optional[pulumi.Input[str]] = None,
route_key: Optional[pulumi.Input[str]] = None,
route_selection_expression: Optional[pulumi.Input[str]] = None,
tags: Optional[Any] = None,
target: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApiArgs.__new__(ApiArgs)
__props__.__dict__["api_key_selection_expression"] = api_key_selection_expression
__props__.__dict__["base_path"] = base_path
__props__.__dict__["body"] = body
__props__.__dict__["body_s3_location"] = body_s3_location
__props__.__dict__["cors_configuration"] = cors_configuration
__props__.__dict__["credentials_arn"] = credentials_arn
__props__.__dict__["description"] = description
__props__.__dict__["disable_execute_api_endpoint"] = disable_execute_api_endpoint
__props__.__dict__["disable_schema_validation"] = disable_schema_validation
__props__.__dict__["fail_on_warnings"] = fail_on_warnings
__props__.__dict__["name"] = name
__props__.__dict__["protocol_type"] = protocol_type
__props__.__dict__["route_key"] = route_key
__props__.__dict__["route_selection_expression"] = route_selection_expression
__props__.__dict__["tags"] = tags
__props__.__dict__["target"] = target
__props__.__dict__["version"] = version
__props__.__dict__["api_endpoint"] = None
__props__.__dict__["api_id"] = None
super(Api, __self__).__init__(
'aws-native:apigatewayv2:Api',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Api':
"""
Get an existing Api resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ApiArgs.__new__(ApiArgs)
__props__.__dict__["api_endpoint"] = None
__props__.__dict__["api_id"] = None
__props__.__dict__["api_key_selection_expression"] = None
__props__.__dict__["base_path"] = None
__props__.__dict__["body"] = None
__props__.__dict__["body_s3_location"] = None
__props__.__dict__["cors_configuration"] = None
__props__.__dict__["credentials_arn"] = None
__props__.__dict__["description"] = None
__props__.__dict__["disable_execute_api_endpoint"] = None
__props__.__dict__["disable_schema_validation"] = None
__props__.__dict__["fail_on_warnings"] = None
__props__.__dict__["name"] = None
__props__.__dict__["protocol_type"] = None
__props__.__dict__["route_key"] = None
__props__.__dict__["route_selection_expression"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["target"] = None
__props__.__dict__["version"] = None
return Api(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiEndpoint")
def api_endpoint(self) -> pulumi.Output[str]:
return pulumi.get(self, "api_endpoint")
@property
@pulumi.getter(name="apiId")
def api_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "api_id")
@property
@pulumi.getter(name="apiKeySelectionExpression")
def api_key_selection_expression(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "api_key_selection_expression")
@property
@pulumi.getter(name="basePath")
def base_path(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "base_path")
@property
@pulumi.getter
def body(self) -> pulumi.Output[Optional[Any]]:
return pulumi.get(self, "body")
@property
@pulumi.getter(name="bodyS3Location")
def body_s3_location(self) -> pulumi.Output[Optional['outputs.ApiBodyS3Location']]:
return pulumi.get(self, "body_s3_location")
@property
@pulumi.getter(name="corsConfiguration")
def cors_configuration(self) -> pulumi.Output[Optional['outputs.ApiCors']]:
return pulumi.get(self, "cors_configuration")
@property
@pulumi.getter(name="credentialsArn")
def credentials_arn(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "credentials_arn")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="disableExecuteApiEndpoint")
def disable_execute_api_endpoint(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "disable_execute_api_endpoint")
@property
@pulumi.getter(name="disableSchemaValidation")
def disable_schema_validation(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "disable_schema_validation")
@property
@pulumi.getter(name="failOnWarnings")
def fail_on_warnings(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "fail_on_warnings")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protocolType")
def protocol_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "protocol_type")
@property
@pulumi.getter(name="routeKey")
def route_key(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "route_key")
@property
@pulumi.getter(name="routeSelectionExpression")
def route_selection_expression(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "route_selection_expression")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Any]]:
"""
This resource type uses a map for Tags; consider using a List of Tag instead.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def target(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "target")
@property
@pulumi.getter
def version(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "version")
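# --- Illustrative usage (not part of the generated SDK) ---------------------
# A minimal sketch, assuming the provider package is importable as
# ``pulumi_aws_native``; the resource name and property values below are
# made-up placeholders, not taken from this module.
#
#     import pulumi
#     import pulumi_aws_native as aws_native
#
#     api = aws_native.apigatewayv2.Api(
#         "example-api",
#         name="example-api",
#         protocol_type="HTTP",
#         description="HTTP API created with the aws-native provider",
#     )
#
#     pulumi.export("apiEndpoint", api.api_endpoint)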
/julynter-0.4.2.tar.gz/julynter-0.4.2/src/linterlab/notebookhandler.ts
import { JSONObject, PartialJSONArray } from '@lumino/coreutils';
import { IDisposable } from '@lumino/disposable';
import { Signal, ISignal } from '@lumino/signaling';
import { ISessionContext } from '@jupyterlab/apputils';
import { IDocumentManager } from '@jupyterlab/docmanager';
import { NotebookPanel } from '@jupyterlab/notebook';
import { KernelMessage } from '@jupyterlab/services';
import { IInfoReply } from '@jupyterlab/services/lib/kernel/messages';
import { IComm } from '@jupyterlab/services/lib/kernel/kernel';
import {
IGenericNotebookMetadata,
IGenericCellMetadata,
IQueryResult,
IKernelMatcher,
GenericMatcher,
ILintingResult,
IReport,
} from '../linter/interfaces';
import { Linter } from '../linter/lint';
import { Config } from './config';
import { ExperimentManager } from './experimentmanager';
import { GroupGenerator, ItemGenerator } from './itemgenerator';
import { OptionsManager } from './optionsmanager';
import { ErrorHandler } from './errorhandler';
import { CellWidget } from './view/cellwidget';
export interface IJulynterKernelUpdate {
status: string;
kernelName?: string;
result?: IQueryResult;
}
export class NotebookHandler implements IDisposable {
private _language: IKernelMatcher | null;
private _kernelRestarted = new Signal<this, Promise<void>>(this);
private _disposed = new Signal<this, void>(this);
private _inspected = new Signal<this, IJulynterKernelUpdate>(this);
private _isDisposed = false;
private _ready: Promise<void>;
private _panelId: string;
private _nbPanel: NotebookPanel;
private _session: ISessionContext;
private _docManager: IDocumentManager;
private _update: () => void;
private _experimentManager: ExperimentManager;
private _eh: ErrorHandler;
private _reportedStart: boolean;
private _icomm: IComm;
public options: OptionsManager;
public update: IQueryResult | null;
public hasKernel: boolean;
public cellLints: { [num: string]: CellWidget };
_boundQueryCall: (
sess: ISessionContext,
args: KernelMessage.IMessage<KernelMessage.MessageType>
) => void;
constructor(
docManager: IDocumentManager,
session: ISessionContext,
nbPanel: NotebookPanel,
config: Config,
em: ExperimentManager,
eh: ErrorHandler,
update: () => void
) {
this._eh = eh;
try {
this.cellLints = {};
this._docManager = docManager;
this._session = session;
this._nbPanel = nbPanel;
this._experimentManager = em;
this._update = update;
this._panelId = this._nbPanel.id;
this._language = GenericMatcher;
this.options = new OptionsManager(nbPanel, config, em, eh, update);
this.update = {};
this.hasKernel = false;
this._reportedStart = false;
this._icomm = null;
this._boundQueryCall = this._queryCall.bind(this);
em.reportActivity(this, 'open');
session.statusChanged.connect(
(sender: ISessionContext, status: KernelMessage.Status) => {
try {
const kdn = session.kernelDisplayName;
if (status.endsWith('restarting')) {
em.reportKernelActivity(this, 'restartKernel', kdn);
this._reportedStart = true;
this._kernelRestarted.emit(this._session.ready);
} else if (status === 'unknown') {
em.reportKernelActivity(this, 'stopKernel', kdn);
this._reportedStart = false;
} else if (
(status === 'idle' || status === 'busy') &&
!this._reportedStart
) {
em.reportKernelActivity(this, 'useKernel', kdn);
this._reportedStart = true;
}
} catch (error) {
throw this._eh.report(
error,
'NotebookHandler:constructor.statusChanged',
[nbPanel.title.label, status]
);
}
}
);
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:constructor', [
nbPanel.title.label,
]);
}
}
findLanguage(
kernelName: string,
languageName: string
): Promise<IKernelMatcher> {
return new Promise((resolve, reject) => {
this.options.reloadOptions();
for (const kid of this.options.checks.kernel.order) {
const kernel = this.options.checks.kernel.values[kid];
if (kernel.kernel && kernelName.match(kernel.kernel)) {
resolve(kernel);
return;
}
if (kernel.language && languageName.match(kernel.language)) {
resolve(kernel);
return;
}
}
resolve({
kernel: null,
language: null,
initScript: null,
name: 'default',
});
});
}
getKernelLanguage(): Promise<IKernelMatcher> {
try {
return this._session.session.kernel.info.then((infoReply: IInfoReply) => {
try {
this._session.session.kernel.name;
const model = this.findLanguage(
this._session.session.kernel.name,
infoReply.language_info.name
);
this._experimentManager.reportNotebookKernel(
this,
this._session.session.kernel.name,
infoReply.language_info.name
);
return model;
} catch (error) {
throw this._eh.report(
error,
'NotebookHandler:getKernelLanguage.then',
[this._session.session.kernel.name, infoReply.language_info.name]
);
}
});
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:getKernelLanguage', []);
}
}
createComm(): void {
const kernel = this._session.session.kernel;
if (kernel) {
kernel.registerCommTarget('julynter.comm', (comm, msg) => {
this._icomm = comm;
this._icomm.onMsg = this._receiveJulynterQuery.bind(this);
// console.log('ICOMM!', this._icomm.commId);
});
}
}
configureHandler(language: IKernelMatcher): void {
try {
this._language = language;
this._ready = this._session.ready.then(() => {
this.createComm();
this._initOnKernel();
});
this._kernelRestarted.connect(
(sender: any, kernelReady: Promise<void>) => {
this._inspected.emit({
status: 'Restarting Kernel...',
} as IJulynterKernelUpdate);
// Emit restarting
this._ready = kernelReady.then(() => {
this.createComm();
this._initOnKernel();
});
}
);
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:configureHandler', [
language.name,
]);
}
}
disconnectHandler(): void {
try {
this.inspected.disconnect(this.onQueryUpdate, this);
this.disposed.disconnect(this.onSourceDisposed, this);
this._experimentManager.reportActivity(this, 'MoveOut');
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:disconnectHandler', []);
}
}
connectHandler(): void {
try {
this.inspected.connect(this.onQueryUpdate, this);
this.disposed.connect(this.onSourceDisposed, this);
this.performQuery();
this._experimentManager.reportActivity(this, 'MoveIn');
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:connectHandler', []);
}
}
get name(): string {
return this._session.path;
}
get id(): string {
return this._panelId;
}
get nbPanel(): NotebookPanel {
return this._nbPanel;
}
get experimentManager(): ExperimentManager {
return this._experimentManager;
}
/**
* A signal emitted when the handler is disposed.
*/
get disposed(): ISignal<NotebookHandler, void> {
return this._disposed;
}
get isDisposed(): boolean {
return this._isDisposed;
}
get ready(): Promise<void> {
return this._ready;
}
/**
* A signal emitted when an inspector value is generated.
*/
get inspected(): ISignal<NotebookHandler, IJulynterKernelUpdate> {
return this._inspected;
}
/**
* Disposes the kernel connector.
*/
dispose(): void {
try {
if (this.isDisposed) {
return;
}
this._experimentManager.reportCloseNotebook(this);
this._isDisposed = true;
this._disposed.emit(void 0);
Signal.clearData(this);
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:dispose', []);
}
}
/**
* Map linting reports
*/
private mapReports(reports: IReport[]): PartialJSONArray {
const result: PartialJSONArray = [];
reports.forEach((report) => {
result.push({
text: report.text,
reportType: report.reportType,
reportId: report.reportId,
suggestion: report.suggestion,
reason: report.reason,
cellId: report.cellId,
hash: report.hash,
});
});
return result;
}
/**
* Lint notebook
*/
public lint(): ILintingResult {
try {
const groupGenerator = new GroupGenerator(
this._nbPanel,
this._update,
this._eh
);
const itemGenerator = new ItemGenerator(this._docManager, this, this._eh);
const notebookMetadata: IGenericNotebookMetadata = {
title: this.nbPanel.title.label,
cells: (this.nbPanel.content
.widgets as unknown) as IGenericCellMetadata[],
};
const linter = new Linter(this.options, this.update, this.hasKernel);
const results = linter.generate(
notebookMetadata,
itemGenerator,
groupGenerator
);
this._experimentManager.reportLinting(this, results);
this.nbPanel.model.metadata.set('julynter-results', {
visible: this.mapReports(results.visible),
filteredType: this.mapReports(results.filteredType),
filteredId: this.mapReports(results.filteredId),
filteredRestart: this.mapReports(results.filteredRestart),
filteredIndividual: this.mapReports(results.filteredIndividual),
hash: results.hash,
});
return results;
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:lint', []);
}
}
private _createPromise(error: any = null): Promise<any> {
return new Promise<void>((resolve, reject) => {
if (!error) {
resolve();
} else {
reject(error);
}
});
}
private initScript(): Promise<void> {
try {
if (this._language === null) {
return this._createPromise('Language not loaded');
}
const code = this._language.initScript;
if (code === null) {
return this._createPromise();
}
const content: KernelMessage.IExecuteRequestMsg['content'] = {
code: code,
stop_on_error: false,
store_history: false,
silent: true,
};
const kernel = this._session.session.kernel;
if (!kernel) {
return Promise.reject(
new Error('Require kernel to perform advanced julynter operations!')
);
}
const future = kernel.requestExecute(content, false);
future.onIOPub = (msg: KernelMessage.IIOPubMessage): void => {
this.performQuery();
};
return future.done.then(() => {
return;
});
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:initScript', []);
}
}
public send(data: JSONObject): void {
const session = this._session.session;
if (
this._icomm &&
session &&
session.kernel &&
session.kernel.hasComm(this._icomm.commId)
) {
this._icomm.send(data);
}
}
/**
* Send a query command to the kernel
*/
public performQuery(): void {
this.send({
operation: 'query',
requirements: this.options.checkRequirements(),
});
}
/**
* Send message to kernel add a module
*/
public addModule(module: string): void {
this.send({
operation: 'addModule',
module: module,
requirements: this.options.checkRequirements(),
});
}
/**
* Initializes the kernel by running the set up script located at _initScriptPath.
*/
private _initOnKernel(): Promise<void> {
return this.initScript().then(() => {
this._session.iopubMessage.disconnect(this._boundQueryCall);
this._session.iopubMessage.connect(this._boundQueryCall);
});
}
/*
* Handle query response
*/
private _receiveJulynterQuery(
msg: KernelMessage.ICommMsgMsg
): void | PromiseLike<void> {
try {
const operation = msg.content.data.operation;
if (operation === 'queryResult') {
this._inspected.emit({
status: '',
kernelName: this._session.kernelDisplayName || '',
result: msg.content.data as IQueryResult,
});
} else if (operation === 'error') {
this._eh.report(
'Failed to run ICOMM command',
'NotebookHandler:_receiveJulynterQuery',
[msg]
);
} else if (operation === 'init') {
this.performQuery();
}
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:_receiveJulynterQuery', [
msg,
]);
}
}
/*
* Invokes an inspection if the signal emitted from the specified session is an 'execute_input' msg.
*/
private _queryCall(
sess: ISessionContext,
args: KernelMessage.IMessage
): void {
try {
const msg: KernelMessage.IIOPubMessage = args as KernelMessage.IIOPubMessage;
const msgType = msg.header.msg_type;
switch (msgType) {
case 'execute_input':
this.performQuery();
break;
default:
break;
}
} catch (error) {
throw this._eh.report(error, 'NotebookHandler:_queryCall', [
args.content,
]);
}
}
/**
* Handle kernel signals.
*/
protected onQueryUpdate(
sender: NotebookHandler,
update: IJulynterKernelUpdate
): void {
if (update.status !== '') {
this.update = {};
this.hasKernel = false;
} else {
this.hasKernel = true;
this.update = update.result;
}
this._update();
}
/**
* Handle disposed signals.
*/
protected onSourceDisposed(sender: NotebookHandler, args: void): void {
return;
}
}
/alibabacloud_scsp20200702-1.0.0.tar.gz/alibabacloud_scsp20200702-1.0.0/README.md
English | [简体中文](README-CN.md)

## Alibaba Cloud scsp SDK for Python
## Requirements
- Python >= 3.6
## Installation
- **Install with pip**
The Python SDK uses the common package-management tool `pip`. If pip is not installed, see the [pip user guide](https://pip.pypa.io/en/stable/installing/ "pip User Guide") to install it.
```bash
# Install the alibabacloud_scsp20200702
pip install alibabacloud_scsp20200702
```
## Issues
If you encounter a problem, please [open an issue](https://github.com/aliyun/alibabacloud-sdk/issues/new). Issues that do not conform to the guidelines may be closed immediately.
## Usage
[Quick Examples](https://github.com/aliyun/alibabacloud-python-sdk/blob/master/docs/0-Usage-EN.md#quick-examples)
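A minimal, illustrative client setup following the standard Alibaba Cloud Python SDK pattern (the endpoint value and credential handling below are placeholders, not taken from this README; see the linked examples for authoritative usage):
```python
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_scsp20200702.client import Client as ScspClient

# Build a client configuration; replace the placeholders with real credentials.
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
)
# Placeholder endpoint; use the endpoint for your region.
config.endpoint = 'scsp.cn-shanghai.aliyuncs.com'
client = ScspClient(config)
```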
## Changelog
Detailed changes for each release are documented in the [release notes](./ChangeLog.md).
## References
- [Latest Release](https://github.com/aliyun/alibabacloud-sdk/tree/master/python)
## License
[Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0)
Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
/MDSuite-0.2.0-py3-none-any.whl/mdsuite/utils/calculator_helper_methods.py
import logging
from typing import Any, Iterable, Tuple, Union
import jax
import jax.numpy as jnp
import numpy as np
from numpy import ndarray
from scipy.interpolate import UnivariateSpline
from scipy.optimize import curve_fit
log = logging.getLogger(__name__)
def fit_einstein_curve(
x_data: np.ndarray, y_data: np.ndarray, fit_max_index: int
) -> Tuple[Union[ndarray, Iterable, int, float], Any, list, list]:
"""
Fit operation for Einstein calculations
Parameters
----------
x_data : np.ndarray
x data to use in the fitting.
y_data : np.ndarray
y_data to use in the fitting.
fit_max_index : int
Index up to which the fit is performed; the fit parameters obtained at this index are returned.
Returns
-------
popt : list
    Optimal fit parameters (gradient and intercept) at ``fit_max_index``.
pcov : list
    Covariance matrix of the fit parameters.
gradients : list
    Gradients obtained from fits over successively longer windows.
gradient_errors : list
    Standard errors of the fitted gradients.
"""
# Defined here for completeness.
popt = []
pcov = []
def func(x, m, a):
"""
Standard linear function for fitting.
Parameters
----------
x : list/np.array
x axis tensor_values for the fit
m : float
gradient of the line
a : float
scalar offset, also the y-intercept for those who did not
get much maths in school.
Returns
-------
m * x + a
"""
return m * x + a
spline_data = UnivariateSpline(x_data, y_data, s=0, k=4)
derivatives = spline_data.derivative(n=2)(x_data)
derivatives[abs(derivatives) < 1e-5] = 0
start_index = np.argmin(abs(derivatives))
gradients = []
gradient_errors = []
for i in range(start_index + 2, len(y_data)):
popt_temp, pcov_temp = curve_fit(
func, xdata=x_data[start_index:i], ydata=y_data[start_index:i]
)
gradients.append(popt_temp[0])
gradient_errors.append(np.sqrt(np.diag(pcov_temp))[0])
if i == fit_max_index:
popt = popt_temp
pcov = pcov_temp
return popt, pcov, gradients, gradient_errors
def correlate(ds_a: np.ndarray, ds_b: np.ndarray) -> np.ndarray:
"""
Compute a simple correlation computation mapped over the spatial dimension of
the array.
Parameters
----------
ds_a : np.ndarray (n_configurations, dimension)
Tensor of the first set of data for a single particle.
ds_b : np.ndarray (n_configurations, dimension)
Tensor of the second set of data for a single particle.
Returns
-------
np.ndarray
    The correlation of the two data sets, averaged over the spatial dimension;
    only the non-negative lags (second half of the full correlation) are returned.
"""
def _correlate_op(a: np.ndarray, b: np.ndarray):
"""
Actual correlation op to be mapped over the spatial dimension.
Parameters
----------
a : np.ndarray (n_configurations, dimension)
Tensor of the first set of data for a single particle.
b : np.ndarray (n_configurations, dimension)
Tensor of the second set of data for a single particle.
Returns
-------
correlation over a single dimension.
"""
return jnp.correlate(a, b, mode="full")
# We want to vmap over the last axis
correlate_vmap = jax.vmap(_correlate_op, in_axes=-1)
acf = np.mean(correlate_vmap(ds_a, ds_b), axis=0)
return acf[int(len(acf) / 2) :]
def msd_operation(ds_a, ds_b) -> np.ndarray:
"""
Perform an msd operation between two data sets mapping over spatial dimension.
Parameters
----------
ds_a : np.ndarray (n_timesteps, dimension)
ds_b : np.ndarray (n_timesteps, dimension)
Returns
-------
"""
def _difference_op(a, b):
"""
Difference operation to map over spatial dimension.
Parameters
----------
a : (n_timesteps)
b : (n_timesteps)
Returns
-------
"""
return (a - a[0]) * (b - b[0])
difference_vmap = jax.vmap(_difference_op, in_axes=-1)
return np.mean(difference_vmap(ds_a, ds_b), axis=0)
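# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch with synthetic data; the values below are made up and the
# demo only runs when this file is executed directly.
if __name__ == "__main__":
    # Perfectly linear "Einstein" regime: msd = 2 * t + 1.
    time = np.linspace(0.0, 10.0, 50)
    msd = 2.0 * time + 1.0
    popt, pcov, grads, grad_errs = fit_einstein_curve(time, msd, fit_max_index=40)
    print(f"fitted gradient {popt[0]:.3f}, intercept {popt[1]:.3f}")
    # Autocorrelation of a random (n_configurations, 3) trajectory.
    rng = np.random.default_rng(42)
    trajectory = rng.normal(size=(50, 3))
    acf = correlate(trajectory, trajectory)
    print(f"zero-lag autocorrelation {acf[0]:.3f}")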
/rezdy_api_for_suppliers_client-1.0.1.tar.gz/rezdy_api_for_suppliers_client-1.0.1/rezdy_api_for_suppliers_client/api/categories/remove_product.py
from typing import Any, Dict, Optional
import httpx
from ...client import Client
from ...models.response_rate import ResponseRate
from ...types import Response
def _get_kwargs(
category_id: int,
product_code: str,
*,
client: Client,
) -> Dict[str, Any]:
url = "{}/categories/{categoryId}/products/{productCode}".format(
client.base_url, categoryId=category_id, productCode=product_code
)
headers: Dict[str, str] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
return {
"method": "delete",
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
}
def _parse_response(*, response: httpx.Response) -> Optional[ResponseRate]:
if response.status_code == 200:
response_200 = ResponseRate.from_dict(response.json())
return response_200
return None
def _build_response(*, response: httpx.Response) -> Response[ResponseRate]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
category_id: int,
product_code: str,
*,
client: Client,
) -> Response[ResponseRate]:
"""Remove product from category
Removes a product from an existing category
Args:
category_id (int):
product_code (str):
Returns:
Response[ResponseRate]
"""
kwargs = _get_kwargs(
category_id=category_id,
product_code=product_code,
client=client,
)
response = httpx.request(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
def sync(
category_id: int,
product_code: str,
*,
client: Client,
) -> Optional[ResponseRate]:
"""Remove product from category
Removes a product from an existing category
Args:
category_id (int):
product_code (str):
Returns:
Optional[ResponseRate]
"""
return sync_detailed(
category_id=category_id,
product_code=product_code,
client=client,
).parsed
async def asyncio_detailed(
category_id: int,
product_code: str,
*,
client: Client,
) -> Response[ResponseRate]:
"""Remove product from category
Removes a product from an existing category
Args:
category_id (int):
product_code (str):
Returns:
Response[ResponseRate]
"""
kwargs = _get_kwargs(
category_id=category_id,
product_code=product_code,
client=client,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.request(**kwargs)
return _build_response(response=response)
async def asyncio(
category_id: int,
product_code: str,
*,
client: Client,
) -> Optional[ResponseRate]:
"""Remove product from category
Removes a product from an existing category
Args:
category_id (int):
product_code (str):
Returns:
Response[ResponseRate]
"""
return (
await asyncio_detailed(
category_id=category_id,
product_code=product_code,
client=client,
)
).parsed
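# --- Illustrative usage (not part of the generated client) ------------------
# A minimal sketch assuming the standard generated ``Client`` constructor; the
# base URL, header name and identifiers below are placeholders, not taken from
# the Rezdy documentation. Runs only when this file is executed directly.
if __name__ == "__main__":
    client = Client(
        base_url="https://api.rezdy.com/v1",   # placeholder base URL
        headers={"apiKey": "<your-api-key>"},  # placeholder auth header
    )
    # Remove a (hypothetical) product from a (hypothetical) category.
    rate = sync(category_id=123, product_code="P00TNX", client=client)
    print(rate)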
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/libzxbasm/z80.py
class Opcode:
""" Describes opcodes and other info.
"""
def __init__(self, asm: str, time: int, size: int, opcode: str):
self.asm = asm
self.T = time
self.size = size
self.opcode = opcode
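# --- Illustrative lookup (not part of the original module) ------------------
# Z80SET (defined below) maps a canonical mnemonic to its Opcode entry, e.g.:
#     Z80SET["NOP"].T      -> 4     (T-states)
#     Z80SET["NOP"].size   -> 1     (bytes)
#     Z80SET["NOP"].opcode -> "00"  (machine code; "XX" marks operand bytes)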
Z80SET = {
"ADC A,(HL)": Opcode("ADC A,(HL)", 7, 1, "8E"),
"ADC A,(IX+N)": Opcode("ADC A,(IX+N)", 19, 3, "DD 8E XX"),
"ADC A,(IY+N)": Opcode("ADC A,(IY+N)", 19, 3, "FD 8E XX"),
"ADC A,A": Opcode("ADC A,A", 4, 1, "8F"),
"ADC A,C": Opcode("ADC A,C", 4, 1, "89"),
"ADC A,B": Opcode("ADC A,B", 4, 1, "88"),
"ADC A,E": Opcode("ADC A,E", 4, 1, "8B"),
"ADC A,D": Opcode("ADC A,D", 4, 1, "8A"),
"ADC A,H": Opcode("ADC A,H", 4, 1, "8C"),
"ADC A,L": Opcode("ADC A,L", 4, 1, "8D"),
"ADC A,N": Opcode("ADC A,N", 7, 2, "CE XX"),
"ADC HL,BC": Opcode("ADC HL,BC", 15, 2, "ED 4A"),
"ADC HL,DE": Opcode("ADC HL,DE", 15, 2, "ED 5A"),
"ADC HL,HL": Opcode("ADC HL,HL", 15, 2, "ED 6A"),
"ADC HL,SP": Opcode("ADC HL,SP", 15, 2, "ED 7A"),
"ADD A,(HL)": Opcode("ADD A,(HL)", 7, 1, "86"),
"ADD A,(IX+N)": Opcode("ADD A,(IX+N)", 19, 3, "DD 86 XX"),
"ADD A,(IY+N)": Opcode("ADD A,(IY+N)", 19, 3, "FD 86 XX"),
"ADD A,A": Opcode("ADD A,A", 4, 1, "87"),
"ADD A,C": Opcode("ADD A,C", 4, 1, "81"),
"ADD A,B": Opcode("ADD A,B", 4, 1, "80"),
"ADD A,E": Opcode("ADD A,E", 4, 1, "83"),
"ADD A,D": Opcode("ADD A,D", 4, 1, "82"),
"ADD A,H": Opcode("ADD A,H", 4, 1, "84"),
"ADD A,L": Opcode("ADD A,L", 4, 1, "85"),
"ADD A,N": Opcode("ADD A,N", 7, 2, "C6 XX"),
"ADD HL,BC": Opcode("ADD HL,BC", 11, 1, "09"),
"ADD HL,DE": Opcode("ADD HL,DE", 11, 1, "19"),
"ADD HL,HL": Opcode("ADD HL,HL", 11, 1, "29"),
"ADD HL,SP": Opcode("ADD HL,SP", 11, 1, "39"),
"ADD IX,BC": Opcode("ADD IX,BC", 15, 2, "DD 09"),
"ADD IX,DE": Opcode("ADD IX,DE", 15, 2, "DD 19"),
"ADD IX,IX": Opcode("ADD IX,IX", 15, 2, "DD 29"),
"ADD IX,SP": Opcode("ADD IX,SP", 15, 2, "DD 39"),
"ADD IY,BC": Opcode("ADD IY,BC", 15, 2, "FD 09"),
"ADD IY,DE": Opcode("ADD IY,DE", 15, 2, "FD 19"),
"ADD IY,IY": Opcode("ADD IY,IY", 15, 2, "FD 29"),
"ADD IY,SP": Opcode("ADD IY,SP", 15, 2, "FD 39"),
"AND (HL)": Opcode("AND (HL)", 7, 1, "A6"),
"AND (IX+N)": Opcode("AND (IX+N)", 19, 3, "DD A6 XX"),
"AND (IY+N)": Opcode("AND (IY+N)", 19, 3, "FD A6 XX"),
"AND A": Opcode("AND A", 4, 1, "A7"),
"AND C": Opcode("AND C", 4, 1, "A1"),
"AND B": Opcode("AND B", 4, 1, "A0"),
"AND E": Opcode("AND E", 4, 1, "A3"),
"AND D": Opcode("AND D", 4, 1, "A2"),
"AND H": Opcode("AND H", 4, 1, "A4"),
"AND L": Opcode("AND L", 4, 1, "A5"),
"AND N": Opcode("AND N", 7, 2, "E6 XX"),
"BIT 0,(HL)": Opcode("BIT 0,(HL)", 12, 2, "CB 46"),
"BIT 1,(HL)": Opcode("BIT 1,(HL)", 12, 2, "CB 4E"),
"BIT 2,(HL)": Opcode("BIT 2,(HL)", 12, 2, "CB 56"),
"BIT 3,(HL)": Opcode("BIT 3,(HL)", 12, 2, "CB 5E"),
"BIT 4,(HL)": Opcode("BIT 4,(HL)", 12, 2, "CB 66"),
"BIT 5,(HL)": Opcode("BIT 5,(HL)", 12, 2, "CB 6E"),
"BIT 6,(HL)": Opcode("BIT 6,(HL)", 12, 2, "CB 76"),
"BIT 7,(HL)": Opcode("BIT 7,(HL)", 12, 2, "CB 7E"),
"BIT 0,(IX+N)": Opcode("BIT 0,(IX+N)", 20, 4, "DD CB XX 46"),
"BIT 1,(IX+N)": Opcode("BIT 1,(IX+N)", 20, 4, "DD CB XX 4E"),
"BIT 2,(IX+N)": Opcode("BIT 2,(IX+N)", 20, 4, "DD CB XX 56"),
"BIT 3,(IX+N)": Opcode("BIT 3,(IX+N)", 20, 4, "DD CB XX 5E"),
"BIT 4,(IX+N)": Opcode("BIT 4,(IX+N)", 20, 4, "DD CB XX 66"),
"BIT 5,(IX+N)": Opcode("BIT 5,(IX+N)", 20, 4, "DD CB XX 6E"),
"BIT 6,(IX+N)": Opcode("BIT 6,(IX+N)", 20, 4, "DD CB XX 76"),
"BIT 7,(IX+N)": Opcode("BIT 7,(IX+N)", 20, 4, "DD CB XX 7E"),
"BIT 0,(IY+N)": Opcode("BIT 0,(IY+N)", 20, 4, "FD CB XX 46"),
"BIT 1,(IY+N)": Opcode("BIT 1,(IY+N)", 20, 4, "FD CB XX 4E"),
"BIT 2,(IY+N)": Opcode("BIT 2,(IY+N)", 20, 4, "FD CB XX 56"),
"BIT 3,(IY+N)": Opcode("BIT 3,(IY+N)", 20, 4, "FD CB XX 5E"),
"BIT 4,(IY+N)": Opcode("BIT 4,(IY+N)", 20, 4, "FD CB XX 66"),
"BIT 5,(IY+N)": Opcode("BIT 5,(IY+N)", 20, 4, "FD CB XX 6E"),
"BIT 6,(IY+N)": Opcode("BIT 6,(IY+N)", 20, 4, "FD CB XX 76"),
"BIT 7,(IY+N)": Opcode("BIT 7,(IY+N)", 20, 4, "FD CB XX 7E"),
"BIT 0,A": Opcode("BIT 0,A", 8, 2, "CB 47"),
"BIT 1,A": Opcode("BIT 1,A", 8, 2, "CB 4F"),
"BIT 2,A": Opcode("BIT 2,A", 8, 2, "CB 57"),
"BIT 3,A": Opcode("BIT 3,A", 8, 2, "CB 5F"),
"BIT 4,A": Opcode("BIT 4,A", 8, 2, "CB 67"),
"BIT 5,A": Opcode("BIT 5,A", 8, 2, "CB 6F"),
"BIT 6,A": Opcode("BIT 6,A", 8, 2, "CB 77"),
"BIT 7,A": Opcode("BIT 7,A", 8, 2, "CB 7F"),
"BIT 0,C": Opcode("BIT 0,C", 8, 2, "CB 41"),
"BIT 1,C": Opcode("BIT 1,C", 8, 2, "CB 49"),
"BIT 2,C": Opcode("BIT 2,C", 8, 2, "CB 51"),
"BIT 3,C": Opcode("BIT 3,C", 8, 2, "CB 59"),
"BIT 4,C": Opcode("BIT 4,C", 8, 2, "CB 61"),
"BIT 5,C": Opcode("BIT 5,C", 8, 2, "CB 69"),
"BIT 6,C": Opcode("BIT 6,C", 8, 2, "CB 71"),
"BIT 7,C": Opcode("BIT 7,C", 8, 2, "CB 79"),
"BIT 0,B": Opcode("BIT 0,B", 8, 2, "CB 40"),
"BIT 1,B": Opcode("BIT 1,B", 8, 2, "CB 48"),
"BIT 2,B": Opcode("BIT 2,B", 8, 2, "CB 50"),
"BIT 3,B": Opcode("BIT 3,B", 8, 2, "CB 58"),
"BIT 4,B": Opcode("BIT 4,B", 8, 2, "CB 60"),
"BIT 5,B": Opcode("BIT 5,B", 8, 2, "CB 68"),
"BIT 6,B": Opcode("BIT 6,B", 8, 2, "CB 70"),
"BIT 7,B": Opcode("BIT 7,B", 8, 2, "CB 78"),
"BIT 0,E": Opcode("BIT 0,E", 8, 2, "CB 43"),
"BIT 1,E": Opcode("BIT 1,E", 8, 2, "CB 4B"),
"BIT 2,E": Opcode("BIT 2,E", 8, 2, "CB 53"),
"BIT 3,E": Opcode("BIT 3,E", 8, 2, "CB 5B"),
"BIT 4,E": Opcode("BIT 4,E", 8, 2, "CB 63"),
"BIT 5,E": Opcode("BIT 5,E", 8, 2, "CB 6B"),
"BIT 6,E": Opcode("BIT 6,E", 8, 2, "CB 73"),
"BIT 7,E": Opcode("BIT 7,E", 8, 2, "CB 7B"),
"BIT 0,D": Opcode("BIT 0,D", 8, 2, "CB 42"),
"BIT 1,D": Opcode("BIT 1,D", 8, 2, "CB 4A"),
"BIT 2,D": Opcode("BIT 2,D", 8, 2, "CB 52"),
"BIT 3,D": Opcode("BIT 3,D", 8, 2, "CB 5A"),
"BIT 4,D": Opcode("BIT 4,D", 8, 2, "CB 62"),
"BIT 5,D": Opcode("BIT 5,D", 8, 2, "CB 6A"),
"BIT 6,D": Opcode("BIT 6,D", 8, 2, "CB 72"),
"BIT 7,D": Opcode("BIT 7,D", 8, 2, "CB 7A"),
"BIT 0,H": Opcode("BIT 0,H", 8, 2, "CB 44"),
"BIT 1,H": Opcode("BIT 1,H", 8, 2, "CB 4C"),
"BIT 2,H": Opcode("BIT 2,H", 8, 2, "CB 54"),
"BIT 3,H": Opcode("BIT 3,H", 8, 2, "CB 5C"),
"BIT 4,H": Opcode("BIT 4,H", 8, 2, "CB 64"),
"BIT 5,H": Opcode("BIT 5,H", 8, 2, "CB 6C"),
"BIT 6,H": Opcode("BIT 6,H", 8, 2, "CB 74"),
"BIT 7,H": Opcode("BIT 7,H", 8, 2, "CB 7C"),
"BIT 0,L": Opcode("BIT 0,L", 8, 2, "CB 45"),
"BIT 1,L": Opcode("BIT 1,L", 8, 2, "CB 4D"),
"BIT 2,L": Opcode("BIT 2,L", 8, 2, "CB 55"),
"BIT 3,L": Opcode("BIT 3,L", 8, 2, "CB 5D"),
"BIT 4,L": Opcode("BIT 4,L", 8, 2, "CB 65"),
"BIT 5,L": Opcode("BIT 5,L", 8, 2, "CB 6D"),
"BIT 6,L": Opcode("BIT 6,L", 8, 2, "CB 75"),
"BIT 7,L": Opcode("BIT 7,L", 8, 2, "CB 7D"),
"CALL C,NN": Opcode("CALL C,NN", 17, 3, "DC XX XX"),
"CALL M,NN": Opcode("CALL M,NN", 17, 3, "FC XX XX"),
"CALL NC,NN": Opcode("CALL NC,NN", 17, 3, "D4 XX XX"),
"CALL NN": Opcode("CALL NN", 17, 3, "CD XX XX"),
"CALL NZ,NN": Opcode("CALL NZ,NN", 17, 3, "C4 XX XX"),
"CALL P,NN": Opcode("CALL P,NN", 17, 3, "F4 XX XX"),
"CALL PE,NN": Opcode("CALL PE,NN", 17, 3, "EC XX XX"),
"CALL PO,NN": Opcode("CALL PO,NN", 17, 3, "E4 XX XX"),
"CALL Z,NN": Opcode("CALL Z,NN", 17, 3, "CC XX XX"),
"CCF": Opcode("CCF", 4, 1, "3F"),
"CP (HL)": Opcode("CP (HL)", 7, 1, "BE"),
"CP (IX+N)": Opcode("CP (IX+N)", 19, 3, "DD BE XX"),
"CP (IY+N)": Opcode("CP (IY+N)", 19, 3, "FD BE XX"),
"CP A": Opcode("CP A", 4, 1, "BF"),
"CP C": Opcode("CP C", 4, 1, "B9"),
"CP B": Opcode("CP B", 4, 1, "B8"),
"CP E": Opcode("CP E", 4, 1, "BB"),
"CP D": Opcode("CP D", 4, 1, "BA"),
"CP H": Opcode("CP H", 4, 1, "BC"),
"CP L": Opcode("CP L", 4, 1, "BD"),
"CP N": Opcode("CP N", 7, 2, "FE XX"),
"CPD": Opcode("CPD", 16, 2, "ED A9"),
"CPDR": Opcode("CPDR", 21, 2, "ED B9"),
"CPI": Opcode("CPI", 16, 2, "ED A1"),
"CPIR": Opcode("CPIR", 21, 2, "ED B1"),
"CPL": Opcode("CPL", 4, 1, "2F"),
"DAA": Opcode("DAA", 4, 1, "27"),
"DEC (HL)": Opcode("DEC (HL)", 11, 1, "35"),
"DEC (IX+N)": Opcode("DEC (IX+N)", 23, 3, "DD 35 XX"),
"DEC (IY+N)": Opcode("DEC (IY+N)", 23, 3, "FD 35 XX"),
"DEC A": Opcode("DEC A", 4, 1, "3D"),
"DEC B": Opcode("DEC B", 4, 1, "05"),
"DEC BC": Opcode("DEC BC", 6, 1, "0B"),
"DEC C": Opcode("DEC C", 4, 1, "0D"),
"DEC D": Opcode("DEC D", 4, 1, "15"),
"DEC DE": Opcode("DEC DE", 6, 1, "1B"),
"DEC E": Opcode("DEC E", 4, 1, "1D"),
"DEC H": Opcode("DEC H", 4, 1, "25"),
"DEC HL": Opcode("DEC HL", 6, 1, "2B"),
"DEC IX": Opcode("DEC IX", 10, 2, "DD 2B"),
"DEC IY": Opcode("DEC IY", 10, 2, "FD 2B"),
"DEC L": Opcode("DEC L", 4, 1, "2D"),
"DEC SP": Opcode("DEC SP", 6, 1, "3B"),
"DI": Opcode("DI", 4, 1, "F3"),
"DJNZ N": Opcode("DJNZ N", 13, 2, "10 XX"),
"EI": Opcode("EI", 4, 1, "FB"),
"EX (SP),HL": Opcode("EX (SP),HL", 19, 1, "E3"),
"EX (SP),IX": Opcode("EX (SP),IX", 23, 2, "DD E3"),
"EX (SP),IY": Opcode("EX (SP),IY", 23, 2, "FD E3"),
"EX AF,AF'": Opcode("EX AF,AF'", 4, 1, "08"),
"EX DE,HL": Opcode("EX DE,HL", 4, 1, "EB"),
"EXX": Opcode("EXX", 4, 1, "D9"),
"HALT": Opcode("HALT", 4, 1, "76"),
"IM 0": Opcode("IM 0", 8, 2, "ED 46"),
"IM 1": Opcode("IM 1", 8, 2, "ED 56"),
"IM 2": Opcode("IM 2", 8, 2, "ED 5E"),
"IN A,(C)": Opcode("IN A,(C)", 12, 2, "ED 78"),
"IN A,(N)": Opcode("IN A,(N)", 11, 2, "DB XX"),
"IN B,(C)": Opcode("IN B,(C)", 12, 2, "ED 40"),
"IN C,(C)": Opcode("IN C,(C)", 12, 2, "ED 48"),
"IN D,(C)": Opcode("IN D,(C)", 12, 2, "ED 50"),
"IN E,(C)": Opcode("IN E,(C)", 12, 2, "ED 58"),
"IN H,(C)": Opcode("IN H,(C)", 12, 2, "ED 60"),
"IN L,(C)": Opcode("IN L,(C)", 12, 2, "ED 68"),
"INC (HL)": Opcode("INC (HL)", 11, 1, "34"),
"INC (IX+N)": Opcode("INC (IX+N)", 23, 3, "DD 34 XX"),
"INC (IY+N)": Opcode("INC (IY+N)", 23, 3, "FD 34 XX"),
"INC A": Opcode("INC A", 4, 1, "3C"),
"INC B": Opcode("INC B", 4, 1, "04"),
"INC BC": Opcode("INC BC", 6, 1, "03"),
"INC C": Opcode("INC C", 4, 1, "0C"),
"INC D": Opcode("INC D", 4, 1, "14"),
"INC DE": Opcode("INC DE", 6, 1, "13"),
"INC E": Opcode("INC E", 4, 1, "1C"),
"INC H": Opcode("INC H", 4, 1, "24"),
"INC HL": Opcode("INC HL", 6, 1, "23"),
"INC IX": Opcode("INC IX", 10, 2, "DD 23"),
"INC IY": Opcode("INC IY", 10, 2, "FD 23"),
"INC L": Opcode("INC L", 4, 1, "2C"),
"INC SP": Opcode("INC SP", 6, 1, "33"),
"IND": Opcode("IND", 16, 2, "ED AA"),
"INDR": Opcode("INDR", 21, 2, "ED BA"),
"INI": Opcode("INI", 16, 2, "ED A2"),
"INIR": Opcode("INIR", 21, 2, "ED B2"),
"JP NN": Opcode("JP NN", 10, 3, "C3 XX XX"),
"JP (HL)": Opcode("JP (HL)", 4, 1, "E9"),
"JP (IX)": Opcode("JP (IX)", 8, 2, "DD E9"),
"JP (IY)": Opcode("JP (IY)", 8, 2, "FD E9"),
"JP C,NN": Opcode("JP C,NN", 10, 3, "DA XX XX"),
"JP M,NN": Opcode("JP M,NN", 10, 3, "FA XX XX"),
"JP NC,NN": Opcode("JP NC,NN", 10, 3, "D2 XX XX"),
"JP NZ,NN": Opcode("JP NZ,NN", 10, 3, "C2 XX XX"),
"JP P,NN": Opcode("JP P,NN", 10, 3, "F2 XX XX"),
"JP PE,NN": Opcode("JP PE,NN", 10, 3, "EA XX XX"),
"JP PO,NN": Opcode("JP PO,NN", 10, 3, "E2 XX XX"),
"JP Z,NN": Opcode("JP Z,NN", 10, 3, "CA XX XX"),
"JR N": Opcode("JR N", 12, 2, "18 XX"),
"JR C,N": Opcode("JR C,N", 12, 2, "38 XX"),
"JR NC,N": Opcode("JR NC,N", 12, 2, "30 XX"),
"JR NZ,N": Opcode("JR NZ,N", 12, 2, "20 XX"),
"JR Z,N": Opcode("JR Z,N", 12, 2, "28 XX"),
"LD (BC),A": Opcode("LD (BC),A", 7, 1, "02"),
"LD (DE),A": Opcode("LD (DE),A", 7, 1, "12"),
"LD (HL),A": Opcode("LD (HL),A", 7, 1, "77"),
"LD (HL),C": Opcode("LD (HL),C", 7, 1, "71"),
"LD (HL),B": Opcode("LD (HL),B", 7, 1, "70"),
"LD (HL),E": Opcode("LD (HL),E", 7, 1, "73"),
"LD (HL),D": Opcode("LD (HL),D", 7, 1, "72"),
"LD (HL),H": Opcode("LD (HL),H", 7, 1, "74"),
"LD (HL),L": Opcode("LD (HL),L", 7, 1, "75"),
"LD (HL),N": Opcode("LD (HL),N", 10, 2, "36 XX"),
"LD (IX+N),A": Opcode("LD (IX+N),A", 19, 3, "DD 77 XX"),
"LD (IX+N),C": Opcode("LD (IX+N),C", 19, 3, "DD 71 XX"),
"LD (IX+N),B": Opcode("LD (IX+N),B", 19, 3, "DD 70 XX"),
"LD (IX+N),E": Opcode("LD (IX+N),E", 19, 3, "DD 73 XX"),
"LD (IX+N),D": Opcode("LD (IX+N),D", 19, 3, "DD 72 XX"),
"LD (IX+N),H": Opcode("LD (IX+N),H", 19, 3, "DD 74 XX"),
"LD (IX+N),L": Opcode("LD (IX+N),L", 19, 3, "DD 75 XX"),
"LD (IX+N),N": Opcode("LD (IX+N),N", 19, 4, "DD 36 XX XX"),
"LD (IY+N),A": Opcode("LD (IY+N),A", 19, 3, "FD 77 XX"),
"LD (IY+N),C": Opcode("LD (IY+N),C", 19, 3, "FD 71 XX"),
"LD (IY+N),B": Opcode("LD (IY+N),B", 19, 3, "FD 70 XX"),
"LD (IY+N),E": Opcode("LD (IY+N),E", 19, 3, "FD 73 XX"),
"LD (IY+N),D": Opcode("LD (IY+N),D", 19, 3, "FD 72 XX"),
"LD (IY+N),H": Opcode("LD (IY+N),H", 19, 3, "FD 74 XX"),
"LD (IY+N),L": Opcode("LD (IY+N),L", 19, 3, "FD 75 XX"),
"LD (IY+N),N": Opcode("LD (IY+N),N", 19, 4, "FD 36 XX XX"),
"LD (NN),A": Opcode("LD (NN),A", 13, 3, "32 XX XX"),
"LD (NN),BC": Opcode("LD (NN),BC", 20, 4, "ED 43 XX XX"),
"LD (NN),DE": Opcode("LD (NN),DE", 20, 4, "ED 53 XX XX"),
"LD (NN),HL": Opcode("LD (NN),HL", 16, 3, "22 XX XX"),
"LD (NN),IX": Opcode("LD (NN),IX", 20, 4, "DD 22 XX XX"),
"LD (NN),IY": Opcode("LD (NN),IY", 20, 4, "FD 22 XX XX"),
"LD (NN),SP": Opcode("LD (NN),SP", 20, 4, "ED 73 XX XX"),
"LD A,(BC)": Opcode("LD A,(BC)", 7, 1, "0A"),
"LD A,(DE)": Opcode("LD A,(DE)", 7, 1, "1A"),
"LD A,(HL)": Opcode("LD A,(HL)", 7, 1, "7E"),
"LD A,(IX+N)": Opcode("LD A,(IX+N)", 19, 3, "DD 7E XX"),
"LD A,(IY+N)": Opcode("LD A,(IY+N)", 19, 3, "FD 7E XX"),
"LD A,(NN)": Opcode("LD A,(NN)", 13, 3, "3A XX XX"),
"LD A,A": Opcode("LD A,A", 4, 1, "7F"),
"LD A,C": Opcode("LD A,C", 4, 1, "79"),
"LD A,B": Opcode("LD A,B", 4, 1, "78"),
"LD A,E": Opcode("LD A,E", 4, 1, "7B"),
"LD A,D": Opcode("LD A,D", 4, 1, "7A"),
"LD A,H": Opcode("LD A,H", 4, 1, "7C"),
"LD A,L": Opcode("LD A,L", 4, 1, "7D"),
"LD A,I": Opcode("LD A,I", 9, 2, "ED 57"),
"LD A,N": Opcode("LD A,N", 7, 2, "3E XX"),
"LD A,R": Opcode("LD A,R", 4, 2, "ED 5F"),
"LD B,(HL)": Opcode("LD B,(HL)", 7, 1, "46"),
"LD B,(IX+N)": Opcode("LD B,(IX+N)", 19, 3, "DD 46 XX"),
"LD B,(IY+N)": Opcode("LD B,(IY+N)", 19, 3, "FD 46 XX"),
"LD B,A": Opcode("LD B,A", 4, 1, "47"),
"LD B,C": Opcode("LD B,C", 4, 1, "41"),
"LD B,B": Opcode("LD B,B", 4, 1, "40"),
"LD B,E": Opcode("LD B,E", 4, 1, "43"),
"LD B,D": Opcode("LD B,D", 4, 1, "42"),
"LD B,H": Opcode("LD B,H", 4, 1, "44"),
"LD B,L": Opcode("LD B,L", 4, 1, "45"),
"LD B,N": Opcode("LD B,N", 7, 2, "06 XX"),
"LD BC,(NN)": Opcode("LD BC,(NN)", 20, 4, "ED 4B XX XX"),
"LD BC,NN": Opcode("LD BC,NN", 10, 3, "01 XX XX"),
"LD C,(HL)": Opcode("LD C,(HL)", 7, 1, "4E"),
"LD C,(IX+N)": Opcode("LD C,(IX+N)", 19, 3, "DD 4E XX"),
"LD C,(IY+N)": Opcode("LD C,(IY+N)", 19, 3, "FD 4E XX"),
"LD C,A": Opcode("LD C,A", 4, 1, "4F"),
"LD C,C": Opcode("LD C,C", 4, 1, "49"),
"LD C,B": Opcode("LD C,B", 4, 1, "48"),
"LD C,E": Opcode("LD C,E", 4, 1, "4B"),
"LD C,D": Opcode("LD C,D", 4, 1, "4A"),
"LD C,H": Opcode("LD C,H", 4, 1, "4C"),
"LD C,L": Opcode("LD C,L", 4, 1, "4D"),
"LD C,N": Opcode("LD C,N", 7, 2, "0E XX"),
"LD D,(HL)": Opcode("LD D,(HL)", 7, 1, "56"),
"LD D,(IX+N)": Opcode("LD D,(IX+N)", 19, 3, "DD 56 XX"),
"LD D,(IY+N)": Opcode("LD D,(IY+N)", 19, 3, "FD 56 XX"),
"LD D,A": Opcode("LD D,A", 4, 1, "57"),
"LD D,C": Opcode("LD D,C", 4, 1, "51"),
"LD D,B": Opcode("LD D,B", 4, 1, "50"),
"LD D,E": Opcode("LD D,E", 4, 1, "53"),
"LD D,D": Opcode("LD D,D", 4, 1, "52"),
"LD D,H": Opcode("LD D,H", 4, 1, "54"),
"LD D,L": Opcode("LD D,L", 4, 1, "55"),
"LD D,N": Opcode("LD D,N", 7, 2, "16 XX"),
"LD DE,(NN)": Opcode("LD DE,(NN)", 20, 4, "ED 5B XX XX"),
"LD DE,NN": Opcode("LD DE,NN", 10, 3, "11 XX XX"),
"LD E,(HL)": Opcode("LD E,(HL)", 7, 1, "5E"),
"LD E,(IX+N)": Opcode("LD E,(IX+N)", 19, 3, "DD 5E XX"),
"LD E,(IY+N)": Opcode("LD E,(IY+N)", 19, 3, "FD 5E XX"),
"LD E,A": Opcode("LD E,A", 4, 1, "5F"),
"LD E,C": Opcode("LD E,C", 4, 1, "59"),
"LD E,B": Opcode("LD E,B", 4, 1, "58"),
"LD E,E": Opcode("LD E,E", 4, 1, "5B"),
"LD E,D": Opcode("LD E,D", 4, 1, "5A"),
"LD E,H": Opcode("LD E,H", 4, 1, "5C"),
"LD E,L": Opcode("LD E,L", 4, 1, "5D"),
"LD E,N": Opcode("LD E,N", 7, 2, "1E XX"),
"LD H,(HL)": Opcode("LD H,(HL)", 7, 1, "66"),
"LD H,(IX+N)": Opcode("LD H,(IX+N)", 19, 3, "DD 66 XX"),
"LD H,(IY+N)": Opcode("LD H,(IY+N)", 19, 3, "FD 66 XX"),
"LD H,A": Opcode("LD H,A", 4, 1, "67"),
"LD H,C": Opcode("LD H,C", 4, 1, "61"),
"LD H,B": Opcode("LD H,B", 4, 1, "60"),
"LD H,E": Opcode("LD H,E", 4, 1, "63"),
"LD H,D": Opcode("LD H,D", 4, 1, "62"),
"LD H,H": Opcode("LD H,H", 4, 1, "64"),
"LD H,L": Opcode("LD H,L", 4, 1, "65"),
"LD H,N": Opcode("LD H,N", 7, 2, "26 XX"),
"LD HL,(NN)": Opcode("LD HL,(NN)", 20, 3, "2A XX XX"),
"LD HL,NN": Opcode("LD HL,NN", 10, 3, "21 XX XX"),
"LD I,A": Opcode("LD I,A", 9, 2, "ED 47"),
"LD IX,(NN)": Opcode("LD IX,(NN)", 20, 4, "DD 2A XX XX"),
"LD IX,NN": Opcode("LD IX,NN", 14, 4, "DD 21 XX XX"),
"LD IY,(NN)": Opcode("LD IY,(NN)", 20, 4, "FD 2A XX XX"),
"LD IY,NN": Opcode("LD IY,NN", 14, 4, "FD 21 XX XX"),
"LD L,(HL)": Opcode("LD L,(HL)", 7, 1, "6E"),
"LD L,(IX+N)": Opcode("LD L,(IX+N)", 19, 3, "DD 6E XX"),
"LD L,(IY+N)": Opcode("LD L,(IY+N)", 19, 3, "FD 6E XX"),
"LD L,A": Opcode("LD L,A", 4, 1, "6F"),
"LD L,C": Opcode("LD L,C", 4, 1, "69"),
"LD L,B": Opcode("LD L,B", 4, 1, "68"),
"LD L,E": Opcode("LD L,E", 4, 1, "6B"),
"LD L,D": Opcode("LD L,D", 4, 1, "6A"),
"LD L,H": Opcode("LD L,H", 4, 1, "6C"),
"LD L,L": Opcode("LD L,L", 4, 1, "6D"),
"LD L,N": Opcode("LD L,N", 7, 2, "2E XX"),
"LD R,A": Opcode("LD R,A", 4, 2, "ED 4F"),
"LD SP,(NN)": Opcode("LD SP,(NN)", 20, 4, "ED 7B XX XX"),
"LD SP,HL": Opcode("LD SP,HL", 6, 1, "F9"),
"LD SP,IX": Opcode("LD SP,IX", 10, 2, "DD F9"),
"LD SP,IY": Opcode("LD SP,IY", 10, 2, "FD F9"),
"LD SP,NN": Opcode("LD SP,NN", 10, 3, "31 XX XX"),
"LDD": Opcode("LDD", 16, 2, "ED A8"),
"LDDR": Opcode("LDDR", 21, 2, "ED B8"),
"LDI": Opcode("LDI", 16, 2, "ED A0"),
"LDIR": Opcode("LDIR", 21, 2, "ED B0"),
"NEG": Opcode("NEG", 8, 2, "ED 44"),
"NOP": Opcode("NOP", 4, 1, "00"),
"OR (HL)": Opcode("OR (HL)", 7, 1, "B6"),
"OR (IX+N)": Opcode("OR (IX+N)", 19, 3, "DD B6 XX"),
"OR (IY+N)": Opcode("OR (IY+N)", 19, 3, "FD B6 XX"),
"OR A": Opcode("OR A", 4, 1, "B7"),
"OR C": Opcode("OR C", 4, 1, "B1"),
"OR B": Opcode("OR B", 4, 1, "B0"),
"OR E": Opcode("OR E", 4, 1, "B3"),
"OR D": Opcode("OR D", 4, 1, "B2"),
"OR H": Opcode("OR H", 4, 1, "B4"),
"OR L": Opcode("OR L", 4, 1, "B5"),
"OR N": Opcode("OR N", 7, 2, "F6 XX"),
"OTDR": Opcode("OTDR", 21, 2, "ED BB"),
"OTIR": Opcode("OTIR", 21, 2, "ED B3"),
"OUT (C),A": Opcode("OUT (C),A", 12, 2, "ED 79"),
"OUT (C),B": Opcode("OUT (C),B", 12, 2, "ED 41"),
"OUT (C),C": Opcode("OUT (C),C", 12, 2, "ED 49"),
"OUT (C),D": Opcode("OUT (C),D", 12, 2, "ED 51"),
"OUT (C),E": Opcode("OUT (C),E", 12, 2, "ED 59"),
"OUT (C),H": Opcode("OUT (C),H", 12, 2, "ED 61"),
"OUT (C),L": Opcode("OUT (C),L", 12, 2, "ED 69"),
"OUT (N),A": Opcode("OUT (N),A", 11, 2, "D3 XX"),
"OUTD": Opcode("OUTD", 16, 2, "ED AB"),
"OUTI": Opcode("OUTI", 16, 2, "ED A3"),
"POP AF": Opcode("POP AF", 10, 1, "F1"),
"POP BC": Opcode("POP BC", 10, 1, "C1"),
"POP DE": Opcode("POP DE", 10, 1, "D1"),
"POP HL": Opcode("POP HL", 10, 1, "E1"),
"POP IX": Opcode("POP IX", 14, 2, "DD E1"),
"POP IY": Opcode("POP IY", 14, 2, "FD E1"),
"PUSH AF": Opcode("PUSH AF", 11, 1, "F5"),
"PUSH BC": Opcode("PUSH BC", 11, 1, "C5"),
"PUSH DE": Opcode("PUSH DE", 11, 1, "D5"),
"PUSH HL": Opcode("PUSH HL", 11, 1, "E5"),
"PUSH IX": Opcode("PUSH IX", 15, 2, "DD E5"),
"PUSH IY": Opcode("PUSH IY", 15, 2, "FD E5"),
"RES 0,(HL)": Opcode("RES 0,(HL)", 15, 2, "CB 86"),
"RES 1,(HL)": Opcode("RES 1,(HL)", 15, 2, "CB 8E"),
"RES 2,(HL)": Opcode("RES 2,(HL)", 15, 2, "CB 96"),
"RES 3,(HL)": Opcode("RES 3,(HL)", 15, 2, "CB 9E"),
"RES 4,(HL)": Opcode("RES 4,(HL)", 15, 2, "CB A6"),
"RES 5,(HL)": Opcode("RES 5,(HL)", 15, 2, "CB AE"),
"RES 6,(HL)": Opcode("RES 6,(HL)", 15, 2, "CB B6"),
"RES 7,(HL)": Opcode("RES 7,(HL)", 15, 2, "CB BE"),
"RES 0,(IX+N)": Opcode("RES 0,(IX+N)", 23, 4, "DD CB XX 86"),
"RES 1,(IX+N)": Opcode("RES 1,(IX+N)", 23, 4, "DD CB XX 8E"),
"RES 2,(IX+N)": Opcode("RES 2,(IX+N)", 23, 4, "DD CB XX 96"),
"RES 3,(IX+N)": Opcode("RES 3,(IX+N)", 23, 4, "DD CB XX 9E"),
"RES 4,(IX+N)": Opcode("RES 4,(IX+N)", 23, 4, "DD CB XX A6"),
"RES 5,(IX+N)": Opcode("RES 5,(IX+N)", 23, 4, "DD CB XX AE"),
"RES 6,(IX+N)": Opcode("RES 6,(IX+N)", 23, 4, "DD CB XX B6"),
"RES 7,(IX+N)": Opcode("RES 7,(IX+N)", 23, 4, "DD CB XX BE"),
"RES 0,(IY+N)": Opcode("RES 0,(IY+N)", 23, 4, "FD CB XX 86"),
"RES 1,(IY+N)": Opcode("RES 1,(IY+N)", 23, 4, "FD CB XX 8E"),
"RES 2,(IY+N)": Opcode("RES 2,(IY+N)", 23, 4, "FD CB XX 96"),
"RES 3,(IY+N)": Opcode("RES 3,(IY+N)", 23, 4, "FD CB XX 9E"),
"RES 4,(IY+N)": Opcode("RES 4,(IY+N)", 23, 4, "FD CB XX A6"),
"RES 5,(IY+N)": Opcode("RES 5,(IY+N)", 23, 4, "FD CB XX AE"),
"RES 6,(IY+N)": Opcode("RES 6,(IY+N)", 23, 4, "FD CB XX B6"),
"RES 7,(IY+N)": Opcode("RES 7,(IY+N)", 23, 4, "FD CB XX BE"),
"RES 0,A": Opcode("RES 0,A", 8, 2, "CB 87"),
"RES 1,A": Opcode("RES 1,A", 8, 2, "CB 8F"),
"RES 2,A": Opcode("RES 2,A", 8, 2, "CB 97"),
"RES 3,A": Opcode("RES 3,A", 8, 2, "CB 9F"),
"RES 4,A": Opcode("RES 4,A", 8, 2, "CB A7"),
"RES 5,A": Opcode("RES 5,A", 8, 2, "CB AF"),
"RES 6,A": Opcode("RES 6,A", 8, 2, "CB B7"),
"RES 7,A": Opcode("RES 7,A", 8, 2, "CB BF"),
"RES 0,C": Opcode("RES 0,C", 8, 2, "CB 81"),
"RES 1,C": Opcode("RES 1,C", 8, 2, "CB 89"),
"RES 2,C": Opcode("RES 2,C", 8, 2, "CB 91"),
"RES 3,C": Opcode("RES 3,C", 8, 2, "CB 99"),
"RES 4,C": Opcode("RES 4,C", 8, 2, "CB A1"),
"RES 5,C": Opcode("RES 5,C", 8, 2, "CB A9"),
"RES 6,C": Opcode("RES 6,C", 8, 2, "CB B1"),
"RES 7,C": Opcode("RES 7,C", 8, 2, "CB B9"),
"RES 0,B": Opcode("RES 0,B", 8, 2, "CB 80"),
"RES 1,B": Opcode("RES 1,B", 8, 2, "CB 88"),
"RES 2,B": Opcode("RES 2,B", 8, 2, "CB 90"),
"RES 3,B": Opcode("RES 3,B", 8, 2, "CB 98"),
"RES 4,B": Opcode("RES 4,B", 8, 2, "CB A0"),
"RES 5,B": Opcode("RES 5,B", 8, 2, "CB A8"),
"RES 6,B": Opcode("RES 6,B", 8, 2, "CB B0"),
"RES 7,B": Opcode("RES 7,B", 8, 2, "CB B8"),
"RES 0,E": Opcode("RES 0,E", 8, 2, "CB 83"),
"RES 1,E": Opcode("RES 1,E", 8, 2, "CB 8B"),
"RES 2,E": Opcode("RES 2,E", 8, 2, "CB 93"),
"RES 3,E": Opcode("RES 3,E", 8, 2, "CB 9B"),
"RES 4,E": Opcode("RES 4,E", 8, 2, "CB A3"),
"RES 5,E": Opcode("RES 5,E", 8, 2, "CB AB"),
"RES 6,E": Opcode("RES 6,E", 8, 2, "CB B3"),
"RES 7,E": Opcode("RES 7,E", 8, 2, "CB BB"),
"RES 0,D": Opcode("RES 0,D", 8, 2, "CB 82"),
"RES 1,D": Opcode("RES 1,D", 8, 2, "CB 8A"),
"RES 2,D": Opcode("RES 2,D", 8, 2, "CB 92"),
"RES 3,D": Opcode("RES 3,D", 8, 2, "CB 9A"),
"RES 4,D": Opcode("RES 4,D", 8, 2, "CB A2"),
"RES 5,D": Opcode("RES 5,D", 8, 2, "CB AA"),
"RES 6,D": Opcode("RES 6,D", 8, 2, "CB B2"),
"RES 7,D": Opcode("RES 7,D", 8, 2, "CB BA"),
"RES 0,H": Opcode("RES 0,H", 8, 2, "CB 84"),
"RES 1,H": Opcode("RES 1,H", 8, 2, "CB 8C"),
"RES 2,H": Opcode("RES 2,H", 8, 2, "CB 94"),
"RES 3,H": Opcode("RES 3,H", 8, 2, "CB 9C"),
"RES 4,H": Opcode("RES 4,H", 8, 2, "CB A4"),
"RES 5,H": Opcode("RES 5,H", 8, 2, "CB AC"),
"RES 6,H": Opcode("RES 6,H", 8, 2, "CB B4"),
"RES 7,H": Opcode("RES 7,H", 8, 2, "CB BC"),
"RES 0,L": Opcode("RES 0,L", 8, 2, "CB 85"),
"RES 1,L": Opcode("RES 1,L", 8, 2, "CB 8D"),
"RES 2,L": Opcode("RES 2,L", 8, 2, "CB 95"),
"RES 3,L": Opcode("RES 3,L", 8, 2, "CB 9D"),
"RES 4,L": Opcode("RES 4,L", 8, 2, "CB A5"),
"RES 5,L": Opcode("RES 5,L", 8, 2, "CB AD"),
"RES 6,L": Opcode("RES 6,L", 8, 2, "CB B5"),
"RES 7,L": Opcode("RES 7,L", 8, 2, "CB BD"),
"RET": Opcode("RET", 10, 1, "C9"),
"RET C": Opcode("RET C", 11, 1, "D8"),
"RET M": Opcode("RET M", 11, 1, "F8"),
"RET NC": Opcode("RET NC", 11, 1, "D0"),
"RET NZ": Opcode("RET NZ", 11, 1, "C0"),
"RET P": Opcode("RET P", 11, 1, "F0"),
"RET PE": Opcode("RET PE", 11, 1, "E8"),
"RET PO": Opcode("RET PO", 11, 1, "E0"),
"RET Z": Opcode("RET Z", 11, 1, "C8"),
"RETI": Opcode("RETI", 14, 2, "ED 4D"),
"RETN": Opcode("RETN", 14, 2, "ED 45"),
"RL (HL)": Opcode("RL (HL)", 15, 2, "CB 16"),
"RL A": Opcode("RL A", 8, 2, "CB 17"),
"RL C": Opcode("RL C", 8, 2, "CB 11"),
"RL B": Opcode("RL B", 8, 2, "CB 10"),
"RL E": Opcode("RL E", 8, 2, "CB 13"),
"RL D": Opcode("RL D", 8, 2, "CB 12"),
"RL H": Opcode("RL H", 8, 2, "CB 14"),
"RL L": Opcode("RL L", 8, 2, "CB 15"),
"RL (IX+N)": Opcode("RL (IX+N)", 23, 4, "DD CB XX 16"),
"RL (IY+N)": Opcode("RL (IY+N)", 23, 4, "FD CB XX 16"),
"RLA": Opcode("RLA", 4, 1, "17"),
"RLC (HL)": Opcode("RLC (HL)", 15, 2, "CB 06"),
"RLC (IX+N)": Opcode("RLC (IX+N)", 23, 4, "DD CB XX 06"),
"RLC (IY+N)": Opcode("RLC (IY+N)", 23, 4, "FD CB XX 06"),
"RLC A": Opcode("RLC A", 8, 2, "CB 07"),
"RLC C": Opcode("RLC C", 8, 2, "CB 01"),
"RLC B": Opcode("RLC B", 8, 2, "CB 00"),
"RLC E": Opcode("RLC E", 8, 2, "CB 03"),
"RLC D": Opcode("RLC D", 8, 2, "CB 02"),
"RLC H": Opcode("RLC H", 8, 2, "CB 04"),
"RLC L": Opcode("RLC L", 8, 2, "CB 05"),
"RLCA": Opcode("RLCA", 4, 1, "07"),
"RLD": Opcode("RLD", 18, 2, "ED 6F"),
"RR (HL)": Opcode("RR (HL)", 15, 2, "CB 1E"),
"RR A": Opcode("RR A", 8, 2, "CB 1F"),
"RR C": Opcode("RR C", 8, 2, "CB 19"),
"RR B": Opcode("RR B", 8, 2, "CB 18"),
"RR E": Opcode("RR E", 8, 2, "CB 1B"),
"RR D": Opcode("RR D", 8, 2, "CB 1A"),
"RR H": Opcode("RR H", 8, 2, "CB 1C"),
"RR L": Opcode("RR L", 8, 2, "CB 1D"),
"RR (IX+N)": Opcode("RR (IX+N)", 23, 4, "DD CB XX 1E"),
"RR (IY+N)": Opcode("RR (IY+N)", 23, 4, "FD CB XX 1E"),
"RRA": Opcode("RRA", 4, 1, "1F"),
"RRC (HL)": Opcode("RRC (HL)", 15, 2, "CB 0E"),
"RRC (IX+N)": Opcode("RRC (IX+N)", 23, 4, "DD CB XX 0E"),
"RRC (IY+N)": Opcode("RRC (IY+N)", 23, 4, "FD CB XX 0E"),
"RRC A": Opcode("RRC A", 8, 2, "CB 0F"),
"RRC C": Opcode("RRC C", 8, 2, "CB 09"),
"RRC B": Opcode("RRC B", 8, 2, "CB 08"),
"RRC E": Opcode("RRC E", 8, 2, "CB 0B"),
"RRC D": Opcode("RRC D", 8, 2, "CB 0A"),
"RRC H": Opcode("RRC H", 8, 2, "CB 0C"),
"RRC L": Opcode("RRC L", 8, 2, "CB 0D"),
"RRCA": Opcode("RRCA", 4, 1, "0F"),
"RRD": Opcode("RRD", 18, 2, "ED 67"),
"RST 0H": Opcode("RST 0H", 11, 1, "C7"),
"RST 8H": Opcode("RST 8H", 11, 1, "CF"),
"RST 10H": Opcode("RST 10H", 11, 1, "D7"),
"RST 18H": Opcode("RST 18H", 11, 1, "DF"),
"RST 20H": Opcode("RST 20H", 11, 1, "E7"),
"RST 28H": Opcode("RST 28H", 11, 1, "EF"),
"RST 30H": Opcode("RST 30H", 11, 1, "F7"),
"RST 38H": Opcode("RST 38H", 11, 1, "FF"),
"SBC A,(HL)": Opcode("SBC (HL)", 7, 1, "9E"),
"SBC A,(IX+N)": Opcode("SBC A,(IX+N)", 19, 3, "DD 9E XX"),
"SBC A,(IY+N)": Opcode("SBC A,(IY+N)", 19, 3, "FD 9E XX"),
"SBC A,N": Opcode("SBC A,N", 7, 2, "DE XX"),
"SBC A,A": Opcode("SBC A,A", 4, 1, "9F"),
"SBC A,B": Opcode("SBC A,B", 4, 1, "98"),
"SBC A,C": Opcode("SBC A,C", 4, 1, "99"),
"SBC A,D": Opcode("SBC A,D", 4, 1, "9A"),
"SBC A,E": Opcode("SBC A,E", 4, 1, "9B"),
"SBC A,H": Opcode("SBC A,H", 4, 1, "9C"),
"SBC A,L": Opcode("SBC A,L", 4, 1, "9D"),
"SBC HL,BC": Opcode("SBC HL,BC", 15, 2, "ED 42"),
"SBC HL,DE": Opcode("SBC HL,DE", 15, 2, "ED 52"),
"SBC HL,HL": Opcode("SBC HL,HL", 15, 2, "ED 62"),
"SBC HL,SP": Opcode("SBC HL,SP", 15, 2, "ED 72"),
"SCF": Opcode("SCF", 4, 1, "37"),
"SET 0,(HL)": Opcode("SET 0,(HL)", 15, 2, "CB C6"),
"SET 1,(HL)": Opcode("SET 1,(HL)", 15, 2, "CB CE"),
"SET 2,(HL)": Opcode("SET 2,(HL)", 15, 2, "CB D6"),
"SET 3,(HL)": Opcode("SET 3,(HL)", 15, 2, "CB DE"),
"SET 4,(HL)": Opcode("SET 4,(HL)", 15, 2, "CB E6"),
"SET 5,(HL)": Opcode("SET 5,(HL)", 15, 2, "CB EE"),
"SET 6,(HL)": Opcode("SET 6,(HL)", 15, 2, "CB F6"),
"SET 7,(HL)": Opcode("SET 7,(HL)", 15, 2, "CB FE"),
"SET 0,(IX+N)": Opcode("SET 0,(IX+N)", 23, 4, "DD CB XX C6"),
"SET 1,(IX+N)": Opcode("SET 1,(IX+N)", 23, 4, "DD CB XX CE"),
"SET 2,(IX+N)": Opcode("SET 2,(IX+N)", 23, 4, "DD CB XX D6"),
"SET 3,(IX+N)": Opcode("SET 3,(IX+N)", 23, 4, "DD CB XX DE"),
"SET 4,(IX+N)": Opcode("SET 4,(IX+N)", 23, 4, "DD CB XX E6"),
"SET 5,(IX+N)": Opcode("SET 5,(IX+N)", 23, 4, "DD CB XX EE"),
"SET 6,(IX+N)": Opcode("SET 6,(IX+N)", 23, 4, "DD CB XX F6"),
"SET 7,(IX+N)": Opcode("SET 7,(IX+N)", 23, 4, "DD CB XX FE"),
"SET 0,(IY+N)": Opcode("SET 0,(IY+N)", 23, 4, "FD CB XX C6"),
"SET 1,(IY+N)": Opcode("SET 1,(IY+N)", 23, 4, "FD CB XX CE"),
"SET 2,(IY+N)": Opcode("SET 2,(IY+N)", 23, 4, "FD CB XX D6"),
"SET 3,(IY+N)": Opcode("SET 3,(IY+N)", 23, 4, "FD CB XX DE"),
"SET 4,(IY+N)": Opcode("SET 4,(IY+N)", 23, 4, "FD CB XX E6"),
"SET 5,(IY+N)": Opcode("SET 5,(IY+N)", 23, 4, "FD CB XX EE"),
"SET 6,(IY+N)": Opcode("SET 6,(IY+N)", 23, 4, "FD CB XX F6"),
"SET 7,(IY+N)": Opcode("SET 7,(IY+N)", 23, 4, "FD CB XX FE"),
"SET 0,A": Opcode("SET 0,A", 8, 2, "CB C7"),
"SET 1,A": Opcode("SET 1,A", 8, 2, "CB CF"),
"SET 2,A": Opcode("SET 2,A", 8, 2, "CB D7"),
"SET 3,A": Opcode("SET 3,A", 8, 2, "CB DF"),
"SET 4,A": Opcode("SET 4,A", 8, 2, "CB E7"),
"SET 5,A": Opcode("SET 5,A", 8, 2, "CB EF"),
"SET 6,A": Opcode("SET 6,A", 8, 2, "CB F7"),
"SET 7,A": Opcode("SET 7,A", 8, 2, "CB FF"),
"SET 0,C": Opcode("SET 0,C", 8, 2, "CB C1"),
"SET 1,C": Opcode("SET 1,C", 8, 2, "CB C9"),
"SET 2,C": Opcode("SET 2,C", 8, 2, "CB D1"),
"SET 3,C": Opcode("SET 3,C", 8, 2, "CB D9"),
"SET 4,C": Opcode("SET 4,C", 8, 2, "CB E1"),
"SET 5,C": Opcode("SET 5,C", 8, 2, "CB E9"),
"SET 6,C": Opcode("SET 6,C", 8, 2, "CB F1"),
"SET 7,C": Opcode("SET 7,C", 8, 2, "CB F9"),
"SET 0,B": Opcode("SET 0,B", 8, 2, "CB C0"),
"SET 1,B": Opcode("SET 1,B", 8, 2, "CB C8"),
"SET 2,B": Opcode("SET 2,B", 8, 2, "CB D0"),
"SET 3,B": Opcode("SET 3,B", 8, 2, "CB D8"),
"SET 4,B": Opcode("SET 4,B", 8, 2, "CB E0"),
"SET 5,B": Opcode("SET 5,B", 8, 2, "CB E8"),
"SET 6,B": Opcode("SET 6,B", 8, 2, "CB F0"),
"SET 7,B": Opcode("SET 7,B", 8, 2, "CB F8"),
"SET 0,E": Opcode("SET 0,E", 8, 2, "CB C3"),
"SET 1,E": Opcode("SET 1,E", 8, 2, "CB CB"),
"SET 2,E": Opcode("SET 2,E", 8, 2, "CB D3"),
"SET 3,E": Opcode("SET 3,E", 8, 2, "CB DB"),
"SET 4,E": Opcode("SET 4,E", 8, 2, "CB E3"),
"SET 5,E": Opcode("SET 5,E", 8, 2, "CB EB"),
"SET 6,E": Opcode("SET 6,E", 8, 2, "CB F3"),
"SET 7,E": Opcode("SET 7,E", 8, 2, "CB FB"),
"SET 0,D": Opcode("SET 0,D", 8, 2, "CB C2"),
"SET 1,D": Opcode("SET 1,D", 8, 2, "CB CA"),
"SET 2,D": Opcode("SET 2,D", 8, 2, "CB D2"),
"SET 3,D": Opcode("SET 3,D", 8, 2, "CB DA"),
"SET 4,D": Opcode("SET 4,D", 8, 2, "CB E2"),
"SET 5,D": Opcode("SET 5,D", 8, 2, "CB EA"),
"SET 6,D": Opcode("SET 6,D", 8, 2, "CB F2"),
"SET 7,D": Opcode("SET 7,D", 8, 2, "CB FA"),
"SET 0,H": Opcode("SET 0,H", 8, 2, "CB C4"),
"SET 1,H": Opcode("SET 1,H", 8, 2, "CB CC"),
"SET 2,H": Opcode("SET 2,H", 8, 2, "CB D4"),
"SET 3,H": Opcode("SET 3,H", 8, 2, "CB DC"),
"SET 4,H": Opcode("SET 4,H", 8, 2, "CB E4"),
"SET 5,H": Opcode("SET 5,H", 8, 2, "CB EC"),
"SET 6,H": Opcode("SET 6,H", 8, 2, "CB F4"),
"SET 7,H": Opcode("SET 7,H", 8, 2, "CB FC"),
"SET 0,L": Opcode("SET 0,L", 8, 2, "CB C5"),
"SET 1,L": Opcode("SET 1,L", 8, 2, "CB CD"),
"SET 2,L": Opcode("SET 2,L", 8, 2, "CB D5"),
"SET 3,L": Opcode("SET 3,L", 8, 2, "CB DD"),
"SET 4,L": Opcode("SET 4,L", 8, 2, "CB E5"),
"SET 5,L": Opcode("SET 5,L", 8, 2, "CB ED"),
"SET 6,L": Opcode("SET 6,L", 8, 2, "CB F5"),
"SET 7,L": Opcode("SET 7,L", 8, 2, "CB FD"),
"SLA (HL)": Opcode("SLA (HL)", 15, 2, "CB 26"),
"SLA (IX+N)": Opcode("SLA (IX+N)", 23, 4, "DD CB XX 26"),
"SLA (IY+N)": Opcode("SLA (IY+N)", 23, 4, "FD CB XX 26"),
"SLA A": Opcode("SLA A", 8, 2, "CB 27"),
"SLA C": Opcode("SLA C", 8, 2, "CB 21"),
"SLA B": Opcode("SLA B", 8, 2, "CB 20"),
"SLA E": Opcode("SLA E", 8, 2, "CB 23"),
"SLA D": Opcode("SLA D", 8, 2, "CB 22"),
"SLA H": Opcode("SLA H", 8, 2, "CB 24"),
"SLA L": Opcode("SLA L", 8, 2, "CB 25"),
"SRA (HL)": Opcode("SRA (HL)", 15, 2, "CB 2E"),
"SRA (IX+N)": Opcode("SRA (IX+N)", 23, 4, "DD CB XX 2E"),
"SRA (IY+N)": Opcode("SRA (IY+N)", 23, 4, "FD CB XX 2E"),
"SRA A": Opcode("SRA A", 8, 2, "CB 2F"),
"SRA C": Opcode("SRA C", 8, 2, "CB 29"),
"SRA B": Opcode("SRA B", 8, 2, "CB 28"),
"SRA E": Opcode("SRA E", 8, 2, "CB 2B"),
"SRA D": Opcode("SRA D", 8, 2, "CB 2A"),
"SRA H": Opcode("SRA H", 8, 2, "CB 2C"),
"SRA L": Opcode("SRA L", 8, 2, "CB 2D"),
"SRL (HL)": Opcode("SRL (HL)", 15, 2, "CB 3E"),
"SRL (IX+N)": Opcode("SRL (IX+N)", 23, 4, "DD CB XX 3E"),
"SRL (IY+N)": Opcode("SRL (IY+N)", 23, 4, "FD CB XX 3E"),
"SRL A": Opcode("SRL A", 8, 2, "CB 3F"),
"SRL C": Opcode("SRL C", 8, 2, "CB 39"),
"SRL B": Opcode("SRL B", 8, 2, "CB 38"),
"SRL E": Opcode("SRL E", 8, 2, "CB 3B"),
"SRL D": Opcode("SRL D", 8, 2, "CB 3A"),
"SRL H": Opcode("SRL H", 8, 2, "CB 3C"),
"SRL L": Opcode("SRL L", 8, 2, "CB 3D"),
"SUB (HL)": Opcode("SUB (HL)", 7, 1, "96"),
"SUB (IX+N)": Opcode("SUB (IX+N)", 19, 3, "DD 96 XX"),
"SUB (IY+N)": Opcode("SUB (IY+N)", 19, 3, "FD 96 XX"),
"SUB A": Opcode("SUB A", 4, 1, "97"),
"SUB C": Opcode("SUB C", 4, 1, "91"),
"SUB B": Opcode("SUB B", 4, 1, "90"),
"SUB E": Opcode("SUB E", 4, 1, "93"),
"SUB D": Opcode("SUB D", 4, 1, "92"),
"SUB H": Opcode("SUB H", 4, 1, "94"),
"SUB L": Opcode("SUB L", 4, 1, "95"),
"SUB N": Opcode("SUB N", 7, 2, "D6 XX"),
"XOR (HL)": Opcode("XOR (HL)", 7, 1, "AE"),
"XOR (IX+N)": Opcode("XOR (IX+N)", 19, 3, "DD AE XX"),
"XOR (IY+N)": Opcode("XOR (IY+N)", 19, 3, "FD AE XX"),
"XOR A": Opcode("XOR A", 4, 1, "AF"),
"XOR C": Opcode("XOR C", 4, 1, "A9"),
"XOR B": Opcode("XOR B", 4, 1, "A8"),
"XOR E": Opcode("XOR E", 4, 1, "AB"),
"XOR D": Opcode("XOR D", 4, 1, "AA"),
"XOR H": Opcode("XOR H", 4, 1, "AC"),
"XOR L": Opcode("XOR L", 4, 1, "AD"),
"XOR N": Opcode("XOR N", 7, 2, "EE XX"),
# Undocumented opcodes
"SLL A": Opcode("SLL A", 8, 2, "CB 37"),
"SLL C": Opcode("SLL C", 8, 2, "CB 31"),
"SLL B": Opcode("SLL B", 8, 2, "CB 30"),
"SLL E": Opcode("SLL E", 8, 2, "CB 33"),
"SLL D": Opcode("SLL D", 8, 2, "CB 32"),
"SLL H": Opcode("SLL H", 8, 2, "CB 34"),
"SLL L": Opcode("SLL L", 8, 2, "CB 35"),
"SLL (HL)": Opcode("SLL (HL)", 15, 2, "CB 36"),
"SLL (IX+N)": Opcode("SLL (IX+N)", 19, 4, "DD CB XX 36"),
"SLL (IY+N)": Opcode("SLL (IY+N)", 19, 4, "FD CB XX 36"),
"INC IXH": Opcode("INC IXH", 8, 2, "DD 24"),
"DEC IXH": Opcode("DEC IXH", 8, 2, "DD 25"),
"INC IXL": Opcode("INC IXL", 8, 2, "DD 2C"),
"DEC IXL": Opcode("DEC IXL", 8, 2, "DD 2D"),
"INC IYH": Opcode("INC IYH", 8, 2, "FD 24"),
"DEC IYH": Opcode("DEC IYH", 8, 2, "FD 25"),
"INC IYL": Opcode("INC IYL", 8, 2, "FD 2C"),
"DEC IYL": Opcode("DEC IYL", 8, 2, "FD 2D"),
"LD IXH,N": Opcode("LD IXH,N", 12, 3, "DD 26 XX"),
"LD IXL,N": Opcode("LD IXL,N", 12, 3, "DD 2E XX"),
"LD IYH,N": Opcode("LD IYH,N", 12, 3, "FD 26 XX"),
"LD IYL,N": Opcode("LD IYL,N", 12, 3, "FD 2E XX"),
"LD A,IXH": Opcode("LD A,IXH", 8, 2, "DD 7C"),
"LD A,IXL": Opcode("LD A,IXL", 8, 2, "DD 7D"),
"LD B,IXH": Opcode("LD B,IXH", 8, 2, "DD 44"),
"LD B,IXL": Opcode("LD B,IXL", 8, 2, "DD 45"),
"LD C,IXH": Opcode("LD C,IXH", 8, 2, "DD 4C"),
"LD C,IXL": Opcode("LD C,IXL", 8, 2, "DD 4D"),
"LD D,IXH": Opcode("LD D,IXH", 8, 2, "DD 54"),
"LD D,IXL": Opcode("LD D,IXL", 8, 2, "DD 55"),
"LD E,IXH": Opcode("LD E,IXH", 8, 2, "DD 5C"),
"LD E,IXL": Opcode("LD E,IXL", 8, 2, "DD 5D"),
"LD A,IYH": Opcode("LD A,IYH", 8, 2, "FD 7C"),
"LD A,IYL": Opcode("LD A,IYL", 8, 2, "FD 7D"),
"LD B,IYH": Opcode("LD B,IYH", 8, 2, "FD 44"),
"LD B,IYL": Opcode("LD B,IYL", 8, 2, "FD 45"),
"LD C,IYH": Opcode("LD C,IYH", 8, 2, "FD 4C"),
"LD C,IYL": Opcode("LD C,IYL", 8, 2, "FD 4D"),
"LD D,IYH": Opcode("LD D,IYH", 8, 2, "FD 54"),
"LD D,IYL": Opcode("LD D,IYL", 8, 2, "FD 55"),
"LD E,IYH": Opcode("LD E,IYH", 8, 2, "FD 5C"),
"LD E,IYL": Opcode("LD E,IYL", 8, 2, "FD 5D"),
"LD IXH,B": Opcode("LD IXH,B", 8, 2, "DD 60"),
"LD IXH,C": Opcode("LD IXH,C", 8, 2, "DD 61"),
"LD IXH,D": Opcode("LD IXH,D", 8, 2, "DD 62"),
"LD IXH,E": Opcode("LD IXH,E", 8, 2, "DD 63"),
"LD IXH,IXH": Opcode("LD IXH,IXH", 8, 2, "DD 64"),
"LD IXH,IXL": Opcode("LD IXH,IXL", 8, 2, "DD 65"),
"LD IXH,A": Opcode("LD IXH,A", 8, 2, "DD 67"),
"LD IXL,B": Opcode("LD IXL,B", 8, 2, "DD 68"),
"LD IXL,C": Opcode("LD IXL,C", 8, 2, "DD 69"),
"LD IXL,D": Opcode("LD IXL,D", 8, 2, "DD 6A"),
"LD IXL,E": Opcode("LD IXL,E", 8, 2, "DD 6B"),
"LD IXL,IXH": Opcode("LD IXL,IXH", 8, 2, "DD 6C"),
"LD IXL,IXL": Opcode("LD IXL,IXL", 8, 2, "DD 6D"),
"LD IXL,A": Opcode("LD IXL,A", 8, 2, "DD 6F"),
"LD IYH,B": Opcode("LD IYH,B", 8, 2, "FD 60"),
"LD IYH,C": Opcode("LD IYH,C", 8, 2, "FD 61"),
"LD IYH,D": Opcode("LD IYH,D", 8, 2, "FD 62"),
"LD IYH,E": Opcode("LD IYH,E", 8, 2, "FD 63"),
"LD IYH,IYH": Opcode("LD IYH,IYH", 8, 2, "DD 64"),
"LD IYH,IYL": Opcode("LD IYH,IYL", 8, 2, "DD 65"),
"LD IYH,A": Opcode("LD IYH,A", 8, 2, "FD 67"),
"LD IYL,B": Opcode("LD IYL,B", 8, 2, "FD 68"),
"LD IYL,C": Opcode("LD IYL,C", 8, 2, "FD 69"),
"LD IYL,D": Opcode("LD IYL,D", 8, 2, "FD 6A"),
"LD IYL,E": Opcode("LD IYL,E", 8, 2, "FD 6B"),
"LD IYL,IYH": Opcode("LD IYL,IYH", 8, 2, "FD 6C"),
"LD IYL,IYL": Opcode("LD IYL,IYL", 8, 2, "FD 6D"),
"LD IYL,A": Opcode("LD IYL,A", 8, 2, "FD 6F"),
"ADD A,IXH": Opcode("ADD A,IXH", 8, 2, "DD 84"),
"ADD A,IXL": Opcode("ADD A,IXL", 8, 2, "DD 85"),
"ADC A,IXH": Opcode("ADC A,IXH", 8, 2, "DD 8C"),
"ADC A,IXL": Opcode("ADC A,IXL", 8, 2, "DD 8D"),
"ADD A,IYH": Opcode("ADD A,IYH", 8, 2, "FD 84"),
"ADD A,IYL": Opcode("ADD A,IYL", 8, 2, "FD 85"),
"ADC A,IYH": Opcode("ADC A,IYH", 8, 2, "FD 8C"),
"ADC A,IYL": Opcode("ADC A,IYL", 8, 2, "FD 8D"),
"SUB IXH": Opcode("SUB IXH", 8, 2, "DD 94"),
"SUB IXL": Opcode("SUB IXL", 8, 2, "DD 95"),
"SBC A,IXH": Opcode("SBC A,IXH", 8, 2, "DD 9C"),
"SBC A,IXL": Opcode("SBC A,IXL", 8, 2, "DD 9D"),
"SUB IYH": Opcode("SUB IYH", 8, 2, "FD 94"),
"SUB IYL": Opcode("SUB IYL", 8, 2, "FD 95"),
"SBC A,IYH": Opcode("SBC A,IYH", 8, 2, "FD 9C"),
"SBC A,IYL": Opcode("SBC A,IYL", 8, 2, "FD 9D"),
"AND IXH": Opcode("AND IXH", 8, 2, "DD A4"),
"AND IXL": Opcode("AND IXL", 8, 2, "DD A5"),
"AND IYH": Opcode("AND IYH", 8, 2, "FD A4"),
"AND IYL": Opcode("AND IYL", 8, 2, "FD A5"),
"XOR IXH": Opcode("XOR IXH", 8, 2, "DD AC"),
"XOR IXL": Opcode("XOR IXL", 8, 2, "DD AD"),
"XOR IYH": Opcode("XOR IYH", 8, 2, "FD AC"),
"XOR IYL": Opcode("XOR IYL", 8, 2, "FD AD"),
"OR IXH": Opcode("OR IXH", 8, 2, "DD B4"),
"OR IXL": Opcode("OR IXL", 8, 2, "DD B5"),
"OR IYH": Opcode("OR IYH", 8, 2, "FD B4"),
"OR IYL": Opcode("OR IYL", 8, 2, "FD B5"),
"CP IXH": Opcode("CP IXH", 8, 2, "DD BC"),
"CP IXL": Opcode("CP IXL", 8, 2, "DD BD"),
"CP IYH": Opcode("CP IYH", 8, 2, "FD BC"),
"CP IYL": Opcode("CP IYL", 8, 2, "FD BD"),
# ZX NEXT extra opcodes
"LDIX": Opcode("LDIX", 16, 2, "ED A4"),
"LDWS": Opcode("LDWS", 14, 2, "ED A5"),
"LDIRX": Opcode("LDIRX", 21, 2, "ED B4"),
"LDDX": Opcode("LDDX", 16, 2, "ED AC"),
"LDDRX": Opcode("LDDRX", 21, 2, "ED BC"),
"LDPIRX": Opcode("LDPIRX", 21, 2, "ED B7"),
"OUTINB": Opcode("OUTINB", 16, 2, "ED 90"),
"MUL D,E": Opcode("MUL D,E", 8, 2, "ED 30"),
"ADD HL,A": Opcode("ADD HL,A", 8, 2, "ED 31"),
"ADD DE,A": Opcode("ADD DE,A", 8, 2, "ED 32"),
"ADD BC,A": Opcode("ADD BC,A", 8, 2, "ED 33"),
"ADD HL,NN": Opcode("ADD HL,NN", 16, 4, "ED 34 XX XX"),
"ADD DE,NN": Opcode("ADD DE,NN", 16, 4, "ED 35 XX XX"),
"ADD BC,NN": Opcode("ADD BC,NN", 16, 4, "ED 36 XX XX"),
"SWAPNIB": Opcode("SWAPNIB", 8, 2, "ED 23"),
"MIRROR": Opcode("MIRROR", 8, 2, "ED 24"),
"PUSH NN": Opcode("PUSH NN", 23, 4, "ED 8A XX XX"),
"NEXTREG N,N": Opcode("NEXTREG N,N", 20, 4, "ED 91 XX XX"),
"NEXTREG N,A": Opcode("NEXTREG N,A", 17, 3, "ED 92 XX"),
"PIXELDN": Opcode("PIXELDN", 8, 2, "ED 93"),
"PIXELAD": Opcode("PIXELAD", 8, 2, "ED 94"),
"SETAE": Opcode("SETAE", 8, 2, "ED 95"),
"TEST N": Opcode("TEST N", 11, 3, "ED 27 XX"),
"BSLA DE,B": Opcode("BSLA DE,B", 8, 2, "ED 28"),
"BSRA DE,B": Opcode("BSRA DE,B", 8, 2, "ED 29"),
"BSRL DE,B": Opcode("BSRL DE,B", 8, 2, "ED 2A"),
"BSRF DE,B": Opcode("BSRF DE,B", 8, 2, "ED 2B"),
"BRLC DE,B": Opcode("BRLC DE,B", 8, 2, "ED 2C"),
"JP (C)": Opcode("JP (C)", 13, 2, "ED 98"),
}
# Z80 asm instruction list
Z80INSTR = set(x.split()[0] for x in Z80SET)
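# A minimal usage sketch (not part of the original module): the mnemonic strings used
# below are real entries from Z80SET above, but the lookup flow itself is illustrative
# of how an assembler pass might consult the table.
if __name__ == "__main__":
    for line in ("XOR A", "SET 7,A", "SLL (HL)"):
        mnemonic = line.split()[0]
        if mnemonic in Z80INSTR and line in Z80SET:
            # Z80SET maps the canonical instruction text to its Opcode record
            # (mnemonic, T-states, size in bytes, opcode bytes).
            print(line, "->", Z80SET[line])
        else:
            print(line, "-> unknown instruction")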
|
PypiClean
|
/ymxadmin-1.0.9.tar.gz/ymxadmin-1.0.9/xadmin/views/edit.py
|
from __future__ import absolute_import
import copy
from crispy_forms.utils import TEMPLATE_PACK
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied, FieldError
from django.db import models, transaction
from django.forms.models import modelform_factory, modelform_defines_fields
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.text import capfirst, get_text_list
from django.template import loader
from django.utils.translation import ugettext as _
from xadmin import widgets
from xadmin.layout import FormHelper, Layout, Fieldset, TabHolder, Container, Column, Col, Field
from xadmin.util import unquote
from xadmin.views.detail import DetailAdminUtil
from .base import ModelAdminView, filter_hook, csrf_protect_m
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.IPAddressField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.ForeignKey: {'widget': widgets.AdminSelectWidget},
models.OneToOneField: {'widget': widgets.AdminSelectWidget},
models.ManyToManyField: {'widget': widgets.AdminSelectMultiple},
}
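# Illustrative note (hypothetical subclass, not part of the original module): a concrete
# admin class can extend these defaults via `formfield_overrides`; get_field_attrs() below
# walks the field class MRO, so an entry for a base field class also covers its subclasses,
# e.g.:
#
#     formfield_overrides = {models.SlugField: {'widget': widgets.AdminTextInputWidget}}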
class ReadOnlyField(Field):
template = "xadmin/layout/field_value.html"
def __init__(self, *args, **kwargs):
self.detail = kwargs.pop('detail')
super(ReadOnlyField, self).__init__(*args, **kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
html = ''
for field in self.fields:
result = self.detail.get_field_result(field)
field = {'auto_id': field}
html += loader.render_to_string(
self.template, {'field': field, 'result': result})
return html
class ModelFormAdminView(ModelAdminView):
form = forms.ModelForm
formfield_overrides = {}
readonly_fields = ()
style_fields = {}
exclude = None
relfield_style = None
save_as = False
save_on_top = False
add_form_template = None
change_form_template = None
form_layout = None
def __init__(self, request, *args, **kwargs):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
super(ModelFormAdminView, self).__init__(request, *args, **kwargs)
@filter_hook
def formfield_for_dbfield(self, db_field, **kwargs):
# If it uses an intermediary model that isn't auto-created, don't show
# the field in the admin.
if isinstance(db_field, models.ManyToManyField) and not db_field.rel.through._meta.auto_created:
return None
attrs = self.get_field_attrs(db_field, **kwargs)
return db_field.formfield(**dict(attrs, **kwargs))
@filter_hook
def get_field_style(self, db_field, style, **kwargs):
if style in ('radio', 'radio-inline') and (db_field.choices or isinstance(db_field, models.ForeignKey)):
attrs = {'widget': widgets.AdminRadioSelect(
attrs={'inline': 'inline' if style == 'radio-inline' else ''})}
if db_field.choices:
attrs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('Null'))]
)
return attrs
if style in ('checkbox', 'checkbox-inline') and isinstance(db_field, models.ManyToManyField):
return {'widget': widgets.AdminCheckboxSelect(attrs={'inline': style == 'checkbox-inline'}),
'help_text': None}
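# Example mapping (hypothetical, not from the original module): `style_fields` keys are
# model field names and values are the style strings handled above, e.g.
#
#     style_fields = {'gender': 'radio-inline', 'tags': 'checkbox'}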
@filter_hook
def get_field_attrs(self, db_field, **kwargs):
if db_field.name in self.style_fields:
attrs = self.get_field_style(
db_field, self.style_fields[db_field.name], **kwargs)
if attrs:
return attrs
if hasattr(db_field, "rel") and db_field.rel:
related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
if related_modeladmin and hasattr(related_modeladmin, 'relfield_style'):
attrs = self.get_field_style(
db_field, related_modeladmin.relfield_style, **kwargs)
if attrs:
return attrs
if db_field.choices:
return {'widget': widgets.AdminSelectWidget}
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
return self.formfield_overrides[klass].copy()
return {}
@filter_hook
def prepare_form(self):
self.model_form = self.get_model_form()
@filter_hook
def instance_forms(self):
self.form_obj = self.model_form(**self.get_form_datas())
def setup_forms(self):
helper = self.get_form_helper()
if helper:
self.form_obj.helper = helper
@filter_hook
def valid_forms(self):
return self.form_obj.is_valid()
@filter_hook
def get_model_form(self, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields())
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
defaults = {
"form": self.form,
"fields": self.fields and list(self.fields) or None,
"exclude": exclude,
"formfield_callback": self.formfield_for_dbfield,
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
    defaults['fields'] = forms.ALL_FIELDS
    return modelform_factory(self.model, **defaults)
try:
    return modelform_factory(self.model, **defaults)
except FieldError as e:
    raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
                     % (e, self.__class__.__name__))
@filter_hook
def get_form_layout(self):
layout = copy.deepcopy(self.form_layout)
arr = self.form_obj.fields.keys()
if six.PY3:
arr = [k for k in arr]
fields = arr + list(self.get_readonly_fields())
if layout is None:
layout = Layout(Container(Col('full',
Fieldset("", *fields, css_class="unsort no_title"), horizontal=True, span=12)
))
elif type(layout) in (list, tuple) and len(layout) > 0:
if isinstance(layout[0], Column):
fs = layout
elif isinstance(layout[0], (Fieldset, TabHolder)):
fs = (Col('full', *layout, horizontal=True, span=12),)
else:
fs = (Col('full', Fieldset("", *layout, css_class="unsort no_title"), horizontal=True, span=12),)
layout = Layout(Container(*fs))
rendered_fields = [i[1] for i in layout.get_field_names()]
container = layout[0].fields
other_fieldset = Fieldset(_(u'Other Fields'), *[f for f in fields if f not in rendered_fields])
if len(other_fieldset.fields):
if len(container) and isinstance(container[0], Column):
container[0].fields.append(other_fieldset)
else:
container.append(other_fieldset)
return layout
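# Illustrative `form_layout` (hypothetical subclass, not part of the original module): a
# plain tuple of Fieldsets is wrapped into one full-width Column by the branch above, and
# any model fields not named in it are appended to the trailing "Other Fields" fieldset.
#
#     form_layout = (
#         Fieldset(_('Basic'), 'name', 'slug'),
#         Fieldset(_('Content'), 'body'),
#     )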
@filter_hook
def get_form_helper(self):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
helper.add_layout(self.get_form_layout())
# deal with readonly fields
readonly_fields = self.get_readonly_fields()
if readonly_fields:
detail = self.get_model_view(
DetailAdminUtil, self.model, self.form_obj.instance)
for field in readonly_fields:
helper[field].wrap(ReadOnlyField, detail=detail)
return helper
@filter_hook
def get_readonly_fields(self):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
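# For example (hypothetical override, not part of the original module), readonly fields
# can be computed per object, e.g. locked only when editing an existing instance:
#
#     def get_readonly_fields(self):
#         return ('created_at',) if self.org_obj else ()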
@filter_hook
def save_forms(self):
self.new_obj = self.form_obj.save(commit=False)
@filter_hook
def change_message(self):
change_message = []
if self.org_obj is None:
change_message.append(_('Added.'))
elif self.form_obj.changed_data:
change_message.append(_('Changed %s.') % get_text_list(self.form_obj.changed_data, _('and')))
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
@filter_hook
def save_models(self):
self.new_obj.save()
flag = 'create' if self.org_obj is None else 'change'
self.log(flag, self.change_message(), self.new_obj)
@filter_hook
def save_related(self):
self.form_obj.save_m2m()
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
self.instance_forms()
self.setup_forms()
return self.get_response()
@csrf_protect_m
@transaction.atomic
@filter_hook
def post(self, request, *args, **kwargs):
self.instance_forms()
self.setup_forms()
if self.valid_forms():
self.save_forms()
self.save_models()
self.save_related()
response = self.post_response()
cls_str = str if six.PY3 else basestring
if isinstance(response, cls_str):
return HttpResponseRedirect(response)
else:
return response
return self.get_response()
@filter_hook
def get_context(self):
add = self.org_obj is None
change = self.org_obj is not None
new_context = {
'form': self.form_obj,
'original': self.org_obj,
'show_delete': self.org_obj is not None,
'add': add,
'change': change,
'errors': self.get_error_list(),
'has_add_permission': self.has_add_permission(),
'has_view_permission': self.has_view_permission(),
'has_change_permission': self.has_change_permission(self.org_obj),
'has_delete_permission': self.has_delete_permission(self.org_obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
'form_url': '',
'content_type_id': ContentType.objects.get_for_model(self.model).id,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
}
# for submit line
new_context.update({
'onclick_attrib': '',
'show_delete_link': (new_context['has_delete_permission']
and (change or new_context['show_delete'])),
'show_save_as_new': change and self.save_as,
'show_save_and_add_another': new_context['has_add_permission'] and
(not self.save_as or add),
'show_save_and_continue': new_context['has_change_permission'],
'show_save': True
})
if self.org_obj and new_context['show_delete_link']:
new_context['delete_url'] = self.model_admin_url(
'delete', self.org_obj.pk)
context = super(ModelFormAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_error_list(self):
errors = forms.utils.ErrorList()
if self.form_obj.is_bound:
errors.extend(self.form_obj.errors.values())
return errors
@filter_hook
def get_media(self):
return super(ModelFormAdminView, self).get_media() + self.form_obj.media + \
self.vendor('xadmin.page.form.js', 'xadmin.form.css')
class CreateAdminView(ModelFormAdminView):
def init_request(self, *args, **kwargs):
self.org_obj = None
if not self.has_add_permission():
raise PermissionDenied
# common method for both get and post
self.prepare_form()
@filter_hook
def get_form_datas(self):
# Prepare the dict of initial data from the request.
# We have to special-case M2Ms as a list of comma-separated PKs.
if self.request_method == 'get':
initial = dict(self.request.GET.items())
for k in initial:
try:
f = self.opts.get_field(k)
except models.FieldDoesNotExist:
continue
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return {'initial': initial}
else:
return {'data': self.request.POST, 'files': self.request.FILES}
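# For instance (illustrative request, not from the original module): a GET for
# /add/?tags=1,2,3&name=Foo yields initial == {'tags': ['1', '2', '3'], 'name': 'Foo'},
# i.e. only ManyToMany values are split on commas; scalar fields pass through unchanged.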
@filter_hook
def get_context(self):
new_context = {
'title': _('Add %s') % force_text(self.opts.verbose_name),
}
context = super(CreateAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_breadcrumb(self):
bcs = super(ModelFormAdminView, self).get_breadcrumb()
item = {'title': _('Add %s') % force_text(self.opts.verbose_name)}
if self.has_add_permission():
item['url'] = self.model_admin_url('add')
bcs.append(item)
return bcs
@filter_hook
def get_response(self):
context = self.get_context()
context.update(self.kwargs or {})
return TemplateResponse(
self.request, self.add_form_template or self.get_template_list(
'views/model_form.html'),
context)
@filter_hook
def post_response(self):
"""
Determines the HttpResponse for the add_view stage.
"""
request = self.request
msg = _(
'The %(name)s "%(obj)s" was added successfully.') % {'name': force_text(self.opts.verbose_name),
'obj': "<a class='alert-link' href='%s'>%s</a>" % (self.model_admin_url('change', self.new_obj._get_pk_val()), force_text(self.new_obj))}
if "_continue" in request.POST:
self.message_user(
msg + ' ' + _("You may edit it again below."), 'success')
return self.model_admin_url('change', self.new_obj._get_pk_val())
if "_addanother" in request.POST:
self.message_user(msg + ' ' + (_("You may add another %s below.") % force_text(self.opts.verbose_name)), 'success')
return request.path
else:
self.message_user(msg, 'success')
# Figure out where to redirect. If the user has change permission,
# redirect to the change-list page for this object. Otherwise,
# redirect to the admin index.
if "_redirect" in request.POST:
return request.POST["_redirect"]
elif self.has_view_permission():
return self.model_admin_url('changelist')
else:
return self.get_admin_url('index')
class UpdateAdminView(ModelFormAdminView):
def init_request(self, object_id, *args, **kwargs):
self.org_obj = self.get_object(unquote(object_id))
if not self.has_change_permission(self.org_obj):
raise PermissionDenied
if self.org_obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(self.opts.verbose_name), 'key': escape(object_id)})
# common method for both get and post
self.prepare_form()
@filter_hook
def get_form_datas(self):
params = {'instance': self.org_obj}
if self.request_method == 'post':
params.update(
{'data': self.request.POST, 'files': self.request.FILES})
return params
@filter_hook
def get_context(self):
new_context = {
'title': _('Change %s') % force_text(self.org_obj),
'object_id': str(self.org_obj.pk),
}
context = super(UpdateAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_breadcrumb(self):
bcs = super(ModelFormAdminView, self).get_breadcrumb()
item = {'title': force_text(self.org_obj)}
if self.has_change_permission():
item['url'] = self.model_admin_url('change', self.org_obj.pk)
bcs.append(item)
return bcs
@filter_hook
def get_response(self, *args, **kwargs):
context = self.get_context()
context.update(kwargs or {})
return TemplateResponse(
self.request, self.change_form_template or self.get_template_list(
'views/model_form.html'),
context)
def post(self, request, *args, **kwargs):
if "_saveasnew" in self.request.POST:
return self.get_model_view(CreateAdminView, self.model).post(request)
return super(UpdateAdminView, self).post(request, *args, **kwargs)
@filter_hook
def post_response(self):
"""
Determines the HttpResponse for the change_view stage.
"""
opts = self.new_obj._meta
obj = self.new_obj
request = self.request
verbose_name = opts.verbose_name
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name':
force_text(verbose_name), 'obj': force_text(obj)}
if "_continue" in request.POST:
self.message_user(
msg + ' ' + _("You may edit it again below."), 'success')
return request.path
elif "_addanother" in request.POST:
self.message_user(msg + ' ' + (_("You may add another %s below.")
% force_text(verbose_name)), 'success')
return self.model_admin_url('add')
else:
self.message_user(msg, 'success')
# Figure out where to redirect. If the user has change permission,
# redirect to the change-list page for this object. Otherwise,
# redirect to the admin index.
if "_redirect" in request.POST:
return request.POST["_redirect"]
elif self.has_view_permission():
change_list_url = self.model_admin_url('changelist')
if 'LIST_QUERY' in self.request.session \
and self.request.session['LIST_QUERY'][0] == self.model_info:
change_list_url += '?' + self.request.session['LIST_QUERY'][1]
return change_list_url
else:
return self.get_admin_url('index')
class ModelFormAdminUtil(ModelFormAdminView):
def init_request(self, obj=None):
self.org_obj = obj
self.prepare_form()
self.instance_forms()
@filter_hook
def get_form_datas(self):
return {'instance': self.org_obj}
|
PypiClean
|
/ansible-8.3.0-py3-none-any.whl/ansible_collections/community/hashi_vault/plugins/module_utils/_auth_method_aws_iam.py
|
# FOR INTERNAL COLLECTION USE ONLY
# The interfaces in this file are meant for use within the community.hashi_vault collection
# and may not remain stable to outside uses. Changes may be made in ANY release, even a bugfix release.
# See also: https://github.com/ansible/community/issues/539#issuecomment-780839686
# Please open an issue if you have questions about this.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_common import (
HashiVaultAuthMethodBase,
HashiVaultValueError,
)
class HashiVaultAuthMethodAwsIam(HashiVaultAuthMethodBase):
'''HashiVault option group class for auth: aws_iam'''
NAME = 'aws_iam'
OPTIONS = [
'aws_profile',
'aws_access_key',
'aws_secret_key',
'aws_security_token',
'region',
'aws_iam_server_id',
'role_id',
]
def __init__(self, option_adapter, warning_callback, deprecate_callback):
super(HashiVaultAuthMethodAwsIam, self).__init__(option_adapter, warning_callback, deprecate_callback)
def validate(self):
params = {
'access_key': self._options.get_option_default('aws_access_key'),
'secret_key': self._options.get_option_default('aws_secret_key'),
}
session_token = self._options.get_option_default('aws_security_token')
if session_token:
params['session_token'] = session_token
mount_point = self._options.get_option_default('mount_point')
if mount_point:
params['mount_point'] = mount_point
role = self._options.get_option_default('role_id')
if role:
params['role'] = role
region = self._options.get_option_default('region')
if region:
params['region'] = region
header_value = self._options.get_option_default('aws_iam_server_id')
if header_value:
params['header_value'] = header_value
if not (params['access_key'] and params['secret_key']):
try:
import boto3
import botocore
except ImportError:
raise HashiVaultValueError("boto3 is required for loading a profile or IAM role credentials.")
profile = self._options.get_option_default('aws_profile')
try:
session_credentials = boto3.session.Session(profile_name=profile).get_credentials()
except botocore.exceptions.ProfileNotFound:
raise HashiVaultValueError("The AWS profile '%s' was not found." % profile)
if not session_credentials:
raise HashiVaultValueError("No AWS credentials supplied or available.")
params['access_key'] = session_credentials.access_key
params['secret_key'] = session_credentials.secret_key
if session_credentials.token:
params['session_token'] = session_credentials.token
self._auth_aws_iam_login_params = params
def authenticate(self, client, use_token=True):
params = self._auth_aws_iam_login_params
try:
response = client.auth.aws.iam_login(use_token=use_token, **params)
except (NotImplementedError, AttributeError):
self.warn("HVAC should be updated to version 0.9.3 or higher. Deprecated method 'auth_aws_iam' will be used.")
response = client.auth_aws_iam(use_token=use_token, **params)
return response
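# Rough usage sketch (assumptions: an option adapter exposing get_option_default(), an
# hvac.Client instance, and warn/deprecate callables; the variable names are illustrative,
# not part of the original module):
#
#     auth = HashiVaultAuthMethodAwsIam(adapter, module.warn, module.deprecate)
#     auth.validate()                  # resolves static keys or boto3 session credentials
#     auth.authenticate(hvac_client)   # performs the aws_iam login against Vault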
|
PypiClean
|