/luma.core-2.4.1.tar.gz/luma.core-2.4.1/luma/core/bitmap_font.py
|
from pathlib import Path
from math import ceil
from copy import deepcopy
from PIL import Image, ImageFont
import cbor2
from luma.core.util import from_16_to_8, from_8_to_16
class bitmap_font():
"""
    A ``PIL.ImageFont`` style font.
    The structure of this class was modeled after the PIL ``ImageFont`` class
    and is intended to be interchangeable with :py:class:`PIL.ImageFont` objects.
It has the following additional capabilities:
* Allows fonts larger than 256 characters to be created
* Font can be combined with other fonts
* Font characters can be mapped to their correct unicode codepoints
* Font can be initialized from a basic sprite table (with conditions)
.. note:
Because this font is implemented completely in Python it will be slower
than a native PIL.ImageFont object.
.. versionadded:: 1.16.0
"""
PUA_SPACE = 0xF8000
def __init__(self):
self.mappings = {}
self.metrics = []
def load(self, filename):
"""
Load font from filename
:param filename: the filename of the file containing the font data
:type filename: str
:return: a font object
:rtype: :py:class:`luma.core.bitmap_font`
"""
with open(filename, 'rb') as fp:
s = fp.readline()
if s != b'LUMA.CORE.BITMAP_FONT\n':
raise SyntaxError('Not a luma.core.bitmap_font file')
fontdata = cbor2.load(fp)
self._load_fontdata(fontdata)
return self
def loads(self, fontdata):
"""
Load :py:class:`luma.core.bitmap_font` from a string of serialized data produced
by the ``dumps`` method
:param fontdata: The serialized font data that will be used to initialize
the font. This data is produced by the :py:func:`luma.core.bitmap_font.dumps`
method.
:type fontdata: bytes
:return: a font object
:rtype: :py:class:`luma.core.bitmap_font`
"""
fontdata = cbor2.loads(fontdata)
self._load_fontdata(fontdata)
return self
def load_pillow_font(self, file, mappings=None):
"""
Create :py:class:`luma.core.bitmap_font` from a PIL ImageFont style font.
:param file: The filename of the PIL.ImageFont to load
:type file: str
:param mappings: a dictionary of unicode to value pairs (optional).
Mappings allow the appropriate unicode values to be provided for
each character contained within the font
:type mappings: dict
:return: a font object
:rtype: :py:class:`luma.core.bitmap_font`
"""
with open(file, 'rb') as fp:
if fp.readline() != b"PILfont\n":
raise SyntaxError("Not a PIL.ImageFont file")
while True:
s = fp.readline()
if not s:
raise SyntaxError("PIL.ImageFont file missing metric data")
if s == b"DATA\n":
break
data = fp.read(256 * 20)
if len(data) != 256 * 20:
raise SyntaxError("PIL.ImageFont file metric data incomplete")
sprite_table = self._get_image(file)
self._populate_metrics(sprite_table, data, range(256), mappings)
sprite_table.close()
return self
def load_sprite_table(self, sprite_table, index, xwidth, glyph_size, cell_size, mappings=None):
"""
Load a font from a sprite table
:param sprite_table: A PIL.Image representation of every glyph within the font
:type sprite_table: PIL.Image
:param index: The list of character values contained within sprite_table.
This list MUST be in the same order that the glyphs for the characters
appear within the sprite_table (in left to right, top to bottom order)
:type index: list or other iterable
:param xwidth: number of pixels between placements of each character in a
line of text
:type xwidth: int
:param glyph_size: tuple containing the width and height of each character
in the font
:type glyph_size: tuple(int, int)
:param cell_size: tuple containing the width and height of each cell in the
sprite table. Defaults to the size of the glyphs.
:type cell_size: tuple(int, int)
:param mappings: a dictionary of unicode to value pairs (optional)
Mappings allow the appropriate unicode values to be provided for
each character contained within the font
:type mappings: dict
:return: a font object
:rtype: :py:class:`luma.core.bitmap_font`
.. note:
Font contained within table must adhere to the following conditions
* All glyphs must be the same size
* Glyphs are contained within the sprite table in a grid arrangement
* The grid is filled with glyphs placed in horizontal order
* Each cell in the grid is the same size
* The placement of each glyph has no offset from its origin
"""
table_width = sprite_table.size[0]
# Each character uses the same data
line = [xwidth, 0, 0, -glyph_size[1], glyph_size[0], 0]
data = []
# Generate an entry for each character
for c in range(len(index)):
offset = c * cell_size[0]
left = offset % table_width
top = (offset // table_width) * cell_size[1]
right = left + glyph_size[0]
bottom = top + glyph_size[1]
data = data + line + [left, top, right, bottom]
self._populate_metrics(sprite_table, from_16_to_8(data), index, mappings)
return self
def save(self, filename):
"""
Write :py:class:`luma.core.bitmap_font` data to a file
"""
with open(filename, 'wb') as fp:
fontdata = self._generate_fontdata()
fp.write(b'LUMA.CORE.BITMAP_FONT\n')
cbor2.dump(fontdata, fp)
def dumps(self):
"""
Serializes the font data for transfer or storage
:return: Serialized font data
:rtype: bytes
"""
fontdata = self._generate_fontdata()
return cbor2.dumps(fontdata)
def _generate_fontdata(self):
"""
Utility method to create an efficient serializable representation
of a :py:class:`luma.core.bitmap_font`
"""
cell_size = (self.width, self.height)
area = self.count * self.width * self.height
table_width = ((ceil(area**0.5) + self.width - 1) // self.width) * self.width
table_height = ((ceil(area / table_width) + self.height - 1) // self.height) * self.height
image = Image.new('1', (table_width, table_height))
metrics = []
for i, v in enumerate(self.metrics):
offset = i * cell_size[0]
left = offset % table_width
top = (offset // table_width) * cell_size[1]
image.paste(v['img'], (left, top))
metrics.append((v['xwidth'], v['dst']))
fontdata = {}
fontdata['type'] = 'LUMA.CORE.BITMAP_FONT'
fontdata['count'] = len(self.metrics)
if self.regular:
fontdata['xwidth'] = self.regular[0]
fontdata['glyph_size'] = (self.width, self.height)
fontdata['cell_size'] = cell_size
fontdata['mappings'] = self.mappings
fontdata['sprite_table_dimensions'] = (table_width, table_height)
fontdata['sprite_table'] = image.tobytes()
if not self.regular:
fontdata['metrics'] = metrics
return fontdata
def _load_fontdata(self, fontdata):
"""
Initialize font from deserialized data
"""
try:
count = fontdata['count']
xwidth = fontdata.get('xwidth')
glyph_size = fontdata.get('glyph_size')
cell_size = fontdata['cell_size']
self.mappings = fontdata['mappings']
table_width, table_height = fontdata['sprite_table_dimensions']
image = Image.frombytes('1', (table_width, table_height), fontdata['sprite_table'])
metrics = fontdata.get('metrics')
except (KeyError, TypeError, ValueError):
raise ValueError('Cannot parse fontdata. It is invalid.')
self.metrics = []
for i in range(count):
offset = i * cell_size[0]
metric = metrics[i] if metrics else (xwidth, [0, -glyph_size[1], glyph_size[0], 0])
left = offset % table_width
top = (offset // table_width) * cell_size[1]
right = left + (metric[1][2] - metric[1][0])
bottom = top + (metric[1][3] - metric[1][1])
self.metrics.append({
'xwidth': metric[0],
'dst': metric[1],
'img': image.crop((left, top, right, bottom))
})
self._calculate_font_size()
def _get_image(self, filename):
"""
Load sprite_table associated with font
"""
ifs = {p.resolve() for p in Path(filename).parent.glob(Path(filename).stem + ".*") if p.suffix in (".png", ".gif", ".pbm")}
for f in ifs:
try:
image = Image.open(f)
except:
pass
else:
if image.mode in ['1', 'L']:
break
image.close()
else:
raise OSError('cannot find glyph data file')
return image
def _lookup(self, val):
"""
        Utility method to determine a character's placement within the metrics list
"""
if val in self.mappings:
return self.mappings[val]
if val + self.PUA_SPACE in self.mappings:
return self.mappings[val + self.PUA_SPACE]
return None
def _getsize(self, text):
"""
Utility method to compute the rendered size of a line of text. It
also computes the minimum column value for the line of text. This is
needed in case the font has a negative horizontal offset which
        requires that the size be expanded to accommodate the extra pixels.
"""
min_col = max_col = cp = 0
for c in text:
m = self._lookup(ord(c))
if m is None:
# Ignore characters that do not exist in font
continue
char = self.metrics[m]
min_col = min(min_col, char['dst'][0] + cp)
max_col = max(max_col, char['dst'][2] + cp)
cp += char['xwidth']
return (max_col - min_col, self.height, min_col)
def getsize(self, text, *args, **kwargs):
"""
Wrapper for _getsize to match the interface of PIL.ImageFont
"""
width, height, min = self._getsize(text)
return (width, height)
def getmask(self, text, mode="1", *args, **kwargs):
"""
        Implements a PIL.ImageFont compatible method to return the rendered
image of a line of text
"""
# TODO: Test for potential character overwrite if horizontal offset is < 0
assert mode in ['1', 'L']
width, height, min = self._getsize(text)
image = Image.new(mode, (width, height))
# Adjust start if any glyph is placed before origin
cp = -min if min < 0 else 0
for c in text:
m = self._lookup(ord(c))
if m is None:
# Ignore characters that do not exist in font
continue
char = self.metrics[m]
px = char['dst'][0] + cp
py = char['dst'][1] + self.baseline
image.paste(char['img'], (px, py))
cp += char['xwidth']
return image.im
def _populate_metrics(self, sprite_table, data, index, mappings):
"""
Populate metrics on initial font load from a sprite table or PIL ImageFont
Place characters contained on the sprite_table into Unicode
private use area (PUA). Create a reverse lookup from the values
that are contained on the sprite_table.
.. note:
            Arbitrarily using Supplemental Private Use Area-A starting at
PUA_SPACE (0xF8000) to give the raw sprite_table locations
a unicode codepoint.
"""
self.metrics = []
self.glyph_index = {}
self.data = data
idx = 0
rev_map = {}
if mappings is not None:
for k, v in mappings.items():
if v in rev_map:
rev_map[v].append(k)
else:
rev_map[v] = [k]
self.mappings = {}
for i, c in enumerate(index):
metric = from_8_to_16(data[i * 20:(i + 1) * 20])
# If character position has no data, skip it
if sum(metric) == 0:
continue
xwidth = metric[0]
dst = metric[2:6]
src = metric[6:10]
img = sprite_table.crop((src[0], src[1], src[2], src[3]))
self.metrics.append({
'xwidth': xwidth,
'dst': dst,
'img': img
})
self.mappings[c + self.PUA_SPACE] = idx
if c in rev_map:
for u in rev_map[c]:
self.mappings[u] = idx
i2b = img.tobytes()
# Only add new glyphs except always add space character
if i2b not in self.glyph_index or c == 0x20:
self.glyph_index[i2b] = c
idx += 1
self._calculate_font_size()
def _calculate_font_size(self):
# Calculate height and baseline of font
ascent = descent = width = 0
m = self.metrics[0]
regular = (m['xwidth'], m['dst'])
xwidth = regular[0]
regular_flag = True
for m in self.metrics:
if regular != (m['xwidth'], m['dst']):
regular_flag = False
ascent = max(ascent, -m['dst'][1])
descent = max(descent, m['dst'][3])
width = max(width, m['dst'][2] - m['dst'][0])
xwidth = max(xwidth, m['xwidth'])
self.height = ascent + descent
self.width = width
self.baseline = ascent
self.regular = regular if regular_flag else None
self.count = len(self.metrics)
def combine(self, source_font, characters=None, force=False):
"""
Combine two :py:class:`luma.core.bitmap_font` instances.
:param source_font: a :py:class:`luma.core.bitmap_font` to copy from
:type source_font: :py:class:`luma.core.bitmap_font`
:param characters: (optional) A list of the characters to transfer from
the source_font. If not provided, all of the characters within
the source_font will be transferred.
:type characters: str
:param force: If set, the source_font can overwrite values that already
            exist within this font. Default is False.
:type force: bool
"""
if characters:
for c in characters:
if ord(c) in self.mappings and not force:
continue
m = source_font._lookup(ord(c))
if m is not None:
v = source_font.metrics[m]
else:
raise ValueError(f'{c} is not a valid character within the source font')
self.metrics.append(v)
self.mappings[ord(c)] = len(self.metrics) - 1
else:
# Copy source values into destination but don't overwrite existing characters unless force set
for k, v in source_font.mappings.items():
if k in self.mappings and not force:
continue
self.metrics.append(source_font.metrics[v])
self.mappings[k] = len(self.metrics) - 1
# Recompute font size metrics
self._calculate_font_size()
def load(filename):
"""
Load a :py:class:`luma.core.bitmap_font` file. This function creates a
:py:class:`luma.core.bitmap_font` object from the given :py:class:`luma.core.bitmap_font`
file, and returns the corresponding font object.
:param filename: Filename of font file.
:type filename: str
:return: A :py:class:`luma.core.bitmap_font` object.
:exception OSError: If the file could not be read.
:exception SyntaxError: If the file does not contain the expected data
"""
f = bitmap_font()
f.load(filename)
return f
def loads(data):
"""
Load a :py:class:`luma.core.bitmap_font` from a string of serialized data. This function
creates a :py:class:`luma.core.bitmap_font` object from serialized data produced from the
``dumps`` method and returns the corresponding font object.
:param data: Serialized :py:class:`luma.core.bitmap_font` data.
:type data: str
:return: A :py:class:`luma.core.bitmap_font` object.
    :exception ValueError: If the data is not valid luma.core.bitmap_font data
"""
f = bitmap_font()
f.loads(data)
return f
def load_pillow_font(filename, mappings=None):
"""
Load a PIL font file. This function creates a luma.core.bitmap_font object
from the given PIL bitmap font file, and returns the corresponding font object.
:param filename: Filename of font file.
:type filename: str
:param mappings: a dictionary of unicode to value pairs (optional)
:type mappings: dict
:return: A font object.
:exception OSError: If the file could not be read.
:exception SyntaxError: If the file does not contain the expected data
"""
f = bitmap_font()
f.load_pillow_font(filename, mappings)
return f
def load_sprite_table(sprite_table, index, xwidth, glyph_size, cell_size=None, mappings=None):
"""
Create a :py:class:`luma.core.bitmap_font` from a sprite table.
:param sprite_table: Filename of a sprite_table file or a PIL.Image containing the
sprite_table
:type sprite_table: str or PIL.Image
:param index: The list of character values contained within sprite_table.
This list MUST be in the same order that the glyphs for the characters
appear within the sprite_table (in left to right, top to bottom order)
:type index: list or other iterable
:param xwidth: number of pixels between placements of each character in a
line of text
:type xwidth: int
:param glyph_size: tuple containing the width and height of each character
in the font
:type glyph_size: tuple(int, int)
:param cell_size: tuple containing the width and height of each cell in the
sprite table. Defaults to the size of the glyphs.
:type cell_size: tuple(int, int)
:param mappings: a dictionary of unicode to value pairs (optional)
:type mappings: dict
:return: A font object.
:exception OSError: If the file could not be read.
:exception SyntaxError: If the file does not contain the expected data
.. note:
Requires a font where each character is the same size with no horizontal
        or vertical offset and has a consistent horizontal distance between each
character
"""
f = bitmap_font()
need_to_close = False
if type(sprite_table) is str:
try:
sprite_table = Image.open(sprite_table)
need_to_close = True
# Differentiate between file not found and invalid sprite table
except FileNotFoundError:
raise
except IOError:
raise ValueError(f'File {sprite_table} not a valid sprite table')
if isinstance(sprite_table, Image.Image):
cell_size = cell_size if cell_size is not None else glyph_size
f.load_sprite_table(sprite_table, index, xwidth, glyph_size, cell_size, mappings)
else:
raise ValueError('Provided image is not an instance of PIL.Image')
if need_to_close:
sprite_table.close()
return f
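def _example_load_sprite_table():
    """
    Illustrative sketch (not part of the original module): building a
    :py:class:`luma.core.bitmap_font` from a sprite table and measuring text
    with it. The blank 1-bit image below is only a placeholder for a real
    sprite table containing 8x8 glyphs for ASCII 0x20-0x7E.
    """
    table = Image.new('1', (16 * 8, 6 * 8))  # 16 columns x 6 rows of 8x8 cells
    font = load_sprite_table(table, range(0x20, 0x7F), xwidth=8, glyph_size=(8, 8))
    return font.getsize("Hello")  # (40, 8): five 8-pixel advances, 8 pixels tall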
class embedded_fonts(ImageFont.ImageFont):
"""
Utility class to manage the set of fonts that are embedded within a
compatible device.
:param data: The font data from the device. See note below.
:type data: dict
:param selected_font: The font that should be loaded as this device's
default. Will accept the font's index or its name.
:type selected_font: str or int
    .. note:
The class is used by devices which have embedded fonts and is not intended
to be used directly. To initialize it requires providing a dictionary
of font data including a `PIL.Image.tobytes` representation of a
sprite_table which contains the glyphs of the font organized in
consistent rows and columns, a metrics dictionary which provides the
information on how to retrieve fonts from the sprite_table, and a
mappings dictionary that provides unicode to table mappings.
.. versionadded:: 1.16.0
"""
def __init__(self, data, selected_font=0):
self.data = data
self.font_by_number = {}
self.names_index = {}
for i in range(len(data['metrics'])):
name = data['metrics'][i]['name']
self.names_index[name] = i
self.current = selected_font
def load(self, val):
"""
Load a font by its index value or name and return it
:param val: The index or the name of the font to return
:type val: int or str
"""
if type(val) is str:
if val in self.names_index:
index = self.names_index[val]
else:
raise ValueError(f'No font with name {val}')
elif type(val) is int:
if val in range(len(self.names_index)):
index = val
else:
raise ValueError(f'No font with index {val}')
else:
raise TypeError(f'Expected int or str. Received {type(val)}')
if index not in self.font_by_number:
i = index
index_list = self.data['metrics'][i]['index']
xwidth = self.data['metrics'][i]['xwidth']
cell_size = self.data['metrics'][i]['cell_size']
glyph_size = self.data['metrics'][i]['glyph_size']
table_size = self.data['metrics'][i]['table_size']
mappings = self.data['mappings'][i] if 'mappings' in self.data else None
sprite_table = Image.frombytes('1', table_size, self.data['fonts'][i])
font = load_sprite_table(sprite_table, index_list, xwidth, glyph_size, cell_size, mappings)
self.font_by_number[i] = font
return self.font_by_number[index]
@property
def current(self):
"""
Returns the currently selected font
"""
return self.font
@current.setter
def current(self, val):
"""
Sets the current font, loading the font if it has not previously been selected
:param val: The name or index number of the selected font.
:type val: str or int
"""
self.font = self.load(val)
def combine(self, font, characters=None, force=False):
"""
Combine the current font with a new one
:param font: The font to combine with the current font
:type font: :py:class:`luma.core.bitmap_font`
:param characters: (Optional) A list of characters to move from the new font to the
current font. If not provided all characters from the new font will
be transferred.
:type characters: list of unicode characters
:param force: Determines if conflicting characters should be ignored (default)
or overwritten.
.. note:
            This does not permanently change the embedded font. If you set the value
            of current again, even to the same font, the changes made by combine
            will be lost.
"""
destination = deepcopy(self.font)
destination.combine(font, characters, force)
self.font = destination
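def _example_combine_fonts():
    """
    Illustrative sketch (not part of the original module): combining glyphs
    from a second bitmap_font, as described in :py:meth:`bitmap_font.combine`.
    Both fonts are built from blank placeholder sprite tables.
    """
    base = load_sprite_table(Image.new('1', (16 * 6, 8)), range(0x20, 0x30),
                             xwidth=6, glyph_size=(6, 8))
    digits = load_sprite_table(Image.new('1', (16 * 6, 8)), range(0x30, 0x40),
                               xwidth=6, glyph_size=(6, 8))
    base.combine(digits)        # copy every glyph of `digits` into `base`
    return base.getsize("0 1")  # digits now resolve through the combined mappings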
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/models/device_management_configuration_exchange_online_setting_applicability.py
|
from __future__ import annotations
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import device_management_configuration_setting_applicability
from . import device_management_configuration_setting_applicability
class DeviceManagementConfigurationExchangeOnlineSettingApplicability(device_management_configuration_setting_applicability.DeviceManagementConfigurationSettingApplicability):
def __init__(self,) -> None:
"""
Instantiates a new DeviceManagementConfigurationExchangeOnlineSettingApplicability and sets the default values.
"""
super().__init__()
self.odata_type = "#microsoft.graph.deviceManagementConfigurationExchangeOnlineSettingApplicability"
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> DeviceManagementConfigurationExchangeOnlineSettingApplicability:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: DeviceManagementConfigurationExchangeOnlineSettingApplicability
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return DeviceManagementConfigurationExchangeOnlineSettingApplicability()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from . import device_management_configuration_setting_applicability
fields: Dict[str, Callable[[Any], None]] = {
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
        Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
super().serialize(writer)
|
PypiClean
|
/fhi-vibes-1.0.5.tar.gz/fhi-vibes-1.0.5/vibes/cli/scripts/get_relaxation_info.py
|
# Find the optimizer type
def get_optimizer(f):
    """Find the optimizer type
Parameters
----------
    f: TextIO
file to search through
Returns
-------
int
Optimizer type, 1 for Textbook BFGS, 2 for TRM, -1 for undefined
"""
try:
line = next(l for l in f if "Geometry relaxation:" in l)
except StopIteration:
exit("Optimizer not found -- is this output from a relaxation?")
if "Textbook BFGS" in line:
return 1
if "TRM" in line:
return 2
return -1
# find energy
def get_energy(f):
"""Find the total energy for the calculation
Parameters
----------
f: str
file to search through
Returns
-------
total_energy: float
        the corrected total energy of the structure
free_energy: float
the electronic free energy of the structure
"""
spacegroup = None
total_energy = None
for line in f:
if "Space group" in line:
spacegroup = int(line.split()[4])
if "| Total energy corrected :" in line:
total_energy = float(line.split()[5])
break
if "| Electronic free energy :" in line:
free_energy = float(line.split()[5])
if not total_energy:
raise StopIteration
return total_energy, free_energy, spacegroup
# get max_force
def get_forces(f):
"""Find the maximum force component
Parameters
----------
    f: TextIO
file to search through
Returns
-------
float
The maximum force component of the structure
"""
line = next(l for l in f if "Maximum force component" in l)
return float(line.split()[4])
# get current volume
def get_volume(f):
"""Find the volume of the structure
Parameters
----------
f: str
file to search through
Returns
-------
float
        The structure's volume
"""
for line in f:
if "| Unit cell volume " in line:
return float(line.split()[5])
if "Begin self-consistency loop:" in line:
return -1
if "Final output of selected total energy values:" in line:
return -1
return -1
# parse info of one step
def parser(f, n_init=0, optimizer=2):
"""Parse info of one step
Parameters
----------
    f: TextIO
file to search through
n_init: int
The initial step
optimizer: int
Optimizer type, 1 for Textbook BFGS, 2 for TRM, -1 for undefined
Yields
------
n_rel: int
Current relaxation step
energy: float
        The corrected total energy of the step
free_energy: float
The electronic free energy of the step
max_force: float
The maximum force component of the step
volume: float
The volume of the step
status: int
        status of the step: 0 is normal, 1 means a counterproductive step was reverted, 2 means the optimizer is stuck
converged: bool
If True the relaxation is converged
abort: int
If 1 the relaxation is aborting
"""
n_rel = n_init
converged = 0
abort = 0
volume = -1
while not converged and not abort:
n_rel += 1
status = 0
try:
energy, free_energy, spacegroup = get_energy(f)
max_force = get_forces(f)
except StopIteration:
break
for line in f:
if "Present geometry is converged." in line:
converged = 1
break
elif "Advancing" in line:
pass
elif "Aborting optimization" in line:
abort = 1
elif "Counterproductive step -> revert!" in line:
status = 1
elif "Optimizer is stuck" in line:
status = 2
# elif '**' in line:
# status = 3
elif "Finished advancing geometry" in line:
volume = get_volume(f)
break
elif "Updated atomic structure" in line:
volume = get_volume(f)
break
yield (
n_rel,
energy,
free_energy,
max_force,
volume,
spacegroup,
status,
converged,
abort,
)
def print_status(
n_rel, energy, de, free_energy, df, max_force, volume, spacegroup, status_string
):
"""Print the status line, skip volume if not found
Parameters
----------
n_rel: int
Current relaxation step
energy: float
The total energy corrected of the step
de: float
Change in total energy
free_energy: float
The electronic free energy of the step
df: float
Change in electronic free energy
max_force: float
The maximum force component of the step
volume: float
The volume of the step
status_string: str
The status of the relaxation
"""
if volume and volume > 0:
vol_str = f"{volume:15.4f}"
else:
vol_str = ""
if spacegroup:
sg_str = f"{spacegroup:5d}"
else:
sg_str = ""
print(
"{:5d} {:16.8f} {:16.8f} {:14.6f} {:20.6f} {} {} {}".format(
n_rel,
energy,
free_energy,
df,
max_force * 1000,
vol_str,
status_string,
sg_str,
)
)
def get_relaxation_info(files):
"""print information about relaxation performed with FHIaims
Parameters
----------
files: list of str
The file paths of the aims.out files to analyze
"""
init, n_rel, converged, abort = 4 * (None,)
status_string = [
"",
"rejected.",
"rejected: force <-> energy inconsistency?",
"stuck.",
]
# Run
print(
"\n# Step Total energy [eV] Free energy [eV] F-F(1)"
+ " [meV] max. force [meV/AA] Volume [AA^3] Spacegroup\n"
)
converged, abort = False, False
for infile in files:
with open(infile) as f:
# Check optimizer
optimizer = get_optimizer(f)
ps = parser(f, n_init=n_rel or 0, optimizer=optimizer)
for (n_rel, ener, free_ener, fmax, vol, sg, status, _conv, _abort) in ps:
if not init:
first_energy, first_free_energy = ener, free_ener
init = 1
print_status(
n_rel,
ener,
1000 * (ener - first_energy),
free_ener,
1000 * (free_ener - first_free_energy),
fmax,
vol,
sg,
status_string[status],
)
converged, abort = _conv, _abort
if converged:
print("--> converged.")
if abort:
print("*--> aborted, too many steps.")
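def _example_parse_single_file(path="aims.out"):
    """Illustrative sketch (not part of the original script): driving the
    parser directly on a single output file instead of going through the CLI.
    The path is a made-up placeholder."""
    with open(path) as f:
        optimizer = get_optimizer(f)
        # one 9-tuple per relaxation step: (n_rel, energy, free_energy,
        # max_force, volume, spacegroup, status, converged, abort)
        return list(parser(f, optimizer=optimizer))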
def main():
"""wrap get_relaxation_info"""
from argparse import ArgumentParser as argpars
parser = argpars(description="Summarize the relaxation path")
parser.add_argument("aimsouts", type=str, nargs="+", help="aims output files")
args = parser.parse_args()
get_relaxation_info(args.aimsouts)
if __name__ == "__main__":
main()
|
PypiClean
|
/v2/model/create_sub_customer_response.py
|
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class CreateSubCustomerResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'domain_id': 'str',
'domain_name': 'str'
}
attribute_map = {
'domain_id': 'domain_id',
'domain_name': 'domain_name'
}
def __init__(self, domain_id=None, domain_name=None):
"""CreateSubCustomerResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._domain_id = None
self._domain_name = None
self.discriminator = None
if domain_id is not None:
self.domain_id = domain_id
if domain_name is not None:
self.domain_name = domain_name
@property
def domain_id(self):
"""Gets the domain_id of this CreateSubCustomerResponse.
        |Parameter name: customer ID| |Constraints and description: returned only on success, or when the customer's authorization to the partner fails with error CBC.5025; a string of at most 64 characters|
:return: The domain_id of this CreateSubCustomerResponse.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this CreateSubCustomerResponse.
        |Parameter name: customer ID| |Constraints and description: returned only on success, or when the customer's authorization to the partner fails with error CBC.5025; a string of at most 64 characters|
:param domain_id: The domain_id of this CreateSubCustomerResponse.
:type: str
"""
self._domain_id = domain_id
@property
def domain_name(self):
"""Gets the domain_name of this CreateSubCustomerResponse.
        |Parameter name: user login name| |Constraints and description: returned only on success; a string of at most 64 characters|
:return: The domain_name of this CreateSubCustomerResponse.
:rtype: str
"""
return self._domain_name
@domain_name.setter
def domain_name(self, domain_name):
"""Sets the domain_name of this CreateSubCustomerResponse.
        |Parameter name: user login name| |Constraints and description: returned only on success; a string of at most 64 characters|
:param domain_name: The domain_name of this CreateSubCustomerResponse.
:type: str
"""
self._domain_name = domain_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateSubCustomerResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
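def _example_to_dict():
    """Illustrative sketch (not part of the generated SDK module): the model
    can be built directly and inspected via to_dict(); in practice the SDK
    constructs it from the CreateSubCustomer API response. The values below
    are made-up placeholders."""
    resp = CreateSubCustomerResponse(domain_id="example-domain-id",
                                     domain_name="example-login-name")
    return resp.to_dict()  # {'domain_id': ..., 'domain_name': ...}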
|
PypiClean
|
/yaqc-cmds-2022.3.0.tar.gz/yaqc-cmds-2022.3.0/yaqc_cmds/somatic/modules/motortune.py
|
import pathlib
import numpy as np
import matplotlib.pyplot  # ensure pyplot is loaded before turning interactive mode off
matplotlib.pyplot.ioff()
import WrightTools as wt
import yaqc_cmds.project.classes as pc
import yaqc_cmds.project.widgets as pw
import yaqc_cmds.somatic.acquisition as acquisition
import yaqc_cmds.sensors.signals as sensor_signals
import yaqc_cmds
import yaqc_cmds.hardware.opas as opas
import yaqc_cmds.hardware.spectrometers as spectrometers
import yaqc_cmds.sensors as sensors
from yaqc_cmds.somatic import _wt5
### define ####################################################################
module_name = "MOTORTUNE"
### custom classes ############################################################
class MotorGUI:
def __init__(self, name, center, width, number, use_tune_points):
self.name = name
self.use_tune_points = use_tune_points
self.input_table = pw.InputTable()
self.input_table.add(name, None)
allowed = ["Set", "Scan", "Static"]
self.method = pc.Combo(allowed_values=allowed)
if self.use_tune_points is not None:
self.use_tune_points.updated.connect(self.update_disabled)
self.method.updated.connect(self.update_disabled)
self.input_table.add("Method", self.method)
self.center = pc.Number(initial_value=center)
self.input_table.add("Center", self.center)
self.width = pc.Number(initial_value=width)
self.input_table.add("Width", self.width)
self.npts = pc.Number(initial_value=number, decimals=0)
self.input_table.add("Number", self.npts)
self.update_disabled()
def update_disabled(self):
self.center.set_disabled(True)
self.width.set_disabled(True)
self.npts.set_disabled(True)
method = self.method.read()
if method == "Set":
self.center.set_disabled(self.use_tune_points.read())
elif method == "Scan":
self.center.set_disabled(self.use_tune_points.read())
self.width.set_disabled(False)
self.npts.set_disabled(False)
elif method == "Static":
self.center.set_disabled(False)
class OPA_GUI:
def __init__(self, hardware, layout, use_tune_points):
self.hardware = hardware
motor_names = self.hardware.motor_names
self.motors = []
for name in motor_names:
motor = MotorGUI(name, 30, 1, 11, use_tune_points)
if layout is not None:
layout.addWidget(motor.input_table)
self.motors.append(motor)
self.hide() # initialize hidden
def hide(self):
for motor in self.motors:
motor.input_table.hide()
def show(self):
for motor in self.motors:
motor.input_table.show()
### Worker ####################################################################
class Worker(acquisition.Worker):
def process(self, scan_folder):
with _wt5.data_container as data:
# decide which channels to make plots for
channel_name = self.aqn.read("processing", "channel")
# make figures for each channel
data_path = pathlib.Path(_wt5.data_container.data_filepath)
data_folder = data_path.parent
# make all images
channel_path = data_folder / channel_name
output_path = data_folder
if data.ndim > 2:
output_path = channel_path
channel_path.mkdir()
image_fname = channel_name
if data.ndim == 1:
outs = wt.artists.quick1D(
data,
channel=channel_name,
autosave=True,
save_directory=output_path,
fname=image_fname,
verbose=False,
)
else:
outs = wt.artists.quick2D(
data,
-1,
-2,
channel=channel_name,
autosave=True,
save_directory=output_path,
fname=image_fname,
verbose=False,
)
# get output image
if len(outs) == 1:
output_image_path = outs[0]
else:
output_image_path = output_path / "animation.gif"
wt.artists.stitch_to_animation(images=outs, outpath=output_image_path)
# upload
self.upload(scan_folder, reference_image=str(output_image_path))
def run(self):
# assemble axes
axes = []
# get OPA properties
opa_name = self.aqn.read("motortune", "opa name")
opa_names = [h.name for h in opas.hardwares]
opa_index = opa_names.index(opa_name)
opa_hardware = opas.hardwares[opa_index]
opa_friendly_name = opa_hardware.name
curve = opa_hardware.curve
arrangement = opa_hardware.curve.arrangements[opa_hardware.arrangement]
motor_names = self.aqn.read("motortune", "motor names")
scanned_motors = [m for m in motor_names if self.aqn.read(m, "method") == "Scan"]
tune_units = "nm" # needs update if/when attune supports other units for independents
# tune points
if self.aqn.read("motortune", "use tune points"):
tune_points = get_tune_points(curve, arrangement, scanned_motors)
motors_excepted = [] # list of indicies
for motor_name in motor_names:
if not self.aqn.read(motor_name, "method") == "Set":
motors_excepted.append(motor_name)
if self.aqn.read("spectrometer", "method") == "Set":
hardware_dict = {
opa_friendly_name: [
opa_hardware,
"set_position_except",
["destination", motors_excepted, "units"],
],
"wm": [spectrometers.hardwares[0], "set_position", None],
}
axis = acquisition.Axis(
tune_points,
tune_units,
opa_friendly_name,
hardware_dict,
)
axes.append(axis)
else:
hardware_dict = {
opa_friendly_name: [
opa_hardware,
"set_position_except",
["destination", motors_excepted, "units"],
]
}
axis = acquisition.Axis(
tune_points,
tune_units,
opa_friendly_name,
hardware_dict,
)
axes.append(axis)
# motor
for motor_name in motor_names:
if self.aqn.read(motor_name, "method") == "Scan":
motor_units = None
name = "_".join([opa_friendly_name, motor_name])
width = self.aqn.read(motor_name, "width") / 2.0
npts = int(self.aqn.read(motor_name, "number"))
if self.aqn.read("motortune", "use tune points"):
center = 0.0
kwargs = {
"centers": [curve(t, arrangement.name)[motor_name] for t in tune_points]
}
else:
center = self.aqn.read(motor_name, "center")
kwargs = {}
points = np.linspace(center - width, center + width, npts)
hardware_dict = {name: [opa_hardware, "set_motor", [motor_name, "destination"]]}
axis = acquisition.Axis(points, motor_units, name, hardware_dict, **kwargs)
axes.append(axis)
elif self.aqn.read(motor_name, "method") == "Set":
pass
elif self.aqn.read(motor_name, "method") == "Static":
opa_hardware.q.push("set_motor", [motor_name, self.aqn.read(motor_name, "center")])
# mono
if self.aqn.read("spectrometer", "method") == "Scan":
name = "wm"
units = "wn"
width = self.aqn.read("spectrometer", "width") / 2.0
npts = int(self.aqn.read("spectrometer", "number"))
if self.aqn.read("motortune", "use tune points"):
center = 0.0
kwargs = {"centers": wt.units.convert(tune_points, tune_units, units)}
else:
center = self.aqn.read("spectrometer", "center")
center = wt.units.convert(
center, self.aqn.read("spectrometer", "center units"), "wn"
)
kwargs = {}
points = np.linspace(center - width, center + width, npts)
axis = acquisition.Axis(points, units, name, **kwargs)
axes.append(axis)
elif self.aqn.read("spectrometer", "method") == "Set":
if self.aqn.read("motortune", "use tune points"):
# already handled above
pass
else:
center = self.aqn.read("spectrometer", "center")
center = wt.units.convert(
center, self.aqn.read("spectrometer", "center units"), "wn"
)
spectrometers.hardwares[0].set_position(center, "wn")
elif self.aqn.read("spectrometer", "method") == "Static":
center = self.aqn.read("spectrometer", "center")
center = wt.units.convert(center, self.aqn.read("spectrometer", "center units"), "wn")
spectrometers.hardwares[0].set_position(center, "wn")
# handle centers
for axis_index, axis in enumerate(axes):
centers_shape = [a.points.size for i, a in enumerate(axes) if not i == axis_index]
ones = np.ones(centers_shape)
if hasattr(axis, "centers"):
# arrays always follow
axis.centers = np.transpose(axis.centers * ones.T)
# launch
pre_wait_methods = [
lambda: opa_hardware.q.push("wait_until_still"),
lambda: opa_hardware.q.push("get_motor_positions"),
lambda: opa_hardware.q.push("get_position"),
]
# do scan
self.scan(axes, constants=[], pre_wait_methods=pre_wait_methods)
if not self.stopped.read():
self.finished.write(True) # only if acquisition successfull
### GUI #######################################################################
class GUI(acquisition.GUI):
def create_frame(self):
# shared settings
input_table = pw.InputTable()
allowed = [hardware.name for hardware in opas.hardwares]
self.opa_combo = pc.Combo(allowed)
input_table.add("OPA", self.opa_combo)
self.opa_combo.updated.connect(self.on_opa_combo_updated)
self.use_tune_points = pc.Bool(initial_value=True)
input_table.add("Use Tune Points", self.use_tune_points)
self.layout.addWidget(input_table)
# motor settings
self.opa_guis = [
OPA_GUI(hardware, self.layout, self.use_tune_points) for hardware in opas.hardwares
]
self.opa_guis[0].show()
# mono settings
allowed = ["Set", "Scan", "Static"]
self.mono_method_combo = pc.Combo(allowed, disable_under_module_control=True)
self.mono_method_combo.updated.connect(self.update_mono_settings)
self.mono_center = pc.Number(
initial_value=7000, units="wn", disable_under_module_control=True
)
self.mono_width = pc.Number(
initial_value=500, units="wn", disable_under_module_control=True
)
self.mono_width.set_disabled_units(True)
self.mono_npts = pc.Number(initial_value=51, decimals=0, disable_under_module_control=True)
input_table = pw.InputTable()
input_table.add("Spectrometer", None)
input_table.add("Method", self.mono_method_combo)
input_table.add("Center", self.mono_center)
input_table.add("Width", self.mono_width)
input_table.add("Number", self.mono_npts)
self.layout.addWidget(input_table)
self.update_mono_settings()
# processing
input_table = pw.InputTable()
input_table.add("Processing", None)
self.do_post_process = pc.Bool(initial_value=True)
input_table.add("Process", self.do_post_process)
# TODO: allowed values, update
channel_names = list(yaqc_cmds.sensors.get_channels_dict().keys())
if (
"main_channel" not in self.state.keys()
or self.state["main_channel"] not in channel_names
):
self.state["main_channel"] = channel_names[0]
self.main_channel = pc.Combo(
allowed_values=channel_names,
initial_value=self.state["main_channel"],
)
input_table.add("Channel", self.main_channel)
self.layout.addWidget(input_table)
sensor_signals.channels_changed.connect(self.on_device_settings_updated)
def load(self, aqn_path):
aqn = wt.kit.INI(aqn_path)
# shared settings
self.opa_combo.write(aqn.read("motortune", "opa name"))
self.use_tune_points.write(aqn.read("motortune", "use tune points"))
# motor settings
opa = self.opa_guis[self.opa_combo.read_index()]
for motor, motor_name in zip(opa.motors, aqn.read("motortune", "motor names")):
motor.method.write(aqn.read(motor_name, "method"))
motor.center.write(aqn.read(motor_name, "center"))
motor.width.write(aqn.read(motor_name, "width"))
motor.npts.write(aqn.read(motor_name, "number"))
# mono settings
self.mono_method_combo.write(aqn.read("spectrometer", "method"))
self.mono_center.write(aqn.read("spectrometer", "center"))
self.mono_width.write(aqn.read("spectrometer", "width"))
self.mono_npts.write(aqn.read("spectrometer", "number"))
# processing
self.do_post_process.write(aqn.read("processing", "do post process"))
self.main_channel.write(aqn.read("processing", "channel"))
# allow sensors to read from aqn
# self.device_widget.load(aqn_path)
def on_device_settings_updated(self):
channel_names = list(yaqc_cmds.sensors.get_channels_dict().keys())
self.main_channel.set_allowed_values(channel_names)
def on_opa_combo_updated(self):
self.show_opa_gui(self.opa_combo.read_index())
def save(self, aqn_path):
aqn = wt.kit.INI(aqn_path)
opa = self.opa_guis[self.opa_combo.read_index()]
scanned_motor_names = []
for motor in opa.motors:
            if motor.method.read() == "Scan":
scanned_motor_names.append(motor.name)
scanned_motor_names = str(scanned_motor_names).replace("'", "")
aqn.write(
"info",
"description",
"MOTORTUNE: {} {}".format(self.opa_combo.read(), scanned_motor_names),
)
# shared settings
aqn.add_section("motortune")
aqn.write("motortune", "opa name", self.opa_combo.read())
aqn.write(
"motortune",
"motor names",
[motor.name for motor in self.opa_guis[self.opa_combo.read_index()].motors],
)
aqn.write("motortune", "use tune points", self.use_tune_points.read())
# motor settings
for motor in opa.motors:
aqn.add_section(motor.name)
aqn.write(motor.name, "method", motor.method.read())
aqn.write(motor.name, "center", motor.center.read())
aqn.write(motor.name, "width", motor.width.read())
aqn.write(motor.name, "number", motor.npts.read())
# mono settings
aqn.add_section("spectrometer")
aqn.write("spectrometer", "method", self.mono_method_combo.read())
aqn.write("spectrometer", "center", self.mono_center.read())
aqn.write("spectrometer", "center units", self.mono_center.units)
aqn.write("spectrometer", "width", self.mono_width.read())
aqn.write("spectrometer", "number", self.mono_npts.read())
# processing
aqn.add_section("processing")
aqn.write("processing", "do post process", self.do_post_process.read())
aqn.write("processing", "channel", self.main_channel.read())
# allow sensors to save to aqn
# self.device_widget.save(aqn_path)
def show_opa_gui(self, index):
for gui in self.opa_guis:
gui.hide()
self.opa_guis[index].show()
def update_mono_settings(self):
self.mono_center.set_disabled(True)
self.mono_width.set_disabled(True)
self.mono_npts.set_disabled(True)
method = self.mono_method_combo.read()
if method == "Set":
self.mono_center.set_disabled(self.use_tune_points.read())
elif method == "Scan":
self.mono_center.set_disabled(self.use_tune_points.read())
self.mono_width.set_disabled(False)
self.mono_npts.set_disabled(False)
elif method == "Static":
self.mono_center.set_disabled(False)
def save_state(self):
        self.state["main_channel"] = self.main_channel.read()
super().save_state()
def mkGUI():
global gui
gui = GUI(module_name)
def load():
return True
def get_tune_points(instrument, arrangement, scanned_motors):
min_ = arrangement.ind_min
max_ = arrangement.ind_max
if not scanned_motors:
scanned_motors = arrangement.keys()
inds = []
for scanned in scanned_motors:
if scanned in arrangement.keys() and hasattr(arrangement[scanned], "independent"):
inds += [arrangement[scanned].independent]
continue
for name in arrangement.keys():
if (
name in instrument.arrangements
and scanned in instrument(instrument[name].ind_min, name).keys()
and hasattr(arrangement[scanned], "independent")
):
inds += [arrangement[scanned].independent]
if len(inds) > 1:
inds = np.concatenate(inds)
else:
inds = inds[0]
unique = np.unique(inds)
tol = 1e-3 * (max_ - min_)
diff = np.append(tol * 2, np.diff(unique))
return unique[diff > tol]
|
PypiClean
|
/django-things-0.4.5.tar.gz/django-things-0.4.5/things/renderers.py
|
from math import ceil
from datetime import datetime
from django_medusa.renderers import StaticSiteRenderer
from django.utils import timezone
from django.conf import settings
from .models import Thing, StaticBuild
from snippets.models import Snippet
class ThingRenderer(StaticSiteRenderer):
def get_paths(self):
# A "set" so we can throw items in blindly and be guaranteed that
# we don't end up with dupes.
paths = set(['/', '/feed/', '/sitemap.xml'])
for subclass in Thing.__subclasses__():
# Special case for built-in pages module
if subclass._meta.app_label == "pages":
has_urls = True
else:
try:
__import__('.'.join([subclass._meta.app_label, 'urls']))
has_urls = True
except ImportError:
print "No urls for %s found" % subclass._meta.app_label
has_urls = False
if has_urls:
# Get the latest StaticBuild time to see what we need to rebuild
rebuild_list = False
latest_static_build = StaticBuild.objects.order_by('-created_at')
if latest_static_build:
dt = latest_static_build[0].created_at
else:
dt = timezone.make_aware(datetime(1, 1, 1), timezone.get_current_timezone())
# if a snippet changes, rebuild everything
snips = Snippet.objects.filter(updated_at__gte=dt)
for item in subclass.objects.filter(**subclass.public_filter_out):
# Thing detail view
# Only add the path if a snippet has been changed or if
# the item had been updated since the last build.
if item.updated_at > dt or snips:
paths.add(item.get_absolute_url())
if settings.FULL_STATIC_SITE:
rebuild_list = True
if subclass._meta.app_label != "pages":
if rebuild_list:
list_view = "/%s/" % subclass._meta.app_label
paths.add(list_view)
# Add in paginated pages
for p in xrange(int(ceil(subclass.objects.count()/float(20)))):
paths.add("%s%s/" % (list_view, (p + 1)))
# Cast back to a list since that's what we're expecting.
return list(paths)
renderers = [ThingRenderer, ]
|
PypiClean
|
/confluent-kafka-pypy-1.9.2.tar.gz/confluent-kafka-pypy-1.9.2/src/confluent_kafka/kafkatest/verifiable_client.py
|
import datetime
import json
import os
import re
import signal
import socket
import sys
import time
class VerifiableClient(object):
"""
Generic base class for a kafkatest verifiable client.
Implements the common kafkatest protocol and semantics.
"""
def __init__(self, conf):
"""
"""
super(VerifiableClient, self).__init__()
self.conf = conf
self.conf['client.id'] = 'python@' + socket.gethostname()
self.run = True
signal.signal(signal.SIGTERM, self.sig_term)
self.dbg('Pid is %d' % os.getpid())
def sig_term(self, sig, frame):
self.dbg('SIGTERM')
self.run = False
@staticmethod
def _timestamp():
return time.strftime('%H:%M:%S', time.localtime())
def dbg(self, s):
""" Debugging printout """
sys.stderr.write('%% %s DEBUG: %s\n' % (self._timestamp(), s))
def err(self, s, term=False):
""" Error printout, if term=True the process will terminate immediately. """
sys.stderr.write('%% %s ERROR: %s\n' % (self._timestamp(), s))
if term:
sys.stderr.write('%% FATAL ERROR ^\n')
sys.exit(1)
    def send(self, d):
        """ Send dict as JSON to stdout for consumption by kafkatest handler """
d['_time'] = str(datetime.datetime.now())
self.dbg('SEND: %s' % json.dumps(d))
sys.stdout.write('%s\n' % json.dumps(d))
sys.stdout.flush()
@staticmethod
def set_config(conf, args):
""" Set client config properties using args dict. """
        for n, v in args.items():
if v is None:
continue
if n.startswith('topicconf_'):
conf[n[10:]] = v
continue
if not n.startswith('conf_'):
# App config, skip
continue
# Remove conf_ prefix
n = n[5:]
# Handle known Java properties to librdkafka properties.
if n == 'partition.assignment.strategy':
# Convert Java class name to config value.
# "org.apache.kafka.clients.consumer.RangeAssignor" -> "range"
v = re.sub(r'org.apache.kafka.clients.consumer.(\w+)Assignor',
lambda x: x.group(1).lower(), v)
if v == 'sticky':
v = 'cooperative-sticky'
conf[n] = v
@staticmethod
def read_config_file(path):
"""Read (java client) config file and return dict with properties"""
conf = {}
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('#') or len(line) == 0:
continue
fi = line.find('=')
if fi < 1:
raise Exception('%s: invalid line, no key=value pair: %s' % (path, line))
k = line[:fi]
v = line[fi+1:]
conf[k] = v
return conf
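def _example_set_config():
    """Illustrative sketch (not part of the original module): how set_config()
    maps kafkatest-style argument names onto client configuration properties.
    The argument names below are made-up examples of the conf_/topicconf_
    convention."""
    conf = {}
    VerifiableClient.set_config(conf, {
        'conf_partition.assignment.strategy':
            'org.apache.kafka.clients.consumer.RangeAssignor',
        'topicconf_acks': 'all',  # topicconf_ prefix is stripped
        'verbose': True,          # app-level option, skipped
    })
    return conf  # {'partition.assignment.strategy': 'range', 'acks': 'all'}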
|
PypiClean
|
/pytest_insta-0.2.0.tar.gz/pytest_insta-0.2.0/pytest_insta/review.py
|
__all__ = ["ReviewTool"]
import os
from code import InteractiveConsole
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Collection, Dict, Iterator, List, Optional, Tuple
from _pytest.terminal import TerminalReporter
from .format import Fmt
from .utils import node_path_name
class ReviewEnvironment(Dict[str, Any]):
outcome: Optional[Tuple[str, str]] = None
decisions = {
"a": "accepting snapshot",
"r": "rejecting snapshot",
"s": "skipping snapshot",
}
def __getitem__(self, key: str) -> Any:
if value := self.decisions.get(key):
self.outcome = key, value
return None
return super().__getitem__(key)
class ReviewConsole(InteractiveConsole):
def raw_input(self, prompt: str = "") -> str:
if self.locals.outcome: # type: ignore
raise EOFError()
return super().raw_input(prompt)
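def _example_review_decision():
    """Illustrative sketch (not part of the original module): the review prompt
    is a small interactive console whose namespace is a ReviewEnvironment, so
    typing ``a``, ``r`` or ``s`` resolves as a name lookup and records the
    decision instead of raising NameError."""
    env = ReviewEnvironment(old="old value", new="new value")
    env["a"]            # what looking up the name `a` at the prompt ends up calling
    return env.outcome  # ("a", "accepting snapshot")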
@dataclass
class ReviewTool:
tr: TerminalReporter
config: Any
record_dir: Path
tests: Collection[Any]
def scan_recorded_snapshots(self) -> Iterator[Tuple[Any, Path, Path]]:
for test in self.tests:
path, name = node_path_name(test)
directory = path.parent.resolve().relative_to(self.config.rootpath)
for snapshot in (self.record_dir / directory).glob(f"{name}__*"):
original = Path(
os.path.relpath(
self.config.rootpath / directory / "snapshots" / snapshot.name,
Path(".").resolve(),
)
)
yield test, snapshot, original
def display_assertion(self, old: Any, new: Any):
self.tr.write_line("\n> assert old == new")
lines, *_ = self.config.hook.pytest_assertrepr_compare(
config=self.config, op="==", left=old, right=new
)
explanation = "assert " + "\n".join(" " + line for line in lines).strip()
for line in explanation.splitlines():
self.tr.write_line(f"E {line}", blue=True, bold=True)
def collect(self) -> Iterator[Tuple[Path, Optional[Path]]]:
to_review: List[Tuple[Any, Path, Path]] = []
for test, recorded, original in self.scan_recorded_snapshots():
if original.exists():
to_review.append((test, recorded, original))
else:
yield recorded, None
if to_review:
self.tr.write_line("")
self.tr.section("SNAPSHOT REVIEWS")
for i, (test, recorded, original) in enumerate(to_review):
self.tr.ensure_newline()
self.tr.section(f"[{i + 1}/{len(to_review)}]", "_", blue=True, bold=True)
self.tr.write_line(f"\nold: {original!s}")
self.tr.write_line(f"new: {recorded!s}")
if not (fmt := Fmt.from_spec(original.name)[1]):
self.tr.write_line(
f"\ninvalid snapshot format: {original.name!r}", red=True, bold=True
)
continue
old = fmt.load(original)
new = fmt.load(recorded)
self.display_assertion(old, new)
module, line, name = test.location
self.tr.write(f"\n{module}", blue=True, bold=True)
self.tr.write_line(f":{line + 1}: {name}")
decision, message = self.prompt(old, new)
self.tr.write_line(message, bold=True)
if decision == "a":
yield recorded, original
elif decision == "r":
yield recorded, None
def prompt(self, old: Any, new: Any) -> Tuple[str, str]:
review_env = ReviewEnvironment(old=old, new=new)
try:
import readline
import rlcompleter
readline.set_completer(rlcompleter.Completer(review_env).complete)
readline.parse_and_bind("tab: complete")
except ImportError:
pass
console = ReviewConsole(review_env)
console.interact("\na: accept, r: reject, s: skip", "")
return review_env.outcome or ("s", "skipping snapshot")
|
PypiClean
|
/tensorflow-2.1.1-cp36-cp36m-macosx_10_11_x86_64.whl/tensorflow_core/python/ops/gen_audio_ops.py
|
import collections
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
def audio_spectrogram(input, window_size, stride, magnitude_squared=False, name=None):
r"""Produces a visualization of audio data over time.
Spectrograms are a standard way of representing audio information as a series of
slices of frequency information, one slice for each window of time. By joining
these together into a sequence, they form a distinctive fingerprint of the sound
over time.
This op expects to receive audio data as an input, stored as floats in the range
-1 to 1, together with a window width in samples, and a stride specifying how
far to move the window between slices. From this it generates a three
dimensional output. The first dimension is for the channels in the input, so a
stereo audio input would have two here for example. The second dimension is time,
with successive frequency slices. The third dimension has an amplitude value for
each frequency during that time slice.
This means the layout when converted and saved as an image is rotated 90 degrees
clockwise from a typical spectrogram. Time is descending down the Y axis, and
the frequency decreases from left to right.
Each value in the result represents the square root of the sum of the real and
imaginary parts of an FFT on the current window of samples. In this way, the
lowest dimension represents the power of each frequency in the current window,
and adjacent windows are concatenated in the next dimension.
To get a more intuitive and visual look at what this operation does, you can run
tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
resulting spectrogram as a PNG image.
Args:
input: A `Tensor` of type `float32`. Float representation of audio data.
window_size: An `int`.
How wide the input window is in samples. For the highest efficiency
this should be a power of two, but other values are accepted.
stride: An `int`.
How widely apart the center of adjacent sample windows should be.
magnitude_squared: An optional `bool`. Defaults to `False`.
Whether to return the squared magnitude or just the
magnitude. Using squared magnitude can avoid extra calculations.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "AudioSpectrogram", name,
tld.op_callbacks, input, "window_size", window_size, "stride", stride,
"magnitude_squared", magnitude_squared)
return _result
except _core._FallbackException:
try:
return audio_spectrogram_eager_fallback(
input, window_size=window_size, stride=stride,
magnitude_squared=magnitude_squared, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
window_size = _execute.make_int(window_size, "window_size")
stride = _execute.make_int(stride, "stride")
if magnitude_squared is None:
magnitude_squared = False
magnitude_squared = _execute.make_bool(magnitude_squared, "magnitude_squared")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"AudioSpectrogram", input=input, window_size=window_size,
stride=stride,
magnitude_squared=magnitude_squared, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("window_size", _op._get_attr_int("window_size"), "stride",
_op._get_attr_int("stride"), "magnitude_squared",
_op._get_attr_bool("magnitude_squared"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"AudioSpectrogram", _inputs_flat, _attrs, _result)
_result, = _result
return _result
AudioSpectrogram = tf_export("raw_ops.AudioSpectrogram")(_ops.to_raw_op(audio_spectrogram))
def audio_spectrogram_eager_fallback(input, window_size, stride, magnitude_squared, name, ctx):
window_size = _execute.make_int(window_size, "window_size")
stride = _execute.make_int(stride, "stride")
if magnitude_squared is None:
magnitude_squared = False
magnitude_squared = _execute.make_bool(magnitude_squared, "magnitude_squared")
input = _ops.convert_to_tensor(input, _dtypes.float32)
_inputs_flat = [input]
_attrs = ("window_size", window_size, "stride", stride, "magnitude_squared",
magnitude_squared)
_result = _execute.execute(b"AudioSpectrogram", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"AudioSpectrogram", _inputs_flat, _attrs, _result)
_result, = _result
return _result
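# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated op definitions above): how the
# spectrogram output shape is commonly reasoned about. The FFT length used
# internally is assumed here to be the next power of two >= window_size, which
# may differ between TensorFlow versions; treat this helper as an estimate only.
def _example_spectrogram_shape(num_samples, window_size, stride):
  """Rough per-channel (time_slices, frequency_bins) estimate for AudioSpectrogram."""
  if num_samples < window_size:
    return 0, 0
  time_slices = 1 + (num_samples - window_size) // stride
  fft_length = 1
  while fft_length < window_size:
    fft_length *= 2
  frequency_bins = fft_length // 2 + 1
  return time_slices, frequency_bins
# ---------------------------------------------------------------------------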
_DecodeWavOutput = collections.namedtuple(
"DecodeWav",
["audio", "sample_rate"])
@_dispatch.add_dispatch_list
@tf_export('audio.decode_wav')
def decode_wav(contents, desired_channels=-1, desired_samples=-1, name=None):
r"""Decode a 16-bit PCM WAV file to a float tensor.
The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
When desired_channels is set, if the input contains fewer channels than this
then the last channel will be duplicated to give the requested number, else if
the input has more channels than requested then the additional channels will be
ignored.
If desired_samples is set, then the audio will be cropped or padded with zeroes
to the requested length.
The first output contains a Tensor with the content of the audio samples. The
lowest dimension will be the number of channels, and the second will be the
number of samples. For example, a ten-sample-long stereo WAV file should give an
output shape of [10, 2].
Args:
contents: A `Tensor` of type `string`.
The WAV-encoded audio, usually from a file.
desired_channels: An optional `int`. Defaults to `-1`.
Number of sample channels wanted.
desired_samples: An optional `int`. Defaults to `-1`.
Length of audio requested.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (audio, sample_rate).
audio: A `Tensor` of type `float32`.
sample_rate: A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "DecodeWav", name,
tld.op_callbacks, contents, "desired_channels", desired_channels,
"desired_samples", desired_samples)
_result = _DecodeWavOutput._make(_result)
return _result
except _core._FallbackException:
try:
return decode_wav_eager_fallback(
contents, desired_channels=desired_channels,
desired_samples=desired_samples, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
decode_wav, contents=contents,
desired_channels=desired_channels,
desired_samples=desired_samples, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if desired_channels is None:
desired_channels = -1
desired_channels = _execute.make_int(desired_channels, "desired_channels")
if desired_samples is None:
desired_samples = -1
desired_samples = _execute.make_int(desired_samples, "desired_samples")
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DecodeWav", contents=contents, desired_channels=desired_channels,
desired_samples=desired_samples, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
decode_wav, contents=contents, desired_channels=desired_channels,
desired_samples=desired_samples, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("desired_channels", _op._get_attr_int("desired_channels"),
"desired_samples", _op._get_attr_int("desired_samples"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DecodeWav", _inputs_flat, _attrs, _result)
_result = _DecodeWavOutput._make(_result)
return _result
DecodeWav = tf_export("raw_ops.DecodeWav")(_ops.to_raw_op(decode_wav))
def decode_wav_eager_fallback(contents, desired_channels, desired_samples, name, ctx):
if desired_channels is None:
desired_channels = -1
desired_channels = _execute.make_int(desired_channels, "desired_channels")
if desired_samples is None:
desired_samples = -1
desired_samples = _execute.make_int(desired_samples, "desired_samples")
contents = _ops.convert_to_tensor(contents, _dtypes.string)
_inputs_flat = [contents]
_attrs = ("desired_channels", desired_channels, "desired_samples",
desired_samples)
_result = _execute.execute(b"DecodeWav", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DecodeWav", _inputs_flat, _attrs, _result)
_result = _DecodeWavOutput._make(_result)
return _result
@_dispatch.add_dispatch_list
@tf_export('audio.encode_wav')
def encode_wav(audio, sample_rate, name=None):
r"""Encode audio data using the WAV file format.
This operation will generate a string suitable to be saved out to create a .wav
audio file. It will be encoded in the 16-bit PCM format. It takes in float
values in the range -1.0f to 1.0f, and any values outside that range will be
clamped to it.
`audio` is a 2-D float Tensor of shape `[length, channels]`.
`sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
Args:
audio: A `Tensor` of type `float32`. 2-D with shape `[length, channels]`.
sample_rate: A `Tensor` of type `int32`.
Scalar containing the sample frequency.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `string`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "EncodeWav", name,
tld.op_callbacks, audio, sample_rate)
return _result
except _core._FallbackException:
try:
return encode_wav_eager_fallback(
audio, sample_rate, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
encode_wav, audio=audio, sample_rate=sample_rate, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"EncodeWav", audio=audio, sample_rate=sample_rate, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
encode_wav, audio=audio, sample_rate=sample_rate, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ()
_inputs_flat = _op.inputs
_execute.record_gradient(
"EncodeWav", _inputs_flat, _attrs, _result)
_result, = _result
return _result
EncodeWav = tf_export("raw_ops.EncodeWav")(_ops.to_raw_op(encode_wav))
def encode_wav_eager_fallback(audio, sample_rate, name, ctx):
audio = _ops.convert_to_tensor(audio, _dtypes.float32)
sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
_inputs_flat = [audio, sample_rate]
_attrs = None
_result = _execute.execute(b"EncodeWav", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"EncodeWav", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def mfcc(spectrogram, sample_rate, upper_frequency_limit=4000, lower_frequency_limit=20, filterbank_channel_count=40, dct_coefficient_count=13, name=None):
r"""Transforms a spectrogram into a form that's useful for speech recognition.
Mel Frequency Cepstral Coefficients are a way of representing audio data that's
been effective as an input feature for machine learning. They are created by
taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
higher frequencies that are less significant to the human ear. They have a long
history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
is a good resource to learn more.
Args:
spectrogram: A `Tensor` of type `float32`.
Typically produced by the Spectrogram op, with magnitude_squared
set to true.
sample_rate: A `Tensor` of type `int32`.
How many samples per second the source audio used.
upper_frequency_limit: An optional `float`. Defaults to `4000`.
The highest frequency to use when calculating the
cepstrum.
lower_frequency_limit: An optional `float`. Defaults to `20`.
The lowest frequency to use when calculating the
cepstrum.
filterbank_channel_count: An optional `int`. Defaults to `40`.
Resolution of the Mel bank used internally.
dct_coefficient_count: An optional `int`. Defaults to `13`.
How many output channels to produce per time slice.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "Mfcc", name, tld.op_callbacks,
spectrogram, sample_rate, "upper_frequency_limit",
upper_frequency_limit, "lower_frequency_limit", lower_frequency_limit,
"filterbank_channel_count", filterbank_channel_count,
"dct_coefficient_count", dct_coefficient_count)
return _result
except _core._FallbackException:
try:
return mfcc_eager_fallback(
spectrogram, sample_rate,
upper_frequency_limit=upper_frequency_limit,
lower_frequency_limit=lower_frequency_limit,
filterbank_channel_count=filterbank_channel_count,
dct_coefficient_count=dct_coefficient_count, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if upper_frequency_limit is None:
upper_frequency_limit = 4000
upper_frequency_limit = _execute.make_float(upper_frequency_limit, "upper_frequency_limit")
if lower_frequency_limit is None:
lower_frequency_limit = 20
lower_frequency_limit = _execute.make_float(lower_frequency_limit, "lower_frequency_limit")
if filterbank_channel_count is None:
filterbank_channel_count = 40
filterbank_channel_count = _execute.make_int(filterbank_channel_count, "filterbank_channel_count")
if dct_coefficient_count is None:
dct_coefficient_count = 13
dct_coefficient_count = _execute.make_int(dct_coefficient_count, "dct_coefficient_count")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Mfcc", spectrogram=spectrogram, sample_rate=sample_rate,
upper_frequency_limit=upper_frequency_limit,
lower_frequency_limit=lower_frequency_limit,
filterbank_channel_count=filterbank_channel_count,
dct_coefficient_count=dct_coefficient_count, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("upper_frequency_limit", _op.get_attr("upper_frequency_limit"),
"lower_frequency_limit", _op.get_attr("lower_frequency_limit"),
"filterbank_channel_count",
_op._get_attr_int("filterbank_channel_count"),
"dct_coefficient_count",
_op._get_attr_int("dct_coefficient_count"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Mfcc", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Mfcc = tf_export("raw_ops.Mfcc")(_ops.to_raw_op(mfcc))
def mfcc_eager_fallback(spectrogram, sample_rate, upper_frequency_limit, lower_frequency_limit, filterbank_channel_count, dct_coefficient_count, name, ctx):
if upper_frequency_limit is None:
upper_frequency_limit = 4000
upper_frequency_limit = _execute.make_float(upper_frequency_limit, "upper_frequency_limit")
if lower_frequency_limit is None:
lower_frequency_limit = 20
lower_frequency_limit = _execute.make_float(lower_frequency_limit, "lower_frequency_limit")
if filterbank_channel_count is None:
filterbank_channel_count = 40
filterbank_channel_count = _execute.make_int(filterbank_channel_count, "filterbank_channel_count")
if dct_coefficient_count is None:
dct_coefficient_count = 13
dct_coefficient_count = _execute.make_int(dct_coefficient_count, "dct_coefficient_count")
spectrogram = _ops.convert_to_tensor(spectrogram, _dtypes.float32)
sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.int32)
_inputs_flat = [spectrogram, sample_rate]
_attrs = ("upper_frequency_limit", upper_frequency_limit,
"lower_frequency_limit", lower_frequency_limit, "filterbank_channel_count",
filterbank_channel_count, "dct_coefficient_count", dct_coefficient_count)
_result = _execute.execute(b"Mfcc", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Mfcc", _inputs_flat, _attrs, _result)
_result, = _result
return _result
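# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated op definitions above): the three
# ops in this module are commonly chained into a simple speech-feature pipeline.
# `wav_bytes` is assumed to hold the raw contents of a 16-bit PCM WAV file, and
# the window/stride values are arbitrary examples.
def _example_mfcc_pipeline(wav_bytes, window_size=1024, stride=512):
  """Decode a WAV, compute its spectrogram, and derive MFCC features."""
  waveform, sample_rate = decode_wav(wav_bytes, desired_channels=1)
  # AudioSpectrogram expects [samples, channels]; magnitude_squared=True matches
  # what the Mfcc op documents as its typical input.
  spectrogram = audio_spectrogram(waveform, window_size=window_size,
                                  stride=stride, magnitude_squared=True)
  return mfcc(spectrogram, sample_rate)
# ---------------------------------------------------------------------------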
|
PypiClean
|
/space_tracer-4.10.1-py3-none-any.whl/space_tracer/main.py
|
import argparse
from contextlib import contextmanager
from functools import wraps
from inspect import currentframe, stack
import io
from io import StringIO
from pathlib import Path
from itertools import zip_longest as izip_longest
import os
import os.path
import re
import sys
import traceback
import types
try:
# noinspection PyUnresolvedReferences
from js import document, window # type: ignore
IS_PYODIDE = True
def get_terminal_size(_=None):
return 0, 0
except ImportError:
IS_PYODIDE = False
document = window = None
from os import get_terminal_size
try:
from .mock_turtle import MockTurtle
except ImportError:
MockTurtle = None # type: ignore
from .canvas import Canvas
from .code_tracer import CONTEXT_NAME, find_line_numbers
from .module_importers import TracedModuleImporter, PatchedModuleFinder, \
SourceLoadError
from .report_builder import ReportBuilder
from .traced_finder import DEFAULT_MODULE_NAME, LIVE_MODULE_NAME, \
PSEUDO_FILENAME
def parse_args(command_args=None):
if command_args is None:
command_args = sys.argv
launcher = command_args[0]
if launcher.endswith("__main__.py"):
executable = os.path.basename(sys.executable)
launcher = executable + " -m " + __package__
terminal_width = 0
try:
terminal_width, _ = get_terminal_size()
except OSError:
pass
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
launcher,
description='Trace Python code.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c',
'--canvas',
action='store_true',
help='Should canvas commands be printed?')
parser.add_argument('-x',
'--width',
type=int,
default=800,
help='width of the canvas in pixels')
parser.add_argument('-y',
'--height',
type=int,
default=600,
help='height of the canvas in pixels')
parser.add_argument('-z',
'--zoomed',
action='store_true',
help='matplotlib is zoomed to fit the canvas size')
parser.add_argument('--source_width',
type=int,
help='Width of source code - use 0 to hide or '
'negative numbers to trim columns from the end, '
'None to fit source code.')
parser.add_argument('-n',
'--source_indent',
type=int,
default=0,
help='Number of spaces to indent source code. '
'Negative to skip first columns of source code.')
parser.add_argument('--trace_offset',
type=int,
default=0,
help='Number of columns to skip at start of tracing '
'display.')
parser.add_argument('--trace_width',
type=int,
default=terminal_width,
help='Number of columns to display, including source '
'code. Negative to trim columns from the end, 0 '
'for no limit.')
parser.add_argument('-b',
'--bad_driver',
help="message to display if driver doesn't call module")
parser.add_argument('-i',
'--stdin',
default=os.devnull,
help="file to read stdin from, or - for normal stdin")
parser.add_argument('-o',
'--stdout',
default=os.devnull,
help="file to write stdout to (not tracing), "
"or - for normal stdout")
parser.add_argument('-e',
'--stderr',
default=os.devnull,
help="file to write stderr to, or ! for normal stderr")
parser.add_argument('-r',
'--report',
default='-',
help="file to write tracing to, or - for stdout, "
"or ! for stderr.")
parser.add_argument('--traced_file',
help='file to replace with source code from stdin')
parser.add_argument('--traced',
help='module, function, or method to display trace '
'for. Default: %%(default)s to trace %s, %s, '
'or whatever is replaced by --traced_file.' %
(DEFAULT_MODULE_NAME, LIVE_MODULE_NAME))
parser.add_argument('--hide',
help='variable names to hide',
nargs='*')
parser.add_argument('--start_line',
type=int,
help='first line number to trace')
parser.add_argument('--end_line',
type=int,
help='last line number to trace')
parser.add_argument('--line_numbers',
'-l',
action='store_true',
help='include line numbers with source code')
parser.add_argument('--live',
action='store_true',
help=f'load main module as {LIVE_MODULE_NAME} instead '
f'of {DEFAULT_MODULE_NAME} and show all source '
f'code lines.')
parser.add_argument('-m',
dest='is_module',
action='store_true',
help='driver is an importable module, not a script')
parser.add_argument('driver',
nargs='*',
help='script to call traced code, plus any arguments. '
'Default: %(default)s to use --traced_file.')
args = parser.parse_args(command_args[1:])
if args.driver:
if args.driver[0] == '-m':
args.is_module = True
args.driver = args.driver[1:]
else:
if args.traced_file:
args.driver = [args.traced_file]
else:
parser.error('one of the following arguments is required: '
'driver or traced_file')
if args.traced is None:
if args.traced_file is None or args.traced_file == args.driver[0]:
args.traced = LIVE_MODULE_NAME if args.live else DEFAULT_MODULE_NAME
else:
# Wait until the file is imported to see what module got traced.
pass
return args
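# Illustrative command lines for the arguments above (file names are assumptions):
#
#     space_tracer example.py                          # trace example.py directly
#     space_tracer -l example.py                       # include line numbers
#     space_tracer --traced_file example.py driver.py  # run driver.py, but display
#                                                      # the trace of example.py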
def main():
tracer = TraceRunner()
code_report = tracer.trace_command()
if code_report:
print(code_report)
if tracer.return_code:
try:
return_code = int(tracer.return_code)
except ValueError:
return_code = 1
exit(return_code)
def analyze(source_code, canvas_size=None):
""" Trace the source code for display in the browser.
:param source_code: Source code to trace.
:param canvas_size: (width, height), set if the report should include turtle
commands.
:return: (tracing_report, output)
"""
tracer = TraceRunner()
tracer.standard_files.old_files['stderr'] = StringIO()
tracer.max_width = 200000
with replace_input(source_code):
tracer_args = ['space_tracer',
'--traced_file', PSEUDO_FILENAME,
'--source_width', '0',
'--live',
'--stdout', '!',
'--stderr', '!']
if canvas_size is not None:
canvas_width, canvas_height = canvas_size
tracer_args.append('--canvas')
tracer_args.append('-x{}'.format(canvas_width))
tracer_args.append('-y{}'.format(canvas_height))
tracer_args.append('--zoomed')
tracer_args.append(PSEUDO_FILENAME)
code_report = tracer.trace_command(tracer_args)
# noinspection PyUnresolvedReferences
stdout = tracer.standard_files.old_files['stderr'].getvalue()
return code_report, stdout
def web_main():
window.analyze = analyze
class StandardFiles(dict):
def __init__(self):
super().__init__()
self.old_files = dict(stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
report=StringIO())
self.new_files = {} # {filename: file}
def __setitem__(self, key, filename):
if filename == '-':
if key == 'stdin':
file = self.old_files[key]
else:
file = self.old_files['stdout']
elif filename == '!':
file = self.old_files['stderr']
else:
if key == 'stdin':
mode = 'r'
else:
mode = 'w'
file_key = (filename, mode)
file = self.new_files.get(file_key)
if file is None:
file = argparse.FileType(mode)(filename)
self.new_files[file_key] = file
super().__setitem__(key, file)
def __missing__(self, key):
return self.old_files[key]
def close_all(self):
for file in self.new_files.values():
file.close()
def traced(target=None, hide=None):
""" A decorator for a function or with block that should be traced. """
def is_in_traced_module():
""" Check if this was called directly by the traced module. """
call_stack = stack()
# expected frames in call stack:
# 0. This function.
# 1. traced() decorator
# 2. Module that's being traced
# 3. module_importers.py that executed the traced module.
this_filepath = Path(__file__)
module_importers_filepath = this_filepath.parent / "module_importers.py"
if len(call_stack) < 4:
return False
expected_frame = call_stack[3]
return expected_frame.filename == str(module_importers_filepath)
if is_in_traced_module():
ReportBuilder.is_using_traced_blocks = True
@contextmanager
def traced_options():
ReportBuilder.is_tracing_next_block = True
old_hide = ReportBuilder.hide
ReportBuilder.hide = hide
yield
ReportBuilder.hide = old_hide
ReportBuilder.is_tracing_next_block = False
if target is None:
# Not decorating a function, must be a with block.
return traced_options()
@wraps(target)
def wrapped(*args, **kwargs):
with traced_options():
return target(*args, **kwargs)
return wrapped
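# Illustrative usage sketch (hedged): `traced` can wrap a function or a with block;
# the import path below is an assumption about how the package exposes it.
#
#     from space_tracer import traced
#
#     @traced
#     def double(x):
#         return x * 2
#
#     def compute(x):
#         with traced(hide=['scratch']):
#             scratch = x + 1
#             return scratch * 2
#
# The call-stack check above (is_in_traced_module) means the traced-blocks mode is
# only enabled when the decorator runs inside the module that is being traced.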
@contextmanager
def swallow_output(standard_files: StandardFiles):
old_main_mod = sys.modules.get(DEFAULT_MODULE_NAME, None)
# noinspection PyUnresolvedReferences
old_string_io = io.StringIO
try:
sys.stdout = FileSwallower(standard_files['stdout']) # type: ignore
sys.stderr = FileSwallower(standard_files['stderr'], # type: ignore
target_name='sys.stderr')
sys.stdin = standard_files['stdin']
io.StringIO = TracedStringIO # type: ignore
yield
finally:
if old_main_mod is not None:
sys.modules[DEFAULT_MODULE_NAME] = old_main_mod
else:
sys.modules.pop(DEFAULT_MODULE_NAME, None)
sys.stdout = standard_files.old_files['stdout']
sys.stderr = standard_files.old_files['stderr']
sys.stdin = standard_files.old_files['stdin']
io.StringIO = old_string_io # type: ignore
@contextmanager
def replace_input(stdin_text=None):
old_stdin = sys.stdin
sys.stdin = StringIO(stdin_text)
try:
yield
finally:
sys.stdin = old_stdin
def display_error_on_canvas():
if MockTurtle is None:
return
t = MockTurtle()
t.display_error()
class TraceRunner(object):
def __init__(self):
self.canvas = None
self.message_limit = 10000
self.max_width = 200000
self.keepalive = False
self.return_code = None
self.standard_files = StandardFiles()
def trace_turtle(self, source, width: int = 0, height: int = 0):
MockTurtle.monkey_patch()
try:
with replace_input(source):
self.trace_command(['space_tracer',
'--traced_file', PSEUDO_FILENAME,
'--source_width', '0',
'--width', str(width),
'--height', str(height),
'--live',
PSEUDO_FILENAME])
return '\n'.join(MockTurtle.get_all_reports())
finally:
MockTurtle.remove_monkey_patch()
def trace_code(self, source):
""" Trace a module of source code.
:param str source: source code to trace and run.
"""
with replace_input(source):
return self.trace_command(['space_tracer',
'--traced_file', PSEUDO_FILENAME,
'--source_width', '0',
'--live',
PSEUDO_FILENAME])
def trace_command(self, command_args=None):
""" Trace a module, based on arguments from the command line.
:param command_args: list of strings, like sys.argv
:return: the tracing report, including the canvas report
"""
args = parse_args(command_args)
if self.canvas is None:
self.canvas = Canvas(args.width, args.height)
was_patched = False
if MockTurtle is not None:
if MockTurtle.is_patched():
was_patched = True
else:
MockTurtle.monkey_patch(self.canvas)
plt = sys.modules.get('matplotlib.pyplot')
if plt is not None:
# Clear any plot state from previous runs.
plt.close()
plt.live_coding_size = (args.width, args.height)
if args.zoomed:
plt.live_coding_zoom()
self.standard_files['stdin'] = args.stdin
self.standard_files['stdout'] = args.stdout
self.standard_files['stderr'] = args.stderr
self.standard_files['report'] = args.report
ReportBuilder.hide = args.hide
ReportBuilder.is_using_traced_blocks = False
builder = ReportBuilder(self.message_limit)
builder.max_width = self.max_width
if args.start_line or args.end_line:
builder.trace_block(args.start_line, args.end_line)
traced_importer = TracedModuleImporter(
args.traced,
args.traced_file,
args.driver,
args.is_module,
args.live,
builder)
patched_finder = PatchedModuleFinder(args.zoomed)
self.return_code = 0
try:
# Set sys.argv properly.
old_argv = sys.argv
sys.argv = args.driver
sys.meta_path.insert(0, patched_finder)
sys.meta_path.insert(0, traced_importer)
# During testing, we import these modules for every test case,
# so force a reload. This is only likely to happen during testing.
traced_target = traced_importer.traced
for name, module in list(sys.modules.items()):
if name == DEFAULT_MODULE_NAME:
continue
module_file = getattr(module, '__file__', '')
if (traced_target and traced_target.startswith(name) or
name == LIVE_MODULE_NAME or
module_file == traced_importer.traced_file):
del sys.modules[name]
try:
self.run_code(args.bad_driver, traced_importer)
finally:
# Restore the old argv and path
sys.argv = old_argv
sys.meta_path.remove(traced_importer)
sys.meta_path.remove(patched_finder)
except SyntaxError as ex:
self.return_code = 1
messages = traceback.format_exception_only(type(ex), ex)
message = messages[-1].strip()
if ex.filename == PSEUDO_FILENAME:
line_number = ex.lineno
else:
line_number = 1
message = '{} line {}: {}'.format(ex.filename,
ex.lineno,
message)
builder.add_message(message, line_number)
if args.canvas:
display_error_on_canvas()
except SourceLoadError as ex:
builder.add_message(str(ex), 1)
self.return_code = 1
except BaseException as ex:
self.return_code = getattr(ex, 'code', 1)
etype, value, tb = sys.exc_info()
is_reported = False
entries = traceback.extract_tb(tb)
for traced_file, _, _, _ in entries:
if traced_file == PSEUDO_FILENAME:
is_reported = True
space_tracer_folder = os.path.dirname(__file__)
while not is_reported and tb is not None:
traced_file = tb.tb_frame.f_code.co_filename
traced_folder = os.path.dirname(traced_file)
if traced_folder != space_tracer_folder:
break
tb = tb.tb_next
if not is_reported:
if tb:
messages = traceback.format_exception(etype, value, tb)
else:
messages = traceback.format_exception_only(etype, value)
traced_importer.report_driver_result(messages)
if args.canvas:
display_error_on_canvas()
used_finder = (traced_importer.source_finder or
traced_importer.driver_finder)
is_traced = (traced_importer.is_traced_module_imported or
(used_finder and used_finder.is_tracing))
if not is_traced:
source_code = ''
elif used_finder and used_finder.source_code:
source_code = used_finder.source_code
elif traced_importer and traced_importer.source_code:
source_code = traced_importer.source_code
else:
source_code = ''
source_lines = source_code.splitlines()
if source_lines and traced_importer.is_live:
total_lines = len(source_lines)
else:
total_lines = 0
report = builder.report(total_lines)
source_width = args.source_width
if source_code is None or source_width == 0 or not source_lines:
reported_source_lines = []
indent = 0
else:
if args.source_indent >= 0:
indent = args.source_indent
start_char = 0
else:
indent = 0
start_char = -args.source_indent
reported_source_lines = []
if args.live:
source_blocks = [(1, len(source_lines))]
else:
source_blocks = builder.reported_blocks
last_line = max(last for first, last in source_blocks)
number_width = len(str(last_line))
for first_line, last_line in source_blocks:
if first_line is None:
first_line = 1
if last_line is None:
last_line = len(source_lines)
for line_number in range(first_line, last_line+1):
if line_number > len(source_lines):
reported_source_lines.append('')
else:
line = source_lines[line_number - 1][start_char:]
if args.line_numbers:
line = '{:{}}) {}'.format(line_number,
number_width,
line)
reported_source_lines.append(line)
max_source_width = max(map(len, reported_source_lines))
if source_width is None:
source_width = max_source_width + indent
elif source_width < 0:
source_width += max_source_width + indent
trace_width = args.trace_width
if trace_width or reported_source_lines:
report_lines = report.splitlines()
dump_lines = []
if trace_width < 0:
max_report_width = max(len(report_line)
for report_line in report_lines)
if source_width:
trace_width += source_width + 3
trace_width += max_report_width
for source_line, report_line in izip_longest(reported_source_lines,
report_lines,
fillvalue=''):
padded_source_line = indent * ' ' + source_line
padded_source_line += (source_width - len(source_line)) * ' '
line = padded_source_line[:source_width]
if line:
line += ' |'
if report_line:
line += ' '
if report_line:
line += report_line[args.trace_offset:]
if trace_width:
line = line[:trace_width]
dump_lines.append(line)
report = '\n'.join(dump_lines)
if MockTurtle is None:
turtle_report = None
else:
turtle_report = MockTurtle.get_all_reports()
if not was_patched:
MockTurtle.remove_monkey_patch()
if turtle_report and args.canvas:
report = ('start_canvas\n' +
'\n'.join(turtle_report) +
'\nend_canvas\n.\n' +
report)
if args.report != '-':
self.standard_files['report'].write(report)
report = ''
self.standard_files.close_all()
return report
def run_code(self,
bad_driver,
traced_importer):
""" Run the traced module, plus its driver.
:param str bad_driver: a message to display if the driver doesn't call
the module
:param traced_importer: holds details of what to trace as __main__.
"""
for module_name in ('random', 'numpy.random'):
random_module = sys.modules.get(module_name)
if random_module is not None:
# noinspection PyUnresolvedReferences
random_module.seed(0)
builder = traced_importer.report_builder
output_context = swallow_output(self.standard_files)
try:
with output_context:
try:
traced_importer.run_main()
# noinspection PyUnresolvedReferences
if sys.stdout.saw_failures:
traced_importer.report_driver_result(
['Pytest reported failures.'])
self.return_code = 1
except SystemExit as ex:
if ex.code:
self.return_code = ex.code
messages = traceback.format_exception_only(type(ex),
ex)
message = messages[-1].strip()
traced_importer.report_driver_result([message])
for value in traced_importer.environment.values():
if isinstance(value, types.GeneratorType):
value.close()
if not traced_importer.is_traced_module_imported:
traced_target = traced_importer.traced
driver_name = os.path.basename(traced_importer.driver[0])
if bad_driver:
message = bad_driver
elif traced_target is None:
traced_name = os.path.basename(traced_importer.traced_file)
message = ("{} doesn't call {}. Try a different "
"driver.").format(driver_name, traced_name)
else:
message = ("{} doesn't call the {} module. Try a different "
"driver.").format(driver_name, traced_target)
traced_importer.report_driver_result([message])
finally:
is_decorated = any(frame.is_decorated for frame in builder.history)
used_finder = traced_importer.source_finder or traced_importer.driver_finder
if used_finder and not is_decorated:
line_numbers = set()
if used_finder.traced_node:
find_line_numbers(used_finder.traced_node, line_numbers)
is_minimum = False
else:
find_line_numbers(used_finder.source_tree, line_numbers)
is_minimum = True
if line_numbers:
builder.trace_block(min(line_numbers),
max(line_numbers),
is_minimum)
class FileSwallower(object):
def __init__(self,
target,
check_buffer=True,
target_name=None):
self.target = target
self.target_name = target_name
self.saw_failures = False
if check_buffer:
buffer = getattr(target, 'buffer', None)
if buffer is not None:
self.buffer = FileSwallower(buffer, check_buffer=False)
def write(self, *args, **kwargs):
self.target.write(*args, **kwargs)
text = args and str(args[0]) or ''
if re.search(r'^=+\s*FAILURES\s*=+$', text):
self.saw_failures = True
frame = currentframe()
while frame is not None:
report_builder = frame.f_locals.get(CONTEXT_NAME)
if report_builder is not None:
has_print_function = True
report_builder.add_output(text,
frame.f_lineno,
has_print_function,
target_name=self.target_name)
break
frame = frame.f_back
def __getattr__(self, name):
return getattr(self.target, name)
def find_string_io_targets(frame):
for name, value in frame.f_locals.items():
yield name, value
if name == 'self':
for attr_name, attr_value in value.__dict__.items():
yield 'self.' + attr_name, attr_value
class TracedStringIO(io.StringIO):
def write(self, text):
super(TracedStringIO, self).write(text)
frame = currentframe()
while frame is not None:
report_builder = frame.f_locals.get(CONTEXT_NAME)
if report_builder is not None:
for name, value in find_string_io_targets(frame):
if value is self:
report_builder.add_output(text,
frame.f_lineno,
target_name=name)
return
frame = frame.f_back
if __name__ == '__main__':
main()
|
PypiClean
|
/xart-0.2.0.tar.gz/xart-0.2.0/README.md
|
# xart: generate art ascii texts. [![Version][version-badge]][version-link] ![WTFPL License][license-badge]
`xart` is a pure Python library that provides an easy way to generate ASCII art text. Life is short, be cool.
```
██╗ ██╗ █████╗ ██████╗ ████████╗
╚██╗██╔╝██╔══██╗██╔══██╗╚══██╔══╝
╚███╔╝ ███████║██████╔╝ ██║
██╔██╗ ██╔══██║██╔══██╗ ██║
██╔╝ ██╗██║ ██║██║ ██║ ██║
╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝
```
### Getting Started
---
#### help
```
$ xart -h
usage: __init__.py [-h] [-f FONT] [-c COLOR] [-i] [-s] [-l] [-v]
xart : generate art ascii texts.
optional arguments:
-h, --help show this help message and exit
-f FONT, --font FONT font to render with, default random
-c COLOR, --color COLOR
font color, default WHITE, all : BLACK, RED, GREEN,
YELLOW, BLUE, PURPLE, CYAN, GRAY, WHITE
-i, --info show information of given font
-s, --show show random fonts
-l, --list list all supported fonts
-v, --version version
```
#### generate ascii text via random font
```
$ xart test
███ ▄████████ ▄████████ ███
▀█████████▄ ███ ███ ███ ███ ▀█████████▄
▀███▀▀██ ███ █▀ ███ █▀ ▀███▀▀██
███ ▀ ▄███▄▄▄ ███ ███ ▀
███ ▀▀███▀▀▀ ▀███████████ ███
███ ███ █▄ ███ ███
███ ███ ███ ▄█ ███ ███
▄████▀ ██████████ ▄████████▀ ▄████▀
```
#### generate ascii text via given font
```
$ xart test -f 3D_Diagonal
___ ___
,--.'|_ ,--.'|_
| | :,' | | :,'
: : ' : .--.--. : : ' :
.;__,' / ,---. / / ' .;__,' /
| | | / \ | : /`./ | | |
:__,'| : / / | | : ;_ :__,'| :
' : |__ . ' / | \ \ `. ' : |__
| | '.'| ' ; /| `----. \ | | '.'|
; : ; ' | / | / /`--' / ; : ;
| , / | : | '--'. / | , /
---`-' \ \ / `--'---' ---`-'
`----'
```
#### generate ascii text via given color
![COLOR][color-demo]
#### show all supported fonts
```
$ xart -l
xart : generate art ascii texts.
0. 1Row
1. 3-D
...
277. Wow
All 278 fonts.
```
#### show font information
```
$ xart -i -f Weird
weird.flf (version 2)
by: Bas Meijer [email protected] [email protected]
fixed by: Ryan Youck [email protected]
some special characters '#%*' etc. are not matching, they are from other fonts.
Explanation of first line:
flf2 - "magic number" for file identification
a - should always be `a', for now
$ - the "hardblank" -- prints as a blank, but can't be smushed
6 - height of a character
5 - height of a character, not including descenders
20 - max line length (excluding comment lines) + a fudge factor
15 - default smushmode for this font (like "-m 15" on command line)
13 - number of comment lines
```
#### version
```
$ xart -v
xart : generate art ascii fonts, version 0.1.5.
___ ____ ___
/ _ \ (___ \ / _ \
| | | | __) ) | | | |
| | | | / __/ | | | |
| |_| | _ | |___ _ | |_| |
\___/ (_)|_____)(_) \___/
```
### Installation
---
`xart ` is hosted on [PYPI](https://pypi.python.org/pypi/xart) and can be installed as such:
```
$ pip install xart
```
Alternatively, you can also get the latest source code from [GitHub](https://github.com/xlzd/xart) and install it manually:
```
$ git clone [email protected]:xlzd/xart.git
$ cd xart
$ python setup.py install
```
For update:
```
$ pip install xart --upgrade
```
### License
---
WTFPL ([here](https://github.com/xlzd/xart/blob/master/LICENSE))
[version-badge]: https://img.shields.io/pypi/v/xart.svg?label=version
[version-link]: https://pypi.python.org/pypi/xart/
[license-badge]: https://img.shields.io/badge/license-WTFPL-007EC7.svg
[color-demo]: https://raw.githubusercontent.com/xlzd/xart/master/printscreen/color.png
|
PypiClean
|
/mroylib_min-2.2.5.tar.gz/mroylib_min-2.2.5/qlib/text/parser.py
|
import re, itertools
from string import digits, ascii_letters ,punctuation, whitespace
def extract_ip(string):
return re.findall(r'\D((?:[1-2]?\d?\d\.){3}[1-2]?\d?\d)', string)
def extract_http(string):
return re.findall(r'(https?\://[\w\.\%\#\/\&\=\?\-]+)', string)
def extract_host(string):
return re.findall(r'((?:www\.|mail\.|ftp\.|news\.|blog\.|info\.)?\w[\w\.\-]+(?:\.com|\.net|\.org|\.cn|\.jp|\.uk|\.gov))', string)
# \-\.\/\;\#\(\)
def extract_dict(string, sep='\n'):
"""Extract 'key: value' or 'key = value' pairs from each line and return them as a dict."""
dicts = {}
for line in string.split(sep):
ds = re.findall(r'^\s*([\w\s\-\.\/]+)\s*[\:\=]\s*([\w\S\s]+)\s*$', line )
if ds and ds[0][1].strip() and ds[0][1].strip() not in (':','=',):
dicts[ds[0][0].strip()] = ds[0][1].strip()
return dicts
def extract_fuzzy_regex(string):
"""Build a fuzzy regex for the string by grouping runs of word-like, punctuation and other characters; returns the pattern and the run lengths."""
res = ''
res_k = []
last = ''
last_k = 0
words_re = r'[\s\w\-\_\.\'\"]'
for k, group in itertools.groupby(string):
if k in ascii_letters + digits + "-_ '\".":  # the word-like characters matched by words_re
l, k = words_re, len(list(group))
if l == last:
last_k += k
else:
# res.append([last, last_k])
res_k.append(last_k)
if last == words_re:
res += "(%s+)" % last
else:
res += last + "+?"
last_k = k
last = l
elif k in punctuation:
l, k = r'\%s' % k , len(list(group))
if l == last:
last_k += k
else:
# res.append([last, last_k])
res_k.append(last_k)
if last == words_re:
res += "(%s+)" % last
else:
res += last + "+?"
last_k = k
last = l
else:
l, k = '.', len(list(group))
if l == last:
last_k += k
else:
# res.append([last, last_k])
res_k.append(last_k)
if last == words_re:
res += "(%s+)" % last
else:
res += last + "+?"
last_k = k
last = l
# res.append([last, last_k])
if last == words_re:
res += "(%s+)" % last
else:
res += last + "+?"
return res[2:], res_k[1:]
def extract_regex(string):
"""Describe the string as a list of regex character classes plus the length of each character run."""
res =[]
last = ''
last_k = 0
for k, group in itertools.groupby(string):
if k in ascii_letters:
l, k = '\w', len(list(group))
if l == last:
last_k += k
else:
res.append([last, last_k])
last_k = k
last = l
elif k in digits:
l, k = '\d', len(list(group))
if l == last:
last_k += k
else:
res.append([last, last_k])
last_k = k
last = l
elif k in punctuation:
l, k = '\%s' % k , len(list(group))
if l == last:
last_k += k
else:
res.append([last, last_k])
last_k = k
last = l
elif k in whitespace:
l, k = '\s', len(list(group))
if l == last:
last_k += k
else:
res.append([last, last_k])
last_k = k
last = l
else:
l, k = '.', len(list(group))
if l == last:
last_k += k
else:
res.append([last, last_k])
last_k = k
last = l
res.append([last, last_k])
return [i[0] for i in res[1:]], [i[1] for i in res[1:]]
def extract_table(string, sep=" |,"):
"""Yield tables from the text: consecutive lines that share the same fuzzy regex are grouped and split into rows with a consistent column count."""
last_len = 0
row = 0
table = []
last_re_str = ''
table_dim = []
for line in string.split('\n'):
if not line.strip():continue
re_str, dim = extract_fuzzy_regex(line)
# print(re_str)
if re_str == last_re_str:
r = re.findall(re_str, line)[0]
if isinstance(r, str):
table.append(r)
else:
table.append(list(r))
# print('table', r, re_str)
table_dim.append(dim)
row += 1
else:
last_re_str = re_str
if not table:
row = 1
table = []
table_dim = []
continue
if isinstance(table[0] , str):
t = [ [i2 for i2 in re.split(sep, i) if i2 ] for i in table]
cols = max([ len(i) for i in t])
t = [ i for i in t if len(i) == cols]
if len(t) >1:
yield t
else:
if table[0]:
yield table
row = 1
table = []
table_dim = []
if isinstance(table[0] , str):
t = [ [i2 for i2 in re.split(sep, i) if i2 ] for i in table]
cols = max([ len(i) for i in t])
t = [ i for i in t if len(i) == cols]
if len(t) >1:
yield t
else:
if table[0]:
yield table
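# Illustrative usage sketch (example strings are assumptions):
#
#     extract_ip('gateway 192.168.0.1 is up')         # -> ['192.168.0.1']
#     extract_http('see https://example.org/a?b=1')   # -> ['https://example.org/a?b=1']
#     extract_dict('host: example.org\nport = 8080')  # -> {'host': 'example.org', 'port': '8080'}
#
#     for table in extract_table(report_text):
#         ...  # each yielded table is a list of rows with a consistent column count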
|
PypiClean
|
/code_ast-0.1.0.tar.gz/code_ast-0.1.0/README.md
|
# Code AST
> Fast structural analysis of any programming language in Python
Programming Language Processing (PLP) brings the capabilities of modern NLP systems to the world of programming languages.
To achieve high-performance PLP systems, existing methods often take advantage of the fully defined nature of programming languages. In particular, the syntactic structure can be exploited to gain knowledge about programs.
**code.ast** provides easy access to the syntactic structure of a program. By relying on [tree-sitter](https://github.com/tree-sitter) as the back end, the parser supports fast parsing of a variety of programming languages.
The goal of code.ast is to combine the efficiency and variety of languages supported by tree-sitter with the convenience of more native parsers (like [libcst](https://github.com/Instagram/LibCST)).
To achieve this, code.ast adds the features:
1. **Auto-loading:** Compilation of source code parsers for any language supported by tree-sitter with a single keyword,
2. **Visitors:** Search the AST quickly,
3. **Transformers:** Transform source code easily by transforming the AST structure
## Installation
The package is tested under Python 3. It can be installed via:
```bash
pip install code-ast
```
## Quick start
code.ast can parse nearly any program code in a few lines of code:
```python
import code_ast
# Python
code_ast.ast(
'''
def my_func():
print("Hello World")
''',
lang = "python")
# Output:
# PythonCodeAST [0, 0] - [4, 4]
# module [1, 8] - [3, 4]
# function_definition [1, 8] - [2, 32]
# identifier [1, 12] - [1, 19]
# parameters [1, 19] - [1, 21]
# block [2, 12] - [2, 32]
# expression_statement [2, 12] - [2, 32]
# call [2, 12] - [2, 32]
# identifier [2, 12] - [2, 17]
# argument_list [2, 17] - [2, 32]
# string [2, 18] - [2, 31]
# Java
code_ast.ast(
'''
public class HelloWorld {
public static void main(String[] args){
System.out.println("Hello World");
}
}
''',
lang = "java")
# Output:
# JavaCodeAST [0, 0] - [7, 4]
# program [1, 0] - [6, 4]
# class_declaration [1, 0] - [5, 1]
# modifiers [1, 0] - [1, 6]
# identifier [1, 13] - [1, 23]
# class_body [1, 24] - [5, 1]
# method_declaration [2, 8] - [4, 9]
# ...
```
## Visitors
code.ast implements the visitor pattern to quickly traverse the AST structure:
```python
import code_ast
from code_ast import ASTVisitor
code = '''
def f(x, y):
return x + y
'''
# Count the number of identifiers
class IdentifierCounter(ASTVisitor):
def __init__(self):
self.count = 0
def visit_identifier(self, node):
self.count += 1
# Parse the AST and then visit it with our visitor
source_ast = code_ast.ast(code, lang = "python")
count_visitor = IdentifierCounter()
source_ast.visit(count_visitor)
count_visitor.count
# Output: 5
```
## Transformers
Transformers provide an easy way to transform source code. For example, in the following, we want to mirror each binary addition:
```python
import code_ast
from code_ast import ASTTransformer, FormattedUpdate, TreeUpdate
code = '''
def f(x, y):
return x + y + 0.5
'''
# Mirror binary operator on leave
class MirrorAddTransformer(ASTTransformer):
def leave_binary_operator(self, node):
if node.children[1].type == "+":
return FormattedUpdate(
" %s + %s",
[
TreeUpdate(node.children[2]),
TreeUpdate(node.children[0])
]
)
# Parse the AST and then visit it with our visitor
source_ast = code_ast.ast(code, lang = "python")
mirror_transformer = MirrorAddTransformer()
# Mirror transformers are initialized by running them as visitors
source_ast.visit(mirror_transformer)
# Transformer provide a minimal AST edit
mirror_transformer.edit()
# Output:
# module [2, 0] - [5, 0]
# function_definition [2, 0] - [3, 22]
# block [3, 4] - [3, 22]
# return_statement [3, 4] - [3, 22]
# binary_operator -> FormattedUpdate [3, 11] - [3, 22]
# binary_operator -> FormattedUpdate [3, 11] - [3, 16]
# And it can be used to directly transform the code
mirror_transformer.code()
# Output:
# def f(x, y):
# return 0.5 + y + x
```
## Project Info
The goal of this project is to provide developers in the
programming language processing community with easy
access to AST parsing. This is currently developed as a helper library for internal research projects. Therefore, it will only be updated
as needed.
Feel free to open an issue if anything unexpected
happens.
Distributed under the MIT license. See ``LICENSE`` for more information.
We thank the developers of the [tree-sitter](https://tree-sitter.github.io/tree-sitter/) library. Without tree-sitter, this project would not be possible.
|
PypiClean
|
/thoth-ssdeep-3.4.tar.gz/thoth-ssdeep-3.4/docs/source/index.rst
|
python-ssdeep
=============
This is a straightforward Python wrapper for `ssdeep by Jesse Kornblum`_, which is a library for computing context
triggered piecewise hashes (CTPH). Also called fuzzy hashes, CTPH can match inputs that have homologies. Such inputs
have sequences of identical bytes in the same order, although bytes in between these sequences may be different in both
content and length.
You can install ``python-ssdeep`` with ``pip``:
.. code-block:: console
$ pip install ssdeep
See :doc:`Installation <installation>` for more information.
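A minimal usage sketch (the exact hash strings depend on the input; the compare
score is a similarity value between 0 and 100):
.. code-block:: python
    import ssdeep
    hash1 = ssdeep.hash('Also called fuzzy hashes, CTPH can match inputs that have homologies.')
    hash2 = ssdeep.hash('Also called fuzzy hashes, CTPH can match inputs with homologies.')
    ssdeep.compare(hash1, hash2)
See :doc:`Usage <usage>` for more examples.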
Contents:
.. toctree::
:maxdepth: 2
installation
usage
api
faq
contributing
changelog
History
=======
* The initial version was published in 2010 by `Denis Bilenko on bitbucket`_.
* Since 2012 the source has been maintained by PhiBo (`DinoTools`_) and published on `github`_.
* In 2014 the wrapper was rewritten to use cffi.
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. _ssdeep by Jesse Kornblum: https://ssdeep-project.github.io/ssdeep/
.. _Denis Bilenko on bitbucket: https://bitbucket.org/denis/ssdeep
.. _github: https://github.com/DinoTools/python-ssdeep
.. _Dinotools: https://www.dinotools.org/
|
PypiClean
|
/unrest_schema-0.1.1.tar.gz/unrest_schema-0.1.1/unrest_schema/views.py
|
from django.http import JsonResponse, Http404
from .utils import form_to_schema
import json
FORMS = {}
def FormResponse(form):
if not form.errors:
return JsonResponse({})
return JsonResponse({'errors': form.errors.get_json_data()})
def register(form, form_name=None):
if isinstance(form, str):
# register is being used as a decorator and args are curried and reversed
return lambda actual_form: register(actual_form, form_name=form)
form_name = form_name or form.__name__
old_form = FORMS.get(form_name, form)
if form != old_form:
e = f"Form with name {form_name} has already been registered.\nOld: {old_form}\nNew:{form}"
raise ValueError(e)
FORMS[form_name] = form
return form
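# Illustrative usage sketch (MyForm/MyOtherForm are assumed Django form classes):
#
#     @register
#     class MyForm(forms.Form):
#         ...
#
#     @register('renamed-form')   # string-first decorator form handled above
#     class MyOtherForm(forms.Form):
#         ...
#
# Registered forms are then served by schema_form() below under their name.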
def schema_form(request, form_name, object_id=None, method=None, content_type=None):
if not form_name in FORMS:
raise Http404(f"Form with name {form_name} does not exist")
method = method or request.method
content_type = content_type or request.headers.get('Content-Type', None)
form_class = FORMS[form_name]
_meta = getattr(form_class, 'Meta', object())
kwargs = {}
if object_id and hasattr(_meta, 'model'):
kwargs['instance'] = _meta.model.objects.get(id=object_id)
if getattr(_meta, 'login_required', None) and not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged in to do this'}, status=403)
if method == "POST":
if content_type == 'application/json':
data = json.loads(request.body.decode('utf-8') or "{}")
form = form_class(data, **kwargs)
else:
form = form_class(request.POST, request.FILES, **kwargs)
form.request = request
if form.is_valid():
instance = form.save()
data = {}
if instance:
data = {'id': instance.id, 'name': str(instance)}
return JsonResponse(data)
return FormResponse(form)
schema = form_to_schema(FORMS[form_name]())
return JsonResponse({'schema': schema})
|
PypiClean
|
/data_harvesting-1.0.0.tar.gz/data_harvesting-1.0.0/data_harvesting/util/json_ld_util.py
|
"""This module contains utility to process and handle, validate json-ld data """
import copy
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import Callable
from typing import List
from typing import Optional
from typing import Union
from pyld import jsonld
from pyshacl import validate as shacl_validate
from rdflib import Graph
from rdflib import Literal
from rdflib.compare import graph_diff
from rdflib.plugins.sparql.results.csvresults import CSVResultSerializer
from data_harvesting.util.pid_util import generate_uuid
#from itertools import chain
# Validating JSON-LD is not so clear-cut:
# there is framing (https://www.w3.org/TR/json-ld-framing/) and, for example in R, https://codemeta.github.io/codemetar/articles/validation-in-json-ld.html
# For RDF data there is SHACL, where one can define shapes for validation, which are a kind of
# schema graph.
# These might also be used for logic operations such as inferring new triples.
def validate_jsonld_simple(jsonld_data: dict) -> bool:
"""
Test whether the integrity of the JSON-LD data is right,
i.e. we do not validate the content of an instance the way a schema does.
Returns True if it validates, False if not.
"""
context = jsonld_data.get('@context', None)
if context is None:
print('Missing context, so probably no or broken json-LD data given')
return False
instance = copy.deepcopy(jsonld_data)
# perform some roundturn json-LD operation to see if they work
# TODO check if they are proper
# Check if URIs are resolvable...
instance = jsonld.expand(instance)
instance = jsonld.compact(instance, context)
# maybe also flatten the jsonLD to get all keys in general
# check if we end with the same
diffk: set = set(instance.keys()) - set(jsonld_data.keys())
if len(diffk) != 0:
print(f'The following keys are not supported: {diffk}')
return False
return True
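# Illustrative sketch (hedged): the round-trip check above should accept a document
# whose compacted keys match the input, and it rejects anything without an @context.
#
#     doc = {'@context': {'name': 'http://schema.org/name'}, 'name': 'Jane Doe'}
#     validate_jsonld_simple(doc)                    # expected: True
#     validate_jsonld_simple({'name': 'Jane Doe'})   # False, missing @context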
def valdiate_from_file(filepath: Path, file_format='json-ld', options: Optional[dict] = None):
"""validate a given file"""
data_graph = Graph()
data_graph.parse(filepath, format=file_format)
return validate(data_graph, options=options)
def validate(graph: Graph, validate_against: Optional[Graph] = None, options: Optional[dict] = None):
"""Validate a given rdf graph with shacl"""
if validate_against is None:
validate_against = Graph()
# default is unhide specific
# if not they should be downloaded, also they should be read once somewhere else and used from
# there..
# /data_harvesting/external_schemas/*
basepath = Path(__file__).parent.parent.resolve() / 'external_schemas'
schema_org = basepath / 'schemaorg-current-https.jsonld'
codemeta = basepath / 'schema-codemeta.jsonld'
if schema_org.exists():
validate_against.parse(schema_org, format='json-ld')
if codemeta.exists():
validate_against.parse(codemeta, format='json-ld')
# add other unhide specific things, ... or use only certain terms if class is given or so
if options is None:
options = {}
vali = shacl_validate(graph, shacl_graph=validate_against, **options)
conforms, results_graph, results_text = vali
return conforms, vali
def convert(
filepath: Path,
destfilepath: Optional[Path] = None,
informat: str = 'json-ld',
outformat: str = 'ttl',
overwrite: bool = False
) -> None:
"""
convert a given graph file to a different format using rdflib
"""
name = filepath.stem  # strip the extension (rstrip would drop arbitrary trailing characters)
destfilepath = destfilepath or Path(f'./{name}.{outformat}').resolve()
if not overwrite and destfilepath.exists():
return
graph = Graph()
graph.parse(filepath, format=informat)
if outformat == 'csv':
convert_to_csv(graph, destination=destfilepath)
else:
graph.serialize(destination=destfilepath, format=outformat)
return
def convert_to_csv(graph: Graph, query: Optional[str] = None, destination: Union[Path, str] = 'converted.csv'):
"""Convert results of a sparql query of a given graph to to a csv file
Default query results:
link table. Source | link_type | Target
"""
default_edge_query = """
PREFIX schema: <http://schema.org/>
SELECT DISTINCT ?Source ?Type ?Target
WHERE {
?Source ?Type ?Target .
}
"""
# ?sType ?tType
# ?source a ?sType .
# ?target a ?tType .
# FILTER((?sType) IN (schema:Person, schema:Organization, schema:Dataset, schema:SoftwareSourceCode, schema:Document))
# FILTER((?tType) IN (schema:Person, schema:Organization, schema:Dataset, schema:SoftwareSourceCode, schema:Document))
#}
#"""
query = query or default_edge_query
results = graph.query(query)
csv_s = CSVResultSerializer(results)
with open(destination, 'wb') as fileo:
csv_s.serialize(fileo)
# Add URIs and types
def add_missing_uris(data: dict, path_prefix: str, alternative_ids: Optional[List[str]] = None) -> dict:
"""
Add to each entity an internal id corresponding to the given prefix
and the internal JSON path (since jsonpath is awkward for URLs, we use xpath-like syntax).
If the entity already has an @id, the internal id is added to sameAs instead.
Further rules: if a url, contentUrl, identifier or email is present, that value becomes the id
and our custom id is put into sameAs.
"""
# To avoid a mutable default value, though this is not so nice...
if alternative_ids is None:
alternative_ids = ['identifier', 'contentUrl', 'url']
# For now we do this recursively; iterative might be safer
id_path = path_prefix
new_data = data.copy()
same_as = new_data.get('sameAs', [])
if '@id' in new_data:
if id_path not in same_as:
same_as.append(id_path)
new_data['sameAs'] = same_as
else:
found = False
for term in alternative_ids:
if term in new_data:
new_data['@id'] = new_data[term]
found = True
break # Only use the first one, so there is an order we want to replace these
if not found:
new_data['@id'] = id_path
for key, val in new_data.items():
if key == 'sameAs':
continue
id_path = path_prefix + f'/{key}'
if isinstance(val, dict):
new_data[key] = add_missing_uris(val, id_path)  # recursion
elif isinstance(val, str):  # handle str before list, since a str is also iterable
new_data[key] = val
elif isinstance(val, list):
new_entry: list = []
for i, entry in enumerate(val):
if isinstance(entry, str):
new_entry.append(entry)
prefix = id_path + f'_{i}'
new_entry.append(add_missing_uris(entry, prefix))  # recursion
new_data[key] = new_entry
else:
new_data[key] = val
return new_data
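# Illustrative sketch (hedged, the example URI is an assumption): entities without an
# @id receive one derived from the path prefix, recursively for nested objects.
#
#     add_missing_uris({'name': 'Jane', 'affiliation': {'name': 'FZJ'}},
#                      'https://example.org/record/1')
#     # -> {'name': 'Jane',
#     #     'affiliation': {'name': 'FZJ', '@id': 'https://example.org/record/1/affiliation'},
#     #     '@id': 'https://example.org/record/1'}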
def add_missing_types(data: dict, type_map: Optional[List[dict]] = None) -> dict:
"""
Add @type to data where it can be known for sure.
TODO: There should be a general solution for this on the
semantic/reasoner level, i.e. schema.org allows for some reasoning; other rules could be stated by us,
e.g. schema.org author, creator and contributor get type Person or Organization,
and the affiliation key is only allowed for a Person.
Example: type_map = [{'type': 'Person', 'keys': ['author', 'creator', 'contributor'], 'if_present': 'affiliation'}]
"""
if type_map is None:
type_map = [{'type': 'Person', 'keys': ['author', 'creator', 'contributor'], 'if_present': 'affiliation'}]
# If 'affiliation' present, type is a person
new_data = data.copy()
def add_type(data_d: Union[dict, list, str], mapping: dict) -> Union[dict, list, str]:
"""Add type"""
if not isinstance(data_d, dict):
return data_d
if '@type' not in data_d:
condition = mapping.get('if_present', '') # empty string is false
if condition:
if condition in data_d.keys():
data_d['@type'] = mapping.get('type')
return data_d
for key, val in new_data.items(): # Currently Only first level, maybe we have to do rekursion
for mapping in type_map:
if key in mapping.get('keys', []):
if isinstance(val, list):
new_data[key] = [add_type(entry, mapping) for entry in val]
elif isinstance(val, dict):
new_data[key] = add_type(val, mapping)
else:
new_data[key] = val
return new_data
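# Illustrative sketch of the default rule above: an author entry with an
# affiliation but no @type is assumed to be a Person.
#
#     add_missing_types({'author': {'name': 'Jane Doe', 'affiliation': 'FZJ'}})
#     # -> {'author': {'name': 'Jane Doe', 'affiliation': 'FZJ', '@type': 'Person'}}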
# complete affiliations and organizations
# organizations with the same name should become the same id
# there should be a list of HGF orgas with ROARs.
# Also if a name of an org contains the name of a org with roar, this new org, should be created and
# linked to the org with the roar. like (Forschungszentrum Jülich GmbH, PGI-7)
def complete_affiliations(data: dict, roar_id_dict: dict, re3data_dict: dict, blank_node_identifier='_:N'):
"""
Completes the given affiliation and organization where possible.
roar_dict={ror_id1:{metadata}, ror_id2:{metadata}}
the roar_id_dict is the inverse, of that. i.e: {'name': [roar_ids], 'grid_id': roar_id}
for example:
"affiliation": "Helmholtz-Zentrum Dresden-Rossendorf",
- > "affiliation": {'@id': 'roar', '@type': organization 'name': "Helmholtz-Zentrum Dresden-Rossendorf"}
more information about it should be somewhere else in the graph, we just want to link it to the right id
example 2: (same for publisher, and includedInDataCatalog)
"provider": {"@type": "Organization", "name": "J\u00fclich DATA"},
-> "provider": {"@type": "Organization", "name": "J\u00fclich DATA", '@id': 'http://doi.org/10.17616/R31NJMYC'},
"""
raise NotImplementedError
def update_key(data: dict, key: Union[str, int], val_dict: dict, overwrite: bool = False):
"""
Update the metadata of a certain key with a certain dict.
If the provider is already present, we might want to complete the metadata so that it is linked correctly.
Example:
val = {
"@id": " http://doi.org/10.17616/R31NJMYC",
"@type": "Organization",
"name": "J\u00fclich DATA"}
"""
orgi = data.get(key, {})
new = orgi
if isinstance(orgi, list):
for i, entry in enumerate(orgi):
if isinstance(entry, dict):
# todo
pass
if isinstance(orgi, dict):
new = orgi
if overwrite:
new.update(val_dict) # shallow merge for now
else:
for nkey, val in val_dict.items():
if nkey not in orgi.keys():
new[nkey] = val
data[key] = new
return data
def generate_patch(graph_key='graph', patch_format='jena') -> Callable:
"""
Generate an RDF patch for a given function which takes a graph as input and returns a graph.
This function is meant to be used as a decorator generator.
In order to find the input graph, it has to be the first argument to the function func,
or a kwarg with the key provided by graph_key (default 'graph').
To find the output graph, the return value or the first return value has to be a graph.
returns: function
raises: ValueError
"""
def generate_patch_dekorator(func):  # graph_key and patch_format come from the enclosing generate_patch call
"""
The actual decorator
"""
def _generate_patch(*args, **kwargs):
"""
returns the results of func plus a patch in front
"""
# deepcopy because the graph is passed by reference; otherwise this would often lead to
# the output graph == input graph after function execution
if graph_key in kwargs:
graph = deepcopy(kwargs[graph_key])
elif len(args) > 0 and isinstance(args[0], Graph):
graph = deepcopy(args[0])
else:
raise ValueError(
f'No input graph found! It has to be provided as the first argument or as the kwarg {graph_key!r}!'
)
results = func(*args, **kwargs)
out_graph = None
if isinstance(results, Graph):
out_graph = results
elif isinstance(results, (list, tuple)):
if len(results) > 0 and isinstance(results[0], Graph):
out_graph = results[0]
if out_graph is None:
raise ValueError('No output graph found! It has to be the single return value or the first one!')
in_both, in_first, in_second = graph_diff(graph, out_graph)
metadata = {
'function_module': func.__module__,
'function_name': func.__name__,
'creation_time': datetime.now().isoformat()
}
patch = generate_patch_from_graph(in_first, in_second, patch_format=patch_format, metadata=metadata)
return patch, results
return _generate_patch
return generate_patch_decorator
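# Usage sketch for the generate_patch decorator (illustrative only; the decorated function
# below is made up). Calling the wrapped function returns (patch, results).
def _example_generate_patch():
    from rdflib import URIRef

    @generate_patch(graph_key='graph')
    def add_example_triple(graph: Graph) -> Graph:
        graph.add((URIRef('http://example.org/s'),
                   URIRef('http://example.org/p'),
                   URIRef('http://example.org/o')))
        return graph

    patch, out_graph = add_example_triple(graph=Graph())
    return patch.to_string()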
def generate_patch_from_graph(in_first: Graph, in_second: Graph, patch_format='jena', metadata=None) -> Graph:
"""
Generate a rdf patch for a given graph difference
:param in_first: a graph, set of tiples containing triples only in the first/input graph
from a diff, i.e. these were deleted
:type in_first: Graph
:param in_first: a graph, set of tiples containing triples only in the second/output graph
from a diff, i.e. these were added
:type in_first: Graph
:param patch_format: Format in which the patch shall be returned, default 'jena'
see: https://jena.apache.org/documentation/rdf-patch/, or
https://github.com/afs/rdf-delta
:type patch_format: str
"""
pat = RDFPatch(metadata=metadata)
patch_id = generate_uuid() # maybe hash the changes instead?
if patch_format != 'jena': # for now
raise ValueError(f'The requested patch format: {patch_format} is not supported.')
# Header
pat.add(('H', Literal('id'), Literal(patch_id), Literal('.')))
# Start transfer
pat.add(('TX', '.'))
# The order does not play a role since these are sets
for sub, pre, obj in in_first:
pat.add(('D', sub, pre, obj, '.'))
for sub, pre, obj in in_second:
pat.add(('A', sub, pre, obj, '.'))
# End transfer
pat.add(('TC', '.'))
return pat
class RDFPatch(object):
"""
This class represents an RDF patch.
Created because the Jena patch format cannot be parsed into a simple RDF graph and
rdf-delta is written in Java (https://github.com/afs/rdf-delta).
If there is another common way already out there, it should be used instead;
for example see: https://www.w3.org/TR/ldpatch/
and https://github.com/pchampin/ld-patch-py (LGPL).
There are other formats one could serialize a patch to; they do not fully overlap in expressive power.
Problems with the current implementation:
- Merging a stack of RDFPatches would not work properly with the current 'several graphs' design,
since the order of the transactions matters...
"""
names = ['header', 'addprefix', 'deleteprefix', 'add_triples', 'delete_triples']
abbr = ['h', 'PA', 'PD', 'A', 'D']
header = Graph()
addprefix = Graph()
deleteprefix = Graph()
add_triples = Graph()
delete_triples = Graph()
metadata: dict = {}
def __init__(
self,
metadata: Optional[dict] = None,
header: Optional[Graph] = None,
addprefix: Optional[Graph] = None,
deleteprefix: Optional[Graph] = None,
add_triples: Optional[Graph] = None,
delete_triples: Optional[Graph] = None,
patch_format: str = 'jena'
):
"""
Init a RDF patch.
:param metadata:
:type metadata: Optional[dict]
"""
self.metadata = metadata if metadata is not None else {}
total_triples = 0
for i, key in enumerate([header, addprefix, deleteprefix, add_triples, delete_triples]):
if key is None:
setattr(self, self.names[i], Graph())
else:
setattr(self, self.names[i], key)
total_triples += len(key)
'''
# make the whole class iterateable
self._current_index = 0
self._class_size = total_triples
def __iter__(self):
#return chain(iter(self.header), iter(self.addprefix), iter(self.deleteprefix), iter(self.add_triples),
# iter(self.delete_triples))
return chain(self.header, self.addprefix, self.deleteprefix, self.add_triples, self.delete_triples)
def __iter__(self):
return self
def __next__(self):
if self._current_index < self._class_size:
lh = len(self.header)
lap = len(self.addprefix)
ldp = len(self.deleteprefix)
ld = len(self.delete_triples)
# graphs cannot be index like this.
if self._current_index < lh:
triple = ('H', *self.header[self._current_index])
elif self._current_index < (lh+lap):
triple = ('PA', *self.addprefix[self._current_index-lh])
elif self._current_index < (lh+lap+ldp):
triple = ('PD', *self.deleteprefix[self._current_index-lh-lap])
elif self._current_index < (lh+lap+ldp+ld):
triple = ('D', *self.delete_triples[self._current_index-lh-lap-ldp])
else:
triple = self.add_triples[self._current_index - lh+lap-ldp-ld]
self._current_index += 1
return triple
raise StopIteration
'''
def to_string(self, patch_format='jena'):
"""
Serialize Patch to string, to a format required for a certain backend.
default Jena, see: https://jena.apache.org/documentation/rdf-patch/, or
https://github.com/afs/rdf-delta
"""
patch_text = ''
if self.header is not None:
graph = getattr(self, 'header')
for (sub, pre, obj) in graph:
patch_text += f'H {sub} {pre} {obj}\n'
patch_text += 'TX .\n'
for i, key in enumerate(self.abbr[1:]):
gname = self.names[1 + i]
graph = getattr(self, gname)
for (sub, pre, obj) in graph:
patch_text += f'{key} {sub} {pre} {obj} .\n'
patch_text += 'TC .'
return patch_text
def add(self, nquad, patch_format='jena'):
"""
Add a given tuple to the corresponding graph
"""
if nquad[0] == 'H':
self.header.add(nquad[1:])
elif nquad[0] == 'PA':
self.addprefix.add(nquad[1:-1])
elif nquad[0] == 'PD':
self.deleteprefix.add(nquad[1:-1])
elif nquad[0] == 'A':
self.add_triples.add(nquad[1:-1])
elif nquad[0] == 'D':
self.delete_triples.add(nquad[1:-1])
elif nquad[0] in ('TX', 'TA', 'TC'):
pass
else:  # not supported
raise ValueError(f'{nquad[0]} of {nquad} is not supported')
#self._class_size += 1
def from_string(self, patch_text, patch_format='jena'):
"""
Populate the patch from a patch string.
Important since patches will be serialized as strings
and have to be read again.
"""
lines = patch_text.split('\n')
for line in lines:
nquad = line.split()
if nquad:  # skip empty lines
self.add(nquad)
# What about patch sequences? Then the current class is not sufficient, since a graph
# cannot capture order.
def apply_patch(graph: Graph, patch: RDFPatch, patch_format='jena') -> Graph:
"""
Apply a given patch to a graph.
Since a patch is written for a specific backend triple store like Jena, this provides a way to apply
the patch through Python to a given graph outside of the backend.
"""
# todo implement PA
o_graph = graph + patch.add_triples - patch.delete_triples
#o_graph =
#EX = Namesspace('')
#o_graph.bind()
return o_graph
def revert_patch(graph: Graph, patch: RDFPatch, patch_format='jena') -> Graph:
"""
Revert a given patch on a graph.
Since a patch is written for a specific backend triple store like Jena, this provides a way to revert
the patch through Python on a given graph outside of the backend.
"""
# todo implement PA
o_graph = graph - patch.add_triples + patch.delete_triples
return o_graph
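# Round-trip sketch (illustrative only): build a patch from two graphs, apply it, then revert it.
def _example_patch_roundtrip():
    from rdflib import URIRef

    triple = (URIRef('http://example.org/s'),
              URIRef('http://example.org/p'),
              URIRef('http://example.org/o'))
    old_graph = Graph()
    new_graph = Graph()
    new_graph.add(triple)

    in_both, in_first, in_second = graph_diff(old_graph, new_graph)
    patch = generate_patch_from_graph(in_first, in_second)

    patched = apply_patch(old_graph, patch)    # now contains the added triple
    reverted = revert_patch(patched, patch)    # back to the contents of old_graph
    return patched, reverted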
|
PypiClean
|
/yggdrasil_framework-1.10.1.post1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl/docs/source/server_client_io.rst
|
.. _server_client_io_rst:
Server/Client I/O
=================
Often you will want to call one model from another like a function. This
requires sending the input variable(s) to the model being called and
then sending the output variable(s) back to the calling model. We refer to
this as a Remote Procedure Call (RPC). The model being called can be considered
a server providing its calculations as a service to the client (the calling model).
|yggdrasil| provides options for treating models as servers and clients.
One Server, One Client
----------------------
In the example below, the "server" model computes the nth number in the
Fibonacci sequence and the "client" model calls the server to get a certain
portion of the Fibonacci sequence and then writes it to a log. The server
will continue processing requests until all clients connected to it have
disconnected.
.. include:: examples/rpc_lesson1_src.rst
The server-side interface API call (``YggRpcServer`` for Python)
requires 3 input variables: the name of the server channel (this will be
the name of the server model), a format string for input to the server model,
and a format string for output from the server model. The client-side API call
(``YggRpcClient`` for Python) also requires 3 input variables: the name of the
client channel (this is the name of the server model joined with the name of
the client model by an underscore, ``<server>_<client>``), a format string for
input to the server model, and a format string for output from the server
model. The last two arguments (the format strings) to both the server and
client API calls should be the same.
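For reference, a minimal sketch of these two initialization calls in Python is
shown below; the channel names and format strings are illustrative only, and the
import path assumes the standard |yggdrasil| Python interface module.

.. code-block:: python

   # Sketch only: names and format strings are placeholders.
   from yggdrasil.interface.YggInterface import YggRpcServer, YggRpcClient

   # Server side: the channel name matches the server model name.
   rpc_server = YggRpcServer("fib_server", "%d", "%d %d")

   # Client side: the channel name is "<server>_<client>".
   rpc_client = YggRpcClient("fib_server_fib_client", "%d", "%d %d")
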
In the server model YAML, the key/value pair ``is_server: True`` needs
to be added to the model entry to indicate that the model will be called
as a server and requires a set of RPC channels. In the client model YAML,
the key/value pair ``client_of: <server_model_name>`` is required to indicate
that the model will act as a client of the ``<server_model_name>`` model.
.. include:: examples/rpc_lesson1_yml.rst
In addition to the RPC API call, the example server also has an input ``params``.
Models acting as servers can have as many inputs/outputs as desired in addition to
the RPC connections. While the example input is not used to modify the output
in this example, such a comm could be used to initialize a model with
parameters and/or initial conditions.
Using Existing Inputs/Outputs
-----------------------------
Models that have already been integrated via |yggdrasil| can also be turned
into servers without modifying the code. Instead of passing a boolean to
the ``is_server`` parameter, such models can provide a mapping with ``input``
and ``output`` parameters that explicitly outline which of an existing model's
inputs/outputs should be used for the RPC call. Receive/send calls to named
input/output channels will then behave as receive/send calls on a server
interface comm.
.. todo:: Example source code and YAML of server replacing an existing input/output
One Server, Two Clients
-----------------------
There is no limit on the number of clients that can connect to a single
server. In the example below, the server is the same as above. The client
code is also essentially the same except that it has been modified to take
a ``client_index`` variable that provides information to differentiate
between two clients using the same source code.
.. include:: examples/rpc_lesson2_src.rst
The server YAML is the same as above. The client YAML now has entries for
two models which are both clients of the server model and call the same
source code.
.. include:: examples/rpc_lesson2_yml.rst
During runtime, request messages from both clients will be routed to the
server model which will process the requests in the order they are received.
Two Servers, Two Clients
------------------------
There is also no limit on the number of copies of a server model that can be
used to respond to RPC requests from the clients. In the example below, the
server and clients are the same as above, but 2 copies of the server model
are run as specified by the model ``copies`` parameter in the server YAML.
.. include:: examples/rpc_lesson2b_yml.rst
This allows client requests to be returned twice as fast, but precludes any
use of an internal state by the server model as there is no way for a client
to be sure that the same server model is responding to its requests and only
its requests.
Wrapped Function Server
-----------------------
Models that are created by letting |yggdrasil| automatically
:ref:`wrap a function <autowrap_rst>` can also act as servers and/or clients.
In the example below, the model acting as a server
is a very simple function that takes a string as input and returns the
same string, and the client is a function that takes a string as input,
calls the server model with the input string, and returns the response.
When a client model is autowrapped from a function, additional care must be
taken so that the client RPC comm can be reused during each call to the
model. In interpreted models (Python, R, MATLAB), this is done by passing the
keyword ``global_scope`` to the RPC client interface initialization function
(``YggRpcClient`` in Python). In compiled models (C, C++, Fortran), this is
done by framing RPC client interface initialization calls with the
``WITH_GLOBAL_SCOPE`` macro (see the language specific versions of this
example for specifics).
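A minimal Python sketch of such a wrapped client function is shown below; the
function name, channel name, and format strings are illustrative, and the exact
keyword usage is an assumption based on the description above.

.. code-block:: python

   # Sketch only: reuse the RPC client comm across repeated calls via global_scope.
   from yggdrasil.interface.YggInterface import YggRpcClient

   def client_function(msg):
       rpc = YggRpcClient("echo_server_echo_client", "%s", "%s", global_scope=True)
       flag, response = rpc.call(msg)  # method name assumed; returns (success flag, reply)
       return response
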
.. include:: examples/rpc_lesson3_src.rst
The RPC connection between the server and the client is controlled by the
same ``is_server`` and ``client_of`` YAML parameters as before.
.. include:: examples/rpc_lesson3_yml.rst
By default, all inputs to a wrapped server function will be used in
the RPC call. However if only some of the inputs should be passed in by the
RPC calls, they can be specified explicitly by providing the ``is_server``
parameter with a map that contains ``input`` and ``output`` parameters that
map to the names of function input/output variables (as in the case of
using existing input/output channels above).
|
PypiClean
|
/indic_punct-2.1.4-py3-none-any.whl/inverse_text_normalization/mr/taggers/money.py
|
from inverse_text_normalization.mr.data_loader_utils import get_abs_path
from inverse_text_normalization.mr.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
get_singulars,
insert_space,
)
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
# from inverse_text_normalization.lang_params import LANG
# lang_data_path = f'inverse_text_normalization/data/{LANG}_data/'
lang_data_path = 'data/'
class MoneyFst(GraphFst):
"""
Finite state transducer for classifying money
e.g. twelve dollars and five cents -> money { integer_part: "12" fractional_part: "05" currency: "$" }
Args:
cardinal: Cardinal GraphFST
decimal: Decimal GraphFST
"""
def __init__(self, cardinal: GraphFst, decimal: GraphFst):
super().__init__(name="money", kind="classify")
# quantity, integer_part, fractional_part, currency, style(depr)
cardinal_graph = cardinal.graph_no_exception
graph_decimal_final = decimal.final_graph_wo_negative
unit = pynini.string_file(get_abs_path(lang_data_path+"currency.tsv"))
unit_singular = pynini.invert(unit)
unit_plural = get_singulars(unit_singular)
graph_unit_singular = pynutil.insert("currency: \"") + convert_space(unit_singular) + pynutil.insert("\"")
graph_unit_plural = pynutil.insert("currency: \"") + convert_space(unit_plural) + pynutil.insert("\"")
add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
# twelve dollars (and) fifty cents, zero cents
cents_standalone = (
pynutil.insert("fractional_part: \"")
+ pynini.union(
pynutil.add_weight(((NEMO_SIGMA - "one") @ cardinal_graph), -0.7) @ add_leading_zero_to_double_digit
+ delete_space
+ pynutil.delete("cents"),
pynini.cross("one", "01") + delete_space + pynutil.delete("cent"),
)
+ pynutil.insert("\"")
)
optional_cents_standalone = pynini.closure(
delete_space
+ pynini.closure(pynutil.delete("and") + delete_space, 0, 1)
+ insert_space
+ cents_standalone,
0,
1,
)
# twelve dollars fifty, only after integer
optional_cents_suffix = pynini.closure(
delete_extra_space
+ pynutil.insert("fractional_part: \"")
+ pynutil.add_weight(cardinal_graph @ add_leading_zero_to_double_digit, -0.7)
+ pynutil.insert("\""),
0,
1,
)
graph_integer = (
pynutil.insert("integer_part: \"")
+ ((NEMO_SIGMA - "one") @ cardinal_graph)
+ pynutil.insert("\"")
+ delete_extra_space
+ (graph_unit_plural | graph_unit_singular)
+ (optional_cents_standalone | optional_cents_suffix)
)
graph_integer |= (
pynutil.insert("integer_part: \"")
+ pynini.cross("one", "1")
+ pynutil.insert("\"")
+ delete_extra_space
+ graph_unit_singular
+ (optional_cents_standalone | optional_cents_suffix)
)
graph_decimal = graph_decimal_final + delete_extra_space + graph_unit_plural
graph_decimal |= pynutil.insert("currency: \"$\" integer_part: \"0\" ") + cents_standalone
final_graph = graph_integer | graph_decimal
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
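# Illustrative construction sketch (commented out; the cardinal/decimal tagger imports and
# constructor signatures below are assumptions based on this package's layout, not part of this module):
# from inverse_text_normalization.mr.taggers.cardinal import CardinalFst
# from inverse_text_normalization.mr.taggers.decimal import DecimalFst
#
# cardinal = CardinalFst()
# decimal = DecimalFst(cardinal=cardinal)
# money = MoneyFst(cardinal=cardinal, decimal=decimal)
# tagged_fst = money.fst  # classifier FST emitting "money { ... }" tokens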
|
PypiClean
|
/bemserver_api-0.22.0-py3-none-any.whl/bemserver_api/resources/users/routes.py
|
from flask.views import MethodView
from flask_smorest import abort
from bemserver_core.model import User
from bemserver_api import Blueprint
from bemserver_api.database import db
from .schemas import UserSchema, UserQueryArgsSchema, BooleanValueSchema
blp = Blueprint(
"User", __name__, url_prefix="/users", description="Operations on users"
)
@blp.route("/")
class UserViews(MethodView):
@blp.login_required
@blp.etag
@blp.arguments(UserQueryArgsSchema, location="query")
@blp.response(200, UserSchema(many=True))
def get(self, args):
"""List users"""
return User.get(**args)
@blp.login_required
@blp.etag
@blp.arguments(UserSchema)
@blp.response(201, UserSchema)
@blp.catch_integrity_error
def post(self, new_item):
"""Add a new user"""
password = new_item.pop("password")
item = User.new(**new_item)
item.set_password(password)
item.is_admin = False
item.is_active = True
db.session.commit()
return item
@blp.route("/<int:item_id>")
class UserByIdViews(MethodView):
@blp.login_required
@blp.etag
@blp.response(200, UserSchema)
def get(self, item_id):
"""Get user by ID"""
item = User.get_by_id(item_id)
if item is None:
abort(404)
return item
@blp.login_required
@blp.etag
@blp.arguments(UserSchema)
@blp.response(200, UserSchema)
@blp.catch_integrity_error
def put(self, new_item, item_id):
"""Update an existing user"""
item = User.get_by_id(item_id)
if item is None:
abort(404)
blp.check_etag(item, UserSchema)
password = new_item.pop("password")
item.update(**new_item)
item.set_password(password)
db.session.commit()
return item
@blp.login_required
@blp.etag
@blp.response(204)
def delete(self, item_id):
"""Delete a user"""
item = User.get_by_id(item_id)
if item is None:
abort(404)
blp.check_etag(item, UserSchema)
item.delete()
db.session.commit()
@blp.route("/<int:item_id>/set_admin", methods=("PUT",))
@blp.login_required
@blp.etag
@blp.arguments(BooleanValueSchema)
@blp.response(204)
def set_admin(args, item_id):
"""Set / unset admin"""
item = User.get_by_id(item_id)
if item is None:
abort(404)
blp.check_etag(item, UserSchema)
item.is_admin = args["value"]
db.session.commit()
blp.set_etag(item, UserSchema)
@blp.route("/<int:item_id>/set_active", methods=("PUT",))
@blp.login_required
@blp.etag
@blp.arguments(BooleanValueSchema)
@blp.response(204)
def set_active(args, item_id):
"""Set / unset active"""
item = User.get_by_id(item_id)
if item is None:
abort(404)
blp.check_etag(item, UserSchema)
item.is_active = args["value"]
db.session.commit()
blp.set_etag(item, UserSchema)
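# Illustrative client-side sketch (not part of the API): toggling the admin flag requires the
# resource ETag via an ``If-Match`` header, as enforced by ``@blp.etag``/``check_etag``.
# The base URL and authentication below are assumptions for the sketch.
def _example_set_admin(base_url="http://localhost:5000", user_id=1, auth=None):
    import requests

    # Fetch the user first to obtain its current ETag.
    resp = requests.get(f"{base_url}/users/{user_id}", auth=auth)
    etag = resp.headers["ETag"]
    # Then set the admin flag, passing the ETag back via If-Match.
    return requests.put(
        f"{base_url}/users/{user_id}/set_admin",
        json={"value": True},
        headers={"If-Match": etag},
        auth=auth,
    )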
|
PypiClean
|
/websauna.system-1.0a8.tar.gz/websauna.system-1.0a8/websauna/system/static/bootstrap.min.js
|
if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){return a(b.target).is(this)?b.handleObj.handler.apply(this,arguments):void 0}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.4",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a(f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.4",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")&&(c.prop("checked")&&this.$element.hasClass("active")?a=!1:b.find(".active").removeClass("active")),a&&c.prop("checked",!this.$element.hasClass("active")).trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active"));a&&this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var d=a(c.target);d.hasClass("btn")||(d=d.closest(".btn")),b.call(d,"toggle"),c.preventDefault()}).on("focus.bs.button.data-api 
blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.4",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d="prev"==a&&0===c||"next"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e="prev"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));return a>this.$items.length-1||0>a?void 0:this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){return this.sliding?void 0:this.slide("next")},c.prototype.prev=function(){return this.sliding?void 0:this.slide("prev")},c.prototype.slide=function(b,d){var e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"==b?"left":"right",i=this;if(f.hasClass("active"))return this.sliding=!1;var j=f[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:h});return a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" ")).addClass("active"),e.removeClass(["active",h].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var 
d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&/show|hide/.test(b)&&(f.toggle=!1),e||c.data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a('[data-toggle="collapse"][href="#'+b.id+'"],[data-toggle="collapse"][data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.4",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var 
e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expanded",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=f.data("bs.collapse"),h=g?"toggle":e.data();c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){b&&3===b.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=c(d),f={relatedTarget:this};e.hasClass("open")&&(e.trigger(b=a.Event("hide.bs.dropdown",f)),b.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger("hidden.bs.dropdown",f)))}))}function c(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.4",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=c(e),g=f.hasClass("open");if(b(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a('<div class="dropdown-backdrop"/>').insertAfter(a(this)).on("click",b);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),f.toggleClass("open").trigger("shown.bs.dropdown",h)}return!1}},g.prototype.keydown=function(b){if(/(38|40|27|32)/.test(b.which)&&!/input|textarea/i.test(b.target.tagName)){var d=a(this);if(b.preventDefault(),b.stopPropagation(),!d.is(".disabled, :disabled")){var e=c(d),g=e.hasClass("open");if(!g&&27!=b.which||g&&27==b.which)return 27==b.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.disabled):visible a",i=e.find('[role="menu"]'+h+', [role="listbox"]'+h);if(i.length){var j=i.index(b.target);38==b.which&&j>0&&j--,40==b.which&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",b).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f,g.prototype.keydown).on("keydown.bs.dropdown.data-api",'[role="menu"]',g.prototype.keydown).on("keydown.bs.dropdown.data-api",'[role="listbox"]',g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?f[b](d):g.show&&f.show(d)})}var 
c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$dialog=this.$element.find(".modal-dialog"),this.$backdrop=null,this.isShown=null,this.originalBodyPad=null,this.scrollbarWidth=0,this.ignoreBackdropClick=!1,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.3.4",c.TRANSITION_DURATION=300,c.BACKDROP_TRANSITION_DURATION=150,c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var d=this,e=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(e),this.isShown||e.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.$dialog.on("mousedown.dismiss.bs.modal",function(){d.$element.one("mouseup.dismiss.bs.modal",function(b){a(b.target).is(d.$element)&&(d.ignoreBackdropClick=!0)})}),this.backdrop(function(){var e=a.support.transition&&d.$element.hasClass("fade");d.$element.parent().length||d.$element.appendTo(d.$body),d.$element.show().scrollTop(0),d.adjustDialog(),e&&d.$element[0].offsetWidth,d.$element.addClass("in").attr("aria-hidden",!1),d.enforceFocus();var f=a.Event("shown.bs.modal",{relatedTarget:b});e?d.$dialog.one("bsTransitionEnd",function(){d.$element.trigger("focus").trigger(f)}).emulateTransitionEnd(c.TRANSITION_DURATION):d.$element.trigger("focus").trigger(f)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").attr("aria-hidden",!0).off("click.dismiss.bs.modal").off("mouseup.dismiss.bs.modal"),this.$dialog.off("mousedown.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(c.TRANSITION_DURATION):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keydown.dismiss.bs.modal")},c.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$body.removeClass("modal-open"),a.resetAdjustments(),a.resetScrollbar(),a.$element.trigger("hidden.bs.modal")})},c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var d=this,e=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var f=a.support.transition&&e;if(this.$backdrop=a('<div class="modal-backdrop '+e+'" />').appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){return 
this.ignoreBackdropClick?void(this.ignoreBackdropClick=!1):void(a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus():this.hide()))},this)),f&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;f?this.$backdrop.one("bsTransitionEnd",b).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var g=function(){d.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",g).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):g()}else b&&b()},c.prototype.handleUpdate=function(){this.adjustDialog()},c.prototype.adjustDialog=function(){var a=this.$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:""})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},c.prototype.checkScrollbar=function(){var a=window.innerWidth;if(!a){var b=document.documentElement.getBoundingClientRect();a=b.right-Math.abs(b.left)}this.bodyIsOverflowing=document.body.clientWidth<a,this.scrollbarWidth=this.measureScrollbar()},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.originalBodyPad=document.body.style.paddingRight||"",this.bodyIsOverflowing&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding-right",this.originalBodyPad)},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.append(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevented()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof b&&b;(e||!/destroy|hide/.test(b))&&(e||d.data("bs.tooltip",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.type=null,this.options=null,this.enabled=null,this.timeout=null,this.hoverState=null,this.$element=null,this.init("tooltip",a,b)};c.VERSION="3.3.4",c.TRANSITION_DURATION=150,c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){if(this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(this.options.viewport.selector||this.options.viewport),this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when initializing "+this.type+" on the window.document object!");for(var e=this.options.trigger.split(" "),f=e.length;f--;){var 
g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c&&c.$tip&&c.$tip.is(":visible")?void(c.hoverState="in"):(c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr("id",g),this.$element.attr("aria-describedby",g),this.options.animation&&f.addClass("fade");var h="function"==typeof this.options.placement?this.options.placement.call(this,f[0],this.$element[0]):this.options.placement,i=/\s?auto?\s?/i,j=i.test(h);j&&(h=h.replace(i,"")||"top"),f.detach().css({top:0,left:0,display:"block"}).addClass(h).data("bs."+this.type,this),this.options.container?f.appendTo(this.options.container):f.insertAfter(this.$element);var k=this.getPosition(),l=f[0].offsetWidth,m=f[0].offsetHeight;if(j){var n=h,o=this.options.container?a(this.options.container):this.$element.parent(),p=this.getPosition(o);h="bottom"==h&&k.bottom+m>p.bottom?"top":"top"==h&&k.top-m<p.top?"bottom":"right"==h&&k.right+l>p.width?"left":"left"==h&&k.left-l<p.left?"right":h,f.removeClass(n).addClass(h)}var q=this.getCalculatedOffset(h,k,l,m);this.applyPlacement(q,h);var r=function(){var a=e.hoverState;e.$element.trigger("shown.bs."+e.type),e.hoverState=null,"out"==a&&e.leave(e)};a.support.transition&&this.$tip.hasClass("fade")?f.one("bsTransitionEnd",r).emulateTransitionEnd(c.TRANSITION_DURATION):r()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top=b.top+g,b.left=b.left+h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var 
i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewportAdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=/top|bottom/.test(c),m=l?2*k.left-e+i:2*k.top-f+j,n=l?"offsetWidth":"offsetHeight";d.offset(b),this.replaceArrow(m,d[0][n],l)},c.prototype.replaceArrow=function(a,b,c){this.arrow().css(c?"left":"top",50*(1-a/b)+"%").css(c?"top":"left","")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(b){function d(){"in"!=e.hoverState&&f.detach(),e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),b&&b()}var e=this,f=a(this.$tip),g=a.Event("hide.bs."+this.type);return this.$element.trigger(g),g.isDefaultPrevented()?void 0:(f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one("bsTransitionEnd",d).emulateTransitionEnd(c.TRANSITION_DURATION):d(),this.hoverState=null,this)},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return this.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d="BODY"==c.tagName,e=c.getBoundingClientRect();null==e.width&&(e=a.extend({},e,{width:e.right-e.left,height:e.bottom-e.top}));var f=d?{top:0,left:0}:b.offset(),g={scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop()},h=d?{width:a(window).width(),height:a(window).height()}:null;return a.extend({},e,g,h,f)},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)return e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.width&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){return this.$tip=this.$tip||a(this.options.template)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){var a=this;clearTimeout(this.timeout),this.hide(function(){a.$element.off("."+a.type).removeData("bs."+a.type)})};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return 
this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;(e||!/destroy|hide/.test(b))&&(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.3.4",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").children().detach().end()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){this.$body=a(document.body),this.$scrollElement=a(a(c).is(document.body)?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",a.proxy(this.process,this)),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.3.4",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b=this,c="offset",d=0;this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight(),a.isWindow(this.$scrollElement[0])||(c="position",d=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var b=a(this),e=b.data("target")||b.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[c]().top+d,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){b.offsets.push(this[0]),b.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<e[0])return this.activeTarget=null,this.clear();for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(void 0===e[a+1]||b<e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){this.activeTarget=b,this.clear();var 
c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")},b.prototype.clear=function(){a(this.selector).parentsUntil(this.options.target,".active").removeClass("active")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.3.4",c.TRANSITION_DURATION=150,c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){
var e=c.find(".active:last a"),f=a.Event("hide.bs.tab",{relatedTarget:b[0]}),g=a.Event("show.bs.tab",{relatedTarget:e[0]});if(e.trigger(f),b.trigger(g),!g.isDefaultPrevented()&&!f.isDefaultPrevented()){var h=a(d);this.activate(b.closest("li"),c),this.activate(h,h.parent(),function(){e.trigger({type:"hidden.bs.tab",relatedTarget:b[0]}),b.trigger({type:"shown.bs.tab",relatedTarget:e[0]})})}}},c.prototype.activate=function(b,d,e){function f(){g.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),b.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),h?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu").length&&b.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),e&&e()}var g=d.find("> .active"),h=e&&a.support.transition&&(g.length&&g.hasClass("fade")||!!d.find("> .fade").length);g.length&&h?g.one("bsTransitionEnd",f).emulateTransitionEnd(c.TRANSITION_DURATION):f(),g.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this};var e=function(c){c.preventDefault(),b.call(a(this),"show")};a(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',e).on("click.bs.tab.data-api",'[data-toggle="pill"]',e)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.3.4",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getState=function(a,b,c,d){var e=this.$target.scrollTop(),f=this.$element.offset(),g=this.$target.height();if(null!=c&&"top"==this.affixed)return c>e?"top":!1;if("bottom"==this.affixed)return null!=c?e+this.unpin<=f.top?!1:"bottom":a-d>=e+g?!1:"bottom";var h=null==this.affixed,i=h?e:f.top,j=h?g:b;return null!=c&&c>=e?"top":null!=d&&i+j>=a-d?"bottom":!1},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=this.$element.height(),d=this.options.offset,e=d.top,f=d.bottom,g=a(document.body).height();"object"!=typeof d&&(f=e=d),"function"==typeof e&&(e=d.top(this.$element)),"function"==typeof f&&(f=d.bottom(this.$element));var h=this.getState(g,b,e,f);if(this.affixed!=h){null!=this.unpin&&this.$element.css("top","");var i="affix"+(h?"-"+h:""),j=a.Event(i+".bs.affix");if(this.$element.trigger(j),j.isDefaultPrevented())return;this.affixed=h,this.unpin="bottom"==h?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(i).trigger(i.replace("affix","affixed")+".bs.affix")}"bottom"==h&&this.$element.offset({top:g-b-f})}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return 
a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},null!=d.offsetBottom&&(d.offset.bottom=d.offsetBottom),null!=d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery);
|
PypiClean
|
/DeepManufacturing-0.0.7.tar.gz/DeepManufacturing-0.0.7/ManufacturingNet/models/svm.py
|
from math import sqrt
import matplotlib.pyplot as plt
from sklearn.metrics import (accuracy_score, confusion_matrix, make_scorer,
mean_squared_error, roc_auc_score, roc_curve)
from sklearn.model_selection import (GridSearchCV, cross_val_score,
train_test_split)
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, NuSVC, NuSVR
class SVM:
"""Class model for support vector machine (SVM) models."""
def __init__(self, attributes=None, labels=None):
"""Initializes a SVM object."""
self.attributes = attributes
self.labels = labels
self.test_size = None
self.cv = None
self.graph_results = None
self.fpr = None
self.tpr = None
self.bin = False
self.gridsearch = False
self.gs_params = None
self.gs_result = None
self.classifier_SVC = None
self.accuracy_SVC = None
self.roc_auc_SVC = None
self.confusion_matrix_SVC = None
self.cross_val_scores_SVC = None
self.classifier_nu_SVC = None
self.accuracy_nu_SVC = None
self.roc_auc_nu_SVC = None
self.confusion_matrix_nu_SVC = None
self.cross_val_scores_nu_SVC = None
self.classifier_linear_SVC = None
self.accuracy_linear_SVC = None
self.cross_val_scores_linear_SVC = None
self.regressor_SVR = None
self.mean_squared_error_SVR = None
self.r2_score_SVR = None
self.r_score_SVR = None
self.cross_val_scores_SVR = None
self.regressor_nu_SVR = None
self.mean_squared_error_nu_SVR = None
self.r2_score_nu_SVR = None
self.r_score_nu_SVR = None
self.cross_val_scores_nu_SVR = None
self.regressor_linear_SVR = None
self.mean_squared_error_linear_SVR = None
self.r2_score_linear_SVR = None
self.r_score_linear_SVR = None
self.cross_val_scores_linear_SVR = None
# References to training and testing subsets of dataset
# For re-use purposes
self.dataset_X_train = None
self.dataset_y_train = None
self.dataset_X_test = None
self.dataset_y_test = None
# Accessor Methods
def get_attributes(self):
"""Accessor method for attributes."""
return self.attributes
def get_labels(self):
"""Accessor method for labels."""
return self.labels
def get_classifier_SVC(self):
"""Accessor method for classifier_SVC."""
return self.classifier_SVC
def get_accuracy_SVC(self):
"""Accessor method for accuracy_SVC."""
return self.accuracy_SVC
def get_roc_auc_SVC(self):
"""Accessor method for roc_auc_SVC."""
return self.roc_auc_SVC
def get_confusion_matrix_SVC(self):
"""Accessor method for confusion_matrix_SVC."""
return self.confusion_matrix_SVC
def get_cross_val_scores_SVC(self):
"""Accessor method for cross_val_scores_SVC."""
return self.cross_val_scores_SVC
def get_classifier_nu_SVC(self):
"""Accessor method for classifier_nu_SVC."""
return self.classifier_nu_SVC
def get_accuracy_nu_SVC(self):
"""Accessor method for accuracy_nu_SVC."""
return self.accuracy_nu_SVC
def get_roc_auc_nu_SVC(self):
"""Accessor method for roc_auc_nu_SVC."""
return self.roc_auc_nu_SVC
def get_confusion_matrix_nu_SVC(self):
"""Accessor method for confusion_matrix_nu_SVC."""
return self.confusion_matrix_nu_SVC
def get_cross_val_scores_nu_SVC(self):
"""Accessor method for cross_val_scores_nu_SVC."""
return self.cross_val_scores_nu_SVC
def get_classifier_linear_SVC(self):
"""Accessor method for classifier_linear_SVC."""
return self.classifier_linear_SVC
def get_accuracy_linear_SVC(self):
"""Accessor method for accuracy_linear_SVC."""
return self.accuracy_linear_SVC
def get_cross_val_scores_linear_SVC(self):
"""Accessor method for cross_val_scores_linear_SVC."""
return self.cross_val_scores_linear_SVC
def get_regressor_SVR(self):
"""Accessor method for regressor_SVR."""
return self.regressor_SVR
def get_mean_squared_error_SVR(self):
"""Accessor method for mean_squared_error_SVR."""
return self.mean_squared_error_SVR
def get_r2_score_SVR(self):
"""Accessor method for r2_score_SVR."""
return self.r2_score_SVR
def get_r_score_SVR(self):
"""Accessor method for r_score_SVR."""
return self.r_score_SVR
def get_cross_val_scores_SVR(self):
"""Accessor method for cross_val_scores_SVR."""
return self.cross_val_scores_SVR
def get_regressor_nu_SVR(self):
"""Accessor method for regressor_nu_SVR."""
return self.regressor_nu_SVR
def get_mean_squared_error_nu_SVR(self):
"""Accessor method for mean_squared_error_nu_SVR."""
return self.mean_squared_error_nu_SVR
def get_r2_score_nu_SVR(self):
"""Accessor method for r2_score_nu_SVR."""
return self.r2_score_nu_SVR
def get_r_score_nu_SVR(self):
"""Accessor method for r_score_nu_SVR."""
return self.r_score_nu_SVR
def get_cross_val_scores_nu_SVR(self):
"""Accessor method for cross_val_scores_nu_SVR."""
return self.cross_val_scores_nu_SVR
def get_regressor_linear_SVR(self):
"""Accessor method for regressor_linear_SVR."""
return self.regressor_linear_SVR
def get_mean_squared_error_linear_SVR(self):
"""Accessor method for mean_squared_error_linear_SVR."""
return self.mean_squared_error_linear_SVR
def get_r2_score_linear_SVR(self):
"""Accessor method for r2_score_linear_SVR."""
return self.r2_score_linear_SVR
def get_r_score_linear_SVR(self):
"""Accessor method for r_score_linear_SVR."""
return self.r_score_linear_SVR
def get_cross_val_scores_linear_SVR(self):
"""Accessor method for cross_val_scores_linear_SVR."""
return self.cross_val_scores_linear_SVR
# Modifier Methods
def set_attributes(self, new_attributes=None):
"""Modifier method for attributes."""
self.attributes = new_attributes
def set_labels(self, new_labels=None):
"""Modifier method for labels."""
self.labels = new_labels
# Wrappers for SVM classification classes
def run_SVC(self):
"""Runs SVC model."""
if self._check_inputs():
# Initialize classifier
self.classifier_SVC = self._create_SVC_model(is_nu=False)
# Split data, if needed
# If testing/training sets are still None, call _split_data()
if self.dataset_X_test is None:
self._split_data()
# Train classifier
# Handle exception if arguments are incorrect
try:
self.classifier_SVC.fit(self.dataset_X_train,
self.dataset_y_train)
except Exception as e:
print("An exception occurred while training the SVC model.",
"Check your arguments and try again.")
print("Here is the exception message:")
print(e)
self.classifier_SVC = None
return
# Metrics
self.accuracy_SVC = self.classifier_SVC.score(self.dataset_X_test,
self.dataset_y_test)
y_prediction = self.classifier_SVC.predict(self.dataset_X_test)
probas = self.classifier_SVC.predict_proba(self.dataset_X_test)
# If classification is binary, calculate roc_auc
if probas.shape[1] == 2:
self.bin = True
self.roc_auc_SVC = roc_auc_score(self.dataset_y_test, probas[::, 1])
self.fpr, self.tpr, _ = roc_curve(self.dataset_y_test,
probas[::, 1])
# Else, calculate confusion matrix
else:
self.confusion_matrix_SVC = confusion_matrix(self.dataset_y_test,
y_prediction)
self.cross_val_scores_SVC = \
cross_val_score(self.classifier_SVC, self.attributes,
self.labels, cv=self.cv)
# Output results
self._output_classifier_results(model="SVC")
def predict_SVC(self, dataset_X=None):
"""Classifies each datapoint in dataset_X using the SVC model.
Returns the predicted classifications.
"""
# Check that run_SVC() has already been called
if self.classifier_SVC is None:
print("The SVC model seems to be missing. Have you called",
"run_SVC() yet?")
return None
# Try to make the prediction
# Handle exception if dataset_X isn't a valid input
try:
y_prediction = self.classifier_SVC.predict(dataset_X)
except Exception as e:
print("The SVC model failed to run.",
"Check your inputs and try again.")
print("Here is the exception message:")
print(e)
return None
print("\nSVC Predictions:\n", y_prediction, "\n")
return y_prediction
def run_nu_SVC(self):
"""Runs NuSVC model."""
if self._check_inputs():
# Initialize classifier
self.classifier_nu_SVC = self._create_SVC_model(is_nu=True)
# Split data, if needed
# If testing/training sets are still None, call _split_data()
if self.dataset_X_test is None:
self._split_data()
# Train classifier
# Handle exception if arguments are incorrect
try:
self.classifier_nu_SVC.fit(self.dataset_X_train,
self.dataset_y_train)
except Exception as e:
print("An exception occurred while training the NuSVC model.",
"Check your arguments and try again.")
print("Here is the exception message:")
print(e)
self.classifier_nu_SVC = None
return
# Metrics
self.accuracy_nu_SVC =\
self.classifier_nu_SVC.score(self.dataset_X_test,
self.dataset_y_test)
y_prediction = self.classifier_nu_SVC.predict(self.dataset_X_test)
probas = self.classifier_nu_SVC.predict_proba(self.dataset_X_test)
# If classification is binary, calculate roc_auc
if probas.shape[1] == 2:
self.bin = True
self.roc_auc_nu_SVC = roc_auc_score(
self.dataset_y_test, probas[::, 1])
self.fpr, self.tpr, _ = \
roc_curve(self.dataset_y_test, probas[::, 1])
# Else, calculate confusion matrix
else:
self.confusion_matrix_nu_SVC = \
confusion_matrix(self.dataset_y_test, y_prediction)
self.cross_val_scores_nu_SVC = \
cross_val_score(self.classifier_nu_SVC, self.attributes,
self.labels, cv=self.cv)
# Output results
self._output_classifier_results(model="NuSVC")
def predict_nu_SVC(self, dataset_X=None):
"""Classifies each datapoint in dataset_X using the NuSVC model.
Returns the predicted classifications.
"""
# Check that run_nu_SVC() has already been called
if self.classifier_nu_SVC is None:
print("The NuSVC model seems to be missing.",
"Have you called run_nu_SVC() yet?")
return None
# Try to make the prediction
# Handle exception if dataset_X isn't a valid input
try:
y_prediction = self.classifier_nu_SVC.predict(dataset_X)
except Exception as e:
print("The NuSVC model failed to run.",
"Check your inputs and try again.")
print("Here is the exception message:")
print(e)
return None
print("\nNuSVC Predictions:\n", y_prediction, "\n")
return y_prediction
def run_linear_SVC(self):
"""Runs LinearSVC model."""
if self._check_inputs():
# Initialize classifier
self.classifier_linear_SVC = self._create_linear_SVC_model()
# Split data, if needed
# If testing/training sets are still None, call _split_data()
if self.dataset_X_test is None:
self._split_data()
# Train classifier
# Handle exception if arguments are incorrect
try:
self.classifier_linear_SVC.fit(self.dataset_X_train,
self.dataset_y_train)
except Exception as e:
print("An exception occurred while training the LinearSVC",
"model. Check your arguments and try again.")
print("Here is the exception message:")
print(e)
self.classifier_linear_SVC = None
return
# Metrics
self.accuracy_linear_SVC = \
self.classifier_linear_SVC.score(self.dataset_X_test,
self.dataset_y_test)
self.cross_val_scores_linear_SVC =\
cross_val_score(self.classifier_linear_SVC, self.attributes,
self.labels, cv=self.cv)
# Output results
self._output_classifier_results(model="LinearSVC")
def predict_linear_SVC(self, dataset_X=None):
"""Classifies each datapoint in dataset_X using the LinearSVC
model. Returns the predicted classifications.
"""
# Check that run_linear_SVC() has already been called
if self.classifier_linear_SVC is None:
print("The LinearSVC model seems to be missing.",
"Have you called run_linear_SVC() yet?")
return None
# Try to make the prediction
# Handle exception if dataset_X isn't a valid input
try:
y_prediction = self.classifier_linear_SVC.predict(dataset_X)
except Exception as e:
print("The LinearSVC model failed to run.",
"Check your inputs and try again.")
print("Here is the exception message:")
print(e)
return None
print("\nLinearSVC Predictions:\n", y_prediction, "\n")
return y_prediction
# Wrappers for SVM regression classes
def run_SVR(self):
"""Runs SVR model."""
if self._check_inputs():
# Initialize regression model
self.regressor_SVR = self._create_SVR_model(is_nu=False)
# Split data, if needed
# If testing/training sets are still None, call _split_data()
if self.dataset_X_test is None:
self._split_data()
# Train regression model
# Handle exception if arguments are incorrect and/or if labels isn't
# quantitative data
try:
self.regressor_SVR.fit(self.dataset_X_train,
self.dataset_y_train)
except Exception as e:
print("An exception occurred while training the SVR model.",
"Check you arguments and try again.")
print("Does labels only contain quantitative data?")
print("Here is the exception message:")
print(e)
self.regressor_SVR = None
return
# Evaluate metrics of model
y_prediction = self.regressor_SVR.predict(self.dataset_X_test)
self.mean_squared_error_SVR = \
mean_squared_error(self.dataset_y_test, y_prediction)
self.r2_score_SVR = self.regressor_SVR.score(self.dataset_X_test,
self.dataset_y_test)
if self.r2_score_SVR >= 0:
self.r_score_SVR = sqrt(self.r2_score_SVR)
self.cross_val_scores_SVR = \
cross_val_score(self.regressor_SVR, self.attributes,
self.labels, cv=self.cv)
# Output results
self._output_regressor_results(model="SVR")
def predict_SVR(self, dataset_X=None):
"""Predicts the output of each datapoint in dataset_X using the
SVR model. Returns the predictions.
"""
# Check that run_SVR() has already been called
if self.regressor_SVR is None:
print("The SVR model seems to be missing.",
"Have you called run_SVR() yet?")
return None
# Try to make the prediction
# Handle exception if dataset_X isn't a valid input
try:
y_prediction = self.regressor_SVR.predict(dataset_X)
except Exception as e:
print("The SVR model failed to run.",
"Check your inputs and try again.")
print("Here is the exception message:")
print(e)
return None
print("\nSVR Predictions:\n", y_prediction, "\n")
return y_prediction
def run_nu_SVR(self):
"""Runs NuSVR model."""
if self._check_inputs():
# Initialize regression model
self.regressor_nu_SVR = self._create_SVR_model(is_nu=True)
# Split data, if needed
# If testing/training sets are still None, call _split_data()
if self.dataset_X_test is None:
self._split_data()
# Train regression model
# Handle exception if arguments are incorrect and/or if labels isn't
# quantitative data
try:
self.regressor_nu_SVR.fit(self.dataset_X_train,
self.dataset_y_train)
except Exception as e:
print("An exception occurred while training the NuSVR model.",
"Check you arguments and try again.")
print("Does labels only contain quantitative data?")
print("Here is the exception message:")
print(e)
self.regressor_nu_SVR = None
return
# Metrics
y_prediction = self.regressor_nu_SVR.predict(self.dataset_X_test)
self.mean_squared_error_nu_SVR = \
mean_squared_error(self.dataset_y_test, y_prediction)
self.r2_score_nu_SVR = \
self.regressor_nu_SVR.score(self.dataset_X_test,
self.dataset_y_test)
if self.r2_score_nu_SVR >= 0:
self.r_score_nu_SVR = sqrt(self.r2_score_nu_SVR)
self.cross_val_scores_nu_SVR = \
cross_val_score(self.regressor_nu_SVR, self.attributes,
self.labels, cv=self.cv)
# Output results
self._output_regressor_results(model="NuSVR")
def predict_nu_SVR(self, dataset_X=None):
"""Predicts the output of each datapoint in dataset_X using the
NuSVR model. Returns the predictions.
"""
# Check that run_nu_SVR() has already been called
if self.regressor_nu_SVR is None:
print("The NuSVR model seems to be missing.",
"Have you called run_nu_SVR() yet?")
return None
# Try to make the prediction; handle exception if dataset_X isn't a valid input
try:
y_prediction = self.regressor_nu_SVR.predict(dataset_X)
except Exception as e:
print("The NuSVR model failed to run.",
"Check your inputs and try again.")
print("Here is the exception message:")
print(e)
return None
print("\nNuSVR Predictions:\n", y_prediction, "\n")
return y_prediction
def run_linear_SVR(self):
"""Runs LinearSVR model."""
if self._check_inputs():
# Initialize regression model
self.regressor_linear_SVR = self._create_linear_SVR_model()
# Split data, if needed
# If testing/training sets are still None, call _split_data()
if self.dataset_X_test is None:
self._split_data()
# Train regression model
# Handle exception if arguments are incorrect and/or labels isn't
# quantitative data
try:
self.regressor_linear_SVR.fit(self.dataset_X_train,
self.dataset_y_train)
except Exception as e:
print("An exception occurred while training the LinearSVR",
"model. Check you arguments and try again.")
print("Does labels only contain quantitative data?")
print("Here is the exception message:")
print(e)
self.regressor_linear_SVR = None
return
# Metrics
y_prediction = self.regressor_linear_SVR.predict(
self.dataset_X_test)
self.mean_squared_error_linear_SVR = \
mean_squared_error(self.dataset_y_test, y_prediction)
self.r2_score_linear_SVR = \
self.regressor_linear_SVR.score(self.dataset_X_test,
self.dataset_y_test)
if self.r2_score_linear_SVR >= 0:
self.r_score_linear_SVR = sqrt(self.r2_score_linear_SVR)
self.cross_val_scores_linear_SVR = \
cross_val_score(self.regressor_linear_SVR, self.attributes,
self.labels, cv=self.cv)
# Output results
self._output_regressor_results(model="LinearSVR")
def predict_linear_SVR(self, dataset_X=None):
"""Predicts the output of each datapoint in dataset_X using the
LinearSVR model. Returns the predictions.
"""
# Check that run_linear_SVR() has already been called
if self.regressor_linear_SVR is None:
print("The LinearSVR model seems to be missing.",
"Have you called run_linear_SVR() yet?")
return None
# Try to make the prediction
# Handle exception if dataset_X isn't a valid input
try:
y_prediction = self.regressor_linear_SVR.predict(dataset_X)
except Exception as e:
print("The LinearSVR model failed to run.",
"Check your inputs and try again.")
print("Here is the exception message:")
print(e)
return None
print("\nLinearSVR Predictions:\n", y_prediction, "\n")
return y_prediction
# Helper methods
def _create_SVC_model(self, is_nu):
"""Runs UI for getting parameters and creating SVC or NuSVC
model.
"""
if is_nu:
print("\n==========================")
print("= NuSVC Parameter Inputs =")
print("==========================\n")
else:
print("\n========================")
print("= SVC Parameter Inputs =")
print("========================\n")
print("Default values:", "test_size = 0.25", "cv = 5",
"graph_results = False", sep="\n")
if is_nu:
print("nu = 0.5")
else:
print("C = 1.0")
print("kernel = 'rbf'",
"degree = 3",
"gamma = 'scale'",
"coef0 = 0.0",
"shrinking = True",
"tol = 0.001",
"cache_size = 200",
"class_weight = None",
"max_iter = -1",
"decision_function_shape = 'ovr'",
"break_ties = False",
"random_state = None",
"verbose = False", sep="\n")
# Set defaults
self.test_size = 0.25
self.cv = None
self.graph_results = False
while True:
user_input = input("\nUse default parameters (Y/n)? ").lower()
if user_input in {"y", ""}:
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
if is_nu:
return NuSVC(probability=True)
return SVC(probability=True)
elif user_input == "n":
break
else:
print("Invalid input.")
print("\nIf you are unsure about a parameter, press enter to use its",
"default value.")
print("If you finish entering parameters early, enter 'q' to skip",
"ahead.\n")
# Set more defaults
if is_nu:
nu = 0.5
else:
C = 1.0
kernel = "rbf"
degree = 3
gamma = "scale"
coef0 = 0.0
shrinking = True
tol = 0.001
cache_size = 200
class_weight = None
max_iter = -1
decision_function_shape = "ovr"
break_ties = False
random_state = None
verbose = False
# Get user parameter input
while True:
break_early = False
while True:
user_input = input("\nWhat fraction of the dataset should be the "
+ "testing set (0,1)? ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0 or user_input >= 1:
raise Exception
self.test_size = user_input
break
except Exception:
print("Invalid input.")
print("test_size =", self.test_size)
if break_early:
break
while True:
user_input = input("\nUse GridSearch to find the best "
+ "hyperparameters (y/N)? ").lower()
if user_input == "q":
break_early = True
break
elif user_input in {"n", "y", ""}:
break
else:
print("Invalid input.")
if break_early:
break
while user_input == "y":
print("\n= GridSearch Parameter Inputs =\n")
print("Enter 'q' to skip GridSearch.")
self.gridsearch = True
params = {}
while True:
print("\nEnter the kernels to try out.")
print("Options: 1-'linear', 2-'poly', 3-'rbf', 4-'sigmoid'.",
"Enter 'all' for all options.")
print("Example input: 1,2,3")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
elif user_input == "all":
kern_params = ["linear", "poly", "rbf", "sigmoid"]
break
else:
kern_dict = {1: "linear", 2: "poly", 3: "rbf",
4: "sigmoid"}
try:
kern_params_int = \
list(map(int, list(user_input.split(","))))
if len(kern_params_int) > len(kern_dict):
raise Exception
kern_params = []
for each in kern_params_int:
if not kern_dict.get(each):
raise Exception
kern_params.append(kern_dict.get(each))
break
except Exception:
print("Invalid input.")
if break_early:
break
params["kernel"] = kern_params
print("kernels:", kern_params)
while True:
print("\nEnter the list of kernel coefficients/gamma values",
"to try out.")
print("Example input: 0.001,0.0001")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
try:
gamma_params = \
list(map(float, list(user_input.split(","))))
if len(gamma_params) == 0:
raise Exception
for num in gamma_params:
if num <= 0:
raise Exception
break
except Exception:
print("Invalid input.")
if break_early:
break
params["gamma"] = gamma_params
print("gammas:", gamma_params)
while not is_nu:
print("\nEnter the list of regularization parameters to",
"try out.")
print("Example input: 1,10,100")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
try:
gamma_params = \
list(map(int, list(user_input.split(","))))
if len(gamma_params) == 0:
raise Exception
for num in gamma_params:
if num <= 0:
raise Exception
params["C"] = gamma_params
print("C values:", gamma_params)
break
except Exception:
print("Invalid input.")
if break_early:
break
print("\n= End of GridSearch inputs. =")
self.gs_params = params
best_params = self._run_gridsearch_classifier(is_nu)
kernel = best_params["kernel"]
gamma = best_params["gamma"]
if not is_nu:
C = best_params["C"]
break
break_early = False
while True:
user_input = input("\nEnter the number of folds for cross "
+ "validation [2,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input < 2:
raise Exception
self.cv = user_input
break
except Exception:
print("Invalid input.")
print("cv =", self.cv)
if break_early:
break
while True:
user_input = \
input("\nGraph the ROC curve? Only binary classification "
+ "is supported (y/N): ").lower()
if user_input == "y":
self.graph_results = True
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("graph_results =", self.graph_results)
if break_early:
break
while is_nu:
user_input = input("\nEnter a decimal for nu (0,1]: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0 or user_input > 1:
raise Exception
nu = user_input
break
except Exception:
print("Invalid input.")
if is_nu:
print("nu =", nu)
while not is_nu and not self.gridsearch:
user_input = \
input("\nEnter a positive regularization parameter C: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
C = user_input
break
except Exception:
print("Invalid input.")
if not is_nu and not self.gridsearch:
print("C =", C)
if break_early:
break
while not self.gridsearch:
print("\nWhich kernel type should be used?")
user_input = \
input("Enter 1 for 'linear', 2 for 'poly', 3 for 'rbf', 4 "
+ "for 'sigmoid', or 5 for 'precomputed': ")
if user_input == "1":
kernel = "linear"
break
elif user_input == "2":
kernel = "poly"
break
elif user_input == "4":
kernel = "sigmoid"
break
elif user_input == "5":
kernel = "recomputed"
break
elif user_input in {"3", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
if not self.gridsearch:
print("kernel =", kernel)
if break_early:
break
while kernel == "poly":
user_input = \
input("\nEnter the degree of the kernel function [0,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input < 0:
raise Exception
degree = user_input
break
except Exception:
print("Invalid input.")
if kernel == "poly":
print("degree =", degree)
if break_early:
break
while kernel in {"rbf", "poly", "sigmoid"} and not self.gridsearch:
print("\nSet the kernel coefficient.")
user_input = input("Enter 1 for 'scale', or 2 for 'auto': ")
if user_input == "2":
gamma = "auto"
break
elif user_input in {"1", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
if kernel in {"rbf", "poly", "sigmoid"} and not self.gridsearch:
print("gamma =", gamma)
if break_early:
break
while kernel in {"poly", "sigmoid"}:
user_input = input("\nEnter coef0, the independent term in the "
+ "kernel function [0,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input < 0:
raise Exception
coef0 = user_input
break
except Exception:
print("Invalid input.")
if kernel in {"poly", "sigmoid"}:
print("coef0 =", coef0)
if break_early:
break
while True:
user_input = \
input("\nUse the shrinking heuristic (Y/n)? ").lower()
if user_input == "n":
shrinking = False
break
elif user_input in {"y", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("shrinking =", shrinking)
if break_early:
break
while True:
user_input = input("\nEnter a positive number for the "
+ "tolerance for stopping criterion: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
tol = user_input
break
except Exception:
print("Invalid input.")
print("tol =", tol)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive kernel cache size in MB: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
cache_size = user_input
break
except Exception:
print("Invalid input.")
print("cache_size =", cache_size)
if break_early:
break
while True:
user_input = \
input("\nAutomatically adjust class weights (y/N)? ").lower()
if user_input == "y":
class_weight = "balanced"
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("class_weight =", class_weight)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive maximum number of iterations, or "
+ "press enter for no limit: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input <= 0:
raise Exception
max_iter = user_input
break
except Exception:
print("Invalid input.")
print("max_iter =", max_iter)
if break_early:
break
while True:
print("\nSet the decision function.")
user_input = \
input("Enter 1 for one-vs-rest, or 2 for one-vs-one: ")
if user_input == "2":
decision_function_shape = "ovo"
break
elif user_input in {"1", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
print("decision_function_shape =", decision_function_shape)
if break_early:
break
while True:
user_input = input("\nEnable tie-breaking (y/N)? ").lower()
if user_input == "y":
break_ties = True
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("break_ties =", break_ties)
if break_early:
break
while True:
user_input = \
input("\nEnter a seed for the random number generator: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
random_state = int(user_input)
break
except Exception:
print("Invalid input.")
print("random_state =", random_state)
if break_early:
break
while True:
user_input = input("\nEnable verbose logging (y/N)? ").lower()
if user_input == "y":
verbose = True
break
elif user_input in {"n", "q", ""}:
break
else:
print("Invalid input.")
print("verbose =", verbose)
break
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
if is_nu:
return NuSVC(nu=nu, kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, shrinking=shrinking,
probability=True, tol=tol, cache_size=cache_size,
class_weight=class_weight, verbose=verbose,
max_iter=max_iter,
decision_function_shape=decision_function_shape,
break_ties=break_ties, random_state=random_state)
return SVC(C=C, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
shrinking=shrinking, probability=True, tol=tol,
cache_size=cache_size, class_weight=class_weight,
verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
break_ties=break_ties, random_state=random_state)
def _run_gridsearch_classifier(self, is_nu):
"""Runs GridSearch with the parameters given in run_SVC() or
run_nu_SVC(). Returns the best parameters."""
if is_nu:
clf = NuSVC()
else:
clf = SVC()
acc_scorer = make_scorer(accuracy_score)
if self.dataset_X_test is None:
self._split_data()
# Run GridSearch
grid_obj = GridSearchCV(clf, self.gs_params, scoring=acc_scorer)
grid_obj = grid_obj.fit(self.dataset_X_train, self.dataset_y_train)
# Set the clf to the best combination of parameters
clf = grid_obj.best_estimator_
# Fit the best algorithm to the data
clf.fit(self.dataset_X_train, self.dataset_y_train)
predictions = clf.predict(self.dataset_X_test)
self.gs_result = accuracy_score(self.dataset_y_test, predictions)
# Return the best parameters
print("\nBest GridSearch Parameters:\n", grid_obj.best_params_, "\n")
return grid_obj.best_params_
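# Illustrative (hypothetical) shape of the self.gs_params grid consumed above,
# matching the kernel/gamma/C prompts in _create_SVC_model, e.g.:
#   {"kernel": ["rbf", "linear"], "gamma": [0.001, 0.0001], "C": [1, 10, 100]}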
def _create_linear_SVC_model(self):
"""Runs UI for getting parameters and creating LinearSVC model."""
print("\n==============================")
print("= LinearSVC Parameter Inputs =")
print("==============================\n")
print("Default values:",
"test_size = 0.25",
"cv = 5",
"penalty = 'l2'",
"loss = 'squared_hinge'",
"dual = True",
"tol = 0.0001",
"C = 1.0",
"multi_class = 'ovr'",
"fit_intercept = True",
"intercept_scaling = 1",
"class_weight = None",
"random_state = None",
"max_iter = 1000",
"verbose = False", sep="\n")
# Set defaults
self.test_size = 0.25
self.cv = None
while True:
user_input = input("\nUse default parameters (Y/n)? ").lower()
if user_input in {"y", ""}:
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
return LinearSVC()
elif user_input == "n":
break
else:
print("Invalid input.")
print("\nIf you are unsure about a parameter, press enter to use its",
"default value.")
print("If you finish entering parameters early, enter 'q' to skip",
"ahead.\n")
# Set more defaults
penalty = "l2"
loss = "squared_hinge"
dual = True
tol = 0.0001
C = 1.0
multi_class = "ovr"
fit_intercept = True
intercept_scaling = 1
class_weight = None
random_state = None
max_iter = 1000
verbose = 0
# Get user parameter input
while True:
break_early = False
while True:
user_input = input("\nWhat fraction of the dataset should be the "
+ "testing set (0,1)? ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0 or user_input >= 1:
raise Exception
self.test_size = user_input
break
except Exception:
print("Invalid input.")
print("test_size =", self.test_size)
if break_early:
break
while True:
user_input = input("\nEnter the number of folds for cross "
+ "validation [2,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input < 2:
raise Exception
self.cv = user_input
break
except Exception:
print("Invalid input.")
print("cv =", self.cv)
if break_early:
break
while True:
user_input = input("\nCalculate a y-intercept (Y/n)? ").lower()
if user_input == "n":
fit_intercept = False
break
elif user_input in {"y", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("fit_intercept =", fit_intercept)
if break_early:
break
while fit_intercept:
user_input = \
input("\nEnter a number for the intercept scaling factor: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
intercept_scaling = float(user_input)
except Exception:
print("Invalid input.")
if fit_intercept:
print("intercept_scaling =", intercept_scaling)
if break_early:
break
while True:
print("\nSet the norm used in penalization.")
user_input = input("Enter 1 for 'l1', or 2 for 'l2': ")
if user_input == "1":
penalty = "l1"
break
elif user_input in {"2", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
print("penalty =", penalty)
if break_early:
break
while True:
print("\nChoose a loss function.")
user_input = \
input("Enter 1 for 'hinge', or 2 for 'squared_hinge': ")
if user_input == "1":
loss = "hinge"
break
elif user_input in {"2", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
print("loss =", loss)
if break_early:
break
while True:
print("\nShould the algorithm solve the duel or primal",
"optimization problem?")
user_input = input("Enter 1 for dual, or 2 for primal: ")
if user_input == "2":
dual = False
break
elif user_input in {"1", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
print("dual =", dual)
if break_early:
break
while True:
user_input = input("\nEnter a positive tolerance for stopping "
+ "criterion: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
tol = user_input
break
except Exception:
print("Invalid input.")
print("tol =", tol)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive regularization parameter C: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
C = user_input
break
except Exception:
print("Invalid input.")
print("C =", C)
if break_early:
break
while True:
print("\nSet the multi-class strategy if there are more than",
"two classes.")
user_input = input("Enter 1 for 'one-vs-rest', or 2 for "
+ "'Crammer-Singer': ")
if user_input == "2":
multi_class = "crammer_singer"
break
elif user_input in {"1", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
print("multi_class =", multi_class)
if break_early:
break
while True:
user_input = input("\nAutomatically adjust class weights "
+ "(y/N)? ").lower()
if user_input == "y":
class_weight = "balanced"
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("class_weight =", class_weight)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive maximum number of iterations: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input <= 0:
raise Exception
max_iter = user_input
break
except Exception:
print("Invalid input.")
print("max_iter =", max_iter)
if break_early:
break
while True:
user_input = \
input("\nEnter a seed for the random number generator: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
random_state = int(user_input)
break
except Exception:
print("Invalid input.")
print("random_state =", random_state)
if break_early:
break
while True:
user_input = input("\nEnable verbose logging (y/N)? ").lower()
if user_input == "y":
verbose = True
break
elif user_input in {"n", "q", ""}:
break
else:
print("Invalid input.")
print("verbose =", verbose)
break
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
return LinearSVC(penalty=penalty, loss=loss, dual=dual, tol=tol, C=C,
multi_class=multi_class, fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
class_weight=class_weight, verbose=verbose,
random_state=random_state, max_iter=max_iter)
def _create_SVR_model(self, is_nu):
"""Runs UI for getting parameters and creates SVR or NuSVR model."""
if is_nu:
print("\n==========================")
print("= NuSVR Parameter Inputs =")
print("==========================\n")
else:
print("\n========================")
print("= SVR Parameter Inputs =")
print("========================\n")
print("Default values:", "test_size = 0.25", "cv = 5", sep="\n")
if is_nu:
print("nu = 0.5")
else:
print("epsilon = 0.1")
print("kernel = 'rbf'",
"degree = 3",
"gamma = 'scale'",
"coef0 = 0.0",
"tol = 0.001",
"C = 1.0",
"shrinking = True",
"cache_size = 200",
"verbose = False",
"max_iter = -1", sep="\n")
# Set defaults
self.test_size = 0.25
self.cv = None
while True:
user_input = input("\nUse default parameters (Y/n)? ").lower()
if user_input in {"y", ""}:
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
if is_nu:
return NuSVR()
return SVR()
elif user_input == "n":
break
else:
print("Invalid input.")
print("\nIf you are unsure about a parameter, press enter to use its",
"default value.")
print("If you finish entering parameters early, enter 'q' to skip",
"ahead.\n")
# Set more defaults
if is_nu:
nu = 0.5
else:
epsilon = 0.1
kernel = "rbf"
degree = 3
gamma = "scale"
coef0 = 0.0
tol = 0.001
C = 1.0
shrinking = True
cache_size = 200
verbose = False
max_iter = -1
# Get user parameter input
while True:
break_early = False
while True:
user_input = input("\nWhat fraction of the dataset should be the "
+ "testing set (0,1)? ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0 or user_input >= 1:
raise Exception
self.test_size = user_input
break
except Exception:
print("Invalid input.")
print("test_size =", self.test_size)
if break_early:
break
while True:
user_input = input("\nUse GridSearch to find the best "
+ "hyperparameters (y/N)? ").lower()
if user_input == "q":
break_early = True
break
elif user_input in {"n", "y", ""}:
break
else:
print("Invalid input.")
if break_early:
break
while user_input == "y":
print("\n= GridSearch Parameter Inputs =\n")
print("Enter 'q' to skip GridSearch.")
self.gridsearch = True
params = {}
while True:
print("\nEnter the kernels to try out.")
print("Options: 1-'linear', 2-'poly', 3-'rbf', 4-'sigmoid'.",
"Enter 'all' for all options.")
print("Example input: 1,2,3")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
elif user_input == "all":
kern_params = ["linear", "poly", "rbf", "sigmoid"]
break
else:
kern_dict = {1: "linear", 2: "poly", 3: "rbf",
4: "sigmoid"}
try:
kern_params_int = \
list(map(int, list(user_input.split(","))))
if len(kern_params_int) > len(kern_dict):
raise Exception
kern_params = []
for each in kern_params_int:
if not kern_dict.get(each):
raise Exception
kern_params.append(kern_dict.get(each))
break
except Exception:
print("Invalid input.")
if break_early:
break
params["kernel"] = kern_params
print("kernels:", kern_params)
while is_nu:
print("\nEnter a list of decimals for nu.")
print("Example input: 0.1,0.2,0.3")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
try:
nu_params = \
list(map(float, list(user_input.split(","))))
if len(nu_params) == 0:
raise Exception
for num in nu_params:
if num <= 0:
raise Exception
params["nu"] = nu_params
print("nu values:", nu_params)
break
except Exception:
print("Invalid input.")
while not is_nu:
print("\nEnter epsilons (the ranges from an actual value",
"where penalties aren't applied) to try out.")
print("Example input: 0.1,0.2,0.3")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
try:
eps_params = \
list(map(float, list(user_input.split(","))))
if len(eps_params) == 0:
raise Exception
for num in eps_params:
if num <= 0:
raise Exception
params["epsilon"] = eps_params
print("epsilon values:", eps_params)
break
except Exception:
print("Invalid input.")
if break_early:
break
while not is_nu:
print("\nEnter the list of regularization parameters to",
"try out.")
print("Example input: 1,10,100")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
try:
gamma_params = \
list(map(int, list(user_input.split(","))))
if len(gamma_params) == 0:
raise Exception
for num in gamma_params:
if num <= 0:
raise Exception
params["C"] = gamma_params
print("C values:", gamma_params)
break
except Exception:
print("Invalid input.")
if break_early:
break
print("\n= End of GridSearch inputs. =")
self.gs_params = params
best_params = self._run_gridsearch_regressor(is_nu)
kernel = best_params["kernel"]
if is_nu:
nu = best_params["nu"]
else:
C = best_params["C"]
epsilon = best_params["epsilon"]
break
break_early = False
while True:
user_input = input("\nEnter the number of folds for cross "
+ "validation [2,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input < 2:
raise Exception
self.cv = user_input
break
except Exception:
print("Invalid input.")
print("cv =", self.cv)
if break_early:
break
while is_nu and not self.gridsearch:
user_input = input("\nEnter a decimal for nu (0,1]: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0 or user_input > 1:
raise Exception
nu = user_input
break
except Exception:
print("Invalid input.")
if is_nu and not self.gridsearch:
print("nu =", nu)
while not is_nu and not self.gridsearch:
user_input = \
input("\nEnter a positive epsilon, the range from an actual "
+ "value where penalties aren't applied: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
epsilon = user_input
break
except Exception:
print("Invalid input.")
if not is_nu and not self.gridsearch:
print("epsilon =", epsilon)
if break_early:
break
while not self.gridsearch:
print("\nWhich kernel type should be used?")
user_input = \
input("Enter 1 for 'linear', 2 for 'poly', 3 for 'rbf', 4 "
+ "for 'sigmoid', or 5 for 'precomputed': ")
if user_input == "1":
kernel = "linear"
break
elif user_input == "2":
kernel = "poly"
break
elif user_input == "4":
kernel = "sigmoid"
break
elif user_input == "5":
kernel = "recomputed"
break
elif user_input in {"3", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
if not self.gridsearch:
print("kernel =", kernel)
if break_early:
break
while kernel == "poly":
user_input = \
input("\nEnter the degree of the kernel function [0,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input < 0:
raise Exception
degree = user_input
break
except Exception:
print("Invalid input.")
if kernel == "poly":
print("degree =", degree)
if break_early:
break
while kernel in {"rbf", "poly", "sigmoid"}:
print("\nSet the kernel coefficient.")
user_input = input("Enter 1 for 'scale', or 2 for 'auto': ")
if user_input == "2":
gamma = "auto"
break
elif user_input in {"1", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
if kernel in {"rbf", "poly", "sigmoid"}:
print("gamma =", gamma)
if break_early:
break
while not self.gridsearch or is_nu:
user_input = \
input("\nEnter a positive regularization parameter C: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
C = user_input
break
except Exception:
print("Invalid input.")
if not self.gridsearch or is_nu:
print("C =", C)
if break_early:
break
while kernel in {"poly", "sigmoid"}:
user_input = input("\nEnter coef0, the independent term in the "
+ "kernel function [0,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input < 0:
raise Exception
coef0 = user_input
break
except Exception:
print("Invalid input.")
if kernel in {"poly", "sigmoid"}:
print("coef0 =", coef0)
if break_early:
break
while True:
user_input = input("\nEnter a positive tolerance for stopping "
+ "criterion: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
tol = user_input
break
except Exception:
print("Invalid input.")
print("tol =", tol)
if break_early:
break
while True:
user_input = \
input("\nUse the shrinking heuristic (Y/n)? ").lower()
if user_input == "n":
shrinking = False
break
elif user_input in {"y", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("shrinking =", shrinking)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive kernel cache size in MB: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
cache_size = user_input
break
except Exception:
print("Invalid input.")
print("cache_size =", cache_size)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive maximum number of iterations, or "
+ "press enter for no limit: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input <= 0:
raise Exception
max_iter = user_input
break
except Exception:
print("Invalid input.")
print("max_iter =", max_iter)
if break_early:
break
while True:
user_input = input("\nEnable verbose logging (y/N)? ").lower()
if user_input == "y":
verbose = True
break
elif user_input in {"n", "q", ""}:
break
else:
print("Invalid input.")
print("verbose =", verbose)
break
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
if is_nu:
return NuSVR(nu=nu, C=C, kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, shrinking=shrinking, tol=tol,
cache_size=cache_size, verbose=verbose,
max_iter=max_iter)
return SVR(kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, epsilon=epsilon, shrinking=shrinking,
cache_size=cache_size, verbose=verbose, max_iter=max_iter)
def _run_gridsearch_regressor(self, is_nu):
"""Runs GridSearch with the parameters given in run_SVR() or
run_nu_SVR(). Returns the best parameters."""
if is_nu:
clf = NuSVR()
else:
clf = SVR()
if self.dataset_X_test is None:
self._split_data()
# Run GridSearch
grid_obj = GridSearchCV(clf, self.gs_params, scoring="r2")
grid_obj = grid_obj.fit(self.dataset_X_train, self.dataset_y_train)
# Set the clf to the best combination of parameters
clf = grid_obj.best_estimator_
# Fit the best algorithm to the data
clf.fit(self.dataset_X_train, self.dataset_y_train)
self.gs_result = clf.score(self.dataset_X_test, self.dataset_y_test)
# Return the best parameters
print("\nBest GridSearch Parameters:\n", grid_obj.best_params_, "\n")
return grid_obj.best_params_
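# Illustrative (hypothetical) shapes of the self.gs_params grid consumed above,
# matching the prompts in _create_SVR_model, e.g.:
#   SVR:   {"kernel": ["rbf"], "epsilon": [0.1, 0.2], "C": [1, 10, 100]}
#   NuSVR: {"kernel": ["rbf", "poly"], "nu": [0.25, 0.5]}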
def _create_linear_SVR_model(self):
"""Runs UI for getting parameters and creates LinearSVR model."""
print("\n==============================")
print("= LinearSVR Parameter Inputs =")
print("==============================\n")
print("Default values:",
"test_size = 0.25",
"cv = 5",
"epsilon = 0.0",
"tol = 0.0001",
"C = 1.0",
"loss = 'epsilon_insensitive'",
"fit_intercept = True",
"intercept_scaling = 1.0",
"dual = True",
"random_state = None",
"max_iter = 1000",
"verbose = False", sep="\n")
# Set defaults
self.test_size = 0.25
self.cv = None
while True:
user_input = input("\nUse default parameters (Y/n)? ").lower()
if user_input in {"y", ""}:
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
return LinearSVR()
elif user_input == "n":
break
else:
print("Invalid input.")
print("\nIf you are unsure about a parameter, press enter to use its",
"default value.")
print("If you finish entering parameters early, enter 'q' to skip",
"ahead.\n")
# Set more defaults
epsilon = 0.0
tol = 0.0001
C = 1.0
loss = "epsilon_insensitive"
fit_intercept = True
intercept_scaling = 1.0
dual = True
random_state = None
max_iter = 1000
verbose = 0
# Get user parameter input
while True:
break_early = False
while True:
user_input = input("\nWhat fraction of the dataset should be the "
+ "testing set (0,1)? ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0 or user_input >= 1:
raise Exception
self.test_size = user_input
break
except Exception:
print("Invalid input.")
print("test_size =", self.test_size)
if break_early:
break
while True:
user_input = input("\nEnter the number of folds for cross "
+ "validation [2,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input < 2:
raise Exception
self.cv = user_input
break
except Exception:
print("Invalid input.")
print("cv =", self.cv)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive epsilon, the range from an actual "
+ "value where penalties aren't applied: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
epsilon = user_input
break
except Exception:
print("Invalid input.")
print("epsilon =", epsilon)
if break_early:
break
while True:
user_input = input("\nEnter a positive tolerance for stopping "
+ "criterion: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
tol = user_input
break
except Exception:
print("Invalid input.")
print("tol =", tol)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive regularization parameter C: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
C = user_input
break
except Exception:
print("Invalid input.")
print("C =", C)
if break_early:
break
while True:
print("\nChoose a loss function.")
user_input = \
input("\nEnter 1 for 'epsilon_insensitive', or 2 for "
+ "'squared_epsilon_insensitive': ")
if user_input == "2":
loss = "squared_epsilon_insensitive"
break
elif user_input in {"1", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
print("loss =", loss)
if break_early:
break
while True:
user_input = input("\nCalculate a y-intercept (Y/n)? ").lower()
if user_input == "n":
fit_intercept = False
break
elif user_input in {"y", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("fit_intercept =", fit_intercept)
if break_early:
break
while fit_intercept:
user_input = \
input("\nEnter a number for the intercept scaling factor: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
intercept_scaling = float(user_input)
except Exception:
print("Invalid input.")
if fit_intercept:
print("intercept_scaling =", intercept_scaling)
if break_early:
break
while True:
print("\nShould the algorithm solve the duel or primal",
"optimization problem?")
user_input = input("Enter 1 for dual, or 2 for primal: ")
if user_input == "2":
dual = False
break
elif user_input in {"1", ""}:
break
elif user_input.lower() == "q":
break_early = True
break
else:
print("Invalid input.")
print("dual =", dual)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive maximum number of iterations: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input <= 0:
raise Exception
max_iter = user_input
break
except Exception:
print("Invalid input.")
print("max_iter =", max_iter)
if break_early:
break
while True:
user_input = \
input("\nEnter a seed for the random number generator: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
random_state = int(user_input)
break
except Exception:
print("Invalid input.")
print("random_state =", random_state)
if break_early:
break
while True:
user_input = input("\nEnable verbose logging (y/N)? ").lower()
if user_input == "y":
verbose = True
break
elif user_input in {"n", "q", ""}:
break
else:
print("Invalid input.")
print("verbose =", verbose)
break
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
return LinearSVR(epsilon=epsilon, tol=tol, C=C, loss=loss,
fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling, dual=dual,
verbose=verbose, random_state=random_state,
max_iter=max_iter)
def _output_classifier_results(self, model):
"""Outputs model metrics after a classifier model finishes running."""
if model == "SVC":
print("\n===============")
print("= SVC Results =")
print("===============\n")
print("{:<20} {:<20}".format("Accuracy:", self.accuracy_SVC))
if self.bin:
print("\n{:<20} {:<20}".format("ROC AUC:", self.roc_auc_SVC))
else:
print("\nConfusion Matrix:\n", self.confusion_matrix_SVC)
print("\nCross Validation Scores:", self.cross_val_scores_SVC)
if self.gridsearch:
print("\n{:<20} {:<20}".format("GridSearch Score:",
self.gs_result))
if self.bin and self.graph_results:
plt.plot(self.fpr, self.tpr, label="data 1")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc=4)
plt.show()
print("\n\nCall predict_SVC() to make predictions for new data.")
elif model == "NuSVC":
print("\n=================")
print("= NuSVC Results =")
print("=================\n")
print("{:<20} {:<20}".format("Accuracy:", self.accuracy_nu_SVC))
if self.bin:
print("\n{:<20} {:<20}".format(
"ROC AUC:", self.roc_auc_nu_SVC))
else:
print("\nConfusion Matrix:\n", self.confusion_matrix_nu_SVC)
print("\nCross Validation Scores:", self.cross_val_scores_nu_SVC)
if self.gridsearch:
print("\n{:<20} {:<20}".format("GridSearch Score:",
self.gs_result))
if self.bin and self.graph_results:
plt.plot(self.fpr, self.tpr, label="data 1")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc=4)
plt.show()
print("\n\nCall predict_nu_SVC() to make predictions for new data.")
else:
print("\n=====================")
print("= LinearSVC Results =")
print("=====================\n")
print("{:<20} {:<20}".format(
"Accuracy:", self.accuracy_linear_SVC))
print("\nCross Validation Scores:",
self.cross_val_scores_linear_SVC)
print("\n\nCall predict_linear_SVC() to make predictions for",
"new data.")
print("\n===================")
print("= End of results. =")
print("===================\n")
def _output_regressor_results(self, model):
"""Outputs model metrics after a regressor model finishes running."""
if model == "SVR":
print("\n===============")
print("= SVR Results =")
print("===============\n")
print("{:<20} {:<20}".format("Mean Squared Error:",
self.mean_squared_error_SVR))
print("\n{:<20} {:<20}".format("R2 Score:", self.r2_score_SVR))
print("\n{:<20} {:<20}".format("R Score:", str(self.r_score_SVR)))
print("\nCross Validation Scores", self.cross_val_scores_SVR)
if self.gridsearch:
print("\n{:<20} {:<20}".format("GridSearch Score:",
self.gs_result))
print("\n\nCall predict_SVR() to make predictions for new data.")
elif model == "NuSVR":
print("\n=================")
print("= NuSVR Results =")
print("=================\n")
print("{:<20} {:<20}".format("Mean Squared Error:",
self.mean_squared_error_nu_SVR))
print("\n{:<20} {:<20}".format("R2 Score:", self.r2_score_nu_SVR))
print("\n{:<20} {:<20}".format(
"R Score:", str(self.r_score_nu_SVR)))
print("\nCross Validation Scores:", self.cross_val_scores_nu_SVR)
if self.gridsearch:
print("\n{:<20} {:<20}".format("GridSearch Score:",
self.gs_result))
print("\n\nCall predict_nu_SVR() to make predictions for new data.")
else:
print("\n=====================")
print("= LinearSVR Results =")
print("=====================\n")
print("{:<20} {:<20}".format("Mean Squared Error:",
self.mean_squared_error_linear_SVR))
print("\n{:<20} {:<20}".format("R2 Score:",
self.r2_score_linear_SVR))
print("\n{:<20} {:<20}".format("R Score:",
str(self.r_score_linear_SVR)))
print("\nCross Validation Scores:\n",
self.cross_val_scores_linear_SVR)
print("\n\nCall predict_linear_SVR() to make predictions for new data.")
print("\n===================")
print("= End of results. =")
print("===================\n")
def _split_data(self):
"""Helper method for splitting attributes and labels into
training and testing sets.
This method runs under the assumption that all relevant instance
data has been checked for correctness.
"""
self.dataset_X_train, self.dataset_X_test, self.dataset_y_train, \
self.dataset_y_test = train_test_split(self.attributes, self.labels,
test_size=self.test_size)
def _check_inputs(self):
"""Verifies if instance data is ready for use in SVM model."""
# Check if attributes exists
if self.attributes is None:
print("attributes is missing; call set_attributes(new_attributes)",
"to fix this! new_attributes should be a populated dataset",
"of independent variables.")
return False
# Check if labels exists
if self.labels is None:
print("labels is missing; call set_labels(new_labels) to fix this!",
"new_labels should be a populated dataset of classes.")
return False
# Check if attributes and labels have same number of rows (samples)
if self.attributes.shape[0] != self.labels.shape[0]:
print("attributes and labels don't have the same number of rows.",
"Make sure the number of samples in each dataset matches!")
return False
return True
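# ---------------------------------------------------------------------------
# Illustrative usage (added sketch; not part of the original ManufacturingNet
# module). It uses scikit-learn's bundled iris dataset as a stand-in for real
# data. The run_* methods gather their hyperparameters interactively via
# input(), so this block only executes when the file is run directly, and
# accepting the defaults at the first prompt is enough to train a model.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    iris = load_iris()
    model = SVM(attributes=iris.data, labels=iris.target)

    # Train an SVC on a train/test split created internally by _split_data().
    model.run_SVC()

    # Metrics computed by run_SVC() are stored on the object.
    print("Accuracy:", model.get_accuracy_SVC())
    print("Cross-validation scores:", model.get_cross_val_scores_SVC())

    # Classify new observations with the fitted model.
    print(model.predict_SVC(iris.data[:5]))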
|
PypiClean
|
/float_evaluation-0.0.2-py3-none-any.whl/float/change_detection/evaluation/change_detection_evaluator.py
|
import numpy as np
import traceback
from typing import Callable, List, Union
class ChangeDetectionEvaluator:
"""Change detection evaluation class.
This class computes the performance measures and stores the corresponding results when evaluating a change
detection method.
Attributes:
measure_funcs (List[Callable]): A list of evaluation measure functions.
known_drifts (List[int] | List[tuple]):
The positions in the dataset (indices) corresponding to known concept drifts.
batch_size (int): The number of observations processed per iteration/time step.
n_total (int): The total number of observations.
n_delay (int | list):
The number of observations after a known concept drift, during which we count the detections made by
the model as true positives. If the argument is a list, the evaluator computes results for each delay
specified in the list.
n_init_tolerance (int):
The number of observations reserved for the initial training. We do not consider these observations in the
evaluation.
comp_times (list): Computation times for updating the change detector per time step.
memory_changes (list):
Memory changes (in GB RAM) per training iteration of the change detector.
result (dict): Results (i.e. calculated measurements, mean, and variance) for each evaluation measure function.
"""
def __init__(self,
measure_funcs: List[Callable],
known_drifts: Union[List[int], List[tuple]],
batch_size: int,
n_total: int,
n_delay: Union[int, list] = 100,
n_init_tolerance: int = 100):
"""Initializes the change detection evaluation object.
Args:
measure_funcs: A list of evaluation measure functions.
known_drifts:
The positions in the dataset (indices) corresponding to known concept drifts.
batch_size: The number of observations processed per iteration/time step.
n_total: The total number of observations.
n_delay:
The number of observations after a known concept drift, during which we count the detections made by
the model as true positives. If the argument is a list, the evaluator computes results for each delay
specified in the list.
n_init_tolerance:
The number of observations reserved for the initial training. We do not consider these observations in
the evaluation.
"""
self.measure_funcs = measure_funcs
self.known_drifts = known_drifts
self.batch_size = batch_size
self.n_total = n_total
self.n_delay = n_delay
self.n_init_tolerance = n_init_tolerance
self.comp_times = []
self.memory_changes = []
self.result = dict()
for measure_func in measure_funcs:
self.result[measure_func.__name__] = dict()
def run(self, drifts: List):
"""Computes the evaluation measures.
Unlike the PredictionEvaluator and FeatureSelectionEvaluator, the ChangeDetectionEvaluator is only run
once at the end of the evaluation.
Args:
drifts: List of time steps corresponding to detected concept drifts.
Raises:
TypeError: Error while executing the provided evaluation measure functions.
"""
for measure_func in self.measure_funcs:
try:
if isinstance(self.n_delay, int): # Run with a single delay parameter
mean = measure_func(evaluator=self, drifts=drifts, n_delay=self.n_delay)
mes = [mean]
var = 0
else: # Run with multiple delay parameters
mes = []
for ndel in self.n_delay:
mes.append(measure_func(evaluator=self, drifts=drifts, n_delay=ndel))
mean = np.nanmean(mes)
var = np.nanvar(mes)
self.result[measure_func.__name__]['measures'] = mes
self.result[measure_func.__name__]['mean'] = mean
self.result[measure_func.__name__]['var'] = var
except TypeError:
traceback.print_exc()
continue
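# --- Usage sketch (illustrative only; `toy_detected_change_rate` is a made-up measure
# function, not part of float, and the drift positions below are arbitrary). It shows how
# the evaluator above is driven once a detector has produced a list of detected drifts. ---
if __name__ == "__main__":
    def toy_detected_change_rate(evaluator, drifts, n_delay):
        # Fraction of known drifts followed by at least one detection within n_delay observations.
        hits = 0
        for known in evaluator.known_drifts:
            start = known if isinstance(known, int) else known[0]
            if any(start <= d * evaluator.batch_size <= start + n_delay for d in drifts):
                hits += 1
        return hits / len(evaluator.known_drifts)

    evaluator = ChangeDetectionEvaluator(
        measure_funcs=[toy_detected_change_rate],
        known_drifts=[200, 600],   # observation indices of known concept drifts
        batch_size=10,             # observations per time step
        n_total=1000,
        n_delay=[50, 100],         # evaluate for multiple tolerated detection delays
    )
    evaluator.run(drifts=[21, 63])  # detections at time steps 21 and 63
    print(evaluator.result)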
|
PypiClean
|
/binance_dex-0.1.3.tar.gz/binance_dex-0.1.3/binance_dex/sockets.py
|
import inspect
from binance_dex.lib.sockets import BinanceChainSocketConn
IS_TEST_NET = False  # A variable to switch between test net and main net; default is MAIN-NET
SOCKET_BASE_ADDR_TEST_NET = 'wss://testnet-dex.binance.org/api/ws/'
SOCKET_BASE_ADDR_MAIN_NET = 'wss://dex.binance.org/api/ws/'
# Default Call back sample function to alert user to create own customized function
def _default_call_back(*args):
print('This is the default callback function; '
'please pass in your own customized callback function to handle received data')
print(args[1])
WS_ENTRY_POINTS = {
'fetch_block_height_updates': '$all@blockheight',
'fetch_account_updates': '',
'fetch_trades_updates': '',
'fetch_market_diff_stream': '',
'fetch_market_depth_stream': '',
'fetch_kline_updates': '',
'fetch_ticker_streams': '',
}
class BinanceChainSocket(object):
"""
Web Socket Implementation
Official Document: https://binance-chain.github.io/api-reference/dex-api/ws-connection.html
"""
def __init__(self, is_test_net=IS_TEST_NET):
self.base_ws_url = SOCKET_BASE_ADDR_TEST_NET if is_test_net else SOCKET_BASE_ADDR_MAIN_NET
def fetch_account_updates(self, user_address, one_off=True, callback_function=None):
"""
This function may receive several kinds of data, distinguished by the "stream" field of the returned data
:param user_address: Address
Sample Return:
- Account sample return (notice "stream" == "accounts"):
{"stream":"accounts","data":{"e":"outboundAccountInfo","E":7364509,"B":[{"a":"BNB","f":"1371.08750000",
"r":"0.00000000","l":"0.00000000"},{"a":"DEX.B-C72","f":"999999791.11200000","r":"0.00000000",
"l":"0.00000000"}]}}
- Transfer sample return (notice "stream" == "transfers"):
{"stream":"transfers","data":{"e":"outboundTransferInfo","E":7364509,
"H":"08B71F862CDB820AF499D6E4FB34494CA163EBDADD5DC5D0A61EB1A0725BB4F4",
"f":"tbnb1r4gc5ftrkr9ez2khph4h5xxd0mf0hd75jf06gw","t":[{"o":"tbnb1fn9z9vn4f44ekz0a3pf80dcy2wh4d5988phjds",
"c":[{"a":"DEX.B-C72","A":"8.88800000"}]}]}}
- Orders
{"stream":"orders","data":[{"e":"executionReport","E":7366949,"s":"100K-9BC_BNB","S":1,"o":2,"f":1,
"q":"0.00001500","p":"66666.00000000","x":"NEW","X":"Ack","i":"1D518A2563B0CB912AD70DEB7A18CD7ED2FBB7D4-10",
"l":"0.00000000","L":"0.00000000","z":"0.00001500","n":"","T":1554890366040313451,"t":"",
"O":1554890366040313451},{"e":"executionReport","E":7366949,"s":"100K-9BC_BNB","S":1,"o":2,"f":1,
"q":"0.00001500","p":"66666.00000000","x":"NEW","X":"FullyFill",
"i":"1D518A2563B0CB912AD70DEB7A18CD7ED2FBB7D4-10","l":"0.00001500","L":"66666.00000000","z":"0.00001500",
"n":"BNB:0.00039999","T":1554890366040313451,"t":"7366949-0","O":1554890366040313451}]}
"""
return self._standard_binance_change_socket_handler(one_off=one_off,
callback_function=callback_function,
parameter=user_address)
def fetch_block_height_updates(self, one_off=True, callback_function=None):
return self._standard_binance_change_socket_handler(one_off=one_off, callback_function=callback_function)
def fetch_trades_updates(self, trading_pairs, one_off=True, callback_function=None):
"""
Returns individual trade updates by trading pair name
:param trading_pairs: Trading Pair
Sample Return:
{"stream":"trades","data":[{"e":"trade","E":7549438,"s":"100K-9BC_BNB","t":"7549438-0","p":"3333.00000000",
"q":"0.02611100","b":"1D518A2563B0CB912AD70DEB7A18CD7ED2FBB7D4-11",
"a":"EA1AE716501D1DB0B9F295A30891D9E562828678-12","T":1554964166437515341,
"sa":"tbnb1agdww9jsr5wmpw0jjk3s3yweu43g9pnc4p5kg7","ba":"tbnb1r4gc5ftrkr9ez2khph4h5xxd0mf0hd75jf06gw"}]}
"""
postfix_url = trading_pairs + '@trades'
return self._standard_binance_change_socket_handler(one_off=one_off,
callback_function=callback_function,
parameter=postfix_url)
def fetch_market_diff_stream(self, trading_pairs, one_off=True, callback_function=None):
"""
Order book price and quantity depth updates used to locally keep an order book.
:param trading_pairs: Trading Pair
Sample Return:
{"stream":"marketDiff","data":{"e":"depthUpdate","E":1554964484,"s":"100K-9BC_BNB",
"b":[["3333.00000000","0.07398900"]],"a":[]}}
"""
postfix_url = trading_pairs + '@marketDiff'
return self._standard_binance_change_socket_handler(one_off=one_off,
callback_function=callback_function,
parameter=postfix_url)
def fetch_market_depth_stream(self, trading_pairs, one_off=True, callback_function=None):
"""
Fetch Market Top 20 Levels of Bids and Asks
:param trading_pairs: Trading Pair
Sample Return:
{"stream":"marketDepth","data":{"lastUpdateId":7551469,"symbol":"100K-9BC_BNB",
"bids":[["3333.00000000","0.07398900"]],"asks":[["66666.00000000","1.68270010"],["70000.00000000","1.00000000"],
["90000000000.00000000","40.05079290"]]}}
"""
postfix_url = trading_pairs + '@marketDepth'
return self._standard_binance_change_socket_handler(one_off=one_off,
callback_function=callback_function,
parameter=postfix_url)
def fetch_kline_updates(self, trading_pair, interval, one_off=True, callback_function=None):
postfix_url = '%s@kline_%s' % (trading_pair, interval)
return self._standard_binance_change_socket_handler(one_off=one_off,
callback_function=callback_function,
parameter=postfix_url)
def fetch_ticker_streams(self, trading_pair=None, is_full_data=True, one_off=True, callback_function=None):
"""
24hr Ticker statistics for a single symbol are pushed every second
:param trading_pair: Trading Pair; if not provided, data for all pairs is returned
:param is_full_data: whether to return the full ticker data
Sample Return:
{"stream":"ticker","data":{"e":"24hrTicker","E":1554966678,"s":"100K-9BC_BNB","p":"0.00000000",
"P":"0.00000000","w":"5023.09923713","x":"3333.00000000","c":"66666.00000000","Q":"0.00010000","b":"0.00000000",
"B":"0.00000000","a":"66666.00000000","A":"1.68260010","o":"66666.00000000","h":"66666.00000000",
"l":"3333.00000000","v":"0.02784240","q":"139.85513820","O":1554880245974,"C":1554966645974,"F":"7366949-0",
"L":"7554709-0","n":23}}
"""
if trading_pair:
if is_full_data:
postfix_url = '%s@ticker' % trading_pair
else:
postfix_url = '%s@miniTicker' % trading_pair
else:
if is_full_data:
postfix_url = '$all@allTickers'
else:
postfix_url = '$all@allMiniTickers'
return self._standard_binance_change_socket_handler(one_off=one_off,
callback_function=callback_function,
parameter=postfix_url)
def _standard_binance_change_socket_handler(self, one_off, callback_function, parameter=None):
# Get caller function name
caller_func_name = inspect.stack()[1].function
# todo:
# refer to: https://stackoverflow.com/questions/900392/getting-the-caller-function-name-inside-another-function-in-python/900413
# caller_func_name differs between Python 3 and Python 2; still needs testing on Python 2
# Get ws name from Mapper
ws_api_name = WS_ENTRY_POINTS[caller_func_name]
# Compose whole ws url
ws_url = self.base_ws_url + ws_api_name
if parameter:
ws_url += parameter
print('WebSocket URL:' + ws_url)
# Create Socket instance
socket_obj = BinanceChainSocketConn(ws_url=ws_url)
# Stream data
if one_off: # short-live-call, just return
return socket_obj.one_off()
else: # long-live-call, keep getting data
if callback_function:
socket_obj.long_conn(callback_function)
else:
socket_obj.long_conn(_default_call_back)
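# --- Usage sketch (illustrative only; requires network access to the Binance Chain DEX).
# Shows a short-lived (one_off) call and a long-lived stream with a custom callback whose
# signature mirrors _default_call_back above (the payload arrives in args[1]). The trading
# pair name is taken from the sample returns in the docstrings. ---
if __name__ == "__main__":
    def my_callback(*args):
        print("received:", args[1])

    socket_client = BinanceChainSocket(is_test_net=True)

    # Short-lived call: fetch a single block-height update and return it
    print(socket_client.fetch_block_height_updates(one_off=True))

    # Long-lived stream (blocks until interrupted); uncomment to run
    # socket_client.fetch_ticker_streams(trading_pair='100K-9BC_BNB',
    #                                    one_off=False,
    #                                    callback_function=my_callback)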
|
PypiClean
|
/collective.emaillogin-1.3.zip/collective.emaillogin-1.3/README.txt
|
collective.emaillogin Package Readme
====================================
Overview
--------
This package allows logins with an email address rather than a login name. It applies
some (somewhat hackish) patches to Plone's membership tool and memberdata
class, after which the email address, on save, is stored as the login name for
members. This means that members can log in using their email address rather
than some additional id, and when the email address changes the login name
is changed along with it.
Since version 1.0 we explicitly convert e-mail addresses to
lowercase. You should be able to login with any mix of upper and
lower case letters.
Installation
------------
Add it to the eggs of your Plone 3 buildout. With Plone 3.2.x or
earlier also add it to the zcml option of your instance. Install it
in the Add-ons (Extra Packages) control panel in your Plone Site.
Installing simply adds a new skin layer named 'emaillogin'.
It is best to install this on a fresh Plone site. The login names of
current users are not changed. There is code in core Plone 4 for
this, so you may want to look there if you need it.
.. WARNING::
A major part of this package works by patching several core
Plone and CMF classes. The patches also apply when you do not have
this package installed in your Plone Site. This may give unwanted
results, like changing the login name of a user when his or her e-mail
address is changed. This also means that when you have multiple Plone
Sites in one Zope instance, you should either install this package in
all of them or not use it at all and remove it from your buildout.
Upgrading
---------
When upgrading from version 0.8, an upgrade step is run to change all
login names to lower case, for those login names that are already
e-mail addresses.
Gotchas
-------
No, these are not bugs. Or if they are bugs, then they are bugs that
are too hard to fix without introducing other bugs. They might be
unexpected though, so we call them gotchas.
- Since version 1.0, whenever an e-mail address is set, we
automatically convert it to lowercase. You cannot set an e-mail
address to upper or mixed case. When logging in or resetting a
password the case does not need to match: we look for the given
login but also for the lowercased login.
- As an administrator, when you change the login name of a user in the
ZMI, this does not update the email address.
- When you register with [email protected] and change this to
[email protected], you can no longer login with your original address.
You can only login with your current e-mail address, though the case
(upper, lower, mixed) should not matter anymore.
- The initial e-mail address is used as userid. This id never ever
changes. In places where the userid is displayed this original
userid is shown, which is normally fine until the email address is
overwritten -- once this is done the *original* email address will
be displayed rather than the new one. (Plone 4 fixes this in the
core.) There may be some more spots in Plone that for example
search only for users by id so when you use that to search on login
name this may fail. Also, there are spots in the Plone or CMF or
Zope code that have a userid as input but use it as login name or
the other way around so be careful when you start hacking yourself.
- If you register with [email protected], then change it to
[email protected], then no one can register a new user with
[email protected] or change the e-mail address of an existing user to
[email protected]. This is because it will forever be used as
userid. Note that when you now change your address to
[email protected], your intermediate address of [email protected] is
free for the taking.
- When you change your e-mail address, you do *not* get a confirmation
e-mail to check if you did not make any typos and it is a real
address. This means you will not be able to login if you do not
remember this typo; a password reset will then also not work. This
could be considered a problem of Plone in general and not specific
to this add-on, though we are hit harder by it. Might be a nice
candidate for a PLIP (PLone Improvement Proposal) or first an extra
add-on.
Future
------
In Plone 4 this package is deprecated, as Plone 4 already supports
logging in with your email address as an option:
http://dev.plone.org/plone/ticket/9214
So we strongly advise not to use this package on Plone 4. But your
instance will still start up (tested on Plone 4.0a4) and you can
uninstall the package through the UI. You may need to manually remove
``emaillogin`` from the skin selections in the Properties tab of
portal_skins in the ZMI. Since the package does some patches on
startup, you should still remove it from the eggs and zcml options of
your instance, rerun buildout and start your instance again.
|
PypiClean
|
/lfl-admin-0.0.9.tar.gz/lfl-admin-0.0.9/lfl_admin/statistic/models/raiting_of_players.py
|
import logging
from django.db import transaction
from django.db.models import DecimalField
from isc_common.bit import TurnBitOn
from isc_common.common import blinkString
from isc_common.common.functions import ExecuteStoredProcRows
from isc_common.fields.code_field import CodeField
from isc_common.fields.name_field import NameField
from isc_common.fields.related import ForeignKeyProtect
from isc_common.http.DSRequest import DSRequest
from isc_common.models.audit import AuditModel, AuditQuerySet, AuditManager
from isc_common.progress import managed_progress, ProgressDroped, progress_deleted
from lfl_admin.common.models.posts import Posts
from lfl_admin.competitions.models.clubs import Clubs
from lfl_admin.competitions.models.players import Players
logger = logging.getLogger(__name__)
class Raiting_of_playersQuerySet(AuditQuerySet):
pass
class Raiting_of_playersManager(AuditManager):
def calcStaticFromRequest(self, request):
from lfl_admin.competitions.models.tournaments import Tournaments
from lfl_admin.statistic.models.raiting_of_players_division import Raiting_of_players_division
from lfl_admin.statistic.models.raiting_of_players_tournamet import Raiting_of_players_tournamet
request = DSRequest(request=request)
tournament_ids = None
division_ids = None
players_ids = None
data = request.get_data()
d = data.get('d')
if d is not None:
tournament_ids = d.get('tournaments_ids')
division_ids = d.get('division_ids')
players_ids = data.get('players_ids')
res = []
if isinstance(players_ids, list) and (isinstance(tournament_ids, list) or isinstance(division_ids, list)):
if not isinstance(tournament_ids, list):
tournament_ids = list(set(map(lambda x: x.get('id'), Tournaments.objects.filter(division_id__in=division_ids, props=Tournaments.props.active).values('id'))))
with transaction.atomic():
with managed_progress(
id=f'calcStaticFromRequest_{request.user.id}',
qty=len(players_ids),
user=request.user,
message='<h4>Вычисление рейтинга</h4>',  # "Rating calculation"
title='Выполнено',  # "Completed"
props=TurnBitOn(0, 0)
) as progress:
for players_id in players_ids:
user = Players.objects.get(id=players_id).person.user
progress.setContentsLabel(content=blinkString(f'Вычисление рейтинга: {user.get_full_name}', blink=False, bold=True))  # "Rating calculation: <name>"
rows = ExecuteStoredProcRows('raiting_of_players', [tournament_ids, players_id])
for row in rows:
FIO, num, KF, kf_bombar, kf_not_wins, kf_opyt, kf_plus_minus, kf_propusch_com, kf_zabito_com, mid_propusch_com, mid_zabito_com, plays, pers_not_wins, pers_wins, plus_minus, propusch_com, raiting, standoff_cnt, kf_wins, zabito_com, zabito_play, amplua_id, club_id, player_id, win_cnt, not_win_cnt = row
raiting = Raiting_of_players.objects.create(
amplua_id=amplua_id,
club_id=club_id,
FIO=FIO,
num=num,
KF=KF,
kf_bombar=kf_bombar,
kf_not_wins=kf_not_wins,
kf_opyt=kf_opyt,
kf_plus_minus=kf_plus_minus if kf_plus_minus is not None else 0,
kf_propusch_com=kf_propusch_com,
kf_wins=kf_wins,
kf_zabito_com=kf_zabito_com,
mid_propusch_com=mid_propusch_com,
mid_zabito_com=mid_zabito_com,
not_win_cnt=not_win_cnt,
plays=plays,
pers_not_wins=pers_not_wins,
pers_wins=pers_wins,
player_id=player_id,
plus_minus=plus_minus if plus_minus is not None else 0,
propusch_com=propusch_com,
raiting=raiting,
standoff_cnt=standoff_cnt,
win_cnt=win_cnt,
zabito_com=zabito_com,
zabito_play=zabito_play,
)
res.append(raiting.id)
if isinstance(division_ids, list):
for division_id in division_ids:
Raiting_of_players_division.objects.create(raiting=raiting, division_id=division_id)
elif isinstance(tournament_ids, list):
for tournament_id in tournament_ids:
Raiting_of_players_tournamet.objects.create(raiting=raiting, tournament_id=tournament_id)
if progress.step() != 0:
raise ProgressDroped(progress_deleted)
# sleep(2)
progress.sendInfo('Расчет выполнен')  # "Calculation completed"
return res
@classmethod
def getRecord(cls, record):
res = {
'amplua__name': record.amplua.name,
'amplua_id': record.amplua.id,
'club__name': record.club.name,
'club_id': record.club.id,
'deliting': record.deliting,
'editing': record.editing,
'FIO': record.FIO,
'num': record.num,
'id': record.id,
'KF': record.KF,
'kf_bombar': record.kf_bombar,
'kf_not_wins': record.kf_not_wins,
'kf_opyt': record.kf_opyt,
'kf_plus_minus': record.kf_plus_minus,
'kf_propusch_com': record.kf_propusch_com,
'kf_wins': record.kf_wins,
'kf_zabito_com': record.kf_zabito_com,
'mid_propusch_com': record.mid_propusch_com,
'mid_zabito_com': record.mid_zabito_com,
'not_win_cnt': record.not_win_cnt,
'plays': record.plays,
'pers_not_wins': record.pers_not_wins,
'pers_wins': record.pers_wins,
'player': record.player,
'plus_minus': record.plus_minus,
'propusch_com': record.propusch_com,
'raiting': record.raiting,
'standoff_cnt': record.standoff_cnt,
'win_cnt': record.win_cnt,
'zabito_com': record.zabito_com,
'zabito_play': record.zabito_play,
}
return res
def get_queryset(self):
return Raiting_of_playersQuerySet(self.model, using=self._db)
class Raiting_of_players(AuditModel):
amplua = ForeignKeyProtect(Posts)
club = ForeignKeyProtect(Clubs)
FIO = NameField()
num = CodeField()
KF = CodeField()
kf_bombar = DecimalField(max_digits=5, decimal_places=2, )
kf_not_wins = DecimalField(max_digits=5, decimal_places=2, )
kf_opyt = DecimalField(max_digits=5, decimal_places=2, )
kf_plus_minus = DecimalField(max_digits=5, decimal_places=2, )
kf_propusch_com = DecimalField(max_digits=5, decimal_places=2, )
kf_wins = DecimalField(max_digits=5, decimal_places=2, )
kf_zabito_com = DecimalField(max_digits=5, decimal_places=2, )
mid_propusch_com = DecimalField(max_digits=5, decimal_places=2, )
mid_zabito_com = DecimalField(max_digits=5, decimal_places=2, )
not_win_cnt = DecimalField(max_digits=5, decimal_places=2, )
plays = DecimalField(max_digits=5, decimal_places=2, )
pers_not_wins = DecimalField(max_digits=5, decimal_places=2, )
pers_wins = DecimalField(max_digits=5, decimal_places=2, )
player = ForeignKeyProtect(Players)
plus_minus = DecimalField(max_digits=5, decimal_places=2, )
propusch_com = DecimalField(max_digits=5, decimal_places=2, )
raiting = DecimalField(max_digits=5, decimal_places=2, )
standoff_cnt = DecimalField(verbose_name='Ничьих', max_digits=5, decimal_places=2, )  # verbose_name: "Draws"
win_cnt = DecimalField(max_digits=5, decimal_places=2, )
zabito_com = DecimalField(max_digits=5, decimal_places=2, )
zabito_play = DecimalField(max_digits=5, decimal_places=2, )
objects = Raiting_of_playersManager()
def __str__(self):
return f'ID:{self.id}'
def __repr__(self):
return self.__str__()
class Meta:
verbose_name = 'Рейтинг футболистов'  # "Rating of players"
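# --- Usage sketch (illustrative only; `some_club_id` is a hypothetical value) ---
# Querying the ratings produced by Raiting_of_playersManager.calcStaticFromRequest(),
# e.g. the ten best-rated players of a club, ordered by the `raiting` field above:
#
#   top_players = (Raiting_of_players.objects
#                  .filter(club_id=some_club_id)
#                  .order_by('-raiting')[:10])
#   for rec in top_players:
#       print(rec.FIO, rec.raiting)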
|
PypiClean
|
/flask-talisman-1.1.0.tar.gz/flask-talisman-1.1.0/README.rst
|
Talisman: HTTP security headers for Flask
=========================================
|PyPI Version|
Talisman is a small Flask extension that handles setting HTTP headers
that can help protect against a few common web application security
issues.
The default configuration:
- Forces all connections to ``https``, unless running with debug enabled.
- Enables `HTTP Strict Transport
Security <https://developer.mozilla.org/en-US/docs/Web/Security/HTTP_strict_transport_security>`_.
- Sets Flask's session cookie to ``secure``, so it will never be set if
your application is somehow accessed via a non-secure connection.
- Sets Flask's session cookie to ``httponly``, preventing JavaScript
from being able to access its content. CSRF via Ajax uses a separate
cookie and should be unaffected.
- Sets Flask's session cookie to ``Lax``, preventing the cookie from being leaked
in CSRF-prone request methods.
- Sets
`X-Frame-Options <https://developer.mozilla.org/en-US/docs/Web/HTTP/X-Frame-Options>`_
to ``SAMEORIGIN`` to avoid
`clickjacking <https://en.wikipedia.org/wiki/Clickjacking>`_.
- Sets `X-Content-Type-Options
<https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options>`_
to prevent content type sniffing.
- Sets a strict `Content Security
Policy <https://developer.mozilla.org/en-US/docs/Web/Security/CSP/Introducing_Content_Security_Policy>`__
of ``default-src: 'self', 'object-src': 'none'``. This is intended to almost completely
prevent Cross Site Scripting (XSS) attacks. This is probably the only
setting that you should reasonably change. See the
`Content Security Policy`_ section.
- Sets a strict `Referrer-Policy <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy>`_
of ``strict-origin-when-cross-origin`` that governs which referrer information should be included with
requests made.
- Disables ``browsing-topics`` by default in the `Permissions-Policy <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Feature-Policy>`_
like `Drupal <https://www.drupal.org/project/drupal/issues/3209628>`_ to enhance privacy protection.
In addition to Talisman, you **should always use a cross-site request
forgery (CSRF) library**. It's highly recommended to use
`Flask-SeaSurf <https://flask-seasurf.readthedocs.org/en/latest/>`_,
which is based on Django's excellent library.
Installation & Basic Usage
--------------------------
Install via `pip <https://pypi.python.org/pypi/pip>`_:
::
pip install flask-talisman
After installing, wrap your Flask app with a ``Talisman``:
.. code:: python
from flask import Flask
from flask_talisman import Talisman
app = Flask(__name__)
Talisman(app)
There is also a full `Example App <https://github.com/wntrblm/flask-talisman/blob/master/example_app>`_.
Options
-------
- ``force_https``, default ``True``, forces all non-debug connections to
``https`` (`about HTTPS <https://developer.mozilla.org/en-US/docs/Glossary/https>`_).
- ``force_https_permanent``, default ``False``, uses ``301`` instead of
``302`` for ``https`` redirects.
- ``frame_options``, default ``SAMEORIGIN``, can be ``SAMEORIGIN``,
``DENY``, or ``ALLOWFROM`` (`about Frame Options <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options>`_).
- ``frame_options_allow_from``, default ``None``, a string indicating
the domains that are allowed to embed the site via iframe.
- ``strict_transport_security``, default ``True``, whether to send HSTS
headers (`about HSTS <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security>`_).
- ``strict_transport_security_preload``, default ``False``, enables HSTS
preloading. If you register your application with
`Google's HSTS preload list <https://hstspreload.appspot.com/>`_,
Firefox and Chrome will never load your site over a non-secure
connection.
- ``strict_transport_security_max_age``, default ``ONE_YEAR_IN_SECS``,
length of time the browser will respect the HSTS header.
- ``strict_transport_security_include_subdomains``, default ``True``,
whether subdomains should also use HSTS.
- ``content_security_policy``, default ``default-src: 'self', 'object-src': 'none'``, see the
`Content Security Policy`_ section (`about Content Security Policy <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy>`_).
- ``content_security_policy_nonce_in``, default ``[]``. Adds a per-request nonce
value to the flask request object and also to the specified CSP header section.
I.e. ``['script-src', 'style-src']``
- ``content_security_policy_report_only``, default ``False``, whether to set
the CSP header as "report-only" (as `Content-Security-Policy-Report-Only`)
to ease deployment by disabling the policy enforcement by the browser,
requires passing a value with the ``content_security_policy_report_uri``
parameter
- ``content_security_policy_report_uri``, default ``None``, a string
indicating the report URI used for `CSP violation reports
<https://developer.mozilla.org/en-US/docs/Web/Security/CSP/Using_CSP_violation_reports>`_
- ``referrer_policy``, default ``strict-origin-when-cross-origin``, a string
that sets the Referrer Policy header to send a full URL when performing a same-origin
request, only send the origin of the document to an equally secure destination
(HTTPS->HTTPS), and send no header to a less secure destination (HTTPS->HTTP) (`about Referrer Policy <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy>`_).
- ``feature_policy``, default ``{}``, see the `Feature Policy`_ section (`about Feature Policy <https://developer.mozilla.org/en-US/docs/Web/HTTP/Feature_Policy>`_).
- ``permissions_policy``, default ``{'browsing-topics': '()'}``, see the `Permissions Policy`_ section (`about Permissions Policy <https://developer.mozilla.org/en-US/docs/Web/HTTP/Feature_Policy>`_).
- ``document_policy``, default ``{}``, see the `Document Policy`_ section (`about Document Policy <https://wicg.github.io/document-policy/>`_).
- ``session_cookie_secure``, default ``True``, set the session cookie
to ``secure``, preventing it from being sent over plain ``http`` (`about cookies <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie>`_).
- ``session_cookie_http_only``, default ``True``, set the session
cookie to ``httponly``, preventing it from being read by JavaScript.
- ``session_cookie_samesite``, default ``Lax``, set this to ``Strict`` to prevent the cookie from being sent by the browser to the target site in all cross-site browsing context, even when following a regular link.
- ``force_file_save``, default ``False``, whether to set the
`X-Download-Options <https://docs.microsoft.com/en-us/previous-versions/windows/internet-explorer/ie-developer/compatibility/jj542450(v=vs.85)?redirectedfrom=MSDN>`_
header to ``noopen`` to prevent IE >= 8 from opening file downloads
directly and only save them instead.
- ``x_content_type_options``, default ``True``, Protects against MIME sniffing vulnerabilities (`about Content Type Options <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options>`_).
- ``x_xss_protection``, default ``False``, Protects against cross-site scripting (XSS) attacks (`about XSS Protection <https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection>`_). This option is disabled by default because no modern browser (`supports this header <https://caniuse.com/mdn-http_headers_x-xss-protection>`_) anymore.
For a full list of (security) headers, check out: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers.
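A combined sketch (values here are illustrative, not recommendations) showing how several of the options above can be passed together:
.. code:: python
    from flask import Flask
    from flask_talisman import Talisman
    app = Flask(__name__)
    Talisman(
        app,
        force_https=True,
        strict_transport_security=True,
        strict_transport_security_max_age=31536000,
        frame_options='DENY',
        session_cookie_secure=True,
    )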
Per-view options
~~~~~~~~~~~~~~~~
Sometimes you want to change the policy for a specific view. The
``force_https``, ``frame_options``, ``frame_options_allow_from``,
``content_security_policy``, ``feature_policy``, ``permissions_policy``
and ``document_policy`` options can be changed on a per-view basis.
.. code:: python
from flask import Flask
from flask_talisman import Talisman, ALLOW_FROM
app = Flask(__name__)
talisman = Talisman(app)
@app.route('/normal')
def normal():
return 'Normal'
@app.route('/embeddable')
@talisman(frame_options=ALLOW_FROM, frame_options_allow_from='*')
def embeddable():
return 'Embeddable'
Content Security Policy
-----------------------
The default content security policy is extremely strict and will
prevent loading any resources that are not in the same domain as the
application. Most web applications will need to change this policy.
If you're not ready to deploy Content Security Policy, you can set
`content_security_policy` to `False` to disable sending this header
entirely.
A slightly more permissive policy is available at
``flask_talisman.GOOGLE_CSP_POLICY``, which allows loading Google-hosted JS
libraries, fonts, and embedding media from YouTube and Maps.
You can and should create your own policy to suit your site's needs.
Here's a few examples adapted from
`MDN <https://developer.mozilla.org/en-US/docs/Web/Security/CSP/Using_Content_Security_Policy>`_:
Example 1
~~~~~~~~~
This is the default policy. A web site administrator wants all content
to come from the site's own origin (this excludes subdomains) and disallow
legacy HTML elements.
.. code:: python
csp = {
'default-src': '\'self\'',
'object-src': '\'none\'',
}
talisman = Talisman(app, content_security_policy=csp)
Example 2
~~~~~~~~~
A web site administrator wants to allow content from a trusted domain
and all its subdomains (it doesn't have to be the same domain that the
CSP is set on.)
.. code:: python
csp = {
'default-src': [
'\'self\'',
'*.trusted.com'
]
}
Example 3
~~~~~~~~~
A web site administrator wants to allow users of a web application to
include images from any origin in their own content, but to restrict
audio or video media to trusted providers, and all scripts only to a
specific server that hosts trusted code.
.. code:: python
csp = {
'default-src': '\'self\'',
'img-src': '*',
'media-src': [
'media1.com',
'media2.com',
],
'script-src': 'userscripts.example.com'
}
In this example content is only permitted from the document's origin
with the following exceptions:
- Images may be loaded from anywhere (note the ``*`` wildcard).
- Media is only allowed from media1.com and media2.com (and not from
subdomains of those sites).
- Executable script is only allowed from userscripts.example.com.
Example 4
~~~~~~~~~
A web site administrator for an online banking site wants to ensure that
all its content is loaded using SSL, in order to prevent attackers from
eavesdropping on requests.
.. code:: python
csp = {
'default-src': 'https://onlinebanking.jumbobank.com'
}
The server only permits access to documents being loaded specifically
over HTTPS through the single origin onlinebanking.jumbobank.com.
Example 5
~~~~~~~~~
A web site administrator of a web mail site wants to allow HTML in
email, as well as images loaded from anywhere, but not JavaScript or
other potentially dangerous content.
.. code:: python
csp = {
'default-src': [
'\'self\'',
'*.mailsite.com',
],
'img-src': '*'
}
Note that this example doesn't specify a ``script-src``; with the
example CSP, this site uses the setting specified by the ``default-src``
directive, which means that scripts can be loaded only from the
originating server.
Example 6
~~~~~~~~~
A web site administrator wants to allow embedded scripts (which might
be generated dynamically).
.. code:: python
csp = {
'default-src': '\'self\'',
'script-src': '\'self\'',
}
talisman = Talisman(
app,
content_security_policy=csp,
content_security_policy_nonce_in=['script-src']
)
The nonce needs to be added to the script tag in the template:
.. code:: html
<script nonce="{{ csp_nonce() }}">
//...
</script>
Note that the CSP directive (`script-src` in the example) to which the `nonce-...`
source should be added needs to be defined explicitly.
Example 7
~~~~~~~~~
A web site administrator wants to override the CSP directives via an
environment variable which doesn't support specifying the policy as
a Python dictionary, e.g.:
.. code:: bash
export CSP_DIRECTIVES="default-src 'self'; img-src *"
python app.py
Then in the app code you can read the CSP directives from the environment:
.. code:: python
import os
from flask_talisman import Talisman, DEFAULT_CSP_POLICY
talisman = Talisman(
app,
content_security_policy=os.environ.get("CSP_DIRECTIVES", DEFAULT_CSP_POLICY),
)
As you can see above, the policy can be defined just as the official
specification requires the HTTP header to be set: as a semicolon-separated
list of individual CSP directives.
Feature Policy
--------------
**Note:** Feature Policy has largely been `renamed Permissions Policy <https://github.com/w3c/webappsec-feature-policy/issues/359>`_
in the latest draft and some features are likely to move to Document Policy.
At this writing, most browsers support the ``Feature-Policy`` HTTP Header name.
See the `Permissions Policy`_ and `Document Policy`_ sections below should you wish
to set these.
Also note that the Feature Policy specification did not progress beyond the `draft <https://wicg.github.io/feature-policy/>`_
stage before being renamed, but is `supported in some form in most browsers
<https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Feature-Policy#Browser_compatibility>`_.
The default feature policy is empty, as this is the default expected behaviour.
Geolocation Example
~~~~~~~~~~~~~~~~~~~
Disable access to Geolocation interface.
.. code:: python
feature_policy = {
'geolocation': '\'none\''
}
talisman = Talisman(app, feature_policy=feature_policy)
Permissions Policy
------------------
Feature Policy has been split into Permissions Policy and Document Policy but
at this writing `browser support of Permissions Policy is very limited <https://caniuse.com/permissions-policy>`_,
and it is recommended to still set the ``Feature-Policy`` HTTP Header.
Permission Policy support is included in Talisman for when this becomes more
widely supported.
Note that the `Permissions Policy is still a Working Draft <https://www.w3.org/TR/permissions-policy/>`_.
When the same feature or permission is set in both Feature Policy and Permission Policy,
the Permission Policy setting will take precedence in browsers that support both.
It should be noted that the syntax differs between Feature Policy and Permission Policy
as can be seen from the ``geolocation`` examples provided.
The default Permissions Policy is ``browsing-topics=()``, which opts sites out of
the interest-based advertising Topics API, the successor to
`Federated Learning of Cohorts <https://wicg.github.io/floc/>`_.
Permission Policy can be set either using a dictionary, or using a string.
Geolocation and Microphone Example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Disable access to Geolocation interface and Microphone using dictionary syntax
.. code:: python
permissions_policy = {
'geolocation': '()',
'microphone': '()'
}
talisman = Talisman(app, permissions_policy=permissions_policy)
Disable access to Geolocation interface and Microphone using string syntax
.. code:: python
permissions_policy = 'geolocation=(), microphone=()'
talisman = Talisman(app, permissions_policy=permissions_policy)
Document Policy
---------------
Feature Policy has been split into Permissions Policy and Document Policy but
at this writing `browser support of Document Policy is very limited <https://caniuse.com/document-policy>`_,
and it is recommended to still set the ``Feature-Policy`` HTTP Header.
Document Policy support is included in Talisman for when this becomes more
widely supported.
Note that the `Document Policy is still an Unofficial Draft <https://wicg.github.io/document-policy/>`_.
The default Document Policy is empty, as this is the default expected behaviour.
Document Policy can be set either using a dictionary, or using a string.
Oversized-Images Example
~~~~~~~~~~~~~~~~~~~~~~~~
Forbid oversized-images using dictionary syntax:
.. code:: python
document_policy = {
'oversized-images': '?0'
}
talisman = Talisman(app, document_policy=document_policy)
Forbid oversized-images using string syntax:
.. code:: python
document_policy = 'oversized-images=?0'
talisman = Talisman(app, document_policy=document_policy)
Disclaimer
----------
This code originated at Google, but is not an official Google product,
experimental or otherwise. It was forked on June 6th, 2021 from the
unmaintained GoogleCloudPlatform/flask-talisman.
There is no silver bullet for web application security. Talisman can
help, but security is more than just setting a few headers. Any
public-facing web application should have a comprehensive approach to
security.
Contributing changes
--------------------
- See `CONTRIBUTING.md`_
Licensing
---------
- Apache 2.0 - See `LICENSE`_
.. _LICENSE: https://github.com/wntrblm/flask-talisman/blob/master/LICENSE
.. _CONTRIBUTING.md: https://github.com/wntrblm/flask-talisman/blob/master/CONTRIBUTING.md
.. |PyPI Version| image:: https://img.shields.io/pypi/v/flask-talisman.svg
:target: https://pypi.python.org/pypi/flask-talisman
|
PypiClean
|
/tensorflow_macos-2.14.0rc0-cp311-cp311-macosx_12_0_arm64.whl/tensorflow/python/eager/polymorphic_function/function_context.py
|
"""Context information for a tf.function."""
from typing import NamedTuple, Any
from tensorflow.core.function.polymorphism import function_cache
from tensorflow.python.eager import context
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.saved_model import save_context
# EagerContext is used by tf.function to identify cases where tracing
# needs to occur due to a change in conditions other than the arguments.
class EagerContext(NamedTuple):
parent_graph: Any
device_functions: Any
colocation_stack: Any
in_cross_replica_context: Any
variable_policy: Any
xla_context_id: Any
def make_function_context(scope_type=None) -> function_cache.FunctionContext:
"""Generates a FunctionContext based on current contextual info."""
ctx = context.context()
# Don't need to open an init_scope if the tf.function call is in eager mode
# already.
executing_eagerly = ctx.executing_eagerly()
parent_graph = None
xla_context_id = 0
if not executing_eagerly:
# We want to force function retracing for each different
# XLAControlFlowContext, so add `xla_context_id` to the context.
xla_context = _enclosing_xla_context()
if xla_context is not None and xla_context.RequiresUniqueFunctionRetracing(
):
xla_context_id = id(xla_context)
with ops.init_scope():
# The graph, or whether we're executing eagerly, should be a part of the
# cache key so we don't improperly capture tensors such as variables.
executing_eagerly = ctx.executing_eagerly()
parent_graph = None if executing_eagerly else ops.get_default_graph()
# pylint: disable=protected-access
default_graph = ops.get_default_graph()
# TODO(b/117617952): The current distribution strategy will affect graph
# building (e.g. accessing different variables from different devices) and
# so requires retracing for each device.
strategy_stack = default_graph._distribution_strategy_stack
uses_distribution_strategy = (
strategy_stack and
strategy_stack[-1].strategy.extended._retrace_functions_for_each_device)
if executing_eagerly:
colocation_stack = ()
if uses_distribution_strategy:
device_functions = (pydev.merge_device(ctx.device_name),)
else:
device_functions = ()
else:
colocation_stack = tuple(default_graph._colocation_stack.peek_objs())
if (uses_distribution_strategy or
func_graph_module.device_stack_has_callable(
default_graph._device_function_stack)):
# Putting the device in the cache key ensures that call-site device
# annotations are respected.
device_functions = tuple(default_graph._device_functions_outer_to_inner)
else:
device_functions = ()
in_cross_replica_context = False
try:
in_cross_replica_context = (strategy_stack[-1].replica_context is None) # pylint: disable=protected-access
except (AttributeError, IndexError):
pass
if save_context.in_save_context():
variable_policy = (
save_context.get_save_options().experimental_variable_policy)
else:
variable_policy = None
return function_cache.FunctionContext(
EagerContext(
parent_graph,
device_functions,
colocation_stack,
in_cross_replica_context,
variable_policy,
xla_context_id,
),
scope_type,
)
def _enclosing_xla_context():
"""Returns the XLAControlFlowContext, which exists inside a tpu.rewrite()."""
graph = ops.get_default_graph()
while graph is not None:
# pylint: disable=protected-access
context_ = graph._get_control_flow_context()
# pylint: enable=protected-access
while context_ is not None:
if isinstance(context_, control_flow_ops.XLAControlFlowContext):
return context_
context_ = context_.outer_context
# This may be a FuncGraph due to defuns or v2 control flow. We need to
# find the original graph with the XLAControlFlowContext.
graph = getattr(graph, "outer_graph", None)
return None
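# --- Conceptual sketch (not TensorFlow API; illustrative only). EagerContext above is a
# NamedTuple so that it is hashable and can serve as (part of) a trace-cache key: a traced
# function is reused only when every contextual field matches. The toy cache below mimics
# that idea with plain Python objects; ToyContext and get_or_trace are made-up names. ---
if __name__ == "__main__":
    from typing import Dict

    class ToyContext(NamedTuple):
        parent_graph: Any
        device: Any

    _trace_cache: Dict[ToyContext, str] = {}

    def get_or_trace(ctx: ToyContext) -> str:
        # A new "trace" is created only when the context key has not been seen before.
        if ctx not in _trace_cache:
            _trace_cache[ctx] = f"trace#{len(_trace_cache)}"
        return _trace_cache[ctx]

    print(get_or_trace(ToyContext(parent_graph=None, device="CPU:0")))  # trace#0
    print(get_or_trace(ToyContext(parent_graph=None, device="CPU:0")))  # trace#0 (reused)
    print(get_or_trace(ToyContext(parent_graph=None, device="GPU:0")))  # trace#1 (retraced)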
|
PypiClean
|
/onnx_tf-1.10.0-py3-none-any.whl/onnx_tf/handlers/backend/depth_to_space.py
|
import copy
import tensorflow as tf
from onnx_tf.common import get_data_format
from onnx_tf.common.tf_helper import tf_shape
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("DepthToSpace")
@tf_func(tf.nn.depth_to_space)
class DepthToSpace(BackendHandler):
@classmethod
def get_attrs_processor_param(cls):
return {"rename": {"blocksize": "block_size"}}
@classmethod
def version_1(cls, node, **kwargs):
x = kwargs["tensor_dict"][node.inputs[0]]
x_rank = len(x.get_shape())
storage_format, compute_format = get_data_format(x_rank)
attrs = copy.deepcopy(node.attrs)
attrs["data_format"] = storage_format
return [
cls.make_tensor_from_onnx_node(node,
attrs=attrs,
c_first_cuda_only=True,
**kwargs)
]
@classmethod
def _common(cls, node, **kwargs):
x = kwargs["tensor_dict"][node.inputs[0]]
x_rank = len(x.get_shape())
storage_format, _ = get_data_format(x_rank)
attrs = copy.deepcopy(node.attrs)
attrs["data_format"] = storage_format
mode = attrs.get("mode", "DCR")
if mode == "CRD":
# need native computation
bsize = attrs.get("blocksize")
x_shape = tf_shape(x)
batch, channel = x_shape[0], x_shape[1]
height, width = x_shape[2], x_shape[3]
csize = channel // (bsize**2)
reshape_node = tf.reshape(x, [batch, csize, bsize, bsize, height, width])
transpose_node = tf.transpose(reshape_node, perm=[0, 1, 4, 2, 5, 3])
return [
tf.reshape(transpose_node,
[batch, csize, height * bsize, width * bsize])
]
return [
cls.make_tensor_from_onnx_node(node,
attrs=attrs,
c_first_cuda_only=True,
**kwargs)
]
@classmethod
def version_11(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
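# --- Minimal sketch of the CRD rearrangement used in _common() above (illustrative only).
# It reproduces the reshape/transpose path for mode == "CRD" on a tiny NCHW tensor so the
# permutation [0, 1, 4, 2, 5, 3] can be inspected outside the handler. ---
if __name__ == "__main__":
    bsize = 2
    x = tf.reshape(tf.range(1 * 8 * 2 * 2), [1, 8, 2, 2])   # N=1, C=8, H=2, W=2
    batch, channel, height, width = 1, 8, 2, 2
    csize = channel // (bsize ** 2)                          # output channels = 2
    y = tf.reshape(x, [batch, csize, bsize, bsize, height, width])
    y = tf.transpose(y, perm=[0, 1, 4, 2, 5, 3])
    y = tf.reshape(y, [batch, csize, height * bsize, width * bsize])
    print(y.shape)                                           # (1, 2, 4, 4)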
|
PypiClean
|
/py_trans/translator.py
|
import requests
from pykillerx.py_trans.language_codes import _get_full_lang_name, _get_lang_code
from pykillerx.py_trans.errors import check_internet_connection, UnknownErrorOccurred, DeprecatedMethod
class PyTranslator:
"""
PyTranslator Class
Note:
Before trying to translate, create an instance of this class with a provider (the default provider is google)
Providers:
google - Google Translate
libre - LibreTranslate Engine
translate.com - translate.com Translate
my_memory - MyMemory Translate
translate_dict - Translate Dict
Argument(s):
provider - Provider of Translator. (Must be a supported provider)
Example(s):
pytranslator = PyTranslator(provider="google")
"""
def __init__(self, provider="google"):
# Checking internet connection
check_internet_connection()
self.providers = ["google", "libre",
"translate.com", "my_memory", "translate_dict"]
if provider in self.providers:
self.provider = provider
else:
self.provider = "google"
self.lheader = {"Origin": "https://libretranslate.com",
"Host": "libretranslate.com", "Referer": "https://libretranslate.com/"}
def translate(self, text, dest_lang="en"):
"""
Translator Function
Argument(s):
text - Source Text (Text that need to be translated)
dest_lang - Destination Language
Example(s):
pytranslator.translate(text="Hi, How are you?", dest_lang="si")
"""
if self.provider == "google":
return self.google_translate(text, dest_lang)
elif self.provider == "libre":
raise DeprecatedMethod(
"Libre is no longer supported as it's translation accuracy is low")
# return self.libre_translate(text, dest_lang)
elif self.provider == "translate.com":
return self.translate_com(text, dest_lang)
elif self.provider == "my_memory":
return self.my_memory(text, dest_lang)
elif self.provider == "translate_dict":
return self.translate_dict(text, dest_lang)
else:
return
# Google Translate
def google_translate(self, text, dest_lang):
r_url = f"https://clients5.google.com/translate_a/t?client=dict-chrome-ex&sl=auto&tl={dest_lang}&q={text}"
try:
resp = requests.get(r_url).json()[0]
translation = resp[0]
origin_text = text
origin_lang = self.get_lang_name(resp[1])
dest_lang_f = self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "Google Translate", "translation": translation,
"dest_lang": dest_lang_f, "orgin_text": origin_text, "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# LibreTranslate
def _detect_lang(self, text, full_name=False):
try:
r_url = requests.post("https://libretranslate.com/detect",
data={"q": str(text)}, headers=self.lheader).json()
language_code = r_url[0]["language"]
except:
# If the language can't be detected, assume it's just English
language_code = "en"
if full_name is False:
return language_code
else:
return self.get_lang_name(language_code)
def libre_translate(self, text, dest_lang):
try:
source_lang = self._detect_lang(text=text, full_name=False)
r_url = requests.post("https://libretranslate.com/translate", data={"q": str(
text), "source": source_lang, "target": dest_lang}, headers=self.lheader).json()
translation = r_url["translatedText"]
origin_lang = self.get_lang_name(source_lang)
dest_lang_f = self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "LibreTranslate", "translation": translation,
"dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# Translate.com
def translate_com(self, text, dest_lang):
try:
source_lang = self._detect_lang(text=text, full_name=False)
r_url = requests.post(url="https://www.translate.com/translator/ajax_translate", data={"text_to_translate": str(
text), "source_lang": source_lang, "translated_lang": dest_lang, "use_cache_only": "false"}).json()
translation = r_url["translated_text"]
origin_lang = self.get_lang_name(source_lang)
dest_lang_f = self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "Translate.com", "translation": translation,
"dest_lang": dest_lang_f, "orgin_text": origin_lang, "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# My Memory
def my_memory(self, text, dest_lang):
try:
source_lang = self._detect_lang(text=text, full_name=False)
r_url = requests.get("https://api.mymemory.translated.net/get", params={
"q": text, "langpair": f"{source_lang}|{dest_lang}"}).json()
translation = r_url["matches"][0]["translation"]
origin_lang = self.get_lang_name(source_lang)
dest_lang_f = self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "MyMemory", "translation": translation,
"dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# Translate Dict
def translate_dict(self, text, dest_lang):
try:
r_url = requests.get(
f"https://t3.translatedict.com/1.php?p1=auto&p2={dest_lang}&p3={text}").text
origin_lang = self._detect_lang(text=text, full_name=True)
dest_lang_f = self.get_lang_name(dest_lang)
tr_dict = {"status": "success", "engine": "Translate Dict", "translation": r_url,
"dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
return tr_dict
except Exception as e:
return {"status": "failed", "error": e}
# Get Language Names
def get_lang_name(self, text):
if len(text) == 2:
return _get_full_lang_name(text)
else:
if len(text) <= 3:
return "Not a full language name"
else:
return _get_lang_code(text)
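# --- Usage sketch (taken from the docstrings above; requires internet access) ---
if __name__ == "__main__":
    pytranslator = PyTranslator(provider="google")
    result = pytranslator.translate(text="Hi, How are you?", dest_lang="si")
    # On success the engines above return a dict such as:
    # {"status": "success", "engine": "Google Translate", "translation": "...",
    #  "dest_lang": "...", "orgin_text": "Hi, How are you?", "origin_lang": "..."}
    print(result)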
|
PypiClean
|
/nautobot_ssot-2.0.0rc1.tar.gz/nautobot_ssot-2.0.0rc1/nautobot_ssot/jobs/base.py
|
from collections import namedtuple
from datetime import datetime
import traceback
import tracemalloc
from typing import Iterable
from django.db.utils import OperationalError
from django.templatetags.static import static
from django.utils import timezone
from django.utils.functional import classproperty
# pylint-django doesn't understand classproperty, and complains unnecessarily. We disable this specific warning:
# pylint: disable=no-self-argument
from diffsync.enum import DiffSyncFlags
import structlog
from nautobot.extras.jobs import DryRunVar, Job, BooleanVar
from nautobot_ssot.choices import SyncLogEntryActionChoices
from nautobot_ssot.models import Sync, SyncLogEntry
DataMapping = namedtuple("DataMapping", ["source_name", "source_url", "target_name", "target_url"])
"""Entry in the list returned by a job's data_mappings() API.
The idea here is to provide insight into how various classes of data are mapped between Nautobot
and the other system acting as data source/target.
* source_name: Name of a data class (device, location, vlan, etc.) provided by the data source.
* source_url: URL (if any) to hyperlink for this source_name in the UI.
Can be used, for example, to link to the Nautobot list view for this data class.
* target_name: Name of a data class on the data target that corresponds to the source data.
* target_url: URL (if any) to hyperlink the target_name.
"""
class DataSyncBaseJob(Job): # pylint: disable=too-many-instance-attributes
"""Common base class for data synchronization jobs.
Works mostly as per the BaseJob API, with the following changes:
- Concrete subclasses are responsible for implementing `self.sync_data()` (or related hooks), **not** `self.run()`.
- Subclasses may optionally define any Meta field supported by Jobs, as well as the following:
- `dryrun_default` - defaults to True if unspecified
- `data_source` and `data_target` as labels (by default, will use the `name` and/or "Nautobot" as appropriate)
- `data_source_icon` and `data_target_icon`
"""
dryrun = DryRunVar(description="Perform a dry-run, making no actual changes to Nautobot data.", default=True)
memory_profiling = BooleanVar(description="Perform a memory profiling analysis.", default=False)
def load_source_adapter(self):
"""Method to instantiate and load the SOURCE adapter into `self.source_adapter`.
Relevant available instance attributes include:
- self.job_result (as per Job API)
"""
raise NotImplementedError
def load_target_adapter(self):
"""Method to instantiate and load the TARGET adapter into `self.target_adapter`.
Relevant available instance attributes include:
- self.job_result (as per Job API)
"""
raise NotImplementedError
def calculate_diff(self):
"""Method to calculate the difference from SOURCE to TARGET adapter and store in `self.diff`.
This is a generic implementation that you could overwrite completely in your custom logic.
"""
if self.source_adapter is not None and self.target_adapter is not None:
self.diff = self.source_adapter.diff_to(self.target_adapter, flags=self.diffsync_flags)
self.sync.diff = {}
self.sync.summary = self.diff.summary()
self.sync.save()
try:
self.sync.diff = self.diff.dict()
self.sync.save()
except OperationalError:
self.logger.warning("Unable to save JSON diff to the database; likely the diff is too large.")
self.sync.refresh_from_db()
self.logger.info(self.diff.summary())
else:
self.logger.warning("Not both adapters were properly initialized prior to diff calculation.")
def execute_sync(self):
"""Method to synchronize the difference from `self.diff`, from SOURCE to TARGET adapter.
This is a generic implementation that you could overwrite completely in your custom logic.
"""
if self.source_adapter is not None and self.target_adapter is not None:
self.source_adapter.sync_to(self.target_adapter, flags=self.diffsync_flags)
else:
self.logger.warning("Not both adapters were properly initialized prior to synchronization.")
def sync_data(self, memory_profiling):
"""Method to load data from adapters, calculate diffs and sync (if not dry-run).
It is composed by 4 methods:
- self.load_source_adapter: instantiates the source adapter (self.source_adapter) and loads its data
- self.load_target_adapter: instantiates the target adapter (self.target_adapter) and loads its data
- self.calculate_diff: generates the diff from source to target adapter and stores it in self.diff
- self.execute_sync: if not dry-run, uses the self.diff to synchronize from source to target
This is a generic implementation that you could overwrite completely in your custom logic.
Available instance attributes include:
- self.sync (Sync instance tracking this job execution)
- self.job_result (as per Job API)
"""
def record_memory_trace(step: str):
"""Helper function to record memory usage and reset tracemalloc stats."""
memory_final, memory_peak = tracemalloc.get_traced_memory()
setattr(self.sync, f"{step}_memory_final", memory_final)
setattr(self.sync, f"{step}_memory_peak", memory_peak)
self.sync.save()
self.logger.info("Traced memory for %s (Final, Peak): %s bytes, %s bytes", step, memory_final, memory_peak)
tracemalloc.clear_traces()
if not self.sync:
return
if memory_profiling:
tracemalloc.start()
start_time = datetime.now()
self.logger.info("Loading current data from source adapter...")
self.load_source_adapter()
load_source_adapter_time = datetime.now()
self.sync.source_load_time = load_source_adapter_time - start_time
self.sync.save()
self.logger.info("Source Load Time from %s: %s", self.source_adapter, self.sync.source_load_time)
if memory_profiling:
record_memory_trace("source_load")
self.logger.info("Loading current data from target adapter...")
self.load_target_adapter()
load_target_adapter_time = datetime.now()
self.sync.target_load_time = load_target_adapter_time - load_source_adapter_time
self.sync.save()
self.logger.info("Target Load Time from %s: %s", self.target_adapter, self.sync.target_load_time)
if memory_profiling:
record_memory_trace("target_load")
self.logger.info("Calculating diffs...")
self.calculate_diff()
calculate_diff_time = datetime.now()
self.sync.diff_time = calculate_diff_time - load_target_adapter_time
self.sync.save()
self.logger.info("Diff Calculation Time: %s", self.sync.diff_time)
if memory_profiling:
record_memory_trace("diff")
if self.dryrun:
self.logger.info("As `dryrun` is set, skipping the actual data sync.")
else:
self.logger.info("Syncing from %s to %s...", self.source_adapter, self.target_adapter)
self.execute_sync()
execute_sync_time = datetime.now()
self.sync.sync_time = execute_sync_time - calculate_diff_time
self.sync.save()
self.logger.info("Sync complete")
self.logger.info("Sync Time: %s", self.sync.sync_time)
if memory_profiling:
record_memory_trace("sync")
def lookup_object(self, model_name, unique_id): # pylint: disable=unused-argument
"""Look up the Nautobot record, if any, identified by the args.
Optional helper method used to build more detailed/accurate SyncLogEntry records from DiffSync logs.
Args:
model_name (str): DiffSyncModel class name or similar class/model label.
unique_id (str): DiffSyncModel unique_id or similar unique identifier.
Returns:
models.Model: Nautobot model instance, or None
"""
return None
@classmethod
def data_mappings(cls) -> Iterable[DataMapping]:
"""List the data mappings involved in this sync job."""
return []
@classmethod
def config_information(cls):
"""Return a dict of user-facing configuration information {property: value}.
Note that this will be rendered 'as-is' in the UI, so as a general practice this
should NOT include sensitive information such as passwords!
"""
return {}
def sync_log( # pylint: disable=too-many-arguments
self,
action,
status,
message="",
diff=None,
synced_object=None,
object_repr="",
):
"""Log a action message as a SyncLogEntry."""
if synced_object and not object_repr:
object_repr = repr(synced_object)
SyncLogEntry.objects.create(
sync=self.sync,
action=action,
status=status,
message=message,
diff=diff,
synced_object=synced_object,
object_repr=object_repr,
)
def _structlog_to_sync_log_entry(self, _logger, _log_method, event_dict):
"""Capture certain structlog messages from DiffSync into the Nautobot database."""
if all(key in event_dict for key in ("src", "dst", "action", "model", "unique_id", "diffs", "status")):
# The DiffSync log gives us a model name (string) and unique_id (string).
# Try to look up the actual Nautobot object that this describes.
synced_object = self.lookup_object( # pylint: disable=assignment-from-none
event_dict["model"], event_dict["unique_id"]
)
object_repr = repr(synced_object) if synced_object else f"{event_dict['model']} {event_dict['unique_id']}"
self.sync_log(
action=event_dict["action"] or SyncLogEntryActionChoices.ACTION_NO_CHANGE,
diff=event_dict["diffs"] if event_dict["action"] else None,
status=event_dict["status"],
message=event_dict["event"],
synced_object=synced_object,
object_repr=object_repr,
)
return event_dict
@classmethod
def _get_vars(cls):
"""Extend Job._get_vars to include `dryrun` variable.
Workaround for https://github.com/netbox-community/netbox/issues/5529
"""
got_vars = super()._get_vars()
if hasattr(cls, "dryrun"):
got_vars["dryrun"] = cls.dryrun
if hasattr(cls, "memory_profiling"):
got_vars["memory_profiling"] = cls.memory_profiling
return got_vars
def __init__(self):
"""Initialize a Job."""
super().__init__()
self.sync = None
self.diff = None
self.source_adapter = None
self.target_adapter = None
# Default diffsync flags. You can overwrite them at any time.
self.diffsync_flags = DiffSyncFlags.CONTINUE_ON_FAILURE | DiffSyncFlags.LOG_UNCHANGED_RECORDS
def as_form(self, data=None, files=None, initial=None, approval_view=False):
"""Render this instance as a Django form for user inputs, including a "Dry run" field."""
form = super().as_form(data=data, files=files, initial=initial, approval_view=approval_view)
# Set the "dryrun" widget's initial value based on our Meta attribute, if any
form.fields["dryrun"].initial = getattr(self.Meta, "dryrun_default", True)
return form
@classproperty
def data_source(cls):
"""The system or data source providing input data for this sync."""
return getattr(cls.Meta, "data_source", cls.name)
@classproperty
def data_target(cls):
"""The system or data source being modified by this sync."""
return getattr(cls.Meta, "data_target", cls.name)
@classproperty
def data_source_icon(cls):
"""Icon corresponding to the data_source."""
return getattr(cls.Meta, "data_source_icon", None)
@classproperty
def data_target_icon(cls):
"""Icon corresponding to the data_target."""
return getattr(cls.Meta, "data_target_icon", None)
def run(self, dryrun, memory_profiling, *args, **kwargs): # pylint:disable=arguments-differ
"""Job entry point from Nautobot - do not override!"""
self.sync = Sync.objects.create(
source=self.data_source,
target=self.data_target,
dry_run=dryrun,
job_result=self.job_result,
start_time=timezone.now(),
diff={},
)
# Add _structlog_to_sync_log_entry as a processor for structlog calls from DiffSync
structlog.configure(
processors=[self._structlog_to_sync_log_entry, structlog.stdlib.render_to_log_kwargs],
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
wrapper_class=structlog.stdlib.BoundLogger,
cache_logger_on_first_use=True,
)
# We need to catch exceptions and handle them, because if they aren't caught here,
# they'll be caught by the Nautobot core run_job() function, which will trigger a database
        # rollback, which would delete the Sync record we created above!
try:
self.sync_data(memory_profiling)
except Exception as exc: # pylint: disable=broad-except
stacktrace = traceback.format_exc()
self.logger.error(f"An exception occurred: `{type(exc).__name__}: {exc}`\n```\n{stacktrace}\n```")
# pylint: disable=abstract-method
class DataSource(DataSyncBaseJob):
"""Base class for Jobs that sync data **from** another data source **to** Nautobot."""
@classproperty
def data_target(cls):
"""For a DataSource this is always Nautobot."""
return "Nautobot"
@classproperty
def data_target_icon(cls):
"""For a DataSource this is always the Nautobot logo."""
return static("img/nautobot_logo.png")
class DataTarget(DataSyncBaseJob):
"""Base class for Jobs that sync data **to** another data target **from** Nautobot."""
@classproperty
def data_source(cls):
"""For a DataTarget this is always Nautobot."""
return "Nautobot"
@classproperty
def data_source_icon(cls):
"""For a DataTarget this is always the Nautobot logo."""
return static("img/nautobot_logo.png")
/apache_superset_iteco-2.1.1.4-py3-none-any.whl/superset/datasets/dao.py
import logging
from typing import Any, Dict, List, Optional
from sqlalchemy.exc import SQLAlchemyError
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.dao.base import BaseDAO
from superset.extensions import db
from superset.models.core import Database
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.utils.core import DatasourceType
from superset.views.base import DatasourceFilter
logger = logging.getLogger(__name__)
class DatasetDAO(BaseDAO): # pylint: disable=too-many-public-methods
model_cls = SqlaTable
base_filter = DatasourceFilter
@staticmethod
def get_database_by_id(database_id: int) -> Optional[Database]:
try:
return db.session.query(Database).filter_by(id=database_id).one_or_none()
except SQLAlchemyError as ex: # pragma: no cover
logger.error("Could not get database by id: %s", str(ex), exc_info=True)
return None
@staticmethod
def get_related_objects(database_id: int) -> Dict[str, Any]:
charts = (
db.session.query(Slice)
.filter(
Slice.datasource_id == database_id,
Slice.datasource_type == DatasourceType.TABLE,
)
.all()
)
chart_ids = [chart.id for chart in charts]
dashboards = (
(
db.session.query(Dashboard)
.join(Dashboard.slices)
.filter(Slice.id.in_(chart_ids))
)
.distinct()
.all()
)
return dict(charts=charts, dashboards=dashboards)
@staticmethod
def validate_table_exists(
database: Database, table_name: str, schema: Optional[str]
) -> bool:
try:
database.get_table(table_name, schema=schema)
return True
except SQLAlchemyError as ex: # pragma: no cover
logger.warning("Got an error %s validating table: %s", str(ex), table_name)
return False
@staticmethod
def validate_uniqueness(
database_id: int,
schema: Optional[str],
name: str,
dataset_id: Optional[int] = None,
) -> bool:
dataset_query = db.session.query(SqlaTable).filter(
SqlaTable.table_name == name,
SqlaTable.schema == schema,
SqlaTable.database_id == database_id,
)
if dataset_id:
# make sure the dataset found is different from the target (if any)
dataset_query = dataset_query.filter(SqlaTable.id != dataset_id)
return not db.session.query(dataset_query.exists()).scalar()
@staticmethod
def validate_update_uniqueness(
database_id: int, dataset_id: int, name: str
) -> bool:
dataset_query = db.session.query(SqlaTable).filter(
SqlaTable.table_name == name,
SqlaTable.database_id == database_id,
SqlaTable.id != dataset_id,
)
return not db.session.query(dataset_query.exists()).scalar()
@staticmethod
def validate_columns_exist(dataset_id: int, columns_ids: List[int]) -> bool:
dataset_query = (
db.session.query(TableColumn.id).filter(
TableColumn.table_id == dataset_id, TableColumn.id.in_(columns_ids)
)
).all()
return len(columns_ids) == len(dataset_query)
@staticmethod
def validate_columns_uniqueness(dataset_id: int, columns_names: List[str]) -> bool:
dataset_query = (
db.session.query(TableColumn.id).filter(
TableColumn.table_id == dataset_id,
TableColumn.column_name.in_(columns_names),
)
).all()
return len(dataset_query) == 0
@staticmethod
def validate_metrics_exist(dataset_id: int, metrics_ids: List[int]) -> bool:
dataset_query = (
db.session.query(SqlMetric.id).filter(
SqlMetric.table_id == dataset_id, SqlMetric.id.in_(metrics_ids)
)
).all()
return len(metrics_ids) == len(dataset_query)
@staticmethod
def validate_metrics_uniqueness(dataset_id: int, metrics_names: List[str]) -> bool:
dataset_query = (
db.session.query(SqlMetric.id).filter(
SqlMetric.table_id == dataset_id,
SqlMetric.metric_name.in_(metrics_names),
)
).all()
return len(dataset_query) == 0
@classmethod
def update(
cls,
model: SqlaTable,
properties: Dict[str, Any],
commit: bool = True,
) -> Optional[SqlaTable]:
"""
Updates a Dataset model on the metadata DB
"""
if "columns" in properties:
cls.update_columns(
model,
properties.pop("columns"),
commit=commit,
override_columns=bool(properties.get("override_columns")),
)
if "metrics" in properties:
cls.update_metrics(model, properties.pop("metrics"), commit=commit)
return super().update(model, properties, commit=commit)
@classmethod
def update_columns(
cls,
model: SqlaTable,
property_columns: List[Dict[str, Any]],
commit: bool = True,
override_columns: bool = False,
) -> None:
"""
Creates/updates and/or deletes a list of columns, based on a
list of Dict.
- If a column Dict has an `id` property then we update.
        - If a column Dict does not have an `id` then we create a new column.
- If there are extra columns on the metadata db that are not defined on the List
then we delete.
"""
if override_columns:
db.session.query(TableColumn).filter(
TableColumn.table_id == model.id
).delete(synchronize_session="fetch")
db.session.bulk_insert_mappings(
TableColumn,
[
{**properties, "table_id": model.id}
for properties in property_columns
],
)
else:
columns_by_id = {column.id: column for column in model.columns}
property_columns_by_id = {
properties["id"]: properties
for properties in property_columns
if "id" in properties
}
db.session.bulk_insert_mappings(
TableColumn,
[
{**properties, "table_id": model.id}
for properties in property_columns
if not "id" in properties
],
)
db.session.bulk_update_mappings(
TableColumn,
[
{**columns_by_id[properties["id"]].__dict__, **properties}
for properties in property_columns_by_id.values()
],
)
db.session.query(TableColumn).filter(
TableColumn.id.in_(
{column.id for column in model.columns}
- property_columns_by_id.keys()
)
).delete(synchronize_session="fetch")
if commit:
db.session.commit()
@classmethod
def update_metrics(
cls,
model: SqlaTable,
property_metrics: List[Dict[str, Any]],
commit: bool = True,
) -> None:
"""
Creates/updates and/or deletes a list of metrics, based on a
list of Dict.
- If a metric Dict has an `id` property then we update.
- If a metric Dict does not have an `id` then we create a new metric.
- If there are extra metrics on the metadata db that are not defined on the List
then we delete.
"""
metrics_by_id = {metric.id: metric for metric in model.metrics}
property_metrics_by_id = {
properties["id"]: properties
for properties in property_metrics
if "id" in properties
}
db.session.bulk_insert_mappings(
SqlMetric,
[
{**properties, "table_id": model.id}
for properties in property_metrics
if not "id" in properties
],
)
db.session.bulk_update_mappings(
SqlMetric,
[
{**metrics_by_id[properties["id"]].__dict__, **properties}
for properties in property_metrics_by_id.values()
],
)
db.session.query(SqlMetric).filter(
SqlMetric.id.in_(
{metric.id for metric in model.metrics} - property_metrics_by_id.keys()
)
).delete(synchronize_session="fetch")
if commit:
db.session.commit()
@classmethod
def find_dataset_column(
cls, dataset_id: int, column_id: int
) -> Optional[TableColumn]:
# We want to apply base dataset filters
dataset = DatasetDAO.find_by_id(dataset_id)
if not dataset:
return None
return (
db.session.query(TableColumn)
.filter(TableColumn.table_id == dataset_id, TableColumn.id == column_id)
.one_or_none()
)
@classmethod
def update_column(
cls,
model: TableColumn,
properties: Dict[str, Any],
commit: bool = True,
) -> TableColumn:
return DatasetColumnDAO.update(model, properties, commit=commit)
@classmethod
def create_column(
cls, properties: Dict[str, Any], commit: bool = True
) -> TableColumn:
"""
Creates a Dataset model on the metadata DB
"""
return DatasetColumnDAO.create(properties, commit=commit)
@classmethod
def delete_column(cls, model: TableColumn, commit: bool = True) -> TableColumn:
"""
Deletes a Dataset column
"""
return cls.delete(model, commit=commit)
@classmethod
def find_dataset_metric(
cls, dataset_id: int, metric_id: int
) -> Optional[SqlMetric]:
# We want to apply base dataset filters
dataset = DatasetDAO.find_by_id(dataset_id)
if not dataset:
return None
return db.session.query(SqlMetric).get(metric_id)
@classmethod
def delete_metric(cls, model: SqlMetric, commit: bool = True) -> SqlMetric:
"""
Deletes a Dataset metric
"""
return cls.delete(model, commit=commit)
@classmethod
def update_metric(
cls,
model: SqlMetric,
properties: Dict[str, Any],
commit: bool = True,
) -> SqlMetric:
return DatasetMetricDAO.update(model, properties, commit=commit)
@classmethod
def create_metric(
cls,
properties: Dict[str, Any],
commit: bool = True,
) -> SqlMetric:
"""
Creates a Dataset model on the metadata DB
"""
return DatasetMetricDAO.create(properties, commit=commit)
@staticmethod
def bulk_delete(models: Optional[List[SqlaTable]], commit: bool = True) -> None:
item_ids = [model.id for model in models] if models else []
# bulk delete, first delete related data
if models:
for model in models:
model.owners = []
db.session.merge(model)
db.session.query(SqlMetric).filter(SqlMetric.table_id.in_(item_ids)).delete(
synchronize_session="fetch"
)
db.session.query(TableColumn).filter(
TableColumn.table_id.in_(item_ids)
).delete(synchronize_session="fetch")
# bulk delete itself
try:
db.session.query(SqlaTable).filter(SqlaTable.id.in_(item_ids)).delete(
synchronize_session="fetch"
)
if commit:
db.session.commit()
except SQLAlchemyError as ex:
if commit:
db.session.rollback()
raise ex
@staticmethod
def get_table_by_name(database_id: int, table_name: str) -> Optional[SqlaTable]:
return (
db.session.query(SqlaTable)
.filter_by(database_id=database_id, table_name=table_name)
.one_or_none()
)
class DatasetColumnDAO(BaseDAO):
model_cls = TableColumn
class DatasetMetricDAO(BaseDAO):
model_cls = SqlMetric
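

# Illustrative usage sketch of the update flow documented in
# DatasetDAO.update_columns/update_metrics: dicts carrying an "id" are updated,
# dicts without one are created, and rows absent from the list are deleted.
# The payload fields below (description, verbose_name, type, expression) and
# the helper name are assumptions for illustration, not part of this module.
def _example_update_dataset(dataset_id: int) -> None:
    dataset = DatasetDAO.find_by_id(dataset_id)
    if dataset is None:
        return
    properties: Dict[str, Any] = {
        "description": "Example dataset",
        "columns": [
            # New column: no "id", so update_columns bulk-inserts it.
            {"column_name": "new_col", "type": "VARCHAR"},
        ],
        "metrics": [
            # New metric: no "id", so update_metrics bulk-inserts it.
            {"metric_name": "row_count", "expression": "COUNT(*)"},
        ],
    }
    if dataset.columns:
        # Existing column: carries an "id", so update_columns bulk-updates it.
        # Any existing columns omitted from this list would be deleted.
        properties["columns"].append(
            {"id": dataset.columns[0].id, "verbose_name": "Renamed column"}
        )
    DatasetDAO.update(dataset, properties, commit=True)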
/ressources/lib/node_modules/highcharts/modules/no-data-to-display.src.js
'use strict';
(function (factory) {
if (typeof module === 'object' && module.exports) {
module.exports = factory;
} else if (typeof define === 'function' && define.amd) {
define(function () {
return factory;
});
} else {
factory(Highcharts);
}
}(function (Highcharts) {
(function (H) {
/**
* Plugin for displaying a message when there is no data visible in chart.
*
* (c) 2010-2017 Highsoft AS
* Author: Oystein Moseng
*
* License: www.highcharts.com/license
*/
var seriesTypes = H.seriesTypes,
chartPrototype = H.Chart.prototype,
defaultOptions = H.getOptions(),
extend = H.extend,
each = H.each;
// Add language option
extend(defaultOptions.lang, {
/**
* The text to display when the chart contains no data. Requires the
* no-data module, see [noData](#noData).
*
* @type {String}
* @default No data to display
* @since 3.0.8
* @product highcharts highstock
* @sample highcharts/no-data-to-display/no-data-line
* No-data text
* @apioption lang.noData
*/
noData: 'No data to display'
});
// Add default display options for message
/**
* Options for displaying a message like "No data to display".
* This feature requires the file no-data-to-display.js to be loaded in the
* page. The actual text to display is set in the lang.noData option.
* @type {Object}
*
* @sample highcharts/no-data-to-display/no-data-line
* Line chart with no-data module
* @sample highcharts/no-data-to-display/no-data-pie
* Pie chart with no-data module
* @optionparent noData
*/
defaultOptions.noData = {
/**
* An object of additional SVG attributes for the no-data label.
*
* @type {Object}
* @since 3.0.8
* @product highcharts highstock
* @apioption noData.attr
*/
/**
* Whether to insert the label as HTML, or as pseudo-HTML rendered with
* SVG.
*
* @type {Boolean}
* @default false
* @since 4.1.10
* @product highcharts highstock
* @apioption noData.useHTML
*/
/**
* The position of the no-data label, relative to the plot area.
*
* @type {Object}
* @default { "x": 0, "y": 0, "align": "center", "verticalAlign": "middle" }
* @since 3.0.8
*/
position: {
/**
* Horizontal offset of the label, in pixels.
*
* @type {Number}
* @default 0
* @product highcharts highstock
*/
x: 0,
/**
* Vertical offset of the label, in pixels.
*
* @type {Number}
* @default 0
* @product highcharts highstock
*/
y: 0,
/**
* Horizontal alignment of the label.
*
* @validvalue ["left", "center", "right"]
* @type {String}
* @default center
*/
align: 'center',
/**
* Vertical alignment of the label.
*
* @validvalue ["top", "middle", "bottom"]
* @type {String}
* @default middle
* @product highcharts highstock
*/
verticalAlign: 'middle'
}
};
// Presentational
/**
* CSS styles for the no-data label.
*
* @sample highcharts/no-data-to-display/no-data-line
* Styled no-data text
* @optionparent noData.style
*/
defaultOptions.noData.style = {
fontWeight: 'bold',
fontSize: '12px',
color: '#666666'
};
        // Define hasData function for non-cartesian series. Returns true if the series
// has points at all.
each([
'bubble',
'gauge',
'heatmap',
'pie',
'sankey',
'treemap',
'waterfall'
], function (type) {
if (seriesTypes[type]) {
seriesTypes[type].prototype.hasData = function () {
return !!this.points.length; // != 0
};
}
});
/**
* Define hasData functions for series. These return true if there are data
* points on this series within the plot area.
*/
H.Series.prototype.hasData = function () {
return (
this.visible &&
this.dataMax !== undefined &&
this.dataMin !== undefined // #3703
);
};
/**
* Display a no-data message.
*
* @param {String} str An optional message to show in place of the default one
*/
chartPrototype.showNoData = function (str) {
var chart = this,
options = chart.options,
text = str || (options && options.lang.noData),
noDataOptions = options && options.noData;
if (!chart.noDataLabel && chart.renderer) {
chart.noDataLabel = chart.renderer
.label(
text,
0,
0,
null,
null,
null,
noDataOptions.useHTML,
null,
'no-data'
);
chart.noDataLabel
.attr(noDataOptions.attr)
.css(noDataOptions.style);
chart.noDataLabel.add();
chart.noDataLabel.align(
extend(chart.noDataLabel.getBBox(), noDataOptions.position),
false,
'plotBox'
);
}
};
/**
* Hide no-data message
*/
chartPrototype.hideNoData = function () {
var chart = this;
if (chart.noDataLabel) {
chart.noDataLabel = chart.noDataLabel.destroy();
}
};
/**
* Returns true if there are data points within the plot area now
*/
chartPrototype.hasData = function () {
var chart = this,
series = chart.series || [],
i = series.length;
while (i--) {
if (series[i].hasData() && !series[i].options.isInternal) {
return true;
}
}
return chart.loadingShown; // #4588
};
/**
* Add event listener to handle automatic show or hide no-data message
*/
H.addEvent(H.Chart, 'render', function handleNoData() {
if (this.hasData()) {
this.hideNoData();
} else {
this.showNoData();
}
});
}(Highcharts));
return (function () {
}());
}));
/pynusmv-1.0rc8-cp35-cp35m-manylinux1_x86_64.whl/pynusmv_lower_interface/nusmv/hrc/dumpers/dumpers.py
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_dumpers')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_dumpers')
_dumpers = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_dumpers', [dirname(__file__)])
except ImportError:
import _dumpers
return _dumpers
try:
_mod = imp.load_module('_dumpers', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_dumpers = swig_import_helper()
del swig_import_helper
else:
import _dumpers
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
PRIuPTR = _dumpers.PRIuPTR
PRIdPTR = _dumpers.PRIdPTR
LLU = _dumpers.LLU
LLO = _dumpers.LLO
LLX = _dumpers.LLX
false = _dumpers.false
true = _dumpers.true
OUTCOME_GENERIC_ERROR = _dumpers.OUTCOME_GENERIC_ERROR
OUTCOME_PARSER_ERROR = _dumpers.OUTCOME_PARSER_ERROR
OUTCOME_SYNTAX_ERROR = _dumpers.OUTCOME_SYNTAX_ERROR
OUTCOME_FILE_ERROR = _dumpers.OUTCOME_FILE_ERROR
OUTCOME_SUCCESS_REQUIRED_HELP = _dumpers.OUTCOME_SUCCESS_REQUIRED_HELP
OUTCOME_SUCCESS = _dumpers.OUTCOME_SUCCESS
def Object_destroy(arg1: 'Object_ptr', arg: 'void *') -> "void":
"""Object_destroy(Object_ptr arg1, void * arg)"""
return _dumpers.Object_destroy(arg1, arg)
def Object_copy(arg1: 'Object_ptr const') -> "Object_ptr":
"""Object_copy(Object_ptr const arg1) -> Object_ptr"""
return _dumpers.Object_copy(arg1)
HDS_HRC_TOP = _dumpers.HDS_HRC_TOP
HDS_LIST_MODS = _dumpers.HDS_LIST_MODS
HDS_MOD = _dumpers.HDS_MOD
HDS_MOD_NAME = _dumpers.HDS_MOD_NAME
HDS_LIST_MOD_FORMAL_PARAMS = _dumpers.HDS_LIST_MOD_FORMAL_PARAMS
HDS_MOD_FORMAL_PARAM = _dumpers.HDS_MOD_FORMAL_PARAM
HDS_LIST_MOD_INSTANCES = _dumpers.HDS_LIST_MOD_INSTANCES
HDS_MOD_INSTANCE = _dumpers.HDS_MOD_INSTANCE
HDS_MOD_INSTANCE_VARNAME = _dumpers.HDS_MOD_INSTANCE_VARNAME
HDS_MOD_INSTANCE_MODNAME = _dumpers.HDS_MOD_INSTANCE_MODNAME
HDS_LIST_MOD_INSTANCE_ACTUAL_PARAMS = _dumpers.HDS_LIST_MOD_INSTANCE_ACTUAL_PARAMS
HDS_MOD_INSTANCE_ACTUAL_PARAM = _dumpers.HDS_MOD_INSTANCE_ACTUAL_PARAM
HDS_LIST_SYMBOLS = _dumpers.HDS_LIST_SYMBOLS
HDS_SYMBOL = _dumpers.HDS_SYMBOL
HDS_LIST_ASSIGNS = _dumpers.HDS_LIST_ASSIGNS
HDS_ASSIGN_INIT = _dumpers.HDS_ASSIGN_INIT
HDS_ASSIGN_INVAR = _dumpers.HDS_ASSIGN_INVAR
HDS_ASSIGN_NEXT = _dumpers.HDS_ASSIGN_NEXT
HDS_LIST_CONSTRAINTS = _dumpers.HDS_LIST_CONSTRAINTS
HDS_CONSTRAINT_INIT = _dumpers.HDS_CONSTRAINT_INIT
HDS_CONSTRAINT_INVAR = _dumpers.HDS_CONSTRAINT_INVAR
HDS_CONSTRAINT_TRANS = _dumpers.HDS_CONSTRAINT_TRANS
HDS_LIST_FAIRNESS = _dumpers.HDS_LIST_FAIRNESS
HDS_JUSTICE = _dumpers.HDS_JUSTICE
HDS_COMPASSION = _dumpers.HDS_COMPASSION
HDS_LIST_SPECS = _dumpers.HDS_LIST_SPECS
HDS_SPEC = _dumpers.HDS_SPEC
HDS_LIST_COMPILER_INFO = _dumpers.HDS_LIST_COMPILER_INFO
HDS_LIST_SYNTAX_ERRORS = _dumpers.HDS_LIST_SYNTAX_ERRORS
HDS_ERROR = _dumpers.HDS_ERROR
class HrcDumperInfo(_object):
"""Proxy of C HrcDumperInfo_TAG struct."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HrcDumperInfo, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HrcDumperInfo, name)
__repr__ = _swig_repr
__swig_setmethods__["stage"] = _dumpers.HrcDumperInfo_stage_set
__swig_getmethods__["stage"] = _dumpers.HrcDumperInfo_stage_get
if _newclass:
stage = _swig_property(_dumpers.HrcDumperInfo_stage_get, _dumpers.HrcDumperInfo_stage_set)
__swig_getmethods__["n1"] = _dumpers.HrcDumperInfo_n1_get
if _newclass:
n1 = _swig_property(_dumpers.HrcDumperInfo_n1_get)
__swig_getmethods__["n2"] = _dumpers.HrcDumperInfo_n2_get
if _newclass:
n2 = _swig_property(_dumpers.HrcDumperInfo_n2_get)
__swig_getmethods__["error"] = _dumpers.HrcDumperInfo_error_get
if _newclass:
error = _swig_property(_dumpers.HrcDumperInfo_error_get)
__swig_setmethods__["symb_cat"] = _dumpers.HrcDumperInfo_symb_cat_set
__swig_getmethods__["symb_cat"] = _dumpers.HrcDumperInfo_symb_cat_get
if _newclass:
symb_cat = _swig_property(_dumpers.HrcDumperInfo_symb_cat_get, _dumpers.HrcDumperInfo_symb_cat_set)
__swig_setmethods__["spec_type"] = _dumpers.HrcDumperInfo_spec_type_set
__swig_getmethods__["spec_type"] = _dumpers.HrcDumperInfo_spec_type_get
if _newclass:
spec_type = _swig_property(_dumpers.HrcDumperInfo_spec_type_get, _dumpers.HrcDumperInfo_spec_type_set)
__swig_setmethods__["last_in_list"] = _dumpers.HrcDumperInfo_last_in_list_set
__swig_getmethods__["last_in_list"] = _dumpers.HrcDumperInfo_last_in_list_get
if _newclass:
last_in_list = _swig_property(_dumpers.HrcDumperInfo_last_in_list_get, _dumpers.HrcDumperInfo_last_in_list_set)
__swig_setmethods__["list_is_empty"] = _dumpers.HrcDumperInfo_list_is_empty_set
__swig_getmethods__["list_is_empty"] = _dumpers.HrcDumperInfo_list_is_empty_get
if _newclass:
list_is_empty = _swig_property(_dumpers.HrcDumperInfo_list_is_empty_get, _dumpers.HrcDumperInfo_list_is_empty_set)
__swig_setmethods__["hrcNode"] = _dumpers.HrcDumperInfo_hrcNode_set
__swig_getmethods__["hrcNode"] = _dumpers.HrcDumperInfo_hrcNode_get
if _newclass:
hrcNode = _swig_property(_dumpers.HrcDumperInfo_hrcNode_get, _dumpers.HrcDumperInfo_hrcNode_set)
__swig_setmethods__["user"] = _dumpers.HrcDumperInfo_user_set
__swig_getmethods__["user"] = _dumpers.HrcDumperInfo_user_get
if _newclass:
user = _swig_property(_dumpers.HrcDumperInfo_user_get, _dumpers.HrcDumperInfo_user_set)
def __init__(self):
"""__init__(HrcDumperInfo_TAG self) -> HrcDumperInfo"""
this = _dumpers.new_HrcDumperInfo()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _dumpers.delete_HrcDumperInfo
__del__ = lambda self: None
HrcDumperInfo_swigregister = _dumpers.HrcDumperInfo_swigregister
HrcDumperInfo_swigregister(HrcDumperInfo)
HRC_STAGE_BEGIN = _dumpers.HRC_STAGE_BEGIN
HRC_STAGE_END = _dumpers.HRC_STAGE_END
HRC_STAGE_BEGIN_END = _dumpers.HRC_STAGE_BEGIN_END
class HrcDumperInfo_TAG_error(_object):
"""Proxy of C HrcDumperInfo_TAG_error struct."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HrcDumperInfo_TAG_error, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HrcDumperInfo_TAG_error, name)
__repr__ = _swig_repr
__swig_setmethods__["lineno"] = _dumpers.HrcDumperInfo_TAG_error_lineno_set
__swig_getmethods__["lineno"] = _dumpers.HrcDumperInfo_TAG_error_lineno_get
if _newclass:
lineno = _swig_property(_dumpers.HrcDumperInfo_TAG_error_lineno_get, _dumpers.HrcDumperInfo_TAG_error_lineno_set)
__swig_setmethods__["filename"] = _dumpers.HrcDumperInfo_TAG_error_filename_set
__swig_getmethods__["filename"] = _dumpers.HrcDumperInfo_TAG_error_filename_get
if _newclass:
filename = _swig_property(_dumpers.HrcDumperInfo_TAG_error_filename_get, _dumpers.HrcDumperInfo_TAG_error_filename_set)
__swig_setmethods__["message"] = _dumpers.HrcDumperInfo_TAG_error_message_set
__swig_getmethods__["message"] = _dumpers.HrcDumperInfo_TAG_error_message_get
if _newclass:
message = _swig_property(_dumpers.HrcDumperInfo_TAG_error_message_get, _dumpers.HrcDumperInfo_TAG_error_message_set)
__swig_setmethods__["token"] = _dumpers.HrcDumperInfo_TAG_error_token_set
__swig_getmethods__["token"] = _dumpers.HrcDumperInfo_TAG_error_token_get
if _newclass:
token = _swig_property(_dumpers.HrcDumperInfo_TAG_error_token_get, _dumpers.HrcDumperInfo_TAG_error_token_set)
def __init__(self):
"""__init__(HrcDumperInfo_TAG_error self) -> HrcDumperInfo_TAG_error"""
this = _dumpers.new_HrcDumperInfo_TAG_error()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _dumpers.delete_HrcDumperInfo_TAG_error
__del__ = lambda self: None
HrcDumperInfo_TAG_error_swigregister = _dumpers.HrcDumperInfo_TAG_error_swigregister
HrcDumperInfo_TAG_error_swigregister(HrcDumperInfo_TAG_error)
class HrcDumperInfo_TAG_n2(_object):
"""Proxy of C HrcDumperInfo_TAG_n2 struct."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HrcDumperInfo_TAG_n2, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HrcDumperInfo_TAG_n2, name)
__repr__ = _swig_repr
__swig_setmethods__["type"] = _dumpers.HrcDumperInfo_TAG_n2_type_set
__swig_getmethods__["type"] = _dumpers.HrcDumperInfo_TAG_n2_type_get
if _newclass:
type = _swig_property(_dumpers.HrcDumperInfo_TAG_n2_type_get, _dumpers.HrcDumperInfo_TAG_n2_type_set)
__swig_setmethods__["body"] = _dumpers.HrcDumperInfo_TAG_n2_body_set
__swig_getmethods__["body"] = _dumpers.HrcDumperInfo_TAG_n2_body_get
if _newclass:
body = _swig_property(_dumpers.HrcDumperInfo_TAG_n2_body_get, _dumpers.HrcDumperInfo_TAG_n2_body_set)
__swig_setmethods__["expr"] = _dumpers.HrcDumperInfo_TAG_n2_expr_set
__swig_getmethods__["expr"] = _dumpers.HrcDumperInfo_TAG_n2_expr_get
if _newclass:
expr = _swig_property(_dumpers.HrcDumperInfo_TAG_n2_expr_get, _dumpers.HrcDumperInfo_TAG_n2_expr_set)
__swig_setmethods__["lineno"] = _dumpers.HrcDumperInfo_TAG_n2_lineno_set
__swig_getmethods__["lineno"] = _dumpers.HrcDumperInfo_TAG_n2_lineno_get
if _newclass:
lineno = _swig_property(_dumpers.HrcDumperInfo_TAG_n2_lineno_get, _dumpers.HrcDumperInfo_TAG_n2_lineno_set)
def __init__(self):
"""__init__(HrcDumperInfo_TAG_n2 self) -> HrcDumperInfo_TAG_n2"""
this = _dumpers.new_HrcDumperInfo_TAG_n2()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _dumpers.delete_HrcDumperInfo_TAG_n2
__del__ = lambda self: None
HrcDumperInfo_TAG_n2_swigregister = _dumpers.HrcDumperInfo_TAG_n2_swigregister
HrcDumperInfo_TAG_n2_swigregister(HrcDumperInfo_TAG_n2)
class HrcDumperInfo_TAG_n1(_object):
"""Proxy of C HrcDumperInfo_TAG_n1 struct."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, HrcDumperInfo_TAG_n1, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, HrcDumperInfo_TAG_n1, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _dumpers.HrcDumperInfo_TAG_n1_name_set
__swig_getmethods__["name"] = _dumpers.HrcDumperInfo_TAG_n1_name_get
if _newclass:
name = _swig_property(_dumpers.HrcDumperInfo_TAG_n1_name_get, _dumpers.HrcDumperInfo_TAG_n1_name_set)
__swig_setmethods__["value"] = _dumpers.HrcDumperInfo_TAG_n1_value_set
__swig_getmethods__["value"] = _dumpers.HrcDumperInfo_TAG_n1_value_get
if _newclass:
value = _swig_property(_dumpers.HrcDumperInfo_TAG_n1_value_get, _dumpers.HrcDumperInfo_TAG_n1_value_set)
__swig_setmethods__["expr"] = _dumpers.HrcDumperInfo_TAG_n1_expr_set
__swig_getmethods__["expr"] = _dumpers.HrcDumperInfo_TAG_n1_expr_get
if _newclass:
expr = _swig_property(_dumpers.HrcDumperInfo_TAG_n1_expr_get, _dumpers.HrcDumperInfo_TAG_n1_expr_set)
def __init__(self):
"""__init__(HrcDumperInfo_TAG_n1 self) -> HrcDumperInfo_TAG_n1"""
this = _dumpers.new_HrcDumperInfo_TAG_n1()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _dumpers.delete_HrcDumperInfo_TAG_n1
__del__ = lambda self: None
HrcDumperInfo_TAG_n1_swigregister = _dumpers.HrcDumperInfo_TAG_n1_swigregister
HrcDumperInfo_TAG_n1_swigregister(HrcDumperInfo_TAG_n1)
def HrcDumper_create(fout: 'FILE *') -> "HrcDumper_ptr":
"""HrcDumper_create(FILE * fout) -> HrcDumper_ptr"""
return _dumpers.HrcDumper_create(fout)
def HrcDumper_destroy(arg1: 'HrcDumper_ptr') -> "void":
"""HrcDumper_destroy(HrcDumper_ptr arg1)"""
return _dumpers.HrcDumper_destroy(arg1)
def HrcDumper_dump_snippet(arg1: 'HrcDumper_ptr', snippet: 'HrcDumperSnippet', info: 'HrcDumperInfo') -> "void":
"""HrcDumper_dump_snippet(HrcDumper_ptr arg1, HrcDumperSnippet snippet, HrcDumperInfo info)"""
return _dumpers.HrcDumper_dump_snippet(arg1, snippet, info)
def HrcDumper_enable_indentation(arg1: 'HrcDumper_ptr', flag: 'boolean') -> "void":
"""HrcDumper_enable_indentation(HrcDumper_ptr arg1, boolean flag)"""
return _dumpers.HrcDumper_enable_indentation(arg1, flag)
def HrcDumper_inc_indent(arg1: 'HrcDumper_ptr') -> "void":
"""HrcDumper_inc_indent(HrcDumper_ptr arg1)"""
return _dumpers.HrcDumper_inc_indent(arg1)
def HrcDumper_dec_indent(arg1: 'HrcDumper_ptr') -> "void":
"""HrcDumper_dec_indent(HrcDumper_ptr arg1)"""
return _dumpers.HrcDumper_dec_indent(arg1)
def HrcDumper_enable_mod_suffix(arg1: 'HrcDumper_ptr', flag: 'boolean') -> "void":
"""HrcDumper_enable_mod_suffix(HrcDumper_ptr arg1, boolean flag)"""
return _dumpers.HrcDumper_enable_mod_suffix(arg1, flag)
def HrcDumperDebug_create(fout: 'FILE *') -> "HrcDumperDebug_ptr":
"""HrcDumperDebug_create(FILE * fout) -> HrcDumperDebug_ptr"""
return _dumpers.HrcDumperDebug_create(fout)
def HrcDumperSmv_create(fout: 'FILE *') -> "HrcDumperSmv_ptr":
"""HrcDumperSmv_create(FILE * fout) -> HrcDumperSmv_ptr"""
return _dumpers.HrcDumperSmv_create(fout)
def HrcDumperXml_create(fout: 'FILE *') -> "HrcDumperXml_ptr":
"""HrcDumperXml_create(FILE * fout) -> HrcDumperXml_ptr"""
return _dumpers.HrcDumperXml_create(fout)
# This file is compatible with both classic and new-style classes.
/cdktf-cdktf-provider-google_beta-9.0.1.tar.gz/cdktf-cdktf-provider-google_beta-9.0.1/src/cdktf_cdktf_provider_google_beta/data_google_compute_backend_bucket/__init__.py
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import cdktf as _cdktf_9a9027ec
import constructs as _constructs_77d1e7e8
class DataGoogleComputeBackendBucket(
_cdktf_9a9027ec.TerraformDataSource,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucket",
):
'''Represents a {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket google_compute_backend_bucket}.'''
def __init__(
self,
scope: _constructs_77d1e7e8.Construct,
id_: builtins.str,
*,
name: builtins.str,
id: typing.Optional[builtins.str] = None,
project: typing.Optional[builtins.str] = None,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
'''Create a new {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket google_compute_backend_bucket} Data Source.
:param scope: The scope in which to define this construct.
:param id_: The scoped construct ID. Must be unique amongst siblings in the same scope
        :param name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#name DataGoogleComputeBackendBucket#name}
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#id DataGoogleComputeBackendBucket#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param project: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#project DataGoogleComputeBackendBucket#project}.
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__7e2d78d18436131ea7e1711059d0d67d8353315190a6e754a1750bf022e5cc20)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"])
config = DataGoogleComputeBackendBucketConfig(
name=name,
id=id,
project=project,
connection=connection,
count=count,
depends_on=depends_on,
for_each=for_each,
lifecycle=lifecycle,
provider=provider,
provisioners=provisioners,
)
jsii.create(self.__class__, self, [scope, id_, config])
@jsii.member(jsii_name="resetId")
def reset_id(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetId", []))
@jsii.member(jsii_name="resetProject")
def reset_project(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetProject", []))
@jsii.member(jsii_name="synthesizeAttributes")
def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))
@jsii.python.classproperty
@jsii.member(jsii_name="tfResourceType")
def TF_RESOURCE_TYPE(cls) -> builtins.str:
return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))
@builtins.property
@jsii.member(jsii_name="bucketName")
def bucket_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "bucketName"))
@builtins.property
@jsii.member(jsii_name="cdnPolicy")
def cdn_policy(self) -> "DataGoogleComputeBackendBucketCdnPolicyList":
return typing.cast("DataGoogleComputeBackendBucketCdnPolicyList", jsii.get(self, "cdnPolicy"))
@builtins.property
@jsii.member(jsii_name="compressionMode")
def compression_mode(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "compressionMode"))
@builtins.property
@jsii.member(jsii_name="creationTimestamp")
def creation_timestamp(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "creationTimestamp"))
@builtins.property
@jsii.member(jsii_name="customResponseHeaders")
def custom_response_headers(self) -> typing.List[builtins.str]:
return typing.cast(typing.List[builtins.str], jsii.get(self, "customResponseHeaders"))
@builtins.property
@jsii.member(jsii_name="description")
def description(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "description"))
@builtins.property
@jsii.member(jsii_name="edgeSecurityPolicy")
def edge_security_policy(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "edgeSecurityPolicy"))
@builtins.property
@jsii.member(jsii_name="enableCdn")
def enable_cdn(self) -> _cdktf_9a9027ec.IResolvable:
return typing.cast(_cdktf_9a9027ec.IResolvable, jsii.get(self, "enableCdn"))
@builtins.property
@jsii.member(jsii_name="selfLink")
def self_link(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "selfLink"))
@builtins.property
@jsii.member(jsii_name="idInput")
def id_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))
@builtins.property
@jsii.member(jsii_name="nameInput")
def name_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "nameInput"))
@builtins.property
@jsii.member(jsii_name="projectInput")
def project_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "projectInput"))
@builtins.property
@jsii.member(jsii_name="id")
def id(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "id"))
@id.setter
def id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__b6a22cd18df0dba19e69fe8a379c4e0658349edce581213c23c99cb3b51c8942)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "id", value)
@builtins.property
@jsii.member(jsii_name="name")
def name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "name"))
@name.setter
def name(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__d6debde651242a4adf84b623db3628c598aca6db019eaac30a670df305bc2c9b)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "name", value)
@builtins.property
@jsii.member(jsii_name="project")
def project(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "project"))
@project.setter
def project(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__2dda25237b5fbd74d7855095295247b7270a46087a93f84e4ebb807233646e5b)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "project", value)
@jsii.data_type(
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicy",
jsii_struct_bases=[],
name_mapping={},
)
class DataGoogleComputeBackendBucketCdnPolicy:
def __init__(self) -> None:
self._values: typing.Dict[builtins.str, typing.Any] = {}
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DataGoogleComputeBackendBucketCdnPolicy(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders",
jsii_struct_bases=[],
name_mapping={},
)
class DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders:
def __init__(self) -> None:
self._values: typing.Dict[builtins.str, typing.Any] = {}
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersList(
_cdktf_9a9027ec.ComplexList,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersList",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__0f88e1d48bd0ffc24d4e5bed5b920b7e0ab9d467d09807dcd50699bd249dc63d)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set])
@jsii.member(jsii_name="get")
def get(
self,
index: jsii.Number,
) -> "DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersOutputReference":
'''
:param index: the index of the item to return.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__32a39f280d472a517269c1da5e3bdda9e2427cd760f2c2d84d868af172699800)
check_type(argname="argument index", value=index, expected_type=type_hints["index"])
return typing.cast("DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersOutputReference", jsii.invoke(self, "get", [index]))
@builtins.property
@jsii.member(jsii_name="terraformAttribute")
def _terraform_attribute(self) -> builtins.str:
'''The attribute on the parent resource this class is referencing.'''
return typing.cast(builtins.str, jsii.get(self, "terraformAttribute"))
@_terraform_attribute.setter
def _terraform_attribute(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__da1058e09fce98c13e32a996b0dfbe7fc9201de948b1f0eee8123c1f69862f70)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformAttribute", value)
@builtins.property
@jsii.member(jsii_name="terraformResource")
def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent:
'''The parent resource.'''
return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource"))
@_terraform_resource.setter
def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__eb6638fb56c6f2da087116161005f39333d7e42d84e1d8f0022b3c24aaa53f0e)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformResource", value)
@builtins.property
@jsii.member(jsii_name="wrapsSet")
def _wraps_set(self) -> builtins.bool:
'''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).'''
return typing.cast(builtins.bool, jsii.get(self, "wrapsSet"))
@_wraps_set.setter
def _wraps_set(self, value: builtins.bool) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__2527ee1e3a2b121cb84c824e5aa9ea99a73ae6343dbe73f891ff3bde4c5374ef)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "wrapsSet", value)
class DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersOutputReference(
_cdktf_9a9027ec.ComplexObject,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersOutputReference",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param complex_object_index: the index of this item in the list.
:param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__3766b6296fc8de8e0a780c1a6e184cf8fc1023f2cb5085ab5e34ddbf36adcb79)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"])
check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set])
@builtins.property
@jsii.member(jsii_name="headerName")
def header_name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "headerName"))
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders]:
return typing.cast(typing.Optional[DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__7270fa3e6098f8abf2a5999d5868f1468ec17488e116d5e9324a15371a9f2038)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
@jsii.data_type(
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicy",
jsii_struct_bases=[],
name_mapping={},
)
class DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicy:
def __init__(self) -> None:
self._values: typing.Dict[builtins.str, typing.Any] = {}
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicy(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyList(
_cdktf_9a9027ec.ComplexList,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyList",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__a7f1ec33f91b932a6dd8167e079b48c0e2980ba8b40b18c5eb5496fa8f576a58)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set])
@jsii.member(jsii_name="get")
def get(
self,
index: jsii.Number,
) -> "DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyOutputReference":
'''
:param index: the index of the item to return.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__2827e0b92ff34b8bb718a6a71a5b4ca3c836af7b2c4f381b0630b395db2fb2d1)
check_type(argname="argument index", value=index, expected_type=type_hints["index"])
return typing.cast("DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyOutputReference", jsii.invoke(self, "get", [index]))
@builtins.property
@jsii.member(jsii_name="terraformAttribute")
def _terraform_attribute(self) -> builtins.str:
'''The attribute on the parent resource this class is referencing.'''
return typing.cast(builtins.str, jsii.get(self, "terraformAttribute"))
@_terraform_attribute.setter
def _terraform_attribute(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__ebec2df0bbfe3d8d405acd2fc28af7b63b311056fff366efd12c63d5bfe8a875)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformAttribute", value)
@builtins.property
@jsii.member(jsii_name="terraformResource")
def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent:
'''The parent resource.'''
return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource"))
@_terraform_resource.setter
def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__3101edd741c50c0a882e446082ce868f20c568fc2ffbac4cfc4f8269dbc26551)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformResource", value)
@builtins.property
@jsii.member(jsii_name="wrapsSet")
def _wraps_set(self) -> builtins.bool:
'''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).'''
return typing.cast(builtins.bool, jsii.get(self, "wrapsSet"))
@_wraps_set.setter
def _wraps_set(self, value: builtins.bool) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__80db7427130ab9a83ba02502d0c9f1c64790ff69827a747d37bf8dfd06d1925b)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "wrapsSet", value)
class DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyOutputReference(
_cdktf_9a9027ec.ComplexObject,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyOutputReference",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param complex_object_index: the index of this item in the list.
:param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__9fbecce0ae31eba21d9ca0f0bfaf547c84da8c2b5f7e0cfe6e2ee2a2a5aaeae8)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"])
check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set])
@builtins.property
@jsii.member(jsii_name="includeHttpHeaders")
def include_http_headers(self) -> typing.List[builtins.str]:
return typing.cast(typing.List[builtins.str], jsii.get(self, "includeHttpHeaders"))
@builtins.property
@jsii.member(jsii_name="queryStringWhitelist")
def query_string_whitelist(self) -> typing.List[builtins.str]:
return typing.cast(typing.List[builtins.str], jsii.get(self, "queryStringWhitelist"))
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicy]:
return typing.cast(typing.Optional[DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicy], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicy],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__6e2d1f6044c81cb79f9f53cb38097aa74488ff998af3f9c9f24df6f0c598dc22)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
class DataGoogleComputeBackendBucketCdnPolicyList(
_cdktf_9a9027ec.ComplexList,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyList",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__08c40e3c165a72b76716c483d8d8c33ed263cdcca26fd900a7125ecb1b250433)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set])
@jsii.member(jsii_name="get")
def get(
self,
index: jsii.Number,
) -> "DataGoogleComputeBackendBucketCdnPolicyOutputReference":
'''
:param index: the index of the item to return.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__3eb7a7c17abb9f0adc717b7986dac132589496c0b686474b94ee4a80130ab3ec)
check_type(argname="argument index", value=index, expected_type=type_hints["index"])
return typing.cast("DataGoogleComputeBackendBucketCdnPolicyOutputReference", jsii.invoke(self, "get", [index]))
@builtins.property
@jsii.member(jsii_name="terraformAttribute")
def _terraform_attribute(self) -> builtins.str:
'''The attribute on the parent resource this class is referencing.'''
return typing.cast(builtins.str, jsii.get(self, "terraformAttribute"))
@_terraform_attribute.setter
def _terraform_attribute(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__0c7b1ab6644ad22f1b3bcaf696e1116d4fe1a266ca12147a0daf1dc1d076faad)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformAttribute", value)
@builtins.property
@jsii.member(jsii_name="terraformResource")
def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent:
'''The parent resource.'''
return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource"))
@_terraform_resource.setter
def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__51e2a82395744216b6ed4d42f01bcfb925cbdd8a5e760964ab9e7006e781d319)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformResource", value)
@builtins.property
@jsii.member(jsii_name="wrapsSet")
def _wraps_set(self) -> builtins.bool:
'''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).'''
return typing.cast(builtins.bool, jsii.get(self, "wrapsSet"))
@_wraps_set.setter
def _wraps_set(self, value: builtins.bool) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__38e075b42167bd974dd39a222b5be9698f5b7932aa97b89a5c381f3bdf72a1cc)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "wrapsSet", value)
@jsii.data_type(
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicy",
jsii_struct_bases=[],
name_mapping={},
)
class DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicy:
def __init__(self) -> None:
self._values: typing.Dict[builtins.str, typing.Any] = {}
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicy(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyList(
_cdktf_9a9027ec.ComplexList,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyList",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__4d65f87cd6884c7fcfd51702991cd87d3ee09f7b3c0298a7ddd35a1acb58a5b0)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set])
@jsii.member(jsii_name="get")
def get(
self,
index: jsii.Number,
) -> "DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyOutputReference":
'''
:param index: the index of the item to return.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__076eaad8d3ab0411d906772e2a15c98d9af6bafba7179af993b97adca80b31b8)
check_type(argname="argument index", value=index, expected_type=type_hints["index"])
return typing.cast("DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyOutputReference", jsii.invoke(self, "get", [index]))
@builtins.property
@jsii.member(jsii_name="terraformAttribute")
def _terraform_attribute(self) -> builtins.str:
'''The attribute on the parent resource this class is referencing.'''
return typing.cast(builtins.str, jsii.get(self, "terraformAttribute"))
@_terraform_attribute.setter
def _terraform_attribute(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__7a2b88f7b0fa4802bbf142ab63e97c2b2460c68c6a3370b7167e76840e8b0cd0)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformAttribute", value)
@builtins.property
@jsii.member(jsii_name="terraformResource")
def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent:
'''The parent resource.'''
return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource"))
@_terraform_resource.setter
def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__9fe2c0e191aaec1220aa2bef11b7a05e40373d6963a1f4eca7a97f6b55da52e2)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "terraformResource", value)
@builtins.property
@jsii.member(jsii_name="wrapsSet")
def _wraps_set(self) -> builtins.bool:
'''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).'''
return typing.cast(builtins.bool, jsii.get(self, "wrapsSet"))
@_wraps_set.setter
def _wraps_set(self, value: builtins.bool) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__f0e54f02ce87b92bc77455bf43ff0c0998f65856f7c64a54b35ae93dc9d6d0ad)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "wrapsSet", value)
class DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyOutputReference(
_cdktf_9a9027ec.ComplexObject,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyOutputReference",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param complex_object_index: the index of this item in the list.
:param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__b67bd092e66a01a938b52a11e2183176341aefaf6199bfe04dd1ff0cb569208a)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"])
check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set])
@builtins.property
@jsii.member(jsii_name="code")
def code(self) -> jsii.Number:
return typing.cast(jsii.Number, jsii.get(self, "code"))
@builtins.property
@jsii.member(jsii_name="ttl")
def ttl(self) -> jsii.Number:
return typing.cast(jsii.Number, jsii.get(self, "ttl"))
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicy]:
return typing.cast(typing.Optional[DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicy], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicy],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__c226f19ec2ede6f82f30614444aac9ed64136b2bc2ed97165b5305c638882bfe)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
class DataGoogleComputeBackendBucketCdnPolicyOutputReference(
_cdktf_9a9027ec.ComplexObject,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketCdnPolicyOutputReference",
):
def __init__(
self,
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
:param complex_object_index: the index of this item in the list.
:param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__762fcd72d0257e36ec7fe0c548e82f809477859e2d051b158d255687f161b0fd)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"])
check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set])
@builtins.property
@jsii.member(jsii_name="bypassCacheOnRequestHeaders")
def bypass_cache_on_request_headers(
self,
) -> DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersList:
return typing.cast(DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersList, jsii.get(self, "bypassCacheOnRequestHeaders"))
@builtins.property
@jsii.member(jsii_name="cacheKeyPolicy")
def cache_key_policy(
self,
) -> DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyList:
return typing.cast(DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyList, jsii.get(self, "cacheKeyPolicy"))
@builtins.property
@jsii.member(jsii_name="cacheMode")
def cache_mode(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "cacheMode"))
@builtins.property
@jsii.member(jsii_name="clientTtl")
def client_ttl(self) -> jsii.Number:
return typing.cast(jsii.Number, jsii.get(self, "clientTtl"))
@builtins.property
@jsii.member(jsii_name="defaultTtl")
def default_ttl(self) -> jsii.Number:
return typing.cast(jsii.Number, jsii.get(self, "defaultTtl"))
@builtins.property
@jsii.member(jsii_name="maxTtl")
def max_ttl(self) -> jsii.Number:
return typing.cast(jsii.Number, jsii.get(self, "maxTtl"))
@builtins.property
@jsii.member(jsii_name="negativeCaching")
def negative_caching(self) -> _cdktf_9a9027ec.IResolvable:
return typing.cast(_cdktf_9a9027ec.IResolvable, jsii.get(self, "negativeCaching"))
@builtins.property
@jsii.member(jsii_name="negativeCachingPolicy")
def negative_caching_policy(
self,
) -> DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyList:
return typing.cast(DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyList, jsii.get(self, "negativeCachingPolicy"))
@builtins.property
@jsii.member(jsii_name="requestCoalescing")
def request_coalescing(self) -> _cdktf_9a9027ec.IResolvable:
return typing.cast(_cdktf_9a9027ec.IResolvable, jsii.get(self, "requestCoalescing"))
@builtins.property
@jsii.member(jsii_name="serveWhileStale")
def serve_while_stale(self) -> jsii.Number:
return typing.cast(jsii.Number, jsii.get(self, "serveWhileStale"))
@builtins.property
@jsii.member(jsii_name="signedUrlCacheMaxAgeSec")
def signed_url_cache_max_age_sec(self) -> jsii.Number:
return typing.cast(jsii.Number, jsii.get(self, "signedUrlCacheMaxAgeSec"))
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[DataGoogleComputeBackendBucketCdnPolicy]:
return typing.cast(typing.Optional[DataGoogleComputeBackendBucketCdnPolicy], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[DataGoogleComputeBackendBucketCdnPolicy],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__11c41232513a7f1588e8c6ad37d0547aa179507ffa928b08ac7f3b63e2044e31)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
@jsii.data_type(
jsii_type="@cdktf/provider-google-beta.dataGoogleComputeBackendBucket.DataGoogleComputeBackendBucketConfig",
jsii_struct_bases=[_cdktf_9a9027ec.TerraformMetaArguments],
name_mapping={
"connection": "connection",
"count": "count",
"depends_on": "dependsOn",
"for_each": "forEach",
"lifecycle": "lifecycle",
"provider": "provider",
"provisioners": "provisioners",
"name": "name",
"id": "id",
"project": "project",
},
)
class DataGoogleComputeBackendBucketConfig(_cdktf_9a9027ec.TerraformMetaArguments):
def __init__(
self,
*,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
name: builtins.str,
id: typing.Optional[builtins.str] = None,
project: typing.Optional[builtins.str] = None,
) -> None:
'''
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
        :param name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, comply with RFC1035, and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?', which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#name DataGoogleComputeBackendBucket#name}
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#id DataGoogleComputeBackendBucket#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param project: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#project DataGoogleComputeBackendBucket#project}.
'''
if isinstance(lifecycle, dict):
lifecycle = _cdktf_9a9027ec.TerraformResourceLifecycle(**lifecycle)
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__a67f81f703400afd0cc78d3b69e0f17c5537e392521f6f55bbfa0babe51193df)
check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"])
check_type(argname="argument count", value=count, expected_type=type_hints["count"])
check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"])
check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"])
check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"])
check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"])
check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"])
check_type(argname="argument name", value=name, expected_type=type_hints["name"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument project", value=project, expected_type=type_hints["project"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"name": name,
}
if connection is not None:
self._values["connection"] = connection
if count is not None:
self._values["count"] = count
if depends_on is not None:
self._values["depends_on"] = depends_on
if for_each is not None:
self._values["for_each"] = for_each
if lifecycle is not None:
self._values["lifecycle"] = lifecycle
if provider is not None:
self._values["provider"] = provider
if provisioners is not None:
self._values["provisioners"] = provisioners
if id is not None:
self._values["id"] = id
if project is not None:
self._values["project"] = project
@builtins.property
def connection(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]]:
'''
:stability: experimental
'''
result = self._values.get("connection")
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]], result)
@builtins.property
def count(
self,
) -> typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]]:
'''
:stability: experimental
'''
result = self._values.get("count")
return typing.cast(typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]], result)
@builtins.property
def depends_on(
self,
) -> typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]]:
'''
:stability: experimental
'''
result = self._values.get("depends_on")
return typing.cast(typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]], result)
@builtins.property
def for_each(self) -> typing.Optional[_cdktf_9a9027ec.ITerraformIterator]:
'''
:stability: experimental
'''
result = self._values.get("for_each")
return typing.cast(typing.Optional[_cdktf_9a9027ec.ITerraformIterator], result)
@builtins.property
def lifecycle(self) -> typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle]:
'''
:stability: experimental
'''
result = self._values.get("lifecycle")
return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle], result)
@builtins.property
def provider(self) -> typing.Optional[_cdktf_9a9027ec.TerraformProvider]:
'''
:stability: experimental
'''
result = self._values.get("provider")
return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformProvider], result)
@builtins.property
def provisioners(
self,
) -> typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]]:
'''
:stability: experimental
'''
result = self._values.get("provisioners")
return typing.cast(typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]], result)
@builtins.property
def name(self) -> builtins.str:
'''Name of the resource.
Provided by the client when the resource is
        created. The name must be 1-63 characters long, comply with
        RFC1035, and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?', which means
the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the
last character, which cannot be a dash.
Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#name DataGoogleComputeBackendBucket#name}
'''
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def id(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#id DataGoogleComputeBackendBucket#id}.
Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2.
If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
'''
result = self._values.get("id")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def project(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/google-beta/4.80.0/docs/data-sources/google_compute_backend_bucket#project DataGoogleComputeBackendBucket#project}.'''
result = self._values.get("project")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DataGoogleComputeBackendBucketConfig(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"DataGoogleComputeBackendBucket",
"DataGoogleComputeBackendBucketCdnPolicy",
"DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders",
"DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersList",
"DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeadersOutputReference",
"DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicy",
"DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyList",
"DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicyOutputReference",
"DataGoogleComputeBackendBucketCdnPolicyList",
"DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicy",
"DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyList",
"DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicyOutputReference",
"DataGoogleComputeBackendBucketCdnPolicyOutputReference",
"DataGoogleComputeBackendBucketConfig",
]
publication.publish()
def _typecheckingstub__7e2d78d18436131ea7e1711059d0d67d8353315190a6e754a1750bf022e5cc20(
scope: _constructs_77d1e7e8.Construct,
id_: builtins.str,
*,
name: builtins.str,
id: typing.Optional[builtins.str] = None,
project: typing.Optional[builtins.str] = None,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__b6a22cd18df0dba19e69fe8a379c4e0658349edce581213c23c99cb3b51c8942(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__d6debde651242a4adf84b623db3628c598aca6db019eaac30a670df305bc2c9b(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__2dda25237b5fbd74d7855095295247b7270a46087a93f84e4ebb807233646e5b(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__0f88e1d48bd0ffc24d4e5bed5b920b7e0ab9d467d09807dcd50699bd249dc63d(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__32a39f280d472a517269c1da5e3bdda9e2427cd760f2c2d84d868af172699800(
index: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__da1058e09fce98c13e32a996b0dfbe7fc9201de948b1f0eee8123c1f69862f70(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__eb6638fb56c6f2da087116161005f39333d7e42d84e1d8f0022b3c24aaa53f0e(
value: _cdktf_9a9027ec.IInterpolatingParent,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__2527ee1e3a2b121cb84c824e5aa9ea99a73ae6343dbe73f891ff3bde4c5374ef(
value: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__3766b6296fc8de8e0a780c1a6e184cf8fc1023f2cb5085ab5e34ddbf36adcb79(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__7270fa3e6098f8abf2a5999d5868f1468ec17488e116d5e9324a15371a9f2038(
value: typing.Optional[DataGoogleComputeBackendBucketCdnPolicyBypassCacheOnRequestHeaders],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__a7f1ec33f91b932a6dd8167e079b48c0e2980ba8b40b18c5eb5496fa8f576a58(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__2827e0b92ff34b8bb718a6a71a5b4ca3c836af7b2c4f381b0630b395db2fb2d1(
index: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__ebec2df0bbfe3d8d405acd2fc28af7b63b311056fff366efd12c63d5bfe8a875(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__3101edd741c50c0a882e446082ce868f20c568fc2ffbac4cfc4f8269dbc26551(
value: _cdktf_9a9027ec.IInterpolatingParent,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__80db7427130ab9a83ba02502d0c9f1c64790ff69827a747d37bf8dfd06d1925b(
value: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__9fbecce0ae31eba21d9ca0f0bfaf547c84da8c2b5f7e0cfe6e2ee2a2a5aaeae8(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__6e2d1f6044c81cb79f9f53cb38097aa74488ff998af3f9c9f24df6f0c598dc22(
value: typing.Optional[DataGoogleComputeBackendBucketCdnPolicyCacheKeyPolicy],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__08c40e3c165a72b76716c483d8d8c33ed263cdcca26fd900a7125ecb1b250433(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__3eb7a7c17abb9f0adc717b7986dac132589496c0b686474b94ee4a80130ab3ec(
index: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__0c7b1ab6644ad22f1b3bcaf696e1116d4fe1a266ca12147a0daf1dc1d076faad(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__51e2a82395744216b6ed4d42f01bcfb925cbdd8a5e760964ab9e7006e781d319(
value: _cdktf_9a9027ec.IInterpolatingParent,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__38e075b42167bd974dd39a222b5be9698f5b7932aa97b89a5c381f3bdf72a1cc(
value: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__4d65f87cd6884c7fcfd51702991cd87d3ee09f7b3c0298a7ddd35a1acb58a5b0(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
wraps_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__076eaad8d3ab0411d906772e2a15c98d9af6bafba7179af993b97adca80b31b8(
index: jsii.Number,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__7a2b88f7b0fa4802bbf142ab63e97c2b2460c68c6a3370b7167e76840e8b0cd0(
value: builtins.str,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__9fe2c0e191aaec1220aa2bef11b7a05e40373d6963a1f4eca7a97f6b55da52e2(
value: _cdktf_9a9027ec.IInterpolatingParent,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__f0e54f02ce87b92bc77455bf43ff0c0998f65856f7c64a54b35ae93dc9d6d0ad(
value: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__b67bd092e66a01a938b52a11e2183176341aefaf6199bfe04dd1ff0cb569208a(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__c226f19ec2ede6f82f30614444aac9ed64136b2bc2ed97165b5305c638882bfe(
value: typing.Optional[DataGoogleComputeBackendBucketCdnPolicyNegativeCachingPolicy],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__762fcd72d0257e36ec7fe0c548e82f809477859e2d051b158d255687f161b0fd(
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
terraform_attribute: builtins.str,
complex_object_index: jsii.Number,
complex_object_is_from_set: builtins.bool,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__11c41232513a7f1588e8c6ad37d0547aa179507ffa928b08ac7f3b63e2044e31(
value: typing.Optional[DataGoogleComputeBackendBucketCdnPolicy],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__a67f81f703400afd0cc78d3b69e0f17c5537e392521f6f55bbfa0babe51193df(
*,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
name: builtins.str,
id: typing.Optional[builtins.str] = None,
project: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass
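# ----------------------------------------------------------------------------
# Usage sketch (not part of the generated bindings). A minimal, hypothetical
# example of looking up an existing backend bucket with the data source class
# defined above; it assumes `cdktf` and `constructs` are installed alongside
# this provider package, and the bucket/project names are placeholders. In a
# real stack a GoogleBetaProvider would also be configured.
#
#     from constructs import Construct
#     from cdktf import App, TerraformStack
#
#     class BackendBucketLookupStack(TerraformStack):
#         def __init__(self, scope: Construct, id: str) -> None:
#             super().__init__(scope, id)
#             bucket = DataGoogleComputeBackendBucket(
#                 self, "lookup",
#                 name="my-backend-bucket",     # placeholder bucket name
#                 project="my-gcp-project",     # placeholder project id
#             )
#             # Computed attributes (for example bucket.cdn_policy) can then be
#             # interpolated into other resources in the stack.
#
#     app = App()
#     BackendBucketLookupStack(app, "example")
#     app.synth()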
/parameter-sherpa-1.0.6.tar.gz/parameter-sherpa-1.0.6/sherpa/app/static/lib/d3.parcoords.js
d3.parcoords = function(config) {
var __ = {
data: [],
highlighted: [],
dimensions: {},
dimensionTitleRotation: 0,
brushed: false,
brushedColor: null,
alphaOnBrushed: 0.0,
mode: "default",
rate: 20,
width: 600,
height: 300,
margin: { top: 24, right: 0, bottom: 12, left: 0 },
nullValueSeparator: "undefined", // set to "top" or "bottom"
nullValueSeparatorPadding: { top: 8, right: 0, bottom: 8, left: 0 },
color: "#069",
composite: "source-over",
alpha: 0.7,
bundlingStrength: 0.5,
bundleDimension: null,
smoothness: 0.0,
showControlPoints: false,
hideAxis : [],
flipAxes: [],
animationTime: 1100, // How long it takes to flip the axis when you double click
rotateLabels: false
};
extend(__, config);
if (config && config.dimensionTitles) {
console.warn("dimensionTitles passed in config is deprecated. Add title to dimension object.");
d3.entries(config.dimensionTitles).forEach(function(d) {
if (__.dimensions[d.key]) {
__.dimensions[d.key].title = __.dimensions[d.key].title ? __.dimensions[d.key].title : d.value;
} else {
__.dimensions[d.key] = {
title: d.value
};
}
});
}
var pc = function(selection) {
selection = pc.selection = d3.select(selection);
__.width = selection[0][0].clientWidth;
__.height = selection[0][0].clientHeight;
// canvas data layers
["marks", "foreground", "brushed", "highlight"].forEach(function(layer) {
canvas[layer] = selection
.append("canvas")
.attr("class", layer)[0][0];
ctx[layer] = canvas[layer].getContext("2d");
});
// svg tick and brush layers
pc.svg = selection
.append("svg")
.attr("width", __.width)
.attr("height", __.height)
.style("font", "14px sans-serif")
.style("position", "absolute")
.append("svg:g")
.attr("transform", "translate(" + __.margin.left + "," + __.margin.top + ")");
return pc;
};
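  // Usage sketch (illustrative comment only): the returned `pc` function is
  // applied to a container selector, after which data is bound and the chart
  // rendered. `#example` and `rows` are placeholders for the caller's
  // container and row objects.
  //
  //   var chart = d3.parcoords({ alpha: 0.4 })("#example")
  //       .data(rows)
  //       .render()
  //       .createAxes();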
var events = d3.dispatch.apply(this,["render", "resize", "highlight", "brush", "brushend", "brushstart", "axesreorder"].concat(d3.keys(__))),
w = function() { return __.width - __.margin.right - __.margin.left; },
h = function() { return __.height - __.margin.top - __.margin.bottom; },
flags = {
brushable: false,
reorderable: false,
axes: false,
interactive: false,
debug: false
},
xscale = d3.scale.ordinal(),
dragging = {},
line = d3.svg.line(),
axis = d3.svg.axis().orient("left").ticks(5),
g, // groups for axes, brushes
ctx = {},
canvas = {},
clusterCentroids = [];
// side effects for setters
var side_effects = d3.dispatch.apply(this,d3.keys(__))
.on("composite", function(d) {
ctx.foreground.globalCompositeOperation = d.value;
ctx.brushed.globalCompositeOperation = d.value;
})
.on("alpha", function(d) {
ctx.foreground.globalAlpha = d.value;
ctx.brushed.globalAlpha = d.value;
})
.on("brushedColor", function (d) {
ctx.brushed.strokeStyle = d.value;
})
.on("width", function(d) { pc.resize(); })
.on("height", function(d) { pc.resize(); })
.on("margin", function(d) { pc.resize(); })
.on("rate", function(d) {
brushedQueue.rate(d.value);
foregroundQueue.rate(d.value);
})
.on("dimensions", function(d) {
__.dimensions = pc.applyDimensionDefaults(d3.keys(d.value));
xscale.domain(pc.getOrderedDimensionKeys());
pc.sortDimensions();
if (flags.interactive){pc.render().updateAxes();}
})
.on("bundleDimension", function(d) {
if (!d3.keys(__.dimensions).length) pc.detectDimensions();
pc.autoscale();
if (typeof d.value === "number") {
if (d.value < d3.keys(__.dimensions).length) {
__.bundleDimension = __.dimensions[d.value];
} else if (d.value < __.hideAxis.length) {
__.bundleDimension = __.hideAxis[d.value];
}
} else {
__.bundleDimension = d.value;
}
__.clusterCentroids = compute_cluster_centroids(__.bundleDimension);
if (flags.interactive){pc.render();}
})
.on("hideAxis", function(d) {
pc.dimensions(pc.applyDimensionDefaults());
pc.dimensions(without(__.dimensions, d.value));
})
.on("flipAxes", function(d) {
if (d.value && d.value.length) {
d.value.forEach(function(axis) {
flipAxisAndUpdatePCP(axis);
});
pc.updateAxes(0);
}
});
// expose the state of the chart
pc.state = __;
pc.flags = flags;
// create getter/setters
getset(pc, __, events);
// expose events
d3.rebind(pc, events, "on");
// getter/setter with event firing
function getset(obj,state,events) {
d3.keys(state).forEach(function(key) {
obj[key] = function(x) {
if (!arguments.length) {
return state[key];
}
if (key === 'dimensions' && Object.prototype.toString.call(x) === '[object Array]') {
console.warn("pc.dimensions([]) is deprecated, use pc.dimensions({})");
x = pc.applyDimensionDefaults(x);
}
var old = state[key];
state[key] = x;
side_effects[key].call(pc,{"value": x, "previous": old});
events[key].call(pc,{"value": x, "previous": old});
return obj;
};
});
};
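  // Each generated accessor doubles as getter and setter and returns `pc` for
  // chaining; setting a value also fires the matching side effect and event.
  // Illustrative sketch (after the chart has been attached to a container, so
  // the canvas contexts exist):
  //
  //   chart.alpha(0.3).color("#f00");  // setters, return the chart
  //   chart.alpha();                   // getter -> 0.3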
function extend(target, source) {
for (var key in source) {
target[key] = source[key];
}
return target;
};
function without(arr, items) {
items.forEach(function (el) {
delete arr[el];
});
return arr;
};
/** adjusts an axis' default range [h()+1, 1] if a NullValueSeparator is set */
function getRange() {
if (__.nullValueSeparator=="bottom") {
return [h()+1-__.nullValueSeparatorPadding.bottom-__.nullValueSeparatorPadding.top, 1];
} else if (__.nullValueSeparator=="top") {
return [h()+1, 1+__.nullValueSeparatorPadding.bottom+__.nullValueSeparatorPadding.top];
}
return [h()+1, 1];
};
pc.autoscale = function() {
// yscale
var defaultScales = {
"date": function(k) {
var extent = d3.extent(__.data, function(d) {
return d[k] ? d[k].getTime() : null;
});
// special case if single value
if (extent[0] === extent[1]) {
return d3.scale.ordinal()
.domain([extent[0]])
.rangePoints(getRange());
}
return d3.time.scale()
.domain(extent)
.range(getRange());
},
"number": function(k) {
var extent = d3.extent(__.data, function(d) { return +d[k]; });
// special case if single value
if (extent[0] === extent[1]) {
return d3.scale.ordinal()
.domain([extent[0]])
.rangePoints(getRange());
}
return d3.scale.linear()
.domain(extent)
.range(getRange());
},
"string": function(k) {
var counts = {},
domain = [];
// Let's get the count for each value so that we can sort the domain based
// on the number of items for each value.
      __.data.forEach(function(p) {
if (p[k] === undefined && __.nullValueSeparator!== "undefined"){
return; // null values will be drawn beyond the horizontal null value separator!
}
if (counts[p[k]] === undefined) {
counts[p[k]] = 1;
} else {
counts[p[k]] = counts[p[k]] + 1;
}
});
domain = Object.getOwnPropertyNames(counts).sort(function(a, b) {
return counts[a] - counts[b];
});
return d3.scale.ordinal()
.domain(domain)
.rangePoints(getRange());
}
};
d3.keys(__.dimensions).forEach(function(k) {
if (!__.dimensions[k].yscale){
__.dimensions[k].yscale = defaultScales[__.dimensions[k].type](k);
}
});
// xscale
xscale.rangePoints([0, w()], 1);
// Retina display, etc.
var devicePixelRatio = window.devicePixelRatio || 1;
// canvas sizes
pc.selection.selectAll("canvas")
.style("margin-top", __.margin.top + "px")
.style("margin-left", __.margin.left + "px")
.style("width", (w()+2) + "px")
.style("height", (h()+2) + "px")
.attr("width", (w()+2) * devicePixelRatio)
.attr("height", (h()+2) * devicePixelRatio);
// default styles, needs to be set when canvas width changes
ctx.foreground.strokeStyle = __.color;
ctx.foreground.lineWidth = 1.4;
ctx.foreground.globalCompositeOperation = __.composite;
ctx.foreground.globalAlpha = __.alpha;
ctx.foreground.scale(devicePixelRatio, devicePixelRatio);
ctx.brushed.strokeStyle = __.brushedColor;
ctx.brushed.lineWidth = 1.4;
ctx.brushed.globalCompositeOperation = __.composite;
ctx.brushed.globalAlpha = __.alpha;
ctx.brushed.scale(devicePixelRatio, devicePixelRatio);
ctx.highlight.lineWidth = 3;
ctx.highlight.scale(devicePixelRatio, devicePixelRatio);
return this;
};
pc.scale = function(d, domain) {
__.dimensions[d].yscale.domain(domain);
return this;
};
pc.flip = function(d) {
//__.dimensions[d].yscale.domain().reverse(); // does not work
__.dimensions[d].yscale.domain(__.dimensions[d].yscale.domain().reverse()); // works
return this;
};
pc.commonScale = function(global, type) {
var t = type || "number";
if (typeof global === 'undefined') {
global = true;
}
// try to autodetect dimensions and create scales
if (!d3.keys(__.dimensions).length) {
pc.detectDimensions()
}
pc.autoscale();
// scales of the same type
var scales = d3.keys(__.dimensions).filter(function(p) {
return __.dimensions[p].type == t;
});
if (global) {
var extent = d3.extent(scales.map(function(d,i) {
return __.dimensions[d].yscale.domain();
}).reduce(function(a,b) {
return a.concat(b);
}));
scales.forEach(function(d) {
__.dimensions[d].yscale.domain(extent);
});
} else {
    scales.forEach(function(k) {
      __.dimensions[k].yscale.domain(d3.extent(__.data, function(d) { return +d[k]; }));
    });
}
// update centroids
if (__.bundleDimension !== null) {
pc.bundleDimension(__.bundleDimension);
}
return this;
};
pc.detectDimensions = function() {
pc.dimensions(pc.applyDimensionDefaults());
return this;
};
pc.applyDimensionDefaults = function(dims) {
var types = pc.detectDimensionTypes(__.data);
dims = dims ? dims : d3.keys(types);
var newDims = {};
var currIndex = 0;
dims.forEach(function(k) {
newDims[k] = __.dimensions[k] ? __.dimensions[k] : {};
//Set up defaults
newDims[k].orient= newDims[k].orient ? newDims[k].orient : 'left';
newDims[k].ticks= newDims[k].ticks != null ? newDims[k].ticks : 5;
newDims[k].innerTickSize= newDims[k].innerTickSize != null ? newDims[k].innerTickSize : 6;
newDims[k].outerTickSize= newDims[k].outerTickSize != null ? newDims[k].outerTickSize : 0;
newDims[k].tickPadding= newDims[k].tickPadding != null ? newDims[k].tickPadding : 3;
newDims[k].type= newDims[k].type ? newDims[k].type : types[k];
newDims[k].index = newDims[k].index != null ? newDims[k].index : currIndex;
currIndex++;
});
return newDims;
};
pc.getOrderedDimensionKeys = function(){
return d3.keys(__.dimensions).sort(function(x, y){
return d3.ascending(__.dimensions[x].index, __.dimensions[y].index);
});
};
// a better "typeof" from this post: http://stackoverflow.com/questions/7390426/better-way-to-get-type-of-a-javascript-variable
pc.toType = function(v) {
return ({}).toString.call(v).match(/\s([a-zA-Z]+)/)[1].toLowerCase();
};
// try to coerce to number before returning type
pc.toTypeCoerceNumbers = function(v) {
if ((parseFloat(v) == v) && (v != null)) {
return "number";
}
return pc.toType(v);
};
// attempt to determine types of each dimension based on first row of data
pc.detectDimensionTypes = function(data) {
var types = {};
d3.keys(data[0])
.forEach(function(col) {
types[isNaN(Number(col)) ? col : parseInt(col)] = pc.toTypeCoerceNumbers(data[0][col]);
});
return types;
};
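  // Illustrative sketch of the detection above: for a first data row such as
  //   { name: "setosa", width: "3.5", measured: new Date() }
  // the returned map is roughly
  //   { name: "string", width: "number", measured: "date" }
  // (column keys that themselves look numeric are coerced with parseInt).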
pc.render = function() {
// try to autodetect dimensions and create scales
if (!d3.keys(__.dimensions).length) {
pc.detectDimensions()
}
pc.autoscale();
pc.render[__.mode]();
events.render.call(this);
return this;
};
pc.renderBrushed = function() {
if (!d3.keys(__.dimensions).length) pc.detectDimensions();
pc.renderBrushed[__.mode]();
events.render.call(this);
return this;
};
function isBrushed() {
if (__.brushed && __.brushed.length !== __.data.length)
return true;
var object = brush.currentMode().brushState();
for (var key in object) {
if (object.hasOwnProperty(key)) {
return true;
}
}
return false;
};
pc.render.default = function() {
pc.clear('foreground');
pc.clear('highlight');
pc.renderBrushed.default();
__.data.forEach(path_foreground);
};
var foregroundQueue = d3.renderQueue(path_foreground)
.rate(50)
.clear(function() {
pc.clear('foreground');
pc.clear('highlight');
});
pc.render.queue = function() {
pc.renderBrushed.queue();
foregroundQueue(__.data);
};
pc.renderBrushed.default = function() {
pc.clear('brushed');
if (isBrushed()) {
__.brushed.forEach(path_brushed);
}
};
var brushedQueue = d3.renderQueue(path_brushed)
.rate(50)
.clear(function() {
pc.clear('brushed');
});
pc.renderBrushed.queue = function() {
if (isBrushed()) {
brushedQueue(__.brushed);
} else {
brushedQueue([]); // This is needed to clear the currently brushed items
}
};
function compute_cluster_centroids(d) {
var clusterCentroids = d3.map();
var clusterCounts = d3.map();
// determine clusterCounts
__.data.forEach(function(row) {
var scaled = __.dimensions[d].yscale(row[d]);
if (!clusterCounts.has(scaled)) {
clusterCounts.set(scaled, 0);
}
var count = clusterCounts.get(scaled);
clusterCounts.set(scaled, count + 1);
});
__.data.forEach(function(row) {
d3.keys(__.dimensions).map(function(p, i) {
var scaled = __.dimensions[d].yscale(row[d]);
if (!clusterCentroids.has(scaled)) {
var map = d3.map();
clusterCentroids.set(scaled, map);
}
if (!clusterCentroids.get(scaled).has(p)) {
clusterCentroids.get(scaled).set(p, 0);
}
var value = clusterCentroids.get(scaled).get(p);
value += __.dimensions[p].yscale(row[p]) / clusterCounts.get(scaled);
clusterCentroids.get(scaled).set(p, value);
});
});
return clusterCentroids;
}
function compute_centroids(row) {
var centroids = [];
var p = d3.keys(__.dimensions);
var cols = p.length;
var a = 0.5; // center between axes
for (var i = 0; i < cols; ++i) {
// centroids on 'real' axes
var x = position(p[i]);
var y = __.dimensions[p[i]].yscale(row[p[i]]);
centroids.push($V([x, y]));
// centroids on 'virtual' axes
if (i < cols - 1) {
var cx = x + a * (position(p[i+1]) - x);
var cy = y + a * (__.dimensions[p[i+1]].yscale(row[p[i+1]]) - y);
if (__.bundleDimension !== null) {
var leftCentroid = __.clusterCentroids.get(__.dimensions[__.bundleDimension].yscale(row[__.bundleDimension])).get(p[i]);
var rightCentroid = __.clusterCentroids.get(__.dimensions[__.bundleDimension].yscale(row[__.bundleDimension])).get(p[i+1]);
var centroid = 0.5 * (leftCentroid + rightCentroid);
cy = centroid + (1 - __.bundlingStrength) * (cy - centroid);
}
centroids.push($V([cx, cy]));
}
}
return centroids;
}
pc.compute_real_centroids = function(row) {
var realCentroids = [];
var p = d3.keys(__.dimensions);
var cols = p.length;
var a = 0.5;
for (var i = 0; i < cols; ++i) {
var x = position(p[i]);
var y = __.dimensions[p[i]].yscale(row[p[i]]);
realCentroids.push([x, y]);
}
return realCentroids;
}
function compute_control_points(centroids) {
var cols = centroids.length;
var a = __.smoothness;
var cps = [];
cps.push(centroids[0]);
cps.push($V([centroids[0].e(1) + a*2*(centroids[1].e(1)-centroids[0].e(1)), centroids[0].e(2)]));
for (var col = 1; col < cols - 1; ++col) {
var mid = centroids[col];
var left = centroids[col - 1];
var right = centroids[col + 1];
var diff = left.subtract(right);
cps.push(mid.add(diff.x(a)));
cps.push(mid);
cps.push(mid.subtract(diff.x(a)));
}
cps.push($V([centroids[cols-1].e(1) + a*2*(centroids[cols-2].e(1)-centroids[cols-1].e(1)), centroids[cols-1].e(2)]));
cps.push(centroids[cols - 1]);
return cps;
  };
  pc.shadows = function() {
flags.shadows = true;
pc.alphaOnBrushed(0.1);
pc.render();
return this;
};
// draw dots with radius r on the axis line where data intersects
pc.axisDots = function(r) {
var r = r || 0.1;
var ctx = pc.ctx.marks;
var startAngle = 0;
var endAngle = 2 * Math.PI;
ctx.globalAlpha = d3.min([ 1 / Math.pow(__.data.length, 1 / 2), 1 ]);
__.data.forEach(function(d) {
d3.entries(__.dimensions).forEach(function(p, i) {
ctx.beginPath();
        ctx.arc(position(p.key), __.dimensions[p.key].yscale(d[p.key]), r, startAngle, endAngle);
ctx.stroke();
ctx.fill();
});
});
return this;
};
// draw single cubic bezier curve
function single_curve(d, ctx) {
var centroids = compute_centroids(d);
var cps = compute_control_points(centroids);
ctx.moveTo(cps[0].e(1), cps[0].e(2));
for (var i = 1; i < cps.length; i += 3) {
if (__.showControlPoints) {
for (var j = 0; j < 3; j++) {
ctx.fillRect(cps[i+j].e(1), cps[i+j].e(2), 2, 2);
}
}
ctx.bezierCurveTo(cps[i].e(1), cps[i].e(2), cps[i+1].e(1), cps[i+1].e(2), cps[i+2].e(1), cps[i+2].e(2));
}
};
// draw single polyline
function color_path(d, ctx) {
ctx.beginPath();
if ((__.bundleDimension !== null && __.bundlingStrength > 0) || __.smoothness > 0) {
single_curve(d, ctx);
} else {
single_path(d, ctx);
}
ctx.stroke();
};
// draw many polylines of the same color
function paths(data, ctx) {
ctx.clearRect(-1, -1, w() + 2, h() + 2);
ctx.beginPath();
data.forEach(function(d) {
if ((__.bundleDimension !== null && __.bundlingStrength > 0) || __.smoothness > 0) {
single_curve(d, ctx);
} else {
single_path(d, ctx);
}
});
ctx.stroke();
};
// returns the y-position just beyond the separating null value line
function getNullPosition() {
if (__.nullValueSeparator=="bottom") {
return h()+1;
} else if (__.nullValueSeparator=="top") {
return 1;
} else {
console.log("A value is NULL, but nullValueSeparator is not set; set it to 'bottom' or 'top'.");
}
return h()+1;
};
function single_path(d, ctx) {
    d3.entries(__.dimensions).forEach(function(p, i) { // p is a {key, value} entry from d3.entries, not the bare dimension key
if (i == 0) {
ctx.moveTo(position(p.key), typeof d[p.key] =='undefined' ? getNullPosition() : __.dimensions[p.key].yscale(d[p.key]));
} else {
ctx.lineTo(position(p.key), typeof d[p.key] =='undefined' ? getNullPosition() : __.dimensions[p.key].yscale(d[p.key]));
}
});
};
function path_brushed(d, i) {
if (__.brushedColor !== null) {
ctx.brushed.strokeStyle = d3.functor(__.brushedColor)(d, i);
} else {
ctx.brushed.strokeStyle = d3.functor(__.color)(d, i);
}
return color_path(d, ctx.brushed)
};
function path_foreground(d, i) {
ctx.foreground.strokeStyle = d3.functor(__.color)(d, i);
return color_path(d, ctx.foreground);
};
function path_highlight(d, i) {
ctx.highlight.strokeStyle = d3.functor(__.color)(d, i);
return color_path(d, ctx.highlight);
};
pc.clear = function(layer) {
ctx[layer].clearRect(0, 0, w() + 2, h() + 2);
// This will make sure that the foreground items are transparent
// without the need for changing the opacity style of the foreground canvas
// as this would stop the css styling from working
if(layer === "brushed" && isBrushed()) {
ctx.brushed.fillStyle = pc.selection.style("background-color");
ctx.brushed.globalAlpha = 1 - __.alphaOnBrushed;
ctx.brushed.fillRect(0, 0, w() + 2, h() + 2);
ctx.brushed.globalAlpha = __.alpha;
}
return this;
};
d3.rebind(pc, axis, "ticks", "orient", "tickValues", "tickSubdivide", "tickSize", "tickPadding", "tickFormat");
function flipAxisAndUpdatePCP(dimension) {
var g = pc.svg.selectAll(".dimension");
pc.flip(dimension);
d3.select(this.parentElement)
.transition()
.duration(__.animationTime)
.call(axis.scale(__.dimensions[dimension].yscale))
.call(axis.orient(__.dimensions[dimension].orient))
.call(axis.ticks(__.dimensions[dimension].ticks))
.call(axis.innerTickSize(__.dimensions[dimension].innerTickSize))
.call(axis.outerTickSize(__.dimensions[dimension].outerTickSize))
.call(axis.tickPadding(__.dimensions[dimension].tickPadding))
.call(axis.tickFormat(__.dimensions[dimension].tickFormat));
pc.render();
}
function rotateLabels() {
if (!__.rotateLabels) return;
var delta = d3.event.deltaY;
delta = delta < 0 ? -5 : delta;
delta = delta > 0 ? 5 : delta;
__.dimensionTitleRotation += delta;
pc.svg.selectAll("text.label")
.attr("transform", "translate(0,-5) rotate(" + __.dimensionTitleRotation + ")");
d3.event.preventDefault();
}
function dimensionLabels(d) {
return __.dimensions[d].title ? __.dimensions[d].title : d; // dimension display names
}
pc.createAxes = function() {
if (g) pc.removeAxes();
// Add a group element for each dimension.
g = pc.svg.selectAll(".dimension")
.data(pc.getOrderedDimensionKeys(), function(d) {
return d;
})
.enter().append("svg:g")
.attr("class", "dimension")
.attr("transform", function(d) {
return "translate(" + xscale(d) + ")";
});
// Add an axis and title.
g.append("svg:g")
.attr("class", "axis")
.attr("transform", "translate(0,0)")
.each(function(d) {
var axisElement = d3.select(this).call( pc.applyAxisConfig(axis, __.dimensions[d]) );
axisElement.selectAll("path")
.style("fill", "none")
.style("stroke", "#222")
.style("shape-rendering", "crispEdges");
axisElement.selectAll("line")
.style("fill", "none")
.style("stroke", "#222")
.style("shape-rendering", "crispEdges");
})
.append("svg:text")
.attr({
"text-anchor": "middle",
"y": 0,
"transform": "translate(0,-5) rotate(" + __.dimensionTitleRotation + ")",
"x": 0,
"class": "label"
})
.text(dimensionLabels)
.on("dblclick", flipAxisAndUpdatePCP)
.on("wheel", rotateLabels);
if (__.nullValueSeparator=="top") {
pc.svg.append("line")
.attr("x1", 0)
.attr("y1", 1+__.nullValueSeparatorPadding.top)
.attr("x2", w())
.attr("y2", 1+__.nullValueSeparatorPadding.top)
.attr("stroke-width", 1)
.attr("stroke", "#777")
.attr("fill", "none")
.attr("shape-rendering", "crispEdges");
} else if (__.nullValueSeparator=="bottom") {
pc.svg.append("line")
.attr("x1", 0)
.attr("y1", h()+1-__.nullValueSeparatorPadding.bottom)
.attr("x2", w())
.attr("y2", h()+1-__.nullValueSeparatorPadding.bottom)
.attr("stroke-width", 1)
.attr("stroke", "#777")
.attr("fill", "none")
.attr("shape-rendering", "crispEdges");
}
flags.axes= true;
return this;
};
pc.removeAxes = function() {
g.remove();
g = undefined;
return this;
};
pc.updateAxes = function(animationTime) {
if (typeof animationTime === 'undefined') {
animationTime = __.animationTime;
}
var g_data = pc.svg.selectAll(".dimension").data(pc.getOrderedDimensionKeys());
// Enter
g_data.enter().append("svg:g")
.attr("class", "dimension")
.attr("transform", function(p) { return "translate(" + position(p) + ")"; })
.style("opacity", 0)
.append("svg:g")
.attr("class", "axis")
.attr("transform", "translate(0,0)")
.each(function(d) {
var axisElement = d3.select(this).call( pc.applyAxisConfig(axis, __.dimensions[d]) );
axisElement.selectAll("path")
.style("fill", "none")
.style("stroke", "#222")
.style("shape-rendering", "crispEdges");
axisElement.selectAll("line")
.style("fill", "none")
.style("stroke", "#222")
.style("shape-rendering", "crispEdges");
})
.append("svg:text")
.attr({
"text-anchor": "middle",
"y": 0,
"transform": "translate(0,-5) rotate(" + __.dimensionTitleRotation + ")",
"x": 0,
"class": "label"
})
.text(dimensionLabels)
.on("dblclick", flipAxisAndUpdatePCP)
.on("wheel", rotateLabels);
// Update
g_data.attr("opacity", 0);
g_data.select(".axis")
.transition()
.duration(animationTime)
.each(function(d) { d3.select(this).call( pc.applyAxisConfig(axis, __.dimensions[d]) )
});
g_data.select(".label")
.transition()
.duration(animationTime)
.text(dimensionLabels)
.attr("transform", "translate(0,-5) rotate(" + __.dimensionTitleRotation + ")");
// Exit
g_data.exit().remove();
g = pc.svg.selectAll(".dimension");
g.transition().duration(animationTime)
.attr("transform", function(p) { return "translate(" + position(p) + ")"; })
.style("opacity", 1);
pc.svg.selectAll(".axis")
.transition()
.duration(animationTime)
.each(function(d) { d3.select(this).call( pc.applyAxisConfig(axis, __.dimensions[d]) );
});
if (flags.brushable) pc.brushable();
if (flags.reorderable) pc.reorderable();
if (pc.brushMode() !== "None") {
var mode = pc.brushMode();
pc.brushMode("None");
pc.brushMode(mode);
}
return this;
};
pc.applyAxisConfig = function(axis, dimension) {
return axis.scale(dimension.yscale)
.orient(dimension.orient)
.ticks(dimension.ticks)
.tickValues(dimension.tickValues)
.innerTickSize(dimension.innerTickSize)
.outerTickSize(dimension.outerTickSize)
.tickPadding(dimension.tickPadding)
.tickFormat(dimension.tickFormat)
};
// Jason Davies, http://bl.ocks.org/1341281
pc.reorderable = function() {
if (!g) pc.createAxes();
g.style("cursor", "move")
.call(d3.behavior.drag()
.on("dragstart", function(d) {
dragging[d] = this.__origin__ = xscale(d);
})
.on("drag", function(d) {
dragging[d] = Math.min(w(), Math.max(0, this.__origin__ += d3.event.dx));
pc.sortDimensions();
xscale.domain(pc.getOrderedDimensionKeys());
pc.render();
g.attr("transform", function(d) {
return "translate(" + position(d) + ")";
});
})
.on("dragend", function(d) {
// Let's see if the order has changed and send out an event if so.
var i = 0,
j = __.dimensions[d].index,
elem = this,
parent = this.parentElement;
while((elem = elem.previousElementSibling) != null) ++i;
if (i !== j) {
events.axesreorder.call(pc, pc.getOrderedDimensionKeys());
// We now also want to reorder the actual dom elements that represent
// the axes. That is, the g.dimension elements. If we don't do this,
// we get a weird and confusing transition when updateAxes is called.
// This is due to the fact that, initially, the nth g.dimension element
// represents the nth axis. However, after a manual reordering
// without reordering the dom elements, the nth dom element no longer
// necessarily represents the nth axis.
//
// i is the original index of the dom element
// j is the new index of the dom element
if (i > j) { // Element moved left
parent.insertBefore(this, parent.children[j - 1]);
} else { // Element moved right
if ((j + 1) < parent.children.length) {
parent.insertBefore(this, parent.children[j + 1]);
} else {
parent.appendChild(this);
}
}
}
delete this.__origin__;
delete dragging[d];
d3.select(this).transition().attr("transform", "translate(" + xscale(d) + ")");
pc.render();
}));
flags.reorderable = true;
return this;
};
// Reorder dimensions, such that the highest value (visually) is on the left and
// the lowest on the right. Visual values are determined by the data values in
// the given row.
pc.reorder = function(rowdata) {
var firstDim = pc.getOrderedDimensionKeys()[0];
pc.sortDimensionsByRowData(rowdata);
// NOTE: comparing the order is relatively cheap, given that the
// number of dimensions is much smaller than the number of data items.
// Thus we check for equality of order to prevent re-rendering when nothing changed.
var reordered = false;
reordered = firstDim !== pc.getOrderedDimensionKeys()[0];
if (reordered) {
xscale.domain(pc.getOrderedDimensionKeys());
var highlighted = __.highlighted.slice(0);
pc.unhighlight();
g.transition()
.duration(1500)
.attr("transform", function(d) {
return "translate(" + xscale(d) + ")";
});
pc.render();
// pc.highlight() does not check whether highlighted is length zero, so we do that here.
if (highlighted.length !== 0) {
pc.highlight(highlighted);
}
}
}
pc.sortDimensionsByRowData = function(rowdata) {
var copy = __.dimensions;
var positionSortedKeys = d3.keys(__.dimensions).sort(function(a, b) {
var pixelDifference = __.dimensions[a].yscale(rowdata[a]) - __.dimensions[b].yscale(rowdata[b]);
// Array.sort is not necessarily stable, this means that if pixelDifference is zero
// the ordering of dimensions might change unexpectedly. This is solved by sorting on
// variable name in that case.
if (pixelDifference === 0) {
return a.localeCompare(b);
} // else
return pixelDifference;
});
__.dimensions = {};
positionSortedKeys.forEach(function(p, i){
__.dimensions[p] = copy[p];
__.dimensions[p].index = i;
});
}
pc.sortDimensions = function() {
var copy = __.dimensions;
var positionSortedKeys = d3.keys(__.dimensions).sort(function(a, b) {
return position(a) - position(b);
});
__.dimensions = {};
positionSortedKeys.forEach(function(p, i){
__.dimensions[p] = copy[p];
__.dimensions[p].index = i;
})
};
// pairs of adjacent dimensions
pc.adjacent_pairs = function(arr) {
var ret = [];
for (var i = 0; i < arr.length-1; i++) {
ret.push([arr[i],arr[i+1]]);
};
return ret;
};
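// For example (sketch): pc.adjacent_pairs(["a", "b", "c"]) returns [["a","b"], ["b","c"]].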
var brush = {
modes: {
"None": {
install: function(pc) {}, // Nothing to be done.
uninstall: function(pc) {}, // Nothing to be done.
selected: function() { return []; }, // Nothing to return
brushState: function() { return {}; }
}
},
mode: "None",
predicate: "AND",
currentMode: function() {
return this.modes[this.mode];
}
};
// This function can be used for 'live' updates of brushes. That is, during the
// specification of a brush, this method can be called to update the view.
//
// @param newSelection - The new set of data items that is currently contained
// by the brushes
function brushUpdated(newSelection) {
__.brushed = newSelection;
events.brush.call(pc,__.brushed);
pc.renderBrushed();
}
function brushPredicate(predicate) {
if (!arguments.length) { return brush.predicate; }
predicate = String(predicate).toUpperCase();
if (predicate !== "AND" && predicate !== "OR") {
throw new Error("Invalid predicate " + predicate);
}
brush.predicate = predicate;
__.brushed = brush.currentMode().selected();
pc.renderBrushed();
return pc;
}
pc.brushModes = function() {
return Object.getOwnPropertyNames(brush.modes);
};
pc.brushMode = function(mode) {
if (arguments.length === 0) {
return brush.mode;
}
if (pc.brushModes().indexOf(mode) === -1) {
throw new Error("pc.brushmode: Unsupported brush mode: " + mode);
}
// Make sure that we don't trigger unnecessary events by checking if the mode
// actually changes.
if (mode !== brush.mode) {
// When changing brush modes, the first thing we need to do is clearing any
// brushes from the current mode, if any.
if (brush.mode !== "None") {
pc.brushReset();
}
// Next, we need to 'uninstall' the current brushMode.
brush.modes[brush.mode].uninstall(pc);
// Finally, we can install the requested one.
brush.mode = mode;
brush.modes[brush.mode].install();
if (mode === "None") {
delete pc.brushPredicate;
} else {
pc.brushPredicate = brushPredicate;
}
}
return pc;
};
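// Example (sketch, assuming the chart has data and axes set up):
// pc.brushMode("1D-axes");   // installs pc.brushExtents, pc.brushReset and pc.brushPredicate
// pc.brushPredicate("OR");   // combine brushes across axes with OR instead of the default AND
// pc.brushReset();           // clear all brushes again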
// brush mode: 1D-Axes
(function() {
var brushes = {};
function is_brushed(p) {
return !brushes[p].empty();
}
// data within extents
function selected() {
var actives = d3.keys(__.dimensions).filter(is_brushed),
extents = actives.map(function(p) { return brushes[p].extent(); });
// We don't want to return the full data set when there are no axes brushed.
// Actually, when there are no axes brushed, by definition, no items are
// selected. So, let's avoid the filtering and just return false.
//if (actives.length === 0) return false;
// Resolves broken examples for now. They expect to get the full dataset back from empty brushes
if (actives.length === 0) return __.data;
// test if within range
var within = {
"date": function(d,p,dimension) {
if (typeof __.dimensions[p].yscale.rangePoints === "function") { // if it is ordinal
return extents[dimension][0] <= __.dimensions[p].yscale(d[p]) && __.dimensions[p].yscale(d[p]) <= extents[dimension][1]
} else {
return extents[dimension][0] <= d[p] && d[p] <= extents[dimension][1]
}
},
"number": function(d,p,dimension) {
if (typeof __.dimensions[p].yscale.rangePoints === "function") { // if it is ordinal
return extents[dimension][0] <= __.dimensions[p].yscale(d[p]) && __.dimensions[p].yscale(d[p]) <= extents[dimension][1]
} else {
return extents[dimension][0] <= d[p] && d[p] <= extents[dimension][1]
}
},
"string": function(d,p,dimension) {
return extents[dimension][0] <= __.dimensions[p].yscale(d[p]) && __.dimensions[p].yscale(d[p]) <= extents[dimension][1]
}
};
return __.data
.filter(function(d) {
switch(brush.predicate) {
case "AND":
return actives.every(function(p, dimension) {
return within[__.dimensions[p].type](d,p,dimension);
});
case "OR":
return actives.some(function(p, dimension) {
return within[__.dimensions[p].type](d,p,dimension);
});
default:
throw new Error("Unknown brush predicate " + __.brushPredicate);
}
});
};
function brushExtents(extents) {
if(typeof(extents) === 'undefined')
{
var extents = {};
d3.keys(__.dimensions).forEach(function(d) {
var brush = brushes[d];
if (brush !== undefined && !brush.empty()) {
var extent = brush.extent();
extent.sort(d3.ascending);
extents[d] = extent;
}
});
return extents;
}
else
{
//first get all the brush selections
var brushSelections = {};
g.selectAll('.brush')
.each(function(d) {
brushSelections[d] = d3.select(this);
});
// loop over each dimension and update appropriately (if it was passed in through extents)
d3.keys(__.dimensions).forEach(function(d) {
if (extents[d] === undefined){
return;
}
var brush = brushes[d];
if (brush !== undefined) {
//update the extent
brush.extent(extents[d]);
//redraw the brush
brushSelections[d]
.transition()
.duration(0)
.call(brush);
//fire some events
brush.event(brushSelections[d]);
}
});
//redraw the chart
pc.renderBrushed();
return pc;
}
}
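// Example (sketch; "weight" is a hypothetical dimension name):
// pc.brushExtents()                          // returns the current extents, e.g. { weight: [120, 180] }
// pc.brushExtents({ weight: [120, 180] });   // programmatically sets a brush and redraws it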
function brushFor(axis) {
var brush = d3.svg.brush();
brush
.y(__.dimensions[axis].yscale)
.on("brushstart", function() {
if(d3.event.sourceEvent !== null) {
events.brushstart.call(pc, __.brushed);
d3.event.sourceEvent.stopPropagation();
}
})
.on("brush", function() {
brushUpdated(selected());
})
.on("brushend", function() {
events.brushend.call(pc, __.brushed);
});
brushes[axis] = brush;
return brush;
};
function brushReset(dimension) {
if (dimension===undefined) {
__.brushed = false;
if (g) {
g.selectAll('.brush')
.each(function(d) {
d3.select(this)
.transition()
.duration(0)
.call(brushes[d].clear());
});
pc.renderBrushed();
}
}
else {
if (g) {
g.selectAll('.brush')
.each(function(d) {
if (d!=dimension) return;
d3.select(this)
.transition()
.duration(0)
.call(brushes[d].clear());
brushes[d].event(d3.select(this));
});
pc.renderBrushed();
}
}
return this;
};
function install() {
if (!g) pc.createAxes();
// Add and store a brush for each axis.
var brush = g.append("svg:g")
.attr("class", "brush")
.each(function(d) {
d3.select(this).call(brushFor(d));
});
brush.selectAll("rect")
.style("visibility", null)
.attr("x", -15)
.attr("width", 30);
brush.selectAll("rect.background")
.style("fill", "transparent");
brush.selectAll("rect.extent")
.style("fill", "rgba(255,255,255,0.25)")
.style("stroke", "rgba(0,0,0,0.6)");
brush.selectAll(".resize rect")
.style("fill", "rgba(0,0,0,0.1)");
pc.brushExtents = brushExtents;
pc.brushReset = brushReset;
return pc;
};
brush.modes["1D-axes"] = {
install: install,
uninstall: function() {
g.selectAll(".brush").remove();
brushes = {};
delete pc.brushExtents;
delete pc.brushReset;
},
selected: selected,
brushState: brushExtents
}
})();
// brush mode: 2D-strums
// bl.ocks.org/syntagmatic/5441022
(function() {
var strums = {},
strumRect;
function drawStrum(strum, activePoint) {
var svg = pc.selection.select("svg").select("g#strums"),
id = strum.dims.i,
points = [strum.p1, strum.p2],
line = svg.selectAll("line#strum-" + id).data([strum]),
circles = svg.selectAll("circle#strum-" + id).data(points),
drag = d3.behavior.drag();
line.enter()
.append("line")
.attr("id", "strum-" + id)
.attr("class", "strum");
line
.attr("x1", function(d) {
return d.p1[0]; })
.attr("y1", function(d) {
return d.p1[1]; })
.attr("x2", function(d) {
return d.p2[0]; })
.attr("y2", function(d) {
return d.p2[1]; })
.attr("stroke", "black")
.attr("stroke-width", 2);
drag
.on("drag", function(d, i) {
var ev = d3.event;
i = i + 1;
strum["p" + i][0] = Math.min(Math.max(strum.minX + 1, ev.x), strum.maxX);
strum["p" + i][1] = Math.min(Math.max(strum.minY, ev.y), strum.maxY);
drawStrum(strum, i - 1);
})
.on("dragend", onDragEnd());
circles.enter()
.append("circle")
.attr("id", "strum-" + id)
.attr("class", "strum");
circles
.attr("cx", function(d) { return d[0]; })
.attr("cy", function(d) { return d[1]; })
.attr("r", 5)
.style("opacity", function(d, i) {
return (activePoint !== undefined && i === activePoint) ? 0.8 : 0;
})
.on("mouseover", function() {
d3.select(this).style("opacity", 0.8);
})
.on("mouseout", function() {
d3.select(this).style("opacity", 0);
})
.call(drag);
}
function dimensionsForPoint(p) {
var dims = { i: -1, left: undefined, right: undefined };
d3.keys(__.dimensions).some(function(dim, i) {
if (xscale(dim) < p[0]) {
var next = d3.keys(__.dimensions)[pc.getOrderedDimensionKeys().indexOf(dim)+1];
dims.i = i;
dims.left = dim;
dims.right = next;
return false;
}
return true;
});
if (dims.left === undefined) {
// Event on the left side of the first axis.
dims.i = 0;
dims.left = pc.getOrderedDimensionKeys()[0];
dims.right = pc.getOrderedDimensionKeys()[1];
} else if (dims.right === undefined) {
// Event on the right side of the last axis
dims.i = d3.keys(__.dimensions).length - 1;
dims.right = dims.left;
dims.left = pc.getOrderedDimensionKeys()[d3.keys(__.dimensions).length - 2];
}
return dims;
}
function onDragStart() {
// First we need to determine between which two axes the strum was started.
// This will determine the freedom of movement, because a strum can
// logically only happen between two axes, so no movement outside these axes
// should be allowed.
return function() {
var p = d3.mouse(strumRect[0][0]),
dims,
strum;
p[0] = p[0] - __.margin.left;
p[1] = p[1] - __.margin.top;
dims = dimensionsForPoint(p),
strum = {
p1: p,
dims: dims,
minX: xscale(dims.left),
maxX: xscale(dims.right),
minY: 0,
maxY: h()
};
strums[dims.i] = strum;
strums.active = dims.i;
// Make sure that the point is within the bounds
strum.p1[0] = Math.min(Math.max(strum.minX, p[0]), strum.maxX);
strum.p2 = strum.p1.slice();
};
}
function onDrag() {
return function() {
var ev = d3.event,
strum = strums[strums.active];
// Make sure that the point is within the bounds
strum.p2[0] = Math.min(Math.max(strum.minX + 1, ev.x - __.margin.left), strum.maxX);
strum.p2[1] = Math.min(Math.max(strum.minY, ev.y - __.margin.top), strum.maxY);
drawStrum(strum, 1);
};
}
function containmentTest(strum, width) {
var p1 = [strum.p1[0] - strum.minX, strum.p1[1] - strum.minX],
p2 = [strum.p2[0] - strum.minX, strum.p2[1] - strum.minX],
m1 = 1 - width / p1[0],
b1 = p1[1] * (1 - m1),
m2 = 1 - width / p2[0],
b2 = p2[1] * (1 - m2);
// test if point falls between lines
return function(p) {
var x = p[0],
y = p[1],
y1 = m1 * x + b1,
y2 = m2 * x + b2;
if (y > Math.min(y1, y2) && y < Math.max(y1, y2)) {
return true;
}
return false;
};
}
function selected() {
var ids = Object.getOwnPropertyNames(strums),
brushed = __.data;
// Get the ids of the currently active strums.
ids = ids.filter(function(d) {
return !isNaN(d);
});
function crossesStrum(d, id) {
var strum = strums[id],
test = containmentTest(strum, strums.width(id)),
d1 = strum.dims.left,
d2 = strum.dims.right,
y1 = __.dimensions[d1].yscale,
y2 = __.dimensions[d2].yscale,
point = [y1(d[d1]) - strum.minX, y2(d[d2]) - strum.minX];
return test(point);
}
if (ids.length === 0) { return brushed; }
return brushed.filter(function(d) {
switch(brush.predicate) {
case "AND":
return ids.every(function(id) { return crossesStrum(d, id); });
case "OR":
return ids.some(function(id) { return crossesStrum(d, id); });
default:
throw new Error("Unknown brush predicate " + __.brushPredicate);
}
});
}
function removeStrum() {
var strum = strums[strums.active],
svg = pc.selection.select("svg").select("g#strums");
delete strums[strums.active];
strums.active = undefined;
svg.selectAll("line#strum-" + strum.dims.i).remove();
svg.selectAll("circle#strum-" + strum.dims.i).remove();
}
function onDragEnd() {
return function() {
var brushed = __.data,
strum = strums[strums.active];
// Okay, somewhat unexpected, but not totally surprising: a mouse click is
// considered a drag without a move, so we have to deal with that case.
if (strum && strum.p1[0] === strum.p2[0] && strum.p1[1] === strum.p2[1]) {
removeStrum(strums);
}
brushed = selected(strums);
strums.active = undefined;
__.brushed = brushed;
pc.renderBrushed();
events.brushend.call(pc, __.brushed);
};
}
function brushReset(strums) {
return function() {
var ids = Object.getOwnPropertyNames(strums).filter(function(d) {
return !isNaN(d);
});
ids.forEach(function(d) {
strums.active = d;
removeStrum(strums);
});
onDragEnd(strums)();
};
}
function install() {
var drag = d3.behavior.drag();
// Map of current strums. Strums are stored per segment of the PC, a segment
// being the area between two axes. The left-most area is indexed at 0.
strums.active = undefined;
// Returns the width of the PC segment where a strum is currently being
// placed. NOTE: even though segments are evenly spaced in the current
// implementation, we keep this for when non-evenly spaced segments are
// supported as well.
strums.width = function(id) {
var strum = strums[id];
if (strum === undefined) {
return undefined;
}
return strum.maxX - strum.minX;
};
pc.on("axesreorder.strums", function() {
var ids = Object.getOwnPropertyNames(strums).filter(function(d) {
return !isNaN(d);
});
// Checks if the first dimension is directly left of the second dimension.
function consecutive(first, second) {
var keys = d3.keys(__.dimensions);
return keys.some(function(d, i) {
return (d === first)
? i + 1 < keys.length && keys[i + 1] === second
: false;
});
}
if (ids.length > 0) { // We have some strums, which might need to be removed.
ids.forEach(function(d) {
var dims = strums[d].dims;
strums.active = d;
// If the two dimensions of the current strum are not next to each other
// any more, then we'll need to remove the strum. Otherwise we keep it.
if (!consecutive(dims.left, dims.right)) {
removeStrum(strums);
}
});
onDragEnd(strums)();
}
});
// Add a new svg group in which we draw the strums.
pc.selection.select("svg").append("g")
.attr("id", "strums")
.attr("transform", "translate(" + __.margin.left + "," + __.margin.top + ")");
// Install the required brushReset function
pc.brushReset = brushReset(strums);
drag
.on("dragstart", onDragStart(strums))
.on("drag", onDrag(strums))
.on("dragend", onDragEnd(strums));
// NOTE: The styling needs to be done here and not in the css. This is because
// for 1D brushing, the canvas layers should not listen to
// pointer-events.
strumRect = pc.selection.select("svg").insert("rect", "g#strums")
.attr("id", "strum-events")
.attr("x", __.margin.left)
.attr("y", __.margin.top)
.attr("width", w())
.attr("height", h() + 2)
.style("opacity", 0)
.call(drag);
}
brush.modes["2D-strums"] = {
install: install,
uninstall: function() {
pc.selection.select("svg").select("g#strums").remove();
pc.selection.select("svg").select("rect#strum-events").remove();
pc.on("axesreorder.strums", undefined);
delete pc.brushReset;
strumRect = undefined;
},
selected: selected,
brushState: function () { return strums; }
};
}());
// brush mode: 1D-Axes with multiple extents
// requires d3.svg.multibrush
(function() {
if (typeof d3.svg.multibrush !== 'function') {
return;
}
var brushes = {};
function is_brushed(p) {
return !brushes[p].empty();
}
// data within extents
function selected() {
var actives = d3.keys(__.dimensions).filter(is_brushed),
extents = actives.map(function(p) { return brushes[p].extent(); });
// We don't want to return the full data set when there are no axes brushed.
// Actually, when there are no axes brushed, by definition, no items are
// selected. So, let's avoid the filtering and just return false.
//if (actives.length === 0) return false;
// Resolves broken examples for now. They expect to get the full dataset back from empty brushes
if (actives.length === 0) return __.data;
// test if within range
var within = {
"date": function(d,p,dimension,b) {
if (typeof __.dimensions[p].yscale.rangePoints === "function") { // if it is ordinal
return b[0] <= __.dimensions[p].yscale(d[p]) && __.dimensions[p].yscale(d[p]) <= b[1]
} else {
return b[0] <= d[p] && d[p] <= b[1]
}
},
"number": function(d,p,dimension,b) {
if (typeof __.dimensions[p].yscale.rangePoints === "function") { // if it is ordinal
return b[0] <= __.dimensions[p].yscale(d[p]) && __.dimensions[p].yscale(d[p]) <= b[1]
} else {
return b[0] <= d[p] && d[p] <= b[1]
}
},
"string": function(d,p,dimension,b) {
return b[0] <= __.dimensions[p].yscale(d[p]) && __.dimensions[p].yscale(d[p]) <= b[1]
}
};
return __.data
.filter(function(d) {
switch(brush.predicate) {
case "AND":
return actives.every(function(p, dimension) {
return extents[dimension].some(function(b) {
return within[__.dimensions[p].type](d,p,dimension,b);
});
});
case "OR":
return actives.some(function(p, dimension) {
return extents[dimension].some(function(b) {
return within[__.dimensions[p].type](d,p,dimension,b);
});
});
default:
throw new Error("Unknown brush predicate " + __.brushPredicate);
}
});
};
function brushExtents(extents) {
if (typeof(extents) === 'undefined') {
extents = {};
d3.keys(__.dimensions).forEach(function (d) {
var brush = brushes[d];
if (brush !== undefined && !brush.empty()) {
var extent = brush.extent();
extents[d] = extent;
}
});
return extents;
}
else {
//first get all the brush selections
var brushSelections = {};
g.selectAll('.brush')
.each(function (d) {
brushSelections[d] = d3.select(this);
});
// loop over each dimension and update appropriately (if it was passed in through extents)
d3.keys(__.dimensions).forEach(function (d) {
if (extents[d] === undefined) {
return;
}
var brush = brushes[d];
if (brush !== undefined) {
//update the extent
brush.extent(extents[d]);
//redraw the brush
brushSelections[d]
.transition()
.duration(0)
.call(brush);
//fire some events
brush.event(brushSelections[d]);
}
});
//redraw the chart
pc.renderBrushed();
return pc;
}
}
//function brushExtents() {
// var extents = {};
// d3.keys(__.dimensions).forEach(function(d) {
// var brush = brushes[d];
// if (brush !== undefined && !brush.empty()) {
// var extent = brush.extent();
// extents[d] = extent;
// }
// });
// return extents;
//}
function brushFor(axis) {
var brush = d3.svg.multibrush();
brush
.y(__.dimensions[axis].yscale)
.on("brushstart", function() {
if(d3.event.sourceEvent !== null) {
events.brushstart.call(pc, __.brushed);
d3.event.sourceEvent.stopPropagation();
}
})
.on("brush", function() {
brushUpdated(selected());
})
.on("brushend", function() {
// d3.svg.multibrush clears extents just before calling 'brushend'
// so we have to update here again.
// This fixes issue #103 for now, but should be changed in d3.svg.multibrush
// to avoid unnecessary computation.
brushUpdated(selected());
events.brushend.call(pc, __.brushed);
})
.extentAdaption(function(selection) {
selection
.style("visibility", null)
.attr("x", -15)
.attr("width", 30)
.style("fill", "rgba(255,255,255,0.25)")
.style("stroke", "rgba(0,0,0,0.6)");
})
.resizeAdaption(function(selection) {
selection
.selectAll("rect")
.attr("x", -15)
.attr("width", 30)
.style("visibility", null)
.style("fill", "rgba(0,0,0,0.1)");
});
brushes[axis] = brush;
return brush;
}
function brushReset(dimension) {
__.brushed = false;
if (g) {
g.selectAll('.brush')
.each(function(d) {
d3.select(this).call(
brushes[d].clear()
);
});
pc.renderBrushed();
}
return this;
};
function install() {
if (!g) pc.createAxes();
// Add and store a brush for each axis.
var brush = g.append("svg:g")
.attr("class", "brush")
.each(function(d) {
d3.select(this).call(brushFor(d));
})
brush.selectAll("rect")
.style("visibility", null)
.attr("x", -15)
.attr("width", 30);
brush.selectAll("rect.background")
.style("fill", "transparent");
brush.selectAll("rect.extent")
.style("fill", "rgba(255,255,255,0.25)")
.style("stroke", "rgba(0,0,0,0.6)");
brush.selectAll(".resize rect")
.style("fill", "rgba(0,0,0,0.1)");
pc.brushExtents = brushExtents;
pc.brushReset = brushReset;
return pc;
}
brush.modes["1D-axes-multi"] = {
install: install,
uninstall: function() {
g.selectAll(".brush").remove();
brushes = {};
delete pc.brushExtents;
delete pc.brushReset;
},
selected: selected,
brushState: brushExtents
}
})();
// brush mode: angular
// code based on 2D.strums.js
(function() {
var arcs = {},
strumRect;
function drawStrum(arc, activePoint) {
var svg = pc.selection.select("svg").select("g#arcs"),
id = arc.dims.i,
points = [arc.p2, arc.p3],
line = svg.selectAll("line#arc-" + id).data([{p1:arc.p1,p2:arc.p2},{p1:arc.p1,p2:arc.p3}]),
circles = svg.selectAll("circle#arc-" + id).data(points),
drag = d3.behavior.drag(),
path = svg.selectAll("path#arc-" + id).data([arc]);
path.enter()
.append("path")
.attr("id", "arc-" + id)
.attr("class", "arc")
.style("fill", "orange")
.style("opacity", 0.5);
path
.attr("d", arc.arc)
.attr("transform", "translate(" + arc.p1[0] + "," + arc.p1[1] + ")");
line.enter()
.append("line")
.attr("id", "arc-" + id)
.attr("class", "arc");
line
.attr("x1", function(d) { return d.p1[0]; })
.attr("y1", function(d) { return d.p1[1]; })
.attr("x2", function(d) { return d.p2[0]; })
.attr("y2", function(d) { return d.p2[1]; })
.attr("stroke", "black")
.attr("stroke-width", 2);
drag
.on("drag", function(d, i) {
var ev = d3.event,
angle = 0;
i = i + 2;
arc["p" + i][0] = Math.min(Math.max(arc.minX + 1, ev.x), arc.maxX);
arc["p" + i][1] = Math.min(Math.max(arc.minY, ev.y), arc.maxY);
angle = i === 3 ? arcs.startAngle(id) : arcs.endAngle(id);
if ((arc.startAngle < Math.PI && arc.endAngle < Math.PI && angle < Math.PI) ||
(arc.startAngle >= Math.PI && arc.endAngle >= Math.PI && angle >= Math.PI)) {
if (i === 2) {
arc.endAngle = angle;
arc.arc.endAngle(angle);
} else if (i === 3) {
arc.startAngle = angle;
arc.arc.startAngle(angle);
}
}
drawStrum(arc, i - 2);
})
.on("dragend", onDragEnd());
circles.enter()
.append("circle")
.attr("id", "arc-" + id)
.attr("class", "arc");
circles
.attr("cx", function(d) { return d[0]; })
.attr("cy", function(d) { return d[1]; })
.attr("r", 5)
.style("opacity", function(d, i) {
return (activePoint !== undefined && i === activePoint) ? 0.8 : 0;
})
.on("mouseover", function() {
d3.select(this).style("opacity", 0.8);
})
.on("mouseout", function() {
d3.select(this).style("opacity", 0);
})
.call(drag);
}
function dimensionsForPoint(p) {
var dims = { i: -1, left: undefined, right: undefined };
d3.keys(__.dimensions).some(function(dim, i) {
if (xscale(dim) < p[0]) {
var next = d3.keys(__.dimensions)[pc.getOrderedDimensionKeys().indexOf(dim)+1];
dims.i = i;
dims.left = dim;
dims.right = next;
return false;
}
return true;
});
if (dims.left === undefined) {
// Event on the left side of the first axis.
dims.i = 0;
dims.left = pc.getOrderedDimensionKeys()[0];
dims.right = pc.getOrderedDimensionKeys()[1];
} else if (dims.right === undefined) {
// Event on the right side of the last axis
dims.i = d3.keys(__.dimensions).length - 1;
dims.right = dims.left;
dims.left = pc.getOrderedDimensionKeys()[d3.keys(__.dimensions).length - 2];
}
return dims;
}
function onDragStart() {
// First we need to determine between which two axes the arc was started.
// This will determine the freedom of movement, because a arc can
// logically only happen between two axes, so no movement outside these axes
// should be allowed.
return function() {
var p = d3.mouse(strumRect[0][0]),
dims,
arc;
p[0] = p[0] - __.margin.left;
p[1] = p[1] - __.margin.top;
dims = dimensionsForPoint(p),
arc = {
p1: p,
dims: dims,
minX: xscale(dims.left),
maxX: xscale(dims.right),
minY: 0,
maxY: h(),
startAngle: undefined,
endAngle: undefined,
arc: d3.svg.arc().innerRadius(0)
};
arcs[dims.i] = arc;
arcs.active = dims.i;
// Make sure that the point is within the bounds
arc.p1[0] = Math.min(Math.max(arc.minX, p[0]), arc.maxX);
arc.p2 = arc.p1.slice();
arc.p3 = arc.p1.slice();
};
}
function onDrag() {
return function() {
var ev = d3.event,
arc = arcs[arcs.active];
// Make sure that the point is within the bounds
arc.p2[0] = Math.min(Math.max(arc.minX + 1, ev.x - __.margin.left), arc.maxX);
arc.p2[1] = Math.min(Math.max(arc.minY, ev.y - __.margin.top), arc.maxY);
arc.p3 = arc.p2.slice();
// console.log(arcs.angle(arcs.active));
// console.log(signedAngle(arcs.unsignedAngle(arcs.active)));
drawStrum(arc, 1);
};
}
// some helper functions
function hypothenuse(a, b) {
return Math.sqrt(a*a + b*b);
}
var rad = (function() {
var c = Math.PI / 180;
return function(angle) {
return angle * c;
};
})();
var deg = (function() {
var c = 180 / Math.PI;
return function(angle) {
return angle * c;
};
})();
// [0, 2*PI] -> [-PI/2, PI/2]
var signedAngle = function(angle) {
var ret = angle;
if (angle > Math.PI) {
ret = angle - 1.5 * Math.PI;
} else {
ret = angle - 0.5 * Math.PI;
}
return -ret;
}
/**
* Angles are stored in radians in [0, 2*PI], where 0 is 12 o'clock.
* However, one can only select lines from 0 to PI, so we compute the
* 'signed' angle, where 0 is the horizontal line (3 o'clock), and +/- PI/2
* are 12 and 6 o'clock respectively.
*/
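// For example (sketch): signedAngle(0.5 * Math.PI) is 0 (3 o'clock),
// signedAngle(0) is 0.5 * Math.PI (12 o'clock) and signedAngle(Math.PI) is -0.5 * Math.PI (6 o'clock).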
function containmentTest(arc) {
var startAngle = signedAngle(arc.startAngle);
var endAngle = signedAngle(arc.endAngle);
if (startAngle > endAngle) {
var tmp = startAngle;
startAngle = endAngle;
endAngle = tmp;
}
// test if segment angle is contained in angle interval
return function(a) {
if (a >= startAngle && a <= endAngle) {
return true;
}
return false;
};
}
function selected() {
var ids = Object.getOwnPropertyNames(arcs),
brushed = __.data;
// Get the ids of the currently active arcs.
ids = ids.filter(function(d) {
return !isNaN(d);
});
function crossesStrum(d, id) {
var arc = arcs[id],
test = containmentTest(arc),
d1 = arc.dims.left,
d2 = arc.dims.right,
y1 = __.dimensions[d1].yscale,
y2 = __.dimensions[d2].yscale,
a = arcs.width(id),
b = y1(d[d1]) - y2(d[d2]),
c = hypothenuse(a, b),
angle = Math.asin(b/c); // rad in [-PI/2, PI/2]
return test(angle);
}
if (ids.length === 0) { return brushed; }
return brushed.filter(function(d) {
switch(brush.predicate) {
case "AND":
return ids.every(function(id) { return crossesStrum(d, id); });
case "OR":
return ids.some(function(id) { return crossesStrum(d, id); });
default:
throw new Error("Unknown brush predicate " + __.brushPredicate);
}
});
}
function removeStrum() {
var arc = arcs[arcs.active],
svg = pc.selection.select("svg").select("g#arcs");
delete arcs[arcs.active];
arcs.active = undefined;
svg.selectAll("line#arc-" + arc.dims.i).remove();
svg.selectAll("circle#arc-" + arc.dims.i).remove();
svg.selectAll("path#arc-" + arc.dims.i).remove();
}
function onDragEnd() {
return function() {
var brushed = __.data,
arc = arcs[arcs.active];
// Okay, somewhat unexpected, but not totally surprising: a mouse click is
// considered a drag without a move, so we have to deal with that case.
if (arc && arc.p1[0] === arc.p2[0] && arc.p1[1] === arc.p2[1]) {
removeStrum(arcs);
}
if (arc) {
var angle = arcs.startAngle(arcs.active);
arc.startAngle = angle;
arc.endAngle = angle;
arc.arc
.outerRadius(arcs.length(arcs.active))
.startAngle(angle)
.endAngle(angle);
}
brushed = selected(arcs);
arcs.active = undefined;
__.brushed = brushed;
pc.renderBrushed();
events.brushend.call(pc, __.brushed);
};
}
function brushReset(arcs) {
return function() {
var ids = Object.getOwnPropertyNames(arcs).filter(function(d) {
return !isNaN(d);
});
ids.forEach(function(d) {
arcs.active = d;
removeStrum(arcs);
});
onDragEnd(arcs)();
};
}
function install() {
var drag = d3.behavior.drag();
// Map of current arcs. Arcs are stored per segment of the PC, a segment
// being the area between two axes. The left-most area is indexed at 0.
arcs.active = undefined;
// Returns the width of the PC segment where an arc is currently being
// placed. NOTE: even though segments are evenly spaced in the current
// implementation, we keep this for when non-evenly spaced segments are
// supported as well.
arcs.width = function(id) {
var arc = arcs[id];
if (arc === undefined) {
return undefined;
}
return arc.maxX - arc.minX;
};
// returns angles in [-PI/2, PI/2]
var angle = function(p1, p2) {
var a = p1[0] - p2[0],
b = p1[1] - p2[1],
c = hypothenuse(a, b);
return Math.asin(b/c);
}
// returns angles in [0, 2 * PI]
arcs.endAngle = function(id) {
var arc = arcs[id];
if (arc === undefined) {
return undefined;
}
var sAngle = angle(arc.p1, arc.p2),
uAngle = -sAngle + Math.PI / 2;
if (arc.p1[0] > arc.p2[0]) {
uAngle = 2 * Math.PI - uAngle;
}
return uAngle;
}
arcs.startAngle = function(id) {
var arc = arcs[id];
if (arc === undefined) {
return undefined;
}
var sAngle = angle(arc.p1, arc.p3),
uAngle = -sAngle + Math.PI / 2;
if (arc.p1[0] > arc.p3[0]) {
uAngle = 2 * Math.PI - uAngle;
}
return uAngle;
}
arcs.length = function(id) {
var arc = arcs[id];
if (arc === undefined) {
return undefined;
}
var a = arc.p1[0] - arc.p2[0],
b = arc.p1[1] - arc.p2[1],
c = hypothenuse(a, b);
return(c);
}
pc.on("axesreorder.arcs", function() {
var ids = Object.getOwnPropertyNames(arcs).filter(function(d) {
return !isNaN(d);
});
// Checks if the first dimension is directly left of the second dimension.
function consecutive(first, second) {
var keys = d3.keys(__.dimensions);
return keys.some(function(d, i) {
return (d === first)
? i + 1 < keys.length && keys[i + 1] === second
: false;
});
}
if (ids.length > 0) { // We have some arcs, which might need to be removed.
ids.forEach(function(d) {
var dims = arcs[d].dims;
arcs.active = d;
// If the two dimensions of the current arc are not next to each other
// any more, then we'll need to remove the arc. Otherwise we keep it.
if (!consecutive(dims.left, dims.right)) {
removeStrum(arcs);
}
});
onDragEnd(arcs)();
}
});
// Add a new svg group in which we draw the arcs.
pc.selection.select("svg").append("g")
.attr("id", "arcs")
.attr("transform", "translate(" + __.margin.left + "," + __.margin.top + ")");
// Install the required brushReset function
pc.brushReset = brushReset(arcs);
drag
.on("dragstart", onDragStart(arcs))
.on("drag", onDrag(arcs))
.on("dragend", onDragEnd(arcs));
// NOTE: The styling needs to be done here and not in the css. This is because
// for 1D brushing, the canvas layers should not listen to
// pointer-events.
strumRect = pc.selection.select("svg").insert("rect", "g#arcs")
.attr("id", "arc-events")
.attr("x", __.margin.left)
.attr("y", __.margin.top)
.attr("width", w())
.attr("height", h() + 2)
.style("opacity", 0)
.call(drag);
}
brush.modes["angular"] = {
install: install,
uninstall: function() {
pc.selection.select("svg").select("g#arcs").remove();
pc.selection.select("svg").select("rect#arc-events").remove();
pc.on("axesreorder.arcs", undefined);
delete pc.brushReset;
strumRect = undefined;
},
selected: selected,
brushState: function () { return arcs; }
};
}());
pc.interactive = function() {
flags.interactive = true;
return this;
};
// expose a few objects
pc.xscale = xscale;
pc.ctx = ctx;
pc.canvas = canvas;
pc.g = function() { return g; };
// rescale for height, width and margins
// TODO currently assumes chart is brushable, and destroys old brushes
pc.resize = function() {
// selection size
pc.selection.select("svg")
.attr("width", __.width)
.attr("height", __.height)
pc.svg.attr("transform", "translate(" + __.margin.left + "," + __.margin.top + ")");
// FIXME: the current brush state should pass through
if (flags.brushable) pc.brushReset();
// scales
pc.autoscale();
// axes, destroys old brushes.
if (g) pc.createAxes();
if (flags.brushable) pc.brushable();
if (flags.reorderable) pc.reorderable();
events.resize.call(this, {width: __.width, height: __.height, margin: __.margin});
return this;
};
// highlight an array of data
pc.highlight = function(data) {
if (arguments.length === 0) {
return __.highlighted;
}
__.highlighted = data;
pc.clear("highlight");
d3.selectAll([canvas.foreground, canvas.brushed]).classed("faded", true);
data.forEach(path_highlight);
events.highlight.call(this, data);
return this;
};
// clear highlighting
pc.unhighlight = function() {
__.highlighted = [];
pc.clear("highlight");
d3.selectAll([canvas.foreground, canvas.brushed]).classed("faded", false);
return this;
};
// calculate 2d intersection of line a->b with line c->d
// points are objects with x and y properties
pc.intersection = function(a, b, c, d) {
return {
x: ((a.x * b.y - a.y * b.x) * (c.x - d.x) - (a.x - b.x) * (c.x * d.y - c.y * d.x)) / ((a.x - b.x) * (c.y - d.y) - (a.y - b.y) * (c.x - d.x)),
y: ((a.x * b.y - a.y * b.x) * (c.y - d.y) - (a.y - b.y) * (c.x * d.y - c.y * d.x)) / ((a.x - b.x) * (c.y - d.y) - (a.y - b.y) * (c.x - d.x))
};
};
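// For example (sketch): pc.intersection({x: 0, y: 0}, {x: 2, y: 2}, {x: 0, y: 2}, {x: 2, y: 0})
// yields {x: 1, y: 1}, the crossing point of the two diagonals.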
function position(d) {
if (xscale.range().length === 0) {
xscale.rangePoints([0, w()], 1);
}
var v = dragging[d];
return v == null ? xscale(d) : v;
}
// Merges the canvases and SVG elements into one canvas element which is then passed into the callback
// (so you can choose to save it to disk, etc.)
pc.mergeParcoords = function(callback) {
// Retina display, etc.
var devicePixelRatio = window.devicePixelRatio || 1;
// Create a canvas element to store the merged canvases
var mergedCanvas = document.createElement("canvas");
mergedCanvas.width = pc.canvas.foreground.clientWidth * devicePixelRatio
mergedCanvas.height = (pc.canvas.foreground.clientHeight + 30) * devicePixelRatio;
mergedCanvas.style.width = mergedCanvas.width / devicePixelRatio + "px";
mergedCanvas.style.height = mergedCanvas.height / devicePixelRatio + "px";
// Give the canvas a white background
var context = mergedCanvas.getContext("2d");
context.fillStyle = "#ffffff";
context.fillRect(0, 0, mergedCanvas.width, mergedCanvas.height);
// Merge all the canvases
for (var key in pc.canvas) {
context.drawImage(pc.canvas[key], 0, 24 * devicePixelRatio, mergedCanvas.width, mergedCanvas.height - 30 * devicePixelRatio);
}
// Add SVG elements to canvas
var DOMURL = window.URL || window.webkitURL || window;
var serializer = new XMLSerializer();
var svgStr = serializer.serializeToString(pc.selection.select("svg")[0][0]);
// Create a Data URI.
var src = 'data:image/svg+xml;base64,' + window.btoa(svgStr);
var img = new Image();
img.onload = function () {
context.drawImage(img, 0, 0, img.width * devicePixelRatio, img.height * devicePixelRatio);
if (typeof callback === "function") {
callback(mergedCanvas);
}
};
img.src = src;
}
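// Example (sketch): export the merged chart as a PNG data URL.
// pc.mergeParcoords(function(canvas) {
//   var href = canvas.toDataURL("image/png"); // e.g. assign to a download link
// });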
pc.version = "0.7.0";
// this descriptive text should live with other introspective methods
pc.toString = function() { return "Parallel Coordinates: " + d3.keys(__.dimensions).length + " dimensions (" + d3.keys(__.data[0]).length + " total) , " + __.data.length + " rows"; };
return pc;
};
d3.renderQueue = (function(func) {
var _queue = [], // data to be rendered
_rate = 10, // number of calls per frame
_clear = function() {}, // clearing function
_i = 0; // current iteration
var rq = function(data) {
if (data) rq.data(data);
rq.invalidate();
_clear();
rq.render();
};
rq.render = function() {
_i = 0;
var valid = true;
rq.invalidate = function() { valid = false; };
function doFrame() {
if (!valid) return true;
if (_i > _queue.length) return true;
// Typical d3 behavior is to pass a data item *and* its index. As the
// render queue splits the original data set, we'll have to be slightly
// more careful about passing the correct index with the data item.
var end = Math.min(_i + _rate, _queue.length);
for (var i = _i; i < end; i++) {
func(_queue[i], i);
}
_i += _rate;
}
d3.timer(doFrame);
};
rq.data = function(data) {
rq.invalidate();
_queue = data.slice(0);
return rq;
};
rq.rate = function(value) {
if (!arguments.length) return _rate;
_rate = value;
return rq;
};
rq.remaining = function() {
return _queue.length - _i;
};
// clear the canvas
rq.clear = function(func) {
if (!arguments.length) {
_clear();
return rq;
}
_clear = func;
return rq;
};
rq.invalidate = function() {};
return rq;
});
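// Example (sketch): incrementally render a large dataset without blocking the UI.
// var drawItem = function(d, i) { /* draw a single data item, e.g. one polyline, on a canvas */ };
// var queue = d3.renderQueue(drawItem)
//   .rate(100)                                   // items rendered per timer tick
//   .clear(function() { /* wipe the canvas */ });
// queue(data);                                   // queue the data and start rendering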
|
PypiClean
|
/skytemple_dtef-1.6.0a3-py3-none-any.whl/skytemple_dtef/explorers_dtef.py
|
from math import floor, ceil
from typing import List, Dict
from xml.etree import ElementTree
from PIL import Image
from skytemple_dtef.dungeon_xml import DungeonXml, RestTileMapping, RestTileMappingEntry
from skytemple_dtef.rules import get_rule_variations, REMAP_RULES
from skytemple_files.graphics.dma.protocol import DmaProtocol, DmaType
from skytemple_files.graphics.dpc import DPC_TILING_DIM
from skytemple_files.graphics.dpc.protocol import DpcProtocol
from skytemple_files.graphics.dpci import DPCI_TILE_DIM
from skytemple_files.graphics.dpci.protocol import DpciProtocol
from skytemple_files.graphics.dpl.protocol import DplProtocol
from skytemple_files.graphics.dpla.protocol import DplaProtocol
TILESHEET_WIDTH = 6
TILESHEET_HEIGHT = 8
TW = DPCI_TILE_DIM * DPC_TILING_DIM
VAR0_FN = 'tileset_0.png'
VAR1_FN = 'tileset_1.png'
VAR2_FN = 'tileset_2.png'
MORE_FN = 'tileset_more.png'
class ExplorersDtef:
def __init__(self, dma: DmaProtocol, dpc: DpcProtocol, dpci: DpciProtocol, dpl: DplProtocol, dpla: DplaProtocol):
self.dma = dma
self.dpc = dpc
self.dpci = dpci
self.dpl = dpl
self.dpla = dpla
chunks = self.dpc.chunks_to_pil(self.dpci, self.dpl.palettes, 1)
self.var0 = Image.new('P', (TILESHEET_WIDTH * 3 * TW, TILESHEET_HEIGHT * TW))
self.var1 = Image.new('P', (TILESHEET_WIDTH * 3 * TW, TILESHEET_HEIGHT * TW))
self.var2 = Image.new('P', (TILESHEET_WIDTH * 3 * TW, TILESHEET_HEIGHT * TW))
pal = chunks.getpalette()
self.var0.putpalette(pal) # type: ignore
self.var1.putpalette(pal) # type: ignore
self.var2.putpalette(pal) # type: ignore
def paste(fimg, chunk_index, x, y):
fimg.paste(
chunks.crop((0, chunk_index * TW, TW, chunk_index * TW + TW)),
(x * TW, y * TW)
)
# Process tiles
self._variation_map: List[List[int]] = [[], [], []]
self._coord_map = {}
# Pre-fill variation maps
for ti, the_type in enumerate((DmaType.WALL, DmaType.WATER, DmaType.FLOOR)):
for i, base_rule in enumerate(get_rule_variations(REMAP_RULES).keys()):
if base_rule is None:
continue
x = i % TILESHEET_WIDTH + (TILESHEET_WIDTH * ti)
y = floor(i / TILESHEET_WIDTH)
variations = self.dma.get(the_type, base_rule)
already_printed = set()
for img, iv in zip((self.var0, self.var1, self.var2), range(len(variations))):
variation = variations[iv]
if variation in already_printed:
continue
already_printed.add(variation)
self._coord_map[variation] = (x, y)
self._variation_map[iv].append(variation)
# Non standard tiles
self.rest_mappings: List[RestTileMapping] = []
self._tiles_to_draw_on_more: List[int] = []
self._rest_mappings_idxes: Dict[int, int] = {} # dpc -> rest_mappings index
# Process all normal rule tiles (47-set and check 256-set extended)
for ti, the_type in enumerate((DmaType.WALL, DmaType.WATER, DmaType.FLOOR)):
for i, (base_rule, derived_rules) in enumerate(get_rule_variations(REMAP_RULES).items()):
if base_rule is None:
continue
x = i % TILESHEET_WIDTH + (TILESHEET_WIDTH * ti)
y = floor(i / TILESHEET_WIDTH)
variations = self.dma.get(the_type, base_rule)
already_printed = set()
for img, iv in zip((self.var0, self.var1, self.var2), range(len(variations))):
variation = variations[iv]
if variation in already_printed:
continue
already_printed.add(variation)
paste(img, variation, x, y)
for other_rule in derived_rules:
if other_rule == base_rule:
continue
r_variations = self.dma.get(the_type, other_rule)
for index, (r_var, var) in enumerate(zip(r_variations, variations)):
if r_var != var:
# Process non-standard mappings
self._add_extra_mapping(other_rule, r_var, index)
# Process all extra tiles
for i, m in enumerate(self.dma.chunk_mappings[0x300 * 3:]):
self._add_extra_mapping(0x300 * 3 + i, m, None)
more_width = TILESHEET_WIDTH * 3 * TW
more_height = max(TW, ceil(len(self._tiles_to_draw_on_more) / more_width) * TW)
self.rest = Image.new('P', (more_width, more_height))
for i, tile_more in enumerate(self._tiles_to_draw_on_more):
x = i % (TILESHEET_WIDTH * 3)
y = floor(i / (TILESHEET_WIDTH * 3))
paste(self.rest, tile_more, x, y)
self.rest.putpalette(pal) # type: ignore
def get_xml(self) -> ElementTree.Element:
return DungeonXml.generate(self.dpla, TW, self.rest_mappings)
def get_tiles(self) -> List[Image.Image]:
return [self.var0, self.var1, self.var2, self.rest]
@staticmethod
def get_filenames():
return [VAR0_FN, VAR1_FN, VAR2_FN, MORE_FN]
def _add_extra_mapping(self, i, m, variation):
if m in self._variation_map[0]:
fn = VAR0_FN
x, y = self._coord_map[m]
elif m in self._variation_map[1]:
fn = VAR1_FN
x, y = self._coord_map[m]
elif m in self._variation_map[2]:
fn = VAR2_FN
x, y = self._coord_map[m]
else:
fn = MORE_FN
oi = len(self._tiles_to_draw_on_more)
x = oi % (TILESHEET_WIDTH * 3)
y = floor(oi / (TILESHEET_WIDTH * 3))
if m not in self._rest_mappings_idxes:
if fn == MORE_FN:
self._tiles_to_draw_on_more.append(m)
self.rest_mappings.append(RestTileMapping(x, y, [], fn))
self._rest_mappings_idxes[m] = len(self.rest_mappings) - 1
mappings = self.rest_mappings[self._rest_mappings_idxes[m]].mappings
if i < 0x300 * 3:
# Normal
dma_type = DmaType.WALL
if i >= 0x200:
dma_type = DmaType.FLOOR
elif i >= 0x100:
dma_type = DmaType.WATER
mappings.append(RestTileMappingEntry("normal", dma_type, i, variation))
else:
# Special
typ = i % 3
i_real = int(i / 3) - 0x300
mappings.append(RestTileMappingEntry("extra", typ, 0, i_real))
|
PypiClean
|
/Discode.py-1.1.1.tar.gz/Discode.py-1.1.1/discode/utils.py
|
import pprint
from .message import Message
from .member import Member
def make_pretty(*args, **kwargs) -> str:
return pprint.pformat(*args, **kwargs)
async def _check(ws, data: dict):
if ws._ready.is_set():
event = data.get('t').upper()
d = data.get("d")
if event == "MESSAGE_CREATE":
msgdata = data["d"]
msgdata["http"] = ws.http
message = Message(**msgdata)
ws.message_cache.append(message)
if len(ws.message_cache) == ws.http.client.message_limit:
# Drop the oldest cached message so the cache stays within the limit.
ws.message_cache.pop(0)
await ws.dispatch("message", message)
elif event == "MESSAGE_UPDATE":
for msg in ws.message_cache:
if msg.id == int(d.get("id")):
msgdata = msg.data.copy()
before = msg
after = Message(**msgdata)
after.content = data["d"].get("content")
msg.data["edited_at"] = data["d"].get("edited_timestamp")
await ws.dispatch("message edit", before, after)
ws.message_cache.remove(before)
ws.message_cache.append(after)
break
elif event == "MESSAGE_DELETE":
for msg in ws.message_cache:
pass
elif event == "MEMBER_ADD":
for g in ws.guilds:
if g.id == int(d.get("guild_id")):
d.pop("guild_id", None)
g.members.append(d)
await ws.dispatch("member create")
break
elif event == "MEMBER_DELETE":
g = ws.client.get_guild(int(d.get("guild_id")))
if not g:
return
elif event == "MEMBER_UPDATE":
g = ws.client.get_guild(int(d.get("guild_id")))
if g:
d.pop("guild_id", None)
m = ws.client.get_member(
member_id = int(d.get("id")),
guild_id = g.id
)
try:
g.members.remove(m)
except ValueError:
pass
g.members.append(d)
|
PypiClean
|
/open_aea_cosmpy-0.6.5.tar.gz/open_aea_cosmpy-0.6.5/cosmpy/protos/cosmos/evidence/v1beta1/query_pb2_grpc.py
|
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from cosmos.evidence.v1beta1 import query_pb2 as cosmos_dot_evidence_dot_v1beta1_dot_query__pb2
class QueryStub(object):
"""Query defines the gRPC querier service.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Evidence = channel.unary_unary(
'/cosmos.evidence.v1beta1.Query/Evidence',
request_serializer=cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryEvidenceRequest.SerializeToString,
response_deserializer=cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryEvidenceResponse.FromString,
)
self.AllEvidence = channel.unary_unary(
'/cosmos.evidence.v1beta1.Query/AllEvidence',
request_serializer=cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryAllEvidenceRequest.SerializeToString,
response_deserializer=cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryAllEvidenceResponse.FromString,
)
class QueryServicer(object):
"""Query defines the gRPC querier service.
"""
def Evidence(self, request, context):
"""Evidence queries evidence based on evidence hash.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AllEvidence(self, request, context):
"""AllEvidence queries all evidence.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_QueryServicer_to_server(servicer, server):
rpc_method_handlers = {
'Evidence': grpc.unary_unary_rpc_method_handler(
servicer.Evidence,
request_deserializer=cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryEvidenceRequest.FromString,
response_serializer=cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryEvidenceResponse.SerializeToString,
),
'AllEvidence': grpc.unary_unary_rpc_method_handler(
servicer.AllEvidence,
request_deserializer=cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryAllEvidenceRequest.FromString,
response_serializer=cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryAllEvidenceResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'cosmos.evidence.v1beta1.Query', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Query(object):
"""Query defines the gRPC querier service.
"""
@staticmethod
def Evidence(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cosmos.evidence.v1beta1.Query/Evidence',
cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryEvidenceRequest.SerializeToString,
cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryEvidenceResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AllEvidence(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/cosmos.evidence.v1beta1.Query/AllEvidence',
cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryAllEvidenceRequest.SerializeToString,
cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryAllEvidenceResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
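# Example usage (sketch; `endpoint` is a hypothetical "host:port" of a Cosmos SDK gRPC node):
# channel = grpc.insecure_channel(endpoint)
# stub = QueryStub(channel)
# response = stub.AllEvidence(
#     cosmos_dot_evidence_dot_v1beta1_dot_query__pb2.QueryAllEvidenceRequest())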
|
PypiClean
|
/no_comment-0.1.1-py3-none-any.whl/no_comment/infrastructure/flask/__init__.py
|
from flask import Flask, get_flashed_messages, url_for
from werkzeug.middleware.proxy_fix import ProxyFix
import no_comment.interfaces.to_http.as_html as html_presenters
from no_comment import __version__
from no_comment.infrastructure.settings import WsgiSettings
from . import services
from .auth import blueprint as auth
from .streams import blueprint as streams
def build_app(settings: WsgiSettings) -> Flask:
services.define_settings(settings)
app = Flask(
__name__,
static_url_path="/resources",
static_folder="./static/",
)
configure(app, settings)
register_blueprints(app, settings)
register_globals(settings)
app.teardown_appcontext(services.teardown_connection)
return app
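# Example (sketch): given a `WsgiSettings` instance, e.g. in a WSGI entry point:
# application = build_app(settings)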
def configure(app: Flask, settings: WsgiSettings) -> None:
if settings.PROXIED:
app.wsgi_app = ProxyFix( # type: ignore[assignment]
app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_prefix=1
)
app.config.update(
SECRET_KEY=settings.SECRET_KEY,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_HTTPONLY=True,
SESSION_COOKIE_SAMESITE="Lax",
)
def register_blueprints(app: Flask, settings: WsgiSettings) -> None:
app.register_blueprint(streams, url_prefix="/")
app.auth_links = [] # type: ignore[attr-defined]
app.register_blueprint(auth, url_prefix="/auth")
if settings.TOTP:
from no_comment.infrastructure.flask.totp import blueprint as totp
app.register_blueprint(totp, url_prefix="/auth/totp")
app.auth_links.append( # type: ignore[attr-defined]
{"route": "totp.login", "label": "TOTP"}
)
if settings.AUTHORIZED_IP:
from no_comment.infrastructure.flask.ip import blueprint as ip
app.register_blueprint(ip, url_prefix="/auth/ip")
app.auth_links.append( # type: ignore[attr-defined]
{"route": "ip.login", "label": "IP"}
)
def register_globals(settings: WsgiSettings) -> None:
html_presenters.register_jinja_global("version", __version__)
html_presenters.register_jinja_global("url_for", url_for)
html_presenters.register_jinja_global("get_flashed_messages", get_flashed_messages)
html_presenters.register_jinja_global("timezone", settings.TIMEZONE)
|
PypiClean
|
/bf-banki-nlu-1.5.tar.gz/bf-banki-nlu-1.5/rasa/core/training/converters/responses_prefix_converter.py
|
from pathlib import Path
from typing import Text
from rasa.shared.core.domain import Domain, InvalidDomain
from rasa.shared.core.events import ActionExecuted
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
from rasa.shared.core.training_data.story_writer.yaml_story_writer import (
YAMLStoryWriter,
)
from rasa.shared.constants import UTTER_PREFIX
from rasa.utils.converter import TrainingDataConverter
OBSOLETE_RESPOND_PREFIX = "respond_"
def normalize_utter_action(action_name: Text) -> Text:
"""Ensure that response names start with `utter_`.
Args:
action_name: The name of the response.
Returns:
The name of the response, starting with `utter_`.
"""
return (
f"{UTTER_PREFIX}{action_name[len(OBSOLETE_RESPOND_PREFIX):]}"
if action_name.startswith(OBSOLETE_RESPOND_PREFIX)
else action_name
)
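# For example (sketch): normalize_utter_action("respond_chitchat") returns "utter_chitchat",
# while an already-migrated name such as "utter_greet" is returned unchanged.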
class StoryResponsePrefixConverter(TrainingDataConverter):
"""
Converter responsible for ensuring that retrieval intent actions in stories
start with `utter_` instead of `respond_`.
"""
@classmethod
def filter(cls, source_path: Path) -> bool:
"""Only accept YAML story files.
Args:
source_path: Path to a training data file.
Returns:
`True` if the given file is a YAML stories file, `False` otherwise.
"""
return YAMLStoryReader.is_stories_file(source_path)
@classmethod
async def convert_and_write(cls, source_path: Path, output_path: Path) -> None:
"""Migrate retrieval intent responses to the new 2.0 format in stories.
Before 2.0, retrieval intent responses needed to start
with `respond_`. Now, they need to start with `utter_`.
Args:
source_path: the source YAML stories file.
output_path: Path to the output directory.
"""
reader = YAMLStoryReader()
story_steps = reader.read_from_file(source_path)
for story_step in story_steps:
for event in story_step.events:
if isinstance(event, ActionExecuted):
event.action_name = normalize_utter_action(event.action_name)
output_file = cls.generate_path_for_converted_training_data_file(
source_path, output_path
)
YAMLStoryWriter().dump(output_file, story_steps)
class DomainResponsePrefixConverter(TrainingDataConverter):
"""
Converter responsible for ensuring that retrieval intent actions in domain
start with `utter_` instead of `respond_`.
"""
@classmethod
def filter(cls, source_path: Path) -> bool:
"""Only accept domain files.
Args:
source_path: Path to a domain file.
Returns:
`True` if the given file is a valid domain file, `False` otherwise.
"""
try:
Domain.from_path(source_path)
except InvalidDomain:
return False
return True
@classmethod
async def convert_and_write(cls, source_path: Path, output_path: Path) -> None:
"""Migrate retrieval intent responses to the new 2.0 format in domains.
Before 2.0, retrieval intent responses needed to start
with `respond_`. Now, they need to start with `utter_`.
Args:
source_path: The source domain file.
output_path: Path to the output directory.
"""
domain = Domain.from_path(source_path)
domain_dict = domain.as_dict()
domain_dict["actions"] = [
normalize_utter_action(action) for action in domain_dict["actions"]
]
new_domain = Domain.from_dict(domain_dict)
output_file = cls.generate_path_for_converted_training_data_file(
source_path, output_path
)
new_domain.persist(output_file)
|
PypiClean
|
/chemfiles-0.10.4.tar.gz/chemfiles-0.10.4/lib/CHANGELOG.md
|
# Change Log
All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](https://semver.org/).
## Next Release (current master)
### Deprecation and removals
- Remove support for configuration files (`chemfiles.toml`) and associated
functions.
## 0.10.4 (7 May 2023)
### Changes in supported formats
- Added read support for the TPR format (#459)
- Fix an issue when writing DCD files that made them incompatible with VMD (#465)
- Fix a bug when converting cell angles in DCD (#467)
## 0.10.3 (20 Aug 2022)
### Changes in supported formats
- Amber NetCDF files are now read/written with a custom netcdf parser (#443)
- TRR and XTC files are now read/written with a custom parser (#451)
- DCD files are now read/written with a custom parser (#453)
## 0.10.2 (20 Oct 2021)
### New features
- Properties are now sorted, and iterating over properties will always yield
them sorted by the associated key.
### Changes in supported formats
- Added read support for PSF files using VMD molfile plugin.
## 0.10.1 (22 Jul 2021)
### New features
- Added `chemfiles::guess_format` and `chfl_guess_format` to get the format
chemfiles would use for a given file based on its filename
- Added read support for GROMACS TPR format.
### Changes in supported formats
- Added read and write support for Amber Restart (.ncrst) files.
- Added native read and write support for LAMMPS trajectory (.lammpstrj) files,
replacing the VMD molfile implementation.
### Changes to the C API
## 0.10.0 (14 Feb 2021)
### Changes in supported formats
* Added read and write support for CIF format.
* Added read and write support for extended XYZ. The XYZ format now defaults to
extended output, and reads extended files. Extended XYZ allows storing unit
cell and arbitrary atomic properties.
### New features
* Added ability to read and write files directly in-memory. See
`Trajectory::memory_reader`; `Trajectory::memory_writer`;
`Trajectory::memory_buffer`; `chfl_trajectory_memory_reader`;
`chfl_trajectory_memory_writer` and `chfl_trajectory_memory_buffer`.
* Added support for appending to gzip (.gz) compressed trajectories.
* Added support for sub-selection in numerical functions, for example
`distance(#1, name O)`.
* Changed the UnitCell representation to the full cell matrix instead of
a/b/c/alpha/beta/gamma
* Added `chemfiles::formats_list` function to get a list of formats
and associated metadata.
### Changes to the C API
* Added missing `chfl_frame_clear_bonds` and `chfl_topology_clear_bonds`.
* Added `chfl_cell_from_matrix` and changed parameters to `chfl_cell`, removed
`chfl_cell_triclinic`.
* Added `chfl_formats_list` function to get a list of formats and
associated metadata.
## 0.9.3 (5 Feb 2020)
* Fix a bug in the PDB format where no atomic name/type was read from short
ATOM/HETATM records.
* Fix a few bugs related to UnitCell Matrix and infinite UnitCell construction
## 0.9.2 (18 Dec 2019)
* When compiling chemfiles as a shared library, the dependencies' symbols are
now hidden. This should prevent clashes between, say, chemfiles's zlib and the
system zlib.
* Cache sub-selection (the 'name O' in 'is_bonded(#1, name O)'), reducing
selection evaluation time by a huge margin.
### Changes in supported formats
* Added read and write support for CML (Chemical Markup Language) files, an
XML-based format.
* Added a native implementation of XTC and TRR formats, replacing the VMD
molfile version. The new code supports reading and writing files, should be
faster and use less memory.
* Remove the ability to read frames from a trajectory that was opened in
append mode. This mode is now write only.
* Added support for bzip2 (.bz2) compressed files when reading and writing
## 0.9.1 (13 Mar 2019)
* Fix a bug with memory allocation in the C API. The allocator did not remove
pointers as soon as `chfl_free` was called, which led to an error when the
system allocator re-used the pointers.
## 0.9.0 (18 Nov 2018)
* Direct reading and writing of compressed files. gzip and lzma (.xz) formats
are supported.
* GROMACS .gro files now supported through custom implementation.
* Properties are now supported in the `Residue` class. They are accessed using
`Residue::set` and `Residue::get`.
* The topology of residues is now automatically set using a lookup table for the
PDB format.
* `Frame::guess_topology` was renamed to `Frame::guess_bonds`.
* The selection engine has been rewritten to add support for more complex
selections:
* it is now possible to use mathematical expressions in selections such as
`x^2 - y ^2 < sqrt(z^2 + 25)`;
* it is now possible to access geometrical properties in such mathematical
expressions: `distance(#1, #2)`, `angle(#1, #2, #3)`, `dihedral(#1, #2,
#3, #4)`, and `out_of_plane(#1, #2, #3, #4)` are supported;
* it is now possible to add constraints on the topology of the system:
`bonded(#1, #2)`, `is_angle(#1, #2, #3)`, `is_dihedral(#1, #2, #3, #4)`,
and `improper(#1, #2, #3, #4)` are supported;
* the topology constraints support sub-selections: instead of checking if
`#1` and `#2` are bonded, one can check if `#1` is bonded to any atom
matching a selection, for example `name O` with `bonded(#1, name O)`.
* When using numbers as atomic names/types, they must now be inside double
quotes (`name "45"`). This also allows for more exotic atomic names
(`name "名"`).
* Atomic properties can be checked, using the `[property] == Ow` syntax for
string properties, `[property] == 2.3` for numeric properties and
`[property]` for boolean properties.
* There is only one constructor for the `Frame` class: `Frame(UnitCell cell =
UnitCell())`. The constructor taking a topology can be replaced with calls to
`Frame::add_atom` and `Frame::add_bond`.
* Chemfiles will now read configuration from `.chemfiles.toml` or
`chemfiles.toml` instead of `.chemfilesrc`
* Added `Trajectory::path` to get the file path used to create a trajectory
* Renamed `Property::get_kind` to `Property::kind`
* Added `Atom::properties`; `Frame::properties`; and `Residue::properties` to
allow iteration over all the properties in an Atom/Frame/Residue.
### Changes in supported formats
* Added `Macromolecular Transmission Format (MMTF)` support, reading via mmtf-cpp.
* Added `Structure-Data File (SDF)` support, reading and writing.
* Added `Cambridge Structure Search and Retrieval (CSSR)` support, reading and writing.
* `LAMMPS Data` format now support triclinic unit cells.
### Changes to the C API
* Added `chfl_residue_get_property` and `chfl_residue_set_property` to provide
access to residue properties.
* `chfl_frame_guess_topology` was renamed to `chfl_frame_guess_bonds`.
* Function accessing atoms/cell/residue/topology inside a frame/topology no
longer make a copy. This allows for direct reading and writing inside the
containing frame/topology.
* Added `chfl_trajectory_path` to get the file path used to create a trajectory
* Added `chfl_{atom,frame,residue}_properties_count` and
`chfl_{atom,frame,residue}_list_properties` to list all properties in an
Atom/Frame/Residue
* Replaced `chfl_*_free` by an unique `chfl_free` function
## 0.8 (14 Dec 2017)
### New features
* Change the license to the 3-clauses BSD license.
* Chemfiles will now read configuration files (by default in `.chemfilesrc`),
and use the configuration data to rename atomic types to make sure they match
element names. The `chemfiles::add_configuration` function can be used to add
additional configuration files.
* Reading a `Frame` (with `Trajectory::read` or `Trajectory::read_step`) will
now set the frame step.
* The `Atom` fallible methods (`atomic_number`, `vdw_radius`, `covalent_radius`
and `full_name`) return `optional<T>` instead of `T`.
* Functions taking an atomic index parameter can now throw `OutOfBounds` errors
if the index is out of bounds.
* `Topology::append` is now called `Topology::add_atom`
* `Topology::natoms` and `Frame::natoms` are now called `Topology::size` and
`Frame::size`
* `Topology::residue` is now called `Topology::residue_for_atom`
* Added `Frame::distance`, `Frame::angle`, `Frame::dihedral` and
`Frame::out_of_plane` to get geometric information on the system, accounting
for periodic boundary conditions.
* Added a `Property` class to store arbitrary properties in `Frame` and `Atom`.
* Added support for improper dihedral angles in `Topology`.
* `chemfiles::add_configuration` and `chemfiles::set_warning_callback` are now
thread safe, and will block upon concurrent usage.
* `UnitCell::matricial` is renamed to `UnitCell::matrix`.
* The `UnitCell::shape` setter is renamed to `UnitCell::set_shape`.
* The `Trajectory::close` function can be used to close a trajectory and
synchronize any buffered content with the storage.
* Some of the topology functions are now accessible directly on the frame:
`Frame::add_bond`, `Frame::remove_bond`, `Frame::clear_bonds`,
`Frame::add_residue` and `operator[]`. The non const version of
`Frame::topology` is removed.
### Changes in supported formats
* Amber NetCDF format is now activated by default, by embedding the netcdf
library in chemfiles.
* Added `LAMMPS Data` format, reading and writing [LAMMPS data files].
* Added `Tinker` format, reading and writing Tinker XYZ file format.
* Added `MOL2` format, reading mol2 files using VMD molfiles plugin.
* Added `Molden` format, reading molden files using VMD molfiles plugin.
[LAMMPS data files]: https://lammps.sandia.gov/doc/read_data.html
### Changes to the C API
* Added `chfl_add_configuration` to add more configuration files.
* Renamed `chfl_vector_t` to `chfl_vector3d`, `chfl_match_t` to `cfl_match`; and
`chfl_cell_shape_t` to `chfl_cellshape`.
* `chfl_atom_atomic_number`, `chfl_atom_vdw_radius` and
`chfl_atom_covalent_radius` all return 0 instead of -1 if the atom does not
have a known value for this property. This allows `chfl_atom_atomic_number` to
take a `uint64_t*` parameter instead of an `int64_t*`, following all the other
functions in the C API.
* Added `CHFL_OUT_OF_BOUNDS` and `CHFL_PROPERTY_ERROR` variants to `chfl_status`
* Added `chfl_frame_distance`, `chfl_frame_angle`, `chfl_frame_dihedral`,
`chfl_frame_out_of_plane` and `chfl_cell_wrap` to work with periodic boundary
conditions.
* `chfl_residue` does not take the optional residue id as a parameter; instead you
should use `chfl_residue_with_id`.
* Added `chfl_residue_atoms` to get the list of atoms in a residue.
* Added `chfl_topology_impropers` and `chfl_topology_impropers_count` functions.
* Added `CHFL_PROPERTY` and related functions.
* `chfl_add_configuration` and `chfl_set_warning_callback` are now thread safe,
and will block upon concurrent usage.
* Added `chfl_frame_add_bond`, `chfl_frame_remove_bond`, and
`chfl_frame_add_residue`.
### Deprecation and removals
* `Topology::isbond`, `Topology::isangle`, `Topology::isdihedral`, and the
corresponding C functions `chfl_topology_isbond`, `chfl_topology_isangle`
`chfl_topology_isdihedral` are removed.
## 0.7 (25 Feb 2017)
### New features
* Add a public `Residue` class to C++ and C API to represent residue data.
Residues are groups of atoms bonded together, which may or may not correspond
to molecules; and are often used for bio-molecules.
* Add the `resname` and `resid` selector, to select atoms based on their
residue.
* Account for the difference between the atom name ("H1") and atom type ("H")
in some formats (PDB, TNG, ...). This introduces the `Atom::type` member
function and the `chfl_atom_type` C API function.
* Add the `type` selector, to select atoms based on their type.
* Add "Frame::add_atom" function to add an atom and the corresponding position
(and velocity) data to a frame, and the C API `chfl_frame_add_atom` function.
* Rename `UnitCell::type` to `UnitCell::shape`. This also affect
`chfl_cell_shape_t`, `chfl_cell_shape`, and `chfl_cell_set_shape`.
* All the floating point data uses doubles instead of floats. This concerns
atomic data, positions and velocities.
* Add "Selection::string" function and the corresponding `chfl_selection_string`
to get the string used to build a selection.
* Selection variables use the `#3` syntax instead of the `$3` syntax to allow
passing selection string as shell arguments.
* Add `Frame::remove` and `chfl_frame_remove` to remove an atom in a frame.
* Allow using chemfiles with CMake `find_package`.
### Changes in supported formats
* Add read support for TNG files, a new portable and compressed binary format
used by GROMACS.
### Changes to the C API
* All the integers at the C boundary have a fixed size, most of the time using
`uint64_t`.
* Add missing `chfl_topology_resize` function to C API.
* C API functions taking three lengths/angles now take a `double[3]` parameter
instead.
* Rename `chfl_topology_are_linked` to `chfl_topology_residues_linked`.
* Rename `chfl_topology_append` to `chfl_topology_add_atom`.
* Remove `chfl_strerror`, as it is redundant with `chfl_last_error`.
* Merge `chfl_trajectory_set_topology_file` and `chfl_trajectory_set_topology_with_format`
into `chfl_trajectory_topology_file`.
* The `chfl_frame` function no longer takes the frame size as an argument. It always
creates an empty frame, which you can resize using `chfl_frame_resize`.
* `chfl_selection_evalutate` was a typo; it is renamed to `chfl_selection_evaluate`.
### Deprecation and removals
* Remove the `Atom::type` enum from C and C++ API.
* Remove the `Trajectory::sync` and the `chfl_trajectory_sync` functions.
To ensure that all content of a file is written to the disk, the user needs to
close it.
* Remove the `Logger` and all the `chfl_log*` functions. Instead, the users can
use `chemfiles::set_warning_callback` or `chfl_set_warning_callback` to set a
global callback to call on warning events.
## 0.6 (1 July 2016)
* Improve the selection language to allow selecting multiple atoms at once. For
example, `"pairs: name($1) H and mass($2) > 5"` will select all pairs of atoms
where the first atom name is `'H'` and the second atom mass is bigger than 5.
* The implemented modes for selections are `one`, `atoms`, `two`, `pairs`,
`three`, `four`, `bonds`, `angles` and `dihedrals`;
* The `Selection` class is now directly exposed to the C API, as
`CHFL_SELECTION*`. The `chfl_frame_selection` function is replaced by the
`chfl_selection`, `chfl_selection_size`, `chfl_selection_evalutate`,
`chfl_selection_matches` and `chfl_selection_free` functions, and the
`chfl_match_t` helper struct.
* Add the `chfl_clear_errors` function, to cleanup the error state of the C API.
* Molfiles plugins are now incorporated in the Chemfiles library, and no longer
distributed as shared libraries. The `CHEMFILES_PLUGINS` environment variable
is a no-op.
* The caching of angles and dihedrals is now an implementation detail. That
means that `Topology::recalculate` is gone, and that `Frame::guess_topology`
and `chfl_frame_guess_topology` do not take a boolean parameter anymore.
* The opening mode is now a `char` instead of a string in `Trajectory`
constructor, `chfl_trajectory_open`, and `chfl_trajectory_with_format`.
* Remove `operator<<` and `operator>>` for `Trajectory`. Users should use
`Trajectory::read` and `Trajectory::write`
* Users can now specify the format when reading the topology associated with a
trajectory from a file. The `chfl_trajectory_set_topology_with_format`
function can be used to do so from the C API.
* The `chfl_atom_from_{frame,topology}` function now return `NULL` in case of
out-of-bound access.
## 0.5 (19 Feb 2016)
* The C API now provides a direct view into the `positions` and `velocities`
arrays. This removes the need for copies and separate getter
(`chfl_frame_{position,velocities}`) and setter
(`chfl_frame_{position,velocities}_set`) functions. This also forces usage of
`chfl_frame_add_velocities` to add velocity data to a frame, and
`chfl_frame_resize` to change the size of the frame.
* Add constants for error codes in the C API. The following macros are defined:
`CHFL_SUCCESS`, `CHFL_MEMORY_ERROR`, `CHFL_FILE_ERROR`, `CHFL_FORMAT_ERROR`,
`CHFL_GENERIC_ERROR`, `CHFL_CXX_ERROR`.
* Add the `chfl_version` function in C API.
* Add a small selection language *a la* VMD, allowing selection of atoms matching
a selection string like `"name H and x > 4"`. This is exposed to C++ with the
public `Selection` class, and to C with the `chfl_frame_selection` function.
* Remove the periodicity handling from `UnitCell`. It was not implemented in
the boundary conditions. The corresponding functions were removed from the C
API.
* Rename all setter functions from `void xxx(const XXX& value)` to
`void set_xxx(const XXX& value)` in C++ API.
* It is now possible to provide a callback for logging from the C API. The
`chfl_log_stdout`, `chfl_log_silent` and `chfl_log_callback` functions were
added to the C API.
## 0.4 (30 Oct 2015)
* Chemharp can now be compiled as a static library! This should allow for easier
embedding in external code, and easier distribution of binaries.
* Add a `chemfiles::Trajectory::sync` method to sync any buffered operation with
the disk. The `chfl_trajectory_sync` function exposes it to the C API.
* Add a Rust binding
* Rewrite the Python binding to use ctypes. The same code can be used with
Python 2 & 3, and with all numpy versions.
* Easier Python and Julia binding installation, using conda binary packaging.
* All the bindings now live on their own repository:
- [Fortran](https://github.com/Luthaf/Chemharp.f03)
- [Python](https://github.com/Luthaf/Chemharp.py)
- [Julia](https://github.com/Luthaf/Chemharp.jl)
- [Rust](https://github.com/Luthaf/Chemharp.rs)
* The library is now continuously tested on Visual Studio
* Various bug fixes and code improvements
* Renamed the library to Chemfiles.
## 0.3 (3 Aug 2015)
* Julia binding
* Initial Windows support, with both MSVC and mingw
* Add a binary frontend called `chrp`, implementing some analysis algorithms.
For more information, see the [specific repository](https://github.com/Luthaf/chrp).
## 0.2 (31 May 2015)
* Add basic geometrical operations on vectors and implement basic periodic boundary conditions with the `UnitCell::wrap` function;
* Use VMD Molfiles plugins as a format provider to read trajectories. The following formats are
added through Molfiles:
* PDB;
* GROMACS gro;
* GROMACS xtc;
* GROMACS trj;
* GROMACS trr;
* CHARMM dcd;
## 0.1 (16 May 2015)
Initial release. See the documentation for the full API.
Chemharp is usable from four languages:
* C++;
* C;
* Fortran;
* Python;
The following formats are supported:
* XYZ;
* AMBER NetCDF;
|
PypiClean
|
/hsa-pyelastica-0.0.1.tar.gz/hsa-pyelastica-0.0.1/hsa_elastica/memory_block/memory_block_hsa_rod.py
|
__doc__ = """Create block-structure class for collection of Cosserat rod systems."""
from elastica.memory_block.memory_block_rod import make_block_memory_metadata
from elastica.rod.data_structures import _RodSymplecticStepperMixin
from elastica.reset_functions_for_block_structure import _reset_scalar_ghost
import numpy as np
from typing import Sequence
from hsa_elastica.rod import HsaRod
class MemoryBlockHsaRod(HsaRod, _RodSymplecticStepperMixin):
def __init__(self, systems: Sequence):
self.n_elems_in_rods = np.array([x.n_elems for x in systems], dtype=np.int64)
self.n_rods = len(systems)
(
self.n_elems,
self.ghost_nodes_idx,
self.ghost_elems_idx,
self.ghost_voronoi_idx,
) = make_block_memory_metadata(self.n_elems_in_rods)
self.n_nodes = self.n_elems + 1
self.n_voronoi = self.n_elems - 1
# n_nodes_in_rods = self.n_elems_in_rods + 1
# n_voronois_in_rods = self.n_elems_in_rods - 1
self.start_idx_in_rod_nodes = np.hstack(
(0, self.ghost_nodes_idx + 1)
) # Start index of subsequent rod
self.end_idx_in_rod_nodes = np.hstack(
(self.ghost_nodes_idx, self.n_nodes)
        )  # End index of each rod; the final entry is simply the total node count
self.start_idx_in_rod_elems = np.hstack((0, self.ghost_elems_idx[1::2] + 1))
self.end_idx_in_rod_elems = np.hstack((self.ghost_elems_idx[::2], self.n_elems))
self.start_idx_in_rod_voronoi = np.hstack((0, self.ghost_voronoi_idx[2::3] + 1))
self.end_idx_in_rod_voronoi = np.hstack(
(self.ghost_voronoi_idx[::3], self.n_voronoi)
)
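        # Illustrative note (not part of the original code): with, say, two rods
        # of 4 and 3 elements, make_block_memory_metadata inserts ghost entries
        # between the rods, so the index arrays above come out roughly as
        # start_idx_in_rod_elems = [0, 6] and end_idx_in_rod_elems = [4, 9];
        # each rod then owns a contiguous slice of the shared block arrays,
        # which the allocate_* methods below use for copying and view creation.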
# Allocate block structure using system collection.
self.allocate_block_variables_in_nodes(systems)
self.allocate_block_variables_in_elements(systems)
self.allocate_blocks_variables_in_voronoi(systems)
self.allocate_blocks_variables_for_symplectic_stepper(systems)
# Reset ghosts of mass, rest length and rest voronoi length to 1. Otherwise
# since ghosts are not modified, this causes a division by zero error.
_reset_scalar_ghost(self.mass, self.ghost_nodes_idx, 1.0)
_reset_scalar_ghost(self.rest_lengths, self.ghost_elems_idx, 1.0)
_reset_scalar_ghost(self.rest_voronoi_lengths, self.ghost_voronoi_idx, 1.0)
_reset_scalar_ghost(self.printed_lengths, self.ghost_elems_idx, 1.0)
_reset_scalar_ghost(self.printed_voronoi_lengths, self.ghost_voronoi_idx, 1.0)
# Initialize the mixin class for symplectic time-stepper.
_RodSymplecticStepperMixin.__init__(self)
def allocate_block_variables_in_nodes(self, systems: Sequence):
"""
This function takes system collection and allocates the variables on
node for block-structure and references allocated variables back to the
systems.
Parameters
----------
systems
Returns
-------
"""
# Things in nodes that are scalars
# 0 ("mass", float64[:]),
map_scalar_dofs_in_rod_nodes = {
"mass": 0,
"dissipation_constant_for_forces": 1,
}
self.scalar_dofs_in_rod_nodes = np.zeros(
(len(map_scalar_dofs_in_rod_nodes), self.n_nodes)
)
for k, v in map_scalar_dofs_in_rod_nodes.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.scalar_dofs_in_rod_nodes[v], (self.n_nodes,)
)
for k, v in map_scalar_dofs_in_rod_nodes.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_nodes[system_idx]
end_idx = self.end_idx_in_rod_nodes[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
# Things in nodes that are vectors
# 0 ("position_collection", float64[:, :]),
# 1 ("internal_forces", float64[:, :]),
# 2 ("external_forces", float64[:, :]),
# 3 ("damping_forces", float64[:, :]),
        # 4 in total
map_vector_dofs_in_rod_nodes = {
"position_collection": 0,
"internal_forces": 1,
"external_forces": 2,
"damping_forces": 3,
}
self.vector_dofs_in_rod_nodes = np.zeros(
(len(map_vector_dofs_in_rod_nodes), 3 * self.n_nodes)
)
for k, v in map_vector_dofs_in_rod_nodes.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.vector_dofs_in_rod_nodes[v], (3, self.n_nodes)
)
for k, v in map_vector_dofs_in_rod_nodes.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_nodes[system_idx]
end_idx = self.end_idx_in_rod_nodes[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
# Things in nodes that are matrices
# Null set
def allocate_block_variables_in_elements(self, systems: Sequence):
"""
This function takes system collection and allocates the variables on
elements for block-structure and references allocated variables back to the
systems.
Parameters
----------
systems
Returns
-------
"""
# Things in elements that are scalars
# 0 ("radius", float64[:]),
# 1 ("volume", float64[:]),
# 2 ("density", float64[:]),
# 3 ("lengths", float64[:]),
# 4 ("rest_lengths", float64[:]),
# 5 ("dilatation", float64[:]),
# 6 ("dilatation_rate", float64[:]),
# 7 ("dissipation_constant_for_forces", float64[:]),
# 8 ("dissipation_constant_for_torques", float64[:])
map_scalar_dofs_in_rod_elems = {
"radius": 0,
"volume": 1,
"density": 2,
"lengths": 3,
"rest_lengths": 4,
"rest_lengths_scale_factor": 5,
"printed_lengths": 6,
"dilatation": 7,
"dilatation_rate": 8,
"dissipation_constant_for_torques": 9,
"cross_sectional_area": 10,
"elastic_modulus": 11,
"elastic_modulus_scale_factor": 12,
"shear_modulus": 13,
"shear_modulus_scale_factor": 14,
"auxetic": 15,
}
self.scalar_dofs_in_rod_elems = np.zeros(
(len(map_scalar_dofs_in_rod_elems), self.n_elems)
)
for k, v in map_scalar_dofs_in_rod_elems.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.scalar_dofs_in_rod_elems[v], (self.n_elems,)
)
for k, v in map_scalar_dofs_in_rod_elems.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_elems[system_idx]
end_idx = self.end_idx_in_rod_elems[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
# Things in elements that are vectors
# 0 ("tangents", float64[:, :]),
# 1 ("sigma", float64[:, :]),
# 2 ("rest_sigma", float64[:, :]),
# 3 ("internal_torques", float64[:, :]),
# 4 ("external_torques", float64[:, :]),
# 5 ("damping_torques", float64[:, :]),
# 6 ("internal_stress", float64[:, :]),
map_vector_dofs_in_rod_elems = {
"tangents": 0,
"sigma": 1,
"rest_sigma": 2,
"internal_torques": 3,
"external_torques": 4,
"damping_torques": 5,
"internal_stress": 6,
"second_moment_of_inertia": 7,
}
self.vector_dofs_in_rod_elems = np.zeros(
(len(map_vector_dofs_in_rod_elems), 3 * self.n_elems)
)
for k, v in map_vector_dofs_in_rod_elems.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.vector_dofs_in_rod_elems[v], (3, self.n_elems)
)
for k, v in map_vector_dofs_in_rod_elems.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_elems[system_idx]
end_idx = self.end_idx_in_rod_elems[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
# Things in elements that are matrices
# 0 ("director_collection", float64[:, :, :]),
# 1 ("mass_second_moment_of_inertia", float64[:, :, :]),
# 2 ("inv_mass_second_moment_of_inertia", float64[:, :, :]),
# 3 ("shear_matrix", float64[:, :, :]),
map_matrix_dofs_in_rod_elems = {
"director_collection": 0,
"mass_second_moment_of_inertia": 1,
"inv_mass_second_moment_of_inertia": 2,
"shear_matrix": 3,
}
self.matrix_dofs_in_rod_elems = np.zeros(
(len(map_matrix_dofs_in_rod_elems), 9 * self.n_elems)
)
for k, v in map_matrix_dofs_in_rod_elems.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.matrix_dofs_in_rod_elems[v], (3, 3, self.n_elems)
)
for k, v in map_matrix_dofs_in_rod_elems.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_elems[system_idx]
end_idx = self.end_idx_in_rod_elems[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
def allocate_blocks_variables_in_voronoi(self, systems: Sequence):
"""
This function takes system collection and allocates the variables on
voronoi for block-structure and references allocated variables back to the
systems.
Parameters
----------
systems
Returns
-------
"""
# Things in voronoi that are scalars
# 0 ("voronoi_dilatation", float64[:]),
# 1 ("rest_voronoi_lengths", float64[:]),
map_scalar_dofs_in_rod_voronois = {
"voronoi_dilatation": 0,
"rest_voronoi_lengths": 1,
"printed_voronoi_lengths": 2,
"bend_rigidity": 3,
"bend_rigidity_scale_factor": 4,
"twist_rigidity": 5,
"handedness": 6,
}
self.scalar_dofs_in_rod_voronois = np.zeros(
(len(map_scalar_dofs_in_rod_voronois), self.n_voronoi)
)
for k, v in map_scalar_dofs_in_rod_voronois.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.scalar_dofs_in_rod_voronois[v], (self.n_voronoi,)
)
for k, v in map_scalar_dofs_in_rod_voronois.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_voronoi[system_idx]
end_idx = self.end_idx_in_rod_voronoi[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
# Things in voronoi that are vectors
# 0 ("kappa", float64[:, :]),
# 1 ("rest_kappa", float64[:, :]),
# 2 ("internal_couple", float64[:, :]),
map_vector_dofs_in_rod_voronois = {
"kappa": 0,
"rest_kappa": 1,
"internal_couple": 2,
}
self.vector_dofs_in_rod_voronois = np.zeros(
(len(map_vector_dofs_in_rod_voronois), 3 * self.n_voronoi)
)
for k, v in map_vector_dofs_in_rod_voronois.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.vector_dofs_in_rod_voronois[v], (3, self.n_voronoi)
)
for k, v in map_vector_dofs_in_rod_voronois.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_voronoi[system_idx]
end_idx = self.end_idx_in_rod_voronoi[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
# Things in voronoi that are matrices
# 0 ("bend_matrix", float64[:, :, :]),
map_matrix_dofs_in_rod_voronois = {"bend_matrix": 0}
self.matrix_dofs_in_rod_voronois = np.zeros(
(len(map_matrix_dofs_in_rod_voronois), 9 * self.n_voronoi)
)
for k, v in map_matrix_dofs_in_rod_voronois.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.matrix_dofs_in_rod_voronois[v], (3, 3, self.n_voronoi)
)
for k, v in map_matrix_dofs_in_rod_voronois.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_voronoi[system_idx]
end_idx = self.end_idx_in_rod_voronoi[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
def allocate_blocks_variables_for_symplectic_stepper(self, systems: Sequence):
"""
This function takes system collection and allocates the variables used by symplectic
stepper for block-structure and references allocated variables back to the systems.
Parameters
----------
systems
Returns
-------
"""
# These vectors are on nodes or on elements, but we stack them together for
# better memory access. Because we use them together in time-steppers.
# 0 ("velocity_collection", float64[:, :]),
# 1 ("omega_collection", float64[:, :]),
# 2 ("acceleration_collection", float64[:, :]),
# 3 ("alpha_collection", float64[:, :]),
# 4 in total
map_rate_collection = {
"velocity_collection": 0,
"omega_collection": 1,
"acceleration_collection": 2,
"alpha_collection": 3,
}
self.rate_collection = np.zeros((len(map_rate_collection), 3 * self.n_nodes))
for k, v in map_rate_collection.items():
self.__dict__[k] = np.lib.stride_tricks.as_strided(
self.rate_collection[v], (3, self.n_nodes)
)
self.__dict__["velocity_collection"] = np.lib.stride_tricks.as_strided(
self.rate_collection[0], (3, self.n_nodes)
)
self.__dict__["omega_collection"] = np.lib.stride_tricks.as_strided(
self.rate_collection[1],
(3, self.n_elems),
)
self.__dict__["acceleration_collection"] = np.lib.stride_tricks.as_strided(
self.rate_collection[2],
(3, self.n_nodes),
)
self.__dict__["alpha_collection"] = np.lib.stride_tricks.as_strided(
self.rate_collection[3],
(3, self.n_elems),
)
# For Dynamic state update of position Verlet create references
self.v_w_collection = np.lib.stride_tricks.as_strided(
self.rate_collection[0:2], (2, 3 * self.n_nodes)
)
self.dvdt_dwdt_collection = np.lib.stride_tricks.as_strided(
self.rate_collection[2:-1], (2, 3 * self.n_nodes)
)
# Copy systems variables on nodes to block structure
map_rate_collection_dofs_in_rod_nodes = {
"velocity_collection": 0,
"acceleration_collection": 1,
}
for k, v in map_rate_collection_dofs_in_rod_nodes.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_nodes[system_idx]
end_idx = self.end_idx_in_rod_nodes[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
# Copy systems variables on nodes to block structure
map_rate_collection_dofs_in_rod_elems = {
"omega_collection": 0,
"alpha_collection": 1,
}
for k, v in map_rate_collection_dofs_in_rod_elems.items():
for system_idx, system in enumerate(systems):
start_idx = self.start_idx_in_rod_elems[system_idx]
end_idx = self.end_idx_in_rod_elems[system_idx]
self.__dict__[k][..., start_idx:end_idx] = system.__dict__[k].copy()
system.__dict__[k] = np.ndarray.view(
self.__dict__[k][..., start_idx:end_idx]
)
|
PypiClean
|
/rdflib-jsonld-0.6.2.tar.gz/rdflib-jsonld-0.6.2/LICENSE.md
|
LICENSE AGREEMENT FOR RDFLIB-JSONLD
========================================================================
Copyright (c) 2012-2015, RDFLib Team
All rights reserved.
See http://github.com/RDFLib/rdflib-jsonld
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
PypiClean
|
/graphite-web-1.1.10.tar.gz/graphite-web-1.1.10/webapp/graphite/readers/multi.py
|
import functools
from graphite.intervals import IntervalSet
from graphite.logger import log
from graphite.readers.utils import BaseReader
class MultiReader(BaseReader):
__slots__ = ('nodes',)
def __init__(self, nodes):
self.nodes = nodes
def get_intervals(self):
interval_sets = []
for node in self.nodes:
interval_sets.extend(node.intervals.intervals)
return IntervalSet(sorted(interval_sets))
def fetch(self, startTime, endTime, now=None, requestContext=None):
# Start the fetch on each node
fetches = []
for n in self.nodes:
try:
fetches.append(n.fetch(startTime, endTime, now, requestContext))
except BaseException:
log.exception("Failed to initiate subfetch for %s" % str(n))
results = [
r for r in fetches
if r is not None
]
if not results:
raise Exception("All sub-fetches failed")
return functools.reduce(self.merge, results)
@staticmethod
def merge(results1, results2):
# Ensure results1 is finer than results2
if results1[0][2] > results2[0][2]:
results1, results2 = results2, results1
time_info1, values1 = results1
time_info2, values2 = results2
start1, end1, step1 = time_info1
start2, end2, step2 = time_info2
step = step1 # finest step
start = min(start1, start2) # earliest start
end = max(end1, end2) # latest end
time_info = (start, end, step)
values = []
t = start
while t < end:
# Look for the finer precision value first if available
i1 = (t - start1) // step1
if len(values1) > i1:
v1 = values1[i1]
else:
v1 = None
if v1 is None:
i2 = (t - start2) // step2
if len(values2) > i2:
v2 = values2[i2]
else:
v2 = None
values.append(v2)
else:
values.append(v1)
t += step
return (time_info, values)
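# --- Illustrative sketch, not part of the original module ---
# Demonstrates how merge() keeps the finer step and fills gaps (None values)
# in the finer series from the coarser one. All numbers are made up.
def _example_merge():
    fine = ((0, 40, 10), [1, None, 3, 4])   # (start, end, step), values with a gap at t=10
    coarse = ((0, 40, 20), [10, 20])        # coarser series covering the same range
    time_info, values = MultiReader.merge(fine, coarse)
    assert time_info == (0, 40, 10)         # finest step, earliest start, latest end
    assert values == [1, 10, 3, 4]          # the gap at t=10 was filled from the coarse series
    return time_info, values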
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/models/currency_column.py
|
from __future__ import annotations
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, Union
class CurrencyColumn(AdditionalDataHolder, Parsable):
@property
def additional_data(self,) -> Dict[str, Any]:
"""
Gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Returns: Dict[str, Any]
"""
return self._additional_data
@additional_data.setter
def additional_data(self,value: Dict[str, Any]) -> None:
"""
Sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Args:
value: Value to set for the AdditionalData property.
"""
self._additional_data = value
def __init__(self,) -> None:
"""
Instantiates a new currencyColumn and sets the default values.
"""
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
self._additional_data: Dict[str, Any] = {}
# Specifies the locale from which to infer the currency symbol.
self._locale: Optional[str] = None
# The OdataType property
self._odata_type: Optional[str] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> CurrencyColumn:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: CurrencyColumn
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return CurrencyColumn()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
fields = {
"locale": lambda n : setattr(self, 'locale', n.get_str_value()),
"@odata.type": lambda n : setattr(self, 'odata_type', n.get_str_value()),
}
return fields
@property
def locale(self,) -> Optional[str]:
"""
Gets the locale property value. Specifies the locale from which to infer the currency symbol.
Returns: Optional[str]
"""
return self._locale
@locale.setter
def locale(self,value: Optional[str] = None) -> None:
"""
Sets the locale property value. Specifies the locale from which to infer the currency symbol.
Args:
value: Value to set for the locale property.
"""
self._locale = value
@property
def odata_type(self,) -> Optional[str]:
"""
Gets the @odata.type property value. The OdataType property
Returns: Optional[str]
"""
return self._odata_type
@odata_type.setter
def odata_type(self,value: Optional[str] = None) -> None:
"""
Sets the @odata.type property value. The OdataType property
Args:
value: Value to set for the OdataType property.
"""
self._odata_type = value
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
writer.write_str_value("locale", self.locale)
writer.write_str_value("@odata.type", self.odata_type)
writer.write_additional_data_value(self.additional_data)
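# --- Illustrative usage sketch, not part of the original module ---
# The writer class name below is an assumption; any kiota SerializationWriter
# implementation can be passed to serialize():
#
#   column = CurrencyColumn()
#   column.locale = "en-US"
#   column.odata_type = "#microsoft.graph.currencyColumn"
#   writer = JsonSerializationWriter()   # hypothetical concrete writer
#   column.serialize(writer)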
|
PypiClean
|
/pgn-parser-1.1.0.tar.gz/pgn-parser-1.1.0/pgn_parser/pgn.py
|
import pgn_parser.parser as parser
import re
from collections import OrderedDict, deque
class Actions:
"""Collection of actions for the parser
Functions that will return the desired structure of a node in the parse tree
"""
def make_tag_pair(self, input, start, end, elements):
"""Creates dictionary {Key:"Value"} from the parsed tag pair [Key "Value"]"""
tp = {elements[2].text: elements[5].text}
return tp
def make_tag_pairs(self, input, start, end, elements):
"""Creates an ordered dict of collected tag pairs"""
tps = TagPairs()
for e in elements:
k = [k for k in e.keys()][0]
tps[k] = e[k]
return tps
def make_comment(self, input, start, end, elements):
"""Retrieves the comment str without enclosing braces"""
return elements[1].text.strip('{}')
def make_movetext(self, input, start, end, elements):
"""Creates the full movetext portion as a List of Move's
The tree should have all the necessary data parsed to create a list of
all moves.
Args:
elements[x] = A single move = e
e.move_number = Move number, 1+
e.white = The SAN of white's move
e.wcomment = The comment after whites move
e.black = The SAN of black's move
e.bcomment = The comment after blacks move
e.mcomment = The comment that applies to the whole move
Returns:
A List of Move objects in order:
[Move("1.", "e4", "white comment", "e5", "black comment", "move comment"), etc]
"""
mt = Movetext()
for e in elements:
if type(e.wcomment) == str:
wcomment = e.wcomment
else:
wcomment = ""
mcomment = ""
if type(e.bcomment) == str:
if e.black.text:
bcomment = e.bcomment
else:
bcomment = ""
mcomment = e.bcomment
else:
bcomment = ""
if not mcomment:
if type(e.mcomment) == str:
mcomment = e.mcomment
mt.append(Move(e.move_number.text,
e.white.text,
e.wnags.elements,
wcomment,
e.wvars,
e.black.text,
e.bnags.elements,
bcomment,
e.bvars,
mcomment))
return mt
def make_variation(self, input, start, end, elements):
"""Return just the movetext of a variation"""
return elements[2]
def make_variations(self, input, start, end, elements):
"""Convert a TreeNode of variations to a List of them"""
out = []
for e in elements:
out.append(e)
return out
def make_game(self, input, start, end, elements):
"""Construct the representation of an entire game
Args:
elements = e
e[0]: Tag Pairs
e[2]: Game Comment
e[3]: Movetext
e[4]: Score
Returns:
A Game object, representing a fully parsed pgn file
"""
e = elements
if re.match(r"(1-0|0-1|1/2-1/2|\*)", e[4].text):
s = Score(e[4].text)
else:
s = Score('*')
g = Game(e[0], e[2], e[3], s)
return g
class PGNGameException(Exception):
pass
class TagPairs(OrderedDict):
"""TagPairs is a slightly customised OrderedDict
It is extended in order to make the __str__ return valid pgn formatted tag pairs
"""
def __str__(self):
"""Stringify the OrderedDict to a valid Tag Pairs section of a pgn file
Returns:
A string with each tag pair represented (in the order it was parsed):
[Key "Value"]\n
And an extra newline at the end to begin the movetext section
"""
# Seven tag roster list
strl = ["Event","Site","Date","Round","White","Black","Result"]
out = ""
# We first print in order of STR, then any others
for k in strl:
if k in self.keys():
out += '[{} "{}"]\n'.format(k, self[k])
for k in self.keys():
if k not in strl:
out += '[{} "{}"]\n'.format(k, self[k])
# If there are no tag pairs, the extra newline is not needed
if out:
out += "\n"
return out
class Ply:
"""A Ply is a half a move in a game, either white or blacks side of the move"""
def __init__(self, colour, san, nags=[], comment="", variations=[]):
"""Inits the colour san and any comment of the ply"""
self.colour = colour
self.san = san
self.nags = self.nodes_to_nags(nags)
self.comment = comment
self.variations = variations
def __str__(self):
"""Stringifies to a single pgn ply
Returns:
            <san> {<comment>}
Ncxe4 {white comment}
"""
out = self.san
if self.comment != "":
out += " {" + self.comment.replace('\n', ' ') + "}"
if len(self.nags) > 0:
for n in self.nags:
out += " " + n
for v in self.variations:
out += " (" + str(v).strip(' ') + ")"
return out
def nodes_to_nags(self, nags):
"""Convert input TreeNode's into a list of string nags"""
out = []
for n in nags:
out.append(n.text.strip(' '))
return out
class Move:
"""Representing a move, of 1 or 2 ply along with the move number"""
def __init__(self, move_number, white, wnags, wcomment, wvars, black, bnags, bcomment, bvars, mcomment):
"""Inits the Move x with the white and or black Ply's"""
self.move_number = self.move_no_to_i(move_number)
white = "" if white == ".." else white
self.white = Ply("w", white, wnags, wcomment, wvars)
self.black = Ply("b", black, bnags, bcomment, bvars)
self.comment = mcomment
def __str__(self):
"""Stringifies the Move to legal pgn move
Returns:
1. e4 e5
"""
out = "{}.".format(self.move_number)
if self.white.san != "":
out += " " + str(self.white)
else:
out += ".."
if self.black.san != "":
out += " " + str(self.black)
if self.comment:
out += " {" + self.comment + "}"
return out
def __repr__(self):
return self.__str__()
def move_no_to_i(self, move_number):
"""Turns move number from string to integer"""
no = int(re.match(r"([0-9]+)\.", move_number).groups()[0])
return no
class Movetext(list):
def __str__(self):
"""Stringifies movetext
Turns the list of Move's into a valid movetext section
Returns:
1. e4 {wc} e5 {bc} 2. d4 {wc2} d5 {bc2}
"""
out = ""
for i, m in enumerate(self):
out += " " + str(m) if i > 0 else str(m)
out += " "
return out
def move(self, find):
"""Returns the move number `find`
Args:
find: An integer move number
Returns:
A Move() object of the requested move number
Raises:
PGNGameException is raised if the number cannot be found
"""
first = self[0].move_number
last = self[-1].move_number
fail = "Move number {} is not in this game. First is {}, last is {}.".format(find, first, last)
if first > find:
raise PGNGameException(fail)
for m in self:
if find == m.move_number:
return m
# We haven't found the move
raise PGNGameException(fail)
class Score:
"""Representing the score of a game"""
def __init__(self, score):
if score == "*":
w, b = "*", "*"
else:
w, b = score.split('-')
self.white = w
self.black = b
self.result = str(self)
def __str__(self):
"""Stringifies the score to one of the leg possiblities
Returns:
1-0, 0-1, 1/2-1/2 or *
"""
if self.white == "*":
return "*"
else:
return "{}-{}".format(self.white, self.black)
class Game:
"""Represents an entire game
Attributes:
tag_pairs: The tag pairs section as an ordered dictionary
movetext: The List of all Move's
score: The score of the game
"""
def __init__(self, tag_pairs, gcomment, movetext, score):
"""Initialises the Game given the constituent tag_pairs, movetext and score"""
self.tag_pairs = tag_pairs
if type(gcomment) == str:
self.comment = gcomment
else:
self.comment = ''
self.movetext = movetext
self.score = score
def __str__(self):
"""Stringifies the Game to a valid pgn file"""
out = str(self.tag_pairs)
if self.comment:
out += "{" + self.comment + "} "
out += self.format_body()
return out
def format_body(self):
"""Keeps the line length to 80 chars on output"""
mt = deque(str(self.movetext).split(' ') + [])
out = mt.popleft()
ll = len(out)
while True:
            if len(mt) == 0:
break
n = mt.popleft()
# If the current line length + space + character is less than
# 80 chars long
if ll + len(n) + 1 < 80:
to_add = " " + n
out += " " + n
ll += len(to_add)
else:
out += "\n" + n
ll = len(n)
return out + str(self.score)
def move(self, find):
"""Returns the move number `find`
An alias for self.movetext.move()
"""
return self.movetext.move(find)
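# --- Illustrative usage sketch, not part of the original module ---
# Assuming the canopy-generated grammar exposes parser.parse(text, actions=...),
# a PGN string could be parsed and queried roughly like this:
#
#   game = parser.parse('[Site "?"]\n\n1. e4 e5 {sharp} 2. Nf3 *', actions=Actions())
#   game.tag_pairs["Site"]     # '?'
#   game.move(2).white.san     # 'Nf3'
#   str(game.score)            # '*'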
|
PypiClean
|
/entity_selector_jupyter_widget-0.1.6.tar.gz/entity_selector_jupyter_widget-0.1.6/README.md
|
# entity_selector_jupyter_widget
A Jupyter Widget library for selecting entities in text
## Installation
To install run:
```bash
$ pip install entity_selector_jupyter_widget
$ jupyter nbextension enable --py --sys-prefix entity_selector_jupyter_widget
```
<!--To uninstall run:
```bash
$ pip uninstall entity_selector_jupyter_widget
$ jupyter nbextension disable entity_selector_jupyter_widget
$ jupyter nbextension uninstall entity_selector_jupyter_widget
```-->
For a development installation (requires npm),
```bash
$ git clone https://gitlab.com/randomunrandom/entity_selector_jupyter_widget.git
$ cd entity_selector_jupyter_widget
$ ./dev.sh
### a utility script which deactivates the virtual environment, creates a new one, and launches Jupyter Notebook
```
[A development version is available via test.pypi](https://test.pypi.org/project/entity-selector-jupyter-widget/)
## Usage
See [example.ipynb](https://gitlab.com/randomunrandom/entity_selector_jupyter_widget/blob/master/example.ipynb) for an example of how to use this extension.
## JupyterLab
Due to major differences between the Jupyter Notebook and JupyterLab, this extension will not work in JupyterLab.
## License
It's an open source project under the MIT license.
|
PypiClean
|
/pyHMSA-0.2.0.tar.gz/pyHMSA-0.2.0/pyhmsa/fileformat/xmlhandler/condition/acquisition.py
|
import xml.etree.ElementTree as etree
# Third party modules.
import numpy as np
# Local modules.
from pyhmsa.spec.condition.acquisition import \
(AcquisitionPoint, AcquisitionMultipoint,
AcquisitionRasterLinescan, AcquisitionRasterXY, AcquisitionRasterXYZ)
from pyhmsa.spec.condition.specimenposition import SpecimenPosition
from pyhmsa.fileformat.xmlhandler.condition.condition import _ConditionXMLHandler
from pyhmsa.util.parameter import ObjectAttribute
# Globals and constants variables.
class AcquisitionPointXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(AcquisitionPoint, version)
class AcquisitionMultipointXMLHandler(_ConditionXMLHandler):
def __init__(self, version):
super().__init__(AcquisitionMultipoint, version)
self._attrib_specimen_position = \
ObjectAttribute(SpecimenPosition, xmlname='SpecimenPosition')
def parse(self, element):
obj = super().parse(element)
for subelement in element.findall('./Positions/SpecimenPosition'):
position = \
self._parse_object_attribute(subelement,
self._attrib_specimen_position)
obj.positions.append(position)
subelement = element.find('PointCount')
count = self._parse_numerical_attribute(subelement)
assert len(obj.positions) == count
return obj
def convert(self, obj):
element = super().convert(obj)
value = np.uint32(len(obj.positions))
attrib = type('MockAttribute', (object,), {'xmlname': 'PointCount'})
subelements = self._convert_numerical_attribute(value, attrib)
element.extend(subelements)
subelement = etree.Element('Positions')
for position in obj.positions:
subsubelements = \
self._convert_object_attribute(position,
self._attrib_specimen_position)
subelement.extend(subsubelements)
element.append(subelement)
return element
class _AcquisitionRasterXMLHandler(_ConditionXMLHandler):
def __init__(self, clasz, version):
super().__init__(clasz, version)
self._attrib_specimen_position = \
ObjectAttribute(SpecimenPosition, xmlname='SpecimenPosition')
def _parse_positions(self, element):
positions = {}
for subelement in element.findall('SpecimenPosition'):
location = subelement.attrib['Name']
position = \
self._parse_object_attribute(subelement,
self._attrib_specimen_position)
positions[location] = position
return positions
def parse(self, element):
obj = super().parse(element)
obj.positions.update(self._parse_positions(element))
return obj
def _convert_positions(self, obj):
elements = []
for location, position in obj.positions.items():
subelement = \
self._convert_object_attribute(position,
self._attrib_specimen_position)[0]
subelement.set('Name', location)
elements.append(subelement)
return elements
def convert(self, obj):
element = super().convert(obj)
element.extend(self._convert_positions(obj))
return element
class AcquisitionRasterLinescanXMLHandler(_AcquisitionRasterXMLHandler):
def __init__(self, version):
super().__init__(AcquisitionRasterLinescan, version)
class AcquisitionRasterXYXMLHandler(_AcquisitionRasterXMLHandler):
def __init__(self, version):
super().__init__(AcquisitionRasterXY, version)
class AcquisitionRasterXYZXMLHandler(_AcquisitionRasterXMLHandler):
def __init__(self, version):
super().__init__(AcquisitionRasterXYZ, version)
|
PypiClean
|
/macedon-0.11.0-py3-none-any.whl/pytermor/common.py
|
from __future__ import annotations
import enum
import inspect
import time
import typing as t
import logging
from functools import update_wrapper
logger = logging.getLogger(__package__)
logger.addHandler(logging.NullHandler())
### catching library logs "from the outside":
# logger = logging.getLogger('pytermor')
# handler = logging.StreamHandler()
# fmt = '[%(levelname)5.5s][%(name)s.%(module)s] %(message)s'
# handler.setFormatter(logging.Formatter(fmt))
# logger.addHandler(handler)
# logger.setLevel(logging.WARNING)
########
CDT = t.TypeVar("CDT", int, str)
"""
:abbr:`CDT (Color descriptor type)` represents a RGB color value. Primary handler
is `resolve_color()`. Valid values include:
- *str* with a color name in any form distinguishable by the color resolver;
the color lists can be found at: `guide.ansi-presets` and `guide.es7s-colors`;
- *str* starting with a "#" and consisting of 6 more hexadecimal characters, case
insensitive (RGB regular form), e.g.: "#0B0CCA";
- *str* starting with a "#" and consisting of 3 more hexadecimal characters, case
insensitive (RGB short form), e.g.: "#666";
- *int* in a [0; 0xFFFFFF] range.
"""
FT = t.TypeVar("FT", int, str, "IColor", "Style", None)
"""
:abbr:`FT (Format type)` is a style descriptor. Used as a shortcut precursor for actual
styles. Primary handler is `make_style()`.
"""
RT = t.TypeVar("RT", str, "IRenderable")
"""
:abbr:`RT (Renderable type)` includes regular *str*\\ s as well as `IRenderable`
implementations.
"""
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def measure(msg: str = "Done"):
def wrapper(origin: F) -> F:
def new_func(*args, **kwargs):
before_s = time.time_ns() / 1e9
result = origin(*args, **kwargs)
after_s = time.time_ns() / 1e9
from . import PYTERMOR_DEV
if PYTERMOR_DEV and not kwargs.get("no_log", False):
from . import format_si, dump, logger
logger.debug(msg + f" in {format_si((after_s - before_s), 's')}")
logger.log(level=5, msg=dump(result, "Dump"))
return result
return update_wrapper(t.cast(F, new_func), origin)
return wrapper
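# --- Illustrative sketch, not part of the original module ---
# measure() is a decorator factory; the function name below is hypothetical.
# The timing is only logged when PYTERMOR_DEV is set and no_log is not passed.
#
#   @measure("Rendered")
#   def render(text: str) -> str:
#       return text.upper()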
class ExtendedEnum(enum.Enum):
@classmethod
def list(cls):
return list(map(lambda c: c.value, cls))
@classmethod
def dict(cls):
return dict(map(lambda c: (c, c.value), cls))
class Align(str, ExtendedEnum):
"""
Align type.
"""
LEFT = "<"
""" """
RIGHT = ">"
""" """
CENTER = "^"
""" """
@classmethod
def resolve(cls, input: str | Align | None, fallback: Align = LEFT):
if input is None:
return fallback
if isinstance(input, cls):
return input
for k, v in cls.dict().items():
if v == input:
return k
try:
return cls[input.upper()]
except KeyError:
logger.warning(f"Invalid align name: {input}")
return fallback
class UserCancel(Exception):
pass
class UserAbort(Exception):
pass
class LogicError(Exception):
pass
class ConflictError(Exception):
pass
class ArgTypeError(Exception):
""" """
def __init__(self, actual_type: t.Type, arg_name: str = None, fn: t.Callable = None):
arg_name_str = f'"{arg_name}"' if arg_name else "argument"
# @todo suggestion
# f = inspect.currentframe()
# fp = f.f_back
# fn = getattr(fp.f_locals['self'].__class__, fp.f_code.co_name)
# argspec = inspect.getfullargspec(fn)
if fn is None:
try:
stacks = inspect.stack()
method_name = stacks[0].function
outer_frame = stacks[1].frame
fn = outer_frame.f_locals.get(method_name)
except Exception:
pass
if fn is not None:
signature = inspect.signature(fn)
param_desc = signature.parameters.get(arg_name, None)
expected_type = '?'
if param_desc:
expected_type = param_desc.annotation
actual_type = actual_type.__qualname__
msg = (
f"Expected {arg_name_str} type: <{expected_type}>, got: <{actual_type}>"
)
else:
msg = f"Unexpected {arg_name_str} type: <{actual_type}>"
super().__init__(msg)
class ArgCountError(Exception):
""" """
def __init__(self, actual: int, *expected: int) -> None:
expected_str = ", ".join(str(e) for e in expected)
msg = (
f"Invalid arguments amount, expected one of: ({expected_str}), got: {actual}"
)
super().__init__(msg)
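# --- Illustrative sketch, not part of the original module ---
# Align.resolve() accepts an Align member, the literal format character, or the
# member name (case-insensitive); anything else falls back and logs a warning:
#
#   Align.resolve(">")        # Align.RIGHT, matched by value
#   Align.resolve("center")   # Align.CENTER, matched by name
#   Align.resolve("bogus")    # returns the fallback and logs "Invalid align name"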
|
PypiClean
|
/clipt-1.0.15.tar.gz/clipt-1.0.15/README.rst
|
About
-----
Clipt (command line interface plotting tool) uses clasp and matplotlib
to plot data files directly from the command line with an extensive set
of options. See the clasp documentation for information about using
config files.
https://clipt.readthedocs.io/
Clipt was built using clasp: https://clasp.readthedocs.io/
which in turn depends on click: https://palletsprojects.com/p/click/
Installation
------------
::
pip install clipt
Usage
-----
see::
cl_plot --help
Autocomplete
------------
::
_CL_PLOT_COMPLETE=source cl_plot > clipt_complete.sh
In your .bash_profile::
source path/to/bin/clipt_complete.sh
Source Code
-----------
* clipt: https://bitbucket.org/stephenwasilewski/clipt
Licence
-------
| Copyright (c) 2018 Stephen Wasilewski
| This Source Code Form is subject to the terms of the Mozilla Public
| License, v. 2.0. If a copy of the MPL was not distributed with this
| file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
PypiClean
|
/hyo2.qc-3.5.12-cp38-cp38-win_amd64.whl/hyo2/qc/survey/scan/checks.py
|
import datetime
import logging
import os
from typing import List, Optional, TYPE_CHECKING
from hyo2.qc.common.s57_aux import S57Aux
if TYPE_CHECKING:
from hyo2.qc.survey.scan.flags import Flags
from hyo2.s57.s57 import S57Record10
from hyo2.abc.app.report import Report
logger = logging.getLogger(__name__)
class Checks:
survey_areas = {
"Great Lakes": 0,
"Pacific Coast": 1,
"Atlantic Coast": 2,
}
def __init__(self, flags: 'Flags', report: 'Report', all_features: List['S57Record10'],
survey_area: int, version: str,
sorind: Optional[str], sordat: Optional[str],
profile: int, use_mhw: bool, mhw_value: float,
check_image_names: bool, multimedia_folder: Optional[str]):
self.flags = flags
self.report = report
self.all_fts = all_features # type: List['S57Record10']
self.no_carto_fts = list() # type: List['S57Record10']
self.new_updated_fts = list() # type: List['S57Record10']
self.assigned_fts = list() # type: List['S57Record10']
self.new_deleted_fts = list() # type: List['S57Record10']
self.survey_area = survey_area
self.version = version
self.sorind = sorind
self.sordat = sordat
self.profile = profile
self.use_mhw = use_mhw
self.mhw_value = mhw_value
self.check_image_names = check_image_names
self.multimedia_folder = multimedia_folder
self.character_limit = 255
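        # NOTE: the free-text character limit is only enforced for the 2021 HSSD
        # (see file_consistency below)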
# shared functions
def _check_features_for_attribute(self, objects: List['S57Record10'], attribute: str, possible: bool = False) \
-> List[list]:
"""Check if the passed features have the passed attribute"""
flagged = list()
for obj in objects:
# do the test
has_attribute = False
for attr in obj.attributes:
if attr.acronym == attribute:
has_attribute = True
# check passed
if has_attribute:
continue
if possible:
# add to the flagged report
self.report += 'Warning: Found missing %s at (%.7f, %.7f)' % (
obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "warning: missing %s" % attribute,
self.report.cur_section())
else:
# add to the flagged report
self.report += 'Found missing %s at (%.7f, %.7f)' % (obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "missing %s" % attribute, self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
def _check_features_without_attribute(self, objects: List['S57Record10'], attribute: str, possible: bool = False) \
-> List[list]:
"""Check if the passed features have the passed attribute"""
flagged = list()
for obj in objects:
# do the test
has_attribute = False
for attr in obj.attributes:
if attr.acronym == attribute:
has_attribute = True
# check passed
if not has_attribute:
continue
if possible:
# add to the flagged report
self.report += 'Warning: Found %s at (%.7f, %.7f)' % (obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "warning: containing %s (?)" % attribute,
self.report.cur_section())
else:
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f)' % (obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "containing %s" % attribute,
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
def _flag_features_with_attribute_value(self, objects: List['S57Record10'],
attribute: str, values_to_flag: List[str],
check_attrib_existence: bool = False, possible: bool = False) -> List[list]:
"""Flag the passed features if they have the passed values for the passed attribute"""
flagged = list()
for obj in objects:
# do the test
has_attribute_with_value = False
has_attribute = False
for attr in obj.attributes:
acronym = attr.acronym.strip()
if acronym == attribute:
has_attribute = True
if attr.value in values_to_flag:
has_attribute_with_value = True
if check_attrib_existence:
if not has_attribute:
if possible:
# add to the flagged report
self.report += 'Warning: Found missing attribute %s at (%.7f, %.7f)' \
% (obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y,
"warning: missing attribute: %s" % attribute, self.report.cur_section())
else:
# add to the flagged report
self.report += 'Found missing attribute %s at (%.7f, %.7f)' \
% (obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "missing attribute: %s" % attribute,
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
# check passed
if not has_attribute_with_value:
continue
# add to the flagged feature list
if possible:
# add to the flagged report
self.report += 'Warning: Found invalid/prohibited attribute value for %s at (%.7f, %.7f)' \
% (obj.acronym, obj.centroid.x, obj.centroid.y)
self.flags.append(obj.centroid.x, obj.centroid.y,
"warning: invalid/prohibited value for %s" % attribute, self.report.cur_section())
else:
# add to the flagged report
self.report += 'Found invalid/prohibited attribute value for %s at (%.7f, %.7f)' \
% (obj.acronym, obj.centroid.x, obj.centroid.y)
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid/prohibited value for %s" % attribute,
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
# ### ALL FEATURES ###
def file_consistency(self):
self.report += "Checks for feature file consistency [SECTION]"
self._check_all_features_for_redundancy_and_geometry()
if self.version in ["2021"]:
# New Requirement in 2021 HSSD character limit for all fields with free text strings
self.report += "Features with text input fields exceeding %d characters [CHECK]" % self.character_limit
self.flags.all_fts.chars_limit = self._check_character_limit(objects=self.all_fts,
attributes=['images', 'invreq', 'keywrd',
'onotes', 'recomd', 'remrks'],
character_limit=self.character_limit)
def _check_all_features_for_redundancy_and_geometry(self) -> None:
"""Function that identifies the presence of duplicated feature looking at their geometries"""
logger.debug('Checking for feature redundancy...')
self.report += "Redundant features [CHECK]"
tmp_features = list()
features = list()
for ft in self.all_fts:
            # skip if the feature has no position
if (len(ft.geo2s) == 0) and (len(ft.geo3s) == 0):
# logger.debug("removing: %s" % ft)
continue
tmp_features.append(ft)
# get the attributes as a long string
attrs_str = str()
for attr in ft.attributes:
attrs_str += "%s=%s;" % (attr.acronym.strip(), attr.value)
# get the point positions as sorted list of string
geo2x = list()
geo2y = list()
if len(ft.geo2s) > 0:
for geo2 in ft.geo2s:
geo2x.append("%.7f" % geo2.x)
geo2y.append("%.7f" % geo2.y)
elif len(ft.geo3s) > 0:
for geo3 in ft.geo3s:
geo2x.append("%.7f" % geo3.x)
geo2y.append("%.7f" % geo3.y)
geo2x.sort()
geo2y.sort()
# test for redundancy
# logger.info("key: %s" % [ft.acronym, attrs_str, geo2x, geo2y])
i = features.count([ft.acronym, attrs_str, geo2x, geo2y])
if i > 0: # we have a redundancy
if ft.acronym in ["LIGHTS", ]:
# add to the flagged report
self.report += 'Warning: Redundant %s at (%.7f, %.7f)' % (ft.acronym, ft.centroid.x, ft.centroid.y)
# add to the flagged feature list
self.flags.append(ft.centroid.x, ft.centroid.y, "warning: redundant %s" % ft.acronym,
self.report.cur_section())
else:
# add to the flagged report
self.report += 'Redundant %s at (%.7f, %.7f)' % (ft.acronym, ft.centroid.x, ft.centroid.y)
# add to the flagged feature list
self.flags.append(ft.centroid.x, ft.centroid.y, "redundant %s" % ft.acronym,
self.report.cur_section())
self.flags.all_fts.redundancy.append([ft.acronym, geo2x, geo2y])
else:
                # populate the feature list
features.append([ft.acronym, attrs_str, geo2x, geo2y])
if len(self.flags.all_fts.redundancy) == 0:
self.report += "OK"
self.all_fts = tmp_features # to remove features without geometry
# ### ASSIGNED FEATURES ###
def assigned_features(self):
self.report += "Checks for assigned features [SECTION]"
# Isolate only features that are assigned
self.assigned_fts = S57Aux.select_by_attribute_value(objects=self.all_fts, attribute='asgnmt',
value_filter=['2', ])
# Ensure assigned features have descrp
self.report += "Assigned features with empty or missing mandatory attribute description [CHECK]"
self.flags.ass_fts.description = self._flag_features_with_attribute_value(objects=self.assigned_fts,
attribute='descrp',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure assigned features have remrks
self.report += "Assigned features missing mandatory attribute remarks [CHECK]"
self.flags.ass_fts.remarks = self._check_features_for_attribute(objects=self.assigned_fts, attribute='remrks')
# ### NEW OR UPDATED FEATURES ###
def new_or_updated_features(self):
self.report += "Checks for new/updated features [SECTION]"
# Remove carto features
self.no_carto_fts = S57Aux.filter_by_object(objects=self.all_fts, object_filter=['$AREAS', '$LINES', '$CSYMB',
'$COMPS', '$TEXTS'])
# Isolate only features with descrp = New or Update
self.new_updated_fts = S57Aux.select_by_attribute_value(objects=self.no_carto_fts, attribute='descrp',
value_filter=['1', '2', ])
# Ensure new or updated features have SORIND
self.report += "New or Updated features (excluding carto notes) missing mandatory attribute SORIND [CHECK]"
self.flags.new_updated_fts.sorind = self._check_features_for_attribute(
objects=self.new_updated_fts,
attribute='SORIND')
# Ensure new or updated features have valid SORIND
self.report += "New or Updated features (excluding carto notes) with invalid SORIND [CHECK]"
if self.sorind is None:
self.flags.new_updated_fts.sorind_invalid = self._check_features_for_valid_sorind(
objects=self.new_updated_fts,
check_space=False)
else:
self.flags.new_updated_fts.sorind_invalid = self._check_features_for_match_sorind(
objects=self.new_updated_fts)
# Ensure new or updated features have SORDAT
self.report += "New or Updated features (excluding carto notes) missing mandatory attribute SORDAT [CHECK]"
self.flags.new_updated_fts.sordat = self._check_features_for_attribute(
objects=self.new_updated_fts,
attribute='SORDAT')
# Ensure new or updated features have valid SORDAT
self.report += "New or Updated features (excluding carto notes) with invalid SORDAT [CHECK]"
if self.sordat is None:
self.flags.new_updated_fts.sordat_invalid = self._check_features_for_valid_sordat(self.new_updated_fts)
else:
self.flags.new_updated_fts.sordat_invalid = self._check_features_for_match_sordat(self.new_updated_fts)
# Select all the new features with VALSOU attribute
if self.use_mhw:
new_valsous = S57Aux.select_by_attribute(
objects=self.new_updated_fts,
attribute='VALSOU')
self.report += "New or Updated VALSOU features with invalid WATLEV [CHECK]"
self.flags.new_updated_fts.valsous_watlev = self._check_features_for_valid_watlev(
objects=new_valsous)
new_elevats = S57Aux.select_by_attribute(
objects=self.new_updated_fts,
attribute='ELEVAT')
self.report += "Invalid New or Updated ELEVAT features [CHECK]"
self.flags.new_updated_fts.elevat = self._check_features_for_valid_elevat(
objects=new_elevats)
# Select all the new features with valsou attribute and check for valid quasou.
new_valsous = S57Aux.select_by_attribute(objects=self.new_updated_fts, attribute='VALSOU')
self.report += "New or Updated VALSOU features with invalid QUASOU [CHECK]"
self.flags.new_updated_fts.valsous_quasou = self._check_features_for_valid_quasou(new_valsous)
@classmethod
def check_sorind(cls, value: str, check_space: bool = True) -> bool:
tokens = value.split(',')
# logger.debug("%s" % tokens)
if len(value.splitlines()) > 1:
logger.info('too many attribute lines')
return False
elif len(tokens) != 4:
logger.info('invalid number of comma-separated fields')
return False
elif (tokens[0][0] == " " or tokens[1][0] == " " or tokens[2][0] == " " or tokens[3][0] == " ") \
and check_space:
logger.info('invalid space after comma field-separator')
return False
elif tokens[0] != "US":
logger.info('first field should be "US", it is: "%s"' % tokens[0])
return False
elif tokens[1] != "US":
logger.info('second field should be "US", it is: "%s"' % tokens[1])
return False
elif tokens[2] != "graph":
logger.info('third field should be "graph", it is: "%s"' % tokens[2])
return False
if len(tokens[3]) != 6:
            logger.info('issue with fourth field length: %d (it should be 6)' % len(tokens[3]))
return False
return True
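    # Illustrative (hypothetical) SORIND values for the check above:
    #   "US,US,graph,H13039"  -> accepted (4 fields, 6-character final field)
    #   "US, US,graph,H13039" -> rejected (space after the comma separator)
    #   "US,US,graph"         -> rejected (only 3 comma-separated fields)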
def _check_features_for_valid_sorind(self, objects: List['S57Record10'],
check_space: bool = True) -> List[list]:
"""Check if the passed features have valid SORIND"""
flagged = list()
for obj in objects:
# do the test
is_valid = True
for attr in obj.attributes:
if attr.acronym == "SORIND":
is_valid = self.check_sorind(attr.value, check_space)
break
# check passed
if is_valid:
continue
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f) with invalid SORIND' \
% (obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid SORIND", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
def _check_features_for_match_sorind(self, objects: List['S57Record10']) -> List[list]:
"""Check if the passed features have valid SORIND"""
flagged = list()
for obj in objects:
# do the test
is_valid = True
for attr in obj.attributes:
if attr.acronym == "SORIND":
is_valid = attr.value == self.sorind
break
# check passed
if is_valid:
continue
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f) with invalid SORIND' % (
obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid SORIND", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
@classmethod
def check_sordat(cls, value: str) -> bool:
# logger.debug("%s" % attr.value)
cast_issue = False
timestamp = None
now = None
# noinspection PyBroadException
try:
timestamp = datetime.datetime(year=int(value[0:4]),
month=int(value[4:6]),
day=int(value[6:8]))
now = datetime.datetime.now()
except Exception:
cast_issue = True
if cast_issue:
logger.info('invalid date format: %s' % value)
return False
elif len(value) != 8:
logger.info('the date format is YYYYMMDD, invalid number of digits: %d' % len(value))
return False
elif timestamp > now:
if (timestamp.year > now.year) or (timestamp.year == now.year and timestamp.month > now.month):
                logger.info('the date in use is in the future: %s' % value)
return False
return True
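    # Illustrative (hypothetical) SORDAT values for the check above:
    #   "20190315"  -> accepted (valid YYYYMMDD date in the past)
    #   "201903151" -> rejected (9 digits; the format is YYYYMMDD)
    #   "20191315"  -> rejected (month 13 cannot be cast to a date)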
def _check_features_for_valid_sordat(self, objects: List['S57Record10']) -> List[list]:
"""Check if the passed features have matching SORDAT"""
flagged = list()
for obj in objects:
# do the test
is_valid = True
for attr in obj.attributes:
if attr.acronym == "SORDAT":
is_valid = self.check_sordat(attr.value)
break
# check passed
if is_valid:
continue
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f) with invalid SORDAT' % (
obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid SORDAT", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
def _check_features_for_match_sordat(self, objects: List['S57Record10']) -> List[list]:
"""Check if the passed features have matching SORDAT"""
flagged = list()
for obj in objects:
# do the test
is_valid = True
for attr in obj.attributes:
if attr.acronym == "SORDAT":
is_valid = attr.value == self.sordat
break
# check passed
if is_valid:
continue
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f) with invalid SORDAT' % (
obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid SORDAT", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
def _check_features_for_valid_watlev(self, objects: List['S57Record10']) -> List[list]:
"""Check if the passed features have valid WATLEV"""
# logger.debug("checking for invalid WATLEV and VALSOU ...")
flagged = list()
for obj in objects:
# do the test
is_valid = True
is_invalid_for_valsou = False
watlev = None
valsou = None
for attr in obj.attributes:
if attr.acronym == "WATLEV":
try:
watlev = int(attr.value)
except ValueError:
logger.warning("issue with WATLEV value:'%s' at position: %s, %s" %
(attr.value, obj.centroid.x, obj.centroid.y))
elif attr.acronym == "VALSOU":
try:
valsou = float(attr.value)
except ValueError:
logger.warning("issue with VALSOU value:'%s' at position: %s, %s" %
(attr.value, obj.centroid.x, obj.centroid.y))
if (watlev is not None) and (valsou is not None):
break
if (watlev is None) or (valsou is None):
logger.debug("unable to find WATLEV or VALSOU values at position: %s, %s" %
(obj.centroid.x, obj.centroid.y))
continue
if self.survey_area == self.survey_areas["Great Lakes"]:
if valsou < - 0.1:
is_valid = False
is_invalid_for_valsou = True
else:
if valsou <= -0.1:
if watlev != 4: # Covers & Uncovers
is_valid = False
elif valsou <= 0.1:
if watlev != 5: # Awash
is_valid = False
else:
if watlev != 3: # Always Underwater
is_valid = False
else:
if valsou < (-self.mhw_value - 0.1):
is_valid = False
is_invalid_for_valsou = True
else:
if valsou <= -0.1:
if watlev != 4: # Covers & Uncovers
is_valid = False
elif valsou <= 0.1:
if watlev != 5: # Awash
is_valid = False
else:
if watlev != 3: # Always Underwater
is_valid = False
# check passed
if is_valid:
continue
# add to the flagged feature list and to the flagged report
if is_invalid_for_valsou:
self.report += 'Found %s at (%.7f, %.7f) with invalid VALSOU (islet ?)' % (
obj.acronym, obj.centroid.x, obj.centroid.y)
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid VALSOU (islet ?)", self.report.cur_section())
else:
self.report += 'Found %s at (%.7f, %.7f) with invalid WATLEV' % (
obj.acronym, obj.centroid.x, obj.centroid.y)
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid WATLEV", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
# logger.debug("checking for invalid WATLEV and VALSOU -> flagged: %d" % len(flagged))
return flagged
def _check_features_for_valid_elevat(self, objects: List['S57Record10']) -> List[list]:
"""Check if the passed features have valid ELEVAT"""
# logger.debug("checking for invalid ELEVAT ...")
flagged = list()
for obj in objects:
elevat = None
for attr in obj.attributes:
if attr.acronym == "ELEVAT":
elevat = float(attr.value)
if elevat is not None:
break
            # guard against a missing ELEVAT value before comparing
            if (elevat is None) or (elevat > +0.1):
continue
self.report += 'Found %s at (%.7f, %.7f) with invalid ELEVAT' % (
obj.acronym, obj.centroid.x, obj.centroid.y)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid ELEVAT", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
# logger.debug("checking for invalid ELEVAT -> flagged: %d" % len(flagged))
return flagged
def _check_features_for_valid_quasou(self, objects: List['S57Record10']) -> List[list]:
"""Check if the passed features have valid QUASOU"""
# logger.debug("checking for invalid QUASOU ...")
# list the allowable combinations of tecsous and quasous
allowable = [['3', '6'], ['4', '6'], ['5', '6'], ['12', '6'], ['2', '9']]
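        # Each entry is an allowed [TECSOU, QUASOU] pair of S-57 attribute codes,
        # compared index-by-index against the comma-separated values in the loop below.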
flagged = list()
for obj in objects:
tecsou = None
quasou = None
# check for the TECSOU and QUASOU attributes
for attr in obj.attributes:
if attr.acronym == "TECSOU":
tecsou = attr.value
elif attr.acronym == "QUASOU":
quasou = attr.value
if (tecsou is not None) and (quasou is not None):
break
# TODO: if TECSOU is not available?
if tecsou is None:
                self.report += 'Could not verify QUASOU for %s at (%.7f, %.7f) because it is missing TECSOU' \
% (obj.acronym, obj.centroid.x, obj.centroid.y)
# logger.debug("checking for TECSOU...")
self.flags.append(obj.centroid.x, obj.centroid.y, 'missing TECSOU', self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
elif tecsou in ['1', '7', '10']: # VBES, Lidar, Structure from Motion
if quasou is not None:
# add to the flagged report
self.report += "Warning: found %s at (%.7f, %.7f) has TECSOU '%s' without blank QUASOU" \
% (obj.acronym, obj.centroid.x, obj.centroid.y, tecsou)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y,
"warning: TECSOU requires blank QUASOU: %s" % (tecsou,),
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
# if QUASOU is not available?
if quasou is None:
logger.debug("Checking for QUASOU...")
self.report += 'Found %s at (%.7f, %.7f) is missing QUASOU required for TECSOU' \
% (obj.acronym, obj.centroid.x, obj.centroid.y)
self.flags.append(obj.centroid.x, obj.centroid.y, 'missing QUASOU required for TECSOU',
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
# splitting using ','
tecsou = tecsou.split(',')
quasou = quasou.split(',')
# if the list of QUASOU has different length than the list of TECSOU ?
if len(tecsou) != len(quasou):
self.report += 'Warning: found %s at (%.7f, %.7f) contains mismatch in the number of TECSOU and ' \
'QUASOU attributes' % (obj.acronym, obj.centroid.x, obj.centroid.y)
self.flags.append(obj.centroid.x, obj.centroid.y, 'warning: mismatch in the number of TECSOU and '
'QUASOU attributes', self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
for i in range(len(tecsou)):
check = [tecsou[i], quasou[i]]
if check in allowable:
continue
# add to the flagged report
self.report += 'Warning: found %s at (%.7f, %.7f) has prohibited TECSOU/QUASOU combination %s' \
% (obj.acronym, obj.centroid.x, obj.centroid.y, check)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y,
"warning: TECSOU and QUASOU combination is not allowed %s" % (check,),
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
break
if len(flagged) == 0:
self.report += "OK"
return flagged
# ### NEW OR DELETED FEATURES ###
def new_or_deleted_features(self):
self.report += "Checks for new/deleted features [SECTION]"
# Isolate features with descrp = New or Delete
self.new_deleted_fts = S57Aux.select_by_attribute_value(objects=self.all_fts, attribute='descrp',
value_filter=['1', '3'])
# Ensure new or deleted features have remrks
self.report += "New/Delete features missing mandatory attribute remarks [CHECK]"
self.flags.new_deleted_fts.remarks = self._check_features_for_attribute(objects=self.new_deleted_fts,
attribute='remrks')
# Ensure new or deleted features have recomd
self.report += "New/Delete features missing mandatory attribute recommendation [CHECK]"
self.flags.new_deleted_fts.recommend = self._check_features_for_attribute(objects=self.new_deleted_fts,
attribute='recomd')
# ### IMAGES ###
def images(self):
self.report += "Checks for features with images [SECTION]"
        # Check all features for valid image paths
self.flags.images.invalid_paths = self._check_features_for_images_path(objects=self.all_fts)
if not self.check_image_names:
return
# Isolate new or updated seabed areas (points + lines & areas)
sbdare = S57Aux.select_by_object(objects=self.new_updated_fts, object_filter=['SBDARE', ])
sbdare_points = S57Aux.select_only_points(sbdare)
sbdare_lines_areas = S57Aux.select_lines_and_areas(sbdare)
non_sbdare_features = S57Aux.filter_by_object(objects=self.all_fts, object_filter=['SBDARE', ])
if self.version in ["2019", ]:
self.report += "Invalid IMAGE name per HTD 2018-4/5 [CHECK]"
self.flags.images.invalid_names = \
self._check_sbdare_images_per_htd_2018_4(objects=sbdare_points) + \
self._check_nonsbdare_images_per_htd_2018_5(objects=sbdare_lines_areas + non_sbdare_features)
elif self.version in ["2020", ]:
self.report += "Invalid IMAGE name per HSSD 2020 [CHECK]"
self.flags.images.invalid_names = \
self._check_sbdare_images_per_htd_2018_4(objects=sbdare_points) + \
self._check_nonsbdare_images_per_htd_2018_5(objects=sbdare_lines_areas + non_sbdare_features)
elif self.version in ["2021"]:
self.report += "Invalid IMAGE name per HSSD 2021 [CHECK]"
self.flags.images.invalid_names = \
self._check_sbdare_images_per_hssd_2021(objects=sbdare_points) + \
self._check_nonsbdare_images_per_hssd_2021(
objects=sbdare_points + non_sbdare_features + sbdare_lines_areas)
if len(self.flags.images.invalid_names) == 0:
self.report += "OK"
def _check_features_for_images_path(self, objects: List['S57Record10']) -> List[list]:
        # Check that listed images use the correct separator per HSSD and are present in the multimedia folder
# logger.debug("checking for invalid IMAGES ...")
self.report += "Images are not present in the Multimedia folder [CHECK]"
flagged = list()
for obj in objects:
images = None
for attr in obj.attributes:
if attr.acronym == "images":
images = attr.value
if images is None:
continue
images_list = [image.upper() for image in images.split(";")]
for image_filename in images_list:
if "," in image_filename:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid separator: %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid separator", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if self.multimedia_folder is None:
self.report += 'Found %s at (%.7f, %.7f) with missing images folder: %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "missing images folder",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if images_list.count(image_filename) > 1:
self.report += 'Found %s at (%.7f, %.7f) with a list of images without unique name: %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "listed image names not unique",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
img_path = os.path.join(self.multimedia_folder, image_filename.strip())
if not os.path.exists(img_path):
self.report += 'Found %s at (%.7f, %.7f) with invalid path to image: %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid path", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if len(flagged) == 0:
self.report += "OK"
# logger.debug("checking for invalid images -> flagged: %d" % len(flagged))
return flagged
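    # Illustrative (hypothetical) 'images' attribute values for the check above:
    #   "IMG_0001.JPG;IMG_0002.JPG" -> accepted separator (each file must also exist
    #                                  in the multimedia folder)
    #   "IMG_0001.JPG,IMG_0002.JPG" -> flagged ("invalid separator")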
def _check_nonsbdare_images_per_htd_2018_5(self, objects: List['S57Record10']) -> List[list]:
""""Check if the passed features have valid image name per HTD 2018-5"""
# logger.debug("checking for invalid IMAGE NAMES per HTD 2018-5...")
flagged = list()
for obj in objects:
images = None
for attr in obj.attributes:
if attr.acronym == "images":
images = attr.value
if images is None:
continue
images_list = [image.upper() for image in images.split(";")]
for image_filename in images_list:
image_filename = os.path.splitext(image_filename)[0]
tokens = image_filename.split("_")
if len(tokens) not in [2, 3]:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid filenaming (nr. of "_"): %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid filenaming", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if len(tokens[0]) != 6:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid survey in filename: %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid survey in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if len(tokens[1]) != 15:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid FIDN+FIDS in filename: %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid FIDN+FIDS in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
# logger.debug("checking for invalid image names per HTD 2018-5 -> flagged %d" % len(flagged))
return flagged
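    # Illustrative (hypothetical) non-SBDARE image name per HTD 2018-5:
    #   "H13039_123456789012345.JPG" -> 6-character survey + 15-character FIDN+FIDS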
def _check_sbdare_images_per_htd_2018_4(self, objects: List['S57Record10']) -> List[list]:
""""Check if the passed features have valid image name per HTD 2018-4"""
# logger.debug("checking for invalid IMAGE NAMES per HTD 2018-4...")
flagged = list()
for obj in objects:
images = None
for attr in obj.attributes:
if attr.acronym == "images":
images = attr.value
if images is None:
continue
images_list = [image.upper() for image in images.split(";")]
for image_filename in images_list:
image_filename = os.path.splitext(image_filename)[0]
tokens = image_filename.split("_")
if len(tokens) != 3:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid filenaming (nr. of "_"): %s ' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid filenaming", self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if len(tokens[0]) != 6:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid survey in filename: %s ' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid survey in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if tokens[1] != "SBDARE":
self.report += 'Found %s at (%.7f, %.7f) with "_SBDARE_" not present in filename: %s ' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "'_SBDARE_' not present in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if self.version in ["2019", ]:
if len(tokens[2]) != 15:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid timestamp in filename: ' \
'%s ' % (obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid timestamp in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if self.version in ["2020", ]:
if len(tokens[2]) not in [14, 15]:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid timestamp in filename: ' \
'%s ' % (obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid timestamp in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
# logger.debug("checking for invalid image names per HTD 2018-4 -> flagged: %d" % len(flagged))
return flagged
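    # Illustrative (hypothetical) SBDARE image name per HTD 2018-4:
    #   "H13039_SBDARE_20190512T123456.JPG" -> 6-character survey, "SBDARE", and a
    #   15-character timestamp (14 or 15 characters accepted for the 2020 version)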
def _check_sbdare_images_per_hssd_2021(self, objects: List['S57Record10']) -> List[list]:
""""Check if the passed features have valid image name per HSSD"""
# logger.debug("checking for invalid IMAGE NAMES per HSSD...")
flagged = list()
for obj in objects:
images = None
for attr in obj.attributes:
if attr.acronym == "images":
images = attr.value
if images is None:
continue
images_list = [image.upper() for image in images.split(";")]
for image_filename in images_list:
image_filename = os.path.splitext(image_filename)[0]
tokens = image_filename.split("_")
if len(tokens) != 3:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid filenaming (nr. of "_"): ' \
'%s ' % (obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid filenaming",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if len(tokens[0]) != 6:
self.report += 'Found %s at (%.7f, %.7f) with image having invalid survey in filename: %s ' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "invalid survey in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
if tokens[1] != "SBDARE":
self.report += 'Found %s at (%.7f, %.7f) with "SBDARE" not stated in filename: %s ' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "'SBDARE' not stated in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
try:
_ = int(tokens[2])
except ValueError:
self.report += 'Found %s at (%.7f, %.7f) with image not having numeric identifier in filename: ' \
'%s ' % (obj.acronym, obj.centroid.x, obj.centroid.y, image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "no numeric identifier in filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
continue
# logger.debug("checking for invalid image names per HSSD -> flagged: %d" % len(flagged))
return flagged
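    # Illustrative (hypothetical) SBDARE image name per the 2021 HSSD:
    #   "H13039_SBDARE_1.JPG" -> 6-character survey, "SBDARE", numeric identifier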
def _check_nonsbdare_images_per_hssd_2021(self, objects: List['S57Record10']) -> List[list]:
""""Check if the passed features have valid image name per HSSD"""
# create check that makes sure that the image naming convention is just a unique name.
flagged = list()
names = dict()
for obj in objects:
images = None
for attr in obj.attributes:
if attr.acronym == "images":
images = attr.value
if images is None:
continue
images_list = [image.lower() for image in images.split(";")]
for image_filename in images_list:
image_filename = os.path.splitext(image_filename)[0]
if image_filename in names.keys():
self.report += 'Warning: Found %s at (%.7f, %.7f) with same image filename as %s: %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, names[image_filename], image_filename)
# add to the flagged feature list and to the flagged report
self.flags.append(obj.centroid.x, obj.centroid.y, "warning: duplicated image filename",
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
else:
names[image_filename] = "%s at (%.7f, %.7f)" % (obj.acronym, obj.centroid.x, obj.centroid.y)
# logger.debug("checking for invalid image names per HSSD 2021 -> flagged: %d" % len(flagged))
return flagged
# SOUNDINGS
def soundings(self):
self.report += "Checks for soundings [SECTION]"
# Isolate sounding features
sounding_fts = S57Aux.select_by_object(objects=self.all_fts, object_filter=['SOUNDG', ])
# filter out soundings with tecsou vbes, lidar, photogrammetry
sounding_filtered_tecsou = S57Aux.filter_by_attribute_value(objects=sounding_fts, attribute='TECSOU',
value_filter=['1', '7', '10', ])
# Ensure soundings have tecsou
self.report += "SOUNDG with empty/missing mandatory attribute TECSOU [CHECK]"
self.flags.soundings.tecsou = self._flag_features_with_attribute_value(objects=sounding_fts, attribute='TECSOU',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure soundings have quasou, quasou shall be left blank for vbes, lidar, photogrammetry
self.report += "SOUNDG with empty/missing mandatory attribute QUASOU [CHECK]"
self.flags.soundings.quasou = self._flag_features_with_attribute_value(objects=sounding_filtered_tecsou,
attribute='QUASOU',
values_to_flag=['', ],
check_attrib_existence=True)
# DTONS
def dtons(self):
self.report += "Checks for DTONs [SECTION]"
# Isolate features that are no-carto, descrp = New or Updated, and sftype = DTON
dtons = S57Aux.select_by_attribute_value(objects=self.new_updated_fts, attribute='sftype', value_filter=['3', ])
# Remove soundings to prevent WRECK and OBSTRN DtoN objects from getting the image flag twice.
dtons = S57Aux.filter_by_object(objects=dtons, object_filter=['WRECKS', 'OBSTRN'])
# Ensure DTONs have images
self.report += "Special feature types (DTONS) missing images [CHECK]"
self.flags.dtons.images = self._check_features_for_attribute(objects=dtons, attribute='images')
# WRECKS
def wrecks(self):
self.report += "Checks for wrecks [SECTION]"
# Isolate new or updated wrecks
wrecks = S57Aux.select_by_object(objects=self.new_updated_fts, object_filter=['WRECKS', ])
        # Split wrecks into those with a defined VALSOU and those with an undefined VALSOU
wrecks_valsou = S57Aux.select_by_attribute(objects=wrecks, attribute='VALSOU')
# logger.debug("Total number of wrecks without undefined VALSOU: %d" % (len(wrecks_valsou)))
wrecks_undefined_valsou = S57Aux.filter_by_attribute(wrecks, attribute='VALSOU')
# logger.debug("Total number of wrecks with undefined VALSOU: %d" % (len(wrecks_undefined_valsou)))
# filter out wrecks with tecsou vbes, lidar, photogrammetry
wrecks_filtered_tecsou = S57Aux.filter_by_attribute_value(objects=wrecks_valsou, attribute='TECSOU',
value_filter=['1', '7', '10', ])
# Ensure new or updated wrecks have images
self.report += "New or Updated WRECKS missing images [CHECK]"
self.flags.wrecks.images = self._check_features_for_attribute(objects=wrecks, attribute='images')
# Ensure new or updated wrecks have catwrk
self.report += "New or Updated WRECKS with empty/missing mandatory attribute CATWRK [CHECK]"
self.flags.wrecks.catwrk = self._flag_features_with_attribute_value(objects=wrecks, attribute='CATWRK',
values_to_flag=['', ],
check_attrib_existence=True)
self.report += "Warning: New or Updated WRECKS missing mandatory attribute VALSOU [CHECK]"
self.flags.wrecks.valsou = self._check_features_for_attribute(objects=wrecks, attribute='VALSOU', possible=True)
# If wreck has valsou, must have watlev, quasou, tecsou
# Ensure wrecks with valsou contain watlev
self.report += "New or Updated WRECKS with VALSOU with empty/missing mandatory attribute WATLEV [CHECK]"
self.flags.wrecks.watlev = self._flag_features_with_attribute_value(wrecks_valsou,
attribute='WATLEV',
values_to_flag=['', ],
check_attrib_existence=True)
        # Ensure new or updated wrecks have QUASOU (left blank when TECSOU is VBES, lidar, or photogrammetry)
self.report += "New or Updated WRECKS with VALSOU with missing mandatory attribute QUASOU [CHECK]"
self.flags.wrecks.quasou = self._flag_features_with_attribute_value(objects=wrecks_filtered_tecsou,
attribute='QUASOU',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure new or updated wrecks have tecsou
self.report += "New or Updated WRECKS with VALSOU with empty/missing mandatory attribute TECSOU [CHECK]"
self.flags.wrecks.tecsou = self._flag_features_with_attribute_value(objects=wrecks_valsou, attribute='TECSOU',
values_to_flag=['', ],
check_attrib_existence=True)
# If wreck does not have VALSOU...
# Warning: Ensure wrecks with unknown valsou have watlev unknown
self.report += "Warning: New or Updated WRECKS with empty VALSOU shall have WATLEV of 'unknown' [CHECK]"
self.flags.wrecks.unknown_watlev = self._flag_features_with_attribute_value(wrecks_undefined_valsou,
attribute='WATLEV',
values_to_flag=["1", "2", "3", "4",
"5", "6", "7", ],
check_attrib_existence=True,
possible=True)
# Ensure wrecks with unknown valsou have tecsou "unknown"
self.report += "New or Updated WRECKS with empty VALSOU shall have TECSOU of 'unknown' [CHECK]"
self.flags.wrecks.unknown_tecsou = self._flag_features_with_attribute_value(wrecks_undefined_valsou,
attribute='TECSOU',
values_to_flag=["1", "2", "3", "4",
"5", "6", "7", "8",
"9", "10", "11",
"12", "13", "14", ],
check_attrib_existence=True)
# Ensure wrecks with unknown valsou have quasou "unknown"
self.report += "New or Updated WRECKS with empty VALSOU shall have QUASOU of NULL [CHECK]"
self.flags.wrecks.unknown_quasou = self._flag_features_with_attribute_value(wrecks_undefined_valsou,
attribute='QUASOU',
values_to_flag=["1", "2", "3", "4",
"5", "6", "7", "8",
"9", "10", "11"])
# ROCKS
def rocks(self):
self.report += "Checks for underwater rocks [SECTION]"
# Isolate new or updated rocks
rocks = S57Aux.select_by_object(objects=self.new_updated_fts, object_filter=['UWTROC', ])
        # Split rocks into those with a defined VALSOU and those with an undefined VALSOU
rocks_valsou = S57Aux.select_by_attribute(objects=rocks, attribute='VALSOU')
rocks_undefined_valsou = S57Aux.filter_by_attribute(rocks, attribute='VALSOU')
# filter out rocks with tecsou vbes, lidar, photogrammetry
rocks_filtered_tecsou = S57Aux.filter_by_attribute_value(objects=rocks_valsou, attribute='TECSOU',
value_filter=['1', '7', '10', ])
# Ensure new or updated rocks have valsou
self.report += "Warning: New or Updated UWTROC missing mandatory attribute VALSOU [CHECK]"
self.flags.rocks.valsou = self._check_features_for_attribute(objects=rocks, attribute='VALSOU', possible=True)
# If new or updated rocks have valsou, must have watlev, quasou, tecsou
self.report += "New or Updated UWTROC with VALSOU with empty/missing mandatory attribute WATLEV [CHECK]"
self.flags.rocks.watlev = self._flag_features_with_attribute_value(rocks_valsou, attribute='WATLEV',
values_to_flag=['', ],
check_attrib_existence=True)
        # Ensure new or updated rocks have QUASOU (left blank when TECSOU is VBES, lidar, or photogrammetry)
self.report += "New or Updated UWTROC with VALSOU with missing mandatory attribute QUASOU [CHECK]"
self.flags.rocks.quasou = self._flag_features_with_attribute_value(objects=rocks_filtered_tecsou,
attribute='QUASOU',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure new or updated rocks have tecsou
self.report += "New or Updated UWTROC with VALSOU with empty/missing mandatory attribute TECSOU [CHECK]"
self.flags.rocks.tecsou = self._flag_features_with_attribute_value(objects=rocks_valsou, attribute='TECSOU',
values_to_flag=['', ],
check_attrib_existence=True)
# If rock does not have VALSOU...
# Warning: Ensure rocks with unknown valsou have watlev unknown
self.report += "Warning: New or Updated UWTROC with empty VALSOU shall have WATLEV of 'unknown' [CHECK]"
self.flags.rocks.unknown_watlev = self._flag_features_with_attribute_value(rocks_undefined_valsou,
attribute='WATLEV',
values_to_flag=["1", "2", "3", "4",
"5", "6", "7", ],
check_attrib_existence=True,
possible=True)
# Ensure rocks with unknown valsou have tecsou "unknown"
self.report += "New or Updated UWTROC with empty VALSOU shall have TECSOU of 'unknown' [CHECK]"
self.flags.rocks.unknown_tecsou = self._flag_features_with_attribute_value(rocks_undefined_valsou,
attribute='TECSOU',
values_to_flag=["1", "2", "3", "4",
"5", "6", "7", "8",
"9", "10", "11",
"12", "13",
"14", ],
check_attrib_existence=True)
# Ensure rocks with unknown valsou have quasou "unknown"
self.report += "New or Updated UWTROC with empty VALSOU shall have QUASOU of NULL [CHECK]"
self.flags.rocks.unknown_quasou = self._flag_features_with_attribute_value(rocks_undefined_valsou,
attribute='QUASOU',
values_to_flag=["1", "2", "3", "4",
"5", "6", "7", "8",
"9", "10", "11"])
# OBSTRUCTIONS
def obstructions(self):
self.report += "Checks for obstructions [SECTION]"
# Isolate new or updated obstructions
obstrns = S57Aux.select_by_object(objects=self.new_updated_fts, object_filter=['OBSTRN', ])
obstrn_valsou = S57Aux.select_by_attribute(objects=obstrns, attribute='VALSOU')
# Exclude foul area and ground area obstructions
obstrns_no_foul_area_ground = S57Aux.filter_by_attribute_value(objects=obstrns, attribute='CATOBS',
value_filter=['6', '7', ])
# select all obstructions without valsous excluding foul ground and area
obstrn_undefined_valsou = S57Aux.filter_by_attribute(obstrns_no_foul_area_ground, attribute='VALSOU')
# filter out obstructions with tecsou vbes, lidar, photogrammetry
obstrn_filtered_tecsou = S57Aux.filter_by_attribute_value(objects=obstrn_valsou, attribute='TECSOU',
value_filter=['1', '7', '10', ])
# Exclude foul area obstructions
obstrns_no_foul = S57Aux.filter_by_attribute_value(objects=obstrns, attribute='CATOBS', value_filter=['6', ])
# Include foul ground area obstructions
obstrns_foul_ground = S57Aux.select_by_attribute_value(objects=obstrns, attribute='CATOBS',
value_filter=['7', ])
# Include only foul obstructions
obstrns_foul = S57Aux.select_by_attribute_value(objects=obstrns, attribute='CATOBS', value_filter=['6', ])
        # Ensure new or updated obstructions (excluding foul areas) have images
self.report += "New or Updated OBSTRN (excluding foul areas) missing images [CHECK]"
self.flags.obstructions.images = self._check_features_for_attribute(objects=obstrns_no_foul, attribute='images')
        # New or Updated obstructions (excluding foul ground and foul areas) should have VALSOU
self.report += "Warning: New or Updated OBSTRN (excluding foul ground & areas) missing mandatory attribute " \
"VALSOU [CHECK]"
self.flags.obstructions.valsou = self._check_features_for_attribute(objects=obstrns_no_foul_area_ground,
attribute='VALSOU',
possible=True)
# Following checks are for obstructions that are foul.
# Ensure foul area does not have valsou
self.report += "Foul area OBSTRN shall not have VALSOU [CHECK]"
self.flags.obstructions.foul_valsou = self._check_features_without_attribute(objects=obstrns_foul,
attribute='VALSOU', possible=False)
# If new or updated obstructions have valsou, must have watlev, quasou, tecsou
self.report += "New or Updated OBSTRN with VALSOU with empty/missing mandatory attribute WATLEV [CHECK]"
self.flags.obstructions.watlev = self._flag_features_with_attribute_value(obstrn_valsou, attribute='WATLEV',
values_to_flag=['', ],
check_attrib_existence=True)
        # Ensure new or updated obstructions have QUASOU, except when TECSOU is VBES, lidar, or photogrammetry
        # (in those cases QUASOU shall be left blank)
self.report += "New or Updated OBSTRN with VALSOU with missing mandatory attribute QUASOU [CHECK]"
self.flags.obstructions.quasou = self._flag_features_with_attribute_value(objects=obstrn_filtered_tecsou,
attribute='QUASOU',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure new or updated obstructions have tecsou
self.report += "New or Updated OBSTRN with VALSOU with empty/missing mandatory attribute TECSOU [CHECK]"
self.flags.obstructions.tecsou = self._flag_features_with_attribute_value(objects=obstrn_valsou,
attribute='TECSOU',
values_to_flag=['', ],
check_attrib_existence=True)
# If obstructions does not have VALSOU...
# Warning: Ensure obstructions with unknown valsou have watlev unknown
self.report += "Warning: New or Updated OBSTRN with empty VALSOU shall have WATLEV of 'unknown' [CHECK]"
self.flags.obstructions.unknown_watlev = self._flag_features_with_attribute_value(obstrn_undefined_valsou,
attribute='WATLEV',
values_to_flag=["1", "2", "3",
"4", "5", "6",
"7", ],
check_attrib_existence=True,
possible=True)
# Ensure obstructions with unknown valsou have tecsou "unknown"
self.report += "New or Updated OBSTRN with empty VALSOU shall have TECSOU of 'unknown' [CHECK]"
self.flags.obstructions.unknown_tecsou = self._flag_features_with_attribute_value(obstrn_undefined_valsou,
attribute='TECSOU',
values_to_flag=["1", "2", "3",
"4", "5", "6",
"7", "8", "9",
"10", "11",
"12", "13",
"14", ],
check_attrib_existence=True)
# Ensure obstructions with unknown valsou have quasou "unknown"
self.report += "New or Updated OBSTRN with empty VALSOU shall have QUASOU of NULL [CHECK]"
self.flags.obstructions.unknown_quasou = self._flag_features_with_attribute_value(obstrn_undefined_valsou,
attribute='QUASOU',
values_to_flag=["1", "2", "3",
"4", "5", "6",
"7", "8", "9",
"10", "11"])
# New or updated foul ground obstructions shall have WATLEV
self.report += "New or Updated foul ground OBSTRN with empty/missing mandatory attribute WATLEV [CHECK]"
self.flags.obstructions.foul_ground_watlev = self._flag_features_with_attribute_value(
objects=obstrns_foul_ground,
attribute='WATLEV',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure new or updated foul ground obstructions do not have valsou
self.report += "New or Updated foul ground OBSTRN must have VALSOU of NULL [CHECK]"
self.flags.obstructions.foul_ground_valsou = self._check_features_without_attribute(objects=obstrns_foul_ground,
attribute='VALSOU',
possible=False)
# Ensure new or updated foul ground obstructions do not have quasou
self.report += "New or Updated foul ground OBSTRN must have QUASOU of NULL [CHECK]"
self.flags.obstructions.foul_ground_quasou = self._check_features_without_attribute(objects=obstrns_foul_ground,
attribute='QUASOU',
possible=False)
# Ensure new or updated foul ground obstructions do not have tecsou
self.report += "New or Updated foul ground OBSTRN must have TECSOU of NULL [CHECK]"
self.flags.obstructions.foul_ground_tecsou = self._check_features_without_attribute(objects=obstrns_foul_ground,
attribute='TECSOU',
possible=False)
# Foul area checks....
# Warning: Ensure foul area obstructions have watlev unknown
self.report += "Warning: New or Updated foul area OBSTRN shall have WATLEV of 'unknown' [CHECK]"
self.flags.obstructions.foul_unknown_watlev = \
self._flag_features_with_attribute_value(obstrns_foul,
attribute='WATLEV',
values_to_flag=["1", "2", "3", "4", "5", "6", "7"],
check_attrib_existence=True,
possible=True)
# Ensure foul area obstructions have tecsou "unknown"
self.report += "New or Updated foul area OBSTRN shall have TECSOU of 'unknown' [CHECK]"
self.flags.obstructions.foul_unknown_tecsou = \
self._flag_features_with_attribute_value(obstrns_foul,
attribute='TECSOU',
values_to_flag=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10",
"11", "12", "13", "14"],
check_attrib_existence=True)
# Ensure foul area obstructions have quasou "unknown"
self.report += "New or Updated foul area OBSTRN shall have QUASOU of 'depth unknown' [CHECK]"
self.flags.obstructions.foul_unknown_quasou = \
self._flag_features_with_attribute_value(obstrns_foul,
attribute='QUASOU',
values_to_flag=["1", "3", "4", "5", "6", "7", "8", "9", "10",
"11"],
check_attrib_existence=True)
# OFFSHORE PLATFORMS
def platforms(self):
self.report += "Checks for offshore platforms [SECTION]"
# Isolate new or updated offshore platforms
ofsplf = S57Aux.select_by_object(
objects=self.new_updated_fts,
object_filter=['OFSPLF', ])
# Ensure new or updated offshore platforms have images
self.report += "New or Updated OFSPLF missing images [CHECK]"
self.flags.platforms.images = self._check_features_for_attribute(objects=ofsplf, attribute='images')
# SEABED AREAS
def sbdares(self):
self.report += "Checks for seabed areas [SECTION]"
# @ Isolate new or updated seabed areas
sbdare = S57Aux.select_by_object(objects=self.new_updated_fts, object_filter=['SBDARE', ])
# Isolate sbdare lines and areas
sbdare_lines_areas = S57Aux.select_lines_and_areas(objects=sbdare)
# Ensure new or updated seabed areas have natsur
self.report += "New or Updated SBDARE lines and areas with empty/missing mandatory attribute NATSUR [CHECK]"
self.flags.sbdares.natsur = self._flag_features_with_attribute_value(objects=sbdare_lines_areas,
attribute='NATSUR', values_to_flag=['', ],
check_attrib_existence=True)
# Ensure line and area seabed areas have watlev
self.report += "New or Updated SBDARE lines or areas missing mandatory attribute WATLEV [CHECK]"
self.flags.sbdares.watlev = self._check_features_for_attribute(objects=sbdare_lines_areas, attribute='WATLEV',
possible=True)
# Isolate new or updated point seabed areas
sbdare_points = S57Aux.select_only_points(objects=sbdare)
# Ensure not more natqua than natsur
self.report += "New or Updated point seabed areas with more NATQUA than NATSUR [CHECK]"
self.flags.sbdares.pt_natqua = self._check_sbdare_attribute_counts(sbdare_points=sbdare_points,
limiting_attribute='NATSUR',
dependent='NATQUA')
# Ensure not more colour than natsur
self.report += "New or Updated point seabed areas with more COLOUR than NATSUR [CHECK]"
self.flags.sbdares.pt_colour = self._check_sbdare_attribute_counts(sbdare_points=sbdare_points,
limiting_attribute='NATSUR',
dependent='COLOUR')
# Ensure no unallowable combinations of natqua and natsur
self.report += "No unallowable combinations of NATSUR and NATQUA [CHECK]"
self.flags.sbdares.pt_allowable_combo = self._allowable_sbdare(sbdare_points=sbdare_points)
def _check_sbdare_attribute_counts(self, sbdare_points: List['S57Record10'], limiting_attribute: str,
dependent: str) -> List[list]:
"""Function to ensure that one attribute (dependent) does not have more values
than one that relates to it (limiting attribute)"""
flagged = list()
for point in sbdare_points:
attribute_1 = None
attribute_2 = None
for attr in point.attributes:
if attr.acronym == limiting_attribute:
attribute_1 = attr.value
elif attr.acronym == dependent:
attribute_2 = attr.value
if not attribute_2:
continue
elif not attribute_1:
continue
elif len(attribute_1.split(',')) >= len(attribute_2.split(',')):
continue
# add to the flagged feature list
if dependent == 'NATQUA':
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f) has NATSUR/NATQUA imbalance' \
% (point.acronym, point.centroid.x, point.centroid.y)
self.flags.append(point.centroid.x, point.centroid.y, 'NATSUR/NATQUA imbalance',
self.report.cur_section())
else:
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f) has NATSUR/COLOUR imbalance' \
% (point.acronym, point.centroid.x, point.centroid.y)
self.flags.append(point.centroid.x, point.centroid.y, 'NATSUR/COLOUR imbalance',
self.report.cur_section())
flagged.append([point.acronym, point.centroid.x, point.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
def _allowable_sbdare(self, sbdare_points: List['S57Record10']) -> List[list]:
# report section
# currently unsure whether the pairs with '' first ('UNDEFINED' in CARIS) are allowed by specs
# the pairs with '0' first ('-' in CARIS) were added based on NOAA Appendix G.5
allowable = [['1', '4'], ['2', '4'], ['3', '4'], ['4', '14'], ['4', '17'], ['5', '1'],
['5', '2'], ['5', '3'], ['6', '1'], ['6', '2'], ['6', '3'], ['6', '4'],
['7', '1'], ['7', '2'], ['7', '3'], ['8', '1'], ['8', '4'], ['8', '5'],
['8', '6'], ['8', '7'], ['8', '8'], ['8', '9'], ['8', '11'], ['8', '18'],
['9', '1'], ['9', '4'], ['9', '5'], ['9', '6'], ['9', '7'], ['9', '8'],
['9', '9'], ['9', '17'], ['9', '18'], ['10', '1'], ['10', '2'], ['10', '3'],
['10', '4'],
['', '1'], ['', '2'], ['', '3'], ['', '4'], ['', '5'],
['', '6'], ['', '7'], ['', '8'], ['', '9'], ['', '11'], ['', '14'],
['', '17'], ['', '18'],
['0', '1'], ['0', '2'], ['0', '3'], ['0', '4'], ['0', '5'],
['0', '6'], ['0', '7'], ['0', '8'], ['0', '9'], ['0', '11'], ['0', '14'],
['0', '17'], ['0', '18']]
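        # Each entry above is an allowed [NATQUA, NATSUR] pair of S-57 attribute codes,
        # compared index-by-index against the comma-separated values in the loop below.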
flagged = list()
for sbdare in sbdare_points:
natqua = None
natsur = None
for attr in sbdare.attributes:
if attr.acronym == 'NATQUA':
natqua = attr.value
elif attr.acronym == 'NATSUR':
natsur = attr.value
if (natqua is None) or (natsur is None):
continue
else:
natqua = natqua.split(',')
natsur = natsur.split(',')
for i in range(min(len(natsur), len(natqua))):
check = [natqua[i], natsur[i]]
if check in allowable:
continue
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f) has prohibited NATSUR/NATQUA combination ' \
% (sbdare.acronym, sbdare.centroid.x, sbdare.centroid.y)
# add to the flagged feature list
self.flags.append(sbdare.centroid.x, sbdare.centroid.y,
"NATQUA and NATSUR combination is not allowed", self.report.cur_section())
flagged.append([sbdare.acronym, sbdare.centroid.x, sbdare.centroid.y])
break
if len(flagged) == 0:
self.report += "OK"
return flagged
# MOORINGS
def moorings(self):
self.report += "Checks for mooring facilities [SECTION]"
# Isolate new or updated mooring facilities
morfac = S57Aux.select_by_object(
objects=self.new_updated_fts,
object_filter=['MORFAC', ])
# Ensure new or updated mooring facilities have catmor
self.report += "New or Updated MORFAC with empty/missing mandatory attribute CATMOR [CHECK]"
self.flags.moorings.catmor = self._flag_features_with_attribute_value(
objects=morfac,
attribute='CATMOR',
values_to_flag=['', ],
check_attrib_existence=True)
# COASTLINES
def coastlines(self):
self.report += "Checks for coastlines and shorelines [SECTION]"
# Isolate new or updated coastline
coalne = S57Aux.select_by_object(objects=self.new_updated_fts, object_filter=['COALNE', ])
# Ensure new or updated coastline has catcoa
self.report += "New or Updated COALNE with empty/missing mandatory attribute CATCOA [CHECK]"
self.flags.coastlines.coalne = self._flag_features_with_attribute_value(objects=coalne, attribute='CATCOA',
values_to_flag=['', ],
check_attrib_existence=True)
# Isolate new or updated shoreline construction
slcons = S57Aux.select_by_object(objects=self.new_updated_fts, object_filter=['SLCONS', ])
# Ensure new or updated shoreline construction has catslc
self.report += "New or Updated SLCONS with empty/missing mandatory attribute CATSLC [CHECK]"
self.flags.coastlines.slcons = self._flag_features_with_attribute_value(objects=slcons, attribute='CATSLC',
values_to_flag=['', ],
check_attrib_existence=True)
# LANDS
def lands(self):
self.report += "Checks for land elevations [SECTION]"
# Isolate new or updated land elevation
lndelv = S57Aux.select_by_object(objects=self.new_updated_fts, object_filter=['LNDELV', ])
# Ensure new or updated land elevation has elevat
self.report += "New or Updated LNDELV missing mandatory attribute ELEVAT [CHECK]"
self.flags.lands.elevat = self._check_features_for_attribute(objects=lndelv, attribute='ELEVAT')
# META COVERAGES
def coverages(self):
self.report += "Checks for metadata coverages [SECTION]"
# Isolate M_COVR object
mcovr = S57Aux.select_by_object(objects=self.all_fts, object_filter=['M_COVR', ])
# Ensure M_COVR has catcov
self.report += "M_COVR with empty/missing mandatory attribute CATCOV [CHECK]"
self.flags.coverages.m_covr_catcov = self._flag_features_with_attribute_value(objects=mcovr, attribute='CATCOV',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure M_COVR has inform
self.report += "M_COVR missing mandatory attribute INFORM [CHECK]"
self.flags.coverages.m_covr_inform = self._check_features_for_attribute(objects=mcovr, attribute='INFORM')
# Ensure M_COVR has ninfom
self.report += "M_COVR missing mandatory attribute NINFOM [CHECK]"
self.flags.coverages.m_covr_ninfom = self._check_features_for_attribute(objects=mcovr, attribute='NINFOM')
# OFFICE ONLY
def office_only(self):
if self.profile != 0: # Not office
logger.info('Skipping checks only for the office')
return
self.report += "Checks only for office [SECTION]"
# For the office profile, ensure all features have onotes
self.report += "Features missing onotes [CHECK]"
self.flags.office.without_onotes = self._check_features_for_attribute(objects=self.all_fts, attribute='onotes')
# For the office profile, check for empty hsdrec
self.report += "Features with empty/unknown attribute hsdrec [CHECK]"
self.flags.office.hsdrec_empty = self._flag_features_with_attribute_value(objects=self.all_fts,
attribute='hsdrec',
values_to_flag=['', ],
check_attrib_existence=True)
# For the office profile, check for prohibited features by feature type
self.report += "Features without 'Prohibited feature' keyword [CHECK]"
prohibited = S57Aux.select_by_object(objects=self.all_fts, object_filter=['DRGARE', 'LOGPON', 'PIPARE',
'PIPOHD', 'PIPSOL', 'DMPGRD',
'LIGHTS', 'BOYLAT', 'BOYSAW',
'BOYSPP', 'DAYMAR', 'FOGSIG',
'CBLSUB', 'CBLARE', 'FAIRWY',
'RTPBCN', 'BOYISD', 'BOYINB',
'BOYCAR', 'CBLOHD', 'BCNSPP',
'BCNLAT', 'BRIDGE'])
self.flags.office.prohibited_kwds = self._check_for_missing_keywords(objects=prohibited, attr_acronym='onotes',
keywords=['Prohibited feature', ])
# For the office profile, check for prohibited fish haven
obstrn = S57Aux.select_by_object(objects=self.all_fts, object_filter=['OBSTRN', ])
fish_haven = S57Aux.select_by_attribute_value(objects=obstrn, attribute='CATOBS', value_filter=['5', ])
self.report += "Fish havens without 'Prohibited feature' keyword [CHECK]"
self.flags.office.fish_haven_kwds = self._check_for_missing_keywords(objects=fish_haven, attr_acronym='onotes',
keywords=['Prohibited feature', ])
# For the office profile, check for prohibited mooring buoys
morfac = S57Aux.select_by_object(objects=self.all_fts, object_filter=['MORFAC', ])
mooring_buoy = S57Aux.select_by_attribute_value(objects=morfac, attribute='CATMOR', value_filter=['7', ])
self.report += "Mooring buoy without 'Prohibited feature' keyword [CHECK]"
self.flags.office.mooring_buoy_kwds = self._check_for_missing_keywords(objects=mooring_buoy,
attr_acronym='onotes',
keywords=['Prohibited feature', ])
# For office profile, check for M_QUAL attribution
mqual = S57Aux.select_by_object(objects=self.all_fts, object_filter=['M_QUAL', ])
# Ensure M_QUAL has CATZOC
self.report += "M_QUAL features with empty/missing mandatory attribute CATZOC [CHECK]"
self.flags.office.m_qual_catzoc = self._flag_features_with_attribute_value(objects=mqual, attribute='CATZOC',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure M_QUAL has SURSTA
self.report += "M_QUAL features missing mandatory attribute SURSTA [CHECK]"
self.flags.office.m_qual_sursta = self._check_features_for_attribute(objects=mqual, attribute='SURSTA')
# Ensure M_QUAL has SUREND
self.report += "M_QUAL features missing mandatory attribute SUREND [CHECK]"
self.flags.office.m_qual_surend = self._check_features_for_attribute(objects=mqual, attribute='SUREND')
# Ensure M_QUAL has TECSOU
self.report += "M_QUAL features empty/missing mandatory attribute TECSOU [CHECK]"
self.flags.office.m_qual_tecsou = self._flag_features_with_attribute_value(objects=mqual, attribute='TECSOU',
values_to_flag=['', ],
check_attrib_existence=True)
# Ensure all features have descrp (per MCD)
self.report += "Features have empty or missing mandatory attribute description [CHECK]"
self.flags.office.mcd_description = self._check_features_for_attribute(objects=self.all_fts, attribute='descrp')
# Ensure all features have remrks (per MCD)
self.report += "Features missing mandatory attribute remarks [CHECK]"
self.flags.office.mcd_remarks = self._check_features_for_attribute(objects=self.all_fts, attribute='remrks')
if self.version in ["2019", "2020"]:
# Requirement only for the office, then also for the field
self.report += "Features with text input fields exceeding %d characters [CHECK]" % self.character_limit
self.flags.office.chars_limit = self._check_character_limit(objects=self.all_fts, attributes=['images',
'invreq',
'keywrd',
'onotes',
'recomd',
'remrks'],
character_limit=self.character_limit)
def _check_for_missing_keywords(self, objects: List['S57Record10'], attr_acronym: str, keywords: List[str]) \
-> List[list]:
"""Check if the passed features do not have the passed keywords in a specific attribute"""
flagged = list()
kws = list()
for keyword in keywords:
kws.append(keyword.lower())
for obj in objects:
# do the test
has_keywords = False
for attr in obj.attributes:
if attr.acronym == attr_acronym:
attr_value = attr.value.lower()
for kw in kws:
if kw in attr_value:
has_keywords = True
break
break
# keywords found
if has_keywords:
continue
else:
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f), missing %s' % \
(obj.acronym, obj.centroid.x, obj.centroid.y, keywords)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, "missing %s in %s" % (keywords, attr_acronym),
self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
def _check_character_limit(self, objects: List['S57Record10'], attributes: List[str], character_limit: int) \
-> List[list]:
"""Check if the passed attribute of the passed features is not longer than the passed character limit"""
flagged = list()
for obj in objects:
# do the test
for attr in obj.attributes:
if attr.acronym in attributes:
nr_chars = len(attr.value)
if len(attr.value) > character_limit:
# add to the flagged report
self.report += 'Found %s at (%.7f, %.7f) exceeds %d-character limit [%d in %s]' \
% (obj.acronym, obj.centroid.x, obj.centroid.y, character_limit,
nr_chars, attr.acronym)
# add to the flagged feature list
self.flags.append(obj.centroid.x, obj.centroid.y, 'exceeds %d-character limit [%d in %s]'
% (character_limit, nr_chars, attr.acronym), self.report.cur_section())
flagged.append([obj.acronym, obj.centroid.x, obj.centroid.y])
if len(flagged) == 0:
self.report += "OK"
return flagged
# noinspection PyStatementEffect
def finalize_summary(self):
"""Add a summary to the report"""
count = ord('A')
# Add a summary to the report
self.report += 'SUMMARY [SECTION]'
self.report += 'Summary by section: [CHECK]'
# ### ALL FEATURES ###
self.report += 'Section %s - Checks for feature file consistency: %s' \
% (chr(count), self.flags.all_fts.nr_of_flagged())
count += 1
# ### ASSIGNED FEATURES ###
self.report += 'Section %s - Checks for assigned features: %s' \
% (chr(count), self.flags.ass_fts.nr_of_flagged())
count += 1
# ### NEW OR UPDATED FEATURES ###
self.report += 'Section %s - Checks for new or updated features: %s' \
% (chr(count), self.flags.new_updated_fts.nr_of_flagged())
count += 1
# ### NEW OR DELETED FEATURES ###
self.report += 'Section %s - Checks for new or deleted features: %s' \
% (chr(count), self.flags.new_deleted_fts.nr_of_flagged())
count += 1
# ### IMAGES ###
self.report += 'Section %s - Checks for images: %s' \
% (chr(count), self.flags.images.nr_of_flagged())
count += 1
# SOUNDINGS
self.report += 'Section %s - Checks for soundings: %s' \
% (chr(count), self.flags.soundings.nr_of_flagged())
count += 1
# DTONS
self.report += 'Section %s - Checks for DTONs: %s' \
% (chr(count), self.flags.dtons.nr_of_flagged())
count += 1
# WRECKS
self.report += 'Section %s - Checks for wrecks: %s' \
% (chr(count), self.flags.wrecks.nr_of_flagged())
count += 1
# ROCKS
self.report += 'Section %s - Checks for underwater rocks: %s' \
% (chr(count), self.flags.rocks.nr_of_flagged())
count += 1
# OBSTRUCTIONS
self.report += 'Section %s - Checks for obstructions: %s' \
% (chr(count), self.flags.obstructions.nr_of_flagged())
count += 1
# OFFSHORE PLATFORMS
self.report += 'Section %s - Checks for offshore platforms: %s' \
% (chr(count), self.flags.platforms.nr_of_flagged())
count += 1
# SEABED AREAS
self.report += 'Section %s - Checks for seabed areas: %s' \
% (chr(count), self.flags.sbdares.nr_of_flagged())
count += 1
# MOORINGS
self.report += 'Section %s - Checks for mooring facilities: %s' \
% (chr(count), self.flags.moorings.nr_of_flagged())
count += 1
# COASTLINES
self.report += 'Section %s - Checks for coastlines and shorelines: %s' \
% (chr(count), self.flags.coastlines.nr_of_flagged())
count += 1
# LANDS
self.report += 'Section %s - Checks for land elevations: %s' \
% (chr(count), self.flags.lands.nr_of_flagged())
count += 1
# META COVERAGES
self.report += 'Section %s - Checks for meta coverages: %s' \
% (chr(count), self.flags.coverages.nr_of_flagged())
count += 1
# OFFICE ONLY
if self.profile == 0: # office profile
self.report += 'Section %s - Checks ONLY for office: %s' \
% (chr(count), self.flags.office.nr_of_flagged())
count += 1
|
PypiClean
|
/Alarmageddon-1.1.2-py3-none-any.whl/alarmageddon/validations/validation.py
|
from .exceptions import EnrichmentFailure, ValidationFailure
GLOBAL_NAMESPACE = "GLOBAL"
class Priority(object):
"""Priority levels that indicate how severe a validation failure is.
Validations have a priority that publishers use to determine whether
or not to publish in the case of failure.
"""
# If a LOW priority Validation fails, the failure should be recorded for posterity
# but not reported to a human (e.g. logged in continuous integration server
# job history but not reported to IRC/HipChat)
LOW = 1
# If a NORMAL priority Validation fails, the failure should be reported to
# a human but it should not wake up a human (e.g. send a notification to
# IRC/HipChat, Graphite). Additionally the failure should be logged for
# posterity as in the case of LOW priority failures.
NORMAL = 2
# If a CRITICAL priority Validation fails, a human should be woken up
# (e.g. create an incident in PagerDuty). Additionally the failure should
# be more politely reported to humans (e.g. via IRC/HipChat, Graphite,
# etc.) and recorded for posterity as in the case of NORMAL and LOW
# priority failures.
CRITICAL = 3
@staticmethod
def string(priority):
"""Return the name of the priority (e.g. normal, low, critical)"""
if priority == Priority.NORMAL:
return "normal"
elif priority == Priority.LOW:
return "low"
elif priority == Priority.CRITICAL:
return "critical"
else:
return "unknown priority: {0}".format(priority)
class Validation(object):
"""The base class for validations.
The base class for all classes that represent some form of validation
(e.g. some expected system property that can be checked and categorized as
either passing or failing). Examples of Validations include: an HTTP
service returning an expected result in a specified amount of time, an
Upstart process on a Linux server is in the running state, a Message
Queue's queue length is lower than a maximum value.
:param name: The name of this validation.
:param priority: The :py:class:`.Priority` level of this validation.
:param timeout: How long this validation can take before being considered
a failure. If None, then the validation will never be considered a
failure due to timing out.
:param group: The group this validation belongs to.
"""
def __init__(self, name, priority=Priority.NORMAL,
timeout=None, group=None):
"""Creates a Validation object with the supplied name and priority.
Arguments:
name -- The name of this Validation
Keyword Arguments
priority -- The priority of this Validation.
timeout -- If this validation takes longer than this many seconds,
it will be considered a failure.
group -- The group this validation belongs to.
"""
self.name = name
self.priority = priority
self.timeout = timeout
self.group = group
#this should never be directly manipulated without very good reason
#it is used to store extra data for publishers, and the primary
#method of interaction should be the enrich and get_enriched
#functions in publisher.py
self._enriched_data = {GLOBAL_NAMESPACE: {}}
#determines the partial ordering of the validations
#Alarmageddon guarantees that all Validations with lower order than
#this Validation's order will run before this Validation runs.
#most validations have no reason to change this
self.order = 0
def perform(self, group_failures):
"""Perform the validation.
If the validation fails, call self.fail passing it the reason for
the failure.
:param group_failures: A dictionary mapping group names to the failures
recorded for each group during the Alarmageddon run.
"""
pass
def fail(self, reason):
"""Log the validation as a failure.
:param reason: The cause of the failure.
:param stack_track: Whether or not to include a stack trace in the
result.
"""
raise ValidationFailure(reason)
def get_elapsed_time(self):
"""Return the amount of time this validation took.
The :py:class:`.reporter.Reporter` will check here before using
the call time.
Override this if you need more precise timing -
e.g., if you want to know how long an HTTP request took, as opposed
to how long that whole test took to execute.
This function should return a number, not a timedelta.
"""
raise NotImplementedError
def __str__(self):
return "Validation {{ name: '{0}' priority: '{1}' timeout: {2}}}"\
.format(self.name,
Priority.string(self.priority),
self.timeout)
def timer_name(self):
"""Return the name of the timer that corresponds to this validation.
Used to indicate where a publisher should log the time taken.
"""
return None
def enrich(self, publisher, values, force_namespace=False):
"""Adds publisher-specific information to the validation.
Override at your own peril! Publishers are expected to assume the
standard behavior from this function.
:param publisher: The publisher to add enriched data for.
:param values: The enriched data to add to this validation.
:param force_namespace: If True, will never add the data to the global
namespace.
"""
namespace = str(type(publisher))
enriched = self._enriched_data
if namespace in enriched:
raise EnrichmentFailure(publisher, self, values)
enriched[namespace] = {}
for key, value in list(values.items()):
if force_namespace:
enriched[namespace][key] = value
else:
if key not in enriched[GLOBAL_NAMESPACE]:
enriched[GLOBAL_NAMESPACE][key] = value
else:
enriched[namespace][key] = value
return self
def get_enriched(self, publisher, force_namespace=False):
"""Retrieve the appropriate publisher-specific data.
Will retrieve all global enriched data along with any extra
publisher specific data. This means that if you enrich a
validation for more than one publisher, this function may
return a superset of the enriched data for a given publisher.
Override at your own peril! Publishers are expected to assume the
standard behavior from this function.
:param publisher: The publisher to retrieve enriched data for.
:param force_namespace: If True, will not retrieve global enrichments.
"""
namespace = str(type(publisher))
enriched = self._enriched_data
#copy global
data = {}
if not force_namespace:
data.update(enriched[GLOBAL_NAMESPACE])
try:
data.update(enriched[namespace])
except KeyError:
pass
return data
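# Illustrative sketch (not part of this module): how enrich()/get_enriched()
# round-trip publisher-specific data. `some_publisher` is a hypothetical
# publisher instance; keys not already present globally land in the global
# namespace unless force_namespace=True.
#
#     v = Validation('api responds', priority=Priority.NORMAL)
#     v.enrich(some_publisher, {'runbook': 'https://wiki.example/runbook'})
#     v.get_enriched(some_publisher)
#     # -> {'runbook': 'https://wiki.example/runbook'}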
class GroupValidation(Validation):
"""A validation that checks the number of failures in a test group.
The priority level will be set dynamically based on the number of
failures and the supplied thresholds.
:param name: The name of this validation.
:param checked_group: The name of the group this validation will check.
:param low_threshold: The number of failures at which this validation
will itself fail.
:param normal_threshold: The number of failures at which this validation
will become NORMAL priority.
:param critical_threshold: The number of failures at which this validation
will become CRITICAL priority.
:param order: This validation will run after all validations of lower
order have run. Used when order matters - e.g., creating a GroupValidation
for a group of GroupValidations.
:param group: The group this validation belongs to.
"""
def __init__(self, name, checked_group, low_threshold=float("inf"),
normal_threshold=float("inf"),
critical_threshold=float("inf"),
order=1, group=None):
Validation.__init__(self,
name, priority=Priority.LOW, timeout=None, group=group)
self.low_threshold = low_threshold
self.normal_threshold = normal_threshold
self.critical_threshold = critical_threshold
self._clean_thresholds()
self.order = order
self.checked_group = checked_group
def _clean_thresholds(self):
"""Ensure that the thresholds are consistent.
`low_threshold` must not exceed `normal_threshold`, which must not
exceed `critical_threshold`. If necessary, this function will alter
the thresholds to ensure this condition is met.
"""
if self.normal_threshold > self.critical_threshold:
self.normal_threshold = self.critical_threshold
if self.low_threshold > self.normal_threshold:
self.low_threshold = self.normal_threshold
def perform(self, group_failures):
"""Perform the validation."""
failures = len(group_failures[self.checked_group])
messages = group_failures[self.checked_group]
if failures >= self.low_threshold:
self._set_priority(failures)
self.fail("Group {0} had {1} failures! \n{2}".format(
self.checked_group, failures, messages))
def _set_priority(self, failures):
"""Set priority of this validation based on the number of failures.
:param failures: The number of failures in this validation's checked
group.
"""
if failures >= self.critical_threshold:
self.priority = Priority.CRITICAL
elif failures >= self.normal_threshold:
self.priority = Priority.NORMAL
else:
self.priority = Priority.LOW
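# Illustrative sketch (not part of this module): a GroupValidation that fails
# once its checked group has one failure, escalates to NORMAL at three and to
# CRITICAL at five. The group name and failure messages are hypothetical.
#
#     gv = GroupValidation('smoke-test rollup', checked_group='smoke-tests',
#                          low_threshold=1, normal_threshold=3,
#                          critical_threshold=5)
#     gv.perform({'smoke-tests': ['timeout on /health', 'bad status from /ping']})
#     # two failures >= low_threshold, so this raises ValidationFailure
#     # with the priority left at LOW (below the NORMAL threshold of 3)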
|
PypiClean
|
/ligo-scald-0.8.4.tar.gz/ligo-scald-0.8.4/ligo/scald/utils.py
|
__author__ = "Patrick Godwin ([email protected])"
__description__ = "a module to store commonly used utility functions"
#-------------------------------------------------
### imports
import argparse
import bisect
from collections import namedtuple
import functools
import json
import os
import random
import re
import sys
import time
import timeit
from datetime import datetime
from dateutil.tz import tzutc
from dateutil.parser import parse as str_to_utc
import numpy
EPOCH_UNIX_GPS = 315964800
#-------------------------------------------------
### leapseconds utilities
leapseconds_table = [
46828800, # 1981-Jul-01
78364801, # 1982-Jul-01
109900802, # 1983-Jul-01
173059203, # 1985-Jul-01
252028804, # 1988-Jan-01
315187205, # 1990-Jan-01
346723206, # 1991-Jan-01
393984007, # 1992-Jul-01
425520008, # 1993-Jul-01
457056009, # 1994-Jul-01
504489610, # 1996-Jan-01
551750411, # 1997-Jul-01
599184012, # 1999-Jan-01
820108813, # 2006-Jan-01
914803214, # 2009-Jan-01
1025136015, # 2012-Jul-01
1119744016, # 2015-Jul-01
1167264017, # 2017-Jan-01
]
def leapseconds(gpstime):
return bisect.bisect_left(leapseconds_table, gpstime)
#-------------------------------------------------
### data utilities
def stats_on_data(data):
return float(numpy.min(data)), float(numpy.percentile(data, 15.9)), float(numpy.percentile(data, 84.1)), float(numpy.max(data))
#-------------------------------------------------
### aggregation utilities
def gps_range_to_process(jobtime, dt=1):
if jobtime:
gpsblocks = set((floor_div(t, dt) for t in jobtime))
if not gpsblocks:
return [], []
min_t, max_t = min(gpsblocks), max(gpsblocks)
return zip(range(min_t, max_t + dt, dt), range(min_t + dt, max_t + 2*dt, dt))
else:
return None
def span_to_process(start, end, dt=1):
return floor_div(start, dt), floor_div(end, dt) + dt
def duration_to_dt(duration):
"""
Given a time duration, returns back the sampling rate of timeseries
such that a maximum of 1000 points are plotted at a given time.
This is used as a default if the user doesn't specify a dt explicitly.
>>> duration_to_dt(900)
1
>>> duration_to_dt(11000)
100
"""
if duration <= 1000:
dt = 1
elif duration <= 10000:
dt = 10
elif duration <= 100000:
dt = 100
elif duration <= 1000000:
dt = 1000
elif duration <= 10000000:
dt = 10000
else:
dt = 100000
return dt
#-------------------------------------------------
### time utilities
def in_new_epoch(new_gps_time, prev_gps_time, gps_epoch):
"""
Returns whether new and old gps times are in different
epochs.
>>> in_new_epoch(1234561200, 1234560000, 1000)
True
>>> in_new_epoch(1234561200, 1234560000, 10000)
False
"""
return (new_gps_time - floor_div(prev_gps_time, gps_epoch)) >= gps_epoch
def floor_div(x, n):
"""
Floor a number by removing its remainder
from division by another number n.
>>> floor_div(163, 10)
160
>>> floor_div(158, 10)
150
"""
assert n > 0
if isinstance(x, int) or (isinstance(x, numpy.ndarray) and numpy.issubdtype(x.dtype, numpy.integer)):
return (x // n) * n
elif isinstance(x, numpy.ndarray):
return (x.astype(float) // n) * n
else:
return (float(x) // n) * n
def gps_now():
"""
Returns the current gps time.
"""
gpsnow = time.time() - EPOCH_UNIX_GPS
return gpsnow + leapseconds(gpsnow)
def gps_to_latency(gps_time):
"""
Given a gps time, measures the latency to ms precision relative to now.
"""
return numpy.round(gps_now() - gps_time, 3)
def rfc3339_to_gps(time_str):
"""
converts an rfc3339-formatted string (UTC+0 only) to a valid gps time.
"""
if time_str[-1] != 'Z':
raise ValueError('missing Z indicating UTC+0')
#utc = str_to_utc(time_str[:-1])
utc = str_to_utc(time_str, tzinfos={'Z': 0})
tdelta = utc - datetime.fromtimestamp(0, tzutc())
gps_time = tdelta.total_seconds() - EPOCH_UNIX_GPS
return gps_time + leapseconds(gps_time)
def gps_to_unix(gps_time):
"""
Converts from GPS to UNIX time, allows use of numpy arrays or scalars.
"""
if isinstance(gps_time, numpy.ndarray):
leapsec = leapseconds(int(gps_time[0]))
return ((gps_time + EPOCH_UNIX_GPS - leapsec) * 1e9).astype(int)
else:
leapsec = leapseconds(int(gps_time))
return int((gps_time + EPOCH_UNIX_GPS - leapsec) * 1e9)
def unix_to_gps(unix_time):
"""
Converts from UNIX to GPS time, allows use of numpy arrays or scalars.
"""
### FIXME: doesn't handle leapseconds correctly
return (unix_time / 1e9) - EPOCH_UNIX_GPS + 18
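# Illustrative sketch (not part of this module): converting a GPS time from
# mid-2017 (18 leap seconds apply) to UNIX nanoseconds and back. Note that
# unix_to_gps() hard-codes the 18 s offset, per the FIXME above.
#
#     gps_to_unix(1187000000)            # -> 1502964782000000000
#     unix_to_gps(1502964782000000000)   # -> 1187000000.0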
#-------------------------------------------------
### nagios utilities
def status_to_nagios_response(text_status, bad_status):
return {
"nagios_shib_scraper_ver": 0.1,
"status_intervals": [{
"num_status": 2 if bad_status else 0,
"txt_status": text_status,
}],
}
def extract_alert_tags(schema):
if 'tags' in schema:
return schema['tags']
else:
tag_type = schema['tag_key']
alert_tag_format = schema['tag_format']
if 'digit' in alert_tag_format:
num_digits = int(alert_tag_format[0])
num_tags = int(schema['num_tags'])
tag_start = int(schema['tag_start']) if 'tag_start' in schema else 0
return [(tag_type, str(tag_num).zfill(num_digits)) for tag_num in range(tag_start, tag_start+num_tags)]
else:
raise ValueError('{} tag format not recognized'.format(alert_tag_format))
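# Illustrative sketch (not part of this module): a hypothetical schema using
# the 'Ndigit' tag format expands into zero-padded (key, value) pairs.
#
#     schema = {'tag_key': 'node', 'tag_format': '4digit', 'num_tags': 3}
#     extract_alert_tags(schema)
#     # -> [('node', '0000'), ('node', '0001'), ('node', '0002')]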
#-------------------------------------------------
### parsing utilities
def append_subparser(subparser, cmd, func):
assert func.__doc__, "empty docstring: {}".format(func)
help_ = func.__doc__.split('\n')[0].lower().strip('.')
desc = func.__doc__.strip()
parser = subparser.add_parser(
cmd,
formatter_class=argparse.RawDescriptionHelpFormatter,
help=help_,
description=desc
)
parser.set_defaults(func=func)
return parser
#-------------------------------------------------
### multiprocessing utilities
def unpack(func):
"""
Unpacks an argument tuple and calls the target function 'func'.
Used as a workaround for python 2 missing multiprocessing.Pool.starmap.
Implemented from https://stackoverflow.com/a/52671399.
"""
@functools.wraps(func)
def wrapper(arg_tuple):
return func(*arg_tuple)
return wrapper
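# Illustrative sketch (not part of this module): emulating Pool.starmap by
# decorating a worker with unpack(), so each argument tuple is expanded
# before the call. The worker function here is hypothetical.
#
#     import multiprocessing
#
#     @unpack
#     def add(x, y):
#         return x + y
#
#     pool = multiprocessing.Pool(2)
#     pool.map(add, [(1, 2), (3, 4)])   # -> [3, 7]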
|
PypiClean
|
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/basic/plugins/filetools/plugin.js
|
/**
* @license Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
'use strict';
( function() {
CKEDITOR.plugins.add( 'filetools', {
lang: 'az,bg,ca,cs,da,de,de-ch,en,en-au,eo,es,es-mx,et,eu,fa,fr,gl,hr,hu,id,it,ja,km,ko,ku,lv,nb,nl,no,oc,pl,pt,pt-br,ro,ru,sk,sq,sr,sr-latn,sv,tr,ug,uk,zh,zh-cn', // %REMOVE_LINE_CORE%
beforeInit: function( editor ) {
/**
* An instance of the {@link CKEDITOR.fileTools.uploadRepository upload repository}.
* It allows you to create and get {@link CKEDITOR.fileTools.fileLoader file loaders}.
*
* var loader = editor.uploadRepository.create( file );
* loader.loadAndUpload( 'http://foo/bar' );
*
* @since 4.5.0
* @readonly
* @property {CKEDITOR.fileTools.uploadRepository} uploadRepository
* @member CKEDITOR.editor
*/
editor.uploadRepository = new UploadRepository( editor );
/**
* Event fired when the {@link CKEDITOR.fileTools.fileLoader file loader} should send XHR. If the event is not
* {@link CKEDITOR.eventInfo#stop stopped} or {@link CKEDITOR.eventInfo#cancel canceled}, the default request
* will be sent. Refer to the {@glink guide/dev_file_upload Uploading Dropped or Pasted Files} article for more information.
*
* @since 4.5.0
* @event fileUploadRequest
* @member CKEDITOR.editor
* @param data
* @param {CKEDITOR.fileTools.fileLoader} data.fileLoader A file loader instance.
* @param {Object} data.requestData An object containing all data to be sent to the server.
*/
editor.on( 'fileUploadRequest', function( evt ) {
var fileLoader = evt.data.fileLoader;
fileLoader.xhr.open( 'POST', fileLoader.uploadUrl, true );
// Adding file to event's data by default - allows overwriting it by user's event listeners. (https://dev.ckeditor.com/ticket/13518)
evt.data.requestData.upload = { file: fileLoader.file, name: fileLoader.fileName };
}, null, null, 5 );
editor.on( 'fileUploadRequest', function( evt ) {
var fileLoader = evt.data.fileLoader,
$formData = new FormData(),
requestData = evt.data.requestData,
configXhrHeaders = editor.config.fileTools_requestHeaders,
header;
for ( var name in requestData ) {
var value = requestData[ name ];
// Treating files in special way
if ( typeof value === 'object' && value.file ) {
$formData.append( name, value.file, value.name );
}
else {
$formData.append( name, value );
}
}
// Append token preventing CSRF attacks.
$formData.append( 'ckCsrfToken', CKEDITOR.tools.getCsrfToken() );
if ( configXhrHeaders ) {
for ( header in configXhrHeaders ) {
fileLoader.xhr.setRequestHeader( header, configXhrHeaders[ header ] );
}
}
fileLoader.xhr.send( $formData );
}, null, null, 999 );
/**
* Event fired when the {CKEDITOR.fileTools.fileLoader file upload} response is received and needs to be parsed.
* If the event is not {@link CKEDITOR.eventInfo#stop stopped} or {@link CKEDITOR.eventInfo#cancel canceled},
* the default response handler will be used. Refer to the
* {@glink guide/dev_file_upload Uploading Dropped or Pasted Files} article for more information.
*
* @since 4.5.0
* @event fileUploadResponse
* @member CKEDITOR.editor
* @param data All data will be passed to {@link CKEDITOR.fileTools.fileLoader#responseData}.
* @param {CKEDITOR.fileTools.fileLoader} data.fileLoader A file loader instance.
* @param {String} data.message The message from the server. Needs to be set in the listener — see the example above.
* @param {String} data.fileName The file name on server. Needs to be set in the listener — see the example above.
* @param {String} data.url The URL to the uploaded file. Needs to be set in the listener — see the example above.
*/
editor.on( 'fileUploadResponse', function( evt ) {
var fileLoader = evt.data.fileLoader,
xhr = fileLoader.xhr,
data = evt.data;
try {
var response = JSON.parse( xhr.responseText );
// An error message does not necessarily mean that the upload finished unsuccessfully.
// It could mean that e.g. the file name was changed during upload due to a naming collision.
if ( response.error && response.error.message ) {
data.message = response.error.message;
}
// But !uploaded means error.
if ( !response.uploaded ) {
evt.cancel();
} else {
for ( var i in response ) {
data[ i ] = response[ i ];
}
}
} catch ( err ) {
// Response parsing error.
data.message = fileLoader.lang.filetools.responseError;
CKEDITOR.warn( 'filetools-response-error', { responseText: xhr.responseText } );
evt.cancel();
}
}, null, null, 999 );
}
} );
/**
* File loader repository. It allows you to create and get {@link CKEDITOR.fileTools.fileLoader file loaders}.
*
* An instance of the repository is available as the {@link CKEDITOR.editor#uploadRepository}.
*
* var loader = editor.uploadRepository.create( file );
* loader.loadAndUpload( 'http://foo/bar' );
*
* To find more information about handling files see the {@link CKEDITOR.fileTools.fileLoader} class.
*
* @since 4.5.0
* @class CKEDITOR.fileTools.uploadRepository
* @mixins CKEDITOR.event
* @constructor Creates an instance of the repository.
* @param {CKEDITOR.editor} editor Editor instance. Used only to get the language data.
*/
function UploadRepository( editor ) {
this.editor = editor;
this.loaders = [];
}
UploadRepository.prototype = {
/**
* Creates a {@link CKEDITOR.fileTools.fileLoader file loader} instance with a unique ID.
* The instance can be later retrieved from the repository using the {@link #loaders} array.
*
* Fires the {@link CKEDITOR.fileTools.uploadRepository#instanceCreated instanceCreated} event.
*
* @param {Blob/String} fileOrData See {@link CKEDITOR.fileTools.fileLoader}.
* @param {String} fileName See {@link CKEDITOR.fileTools.fileLoader}.
* @param {Function} [loaderType] Loader type to be created. If skipped, the default {@link CKEDITOR.fileTools.fileLoader}
* type will be used.
* @returns {CKEDITOR.fileTools.fileLoader} The created file loader instance.
*/
create: function( fileOrData, fileName, loaderType ) {
loaderType = loaderType || FileLoader;
var id = this.loaders.length,
loader = new loaderType( this.editor, fileOrData, fileName );
loader.id = id;
this.loaders[ id ] = loader;
this.fire( 'instanceCreated', loader );
return loader;
},
/**
* Returns `true` if all loaders finished their jobs.
*
* @returns {Boolean} `true` if all loaders finished their job, `false` otherwise.
*/
isFinished: function() {
for ( var id = 0; id < this.loaders.length; ++id ) {
if ( !this.loaders[ id ].isFinished() ) {
return false;
}
}
return true;
}
/**
* Array of loaders created by the {@link #create} method. Loaders' {@link CKEDITOR.fileTools.fileLoader#id IDs}
* are indexes.
*
* @readonly
* @property {CKEDITOR.fileTools.fileLoader[]} loaders
*/
/**
* Event fired when the {@link CKEDITOR.fileTools.fileLoader file loader} is created.
*
* @event instanceCreated
* @param {CKEDITOR.fileTools.fileLoader} data Created file loader.
*/
};
/**
* The `FileLoader` class is a wrapper which handles two file operations: loading the content of the file stored on
* the user's device into the memory and uploading the file to the server.
*
* There are two possible ways to create a `FileLoader` instance: with a [Blob](https://developer.mozilla.org/en/docs/Web/API/Blob)
* (e.g. acquired from the {@link CKEDITOR.plugins.clipboard.dataTransfer#getFile} method) or with data as a Base64 string.
* Note that if the constructor gets the data as a Base64 string, there is no need to load the data, the data is already loaded.
*
* The `FileLoader` is created for a single load and upload process so if you abort the process,
* you need to create a new `FileLoader`.
*
* All process parameters are stored in public properties.
*
* `FileLoader` implements events so you can listen to them to react to changes. There are two types of events:
* events to notify the listeners about changes and an event that lets the listeners synchronize with current {@link #status}.
*
* The first group of events contains {@link #event-loading}, {@link #event-loaded}, {@link #event-uploading},
* {@link #event-uploaded}, {@link #event-error} and {@link #event-abort}. These events are called only once,
* when the {@link #status} changes.
*
* The second type is the {@link #event-update} event. It is fired every time the {@link #status} changes, the progress changes
* or the {@link #method-update} method is called. It is created to synchronize the visual representation of the loader with
* its status. For example if the dialog window shows the upload progress, it should be refreshed on
* the {@link #event-update} listener. Then when the user closes and reopens this dialog, the {@link #method-update} method should
* be called to refresh the progress.
*
* Default request and response formats will work with CKFinder 2.4.3 and above. If you need a custom request
* or response handling you need to overwrite the default behavior using the {@link CKEDITOR.editor#fileUploadRequest} and
* {@link CKEDITOR.editor#fileUploadResponse} events. For more information see their documentation.
*
* To create a `FileLoader` instance, use the {@link CKEDITOR.fileTools.uploadRepository} class.
*
* Here is a simple `FileLoader` usage example:
*
* editor.on( 'paste', function( evt ) {
* for ( var i = 0; i < evt.data.dataTransfer.getFilesCount(); i++ ) {
* var file = evt.data.dataTransfer.getFile( i );
*
* if ( CKEDITOR.fileTools.isTypeSupported( file, /image\/png/ ) ) {
* var loader = editor.uploadRepository.create( file );
*
* loader.on( 'update', function() {
* document.getElementById( 'uploadProgress' ).innerHTML = loader.status;
* } );
*
* loader.on( 'error', function() {
* alert( 'Error!' );
* } );
*
* loader.loadAndUpload( 'http://upload.url/' );
*
* evt.data.dataValue += 'loading...'
* }
* }
* } );
*
* Note that `FileLoader` uses the native file API which is supported **since Internet Explorer 10**.
*
* @since 4.5.0
* @class CKEDITOR.fileTools.fileLoader
* @mixins CKEDITOR.event
* @constructor Creates an instance of the class and sets initial values for all properties.
* @param {CKEDITOR.editor} editor The editor instance. Used only to get language data.
* @param {Blob/String} fileOrData A [blob object](https://developer.mozilla.org/en/docs/Web/API/Blob) or a data
* string encoded with Base64.
* @param {String} [fileName] The file name. If not set and the second parameter is a file, then its name will be used.
* If not set and the second parameter is a Base64 data string, then the file name will be created based on
* the {@link CKEDITOR.config#fileTools_defaultFileName} option.
*/
function FileLoader( editor, fileOrData, fileName ) {
var mimeParts,
defaultFileName = editor.config.fileTools_defaultFileName;
this.editor = editor;
this.lang = editor.lang;
if ( typeof fileOrData === 'string' ) {
// Data is already loaded from disc.
this.data = fileOrData;
this.file = dataToFile( this.data );
this.total = this.file.size;
this.loaded = this.total;
} else {
this.data = null;
this.file = fileOrData;
this.total = this.file.size;
this.loaded = 0;
}
if ( fileName ) {
this.fileName = fileName;
} else if ( this.file.name ) {
this.fileName = this.file.name;
} else {
mimeParts = this.file.type.split( '/' );
if ( defaultFileName ) {
mimeParts[ 0 ] = defaultFileName;
}
this.fileName = mimeParts.join( '.' );
}
this.uploaded = 0;
this.uploadTotal = null;
this.responseData = null;
this.status = 'created';
this.abort = function() {
this.changeStatus( 'abort' );
};
}
/**
* The loader status. Possible values:
*
* * `created` – The loader was created, but neither load nor upload started.
* * `loading` – The file is being loaded from the user's storage.
* * `loaded` – The file was loaded, the process is finished.
* * `uploading` – The file is being uploaded to the server.
* * `uploaded` – The file was uploaded, the process is finished.
* * `error` – The process stops because of an error, more details are available in the {@link #message} property.
* * `abort` – The process was stopped by the user.
*
* @property {String} status
*/
/**
* String data encoded with Base64. If the `FileLoader` is created with a Base64 string, the `data` is that string.
* If a file was passed to the constructor, the data is `null` until loading is completed.
*
* @readonly
* @property {String} data
*/
/**
* File object which represents the handled file. This property is set for both constructor options (file or data).
*
* @readonly
* @property {Blob} file
*/
/**
* The name of the file. If there is no file name, it is created by using the
* {@link CKEDITOR.config#fileTools_defaultFileName} option.
*
* @readonly
* @property {String} fileName
*/
/**
* The number of loaded bytes. If the `FileLoader` was created with a data string,
* the loaded value equals the {@link #total} value.
*
* @readonly
* @property {Number} loaded
*/
/**
* The number of uploaded bytes.
*
* @readonly
* @property {Number} uploaded
*/
/**
* The total file size in bytes.
*
* @readonly
* @property {Number} total
*/
/**
* All data received in the response from the server. If the server returns additional data, it will be available
* in this property.
*
* It contains all data set in the {@link CKEDITOR.editor#fileUploadResponse} event listener.
*
* @readonly
* @property {Object} responseData
*/
/**
* The total size of upload data in bytes.
* If the `xhr.upload` object is present, this value will indicate the total size of the request payload, not only the file
* size itself. If the `xhr.upload` object is not available and the real upload size cannot be obtained, this value will
* be equal to {@link #total}. It has a `null` value until the upload size is known.
*
* loader.on( 'update', function() {
* // Wait till uploadTotal is present.
* if ( loader.uploadTotal ) {
* console.log( 'uploadTotal: ' + loader.uploadTotal );
* }
* });
*
* @readonly
* @property {Number} uploadTotal
*/
/**
* The error message or additional information received from the server.
*
* @readonly
* @property {String} message
*/
/**
* The URL to the file when it is uploaded or received from the server.
*
* @readonly
* @property {String} url
*/
/**
* The target of the upload.
*
* @readonly
* @property {String} uploadUrl
*/
/**
*
* Native `FileReader` reference used to load the file.
*
* @readonly
* @property {FileReader} reader
*/
/**
* Native `XMLHttpRequest` reference used to upload the file.
*
* @readonly
* @property {XMLHttpRequest} xhr
*/
/**
* If `FileLoader` was created using {@link CKEDITOR.fileTools.uploadRepository},
* it gets an identifier which is stored in this property.
*
* @readonly
* @property {Number} id
*/
/**
* Aborts the process.
*
* This method has a different behavior depending on the current {@link #status}.
*
* * If the {@link #status} is `loading` or `uploading`, current operation will be aborted.
* * If the {@link #status} is `created`, `loading` or `uploading`, the {@link #status} will be changed to `abort`
* and the {@link #event-abort} event will be called.
* * If the {@link #status} is `loaded`, `uploaded`, `error` or `abort`, this method will do nothing.
*
* @method abort
*/
FileLoader.prototype = {
/**
* Loads a file from the storage on the user's device to the `data` attribute and uploads it to the server.
*
* The order of {@link #status statuses} for a successful load and upload is:
*
* * `created`,
* * `loading`,
* * `uploading`,
* * `uploaded`.
*
* @param {String} url The upload URL.
* @param {Object} [additionalRequestParameters] Additional parameters that would be passed to
* the {@link CKEDITOR.editor#fileUploadRequest} event.
*/
loadAndUpload: function( url, additionalRequestParameters ) {
var loader = this;
this.once( 'loaded', function( evt ) {
// Cancel both 'loaded' and 'update' events,
// because 'loaded' is terminated state.
evt.cancel();
loader.once( 'update', function( evt ) {
evt.cancel();
}, null, null, 0 );
// Start uploading.
loader.upload( url, additionalRequestParameters );
}, null, null, 0 );
this.load();
},
/**
* Loads a file from the storage on the user's device to the `data` attribute.
*
* The order of the {@link #status statuses} for a successful load is:
*
* * `created`,
* * `loading`,
* * `loaded`.
*/
load: function() {
var loader = this;
this.reader = new FileReader();
var reader = this.reader;
loader.changeStatus( 'loading' );
this.abort = function() {
loader.reader.abort();
};
reader.onabort = function() {
loader.changeStatus( 'abort' );
};
reader.onerror = function() {
loader.message = loader.lang.filetools.loadError;
loader.changeStatus( 'error' );
};
reader.onprogress = function( evt ) {
loader.loaded = evt.loaded;
loader.update();
};
reader.onload = function() {
loader.loaded = loader.total;
loader.data = reader.result;
loader.changeStatus( 'loaded' );
};
reader.readAsDataURL( this.file );
},
/**
* Uploads a file to the server.
*
* The order of the {@link #status statuses} for a successful upload is:
*
* * `created`,
* * `uploading`,
* * `uploaded`.
*
* @param {String} url The upload URL.
* @param {Object} [additionalRequestParameters] Additional data that would be passed to
* the {@link CKEDITOR.editor#fileUploadRequest} event.
*/
upload: function( url, additionalRequestParameters ) {
var requestData = additionalRequestParameters || {};
if ( !url ) {
this.message = this.lang.filetools.noUrlError;
this.changeStatus( 'error' );
} else {
this.uploadUrl = url;
this.xhr = new XMLHttpRequest();
this.attachRequestListeners();
if ( this.editor.fire( 'fileUploadRequest', { fileLoader: this, requestData: requestData } ) ) {
this.changeStatus( 'uploading' );
}
}
},
/**
* Attaches listeners to the XML HTTP request object.
*
* @private
* @param {XMLHttpRequest} xhr XML HTTP request object.
*/
attachRequestListeners: function() {
var loader = this,
xhr = this.xhr;
loader.abort = function() {
xhr.abort();
onAbort();
};
xhr.onerror = onError;
xhr.onabort = onAbort;
// https://dev.ckeditor.com/ticket/13533 - When xhr.upload is present attach onprogress, onerror and onabort functions to get actual upload
// information.
if ( xhr.upload ) {
xhr.upload.onprogress = function( evt ) {
if ( evt.lengthComputable ) {
// Set uploadTotal with correct data.
if ( !loader.uploadTotal ) {
loader.uploadTotal = evt.total;
}
loader.uploaded = evt.loaded;
loader.update();
}
};
xhr.upload.onerror = onError;
xhr.upload.onabort = onAbort;
} else {
// https://dev.ckeditor.com/ticket/13533 - If xhr.upload is not supported - fire update event anyway and set uploadTotal to file size.
loader.uploadTotal = loader.total;
loader.update();
}
xhr.onload = function() {
// https://dev.ckeditor.com/ticket/13433 - Call update at the end of the upload. When xhr.upload object is not supported there will be
// no update events fired during the whole process.
loader.update();
// https://dev.ckeditor.com/ticket/13433 - Check if loader was not aborted during last update.
if ( loader.status == 'abort' ) {
return;
}
loader.uploaded = loader.uploadTotal;
if ( xhr.status < 200 || xhr.status > 299 ) {
loader.message = loader.lang.filetools[ 'httpError' + xhr.status ];
if ( !loader.message ) {
loader.message = loader.lang.filetools.httpError.replace( '%1', xhr.status );
}
loader.changeStatus( 'error' );
} else {
var data = {
fileLoader: loader
},
// Values to copy from event to FileLoader.
valuesToCopy = [ 'message', 'fileName', 'url' ],
success = loader.editor.fire( 'fileUploadResponse', data );
for ( var i = 0; i < valuesToCopy.length; i++ ) {
var key = valuesToCopy[ i ];
if ( typeof data[ key ] === 'string' ) {
loader[ key ] = data[ key ];
}
}
// The whole response is also held for use by upload widgets (https://dev.ckeditor.com/ticket/13519).
loader.responseData = data;
// But without reference to the loader itself.
delete loader.responseData.fileLoader;
if ( success === false ) {
loader.changeStatus( 'error' );
} else {
loader.changeStatus( 'uploaded' );
}
}
};
function onError() {
// Prevent changing status twice, when XHR.error and XHR.upload.onerror could be called together.
if ( loader.status == 'error' ) {
return;
}
loader.message = loader.lang.filetools.networkError;
loader.changeStatus( 'error' );
}
function onAbort() {
// Prevent changing status twice, when XHR.onabort and XHR.upload.onabort could be called together.
if ( loader.status == 'abort' ) {
return;
}
loader.changeStatus( 'abort' );
}
},
/**
* Changes {@link #status} to the new status, updates the {@link #method-abort} method if needed and fires two events:
* new status and {@link #event-update}.
*
* @private
* @param {String} newStatus New status to be set.
*/
changeStatus: function( newStatus ) {
this.status = newStatus;
if ( newStatus == 'error' || newStatus == 'abort' ||
newStatus == 'loaded' || newStatus == 'uploaded' ) {
this.abort = function() {};
}
this.fire( newStatus );
this.update();
},
/**
* Updates the state of the `FileLoader` listeners. This method should be called if the state of the visual representation
* of the upload process is out of synchronization and needs to be refreshed (e.g. because of an undo operation or
* because the dialog window with the upload is closed and reopened). Fires the {@link #event-update} event.
*/
update: function() {
this.fire( 'update' );
},
/**
* Returns `true` if the loading and uploading finished (successfully or not), so the {@link #status} is
* `loaded`, `uploaded`, `error` or `abort`.
*
* @returns {Boolean} `true` if the loading and uploading finished.
*/
isFinished: function() {
return !!this.status.match( /^(?:loaded|uploaded|error|abort)$/ );
}
/**
* Event fired when the {@link #status} changes to `loading`. It will be fired once for the `FileLoader`.
*
* @event loading
*/
/**
* Event fired when the {@link #status} changes to `loaded`. It will be fired once for the `FileLoader`.
*
* @event loaded
*/
/**
* Event fired when the {@link #status} changes to `uploading`. It will be fired once for the `FileLoader`.
*
* @event uploading
*/
/**
* Event fired when the {@link #status} changes to `uploaded`. It will be fired once for the `FileLoader`.
*
* @event uploaded
*/
/**
* Event fired when the {@link #status} changes to `error`. It will be fired once for the `FileLoader`.
*
* @event error
*/
/**
* Event fired when the {@link #status} changes to `abort`. It will be fired once for the `FileLoader`.
*
* @event abort
*/
/**
* Event fired every time the `FileLoader` {@link #status} or progress changes or the {@link #method-update} method is called.
* This event was designed to allow showing the visualization of the progress and refresh that visualization
* every time the status changes. Note that multiple `update` events may be fired with the same status.
*
* @event update
*/
};
CKEDITOR.event.implementOn( UploadRepository.prototype );
CKEDITOR.event.implementOn( FileLoader.prototype );
var base64HeaderRegExp = /^data:(\S*?);base64,/;
// Transforms Base64 string data into file and creates name for that file based on the mime type.
//
// @private
// @param {String} data Base64 string data.
// @returns {Blob} File.
function dataToFile( data ) {
var contentType = data.match( base64HeaderRegExp )[ 1 ],
base64Data = data.replace( base64HeaderRegExp, '' ),
byteCharacters = atob( base64Data ),
byteArrays = [],
sliceSize = 512,
offset, slice, byteNumbers, i, byteArray;
for ( offset = 0; offset < byteCharacters.length; offset += sliceSize ) {
slice = byteCharacters.slice( offset, offset + sliceSize );
byteNumbers = new Array( slice.length );
for ( i = 0; i < slice.length; i++ ) {
byteNumbers[ i ] = slice.charCodeAt( i );
}
byteArray = new Uint8Array( byteNumbers );
byteArrays.push( byteArray );
}
return new Blob( byteArrays, { type: contentType } );
}
//
// PUBLIC API -------------------------------------------------------------
//
// Two plugins extend this object.
if ( !CKEDITOR.fileTools ) {
/**
* Helpers to load and upload a file.
*
* @since 4.5.0
* @singleton
* @class CKEDITOR.fileTools
*/
CKEDITOR.fileTools = {};
}
CKEDITOR.tools.extend( CKEDITOR.fileTools, {
uploadRepository: UploadRepository,
fileLoader: FileLoader,
/**
* Gets the upload URL from the {@link CKEDITOR.config configuration}. Because of backward compatibility
* the URL can be set using multiple configuration options.
*
* If the `type` is defined, then four configuration options will be checked in the following order
* (examples for `type='image'`):
*
* * `[type]UploadUrl`, e.g. {@link CKEDITOR.config#imageUploadUrl},
* * {@link CKEDITOR.config#uploadUrl},
* * `filebrowser[uppercased type]uploadUrl`, e.g. {@link CKEDITOR.config#filebrowserImageUploadUrl},
* * {@link CKEDITOR.config#filebrowserUploadUrl}.
*
* If the `type` is not defined, two configuration options will be checked:
*
* * {@link CKEDITOR.config#uploadUrl},
* * {@link CKEDITOR.config#filebrowserUploadUrl}.
*
* `filebrowser[type]uploadUrl` and `filebrowserUploadUrl` are checked for backward compatibility with the
* `filebrowser` plugin.
*
* For both `filebrowser[type]uploadUrl` and `filebrowserUploadUrl` `&responseType=json` is added to the end of the URL.
*
* @param {Object} config The configuration file.
* @param {String} [type] Upload file type.
* @returns {String/null} Upload URL or `null` if none of the configuration options were defined.
*/
getUploadUrl: function( config, type ) {
var capitalize = CKEDITOR.tools.capitalize;
if ( type && config[ type + 'UploadUrl' ] ) {
return config[ type + 'UploadUrl' ];
} else if ( config.uploadUrl ) {
return config.uploadUrl;
} else if ( type && config[ 'filebrowser' + capitalize( type, 1 ) + 'UploadUrl' ] ) {
return config[ 'filebrowser' + capitalize( type, 1 ) + 'UploadUrl' ] + '&responseType=json';
} else if ( config.filebrowserUploadUrl ) {
return config.filebrowserUploadUrl + '&responseType=json';
}
return null;
},
/**
* Checks if the MIME type of the given file is supported.
*
* CKEDITOR.fileTools.isTypeSupported( { type: 'image/png' }, /image\/(png|jpeg)/ ); // true
* CKEDITOR.fileTools.isTypeSupported( { type: 'image/png' }, /image\/(gif|jpeg)/ ); // false
*
* @param {Blob} file The file to check.
* @param {RegExp} supportedTypes A regular expression to check the MIME type of the file.
* @returns {Boolean} `true` if the file type is supported.
*/
isTypeSupported: function( file, supportedTypes ) {
return !!file.type.match( supportedTypes );
},
/**
* Feature detection indicating whether the current browser supports methods essential to send files over an XHR request.
*
* @since 4.9.0
* @property {Boolean} isFileUploadSupported
*/
isFileUploadSupported: ( function() {
return typeof FileReader === 'function' &&
typeof ( new FileReader() ).readAsDataURL === 'function' &&
typeof FormData === 'function' &&
typeof ( new FormData() ).append === 'function' &&
typeof XMLHttpRequest === 'function' &&
typeof Blob === 'function';
} )()
} );
} )();
/**
* The URL where files should be uploaded.
*
* An empty string means that the option is disabled.
*
* @since 4.5.0
* @cfg {String} [uploadUrl='']
* @member CKEDITOR.config
*/
/**
* The default file name (without extension) that will be used for files created from a Base64 data string
* (for example for files pasted into the editor).
* This name will be combined with the MIME type to create the full file name with the extension.
*
* If `fileTools_defaultFileName` is set to `default-name` and data's MIME type is `image/png`,
* the resulting file name will be `default-name.png`.
*
* If `fileTools_defaultFileName` is not set, the file name will be created using only its MIME type.
* For example for `image/png` the file name will be `image.png`.
*
* @since 4.5.3
* @cfg {String} [fileTools_defaultFileName='']
* @member CKEDITOR.config
*/
/**
* Allows to add extra headers for every request made using the {@link CKEDITOR.fileTools} API.
*
* Note that headers can still be customized per a single request, using the
* [`fileUploadRequest`](https://ckeditor.com/docs/ckeditor4/latest/api/CKEDITOR_editor.html#event-fileUploadRequest)
* event.
*
* config.fileTools_requestHeaders = {
* 'X-Requested-With': 'XMLHttpRequest',
* 'Custom-Header': 'header value'
* };
*
* @since 4.9.0
* @cfg {Object} [fileTools_requestHeaders]
* @member CKEDITOR.config
*/
|
PypiClean
|
/large_image_source_bioformats-1.23.6-py3-none-any.whl/large_image_source_bioformats/__init__.py
|
# This tile source uses javabridge to communicate between python and java. It
# requires some version of java's jvm to be available (see
# https://jdk.java.net/archive/). It uses the python-bioformats wheel to get
# the bioformats JAR file. A later version may be desirable (see
# https://www.openmicroscopy.org/bio-formats/downloads/). See
# https://downloads.openmicroscopy.org/bio-formats/5.1.5/api/loci/formats/
# IFormatReader.html for interface details.
import atexit
import logging
import math
import os
import re
import threading
import types
import weakref
import numpy as np
import large_image.tilesource.base
from large_image import config
from large_image.cache_util import LruCacheMetaclass, methodcache
from large_image.constants import TILE_FORMAT_NUMPY, SourcePriority
from large_image.exceptions import TileSourceError, TileSourceFileNotFoundError
from large_image.tilesource import FileTileSource, nearPowerOfTwo
try:
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version as _importlib_version
except ImportError:
from importlib_metadata import PackageNotFoundError
from importlib_metadata import version as _importlib_version
try:
__version__ = _importlib_version(__name__)
except PackageNotFoundError:
# package is not installed
pass
bioformats = None
# import javabridge
javabridge = None
_javabridgeStarted = None
_openImages = []
# Default to ignoring files with no extension and some specific extensions.
config.ConfigValues['source_bioformats_ignored_names'] = \
r'(^[^.]*|\.(jpg|jpeg|jpe|png|tif|tiff|ndpi|nd2|ome|nc|json|isyntax))$'
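# Illustrative sketch (not part of this module): a deployment that also wants
# bioformats to skip, for example, ".zarr" paths could override the same
# config value before any sources are opened (the extension list below is
# hypothetical, not a recommendation):
#
#     config.ConfigValues['source_bioformats_ignored_names'] = \
#         r'(^[^.]*|\.(jpg|jpeg|png|tif|tiff|json|zarr))$'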
def _monitor_thread():
main_thread = threading.main_thread()
main_thread.join()
if len(_openImages):
try:
javabridge.attach()
while len(_openImages):
source = _openImages.pop()
source = source()
try:
source._bioimage.close()
except Exception:
pass
source._bioimage = None
except AssertionError:
pass
finally:
if javabridge.get_env():
javabridge.detach()
_stopJavabridge()
def _reduceLogging():
# As of bioformats 4.0.0, org.apache.log4j isn't in the bundled
# jar file, so setting log levels just produces needless warnings.
# bioformats.log4j.basic_config()
# javabridge.JClassWrapper('loci.common.Log4jTools').setRootLevel(
# logging.getLevelName(logger.level))
#
# This is taken from
# https://github.com/pskeshu/microscoper/blob/master/microscoper/io.py
try:
rootLoggerName = javabridge.get_static_field(
'org/slf4j/Logger', 'ROOT_LOGGER_NAME', 'Ljava/lang/String;')
rootLogger = javabridge.static_call(
'org/slf4j/LoggerFactory', 'getLogger',
'(Ljava/lang/String;)Lorg/slf4j/Logger;', rootLoggerName)
logLevel = javabridge.get_static_field(
'ch/qos/logback/classic/Level', 'WARN', 'Lch/qos/logback/classic/Level;')
javabridge.call(rootLogger, 'setLevel', '(Lch/qos/logback/classic/Level;)V', logLevel)
except Exception:
pass
bioformats.formatreader.logger.setLevel(logging.ERROR)
def _startJavabridge(logger):
global _javabridgeStarted
if _javabridgeStarted is None:
# Only import these when first asked. They are slow to import.
global bioformats
global javabridge
if bioformats is None:
import bioformats
if javabridge is None:
import javabridge
# We need something to wake up at exit and shut things down
monitor = threading.Thread(target=_monitor_thread)
monitor.daemon = True
monitor.start()
try:
javabridge.start_vm(class_path=bioformats.JARS, run_headless=True)
_reduceLogging()
atexit.register(_stopJavabridge)
logger.info('Started JVM for Bioformats tile source.')
_javabridgeStarted = True
except RuntimeError as exc:
logger.exception('Cannot start JVM for Bioformats tile source: %r', exc)
_javabridgeStarted = False
return _javabridgeStarted
def _stopJavabridge(*args, **kwargs):
global _javabridgeStarted
if javabridge is not None:
javabridge.kill_vm()
_javabridgeStarted = None
class BioformatsFileTileSource(FileTileSource, metaclass=LruCacheMetaclass):
"""
Provides tile access via Bioformats.
"""
cacheName = 'tilesource'
name = 'bioformats'
extensions = {
None: SourcePriority.FALLBACK,
'czi': SourcePriority.PREFERRED,
'lif': SourcePriority.MEDIUM,
'vsi': SourcePriority.PREFERRED,
}
mimeTypes = {
None: SourcePriority.FALLBACK,
'image/czi': SourcePriority.PREFERRED,
'image/vsi': SourcePriority.PREFERRED,
}
# If frames are smaller than this they are served as single tiles, which
# can be more efficient than handling multiple tiles.
_singleTileThreshold = 2048
_tileSize = 512
_associatedImageMaxSize = 8192
_maxSkippedLevels = 3
def __init__(self, path, **kwargs): # noqa
"""
Initialize the tile class. See the base class for other available
parameters.
:param path: the associated file path.
"""
super().__init__(path, **kwargs)
largeImagePath = str(self._getLargeImagePath())
self._ignoreSourceNames('bioformats', largeImagePath, r'\.png$')
if not _startJavabridge(self.logger):
msg = 'File cannot be opened by bioformats reader because javabridge failed to start'
raise TileSourceError(msg)
self._tileLock = threading.RLock()
try:
javabridge.attach()
try:
self._bioimage = bioformats.ImageReader(largeImagePath)
except (AttributeError, OSError) as exc:
if not os.path.isfile(largeImagePath):
raise TileSourceFileNotFoundError(largeImagePath) from None
self.logger.debug('File cannot be opened via Bioformats. (%r)', exc)
raise TileSourceError('File cannot be opened via Bioformats (%r)' % exc)
_openImages.append(weakref.ref(self))
rdr = self._bioimage.rdr
# Bind additional functions not done by bioformats module.
# Functions are listed at https://downloads.openmicroscopy.org
# /bio-formats/5.1.5/api/loci/formats/IFormatReader.html
for (name, params, desc) in [
('getBitsPerPixel', '()I', 'Get the number of bits per pixel'),
('getDomains', '()[Ljava/lang/String;', 'Get a list of domains'),
('getEffectiveSizeC', '()I', 'effectiveC * Z * T = imageCount'),
('getOptimalTileHeight', '()I', 'the optimal sub-image height '
'for use with openBytes'),
('getOptimalTileWidth', '()I', 'the optimal sub-image width '
'for use with openBytes'),
('getResolution', '()I', 'The current resolution level'),
('getResolutionCount', '()I', 'The number of resolutions for '
'the current series'),
('getZCTCoords', '(I)[I', 'Gets the Z, C and T coordinates '
'(real sizes) corresponding to the given rasterized index value.'),
('hasFlattenedResolutions', '()Z', 'True if resolutions have been flattened'),
('isMetadataComplete', '()Z', 'True if metadata is completely parsed'),
('isNormalized', '()Z', 'Is float data normalized'),
('setFlattenedResolutions', '(Z)V', 'Set if resolution should be flattened'),
('setResolution', '(I)V', 'Set the resolution level'),
]:
setattr(rdr, name, types.MethodType(
javabridge.jutil.make_method(name, params, desc), rdr))
# rdr.setFlattenedResolutions(False)
self._metadataForCurrentSeries(rdr)
self._checkSeries(rdr)
bmd = bioformats.metadatatools.MetadataRetrieve(self._bioimage.metadata)
try:
self._metadata['channelNames'] = [
bmd.getChannelName(0, c) or bmd.getChannelID(0, c)
for c in range(self._metadata['sizeColorPlanes'])]
except Exception:
self._metadata['channelNames'] = []
for key in ['sizeXY', 'sizeC', 'sizeZ', 'sizeT']:
if not isinstance(self._metadata[key], int) or self._metadata[key] < 1:
self._metadata[key] = 1
self.sizeX = self._metadata['sizeX']
self.sizeY = self._metadata['sizeY']
if self.sizeX <= 0 or self.sizeY <= 0:
                msg = 'File cannot be opened with bioformats.'
raise TileSourceError(msg)
self._computeTiles()
self._computeLevels()
self._computeMagnification()
except javabridge.JavaException as exc:
es = javabridge.to_string(exc.throwable)
self.logger.debug('File cannot be opened via Bioformats. (%s)', es)
raise TileSourceError('File cannot be opened via Bioformats. (%s)' % es)
except (AttributeError, UnicodeDecodeError):
self.logger.exception('The bioformats reader threw an unhandled exception.')
msg = 'The bioformats reader threw an unhandled exception.'
raise TileSourceError(msg)
finally:
if javabridge.get_env():
javabridge.detach()
if self.levels < 1:
msg = 'Bioformats image must have at least one level.'
raise TileSourceError(msg)
if self.sizeX <= 0 or self.sizeY <= 0:
msg = 'Bioformats tile size is invalid.'
raise TileSourceError(msg)
try:
self.getTile(0, 0, self.levels - 1)
except Exception as exc:
raise TileSourceError('Bioformats cannot read a tile: %r' % exc)
self._populatedLevels = len([
v for v in self._metadata['frameSeries'][0]['series'] if v is not None])
def __del__(self):
if getattr(self, '_bioimage', None) is not None:
try:
javabridge.attach()
self._bioimage.close()
del self._bioimage
_openImages.remove(weakref.ref(self))
finally:
if javabridge.get_env():
javabridge.detach()
def _metadataForCurrentSeries(self, rdr):
self._metadata = getattr(self, '_metadata', {})
self._metadata.update({
'dimensionOrder': rdr.getDimensionOrder(),
'metadata': javabridge.jdictionary_to_string_dictionary(
rdr.getMetadata()),
'seriesMetadata': javabridge.jdictionary_to_string_dictionary(
rdr.getSeriesMetadata()),
'seriesCount': rdr.getSeriesCount(),
'imageCount': rdr.getImageCount(),
'rgbChannelCount': rdr.getRGBChannelCount(),
'sizeColorPlanes': rdr.getSizeC(),
'sizeT': rdr.getSizeT(),
'sizeZ': rdr.getSizeZ(),
'sizeX': rdr.getSizeX(),
'sizeY': rdr.getSizeY(),
'pixelType': rdr.getPixelType(),
'isLittleEndian': rdr.isLittleEndian(),
'isRGB': rdr.isRGB(),
'isInterleaved': rdr.isInterleaved(),
'isIndexed': rdr.isIndexed(),
'bitsPerPixel': rdr.getBitsPerPixel(),
'sizeC': rdr.getEffectiveSizeC(),
'normalized': rdr.isNormalized(),
'metadataComplete': rdr.isMetadataComplete(),
# 'domains': rdr.getDomains(),
'optimalTileWidth': rdr.getOptimalTileWidth(),
'optimalTileHeight': rdr.getOptimalTileHeight(),
'resolutionCount': rdr.getResolutionCount(),
'flattenedResolutions': rdr.hasFlattenedResolutions(),
})
def _getSeriesStarts(self, rdr): # noqa
self._metadata['frameSeries'] = [{
'series': [0],
'sizeX': self._metadata['sizeX'],
'sizeY': self._metadata['sizeY'],
}]
if self._metadata['seriesCount'] <= 1:
return 1
seriesMetadata = {}
for idx in range(self._metadata['seriesCount']):
rdr.setSeries(idx)
seriesMetadata.update(
javabridge.jdictionary_to_string_dictionary(rdr.getSeriesMetadata()))
frameList = []
nextSeriesNum = 0
try:
for key, value in seriesMetadata.items():
frameNum = int(value)
seriesNum = int(key.split('Series ')[1].split('|')[0]) - 1
if seriesNum >= 0 and seriesNum < self._metadata['seriesCount']:
while len(frameList) <= frameNum:
frameList.append([])
if seriesNum not in frameList[frameNum]:
frameList[frameNum].append(seriesNum)
frameList[frameNum].sort()
nextSeriesNum = max(nextSeriesNum, seriesNum + 1)
except Exception as exc:
self.logger.debug('Failed to parse series information: %s', exc)
rdr.setSeries(0)
if any(key for key in seriesMetadata if key.startswith('Series ')):
return 1
if not len(seriesMetadata) or not any(
key for key in seriesMetadata if key.startswith('Series ')):
frameList = [[0]]
nextSeriesNum = 1
rdr.setSeries(0)
lastX, lastY = rdr.getSizeX(), rdr.getSizeY()
for idx in range(1, self._metadata['seriesCount']):
rdr.setSeries(idx)
if (rdr.getSizeX() == self._metadata['sizeX'] and
                    rdr.getSizeY() == self._metadata['sizeY']):
frameList.append([idx])
if nextSeriesNum == idx:
nextSeriesNum = idx + 1
lastX, lastY = self._metadata['sizeX'], self._metadata['sizeY']
if (rdr.getSizeX() * rdr.getSizeY() >
self._metadata['sizeX'] * self._metadata['sizeY']):
frameList = [[idx]]
nextSeriesNum = idx + 1
self._metadata['sizeX'] = self.sizeX = lastX = rdr.getSizeX()
self._metadata['sizeY'] = self.sizeY = lastY = rdr.getSizeY()
if (lastX and lastY and
nearPowerOfTwo(rdr.getSizeX(), lastX) and rdr.getSizeX() < lastX and
nearPowerOfTwo(rdr.getSizeY(), lastY) and rdr.getSizeY() < lastY):
steps = int(round(math.log(
lastX * lastY / (rdr.getSizeX() * rdr.getSizeY())) / math.log(2) / 2))
frameList[-1] += [None] * (steps - 1)
frameList[-1].append(idx)
lastX, lastY = rdr.getSizeX(), rdr.getSizeY()
if nextSeriesNum == idx:
nextSeriesNum = idx + 1
frameList = [fl for fl in frameList if len(fl)]
self._metadata['frameSeries'] = [{
'series': fl,
} for fl in frameList]
rdr.setSeries(0)
return nextSeriesNum
def _checkSeries(self, rdr):
firstPossibleAssoc = self._getSeriesStarts(rdr)
self._metadata['seriesAssociatedImages'] = {}
for seriesNum in range(firstPossibleAssoc, self._metadata['seriesCount']):
if any((seriesNum in series['series']) for series in self._metadata['frameSeries']):
continue
rdr.setSeries(seriesNum)
info = {
'sizeX': rdr.getSizeX(),
'sizeY': rdr.getSizeY(),
}
if (info['sizeX'] < self._associatedImageMaxSize and
info['sizeY'] < self._associatedImageMaxSize):
# TODO: Figure out better names for associated images. Can
# we tell if any of them are the macro or label image?
info['seriesNum'] = seriesNum
self._metadata['seriesAssociatedImages'][
'image%d' % seriesNum] = info
validate = None
for frame in self._metadata['frameSeries']:
for level in range(len(frame['series'])):
if level and frame['series'][level] is None:
continue
rdr.setSeries(frame['series'][level])
self._metadataForCurrentSeries(rdr)
info = {
'sizeX': rdr.getSizeX(),
'sizeY': rdr.getSizeY(),
}
if not level:
frame.update(info)
self._metadata['sizeX'] = max(self._metadata['sizeX'], frame['sizeX'])
self._metadata['sizeY'] = max(self._metadata['sizeY'], frame['sizeY'])
elif validate is not False:
if (not nearPowerOfTwo(frame['sizeX'], info['sizeX']) or
not nearPowerOfTwo(frame['sizeY'], info['sizeY'])):
frame['series'] = frame['series'][:level]
validate = True
break
rdr.setSeries(frame['series'][0])
self._metadataForCurrentSeries(rdr)
if validate is None:
validate = False
rdr.setSeries(0)
self._metadata['sizeXY'] = len(self._metadata['frameSeries'])
def _computeTiles(self):
if (self._metadata['resolutionCount'] <= 1 and
self.sizeX <= self._singleTileThreshold and
self.sizeY <= self._singleTileThreshold):
self.tileWidth = self.sizeX
self.tileHeight = self.sizeY
elif (128 <= self._metadata['optimalTileWidth'] <= self._singleTileThreshold and
128 <= self._metadata['optimalTileHeight'] <= self._singleTileThreshold):
self.tileWidth = self._metadata['optimalTileWidth']
self.tileHeight = self._metadata['optimalTileHeight']
else:
self.tileWidth = self.tileHeight = self._tileSize
def _computeLevels(self):
self.levels = int(math.ceil(max(
math.log(float(self.sizeX) / self.tileWidth),
math.log(float(self.sizeY) / self.tileHeight)) / math.log(2))) + 1
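    # Worked example (assumed values): with sizeX = 50000, sizeY = 30000 and
    # 512 x 512 tiles, max(log2(50000 / 512), log2(30000 / 512)) is about 6.6,
    # so ceil(...) + 1 gives 8 levels; the highest level is full resolution and
    # level 0 fits the whole image in a single tile.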
def _computeMagnification(self):
self._magnification = {}
metadata = self._metadata['metadata']
valuekeys = {
'x': [('Scaling|Distance|Value #1', 1e3)],
'y': [('Scaling|Distance|Value #2', 1e3)],
}
tuplekeys = [
('Physical pixel size', 1e-3),
]
magkeys = [
'Information|Instrument|Objective|NominalMagnification #1',
'Magnification #1',
]
for axis in {'x', 'y'}:
for key, units in valuekeys[axis]:
if metadata.get(key):
self._magnification['mm_' + axis] = float(metadata[key]) * units
if 'mm_x' not in self._magnification and 'mm_y' not in self._magnification:
for key, units in tuplekeys:
if metadata.get(key):
found = re.match(r'^\D*(\d+(|\.\d+))\D+(\d+(|\.\d+))\D*$', metadata[key])
if found:
try:
self._magnification['mm_x'], self._magnification['mm_y'] = (
float(found.groups()[0]) * units, float(found.groups()[2]) * units)
except Exception:
pass
for key in magkeys:
if metadata.get(key):
self._magnification['magnification'] = float(metadata[key])
break
def getNativeMagnification(self):
"""
Get the magnification at a particular level.
:return: magnification, width of a pixel in mm, height of a pixel in mm.
"""
mm_x = self._magnification.get('mm_x')
mm_y = self._magnification.get('mm_y', mm_x)
# Estimate the magnification if we don't have a direct value
        mag = self._magnification.get('magnification') or (
            0.01 / mm_x if mm_x else None)
return {
'magnification': mag,
'mm_x': mm_x,
'mm_y': mm_y,
}
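    # Worked example (assumed value): a pixel size of mm_x = 0.00025 mm
    # (0.25 microns) yields an estimated magnification of 0.01 / 0.00025 = 40x
    # when no explicit magnification is present in the metadata.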
def getMetadata(self):
"""
Return a dictionary of metadata containing levels, sizeX, sizeY,
tileWidth, tileHeight, magnification, mm_x, mm_y, and frames.
:returns: metadata dictionary.
"""
result = super().getMetadata()
# sizeC, sizeZ, sizeT, sizeXY
frames = []
for xy in range(self._metadata['sizeXY']):
for t in range(self._metadata['sizeT']):
for z in range(self._metadata['sizeZ']):
for c in range(self._metadata['sizeC']):
frames.append({
'IndexC': c,
'IndexZ': z,
'IndexT': t,
'IndexXY': xy,
})
if len(self._metadata['frameSeries']) == len(frames):
for idx, frame in enumerate(frames):
frame['sizeX'] = self._metadata['frameSeries'][idx]['sizeX']
frame['sizeY'] = self._metadata['frameSeries'][idx]['sizeY']
frame['levels'] = len(self._metadata['frameSeries'][idx]['series'])
if len(frames) > 1:
result['frames'] = frames
self._addMetadataFrameInformation(result, self._metadata['channelNames'])
return result
def getInternalMetadata(self, **kwargs):
"""
Return additional known metadata about the tile source. Data returned
from this method is not guaranteed to be in any particular format or
have specific values.
:returns: a dictionary of data or None.
"""
return self._metadata
def _getTileFromEmptyLevel(self, x, y, z, **kwargs):
"""
Composite tiles from missing levels from larger levels in pieces to
avoid using too much memory.
"""
fac = int(2 ** self._maxSkippedLevels)
z += self._maxSkippedLevels
scale = 2 ** (self.levels - 1 - z)
result = None
for tx in range(fac - 1, -1, -1):
if x * fac + tx >= int(math.ceil(self.sizeX / self.tileWidth / scale)):
continue
for ty in range(fac - 1, -1, -1):
if y * fac + ty >= int(math.ceil(self.sizeY / self.tileHeight / scale)):
continue
tile = self.getTile(
x * fac + tx, y * fac + ty, z, pilImageAllowed=False,
numpyAllowed=True, **kwargs)
if result is None:
                    result = np.zeros((
                        ty * self.tileHeight + tile.shape[0],
                        tx * self.tileWidth + tile.shape[1],
                        tile.shape[2]), dtype=tile.dtype)
                result[
                    ty * self.tileHeight:ty * self.tileHeight + tile.shape[0],
                    tx * self.tileWidth:tx * self.tileWidth + tile.shape[1],
                    ::] = tile
return result[::scale, ::scale, ::]
@methodcache()
def getTile(self, x, y, z, pilImageAllowed=False, numpyAllowed=False, **kwargs):
self._xyzInRange(x, y, z)
ft = fc = fz = 0
fseries = self._metadata['frameSeries'][0]
if kwargs.get('frame') is not None:
frame = self._getFrame(**kwargs)
fc = frame % self._metadata['sizeC']
fz = (frame // self._metadata['sizeC']) % self._metadata['sizeZ']
ft = (frame // self._metadata['sizeC'] //
self._metadata['sizeZ']) % self._metadata['sizeT']
fxy = (frame // self._metadata['sizeC'] //
self._metadata['sizeZ'] // self._metadata['sizeT'])
            if frame < 0 or fxy >= self._metadata['sizeXY']:
msg = 'Frame does not exist'
raise TileSourceError(msg)
fseries = self._metadata['frameSeries'][fxy]
seriesLevel = self.levels - 1 - z
scale = 1
while seriesLevel >= len(fseries['series']) or fseries['series'][seriesLevel] is None:
seriesLevel -= 1
scale *= 2
offsetx = x * self.tileWidth * scale
offsety = y * self.tileHeight * scale
width = min(self.tileWidth * scale, self.sizeX // 2 ** seriesLevel - offsetx)
height = min(self.tileHeight * scale, self.sizeY // 2 ** seriesLevel - offsety)
sizeXAtScale = fseries['sizeX'] // (2 ** seriesLevel)
sizeYAtScale = fseries['sizeY'] // (2 ** seriesLevel)
finalWidth = width // scale
finalHeight = height // scale
width = min(width, sizeXAtScale - offsetx)
height = min(height, sizeYAtScale - offsety)
if scale >= 2 ** self._maxSkippedLevels:
tile = self._getTileFromEmptyLevel(x, y, z, **kwargs)
format = TILE_FORMAT_NUMPY
else:
with self._tileLock:
try:
javabridge.attach()
if width > 0 and height > 0:
tile = self._bioimage.read(
c=fc, z=fz, t=ft, series=fseries['series'][seriesLevel],
rescale=False, # return internal data types
XYWH=(offsetx, offsety, width, height))
else:
# We need the same dtype, so read 1x1 at 0x0
tile = self._bioimage.read(
c=fc, z=fz, t=ft, series=fseries['series'][seriesLevel],
rescale=False, # return internal data types
XYWH=(0, 0, 1, 1))
tile = np.zeros(tuple([0, 0] + list(tile.shape[2:])), dtype=tile.dtype)
format = TILE_FORMAT_NUMPY
except javabridge.JavaException as exc:
es = javabridge.to_string(exc.throwable)
raise TileSourceError('Failed to get Bioformat region (%s, %r).' % (es, (
fc, fz, ft, fseries, self.sizeX, self.sizeY, offsetx,
offsety, width, height)))
finally:
if javabridge.get_env():
javabridge.detach()
if scale > 1:
tile = tile[::scale, ::scale]
if tile.shape[:2] != (finalHeight, finalWidth):
fillValue = 0
if tile.dtype == np.uint16:
fillValue = 65535
elif tile.dtype == np.uint8:
fillValue = 255
elif tile.dtype.kind == 'f':
fillValue = 1
retile = np.full(
tuple([finalHeight, finalWidth] + list(tile.shape[2:])),
fillValue,
dtype=tile.dtype)
retile[0:min(tile.shape[0], finalHeight), 0:min(tile.shape[1], finalWidth)] = tile[
0:min(tile.shape[0], finalHeight), 0:min(tile.shape[1], finalWidth)]
tile = retile
return self._outputTile(tile, format, x, y, z, pilImageAllowed, numpyAllowed, **kwargs)
def getAssociatedImagesList(self):
"""
Return a list of associated images.
:return: the list of image keys.
"""
return sorted(self._metadata['seriesAssociatedImages'].keys())
def _getAssociatedImage(self, imageKey):
"""
Get an associated image in PIL format.
:param imageKey: the key of the associated image.
:return: the image in PIL format or None.
"""
info = self._metadata['seriesAssociatedImages'].get(imageKey)
if info is None:
return
series = info['seriesNum']
with self._tileLock:
try:
javabridge.attach()
image = self._bioimage.read(
series=series,
rescale=False, # return internal data types
XYWH=(0, 0, info['sizeX'], info['sizeY']))
except javabridge.JavaException as exc:
es = javabridge.to_string(exc.throwable)
raise TileSourceError('Failed to get Bioformat series (%s, %r).' % (es, (
series, info['sizeX'], info['sizeY'])))
finally:
if javabridge.get_env():
javabridge.detach()
return large_image.tilesource.base._imageToPIL(image)
def open(*args, **kwargs):
"""
Create an instance of the module class.
"""
return BioformatsFileTileSource(*args, **kwargs)
def canRead(*args, **kwargs):
"""
Check if an input can be read by the module class.
"""
return BioformatsFileTileSource.canRead(*args, **kwargs)
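# A minimal usage sketch (the path is an assumption; it requires a file that
# the Bioformats reader can open and a working javabridge install), kept as a
# comment so it is not executed on import:
#
#   source = open('/path/to/image.czi')
#   print(source.sizeX, source.sizeY, source.levels)
#   tile = source.getTile(0, 0, source.levels - 1)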
|
PypiClean
|
/pixivdownloader-0.1.1.tar.gz/pixivdownloader-0.1.1/pixiv/downloader/downloader.py
|
from cv2 import VideoWriter
from cv2 import VideoWriter_fourcc
from cv2 import destroyAllWindows
from cv2 import imread
from pathlib import Path
from pixivpy3 import AppPixivAPI
from tempfile import TemporaryDirectory
from urllib.parse import urlparse
from zipfile import ZipFile
import os
import re
import shutil
import logging
class PixivDownloaderError(Exception):
def __init__(self, msg, data=None):
super().__init__(msg, data)
self.msg = msg
self.data = data
class PixivDownloader:
def __init__(self, client=None, username=None, password=None, log_level=logging.WARNING):
if not client and (bool(username) != bool(password)):
raise AttributeError('If no client is given both username and password must be given')
if client:
self.api = client
else:
self.api = AppPixivAPI()
if not client and username and password:
self.api.login(username, password)
self.logger = logging.getLogger('PixivDownloader')
stdout = logging.StreamHandler()
self.logger.addHandler(stdout)
self.logger.setLevel(log_level)
def login(self, username=None, password=None, refresh_token=None):
if refresh_token:
            self.logger.info('Logging in with refresh_token')
        elif username:
            self.logger.info('Logging in with username %s', username)
        else:
            self.logger.info('Logging in')
return self.api.auth(username=username, password=password, refresh_token=refresh_token)
def logout(self):
self.logger.info('Logout')
self.api = AppPixivAPI()
def get_id_from_url(self, url):
path = urlparse(url).path
ids = re.findall('(\\d+)', path)
if not ids:
raise ValueError('Url does not contain post id')
return ids[0]
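    # For example (hypothetical URL): get_id_from_url('https://www.pixiv.net/en/artworks/12345678')
    # returns '12345678', the first run of digits found in the URL path.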
def download_by_id(self, post_id, output_dir):
data = self.api.illust_detail(post_id)
if data.get('error'):
raise PixivDownloaderError('Could not get post info or post doesn\'t exist.', data)
return self.download(data.illust, output_dir)
def download_by_url(self, url, output_dir):
return self.download_by_id(self.get_id_from_url(url), output_dir)
def download(self, post, output_dir):
output_dir = Path(output_dir).expanduser().absolute()
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
self.logger.debug('Created dir "%s"', output_dir)
if post.type == 'illust' and not post.meta_pages:
downloader = self.download_illust
type = 'Image'
elif post.type == 'illust' and post.meta_pages:
downloader = self.download_illust_collection
type = 'Image Collection'
elif post.type == 'ugoira':
downloader = self.download_ugoira
type = 'Video'
elif post.type == 'manga':
downloader = self.download_manga
type = 'Manga'
else:
raise PixivDownloaderError(f'Post type "{post.type}" not supported')
self.logger.info('Initialize "%s" downloader for post %s', type, post.id)
return downloader(post, output_dir)
def download_illust(self, post, output_dir):
image_url = post.meta_single_page.get('original_image_url', post.image_urls.large)
if '_webp' in image_url:
extension = 'webp'
else:
extension = os.path.splitext(image_url)[1].lstrip('.')
filename = self.get_filename(post, extension)
self.logger.info('Downloading "%s"', image_url)
self.api.download(image_url, path=output_dir, name=filename, replace=True)
yield (Path(output_dir) / filename).absolute()
def download_illust_collection(self, post, output_dir):
output_dir = Path(output_dir)
yield from self._downloade_meta_pages(post, output_dir)
def download_manga(self, post, output_dir):
output_dir = Path(output_dir) / f'{post.title}-{post.user.account}'
if not output_dir.is_dir():
output_dir.mkdir(parents=True, exist_ok=True)
self.logger.debug('Created dir "%s"', output_dir)
yield from self._downloade_meta_pages(post, output_dir)
def _downloade_meta_pages(self, post, output_dir):
for index, image in enumerate(post.meta_pages, 1):
image_url = image.image_urls.get('original', image.image_urls.large)
if '_webp' in image_url:
extension = 'webp'
else:
extension = os.path.splitext(image_url)[1].lstrip('.')
filename = self.get_filename(post, extension, suffix=f'-{index:0>2}')
self.logger.info('Downloading "%s"', image_url)
self.api.download(image_url, path=str(output_dir), name=filename, replace=True)
yield (output_dir / filename).absolute()
def download_ugoira(self, post, output_dir):
ugoira_data = self.api.ugoira_metadata(post.id).ugoira_metadata
zip_url = ugoira_data.zip_urls.get('large', ugoira_data.zip_urls.medium)
with TemporaryDirectory() as dir:
temp_dir = Path(dir)
            filename = f'{post.id}.zip'
self.logger.info('Downloading "%s"', zip_url)
self.api.download(zip_url, path=str(temp_dir), name=filename)
frames_dir = temp_dir / 'frames'
os.mkdir(frames_dir)
self._extract_zip(temp_dir / filename, frames_dir)
video_name = self.get_filename(post, 'mp4')
video_file = temp_dir / video_name
self._generate_mp4_from_frames(video_file, frames_dir, ugoira_data.frames[0].delay)
final_path = (Path(output_dir) / video_name).absolute()
shutil.move(video_file, final_path)
yield final_path.absolute()
def get_filename(self, post, extension, prefix=None, suffix=None,):
suffix = suffix or ''
prefix = prefix or ''
filename = f'{prefix}{post.id}-{post.title}{suffix}.{extension}'.replace('/', '_').replace(' ', '_')
return filename
def _extract_zip(self, zip_file, output_dir):
self.logger.info('Extract "%s"', zip_file)
with ZipFile(zip_file, 'r') as zip_file:
zip_file.extractall(output_dir)
def _generate_mp4_from_frames(self, output_file, frames_dir, delay):
self.logger.info('Generate video to "%s"', output_file)
frames = sorted(map(lambda file: os.path.join(str(frames_dir), file), os.listdir(frames_dir)))
frames = list(map(imread, frames))
framerate = 1000 / delay
height, width, layers = frames[0].shape
video = VideoWriter(str(output_file), VideoWriter_fourcc(*'mp4v'), framerate, (width, height))
for frame in frames:
video.write(frame)
destroyAllWindows()
video.release()
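# A minimal usage sketch (credentials and the post URL below are placeholders),
# kept as a comment so it is not executed on import:
#
#   pd = PixivDownloader(username='<USERNAME>', password='<PASSWORD>')
#   for path in pd.download_by_url('https://www.pixiv.net/en/artworks/12345678', './downloads'):
#       print('saved', path)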
|
PypiClean
|
/aiobroadlink-0.1.0.tar.gz/aiobroadlink-0.1.0/README.md
|
# aiobroadlink
Library to control various Broadlink devices using asyncio
This software is based on the protocol description from Ipsum Domus (?)
Details at https://blog.ipsumdomus.com/broadlink-smart-home-devices-complete-protocol-hack-bc0b4b397af1
This software is based on python-broadlink by Matthew Garrett
Details at https://github.com/mjg59/python-broadlink
Remote Control devices seem to be working alright (both IR and RF).
RM4C devices are now supported.
The A1 device also works.
Provisioning works.
Others will be tested when I get the relevant hardware.
Install with pip3. Be forewarned that aiobroadlink needs the 'cryptography' library.
This library will be automatically installed, but for this to succeed, you do need to
be able to compile things. To that effect you need a compiler and some header files. On
Debian/Ubuntu distributions, this means you need the packages 'libffi-dev' and 'libssl-dev'
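A typical install would be:
pip3 install aiobroadlink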
You can run
python3 -m aiobroadlink
If your IP address cannot be guessed, do
python3 -m aiobroadlink -i xxx.xxx.xxx.xxx
where xxx.xxx.xxx.xxx is the IP address of the interface you want to use.
|
PypiClean
|
/apache_airflow_providers_apache_hive-6.1.5rc1-py3-none-any.whl/airflow/providers/apache/hive/operators/hive_stats.py
|
from __future__ import annotations
import json
import warnings
from typing import TYPE_CHECKING, Any, Callable, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.presto.hooks.presto import PrestoHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class HiveStatsCollectionOperator(BaseOperator):
"""Gather partition statistics and insert them into MySQL.
Statistics are gathered with a dynamically generated Presto query and
inserted with this format. Stats overwrite themselves if you rerun the
same date/partition.
.. code-block:: sql
CREATE TABLE hive_stats (
ds VARCHAR(16),
table_name VARCHAR(500),
metric VARCHAR(200),
value BIGINT
);
:param metastore_conn_id: Reference to the
:ref:`Hive Metastore connection id <howto/connection:hive_metastore>`.
:param table: the source table, in the format ``database.table_name``. (templated)
:param partition: the source partition. (templated)
:param extra_exprs: dict of expression to run against the table where
keys are metric names and values are Presto compatible expressions
:param excluded_columns: list of columns to exclude, consider
excluding blobs, large json columns, ...
:param assignment_func: a function that receives a column name and
        a type, and returns a dict of metric names and Presto expressions.
If None is returned, the global defaults are applied. If an
empty dictionary is returned, no stats are computed for that
column.
"""
template_fields: Sequence[str] = ("table", "partition", "ds", "dttm")
ui_color = "#aff7a6"
def __init__(
self,
*,
table: str,
partition: Any,
extra_exprs: dict[str, Any] | None = None,
excluded_columns: list[str] | None = None,
assignment_func: Callable[[str, str], dict[Any, Any] | None] | None = None,
metastore_conn_id: str = "metastore_default",
presto_conn_id: str = "presto_default",
mysql_conn_id: str = "airflow_db",
**kwargs: Any,
) -> None:
if "col_blacklist" in kwargs:
warnings.warn(
f"col_blacklist kwarg passed to {self.__class__.__name__} "
f"(task_id: {kwargs.get('task_id')}) is deprecated, "
f"please rename it to excluded_columns instead",
category=FutureWarning,
stacklevel=2,
)
excluded_columns = kwargs.pop("col_blacklist")
super().__init__(**kwargs)
self.table = table
self.partition = partition
self.extra_exprs = extra_exprs or {}
self.excluded_columns: list[str] = excluded_columns or []
self.metastore_conn_id = metastore_conn_id
self.presto_conn_id = presto_conn_id
self.mysql_conn_id = mysql_conn_id
self.assignment_func = assignment_func
self.ds = "{{ ds }}"
self.dttm = "{{ execution_date.isoformat() }}"
def get_default_exprs(self, col: str, col_type: str) -> dict[Any, Any]:
"""Get default expressions."""
if col in self.excluded_columns:
return {}
exp = {(col, "non_null"): f"COUNT({col})"}
if col_type in {"double", "int", "bigint", "float"}:
exp[(col, "sum")] = f"SUM({col})"
exp[(col, "min")] = f"MIN({col})"
exp[(col, "max")] = f"MAX({col})"
exp[(col, "avg")] = f"AVG({col})"
elif col_type == "boolean":
exp[(col, "true")] = f"SUM(CASE WHEN {col} THEN 1 ELSE 0 END)"
exp[(col, "false")] = f"SUM(CASE WHEN NOT {col} THEN 1 ELSE 0 END)"
elif col_type == "string":
exp[(col, "len")] = f"SUM(CAST(LENGTH({col}) AS BIGINT))"
exp[(col, "approx_distinct")] = f"APPROX_DISTINCT({col})"
return exp
def execute(self, context: Context) -> None:
metastore = HiveMetastoreHook(metastore_conn_id=self.metastore_conn_id)
table = metastore.get_table(table_name=self.table)
field_types = {col.name: col.type for col in table.sd.cols}
exprs: Any = {("", "count"): "COUNT(*)"}
for col, col_type in list(field_types.items()):
if self.assignment_func:
assign_exprs = self.assignment_func(col, col_type)
if assign_exprs is None:
assign_exprs = self.get_default_exprs(col, col_type)
else:
assign_exprs = self.get_default_exprs(col, col_type)
exprs.update(assign_exprs)
exprs.update(self.extra_exprs)
exprs_str = ",\n ".join(f"{v} AS {k[0]}__{k[1]}" for k, v in exprs.items())
where_clause_ = [f"{k} = '{v}'" for k, v in self.partition.items()]
where_clause = " AND\n ".join(where_clause_)
sql = f"SELECT {exprs_str} FROM {self.table} WHERE {where_clause};"
presto = PrestoHook(presto_conn_id=self.presto_conn_id)
self.log.info("Executing SQL check: %s", sql)
row = presto.get_first(sql)
self.log.info("Record: %s", row)
if not row:
raise AirflowException("The query returned None")
part_json = json.dumps(self.partition, sort_keys=True)
self.log.info("Deleting rows from previous runs if they exist")
mysql = MySqlHook(self.mysql_conn_id)
sql = f"""
SELECT 1 FROM hive_stats
WHERE
table_name='{self.table}' AND
partition_repr='{part_json}' AND
dttm='{self.dttm}'
LIMIT 1;
"""
if mysql.get_records(sql):
sql = f"""
DELETE FROM hive_stats
WHERE
table_name='{self.table}' AND
partition_repr='{part_json}' AND
dttm='{self.dttm}';
"""
mysql.run(sql)
self.log.info("Pivoting and loading cells into the Airflow db")
rows = [
(self.ds, self.dttm, self.table, part_json) + (r[0][0], r[0][1], r[1]) for r in zip(exprs, row)
]
mysql.insert_rows(
table="hive_stats",
rows=rows,
target_fields=[
"ds",
"dttm",
"table_name",
"partition_repr",
"col",
"metric",
"value",
],
)
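# A minimal usage sketch (task id, table name and partition values are
# illustrative assumptions), kept as a comment so it is not executed on import:
#
#   HiveStatsCollectionOperator(
#       task_id='gather_stats',
#       table='default.my_table',
#       partition={'ds': '{{ ds }}'},
#       metastore_conn_id='metastore_default',
#       presto_conn_id='presto_default',
#       mysql_conn_id='airflow_db',
#   )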
|
PypiClean
|
/searchlight-9.0.0.0rc1.tar.gz/searchlight-9.0.0.0rc1/doc/source/configuration/authentication.rst
|
..
Copyright 2010 OpenStack Foundation
All Rights Reserved.
c) Copyright 2015 Hewlett-Packard Development Company, L.P.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Keystone Authentication
=======================
Searchlight should be integrated with keystone. Setting this up is
relatively straightforward, as the keystone distribution includes the
necessary middleware. Once you have installed keystone and edited your
configuration files, users will need to have an authenticated keystone token
in all API requests. The keystone integration will allow both active denial
of requests from unauthenticated users and will also allow proper search
result filtering.
.. DANGER::
If the API is not configured with keystone, all data indexed by
searchlight is at risk of being accessed by unauthorized users.
Configuring the searchlight services to use keystone
----------------------------------------------------
Keystone is integrated with searchlight through the use of middleware.
The default configuration files for the Searchlight API use a single piece of
middleware called ``unauthenticated-context``, which generates a request
context containing blank authentication information. In order to configure
Searchlight to use Keystone, the ``authtoken`` and ``context`` middleware
must be deployed in place of the ``unauthenticated-context`` middleware.
The ``authtoken`` middleware performs the authentication token validation
and retrieves actual user authentication information. It can be found in
the keystone distribution. For more information, please refer to the Keystone
documentation on the ``auth_token`` middleware:
https://docs.openstack.org/keystonemiddleware/latest/middlewarearchitecture.html
api-paste.ini
`````````````
First, ensure that declarations for the middleware exist in the
``api-paste.ini`` file. Here is an example for ``authtoken``::
[pipeline:searchlight-keystone]
pipeline = authtoken context rootapp
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
delay_auth_decision = true
searchlight.conf
````````````````
You must then update the main ``searchlight.conf`` configuration file
to enable the keystone application pipeline.
Set ``flavor`` to ``keystone`` in the ``paste_deploy`` group::
[paste_deploy]
flavor = keystone
Set ``keystone_authtoken`` options. The following sets the searchlight
service user as the user for performing policy API authentication checks.
The actual options and values in this section will need to be set according
to your environment::
[keystone_authtoken]
auth_url = http://127.0.0.1:5000
auth_type = password
project_domain_id = default
project_name = service
user_domain_id = default
password = <SERVICE_PASSWORD>
username = searchlight
.. note::
For development and unit testing, it is recommended to also set
``revocation_cache_timeout = 10`` under the ``keystone_authtoken`` group.
Set ``service_credentials`` options. Searchlight plugins may make API calls
to other services to index their data. Prior to doing this, it will get a
valid token based on the integration account credentials::
[service_credentials]
# These are needed to make API calls to other services when indexing
auth_type = password
username = searchlight
password = <SERVICE_PASSWORD>
user_domain_id = default
project_domain_id = default
project_name = service
auth_url = http://127.0.0.1:5000
# If resource_plugin.include_region_name is set, this value will be
# the default value for the 'region_name' field on all documents
# os_region_name =
For keystone v2 development::
[service_credentials]
auth_type = v2password
username = searchlight
tenant_name = service
password = <SERVICE_PASSWORD>
auth_url = http://127.0.0.1:35357/v2.0
# If resource_plugin.include_region_name is set, this value will be
# the default value for the 'region_name' field on all documents
# os_region_name =
Service integration account
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Some of the above configuration implicitly uses a ``searchlight`` service user.
If you intend to use this user, it must have been created and registered with
keystone. Typically, this is done with the following commands (v3 keystone)::
$ openstack project create --or-show service --property domain=default
$ openstack user create searchlight --password <SERVICE_PASSWORD> --project service
$ openstack role add admin --project service --user searchlight
For more information on keystone service accounts, see:
https://docs.openstack.org/keystone/latest/admin/manage-services.html#create-service-users
Policy restriction
==================
Searchlight uses the oslo policy library to allow control over the level of
access a user has based on their authenticated roles. Policy rules are defined
in a configuration file (by default, `etc/policy.json`). By default, all
operations are allowed.
See https://docs.openstack.org/oslo.policy/latest/reference/index.html for
details of rule formatting.
During the last few cycles concerns were raised about the scope of the
``admin`` role within OpenStack. Many services consider any token scoped with
the ``admin`` role to have access to resources within any project. With the
introduction of keystone v3 it is possible to create users with the admin role
on a particular project, but not with the intention of them seeing resources in
other projects.
Keystone added two configuration options called ``admin_project_name`` and
``admin_project_domain_name`` to attempt to address this. If a request is
authenticated against the project whose name is ``admin_project_name``
in the ``admin_project_domain_name`` domain, a flag is set on the
authentication response headers indicating that the user is authenticated
against the administrative project. This can then be supported by the policy
rule (in Searchlight's ``policy.json``)::
"is_admin_context": "role:admin and is_admin_project:True"
Since devstack configures keystone to support those options, this is the
default in Searchlight. To maintain backwards compatibility, if your keystone
is *not* configured to set these options, any token with the ``admin`` role
will be assumed to have administrative powers (this approach has been taken
by other OpenStack services).
For more history see https://bugs.launchpad.net/keystone/+bug/968696.
Access to operations
--------------------
It is possible to restrict access to functionality by setting rules for
``query``, ``facets`` or ``plugins_info``. For instance, to restrict facet
listing to administrators and disable plugin information for all users::
"facets": "role:admin",
"plugins_info": "!"
Where a request is disallowed on this basis, the user will receive a
403 Forbidden response.
Note that policy rules are applied on the fly; no server restart is required.
Policy rules denying access to operations take precedence over the per-resource
access described below.
Access to resources
-------------------
It is possible to disable access to individual plugins. For instance, the
following restricts access to Nova servers to admins, and disables access
entirely to Glance images::
"resource:OS::Nova::Server": "role:admin",
"resource:OS::Glance::Image": "!",
.. note::
At current plugins still apply RBAC separately from policy rules. We
aim to bring the two closer together in a later patch.
When resources are restricted in this way, they will be excluded
from the search (which may result in empty search results). No Forbidden
response will be returned.
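For example, a ``policy.json`` combining operation and resource rules (the
values shown here are purely illustrative) might contain::

    {
        "facets": "role:admin",
        "plugins_info": "!",
        "resource:OS::Nova::Server": "role:admin",
        "resource:OS::Glance::Image": "!"
    }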
.. _service-policy-controls:
Service policy controls
-----------------------
If configured, Searchlight can consult service policy files (e.g. that used
to configure the nova API). Each resource is configured with a policy target
it will check if possible. Policy file paths can either be absolute or relative
to `service_policy_path` (which itself can be relative to the current working
directory or left blank). The actual filepath used will be determined by
oslo.config using the same `logic`_ as for other config files (for logging,
searchlight's policy file etc). With the following configuration
stanza::
[service_policies]
service_policy_files=compute:nova-policy.json
service_policy_path=/etc/searchlight/
And with the following contents in nova-policy.json (which might be a symlink
to an existing nova policy file, a copy or a separate file)::
{
"is_admin": "role: admin",
"os_compute_api:servers:index": "rule:is_admin"
}
Only requests with the admin role assigned will be allowed to search or facet
Nova servers.
Policy files are configured per *service*, not per resource type. If files
are in different directories absolute paths should be used, and
``service_policy_path`` left unset.
.. note::
Policy rules are always *more* restrictive. If a rule in Searchlight's
``policy.json`` would allow access but a service policy file would disallow
it (or vice versa), the more restrictive rule will be used.
.. _logic: https://docs.openstack.org/oslo.config/latest/reference/configopts.html
|
PypiClean
|
/fmqlreports-1.0.tar.gz/fmqlreports-1.0/user/webReportUser.py
|
import sys
import os
import re
import json
from collections import defaultdict, Counter
from datetime import datetime, date
from fmqlutils.reporter.reportUtils import MarkdownTable, reportPercent, reportAbsAndPercent, muBVC
from fmqlutils.typer.reduceTypeUtils import splitTypeDatas, checkDataPresent, singleValue, combineSubTypes, muBVCOfSTProp
from ..webReportUtils import TOP_MD_TEMPL, SITE_DIR_TEMPL, ensureWebReportLocations, keyStats, flattenFrequencyDistribution, roundFloat, reduce200
from .userClassifier import UserClassifier
from .USER_CONSTANTS import SSNO_DEFINITIONS
"""
FIRST TODO:
* ANC/Alaska and no POSTMASTER?
* HL7 patterns in Classifier? ie/ correlate the two ... 3.081 + the HL7 log entry on SPQ?
Basic User Type Summary/Overview
Unlike the stopcode centric reports (off locations), these are user and institution centered. Approach is PER USER STs and then Categorize Users (=> STs) as REMOTE
etc.
Requires:
- 3.081 (Type Reduction by User: YR1
- 200 (All/Indexed-reduce200)
- ... Will move to require image logs too to round out
TODO: classification is work in progress - remotes from non key institution, locals
from non subordinate divisions etc.
Graph It To Emulate:
https://matplotlib.org/gallery/showcase/bachelors_degrees_by_gender.html#sphx-glr-gallery-showcase-bachelors-degrees-by-gender-py - any dimension for users over time ... types of user?
"""
def webReportUser(stationNo, deidentify=False):
print("Preparing to make user report for {}{} - loading data ...".format(stationNo, " [DEIDENTIFY]" if deidentify else ""))
allThere, details = checkDataPresent(stationNo, [
# Additional settings/properties:
# - duration (in seconds)
# - force count on
# ipv4_address, remote_station_id, remote_user_ien, duration
{"fileType": "3_081", "check": "YR1E"}
])
if not allThere:
raise Exception("Some required data is missing - {}".format(details))
mu = TOP_MD_TEMPL.format("{} Users".format(stationNo), "Users")
userInfoByUserRef = reduce200(stationNo)
"""
Consider: if need to break by device (ie/ segment off SSH etc? See sniff) and
how to round out DOD USER breakdown ... are there any other 'CAPRI anyone' users
that must be broken too ie/ not just one remote user => must break out for remote
analysis
"""
print("Loading subtypes ...")
try:
type3_081, st3_081URLDs = splitTypeDatas(stationNo, "3_081", reductionLabel="YR1E", expectSubTypeProperties=["user", "level_of_assurance", "remote_app", "remote_200_user_ien", "workstation_label", "workstation_type", "remote_station_id", "device"])
except:
print("Can't split type data - exiting")
sts3_081ByUserRef = defaultdict(list)
for st in st3_081URLDs:
userRef = singleValue(st, "user")
sts3_081ByUserRef[userRef].append(st)
# BACKWARD COMPATIBLE ... TODO: make work with ULRD (and only recombine down below
# where needed)
print("Recombining subtypes by user (backward compatible) ...")
sts3_081Us = combineSubTypes(st3_081URLDs, ["user"], forceCountProps=["ipv4_address", "remote_station_id", "remote_user_ien", "duration"])
print("... done")
st3_081ByUserRef = dict((singleValue(st, "user"), st) for st in sts3_081Us if "user" in st)
userClassifier = UserClassifier(stationNo, userInfoByUserRef, type3_081, st3_081ByUserRef)
print("Classifying Users ...")
classification = userClassifier.classify()
print("... Classification complete")
if "_createDateProp" not in type3_081:
raise Exception("YR1 3.081's must have create date prop")
createDatePropInfo = type3_081[type3_081["_createDateProp"]]
overallFirstSODate = createDatePropInfo["firstCreateDate"].split("T")[0]
overallLastSODate = createDatePropInfo["lastCreateDate"].split("T")[0]
BLURB_TEMPL = """Of the <span class='yellowIt'>{:,}</span> users known to the system, a minority <span class='yellowIt'>{}</span> ("__Active Users__") signed in between {} and {}, {}.
Most Active Users are real people but there are "__Machine Users__" for specialized VA batch applications or applications which manage end users themselves.
"""
mu += BLURB_TEMPL.format(
len(userInfoByUserRef),
reportAbsAndPercent(len(classification["activeUserRefs"]), len(userInfoByUserRef)),
overallFirstSODate,
overallLastSODate,
"the most recent year for which data is available"
)
if deidentify:
mu += "__DE-IDENTIFIED__: the names and national identifiers of real (Non-Machine) end users have been scrubbed from this report. VistA-specific IENs are left to identify such users for those with access to the system.\n\n"
tbl = MarkdownTable([":Type", "Users", "SignOns"], includeNo=False)
tbl.addRow([
"Total Users (200 entries)",
len(userInfoByUserRef),
""
])
tbl.addRow([
"Active Users (3.081s for period)",
reportAbsAndPercent(len(classification["activeUserRefs"]), len(userInfoByUserRef)),
type3_081["_total"]
])
tbl.addRow([
"DoD User",
re.search(r'\[200\-([^\]]+)', classification["dodUserRef"]).group(1),
reportAbsAndPercent(
st3_081ByUserRef[classification["dodUserRef"]]["_total"],
type3_081["_total"]
)
])
tbl.addRow([
"Postmaster",
muUserRef(classification["postmasterUserRef"]),
reportAbsAndPercent(
st3_081ByUserRef[classification["postmasterUserRef"]]["_total"],
type3_081["_total"]
)
])
tbl.addRow([
"Active __Proxy Users__",
len(classification["activeProxyUserRefs"]),
reportAbsAndPercent(
sum(st3_081ByUserRef[userRef]["_total"] for userRef
in classification["activeProxyUserRefs"]),
type3_081["_total"]
)
])
tbl.addRow([
"Active __(Non Proxy) Machine Users__",
len(classification["activeNonProxyMachineUserRefs"]),
reportAbsAndPercent(
sum(st3_081ByUserRef[userRef]["_total"] for userRef
in classification["activeNonProxyMachineUserRefs"]),
type3_081["_total"]
)
])
for usrCls, label in [
("activeRemoteUserRefs", "Active __Remote Users__"),
("activeLocalUserRefs", "Active __Local Users__"),
("activeNotCategorizedUserRefs", "Active __Uncategorized__")
]:
if len(classification[usrCls]):
tbl.addRow([
label,
reportAbsAndPercent(len(classification[usrCls]), len(classification["activeUserRefs"])),
reportAbsAndPercent(
sum(st3_081ByUserRef[userRef]["_total"] for userRef
in classification[usrCls]),
type3_081["_total"]
)
])
mu += "User type signon summary ...\n\n"
mu += tbl.md() + "\n\n"
mu += "Signon by week day ...\n\n"
mu += tblByWeekDay(type3_081) + "\n\n"
mu += webReportPostmaster(
classification["postmasterUserRef"],
userInfoByUserRef[classification["postmasterUserRef"]],
type3_081["_total"],
st3_081ByUserRef[classification["postmasterUserRef"]],
classification["warningsByUserRef"][classification["postmasterUserRef"]] if classification["postmasterUserRef"] in classification["warningsByUserRef"] else []
)
mu += webReportDoDUser(
classification["dodUserRef"],
userInfoByUserRef[classification["dodUserRef"]],
type3_081["_total"],
st3_081ByUserRef[classification["dodUserRef"]],
classification["warningsByUserRef"][classification["dodUserRef"]] if classification["dodUserRef"] in classification["warningsByUserRef"] else [],
deidentify
)
# Beside's the special 2, 4 classes of User
mu += webReportProxyUsers(
classification["activeProxyUserRefs"],
classification["warningsByUserRef"],
userInfoByUserRef,
st3_081ByUserRef,
type3_081["_total"],
keyColsOnly=deidentify
)
mu += webReportNonProxyMachineUsers(
classification["activeNonProxyMachineUserRefs"],
classification["warningsByUserRef"],
userInfoByUserRef,
type3_081["_total"],
st3_081ByUserRef,
keyColsOnly = deidentify
)
mu += webReportRemoteUsers(
classification["activeRemoteUserRefs"],
classification["warningsByUserRef"],
len(classification["activeUserRefs"]),
userInfoByUserRef,
type3_081["_total"],
st3_081ByUserRef,
classification["remoteExcludeReasonCount"],
deidentify,
keyColsOnly = deidentify
)
# NOTE: far too crude now - need breaks
mu += webReportLocalUsers(
classification["activeLocalUserRefs"],
classification["warningsByUserRef"],
len(classification["activeUserRefs"]),
userInfoByUserRef,
type3_081["_total"],
st3_081ByUserRef,
deidentify,
keyColsOnly = deidentify
)
mu += webReportUnclassifiedUsers(
classification["activeNotCategorizedUserRefs"],
classification["warningsByUserRef"],
len(classification["activeUserRefs"]),
userInfoByUserRef,
type3_081["_total"],
st3_081ByUserRef,
deidentify,
keyColsOnly = deidentify
)
userSiteDir = SITE_DIR_TEMPL.format(stationNo)
reportFile = userSiteDir + ("user.md" if not deidentify else "userDeId.md")
print("Writing report {}".format(reportFile))
open(reportFile, "w").write(mu)
"""
Proxy Users
Classifier enforces CONNECTOR PROXY, AV presence and LOA 2
TO ADD:
- workstation_name pattern (ie/ form of TCP Connect)
- forced_close on SO %
Refs: <----- TODO: make explicit in report
https://github.com/vistadataproject/RPCDefinitionToolkit/issues/44
- EDIS - Emergency Department Integration Software (EDIS) https://www.va.gov/vdl/documents/Clinical/Emergency_Dept_Integration_Software/edp_2_1_1_tm.pdf (SPOK: CONNECTOR,EDIS?)
- AVS may be After Visit Summary
- The VistALink Connector Proxy User is an entry on your VistA NEW PERSON file that the PATS application and other web-based applications use to connect to your VistA site. For the PATS application, there will be one data center located in Falling Waters, VA and a second fail-over data center in Hines, IL. A VistALink connector proxy user needs to be set up on your VistA server for the Falling Waters data center and also for the Hines data center.
(SPOK: VISTALINK,EMC HINES)
- RTLS: "application proxy user 'VIAASERVICE,RTLS APPLICATION PROXY' will be created automatically."
- VPS:
(SPOK: CONNECT,VPS)
- Fee Basis Claims System (FBCS) application
- CPGATEWAY,USER: The CP Gateway Service is composed of two subsystems ... via the RPC Broker to retrieve the HL7 message ... Vendor CIS for Clinical Procedures and VistA
Note: tied up with vistalink and two step of connector (with station number
lookup) and then switch to local user <------------ see if two step means two sign ons?
"""
def webReportProxyUsers(activeProxyUserRefs, warningsByUserRef, userInfoByUserRef, st3_081ByUserRef, totalSignOns, keyColsOnly=False):
totalProxySignOnCount = sum(st3_081ByUserRef[userRef]["_total"] for userRef
in activeProxyUserRefs)
mu = """## Proxy Users
There are <span class='yellowIt'>{:,}</span> active __Proxy Machine Users__ (user_class is \"CONNECTOR PROXY\") with <span class='yellowIt'>{}</span> signons. All user records have _access_, _verify_ and lack a social while their signons have _LOA_ 2 and don't have \"remote_...\" properties (ie/ CPRS-like combo). These signons probably happen over __VistALink__ and not the plain RPC Broker ...\n\n""".format(len(activeProxyUserRefs), reportAbsAndPercent(totalProxySignOnCount, totalSignOns))
cols = [":Name [IEN]", "Entered", "SignOns", "Period", "\# IPs"] if keyColsOnly else [":Name [IEN]", "Entered", ":PMO", ":SMOs", ":Keys", "SignOns", "Period", "\# IPs", "Duration", ":Unexpected"]
tbl = MarkdownTable(cols)
allSTs = []
for userRef in sorted(activeProxyUserRefs, key=lambda x: st3_081ByUserRef[x]["_total"], reverse=True):
userInfo = userInfoByUserRef[userRef]
st = st3_081ByUserRef[userRef]
allSTs.append(st)
pmoMU, smosMU, keysMU = muOptionsNKeys(userInfo)
if "duration" in st and "byValueCount" in st["duration"]: # TODO: remove and FORCE all to have duration once redo E
if st["_total"] > 1:
kstatsDur = keyStats(
flattenFrequencyDistribution(st["duration"]["byValueCount"])
)
durMU = "{}/{}/{}".format(muSeconds(kstatsDur["median"]), muSeconds(kstatsDur["min"]), muSeconds(kstatsDur["max"]))
else:
durMU = muSeconds(singleValue(st, "duration"))
else:
durMU = ""
unexpectedMU = "" if userRef not in warningsByUserRef else "/ ".join(warningsByUserRef[userRef])
if keyColsOnly:
row = [
muUserRef(userRef),
userInfo["date_entered"] if "date_entered" in userInfo else "",
reportAbsAndPercent(st["_total"], totalProxySignOnCount),
muSignOnPeriod(st),
muBVC(st["ipv4_address"]["byValueCount"], countOnlyIfOver=5) if "ipv4_address" in st else " "
]
else:
row = [
muUserRef(userRef),
userInfo["date_entered"] if "date_entered" in userInfo else "",
pmoMU,
smosMU,
keysMU,
reportAbsAndPercent(st["_total"], totalProxySignOnCount),
muSignOnPeriod(st),
muBVC(st["ipv4_address"]["byValueCount"], countOnlyIfOver=5) if "ipv4_address" in st else " ",
durMU,
unexpectedMU
]
tbl.addRow(row)
mu += tbl.md() + "\n\n"
return mu
"""
Off Key Words BUT Not the DoD User (| Postmaster)
Expects: machine SSN, visited_from, NO remote_app, LOA 2 (usually), no PMO or keys
Note:
- no PMO or Keys as none specified (in VCB)
- not enforcing LOA 2 as see 200, 2001 combos where first is 1 and then move to 2
- showing remote_user_ien as apparently fixed
- NOT enforcing no remote_app as one CVIX has VISTAWEB login in VCB
CVIX remote_user_ien seems to have a fixed IEN
CVIX_MHVUSER_SSNIEN = "200:412864" # expect 2001 too and 2006_95's >> sign ons
CVIX_USER_SSNIEN = "200:217122" # expect 2001 too; 2006_95 << sign ons
...
and fixed IP and LOA 1 usually
"""
def webReportNonProxyMachineUsers(activeNonProxyMachineUserRefs, warningsByUserRef, userInfoByUserRef, totalSignOns, st3_081ByUserRef, keyColsOnly=False):
totalNonProxyMachineSignOnCount = sum(st3_081ByUserRef[userRef]["_total"] for userRef in activeNonProxyMachineUserRefs)
mu = """## (Non Proxy) Machine Users
Besides the _DoD User_, there are <span class='yellowIt'>{:,}</span> active __Non-Proxy Machine Users__ with <span class='yellowIt'>{}</span> signons. These users appear in most VistAs under fabricated social security numbers ...\n\n""".format(
len(activeNonProxyMachineUserRefs),
reportAbsAndPercent(totalNonProxyMachineSignOnCount, totalSignOns)
)
# To add: workstation_name - take first part? ipv4_address
cols = [":Name [IEN]", "Entered", "SSN", "SignOns", "Period", "Remote Station Id(s)", "Remote IEN(s)", ":IPs"] if keyColsOnly else [":Name [IEN]", "Entered", "SSN", ":SMOs", "SignOns", "Period", "Remote Station Id(s)", "Remote IEN(s)", ":IPs", "Duration", ":Unexpected"]
tbl = MarkdownTable(cols)
for userRef in sorted(activeNonProxyMachineUserRefs, key=lambda x: st3_081ByUserRef[x]["_total"], reverse=True):
userInfo = userInfoByUserRef[userRef]
st = st3_081ByUserRef[userRef]
pmoMU, smosMU, keysMU = muOptionsNKeys(userInfo)
unexpectedMU = "" if userRef not in warningsByUserRef else "/ ".join(warningsByUserRef[userRef])
if "remote_user_ien" in st:
if len(st["remote_user_ien"]["byValueCount"]) > 5:
remoteIENsMU = "_#{:,}_".format(len(st["remote_user_ien"]["byValueCount"]))
else:
remoteIENsMU = "/".join(st["remote_user_ien"]["byValueCount"])
else:
remoteIENsMU = ""
if "duration" in st and "byValueCount" in st["duration"]: # TODO: remove
if st["_total"] > 1:
kstatsDur = keyStats(
flattenFrequencyDistribution(st["duration"]["byValueCount"])
)
durMU = "{}/{}/{}".format(muSeconds(kstatsDur["median"]), muSeconds(kstatsDur["min"]), muSeconds(kstatsDur["max"]))
else:
durMU = muSeconds(singleValue(st, "duration"))
else:
durMU = ""
if keyColsOnly:
row = [
muUserRef(userRef),
userInfo["date_entered"] if "date_entered" in userInfo else "",
"__{}__".format(userInfo["ssn"]) if "ssn" in userInfo else "",
reportAbsAndPercent(st["_total"], totalNonProxyMachineSignOnCount),
muSignOnPeriod(st),
# NO remote app?
"/".join(st["remote_station_id"]["byValueCount"].keys()) if "remote_station_id" in st else "",
remoteIENsMU,
muBVC(st["ipv4_address"]["byValueCount"], countOnlyIfOver=5)
]
else:
row = [
muUserRef(userRef),
userInfo["date_entered"] if "date_entered" in userInfo else "",
"__{}__".format(userInfo["ssn"]) if "ssn" in userInfo else "",
smosMU,
reportAbsAndPercent(st["_total"], totalNonProxyMachineSignOnCount),
muSignOnPeriod(st),
# NO remote app?
"/".join(st["remote_station_id"]["byValueCount"].keys()) if "remote_station_id" in st else "",
remoteIENsMU,
muBVC(st["ipv4_address"]["byValueCount"], countOnlyIfOver=5),
durMU,
unexpectedMU
]
tbl.addRow(row)
mu += tbl.md() + "\n\n"
return mu
"""
FIRST: exceed and nix
- https://github.com/vistadataproject/RPCDefinitionToolkit/blob/master/Reporters/Users/reportRemoteUsersE.py
- bseEntries = [entry for entry in entries if "remote_app" in entry] etc in
https://github.com/vistadataproject/DataExtractNSync/blob/master/RPCSessionTests/reportUsersAndLogins.py
TODO: needs more
- JLV vs other - see IPs in fmrepo's util
... JLV with DoD Ids
ie/ DoD JLV
ie/ may show REAL dod ids => de-identify
- non station id/ien combo (need from custom run on SO!) ie/ X:1 in particular
"""
def webReportDoDUser(userRef, userInfo, totalSignons, st, warnings, deidentify):
mu = """## \"DoD User\"
One special non proxy, machine user, the __\"DoD User\"__ is used for JLV DoD access and for access by a number of other applications ...\n\n"""
tbl = MarkdownTable([":Property", ":Value"], includeNo=False)
tbl.addRow(["IEN", re.search(r'\[200\-([^\]]+)', userRef).group(1)])
tbl.addRow(["Date Entered", userInfo["date_entered"] if "date_entered" in userInfo else ""])
tbl.addRow(["SSN", "__{}__".format(userInfo["ssn"])])
pmoMU, smosMU, keysMU = muOptionsNKeys(userInfo)
tbl.addRow(["SMOs", smosMU])
tbl.addRow(["Sign ons", reportAbsAndPercent(st["_total"], totalSignons)])
tbl.addRow(["Sign on period", muSignOnPeriod(st)])
wdCntr = expandByWeekDay(st)
tbl.addRow(["Days", ", ".join(["{} [{}]".format("__{}__".format(day) if i < 5 else day, wdCntr[day]) for i, day in enumerate(wdCntr)])])
noRemoteStationIds = len(st["remote_station_id"]["byValueCount"])
tbl.addRow(["Station Ids", noRemoteStationIds])
def topRemoteStationIds(st):
orderedTopCounts = {}
for i, rsid in enumerate(sorted(st["remote_station_id"]["byValueCount"], key=lambda x: st["remote_station_id"]["byValueCount"][x], reverse=True), 1):
if i > 5:
break
orderedTopCounts[rsid] = st["remote_station_id"]["byValueCount"][rsid]
return orderedTopCounts
tbl.addRow(["Top Station Ids", muBVC(topRemoteStationIds(st))])
fiveOrMoreRemoteStationIds = sum(1 for rsid in st["remote_station_id"]["byValueCount"] if re.match(r'\d\d\d\d\d', rsid))
tbl.addRow(["5 digit plus Station Ids (DoD?)", fiveOrMoreRemoteStationIds])
threeAlphaRemoteStationIds = dict((rsid, st["remote_station_id"]["byValueCount"][rsid]) for rsid in st["remote_station_id"]["byValueCount"] if re.match(r'\d\d\d\d?[A-Z]*[A-Z]*$', rsid))
tbl.addRow(["3 digit [2 alpha] Station Ids (VA)", muBVC(threeAlphaRemoteStationIds, countOnlyIfOver=10)])
tbl.addRow(["IPs", len(st["ipv4_address"]["byValueCount"])])
tbl.addRow(["Divisions", muBVC(st["division"]["byValueCount"])])
if len(warnings):
tbl.addRow(["Unexpected", "/ ".join(warnings)])
mu += tbl.md() + "\n\n"
return mu
def webReportPostmaster(userRef, userInfo, totalSignons, st, warnings):
mu = """## Postmaster
Every VistA has a __Postmaster__, one of the first users in the system ...
"""
tbl = MarkdownTable([":Property", ":Value"], includeNo=False)
tbl.addRow(["Name \[IEN\]", muUserRef(userRef)])
tbl.addRow(["Date Entered", userInfo["date_entered"] if "date_entered" in userInfo else ""])
pmoMU, smosMU, keysMU = muOptionsNKeys(userInfo)
if pmoMU:
tbl.addRow(["PMO", pmoMU])
if smosMU:
tbl.addRow(["SMOs", smosMU])
if keysMU:
tbl.addRow(["Keys", keysMU])
# Division is hardly ever set and then to main site - ignoring
tbl.addRow(["Sign ons", reportAbsAndPercent(st["_total"], totalSignons)])
tbl.addRow(["Sign on period", muSignOnPeriod(st)])
if len(warnings):
tbl.addRow(["Unexpected", "/ ".join(warnings)])
mu += tbl.md() + "\n\n"
return mu
"""
REM: the dynamic sniffing of a session would yield much of this (user from certain IPs, remote station id, context chosen is VPR or , workstation 10 etc)
TODO: (review the issues in TK too)
- should add DIVISION restriction (only def div) to REMOTE CHECK
- JLV subset (add to classifier)
- workstation 10 ie/ the JLV Sub
- more on VPR SMO ... split em out
ie mu += "{:,} VPR Remote Users; {:,} no remote_app remote users; {:,} both; {:,} VPR only; {:,} no remote_app only
"ipv4_addresses": set(JLV_INVA_IPS).union(set(JLV_INVA_EXTRA_IPS)),
"level_of_assurance": "1",
"workstation_name": "10"
and CC JLV
(besides DoD JLV)
ipt = IPTracker(defn["name"])
for so in sos:
ipt.processSignOn(so)
globalIPTracker.processSignOn(so) # overalls!
<-------- VPR subset and tie to JLV IPs too
- REMOTE APP SPEC => BSE subset
- by APP ie/ CAPRI et al + ENFORCE [classifier] on loa 1/2 (3 for local?) ie/ break JLV vs others in separate tables (fuller report)
- manilla called out; 200 too?
fmrepo/user/catag (move over)
"""
def webReportRemoteUsers(activeRemoteUserRefs, warningsByUserRef, totalActiveUsers, userInfoByUserRef, totalSignOns, st3_081ByUserRef, remoteExcludeReasonCount, deidentify, keyColsOnly=False):
if len(activeRemoteUserRefs) == 0:
return ""
remoteSignOnCountsByUserRef = dict((userRef, st3_081ByUserRef[userRef]["_total"]) for userRef in activeRemoteUserRefs)
totalRemoteSignOns = sum(remoteSignOnCountsByUserRef[userRef] for userRef in remoteSignOnCountsByUserRef)
kstats = keyStats(list(remoteSignOnCountsByUserRef.values()))
    print("About to combine lots of remote sts (takes time) ...")
comboST = combineSubTypes([st3_081ByUserRef[userRef] for userRef in activeRemoteUserRefs], forceCountProps=["remote_station_id", "remote_app", "division"])[0]
print("... end combo'ing")
mu = """## Remote Users
Remote users dominate in every VistA - <span class='yellowIt'>{}</span> - but account for fewer sign ons - <span class='yellowIt'>{}</span> - than their numbers suggest. The median number of sign ons per remote user is <span class='yellowIt'>{:,}</span>.
""".format(
reportAbsAndPercent(len(activeRemoteUserRefs), totalActiveUsers),
reportAbsAndPercent(totalRemoteSignOns, totalSignOns),
roundFloat(kstats["median"])
)
remoteStationBVC = comboST["remote_station_id"]["byValueCount"]
mu += "Remote sign on comes from <span class='yellowIt'>{:,}</span> Remote Stations or <span class='yellowIt'>{:,}</span> three digit stations. The top 10 are ...\n\n".format(
len(remoteStationBVC),
len(set(stId[0:3] for stId in remoteStationBVC))
)
tbl = MarkdownTable([":Remote Station Id", ":Count of Remote SignOns"], includeNo=False)
for i, stationId in enumerate(sorted(remoteStationBVC, key=lambda x: remoteStationBVC[x], reverse=True), 1):
if i > 10:
break
stationMU = "__{}__".format(stationId)
if stationId in SSNO_DEFINITIONS:
stationMU = "__{} [{}]__".format(stationId, SSNO_DEFINITIONS[stationId])
tbl.addRow([
stationMU,
reportAbsAndPercent(remoteStationBVC[stationId], totalRemoteSignOns)
])
mu += tbl.md() + "\n\n"
mu += "Remote signon by week day ...\n\n"
mu += tblByWeekDay(comboST) + "\n\n"
def muRemoteIds(st): # points to need for custom combine of sno:ien
stationIdVC = st["remote_station_id"]["byValueCount"]
# Could happen but ? ... TODO: force IEN count
if "byValueCount" not in st["remote_user_ien"]:
return ""
ienVC = st["remote_user_ien"]["byValueCount"]
def muTripler(sid):
if sid == "459":
return "__459__"
return sid
if len(stationIdVC) == 1:
return "{}:{}".format(
muTripler(list(stationIdVC)[0]),
list(ienVC)[0]
)
if len(ienVC) == 1:
return "{}:{}".format(
"/".join([muTripler(sid) for sid in stationIdVC]),
list(ienVC)[0]
)
# TODO: match counts in both to assemble id
return ""
mu += "The following shows the top 50 Remote Users ...\n\n"
cols = [":Name [IEN]", "Entry", "Remote Id(s)", "SignOns", "Period", "Days", ":Remote Apps"] if keyColsOnly else [":Name [IEN]", "Entry", "Remote Id(s)", "SignOns", "Period", "Days", ":Remote Apps", ":Options", "Duration", ":Unexpected"]
tbl = MarkdownTable(cols)
for i, userRef in enumerate(sorted(activeRemoteUserRefs, key=lambda x: st3_081ByUserRef[x]["_total"], reverse=True), 1):
if i > 50:
break
userInfo = userInfoByUserRef[userRef]
st = st3_081ByUserRef[userRef]
# May put VPR in bold later as JLV indicator
pmoMU, smosMU, keysMU = muOptionsNKeys(userInfo)
remote_app_count = st["remote_app"]["byValueCount"] if "remote_app" in st else {}
no_remote_app_count = st["_total"] if "remote_app" not in st else st["_total"] - st["remote_app"]["count"]
if no_remote_app_count:
remote_app_count["UNIDENTIFIED"] = no_remote_app_count
remoteAppMU = ", ".join(["{} ({})".format(remote_app.split(" [")[0], remote_app_count[remote_app]) for remote_app in sorted(remote_app_count, key=lambda x: remote_app_count[x], reverse=True)])
wdCntr = expandByWeekDay(st)
if "duration" in st and "byValueCount" in st["duration"]:
if st["_total"] > 1:
kstatsDur = keyStats(
flattenFrequencyDistribution(st["duration"]["byValueCount"])
)
durMU = "{}/{}/{}".format(muSeconds(kstatsDur["median"]), muSeconds(kstatsDur["min"]), muSeconds(kstatsDur["max"]))
else:
durMU = muSeconds(singleValue(st, "duration"))
else:
durMU = ""
if keyColsOnly:
row = [
muUserRef(userRef, ssn="NO SSN" if "ssn" not in userInfo else userInfo["ssn"], deidentify=deidentify),
userInfo["date_entered"] if "date_entered" in userInfo else "",
muRemoteIds(st),
reportAbsAndPercent(st["_total"], totalRemoteSignOns),
muSignOnPeriod(st),
", ".join(["{} [{}]".format("__{}__".format(day) if i < 5 else day, wdCntr[day]) for i, day in enumerate(wdCntr)]),
remoteAppMU
]
else:
row = [
muUserRef(userRef, ssn="NO SSN" if "ssn" not in userInfo else userInfo["ssn"], deidentify=deidentify),
userInfo["date_entered"] if "date_entered" in userInfo else "",
muRemoteIds(st),
reportAbsAndPercent(st["_total"], totalRemoteSignOns),
muSignOnPeriod(st),
", ".join(["{} [{}]".format("__{}__".format(day) if i < 5 else day, wdCntr[day]) for i, day in enumerate(wdCntr)]),
remoteAppMU,
smosMU,
durMU,
"" if userRef not in warningsByUserRef else "/ ".join(warningsByUserRef[userRef])
]
tbl.addRow(row)
mu += tbl.md() + "\n\n"
return mu
"""
TODO:
- broad: FAR TOO catch all -- why does PUG have so many more in Pug than ALake?
- and device: ssh vs other
- exclude from remote on MAND_PROPS and ALLOWED_PROPS as opposed to 0's ie/ 0's in here PLUS station_no in locals
"""
def webReportLocalUsers(activeLocalUserRefs, warningsByUserRef, totalActiveUsers, userInfoByUserRef, totalSignOns, st3_081ByUserRef, deidentify, keyColsOnly=False):
if len(activeLocalUserRefs) == 0:
return ""
totalLocalSignOns = sum(st3_081ByUserRef[userRef]["_total"] for userRef in activeLocalUserRefs)
mu = """## Local Users
There are <span class='yellowIt'>{}</span> active Local Users with <span class='yellowIt'>{}</span> signons.\n\n""".format(reportAbsAndPercent(len(activeLocalUserRefs), totalActiveUsers), reportAbsAndPercent(totalLocalSignOns, totalSignOns))
comboST = combineSubTypes([st3_081ByUserRef[userRef] for userRef in activeLocalUserRefs], forceCountProps=["division"])[0]
mu += "Local users select <span class='yellowIt'>{:,}</span> divisions (SHOULD RESTRICT THIS SET) ...\n\n".format(len(comboST["division"]["byValueCount"]))
tbl = MarkdownTable([":Division", ":Count %"])
for divisionRef in sorted(comboST["division"]["byValueCount"], key=lambda x: comboST["division"]["byValueCount"][x], reverse=True):
tbl.addRow([
"__{}__".format(re.sub(r'4\-', '', divisionRef)),
reportAbsAndPercent(comboST["division"]["byValueCount"][divisionRef], totalLocalSignOns)
])
mu += tbl.md() + "\n\n"
mu += "And don't just sign on through device 0 ...\n\n"
tbl = MarkdownTable([":Device", "Count %"], includeNo=False)
for device in sorted(comboST["device"]["byValueCount"], key=lambda x: comboST["device"]["byValueCount"][x], reverse=True):
tbl.addRow([
"__{}__".format(device),
reportAbsAndPercent(comboST["device"]["byValueCount"][device], comboST["_total"])
])
mu += tbl.md() + "\n\n"
# TODO: will break on these
mu += "And have multiple levels of assurance ...\n\n"
tbl = MarkdownTable([":LOA", "Count %"], includeNo=False)
for loa in sorted(comboST["level_of_assurance"]["byValueCount"], key=lambda x: comboST["level_of_assurance"]["byValueCount"][x], reverse=True):
        tbl.addRow([
            "__{}__".format(loa),
            reportAbsAndPercent(comboST["level_of_assurance"]["byValueCount"][loa], comboST["_total"])
        ])
mu += tbl.md() + "\n\n"
SUPERUSER_KEYS = ["XUMGR"] # removing XUPROG
superUserRefs = set(userRef for userRef in activeLocalUserRefs if "keys" in userInfoByUserRef[userRef] and len(set(SUPERUSER_KEYS).intersection(set(userInfoByUserRef[userRef]["keys"]))))
mu += "<span class='yellowIt'>{:,}</span> Local Users are __Superusers__ (those with key {}) ...\n\n".format(len(superUserRefs), "|".join(SUPERUSER_KEYS))
cols = [":Name [IEN]", "Entry", ":Title", "SignOns", "Period", "Days"] if keyColsOnly else [":Name [IEN]", "Entry", ":Title", "SignOns", "Period", "Days", ":PMO", ":SMOs", ":Keys", ":Unexpected"]
tbl = MarkdownTable(cols)
for userRef in sorted(superUserRefs, key=lambda x: st3_081ByUserRef[x]["_total"], reverse=True):
userInfo = userInfoByUserRef[userRef]
st = st3_081ByUserRef[userRef]
pmoMU, smosMU, keysMU = muOptionsNKeys(userInfo)
wdCntr = expandByWeekDay(st)
if keyColsOnly:
row = [
muUserRef(userRef, ssn="NO SSN" if "ssn" not in userInfo else userInfo["ssn"], deidentify=deidentify),
userInfo["date_entered"] if "date_entered" in userInfo else "",
userInfo["title"] if "title" in userInfo else "",
reportAbsAndPercent(st["_total"], totalLocalSignOns),
muSignOnPeriod(st),
", ".join(["{} [{}]".format("__{}__".format(day) if i < 5 else day, wdCntr[day]) for i, day in enumerate(wdCntr)])
]
else:
row = [
muUserRef(userRef, ssn="NO SSN" if "ssn" not in userInfo else userInfo["ssn"], deidentify=deidentify),
userInfo["date_entered"] if "date_entered" in userInfo else "",
userInfo["title"] if "title" in userInfo else "",
reportAbsAndPercent(st["_total"], totalLocalSignOns),
muSignOnPeriod(st),
", ".join(["{} [{}]".format("__{}__".format(day) if i < 5 else day, wdCntr[day]) for i, day in enumerate(wdCntr)]),
pmoMU,
smosMU,
keysMU,
"" if userRef not in warningsByUserRef else "/ ".join(warningsByUserRef[userRef])
]
tbl.addRow(row)
mu += tbl.md() + "\n\n"
return mu
def webReportUnclassifiedUsers(activeNotCategorizedUserRefs, warningsByUserRef, totalActiveUsers, userInfoByUserRef, totalSignOns, st3_081ByUserRef, deidentify, keyColsOnly=False):
if len(activeNotCategorizedUserRefs) == 0:
return ""
totalUnclassfiedSignOns = sum(st3_081ByUserRef[userRef]["_total"] for userRef in activeNotCategorizedUserRefs)
mu = """## Unclassified Users
There are <span class='yellowIt'>{}</span> active unclassified Users with <span class='yellowIt'>{}</span> signons.\n\n""".format(reportAbsAndPercent(len(activeNotCategorizedUserRefs), totalActiveUsers), reportAbsAndPercent(totalUnclassfiedSignOns, totalSignOns))
return mu
# TODO: change to calc length formally in case of gaps
def muSignOnPeriod(st):
if len(st["date_time"]["byValueCount"]) == 13:
soPeriodMU = "EVERY MONTH"
elif st["date_time"]["firstCreateDate"].split("T")[0] == st["date_time"]["lastCreateDate"].split("T")[0]:
soPeriodMU = st["date_time"]["lastCreateDate"].split("T")[0]
elif re.search(r'\-', list(st["date_time"]["byValueCount"])[0]): # months
soPeriodMU = "{} - {} ({})".format(st["date_time"]["firstCreateDate"].split("T")[0], st["date_time"]["lastCreateDate"].split("T")[0], len(st["date_time"]["byValueCount"]))
else:
soPeriodMU = "{} - {}".format(st["date_time"]["firstCreateDate"].split("T")[0], st["date_time"]["lastCreateDate"].split("T")[0])
return soPeriodMU
"""
TODO: change -- move to reducing non RPC options
"""
def muOptionsNKeys(userInfo):
# rosByLabel = dict((res["label"], res) for res in rpcOptions(stationNo))
pmoMU = ""
if "primary_menu_option" in userInfo:
pmoMU = userInfo["primary_menu_option"]
# if userInfo["primary_menu_option"] not in rosByLabel:
# pmoMU += " [NOT RPC]"
smosMU = ""
if "secondary_menu_options" in userInfo:
# smosMU = ", ".join(sorted([smo if smo in rosByLabel else "{} [NOT RPC]".format(smo) for smo in userInfo["secondary_menu_options"]]))
if len(userInfo["secondary_menu_options"]) <= 5:
smosMU = ", ".join(sorted(userInfo["secondary_menu_options"])[0:5])
else:
smosMU = "_{:,}_".format(len(userInfo["secondary_menu_options"]))
keysMU = ""
if "keys" in userInfo:
if len(userInfo["keys"]) <= 5:
keysMU = ", ".join([key for key in sorted(userInfo["keys"])[0:5]])
else:
keysMU = "_{:,}_".format(len(userInfo["keys"]))
return pmoMU, smosMU, keysMU
def muUserRef(userRef, ssn=None, deidentify=False):
name = userRef.split(" [200-")[0]
if deidentify:
name = re.sub(r'[A-Za-z]', "X", name)
if ssn:
ssn = "XXXXXXXXX"
ien = userRef.split("[200-")[1][:-1]
    return "__{}__ [{}]".format(
        name,
        "{}/{}".format(ien, ssn) if ssn is not None else ien
    )
def tblByWeekDay(st):
ocntr = expandByWeekDay(st, fullDays=True)
tbl = MarkdownTable([":Day", "Total", "Average"], includeNo=False)
for i, day in enumerate(ocntr):
avg = ocntr[day].split("/")[1]
avgMU = "" if avg == "0" else avg
tbl.addRow([
"__{}__".format(day) if i < 5 else day,
ocntr[day].split("/")[0],
avgMU
])
return tbl.md()
"""
Want average as well as total for period - but note that average may be bad
indicator if median is low. Best st can offer.
"""
def expandByWeekDay(st, fullDays=False):
def countDaysInPeriod(s, e):
d1 = datetime.strptime(s.split("T")[0], "%Y-%m-%d")
d2 = datetime.strptime(e.split("T")[0], "%Y-%m-%d")
cnter = Counter()
for d_ord in range(d1.toordinal(), d2.toordinal() + 1):
d = date.fromordinal(d_ord)
cnter[d.weekday()] += 1
return cnter
createDatePropInfo = st[st["_createDateProp"]]
dayCnter = countDaysInPeriod(createDatePropInfo["firstCreateDate"], createDatePropInfo["lastCreateDate"])
    weekdays = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"] if fullDays else ["Mon", "Tues", "Wed", "Thurs", "Fri", "Sat", "Sun"]
    cntr = {}
    byWeekDay = st["date_time"]["byWeekDay"]
    for wd in sorted(byWeekDay, key=lambda x: int(x)):
avg = int(round(float(byWeekDay[wd])/ float(dayCnter[int(wd)])))
val = "{:,}/{:,}".format(byWeekDay[wd], avg)
cntr[weekdays[int(wd)]] = val
return cntr
def muSeconds(seconds):
seconds = int(seconds)
(days, remainder) = divmod(seconds, 86400)
(hours, remainder) = divmod(remainder, 3600)
(minutes, seconds) = divmod(remainder, 60)
if days:
return "{} {}:{}:{}".format(
int(days),
int(hours),
int(minutes),
int(seconds)
)
if hours:
return "{}:{}:{}".format(
int(hours),
int(minutes),
int(seconds)
)
if minutes:
return "{}:{}".format(
int(minutes),
int(seconds)
)
    return str(int(seconds))
# ################################# DRIVER #######################
def main():
assert sys.version_info >= (3, 6)
try:
stationNo = sys.argv[1]
except IndexError:
raise SystemExit("Usage _EXE_ STATIONNO [DEID]")
ensureWebReportLocations(stationNo)
if len(sys.argv) == 3:
deidentify = True
else:
deidentify = False
webReportUser(stationNo, deidentify)
if __name__ == "__main__":
main()
|
PypiClean
|
/Products.DigestoContentTypes-1.2a1.tar.gz/Products.DigestoContentTypes-1.2a1/Products/DigestoContentTypes/setuphandlers.py
|
__author__ = """Emanuel Sartor <[email protected]>, Santiago Bruno <unknown>"""
__docformat__ = 'plaintext'
import logging
logger = logging.getLogger('DigestoContentTypes: setuphandlers')
from Products.DigestoContentTypes.config import PROJECTNAME
from Products.DigestoContentTypes.config import DEPENDENCIES
import os
from Products.CMFCore.utils import getToolByName
import transaction
##code-section HEAD
from Products.DigestoContentTypes.config import PLACEFUL_WORKFLOW_POLICY
##/code-section HEAD
def isNotDigestoContentTypesProfile(context):
return context.readDataFile("DigestoContentTypes_marker.txt") is None
def updateRoleMappings(context):
    """After a workflow change, update the role mappings. This is like pressing
    the 'Update Security Settings' button in portal_workflow."""
if isNotDigestoContentTypesProfile(context): return
wft = getToolByName(context.getSite(), 'portal_workflow')
wft.updateRoleMappings()
def postInstall(context):
    """Called at the end of the setup process."""
# the right place for your custom code
if isNotDigestoContentTypesProfile(context): return
shortContext = context._profile_path.split(os.path.sep)[-3]
if shortContext != 'DigestoContentTypes': # avoid infinite recursions
return
site = context.getSite()
##code-section FOOT
def registerAttachmentsFormControllerActions(context, contentType=None, template='manage_attachments'):
"""Register the form controller actions necessary for the widget to work.
This should probably be called from the Install.py script. The parameter
'context' should be the portal root or another place from which the form
controller can be acquired. The contentType and template argument allow
you to restrict the registration to only one content type and choose a
template other than base_edit, if necessary.
"""
site = context.getSite()
pfc = site.portal_form_controller
pfc.addFormAction(template,
'success',
contentType,
'UploadAttachment',
'traverse_to',
'string:widget_attachmentsmanager_upload')
pfc.addFormAction(template,
'success',
contentType,
'RenameAttachments',
'traverse_to',
'string:widget_attachmentsmanager_rename')
pfc.addFormAction(template,
'success',
contentType,
'MoveAttachments',
'traverse_to',
'string:widget_attachmentsmanager_move')
pfc.addFormAction(template,
'success',
contentType,
'DeleteAttachments',
'traverse_to',
'string:widget_attachmentsmanager_delete')
def allowNormativaInLargeFolders(context):
"""Allow Normativa as an addable type inside Large Plone Folders.
"""
types = getToolByName(context.getSite(), 'portal_types')
fti = getattr(types, 'Large Plone Folder')
if 'Normativa' not in fti.allowed_content_types:
fti.allowed_content_types = fti.allowed_content_types + ('Normativa',)
def addAreaPlacefulWorkflowPolicy(context):
"""Add the placeful workflow policy used for areas.
"""
placeful_workflow = getToolByName(context, 'portal_placeful_workflow')
if PLACEFUL_WORKFLOW_POLICY not in placeful_workflow.objectIds():
placeful_workflow.manage_addWorkflowPolicy(PLACEFUL_WORKFLOW_POLICY)
policy = placeful_workflow.getWorkflowPolicyById(PLACEFUL_WORKFLOW_POLICY)
policy.setTitle('[DigestoContentTypes] Area workflows')
policy.setDefaultChain(('area_workflow',))
types = ('Folder', 'Large Plone Folder')
policy.setChainForPortalTypes(types, ('area_workflow',))
policy.setChainForPortalTypes(('Normativa',), ('normativa_workflow',))
def addCatalogIndexes(context):
"""Add our indexes to the catalog.
Doing it here instead of in profiles/default/catalog.xml means we do
not need to reindex those indexes after every reinstall.
"""
catalog = getToolByName(context.getSite(), 'portal_catalog')
indexes = catalog.indexes()
wanted = (('getDate', 'DateIndex'),
('getSource', 'FieldIndex'),
('getNumber', 'FieldIndex'),
('getRepeals', 'FieldIndex'),
('getModifies', 'FieldIndex'),
('getRepealedBy', 'FieldIndex'),
('getKind', 'FieldIndex'),
('getAbbreviation', 'FieldIndex'),
('getArea', 'FieldIndex'),
('getCudap', 'FieldIndex'),
)
for name, meta_type in wanted:
if name not in indexes:
catalog.addIndex(name, meta_type)
logger.info("Added %s for field %s.", meta_type, name)
##/code-section FOOT
|
PypiClean
|
/scs_analysis-2.8.5-py3-none-any.whl/scs_analysis-2.8.5.data/scripts/cognito_user_identity.py
|
import requests
import sys
from scs_analysis.cmd.cmd_cognito_user_identity import CmdCognitoUserIdentity
from scs_core.aws.security.cognito_client_credentials import CognitoClientCredentials
from scs_core.aws.security.cognito_login_manager import CognitoLoginManager
from scs_core.aws.security.cognito_user import CognitoUserIdentity
from scs_core.aws.security.cognito_user_finder import CognitoUserFinder
from scs_core.aws.security.cognito_user_manager import CognitoUserCreator, CognitoUserEditor
from scs_core.client.http_exception import HTTPException, HTTPConflictException
from scs_core.data.datum import Datum
from scs_core.data.json import JSONify
from scs_core.sys.logging import Logging
from scs_host.comms.stdio import StdIO
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
logger = None
gatekeeper = None
credentials = None
auth = None
finder = None
report = None
try:
# ------------------------------------------------------------------------------------------------------------
# cmd...
cmd = CmdCognitoUserIdentity()
if not cmd.is_valid():
cmd.print_help(sys.stderr)
exit(2)
Logging.config('cognito_user_identity', verbose=cmd.verbose)
logger = Logging.getLogger()
logger.info(cmd)
# ------------------------------------------------------------------------------------------------------------
# authentication...
if not cmd.create:
credentials = CognitoClientCredentials.load_for_user(Host, name=cmd.credentials_name)
if not credentials:
exit(1)
gatekeeper = CognitoLoginManager(requests)
auth = gatekeeper.user_login(credentials)
if not auth.is_ok():
logger.error("login: %s" % auth.authentication_status.description)
exit(1)
# ------------------------------------------------------------------------------------------------------------
# resources...
if not cmd.create:
finder = CognitoUserFinder(requests)
# ------------------------------------------------------------------------------------------------------------
# run...
if cmd.retrieve:
report = finder.get_self(auth.id_token)
if cmd.create:
# create...
given_name = StdIO.prompt("Enter given name")
family_name = StdIO.prompt("Enter family name")
email = StdIO.prompt("Enter email address")
password = StdIO.prompt("Enter password")
retrieval_password = StdIO.prompt("Enter retrieval password (RETURN for same)")
if not retrieval_password:
retrieval_password = password
# validate...
            if not given_name or not family_name:
logger.error("Given name and family name are required.")
exit(1)
if not Datum.is_email_address(email):
logger.error("The email address is not valid.")
exit(1)
if not CognitoUserIdentity.is_valid_password(password):
logger.error("The password must include lower and upper case, numeric and punctuation characters.")
exit(1)
# save identity...
identity = CognitoUserIdentity(None, None, None, True, False, email,
given_name, family_name, password, False, False, False, None)
manager = CognitoUserCreator(requests)
report = manager.create(identity)
# create credentials...
credentials = CognitoClientCredentials(cmd.credentials_name, email, password, retrieval_password)
credentials.save(Host, encryption_key=retrieval_password)
if cmd.update:
# find...
identity = finder.get_self(auth.id_token)
# update identity...
given_name = StdIO.prompt("Enter given name", default=identity.given_name)
family_name = StdIO.prompt("Enter family name", default=identity.family_name)
email = StdIO.prompt("Enter email address", default=identity.email)
password = StdIO.prompt("Enter password (RETURN to keep existing)")
if not password:
password = credentials.password
if credentials.retrieval_password == credentials.password:
retrieval_password = StdIO.prompt("Enter retrieval password (RETURN for same)")
if not retrieval_password:
retrieval_password = password
else:
retrieval_password = StdIO.prompt("Enter retrieval password (RETURN to keep existing)")
if not retrieval_password:
retrieval_password = credentials.retrieval_password
# validate...
            if not given_name or not family_name:
logger.error("Given name and family name are required.")
exit(1)
if not Datum.is_email_address(email):
logger.error("The email address '%s' is not valid." % email)
exit(1)
if password and not CognitoUserIdentity.is_valid_password(password):
logger.error("The password '%s' is not valid." % password)
exit(1)
# save identity...
identity = CognitoUserIdentity(identity.username, None, None, True, identity.email_verified, email,
given_name, family_name, password, identity.is_super, identity.is_tester,
identity.is_financial, None)
auth = gatekeeper.user_login(credentials) # renew credentials
manager = CognitoUserEditor(requests, auth.id_token)
report = manager.update(identity)
# update credentials...
credentials = CognitoClientCredentials(credentials.name, email, password, retrieval_password)
credentials.save(Host, encryption_key=retrieval_password)
# ----------------------------------------------------------------------------------------------------------------
# end...
if report is not None:
print(JSONify.dumps(report, indent=cmd.indent))
except KeyboardInterrupt:
print(file=sys.stderr)
except HTTPConflictException as ex:
        logger.error("the email address '%s' is already in use." % email)
exit(1)
except HTTPException as ex:
logger.error(ex.error_report)
exit(1)
|
PypiClean
|
/portier-python-0.1.1.tar.gz/portier-python-0.1.1/README.rst
|
Portier authentication Python helpers
=====================================
|travis| |master-coverage|
.. |travis| image:: https://travis-ci.org/portier/portier-python.svg?branch=master
:target: https://travis-ci.org/portier/portier-python
.. |master-coverage| image::
https://coveralls.io/repos/portier/portier-python/badge.png?branch=master
:alt: Coverage
:target: https://coveralls.io/r/portier/portier-python
*portier-python* is a set of helpers that you can use to authenticate
your user with `the Portier Identity Provider <https://portier.io/>`_.
* `Issue tracker <https://github.com/portier/portier-python/issues>`_
Installation
------------
Install the Python package:
::
pip install portier-python
Install from the source code:
::
source <your-venv>/bin/activate
git clone [email protected]:portier/portier-python.git
cd portier-python
pip install -e .
|
PypiClean
|
/theabbie-1.1.0.tar.gz/theabbie-1.1.0/README.md
|
# TheAbbie
<p align='center'><img src="https://theabbie.github.io/files/logo.png" alt="TheAbbie" width="100" height="100"></p>
[](https://openbase.io/js/theabbie?utm_source=embedded&utm_medium=badge&utm_campaign=rate-badge)
* [About Me](#about-me)
* [My Octocat](#my-octocat)
* [My Blog](#my-blog)
* [Tasks](#tasks)
* [10 Ways to contact me](#10-ways-to-contact-me)
* [Donate](#donate)
## About Me
Hello World, I am Abhishek Chaudhary
A pseudo-introvert, a web developer, and a Maker
https://theabbie.github.io
[Resume](https://theabbie.github.io/resume.pdf)
<div itemscope itemtype="https://schema.org/Person"><a itemprop="sameAs" content="https://orcid.org/0000-0003-1526-9128" href="https://orcid.org/0000-0003-1526-9128" target="orcid.widget" rel="me noopener noreferrer" style="vertical-align:top;"><img src="https://orcid.org/sites/default/files/images/orcid_16x16.png" style="width:1em;margin-right:.5em;" alt="ORCID iD icon">https://orcid.org/0000-0003-1526-9128</a></div>
<a href="https://codetrace.com/users/theabbie"><img src="https://codetrace.com/widget/theabbie" width="220" height="50" /></a>
<img src="http://www.hackthebox.eu/badge/image/370240" alt="Hack The Box">
<img align="center" src="https://github-readme-stats.vercel.app/api?username=theabbie&show_icons=true&include_all_commits=true&theme=radical" alt="TheAbbie's github stats" />
<img align="center" src="https://github-readme-stats.vercel.app/api/top-langs/?username=theabbie&layout=compact&theme=radical" />
<table>
<caption>Abhishek Chaudhary</caption>
<thead>
<tr>
<th colspan="2">Quick Info</th>
</tr>
</thead>
<tbody>
<tr><th scope='row'>Name</th><td>Abhishek Chaudhary</td></tr>
<tr><th scope='row'>Born</th><td><time datetime="2002-01-11 08:00">11 January, 2002</time></td></tr>
<tr><th scope='row'>Education</th><td>B.E.</td></tr>
<tr><th scope='row'>Alma mater</th><td>Fr C Rodrigues institute of technology</td></tr>
<tr><th scope='row'>Nationality</th><td>Indian</td></tr>
<tr><th scope='row'>Occupation</th><td>Web Developer</td></tr>
<tr><th scope='row'>Skills</th><td>HTML, CSS, JavaScript, Node.js, SEO</td></tr>
<tr><th scope='row'>Other Name</th><td>TheAbbie</td></tr>
<tr><th scope='row'>Title</th><td>CEO of TheAbbie</td></tr>
<tr><th scope='row'>Known For</th><td>TheAbbie</td></tr>
</tbody>
</table>
## My Octocat
<img src="https://theabbie.github.io/files/octocat.png" alt="TheAbbie" width="200" height="200">
## My Blog
https://theabbie.github.io/blog
## Tasks
- [x] Born
- [ ] Got a job
- [ ] Married
- [ ] Have children
- [ ] Die
## 10 Ways to Contact Me
<ul>
<li><a href="mailto:[email protected]" rel="me">Mail</a>
<li><a href="https://www.instagram.com/sasta_abbie/" rel="me">Instagram DM</a>
<li><a href="https://t.me/theabbie" rel="me">Telegram</a>
<li><a href="https://wa.me/918928412138?text=Hi" rel="me">Whatsapp</a>
<li><a href="https://linkedin.com/in/theabbie" rel="me">Linkedin</a>
<li><a href="https://twitter.com/theabbiee" rel="me">Twitter</a>
<li><a href="https://www.snapchat.com/add/abbie_shaikh" rel="me">Snapchat</a>
<li><a href="https://icq.im/theabbie" rel="me">ICQ</a>
<li><a href="https://www.facebook.com/abhishek.vice.versa" rel="me">Facebook</a>
<li>Call</li>
</ul>
## Donate
[](https://ko-fi.com/K3K31DJFA)
[](https://patreon.com/theabbie)
[](https://www.paypal.me/theabbie)
[](https://donorbox.org/theabbie)
[](https://opencollective.com/theabbie)
[](https://flattr.com/@theabbie)
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/groups/item/transitive_members/item/user/user_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from ......models import user
from ......models.o_data_errors import o_data_error
class UserRequestBuilder():
"""
Casts the previous resource to user.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new UserRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/groups/{group%2Did}/transitiveMembers/{directoryObject%2Did}/microsoft.graph.user{?%24select,%24expand}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_get_request_information(self,request_configuration: Optional[UserRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
Get the item of type microsoft.graph.directoryObject as microsoft.graph.user
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = "application/json"
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
async def get(self,request_configuration: Optional[UserRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[user.User]:
"""
Get the item of type microsoft.graph.directoryObject as microsoft.graph.user
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: Optional[user.User]
"""
request_info = self.create_get_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_async(request_info, user.User, response_handler, error_mapping)
@dataclass
class UserRequestBuilderGetQueryParameters():
"""
Get the item of type microsoft.graph.directoryObject as microsoft.graph.user
"""
# Expand related entities
expand: Optional[List[str]] = None
# Select properties to be returned
select: Optional[List[str]] = None
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "expand":
return "%24expand"
if original_name == "select":
return "%24select"
return original_name
@dataclass
class UserRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[UserRequestBuilder.UserRequestBuilderGetQueryParameters] = None
|
PypiClean
|
/MultistateEpigeneticPacemaker-0.0.1.tar.gz/MultistateEpigeneticPacemaker-0.0.1/msepm/msepm_cv.py
|
import random
from typing import Dict, Tuple
import joblib
import numpy as np
from tqdm import tqdm
from msepm.base import EPMBase
from msepm import MultistateEpigeneticPacemaker
from msepm.helpers import get_fold_step_size, tqdm_joblib
class MultistateEpigeneticPacemakerCV(EPMBase):
    """
    Cross-validated wrapper around MultistateEpigeneticPacemaker: fits one model
    per fold, averages coefficients, intercepts, and error across the folds
    (weighted by training-fold size), and can return out-of-fold state predictions.
    """
def __init__(self,
cv_folds: int = 3, randomize_sample_order: bool = False,
iter_limit=100, n_jobs=1,
error_tolerance=0.001, learning_rate=0.01,
scale_X=False, verbose=False):
EPMBase.__init__(self)
self.cv_folds = cv_folds
self.randomize = randomize_sample_order
self.iter_limit = iter_limit
self.n_jobs = n_jobs
self.error_tolerance = error_tolerance
self.learning_rate = learning_rate
self.scale_X = scale_X
self.verbose = verbose
def fit(self, X, Y, sample_weights=None, return_out_of_fold_predictions=False):
cv_groups = self.get_cv_folds(X.shape[0])
fold_count = 0
# reshape X if one dimensional
X_fit = X if len(X.shape) > 1 else X.reshape(-1, 1)
coefs, intercepts, errors = np.zeros((Y.shape[0], X_fit.shape[1])), np.zeros(Y.shape[0]), 0.0
training_sample_count = 0
predictions = {}
with tqdm_joblib(tqdm(desc="Fitting CV Folds", total=self.cv_folds,
disable=True if not self.verbose else False)) as progress_bar:
models = joblib.Parallel(n_jobs=self.n_jobs)(
joblib.delayed(self.fit_fold)(*[X_fit, Y,
test_indices,
return_out_of_fold_predictions,
sample_weights]) for
test_indices in cv_groups)
for model, train_len, model_predictions in models:
training_sample_count += train_len
predictions.update(model_predictions)
coefs += model._coefs * train_len
intercepts += model._intercepts * train_len
errors += model._error * train_len
fold_count += 1
self._coefs = coefs / training_sample_count
self._intercepts = intercepts / training_sample_count
self._error = errors / training_sample_count
if return_out_of_fold_predictions:
return self.unpack_out_of_fold_predictions(predictions)
def fit_fold(self, X, Y, test_indices, return_out_of_fold_predictions=False, sample_weights=None):
fold_epm = MultistateEpigeneticPacemaker(iter_limit=self.iter_limit, error_tolerance=self.error_tolerance,
learning_rate=self.learning_rate, scale_X=self.scale_X, n_jobs=1)
train_indices = [index for index in range(X.shape[0]) if index not in test_indices]
train_Y = Y[:, train_indices]
train_X = X[train_indices, :]
test_Y = Y[:, test_indices]
fold_epm.fit(train_X, train_Y, sample_weights=sample_weights)
predictions = {}
if return_out_of_fold_predictions:
test_states = fold_epm.predict(test_Y)
for index, state in zip(test_indices, test_states):
predictions[index] = state
return fold_epm, len(train_indices), predictions
def get_cv_folds(self, sample_number):
if self.cv_folds < 0:
self.cv_folds = sample_number
sample_indices = [count for count in range(sample_number)]
if self.randomize:
random.shuffle(sample_indices)
step_size = get_fold_step_size(sample_number, self.cv_folds)
test_indices = []
for fold in range(self.cv_folds):
if fold + 1 == self.cv_folds:
test_indices.append(sample_indices[fold * step_size:])
else:
test_indices.append(sample_indices[fold * step_size: fold * step_size + step_size])
return test_indices
@staticmethod
def unpack_out_of_fold_predictions(predictions):
return np.array([predictions[index] for index in range(len(predictions))])
|
PypiClean
|
/simple_rl-0.811.tar.gz/simple_rl-0.811/README.md
|
# simple_rl
A simple framework for experimenting with Reinforcement Learning in Python.
There are loads of other great libraries out there for RL. The aim of this one is twofold:
1. Simplicity.
2. Reproducibility of results.
A brief tutorial for a slightly earlier version is available [here](http://cs.brown.edu/~dabel/blog/posts/simple_rl.html). As of version 0.77, the library should work with both Python 2 and Python 3. Please let me know if you find that is not the case!
simple_rl requires [numpy](http://www.numpy.org/) and [matplotlib](http://matplotlib.org/). Some MDPs have visuals, too, which require [pygame](http://www.pygame.org/news). It also includes support for hooking into any of the [Open AI Gym environments](https://gym.openai.com/envs). The library comes with a basic test script, contained in the _tests_ directory; I suggest running it and making sure all tests pass when you install the library.
[Documentation available here](https://david-abel.github.io/simple_rl/docs/index.html)
## Installation
The easiest way to install is with [pip](https://pypi.python.org/pypi/pip). Just run:
pip install simple_rl
Alternatively, you can download simple_rl [here](https://github.com/david-abel/simple_rl/tarball/v0.811).
## New Feature: Easy Reproduction of Results
I just added a new feature I'm quite excited about: *easy reproduction of results*. Every experiment run now outputs a file "full_experiment.txt" in the _results/exp_name/_ directory. The new function _reproduce_from_exp_file(file_name)_, when pointed at an experiment directory, will reassemble and rerun an entire experiment based on this file. The goal here is to encourage simple tracking of experiments and enable quick result-reproduction. It only works with MDPs though -- it does not yet work with OOMDPs, POMDPs, or MarkovGames (I'd be delighted if someone wants to make it work, though!).
See the second example below for a quick sense of how to use this feature.
## Example
Some examples showcasing basic functionality are included in the [examples](https://github.com/david-abel/simple_rl/tree/master/examples) directory.
To run a simple experiment, import the _run_agents_on_mdp(agent_list, mdp)_ method from _simple_rl.run_experiments_ and call it with some agents for a given MDP. For example:
# Imports
from simple_rl.run_experiments import run_agents_on_mdp
from simple_rl.tasks import GridWorldMDP
from simple_rl.agents import QLearningAgent
# Run Experiment
mdp = GridWorldMDP()
agent = QLearningAgent(mdp.get_actions())
run_agents_on_mdp([agent], mdp)
Running the above code will run _Q_-learning on a simple GridWorld. When it finishes it stores the results in _cur_dir/results/*_ and makes and opens the following plot:
<img src="https://david-abel.github.io/blog/posts/images/simple_grid.jpg" width="480" align="center">
For a slightly more complicated example, take a look at the code of _simple_example.py_. Here we run two agents on the grid world from the Russell-Norvig AI textbook:
from simple_rl.agents import QLearningAgent, RandomAgent, RMaxAgent
from simple_rl.tasks import GridWorldMDP
from simple_rl.run_experiments import run_agents_on_mdp
# Setup MDP.
mdp = GridWorldMDP(width=4, height=3, init_loc=(1, 1), goal_locs=[(4, 3)], lava_locs=[(4, 2)], gamma=0.95, walls=[(2, 2)], slip_prob=0.05)
# Setup Agents.
ql_agent = QLearningAgent(actions=mdp.get_actions())
rmax_agent = RMaxAgent(actions=mdp.get_actions())
rand_agent = RandomAgent(actions=mdp.get_actions())
# Run experiment and make plot.
run_agents_on_mdp([ql_agent, rmax_agent, rand_agent], mdp, instances=5, episodes=50, steps=10)
The above code will generate the following plot:
<img src="https://david-abel.github.io/blog/posts/images/rn_grid.jpg" width="480" align="center">
To showcase the new reproducibility feature, suppose we now wanted to reproduce the above experiment. We just do the following:
from simple_rl.run_experiments import reproduce_from_exp_file
reproduce_from_exp_file("gridworld_h-3_w-4")
Which will rerun the entire experiment, based on a file created and populated behind the scenes. Then, we should get the following plot:
<img src="https://david-abel.github.io/blog/posts/images/rn_grid_reproduce.jpg" width="480" align="center">
Easy! This is a new feature, so there may be bugs -- just let me know as things come up. It's only supposed to work for MDPs, not POMDPs/OOMDPs/MarkovGameMDPs (so far). Take a look at [_reproduce_example.py_](https://github.com/david-abel/simple_rl/blob/master/examples/reproduce_example.py) for a bit more detail.
## Overview
* (_agents_): Code for some basic agents (a random actor, _Q_-learning, [[R-Max]](http://www.jmlr.org/papers/volume3/brafman02a/brafman02a.pdf), _Q_-learning with a Linear Approximator, and so on).
* (_experiments_): Code for an Experiment class to track parameters and reproduce results.
* (_mdp_): Code for a basic MDP and MDPState class, and an MDPDistribution class (for lifelong learning). Also contains OO-MDP implementation [[Diuk et al. 2008]](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.149.7056&rep=rep1&type=pdf).
* (_planning_): Implementations for planning algorithms, including ValueIteration and MCTS [[Coulom 2006]](https://hal.archives-ouvertes.fr/file/index/docid/116992/filename/CG2006.pdf), the latter still being in development.
* (_tasks_): Implementations for a few standard MDPs (grid world, N-chain, Taxi [[Dietterich 2000]](http://www.scs.cmu.edu/afs/cs/project/jair/pub/volume13/dietterich00a.pdf), and the [OpenAI Gym](https://gym.openai.com/envs)).
* (_utils_): Code for charting and other utilities.
## Contributing
If you'd like to contribute: that's great! Take a look at some of the needed improvements below: I'd love for folks to work on those items. Please see the [contribution guidelines](https://github.com/david-abel/simple_rl/blob/master/CONTRIBUTING.md). Email me with any questions.
## Making a New MDP
Make an MDP subclass, which needs (a minimal sketch follows this list):
* A static variable, _ACTIONS_, which is a list of strings denoting each action.
* Implement a reward and transition function and pass them to MDP constructor (along with _ACTIONS_).
* I also suggest overwriting the "\_\_str\_\_" method of the class, and adding a "\_\_init\_\_.py" file to the directory.
* Create a State subclass for your MDP (if necessary). I suggest overwriting the "\_\_hash\_\_", "\_\_eq\_\_", and "\_\_str\_\_" for the class to play along well with the agents.
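To make the list above concrete, here is a minimal sketch of a custom coin-flip MDP. The import paths, the `State(data=...)` constructor, and the exact `MDP.__init__` signature are assumptions inferred from the bullets above -- check the `simple_rl.mdp` source for the authoritative interface.

    import random

    from simple_rl.mdp.MDPClass import MDP
    from simple_rl.mdp.StateClass import State

    class CoinFlipMDP(MDP):
        # Static list of action names, as required above.
        ACTIONS = ["flip", "stay"]

        def __init__(self):
            # Assumed constructor arguments: actions, transition function, reward function, initial state.
            MDP.__init__(self, CoinFlipMDP.ACTIONS, self._transition_func,
                         self._reward_func, init_state=State(data=["tails"]))

        def _reward_func(self, state, action, next_state=None):
            # +1 whenever the coin shows heads (the exact signature may differ by version).
            return 1.0 if state.data == ["heads"] else 0.0

        def _transition_func(self, state, action):
            # "flip" lands the coin on a random side; any other action leaves it alone.
            if action == "flip":
                return State(data=[random.choice(["heads", "tails"])])
            return state

        def __str__(self):
            return "coin_flip_mdp"

If those assumptions hold, `run_agents_on_mdp([QLearningAgent(actions=CoinFlipMDP().get_actions())], CoinFlipMDP())` exercises it just like the grid-world examples above.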
## Making a New Agent
Make an Agent subclass, which requires (see the sketch after this list):
* A method, _act(self, state, reward)_, that returns an action.
* A method, _reset()_, that puts the agent back to its _tabula rasa_ state.
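As a hedged illustration of those two requirements, here is a minimal agent that always picks the first available action; the base-class import path and constructor arguments are assumptions, while `act` and `reset` follow the list above.

    from simple_rl.agents.AgentClass import Agent

    class FirstActionAgent(Agent):
        def __init__(self, actions):
            # Assumed base constructor: a display name plus the action list.
            Agent.__init__(self, name="first_action", actions=actions)

        def act(self, state, reward):
            # Ignore state and reward; always return the first known action.
            return self.actions[0]

        def reset(self):
            # Nothing is learned, so returning to tabula rasa is a no-op.
            pass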
## In Development
I'm hoping to add the following features:
* __Planning__: Finish MCTS [[Coulom 2006]](https://hal.inria.fr/file/index/docid/116992/filename/CG2006.pdf), implement RTDP [[Barto et al. 1995]](https://pdfs.semanticscholar.org/2838/e01572bf53805c502ec31e3e00a8e1e0afcf.pdf)
* __Deep RL__: Write a DQN [[Mnih et al. 2015]](http://www.davidqiu.com:8888/research/nature14236.pdf) in PyTorch, possibly others (some kind of policy gradient).
* __Efficiency__: Convert most defaultdict/dict uses to numpy.
* __Reproducibility__: The new reproduce feature is limited in scope -- I'd love for someone to extend it to work with OO-MDPs, Planning, MarkovGames, POMDPs, and beyond.
* __Docs__: Tutorial and documentation.
* __Visuals__: Unify MDP visualization.
* __Misc__: Additional testing.
Cheers,
-Dave
|
PypiClean
|
/nnisgf-0.4-py3-none-manylinux1_x86_64.whl/nnisgf-0.4.data/data/nni/node_modules/wide-align/node_modules/is-fullwidth-code-point/readme.md
|
# is-fullwidth-code-point [](https://travis-ci.org/sindresorhus/is-fullwidth-code-point)
> Check if the character represented by a given [Unicode code point](https://en.wikipedia.org/wiki/Code_point) is [fullwidth](https://en.wikipedia.org/wiki/Halfwidth_and_fullwidth_forms)
## Install
```
$ npm install --save is-fullwidth-code-point
```
## Usage
```js
const isFullwidthCodePoint = require('is-fullwidth-code-point');
isFullwidthCodePoint('谢'.codePointAt());
//=> true
isFullwidthCodePoint('a'.codePointAt());
//=> false
```
## API
### isFullwidthCodePoint(input)
#### input
Type: `number`
[Code point](https://en.wikipedia.org/wiki/Code_point) of a character.
## License
MIT © [Sindre Sorhus](https://sindresorhus.com)
|
PypiClean
|
/rflow_tfx-1.1.18-py3-none-any.whl/tfx/tools/cli/handler/base_handler.py
|
"""Base handler class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import json
import os
import subprocess
import sys
import tempfile
from typing import Any, Dict, List, Text
import click
from six import with_metaclass
from tfx.dsl.components.base import base_driver
from tfx.dsl.io import fileio
from tfx.tools.cli import labels
from tfx.utils import io_utils
class BaseHandler(with_metaclass(abc.ABCMeta, object)):
"""Base Handler for CLI.
Attributes:
    flags_dict: A dictionary with flags provided in a command.
"""
def __init__(self, flags_dict: Dict[Text, Any]):
self.flags_dict = flags_dict
self._handler_home_dir = self._get_handler_home()
@abc.abstractmethod
def create_pipeline(self) -> None:
"""Creates pipeline for the handler."""
pass
@abc.abstractmethod
def update_pipeline(self) -> None:
"""Updates pipeline for the handler."""
pass
@abc.abstractmethod
def list_pipelines(self) -> None:
"""List all the pipelines in the environment."""
pass
@abc.abstractmethod
def delete_pipeline(self) -> None:
"""Deletes pipeline for the handler."""
pass
@abc.abstractmethod
def compile_pipeline(self) -> None:
"""Compiles pipeline for the handler."""
pass
@abc.abstractmethod
  def create_run(self) -> None:
    """Runs a pipeline for the handler."""
pass
@abc.abstractmethod
  def delete_run(self) -> None:
    """Deletes a run."""
pass
@abc.abstractmethod
  def terminate_run(self) -> None:
    """Stops a run."""
pass
@abc.abstractmethod
  def list_runs(self) -> None:
    """Lists all runs of a pipeline."""
pass
@abc.abstractmethod
def get_run(self) -> None:
"""Checks run status."""
pass
def _check_pipeline_dsl_path(self) -> None:
"""Check if pipeline dsl path exists."""
pipeline_dsl_path = self.flags_dict[labels.PIPELINE_DSL_PATH]
if not fileio.exists(pipeline_dsl_path):
sys.exit('Invalid pipeline path: {}'.format(pipeline_dsl_path))
def _check_dsl_runner(self) -> None:
"""Check if runner in dsl is same as engine flag."""
engine_flag = self.flags_dict[labels.ENGINE_FLAG]
with open(self.flags_dict[labels.PIPELINE_DSL_PATH], 'r') as f:
dsl_contents = f.read()
runner_names = {
labels.AIRFLOW_ENGINE: 'AirflowDagRunner',
labels.KUBEFLOW_ENGINE: 'KubeflowDagRunner',
labels.BEAM_ENGINE: 'BeamDagRunner',
labels.LOCAL_ENGINE: 'LocalDagRunner',
}
if runner_names[engine_flag] not in dsl_contents:
sys.exit('{} runner not found in dsl.'.format(engine_flag))
def _extract_pipeline_args(self) -> Dict[Text, Any]:
"""Get pipeline args from the DSL.
Returns:
Python dictionary with pipeline details extracted from DSL.
"""
    # TODO(b/157599419): Consider using a better way to extract pipeline info:
    # e.g. pipeline name/root. Currently we rely on consulting an env var when
# creating Pipeline object, which is brittle.
pipeline_dsl_path = self.flags_dict[labels.PIPELINE_DSL_PATH]
if os.path.isdir(pipeline_dsl_path):
sys.exit('Provide dsl file path.')
# Create an environment for subprocess.
temp_env = os.environ.copy()
# Create temp file to store pipeline_args from pipeline dsl.
temp_file = tempfile.mkstemp(prefix='cli_tmp_', suffix='_pipeline_args')[1]
# Store temp_file path in temp_env.
# LINT.IfChange
temp_env[labels.TFX_JSON_EXPORT_PIPELINE_ARGS_PATH] = temp_file
# LINT.ThenChange(
# ../../../orchestration/beam/beam_dag_runner.py,
# ../../../orchestration/local/local_dag_runner.py,
# )
# Run dsl with mock environment to store pipeline args in temp_file.
self._subprocess_call([sys.executable, pipeline_dsl_path], env=temp_env)
if os.stat(temp_file).st_size != 0:
# Load pipeline_args from temp_file for TFX pipelines
with open(temp_file, 'r') as f:
pipeline_args = json.load(f)
else:
# For non-TFX pipelines, extract pipeline name from the dsl filename.
pipeline_args = {
labels.PIPELINE_NAME:
os.path.basename(pipeline_dsl_path).split('.')[0]
}
# Delete temp file
io_utils.delete_dir(temp_file)
return pipeline_args
def _get_handler_home(self) -> Text:
"""Sets handler home.
Returns:
Path to handler home directory.
"""
engine_flag = self.flags_dict[labels.ENGINE_FLAG]
handler_home_dir = engine_flag.upper() + '_HOME'
if handler_home_dir in os.environ:
return os.environ[handler_home_dir]
return os.path.join(os.environ['HOME'], 'tfx', engine_flag, '')
def _get_deprecated_handler_home(self) -> Text:
"""Sets old handler home for compatibility.
Returns:
Path to handler home directory.
"""
engine_flag = self.flags_dict[labels.ENGINE_FLAG]
handler_home_dir = engine_flag.upper() + '_HOME'
if handler_home_dir in os.environ:
return os.environ[handler_home_dir]
return os.path.join(os.environ['HOME'], engine_flag, '')
def _subprocess_call(self,
command: List[Text],
env: Dict[Text, Any] = None) -> None:
return_code = subprocess.call(command, env=env)
if return_code != 0:
sys.exit('Error while running "{}" '.format(' '.join(command)))
def _check_pipeline_existence(self,
pipeline_name: Text,
required: bool = True) -> None:
"""Check if pipeline folder exists and if not, exit system.
Args:
pipeline_name: Name of the pipeline.
required: Set it as True if pipeline needs to exist else set it to False.
"""
handler_pipeline_path = os.path.join(self._handler_home_dir, pipeline_name)
# Check if pipeline folder exists.
exists = fileio.exists(handler_pipeline_path)
if required and not exists:
# Check pipeline directory prior 0.25 and move files to the new location
# automatically.
old_handler_pipeline_path = os.path.join(
self._get_deprecated_handler_home(), pipeline_name)
if fileio.exists(old_handler_pipeline_path):
fileio.makedirs(os.path.dirname(handler_pipeline_path))
fileio.rename(old_handler_pipeline_path, handler_pipeline_path)
engine_flag = self.flags_dict[labels.ENGINE_FLAG]
handler_home_variable = engine_flag.upper() + '_HOME'
click.echo(
('[WARNING] Pipeline "{pipeline_name}" was found in "{old_path}", '
'but the location that TFX stores pipeline information was moved '
'since TFX 0.25.0.\n'
'[WARNING] Your files in "{old_path}" was automatically moved to '
'the new location, "{new_path}".\n'
'[WARNING] If you want to keep the files at the old location, set '
'`{handler_home}` environment variable to "{old_handler_home}".'
).format(
pipeline_name=pipeline_name,
old_path=old_handler_pipeline_path,
new_path=handler_pipeline_path,
handler_home=handler_home_variable,
old_handler_home=self._get_deprecated_handler_home()),
err=True)
else:
sys.exit('Pipeline "{}" does not exist.'.format(pipeline_name))
elif not required and exists:
sys.exit('Pipeline "{}" already exists.'.format(pipeline_name))
def get_schema(self):
pipeline_name = self.flags_dict[labels.PIPELINE_NAME]
# Check if pipeline exists.
self._check_pipeline_existence(pipeline_name)
# Path to pipeline args.
pipeline_args_path = os.path.join(self._handler_home_dir,
self.flags_dict[labels.PIPELINE_NAME],
'pipeline_args.json')
# Get pipeline_root.
with open(pipeline_args_path, 'r') as f:
pipeline_args = json.load(f)
self._read_schema_from_pipeline_root(pipeline_name,
pipeline_args[labels.PIPELINE_ROOT])
def _read_schema_from_pipeline_root(self, pipeline_name, pipeline_root):
# Check if pipeline root created. If not, it means that the user has not
    # created a run yet or the pipeline is still running for the first time.
if not fileio.exists(pipeline_root):
sys.exit(
          'Create a run before inferring schema. If pipeline is already running, then wait for it to successfully finish.'
)
# If pipeline_root exists, then check if SchemaGen output exists.
components = fileio.listdir(pipeline_root)
if 'SchemaGen' not in components:
sys.exit(
'Either SchemaGen component does not exist or pipeline is still running. If pipeline is running, then wait for it to successfully finish.'
)
# Get the latest SchemaGen output.
component_output_dir = os.path.join(pipeline_root, 'SchemaGen')
schema_dir = os.path.join(component_output_dir, 'schema')
schemagen_outputs = fileio.listdir(schema_dir)
latest_schema_folder = max(schemagen_outputs, key=int)
# Copy schema to current dir.
latest_schema_uri = base_driver._generate_output_uri( # pylint: disable=protected-access
component_output_dir, 'schema', int(latest_schema_folder))
latest_schema_path = os.path.join(latest_schema_uri, 'schema.pbtxt')
curr_dir_path = os.path.join(os.getcwd(), 'schema.pbtxt')
io_utils.copy_file(latest_schema_path, curr_dir_path, overwrite=True)
# Print schema and path to schema
click.echo('Path to schema: {}'.format(curr_dir_path))
click.echo('*********SCHEMA FOR {}**********'.format(pipeline_name.upper()))
with open(curr_dir_path, 'r') as f:
click.echo(f.read())
|
PypiClean
|
/Django-4.2.4.tar.gz/Django-4.2.4/django/contrib/admin/static/admin/js/SelectFilter2.js
|
/*
SelectFilter2 - Turns a multiple-select box into a filter interface.
Requires core.js and SelectBox.js.
*/
'use strict';
{
window.SelectFilter = {
init: function(field_id, field_name, is_stacked) {
if (field_id.match(/__prefix__/)) {
// Don't initialize on empty forms.
return;
}
const from_box = document.getElementById(field_id);
from_box.id += '_from'; // change its ID
from_box.className = 'filtered';
for (const p of from_box.parentNode.getElementsByTagName('p')) {
if (p.classList.contains("info")) {
// Remove <p class="info">, because it just gets in the way.
from_box.parentNode.removeChild(p);
} else if (p.classList.contains("help")) {
// Move help text up to the top so it isn't below the select
// boxes or wrapped off on the side to the right of the add
// button:
from_box.parentNode.insertBefore(p, from_box.parentNode.firstChild);
}
}
// <div class="selector"> or <div class="selector stacked">
const selector_div = quickElement('div', from_box.parentNode);
selector_div.className = is_stacked ? 'selector stacked' : 'selector';
// <div class="selector-available">
const selector_available = quickElement('div', selector_div);
selector_available.className = 'selector-available';
const title_available = quickElement('h2', selector_available, interpolate(gettext('Available %s') + ' ', [field_name]));
quickElement(
'span', title_available, '',
'class', 'help help-tooltip help-icon',
'title', interpolate(
gettext(
'This is the list of available %s. You may choose some by ' +
'selecting them in the box below and then clicking the ' +
'"Choose" arrow between the two boxes.'
),
[field_name]
)
);
const filter_p = quickElement('p', selector_available, '', 'id', field_id + '_filter');
filter_p.className = 'selector-filter';
const search_filter_label = quickElement('label', filter_p, '', 'for', field_id + '_input');
quickElement(
'span', search_filter_label, '',
'class', 'help-tooltip search-label-icon',
'title', interpolate(gettext("Type into this box to filter down the list of available %s."), [field_name])
);
filter_p.appendChild(document.createTextNode(' '));
const filter_input = quickElement('input', filter_p, '', 'type', 'text', 'placeholder', gettext("Filter"));
filter_input.id = field_id + '_input';
selector_available.appendChild(from_box);
const choose_all = quickElement('a', selector_available, gettext('Choose all'), 'title', interpolate(gettext('Click to choose all %s at once.'), [field_name]), 'href', '#', 'id', field_id + '_add_all_link');
choose_all.className = 'selector-chooseall';
// <ul class="selector-chooser">
const selector_chooser = quickElement('ul', selector_div);
selector_chooser.className = 'selector-chooser';
const add_link = quickElement('a', quickElement('li', selector_chooser), gettext('Choose'), 'title', gettext('Choose'), 'href', '#', 'id', field_id + '_add_link');
add_link.className = 'selector-add';
const remove_link = quickElement('a', quickElement('li', selector_chooser), gettext('Remove'), 'title', gettext('Remove'), 'href', '#', 'id', field_id + '_remove_link');
remove_link.className = 'selector-remove';
// <div class="selector-chosen">
const selector_chosen = quickElement('div', selector_div, '', 'id', field_id + '_selector_chosen');
selector_chosen.className = 'selector-chosen';
const title_chosen = quickElement('h2', selector_chosen, interpolate(gettext('Chosen %s') + ' ', [field_name]));
quickElement(
'span', title_chosen, '',
'class', 'help help-tooltip help-icon',
'title', interpolate(
gettext(
'This is the list of chosen %s. You may remove some by ' +
'selecting them in the box below and then clicking the ' +
'"Remove" arrow between the two boxes.'
),
[field_name]
)
);
const filter_selected_p = quickElement('p', selector_chosen, '', 'id', field_id + '_filter_selected');
filter_selected_p.className = 'selector-filter';
const search_filter_selected_label = quickElement('label', filter_selected_p, '', 'for', field_id + '_selected_input');
quickElement(
'span', search_filter_selected_label, '',
'class', 'help-tooltip search-label-icon',
'title', interpolate(gettext("Type into this box to filter down the list of selected %s."), [field_name])
);
filter_selected_p.appendChild(document.createTextNode(' '));
const filter_selected_input = quickElement('input', filter_selected_p, '', 'type', 'text', 'placeholder', gettext("Filter"));
filter_selected_input.id = field_id + '_selected_input';
const to_box = quickElement('select', selector_chosen, '', 'id', field_id + '_to', 'multiple', '', 'size', from_box.size, 'name', from_box.name);
to_box.className = 'filtered';
const warning_footer = quickElement('div', selector_chosen, '', 'class', 'list-footer-display');
quickElement('span', warning_footer, '', 'id', field_id + '_list-footer-display-text');
quickElement('span', warning_footer, ' (click to clear)', 'class', 'list-footer-display__clear');
const clear_all = quickElement('a', selector_chosen, gettext('Remove all'), 'title', interpolate(gettext('Click to remove all chosen %s at once.'), [field_name]), 'href', '#', 'id', field_id + '_remove_all_link');
clear_all.className = 'selector-clearall';
from_box.name = from_box.name + '_old';
// Set up the JavaScript event handlers for the select box filter interface
const move_selection = function(e, elem, move_func, from, to) {
if (elem.classList.contains('active')) {
move_func(from, to);
SelectFilter.refresh_icons(field_id);
SelectFilter.refresh_filtered_selects(field_id);
SelectFilter.refresh_filtered_warning(field_id);
}
e.preventDefault();
};
choose_all.addEventListener('click', function(e) {
move_selection(e, this, SelectBox.move_all, field_id + '_from', field_id + '_to');
});
add_link.addEventListener('click', function(e) {
move_selection(e, this, SelectBox.move, field_id + '_from', field_id + '_to');
});
remove_link.addEventListener('click', function(e) {
move_selection(e, this, SelectBox.move, field_id + '_to', field_id + '_from');
});
clear_all.addEventListener('click', function(e) {
move_selection(e, this, SelectBox.move_all, field_id + '_to', field_id + '_from');
});
warning_footer.addEventListener('click', function(e) {
filter_selected_input.value = '';
SelectBox.filter(field_id + '_to', '');
SelectFilter.refresh_filtered_warning(field_id);
SelectFilter.refresh_icons(field_id);
});
filter_input.addEventListener('keypress', function(e) {
SelectFilter.filter_key_press(e, field_id, '_from', '_to');
});
filter_input.addEventListener('keyup', function(e) {
SelectFilter.filter_key_up(e, field_id, '_from');
});
filter_input.addEventListener('keydown', function(e) {
SelectFilter.filter_key_down(e, field_id, '_from', '_to');
});
filter_selected_input.addEventListener('keypress', function(e) {
SelectFilter.filter_key_press(e, field_id, '_to', '_from');
});
filter_selected_input.addEventListener('keyup', function(e) {
SelectFilter.filter_key_up(e, field_id, '_to', '_selected_input');
});
filter_selected_input.addEventListener('keydown', function(e) {
SelectFilter.filter_key_down(e, field_id, '_to', '_from');
});
selector_div.addEventListener('change', function(e) {
if (e.target.tagName === 'SELECT') {
SelectFilter.refresh_icons(field_id);
}
});
selector_div.addEventListener('dblclick', function(e) {
if (e.target.tagName === 'OPTION') {
if (e.target.closest('select').id === field_id + '_to') {
SelectBox.move(field_id + '_to', field_id + '_from');
} else {
SelectBox.move(field_id + '_from', field_id + '_to');
}
SelectFilter.refresh_icons(field_id);
}
});
from_box.closest('form').addEventListener('submit', function() {
SelectBox.filter(field_id + '_to', '');
SelectBox.select_all(field_id + '_to');
});
SelectBox.init(field_id + '_from');
SelectBox.init(field_id + '_to');
// Move selected from_box options to to_box
SelectBox.move(field_id + '_from', field_id + '_to');
// Initial icon refresh
SelectFilter.refresh_icons(field_id);
},
any_selected: function(field) {
// Temporarily add the required attribute and check validity.
field.required = true;
const any_selected = field.checkValidity();
field.required = false;
return any_selected;
},
refresh_filtered_warning: function(field_id) {
const count = SelectBox.get_hidden_node_count(field_id + '_to');
const selector = document.getElementById(field_id + '_selector_chosen');
const warning = document.getElementById(field_id + '_list-footer-display-text');
selector.className = selector.className.replace('selector-chosen--with-filtered', '');
warning.textContent = interpolate(ngettext(
'%s selected option not visible',
'%s selected options not visible',
count
), [count]);
if(count > 0) {
selector.className += ' selector-chosen--with-filtered';
}
},
refresh_filtered_selects: function(field_id) {
SelectBox.filter(field_id + '_from', document.getElementById(field_id + "_input").value);
SelectBox.filter(field_id + '_to', document.getElementById(field_id + "_selected_input").value);
},
refresh_icons: function(field_id) {
const from = document.getElementById(field_id + '_from');
const to = document.getElementById(field_id + '_to');
// Active if at least one item is selected
document.getElementById(field_id + '_add_link').classList.toggle('active', SelectFilter.any_selected(from));
document.getElementById(field_id + '_remove_link').classList.toggle('active', SelectFilter.any_selected(to));
// Active if the corresponding box isn't empty
document.getElementById(field_id + '_add_all_link').classList.toggle('active', from.querySelector('option'));
document.getElementById(field_id + '_remove_all_link').classList.toggle('active', to.querySelector('option'));
SelectFilter.refresh_filtered_warning(field_id);
},
filter_key_press: function(event, field_id, source, target) {
const source_box = document.getElementById(field_id + source);
// don't submit form if user pressed Enter
if ((event.which && event.which === 13) || (event.keyCode && event.keyCode === 13)) {
source_box.selectedIndex = 0;
SelectBox.move(field_id + source, field_id + target);
source_box.selectedIndex = 0;
event.preventDefault();
}
},
filter_key_up: function(event, field_id, source, filter_input) {
const input = filter_input || '_input';
const source_box = document.getElementById(field_id + source);
const temp = source_box.selectedIndex;
SelectBox.filter(field_id + source, document.getElementById(field_id + input).value);
source_box.selectedIndex = temp;
SelectFilter.refresh_filtered_warning(field_id);
SelectFilter.refresh_icons(field_id);
},
filter_key_down: function(event, field_id, source, target) {
const source_box = document.getElementById(field_id + source);
// right key (39) or left key (37)
const direction = source === '_from' ? 39 : 37;
// right arrow -- move across
if ((event.which && event.which === direction) || (event.keyCode && event.keyCode === direction)) {
const old_index = source_box.selectedIndex;
SelectBox.move(field_id + source, field_id + target);
SelectFilter.refresh_filtered_selects(field_id);
SelectFilter.refresh_filtered_warning(field_id);
source_box.selectedIndex = (old_index === source_box.length) ? source_box.length - 1 : old_index;
return;
}
// down arrow -- wrap around
if ((event.which && event.which === 40) || (event.keyCode && event.keyCode === 40)) {
source_box.selectedIndex = (source_box.length === source_box.selectedIndex + 1) ? 0 : source_box.selectedIndex + 1;
}
// up arrow -- wrap around
if ((event.which && event.which === 38) || (event.keyCode && event.keyCode === 38)) {
source_box.selectedIndex = (source_box.selectedIndex === 0) ? source_box.length - 1 : source_box.selectedIndex - 1;
}
}
};
window.addEventListener('load', function(e) {
document.querySelectorAll('select.selectfilter, select.selectfilterstacked').forEach(function(el) {
const data = el.dataset;
SelectFilter.init(el.id, data.fieldName, parseInt(data.isStacked, 10));
});
});
}
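// Usage note (sketch): the loader above picks up admin widgets rendered as, e.g.:
//   <select multiple id="id_members" class="selectfilter"
//           data-field-name="members" data-is-stacked="0">...</select>
// core.js (for quickElement) and SelectBox.js, plus the gettext/interpolate i18n
// helpers, must already be loaded on the page.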
|
PypiClean
|
/pyvision_toolkit-1.3.4.tar.gz/pyvision_toolkit-1.3.4/samples/pyvision_banner.py
|
import os.path
from Image import composite,LINEAR
import pyvision as pv
from pyvision.edge.sobel import sobel
#from pyvision.edge.canny import canny
from pyvision.point.DetectorSURF import DetectorSURF
import cv
if __name__ == '__main__':
ilog = pv.ImageLog()
source_name = os.path.join(pv.__path__[0],'data','misc','p5240019.jpg')
#Load source image and resize to smaller scale
im = pv.Image(source_name)
print("Size before affine scale: %s"%str(im.size))
im = pv.AffineScale(0.25,(320,240)).transformImage(im)
print("Size after scaling: %s"%str(im.size))
ilog.log(im, 'Input')
#im.show(window='Input', pos=(0,0))
#Generate edge image using sobel edge detector
    edges = sobel(im, 1, 0, 3, 0)
ilog.log(edges, 'Edges')
#edges.show(window='Edges', pos=(360,0))
#Generate threshold mask, shows numpy integration
mat = im.asMatrix2D()
high = mat > 180
low = mat < 50
mask = high#+low
ilog.log(pv.Image(1.0*mask), 'Mask')
#Composite operation using PIL
e = edges.asPIL().convert('RGB')
m = pv.Image(1.0*mask).asPIL()
i = im.asPIL()
logo = pv.Image(composite(i,e,m))
ilog.log(logo, 'Composite')
#logo.show(window='Composite', pos=(0,300) )
#Keypoint detection using OpenCV's SURF detector
logo_surf = logo.copy()
sm = pv.Image(im.asPIL().resize((320,240),LINEAR))
detector = DetectorSURF()
points = detector.detect(sm)
for score,pt,radius in points:
logo_surf.annotateCircle(pt*4,radius*4)
ilog.log(logo_surf, 'Annotated')
#logo_surf.show(window='Annotated',pos=(360,300))
#Demonstrate use of ImageMontage class to show a few small images in a single window
print("Have the image montage focused in UI and hit spacebar to continue...")
imontage = pv.ImageMontage([im,edges,logo,logo_surf], layout=(2,2), tileSize=im.size, gutter=3, byrow=True, labels=None)
imontage.show(window="Image Montage", delay=0)
#Show the images stored to the image log object
print("Showing image log. These images are stored in a tmp directory.")
ilog.show()
|
PypiClean
|
/protocols/http_server.py
|
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from pmercury.protocols.protocol import Protocol
class HTTP_Server(Protocol):
def __init__(self, fp_database=None, config=None):
# populate fingerprint databases
self.fp_db = {}
# configuration
HTTP_Server.all_headers = False
HTTP_Server.all_headers_and_data = False
        if config is None or 'http_server' not in config:
HTTP_Server.static_names = set([b'appex-activity-id',b'cdnuuid',b'cf-ray',b'content-range',b'content-type',
b'date',b'etag',b'expires',b'flow_context',b'ms-cv',b'msregion',b'ms-requestid',
b'request-id',b'vary',b'x-amz-cf-pop',b'x-amz-request-id',b'x-azure-ref-originshield',
b'x-cache',b'x-cache-hits',b'x-ccc',b'x-diagnostic-s',b'x-feserver',b'x-hw',
b'x-msedge-ref',b'x-ocsp-responder-id',b'x-requestid',b'x-served-by',b'x-timer',
b'x-trace-context'])
HTTP_Server.static_names_and_values = set([b'access-control-allow-credentials',b'access-control-allow-headers',
b'access-control-allow-methods',b'access-control-expose-headers',
b'cache-control',b'connection',b'content-language',b'content-transfer-encoding',
b'p3p',b'pragma',b'server',b'strict-transport-security',b'x-aspnetmvc-version',
b'x-aspnet-version',b'x-cid',b'x-ms-version',b'x-xss-protection'])
HTTP_Server.headers_data = [0,1,2]
HTTP_Server.contextual_data = {b'via':'via'}
else:
HTTP_Server.static_names = set([])
HTTP_Server.static_names_and_values = set([])
HTTP_Server.headers_data = []
HTTP_Server.contextual_data = {}
if 'static_names' in config['http_server']:
if config['http_server']['static_names'] == ['*']:
HTTP_Server.all_headers = True
HTTP_Server.static_names = set(map(lambda x: x.encode(), config['http_server']['static_names']))
if 'static_names_and_values' in config['http_server']:
if config['http_server']['static_names_and_values'] == ['*']:
HTTP_Server.all_headers_and_data = True
HTTP_Server.static_names_and_values = set(map(lambda x: x.encode(), config['http_server']['static_names_and_values']))
if 'preamble' in config['http_server']:
if 'version' in config['http_server']['preamble']:
HTTP_Server.headers_data.append(0)
if 'code' in config['http_server']['preamble']:
HTTP_Server.headers_data.append(1)
if 'reason' in config['http_server']['preamble']:
HTTP_Server.headers_data.append(2)
if '*' in config['http_server']['preamble']:
HTTP_Server.headers_data = [0,1,2]
if 'context' in config['http_server']:
for c in config['http_server']['context']:
HTTP_Server.contextual_data[c.encode()] = c.lower().replace('-','_')
@staticmethod
def proto_identify(data, offset, data_len):
if data_len-offset < 16:
return False
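        # The first six bytes at offset must spell b'HTTP/1' (72 84 84 80 47 49).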
if (data[offset] == 72 and
data[offset+1] == 84 and
data[offset+2] == 84 and
data[offset+3] == 80 and
data[offset+4] == 47 and
data[offset+5] == 49):
return True
return False
@staticmethod
def fingerprint(data, offset, data_len):
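        # b'\x0d\x0a' is CRLF: split the status line from the header block.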
t_ = data[offset:].split(b'\x0d\x0a', 1)
response = t_[0].split(b'\x20',2)
if len(response) < 2:
return None, None
c = []
for rh in HTTP_Server.headers_data:
try:
c.append('(%s)' % response[rh].hex())
except IndexError:
c.append('()')
if len(t_) == 1:
return ''.join(c), None
headers = t_[1].split(b'\x0d\x0a')
        if headers[0] == b'':
headers = headers[1:]
http_ah = HTTP_Server.all_headers
http_ahd = HTTP_Server.all_headers_and_data
http_sn = HTTP_Server.static_names
http_snv = HTTP_Server.static_names_and_values
http_ctx = HTTP_Server.contextual_data
context = []
for h_ in headers:
if h_ == b'':
break
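            # b'\x3a\x20' is b': ', the header name/value separator.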
t0_ = h_.split(b'\x3a\x20',1)[0]
t0_lower = t0_.lower()
h_c = ''
if http_ahd:
h_c = h_.hex()
elif t0_lower in http_snv:
h_c = h_.hex()
elif t0_lower in http_sn:
h_c = t0_.hex()
elif http_ah:
h_c = t0_.hex()
if h_c != '':
c.append('(%s)' % h_c)
if t0_lower in http_ctx:
if b'\x3a\x20' in h_:
try:
context.append({'name':http_ctx[t0_lower], 'data':h_.split(b'\x3a\x20',1)[1].decode()})
except UnicodeDecodeError:
context.append({'name':http_ctx[t0_lower], 'data':h_.split(b'\x3a\x20',1)[1].hex()})
else:
context.append({'name':http_ctx[t0_lower], 'data':''})
return ''.join(c), context
def get_human_readable(self, fp_str_):
t_ = [bytes.fromhex(x[1:]) for x in fp_str_.split(')')[:-1]]
try:
fp_h = [{'version':t_[0].decode()},{'code':t_[1].decode()},{'response':t_[2].decode()}]
        except Exception:
fp_h = [{'version':t_[0].hex()},{'code':t_[1].hex()},{'response':t_[2].hex()}]
for i in range(3, len(t_)-1):
field = t_[i].split(b': ')
if len(field) == 2:
try:
fp_h.append({field[0].decode(): field[1].decode()})
                except Exception:
fp_h.append({field[0].hex(): field[1].hex()})
else:
try:
fp_h.append({field[0].decode(): ''})
                except Exception:
fp_h.append({field[0].hex(): ''})
return fp_h
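# Usage sketch (hypothetical response bytes, not taken from real traffic):
#   hs = HTTP_Server()          # populates the class-level header configuration
#   data = b'HTTP/1.1 200 OK\r\nServer: nginx\r\nVia: 1.1 proxy\r\n\r\n'
#   fp_str, context = HTTP_Server.fingerprint(data, 0, len(data))
#   # fp_str is a string of hex-encoded '(...)' groups; context carries the 'via' header.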
|
PypiClean
|
/collective.geo.polymaps-0.1.tar.gz/collective.geo.polymaps-0.1/collective/geo/polymaps/browser/viewlets.py
|
from zope.interface import implements
from zope.component import getUtility
from zope.component import queryAdapter
from shapely.geometry import asShape
from Products.CMFCore.utils import getToolByName
from plone.app.layout.viewlets import ViewletBase
from plone.registry.interfaces import IRegistry
from collective.geo.geographer.interfaces import IGeoreferenced
from collective.geo.settings.interfaces import (
IGeoCustomFeatureStyle,
IGeoFeatureStyle,
IGeoSettings)
from collective.geo.polymaps.interfaces import IJsonPolymapsViewlet
from utils import INLINE_STYLES, create_map_js
class ContentViewlet(ViewletBase):
implements(IJsonPolymapsViewlet)
def get_js(self):
defaultsetting = getUtility(IRegistry).forInterface(IGeoSettings)
zoom = int(defaultsetting.zoom)
shape = asShape(self.coordinates.geo)
lat = shape.centroid.y
lon = shape.centroid.x
return create_map_js(self.context, self.layers())
@property
def coordinates(self):
return IGeoreferenced(self.context)
@property
def geofeaturestyle(self):
self.custom_styles = queryAdapter(self.context, IGeoCustomFeatureStyle)
self.defaultstyles = getUtility(IRegistry).forInterface(IGeoFeatureStyle)
if self.custom_styles and self.custom_styles.use_custom_styles:
return self.custom_styles
else:
return self.defaultstyles
@property
def map_inline_css(self):
"""Return inline CSS for our map according to style settings.
"""
inline_css = ''
for style in INLINE_STYLES:
value = getattr(self.geofeaturestyle, INLINE_STYLES[style], None)
if value:
inline_css += "%s:%s;" % (style, value)
return inline_css or None
@property
def map_viewlet_position(self):
return self.geofeaturestyle.map_viewlet_position
def render(self):
if 'polymaps:' + self.manager.__name__ != self.map_viewlet_position:
return u''
coords = self.coordinates
if coords.type and coords.coordinates:
return super(ContentViewlet, self).render()
else:
return ''
def layers(self):
context_url = self.context.absolute_url()
if not context_url.endswith('/'):
context_url += '/'
return [{'name': self.context.Title(),
'url': context_url + '@@geo-json.json',
'id': self.context.getId()}]
class JSViewlet(ViewletBase):
@property
def portal_url(self):
return getToolByName(self.context, 'portal_url')()
|
PypiClean
|
/XGEE-0.3.0.tar.gz/XGEE-0.3.0/xgee/core/plugins/ecoreSync/mdb/esSyncEvents.js
|
import UUID from '../util/uuid.js'
const _watchEvent =(atoken) => {
var resolveAnnouncement=() => {};
var rejectAnnouncement=() => {};
const promise=new Promise(function(resolve,reject){
resolveAnnouncement=(eObject) => { resolve(eObject) };
rejectAnnouncement=() => { reject() };
})
return {
promise: promise,
token: atoken,
check: (token) => {return atoken==token},
resolve: (eObject) => { resolveAnnouncement(eObject) },
reject: () => { rejectAnnouncement(); }
}
};
export default class EsSyncLock{
constructor(){
this.events=new Map();
this.history=new Set();
}
reserve(syncEventId){
let uuid=new UUID.v4
let token=null;
if(!this.events.has(syncEventId)){
            token=uuid.toString();
this.events.set(syncEventId,_watchEvent(token));
}
return token;
}
fire(syncEventId,token,value,success=true){
var res=false;
if(this.canFire(syncEventId,token) && this.events.has(syncEventId)){
if(success){
this.events.get(syncEventId).resolve(value);
}
else
{
this.events.get(syncEventId).reject(value);
}
this.history.add(syncEventId);
this.events.delete(syncEventId);
res=true;
}
return res;
}
isReserved(syncEventId){
var res=false;
if(this.events.has(syncEventId)){
res=true;
}
return res;
}
canFire(syncEventId,token)
{
var res=false;
if(!this.events.has(syncEventId)){
res=true;
}
else
{
if(this.events.get(syncEventId).check(token))
{
res=true;
}
}
return res;
}
async waitFor(syncEventId){
var event=this.events.get(syncEventId);
if(event)
{
var res=await event.promise
return res;
}
else
{
throw 'no event with id='+syncEventId+' is present'
}
}
}
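// Usage sketch (event ids are illustrative): a producer reserves an event id and
// later fires it; any consumer can await the same id via waitFor().
//   const sync = new EsSyncLock();
//   const token = sync.reserve('objectLoaded#42');
//   sync.waitFor('objectLoaded#42').then((eObject) => { /* use eObject */ });
//   sync.fire('objectLoaded#42', token, loadedEObject);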
|
PypiClean
|
/swordcloud-0.0.9.tar.gz/swordcloud-0.0.9/README.md
|
# **swordcloud**
`swordcloud`: A semantic word cloud generator that uses t-SNE and k-means clustering to visualize words in high-dimensional semantic space. Based on [A. Mueller's `wordcloud` module](https://github.com/amueller/word_cloud), `swordcloud` can generate semantic word clouds from Thai and English texts based on any word vector models.
## **Content**
1. [Installation](#installation)
2. [Usage](#usage)\
2.1 [Initialize `SemanticWordCloud` instance](#initialize-semanticwordcloud-instance)\
2.2 [Generate from Raw Text](#generate-from-raw-text)\
2.3 [Generate from Word Frequencies](#generate-from-word-frequencies)\
2.4 [Generate k-means Cluster Clouds](#generate-k-means-cluster-clouds)\
2.5 [Recolor Words](#recolor-words)\
2.6 [Export Word Clouds](#export-word-clouds)
3. [Color "Functions"](#color-functions)
## **Installation**
`swordcloud` can be installed using `pip`:
```
pip install swordcloud
```
Optionally, if you want to be able to embed fonts directly into [the generated SVGs](#export-word-clouds), an `embedfont` extra can also be specified:
```
pip install swordcloud[embedfont]
```
As of **version 0.0.9**, the exact list of dependencies is as follows:
- `python >= 3.8`
- `numpy >= 1.21.0`
- `pillow`
- `matplotlib >= 1.5.3`
- `gensim >= 4.0.0`
- `pandas`
- `pythainlp >= 3.1.0`
- `k-means-constrained`
- `scikit-learn`
- (optional) `fonttools`
## **Usage**
All code below can also be found in [the example folder](https://github.com/nlp-chula/swordcloud/tree/main/example).
### **Initialize `SemanticWordCloud` instance**
For most use cases, the `SemanticWordCloud` class is the main API the users will be interacting with.
```python
from swordcloud import SemanticWordCloud
# See the `Color "Functions"` section for detail about these color functions
from swordcloud.color_func import SingleColorFunc
wordcloud = SemanticWordCloud(
language = 'TH',
width = 1600,
height = 800,
max_font_size = 150,
prefer_horizontal = 1,
color_func = SingleColorFunc('black')
)
```
Please refer to the documentation in [src/swordcloud/wordcloud.py](https://github.com/nlp-chula/swordcloud/blob/main/src/swordcloud/wordcloud.py) or in your IDE for more detail about various options available for customizing the word cloud.
### **Generate from Raw Text**
```python
# Can also be one large string instead of a list of strings
raw_text = list(map(str.strip, open('raw_text.txt', encoding='utf-8')))
wordcloud.generate_from_text(raw_text, random_state=42)
```

### **Generate from Word Frequencies**
```python
freq = {}
for line in open("word_frequencies.tsv", encoding="utf-8"):
word, count = line.strip().split('\t')
freq[word] = int(count)
wordcloud.generate_from_frequencies(freq, random_state=42)
```

### **Generate k-means Cluster Clouds**
```python
wordcloud = SemanticWordCloud(
language = 'TH',
# make sure the canvas is appropriately large for the number of clusters
width = 2400,
height = 1200,
max_font_size = 150,
prefer_horizontal = 1,
color_func = SingleColorFunc('black')
)
wordcloud.generate_from_text(raw_text, kmeans=6, random_state=42)
# Or directly from `generate_kmeans_cloud` if you already have word frequencies
wordcloud.generate_kmeans_cloud(freq, n_clusters=6, random_state=42)
# Each sub cloud can then be individually interacted with
# by accessing individual cloud in `sub_clouds` attribute
for cloud in wordcloud.sub_clouds:
...
```
### **Recolor Words**
```python
# If the generated colors are not to your liking
# We can recolor them instead of re-generating the whole cloud
from swordcloud.color_func import RandomColorFunc
wordcloud.recolor(RandomColorFunc, random_state=42)
```

### **Export Word Clouds**
- As `pillow`'s `Image`
```python
img = wordcloud.to_image()
```
- As image file
```python
wordcloud.to_file('wordcloud.png')
```
- As SVG
```python
# Without embedded font
svg = wordcloud.to_svg()
# With embedded font
svg = wordcloud.to_svg(embed_font=True)
# Note that in order to be able to embed fonts
# the `fonttools` package needs to be installed
```
- As `numpy`'s image array
```python
array = wordcloud.to_array()
```
## **Color "Functions"**
A number of built-in color "functions" can be accessed from `swordcloud.color_func`:
```python
from swordcloud.color_func import <your_color_function_here>
```
The list of available functions is as follows:
- `RandomColorFunc` (Default)\
Return a random color.
- `ColorMapFunc`\
Return a random color from the user-specified [`matplotlib`'s colormap](https://matplotlib.org/stable/gallery/color/colormap_reference.html).
- `ImageColorFunc`\
Use a user-provided colored image array to determine word color at each position on the canvas.
- `SingleColorFunc`\
Always return the user-specified color every single time, resulting in every word having the same color.
- `ExactColorFunc`\
Use a user-provided color dictionary to determine exactly which word should have which color.
All the above functions, **except** `RandomColorFunc` which cannot be customized further, must be initialized before passing them to the `SemanticWordCloud` class. For example:
```python
from swordcloud.color_func import ColorMapFunc
color_func = ColorMapFunc("magma")
wordcloud = SemanticWordCloud(
...
color_func = color_func
...
)
```
Users can also implement their own color functions, provided that they are callable with the following signature:
**Input**:
- `word: str`\
The word we are coloring
- `font_size: int`\
Font size of the word
- `position: tuple[int, int]`\
Coordinate of the top-left point of the word's bounding box on the canvas
- `orientation: int`\
[`pillow`'s orientation](https://pillow.readthedocs.io/en/stable/reference/Image.html#transpose-methods).
- `font_path: str`\
Path to the font file (OTF or TFF)
- `random_state: random.Random`\
Python's `random.Random` object
**Return**:\
Any object that can be interpreted as a color by `pillow`. See [`pillow`'s documentation](https://pillow.readthedocs.io/en/stable/) for more detail.
Internally, arguments to color functions are always passed as keyword arguments so they can be in any order. However, if your functions only use some of them, make sure to include `**kwargs` at the end of your function headers so that other arguments do not cause an error.
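For example, a minimal custom color function might shade words by their font size. This is only an illustrative sketch: the function name and the shading formula are arbitrary choices, not part of `swordcloud` itself.
```python
from swordcloud import SemanticWordCloud

def grayscale_by_size(word, font_size, **kwargs):
    # Bigger words get darker shades of gray; the remaining arguments
    # (position, orientation, font_path, random_state) are absorbed by **kwargs.
    shade = max(0, 200 - 2 * font_size)
    return f"rgb({shade}, {shade}, {shade})"

wordcloud = SemanticWordCloud(
    language = 'TH',
    color_func = grayscale_by_size
)
```
Because the arguments are passed as keyword arguments, a function that only needs `word` and `font_size` can safely ignore the rest through `**kwargs`.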
|
PypiClean
|
/kelvin_sdk-7.12.2-py3-none-any.whl/kelvin/sdk/lib/schema/schema_manager.py
|
import json
from json import JSONDecodeError
from typing import Any, Dict, Optional, Tuple
import jsonschema
import requests
from jsonschema import RefResolver
from yaml.parser import ParserError
from kelvin.sdk.lib.configs.general_configs import GeneralConfigs
from kelvin.sdk.lib.configs.schema_manager_configs import SchemaManagerConfigs
from kelvin.sdk.lib.exceptions import InvalidApplicationConfiguration, InvalidSchemaVersionException
from kelvin.sdk.lib.models.apps.ksdk_app_configuration import ApplicationFlavour, ProjectType
from kelvin.sdk.lib.models.apps.ksdk_app_setup import ProjectCreationParametersObject
from kelvin.sdk.lib.models.generic import KPath
from kelvin.sdk.lib.models.ksdk_docker import DockerImageName
from kelvin.sdk.lib.models.operation import OperationResponse
from kelvin.sdk.lib.models.types import VersionStatus
from kelvin.sdk.lib.session.session_manager import session_manager
from kelvin.sdk.lib.utils.logger_utils import logger
from kelvin.sdk.lib.utils.version_utils import assess_version_status
_RESOLVER = RefResolver("https://apps.kelvininc.com/schemas/kelvin/", {})
def generate_base_schema_template(project_creation_parameters_object: ProjectCreationParametersObject) -> dict:
"""
Generate the base schema template.
Attempt to retrieve the latest schema version and generate the default from it.
Parameters
----------
project_creation_parameters_object : ProjectCreationParametersObject
the app creation parameters object used to generate the default schema.
Returns
-------
dict
A dict containing the default app creation object.
"""
latest_schema_version, latest_schema_contents, latest_schema_path = get_latest_app_schema_version()
app_type_obj: Dict[str, Any] = {"type": project_creation_parameters_object.app_type.value}
if project_creation_parameters_object.app_type == ProjectType.kelvin:
kelvin_app_dict = build_kelvin_app_block(project_creation_parameters_object)
app_type_obj.update(kelvin_app_dict)
elif project_creation_parameters_object.app_type == ProjectType.bridge:
language_block = {"type": project_creation_parameters_object.kelvin_app_lang.value}
language_block.update(project_creation_parameters_object.get_language_block())
app_type_obj.update(
{
"bridge": {
"logging_level": "INFO",
"language": language_block,
"configuration": {},
"metrics_map": [],
}
}
)
else:
app_type_obj.update(
{
"docker": {
"dockerfile": "Dockerfile",
"context": ".",
"args": [],
}
}
)
creation_object = {
"spec_version": latest_schema_version,
"info": {
"name": project_creation_parameters_object.app_name,
"title": project_creation_parameters_object.app_name,
"version": "1.0.0",
"description": project_creation_parameters_object.app_description
or project_creation_parameters_object.app_name,
},
"app": app_type_obj,
}
_validate_schema(content=creation_object, schema=latest_schema_contents)
return creation_object
def build_kelvin_app_block(project_creation_parameters_object: ProjectCreationParametersObject) -> dict:
"""Creates the app configuration for the kelvin apps
Parameters
----------
project_creation_parameters_object : ProjectCreationParametersObject
the app creation parameters object used to generate the default schema.
Returns
-------
dict
The schema app block for kelvin apps
"""
# 1 - Create the language block
language_block = {"type": project_creation_parameters_object.kelvin_app_lang.value}
language_block.update(project_creation_parameters_object.get_language_block())
# 2 - Create the interface block
kelvin_block = {"language": language_block}
# add mqtt config if flavour is pubsub
if project_creation_parameters_object.app_flavour is ApplicationFlavour.pubsub:
broker_name = session_manager.get_current_session_metadata().sdk.components.kelvin_broker
kelvin_broker_container = DockerImageName.parse(name=broker_name)
kelvin_block.update(
{
"outputs": [ # type: ignore
{"name": "bar", "data_type": "raw.float32", "targets": [{"asset_names": ["some-asset"]}]}
],
"mqtt": {
"ip": kelvin_broker_container.container_name,
"port": GeneralConfigs.default_mqtt_port, # type: ignore
},
}
)
return {
"kelvin": kelvin_block,
}
def validate_app_schema_from_app_config_file(
app_config: Optional[Dict] = None, app_config_file_path: Optional[KPath] = None
) -> bool:
"""
When provided with an app configuration file, retrieve the schema for that version and validate it.
Parameters
----------
app_config : Optional[Dict]
the alternative app configuration to the app_config_file_path.
app_config_file_path : Optional[KPath]
the path to the app configuration.
Returns
-------
bool
A boolean indicating whether or not the schema complies with the provided spec.
"""
app_config_content: dict = {}
if app_config:
app_config_content = app_config
if not app_config_content and app_config_file_path:
app_config_content = app_config_file_path.read_yaml()
if not app_config_content:
raise InvalidApplicationConfiguration()
# Retrieve the current spec version, the minimum and latest values
spec_version: str = ""
try:
spec_version = app_config_content.get("spec_version", "")
current_session_metadata = session_manager.get_current_session_metadata()
min_schema_version, latest_schema_version = current_session_metadata.get_min_and_latest_schema_versions()
version_status = assess_version_status(
current_version=spec_version, minimum_version=min_schema_version, latest_version=latest_schema_version
)
if version_status == VersionStatus.UNSUPPORTED:
raise InvalidSchemaVersionException(
min_version=min_schema_version, current_version=spec_version, latest_version=latest_schema_version
)
except InvalidSchemaVersionException:
raise
except Exception:
logger.warning("No spec version defined. Proceeding with the latest schema version")
latest_schema_contents, _ = _get_and_persist_app_schema(
schema_url=SchemaManagerConfigs.general_app_schema_url, schema_version=spec_version
)
return _validate_schema(content=app_config_content, schema=latest_schema_contents)
def schema_validate(
file_path: str, schema_file_path: Optional[str], full_schema_errors: bool = True
) -> OperationResponse:
"""
Validate a file against a schema.
Parameters
----------
file_path : str
The path to the file to validate.
schema_file_path : Optional[str]
The path to the schema file to validate the file against.
full_schema_errors : bool
Indicates whether or not it should log the complete stack trace.
Returns
-------
OperationResponse
an OperationResponse object encapsulating the result of the file schema validation.
"""
try:
file_path_object: KPath = KPath(file_path)
if not file_path_object.exists():
raise ValueError("The provided file does not exist.")
schema_file_path_object: Optional[KPath] = KPath(schema_file_path) if schema_file_path else None
if schema_file_path_object and schema_file_path_object.exists():
schema_content = schema_file_path_object.read_yaml()
else:
_, schema_content, _ = get_latest_app_schema_version()
file_content = file_path_object.read_yaml()
validation_result = _validate_schema(content=file_content, schema=schema_content)
success_message = "The provided file complies with the schema."
logger.relevant(success_message)
return OperationResponse(success=validation_result, log=success_message)
except (jsonschema.exceptions.ValidationError, jsonschema.exceptions.SchemaError) as exc:
error_message = exc.message
if full_schema_errors:
error_message = f"Error validating schema: {str(exc)}"
logger.exception(error_message)
return OperationResponse(success=False, log=error_message)
except ParserError:
error_message = "Invalid file format: Cannot parse yaml content"
logger.exception(error_message)
return OperationResponse(success=False, log=error_message)
except Exception as exc:
error_message = f"Error validating schema: {exc}"
logger.exception(error_message)
return OperationResponse(success=False, log=error_message)
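# Usage sketch (file names are hypothetical): validate a local app configuration
# against the latest platform schema and inspect the outcome.
#   response = schema_validate("app.yaml", schema_file_path=None)
#   if not response.success:
#       print(response.log)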
def schema_get(schema_file_path: Optional[str] = None) -> OperationResponse:
"""
    Yield the content of a schema.
Parameters
----------
schema_file_path : Optional[str]
The path to the schema file to yield.
Returns
-------
OperationResponse
an OperationResponse object encapsulating the result of the file schema validation.
"""
try:
schema_contents: dict = {}
if schema_file_path:
path_schema_file_path: KPath = KPath(schema_file_path.strip('"')).complete_path()
if path_schema_file_path.exists():
schema_contents = path_schema_file_path.read_yaml()
else:
raise InvalidApplicationConfiguration(message="Please provide a valid file")
else:
_, schema_contents, _ = get_latest_app_schema_version()
return OperationResponse(success=True, data=schema_contents)
except Exception as exc:
error_message = f"Error retrieving the schema: {exc}"
logger.exception(error_message)
return OperationResponse(success=False, log=error_message)
def get_latest_app_schema_version() -> Tuple[str, dict, KPath]:
"""
Retrieve the latest app schema version and persist it to the configured schema directory.
Returns
-------
A Tuple containing:
1) latest spec version
2) the corresponding schema
3) the path to the persisted schema.
"""
logger.info("Retrieving the latest schema version")
_, latest_schema_version = session_manager.get_current_session_metadata().get_min_and_latest_schema_versions()
latest_schema_contents, latest_schema_file_path = _get_and_persist_app_schema(
schema_url=SchemaManagerConfigs.general_app_schema_url, schema_version=latest_schema_version
)
return latest_schema_version, latest_schema_contents, latest_schema_file_path
def _validate_schema(
content: dict,
schema: Optional[dict] = None,
schema_path: Optional[KPath] = None,
resolver: RefResolver = _RESOLVER,
) -> bool:
"""
Validate a specific content against a schema.
Parameters
----------
content : dict
the content to validate.
schema : Optional[dict]
the schema, as a dict, to validate the content against.
schema_path : Optional[KPath]
the path to the schema to validate the content against.
resolver: RefResolver
optional resolver to override schema component cache
Returns
-------
bool
A bool indicating whether the provided content is valid.
"""
schema_content = {}
if schema:
schema_content = schema
if not schema_content and schema_path:
logger.debug(f'Loading schema from "{schema_path}"')
schema_content = json.loads(schema_path.read_text())
if not schema_content:
raise InvalidApplicationConfiguration(message="Please provide a valid schema")
jsonschema.validate(instance=content, schema=schema_content, resolver=resolver)
logger.debug("Provided content successfully validated against the schema")
return True
def _get_and_persist_app_schema(schema_url: str, schema_version: str) -> Tuple[dict, KPath]:
"""
Attempt to retrieve the specified schema/version combination from the platform.
Persist said combination in the default directory
Parameters
----------
schema_url : str
the url to retrieve the schema from.
schema_version : str
the latest schema version.
Returns
-------
Tuple[dict, KPath]
A Tuple containing both the latest schema contents and the path to its persisted file.
"""
schema_contents: dict = {}
schema_file_path = _get_schema_version_file_path(schema_version=schema_version)
# 1 - If there's already a cached version, use it
if schema_file_path and schema_file_path.exists():
logger.info(f"Valid schema available locally. Using cached version ({schema_file_path})")
try:
schema_contents = schema_file_path.read_json()
except JSONDecodeError:
schema_contents = {}
# 2 - If not, fetch it and persist it
if not schema_contents:
schema_contents = _fetch_app_schema_from_url(schema_url=schema_url, schema_version=schema_version)
schema_file_path.write_json(content=schema_contents)
    # 3 - Return both the schema contents and the file path
return schema_contents, schema_file_path
def _fetch_app_schema_from_url(schema_url: str, schema_version: str) -> Dict:
"""
Fetch the targeted schema version from the provided schema url.
Parameters
----------
schema_url : str
the url to retrieve the schema from.
schema_version : str
the latest schema version.
Returns
-------
The latest schema contents of the platform.
"""
specific_app_schema_response = requests.get(
schema_url.format(version=schema_version), timeout=SchemaManagerConfigs.request_timeout
)
if specific_app_schema_response.status_code != 200:
raise InvalidApplicationConfiguration(message=f'Invalid schema version "{schema_version}"')
return specific_app_schema_response.json()
def _get_schema_version_file_path(schema_version: str) -> KPath:
"""
Centralize all calls to get the ksdk schemas directory path.
Parameters
----------
schema_version : str
The version corresponding to the generated schema file path.
Returns
-------
KPath
The KPath of the specific schema file.
"""
schema_storage_path: KPath = session_manager.get_global_ksdk_configuration().ksdk_schema_dir_path
return KPath(schema_storage_path / f"{schema_version}.json").complete_path()
|
PypiClean
|
/tensorflow-gpu-macosx-1.8.1.tar.gz/tensorflow/contrib/framework/python/framework/graph_util.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
# pylint: disable=unused-import
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework.graph_util_impl import _assert_nodes_are_present
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.framework.graph_util_impl import _node_name
__all__ = ["fuse_op", "get_placeholders"]
def fuse_op(graph_def, input_nodes, output_nodes, output_dtypes,
output_quantized, op_name, op_type):
"""Fuse subgraph between input_nodes and output_nodes into a single custom op.
Args:
graph_def: A graph_pb2.GraphDef proto.
input_nodes: input nodes to the subgraph to be fused.
output_nodes: output nodes to the subgraph to be fused.
output_dtypes: A list of output datatypes for the custom op
output_quantized: A boolean flag that indicates if output is quantized
op_name: fused op name.
op_type: fused op type.
Returns:
The GraphDef of the new graph.
Raises:
TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto.
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be a graph_pb2.GraphDef proto.")
if isinstance(input_nodes, six.string_types):
raise TypeError("input_nodes must be a list.")
if isinstance(output_nodes, six.string_types):
raise TypeError("output_nodes must be a list.")
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
_assert_nodes_are_present(name_to_node, input_nodes + output_nodes)
  # Nodes up to and including input_nodes
reachable_by_input = _bfs_for_reachable_nodes(input_nodes, name_to_input_name)
  # Nodes up to and including output_nodes
reachable_by_output = _bfs_for_reachable_nodes(output_nodes,
name_to_input_name)
# Set of nodes in the list input_nodes
input_nodes_set = set(input_nodes)
# Set of nodes in the list output_nodes
output_nodes_set = set(output_nodes)
nodes_post_output = []
for node in graph_def.node:
n = _node_name(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
# n is between input and output, i.e., part of the fused op
next_to_visit = [n]
visited = set()
while next_to_visit:
cur_node = next_to_visit[0]
visited.add(cur_node)
del next_to_visit[0]
if cur_node in reachable_by_input and cur_node not in input_nodes_set:
raise TypeError("Node %s uses input %s not in input_nodes." %
(n, cur_node))
if cur_node not in input_nodes_set:
next_to_visit += [
input_node for input_node in name_to_input_name[cur_node]
if input_node not in visited
]
elif n not in reachable_by_input:
nodes_post_output.append(n)
  # Add all nodes up to the input nodes
out = graph_pb2.GraphDef()
reachable_by_input_sorted = sorted(
list(reachable_by_input), key=lambda n: name_to_seq_num[n])
for node in reachable_by_input_sorted:
out.node.extend([copy.deepcopy(name_to_node[node])])
# Add the custom op
new_node = node_def_pb2.NodeDef()
for node in input_nodes:
new_node.input.append(node)
new_node.attr["_output_types"].list.type[:] = output_dtypes
new_node.attr["_output_quantized"].b = output_quantized
new_node.op = op_type
new_node.name = op_name
out.node.extend([new_node])
# Add the nodes in the output of the custom op
for index, n in enumerate(output_nodes):
assert len(name_to_node[n].input) == 1
new_node = copy.deepcopy(name_to_node[n])
del new_node.input[:]
new_node.input.append(op_name + (":" + str(index) if index != 0 else ""))
out.node.extend([new_node])
# Add the nodes post output_nodes
for n in nodes_post_output:
out.node.extend([copy.deepcopy(name_to_node[n])])
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
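# Usage sketch (node and op names are hypothetical; the dtypes are the numeric
# DataType enum values, e.g. types_pb2.DT_FLOAT from tensorflow.core.framework):
#   fused = fuse_op(graph_def, ["input"], ["output"],
#                   [types_pb2.DT_FLOAT], False, "my_fused_op", "MyFusedOp")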
def get_placeholders(graph):
"""Get placeholders of a graph.
For example:
```python
a = tf.placeholder(dtype=tf.float32, shape=[2, 2], name='a')
a = tf.placeholder(dtype=tf.int32, shape=[3, 2], name='b')
tf.contrib.framework.get_placeholders(tf.get_default_graph())
# Returns:
# [<tf.Tensor 'a:0' shape=(2, 2) dtype=float32>,
# <tf.Tensor 'b:0' shape=(3, 2) dtype=int32>]
```
Args:
graph: A tf.Graph.
Returns:
A list contains all placeholders of given graph.
Raises:
TypeError: If `graph` is not a tensorflow graph.
"""
if not isinstance(graph, ops.Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# For each placeholder() call, there is a corresponding
# operation of type 'Placeholder' registered to the graph.
# The return value (a Tensor) of placeholder() is the
# first output of this operation in fact.
operations = graph.get_operations()
result = [i.outputs[0] for i in operations if i.type == "Placeholder"]
return result
|
PypiClean
|
/moralis-0.1.37.tar.gz/moralis-0.1.37/src/openapi_evm_api/paths/nft_address_trades/get.py
|
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from openapi_evm_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from openapi_evm_api import schemas # noqa: F401
from openapi_evm_api.model.trade_collection import TradeCollection
from openapi_evm_api.model.chain_list import ChainList
from . import path
# Query params
ChainSchema = ChainList
class FromBlockSchema(
schemas.IntSchema
):
class MetaOapg:
inclusive_minimum = 0
ToBlockSchema = schemas.StrSchema
FromDateSchema = schemas.StrSchema
ToDateSchema = schemas.StrSchema
class MarketplaceSchema(
schemas.EnumBase,
schemas.StrSchema
):
class MetaOapg:
enum_value_to_name = {
"opensea": "OPENSEA",
}
@schemas.classproperty
def OPENSEA(cls):
return cls("opensea")
CursorSchema = schemas.StrSchema
class LimitSchema(
schemas.IntSchema
):
class MetaOapg:
inclusive_minimum = 0
DisableTotalSchema = schemas.BoolSchema
RequestRequiredQueryParams = typing_extensions.TypedDict(
'RequestRequiredQueryParams',
{
}
)
RequestOptionalQueryParams = typing_extensions.TypedDict(
'RequestOptionalQueryParams',
{
'chain': typing.Union[ChainSchema, ],
'from_block': typing.Union[FromBlockSchema, decimal.Decimal, int, ],
'to_block': typing.Union[ToBlockSchema, str, ],
'from_date': typing.Union[FromDateSchema, str, ],
'to_date': typing.Union[ToDateSchema, str, ],
'marketplace': typing.Union[MarketplaceSchema, str, ],
'cursor': typing.Union[CursorSchema, str, ],
'limit': typing.Union[LimitSchema, decimal.Decimal, int, ],
'disable_total': typing.Union[DisableTotalSchema, bool, ],
},
total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
pass
request_query_chain = api_client.QueryParameter(
name="chain",
style=api_client.ParameterStyle.FORM,
schema=ChainSchema,
explode=True,
)
request_query_from_block = api_client.QueryParameter(
name="from_block",
style=api_client.ParameterStyle.FORM,
schema=FromBlockSchema,
explode=True,
)
request_query_to_block = api_client.QueryParameter(
name="to_block",
style=api_client.ParameterStyle.FORM,
schema=ToBlockSchema,
explode=True,
)
request_query_from_date = api_client.QueryParameter(
name="from_date",
style=api_client.ParameterStyle.FORM,
schema=FromDateSchema,
explode=True,
)
request_query_to_date = api_client.QueryParameter(
name="to_date",
style=api_client.ParameterStyle.FORM,
schema=ToDateSchema,
explode=True,
)
request_query_marketplace = api_client.QueryParameter(
name="marketplace",
style=api_client.ParameterStyle.FORM,
schema=MarketplaceSchema,
explode=True,
)
request_query_cursor = api_client.QueryParameter(
name="cursor",
style=api_client.ParameterStyle.FORM,
schema=CursorSchema,
explode=True,
)
request_query_limit = api_client.QueryParameter(
name="limit",
style=api_client.ParameterStyle.FORM,
schema=LimitSchema,
explode=True,
)
request_query_disable_total = api_client.QueryParameter(
name="disable_total",
style=api_client.ParameterStyle.FORM,
schema=DisableTotalSchema,
explode=True,
)
# Path params
AddressSchema = schemas.StrSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
'address': typing.Union[AddressSchema, str, ],
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_address = api_client.PathParameter(
name="address",
style=api_client.ParameterStyle.SIMPLE,
schema=AddressSchema,
required=True,
)
_auth = [
'ApiKeyAuth',
]
SchemaFor200ResponseBodyApplicationJson = TradeCollection
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _get_nft_trades_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _get_nft_trades_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _get_nft_trades_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _get_nft_trades_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Get NFT trades by marketplace
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params)
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_address,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
prefix_separator_iterator = None
for parameter in (
request_query_chain,
request_query_from_block,
request_query_to_block,
request_query_from_date,
request_query_to_date,
request_query_marketplace,
request_query_cursor,
request_query_limit,
request_query_disable_total,
):
parameter_data = query_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
if prefix_separator_iterator is None:
prefix_separator_iterator = parameter.get_prefix_separator_iterator()
serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
for serialized_value in serialized_data.values():
used_path += serialized_value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class GetNftTrades(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def get_nft_trades(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get_nft_trades(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get_nft_trades(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get_nft_trades(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._get_nft_trades_oapg(
query_params=query_params,
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
path_params: RequestPathParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._get_nft_trades_oapg(
query_params=query_params,
path_params=path_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
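# ---------------------------------------------------------------------------
# Editor's hedged usage sketch (not part of the generated module). The client
# construction below follows the usual pattern of this OpenAPI generator and is
# an assumption; only GetNftTrades.get_nft_trades() / ApiForget.get() are taken
# from the code above.
#
#     configuration = api_client.Configuration()          # assumed constructor
#     with api_client.ApiClient(configuration) as client:
#         api = GetNftTrades(client)
#         api_response = api.get_nft_trades(query_params={})
#         print(api_response.response.status)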
|
PypiClean
|
/assemblyline_service_server-4.4.0.50-py3-none-any.whl/assemblyline_service_server/config.py
|
import logging
import os
import threading
from assemblyline.common import forge
from assemblyline.common import log as al_log
from assemblyline.common.version import BUILD_MINOR, FRAMEWORK_VERSION, SYSTEM_VERSION
from assemblyline.remote.datatypes.counters import Counters
from assemblyline.remote.datatypes import get_client
from assemblyline_core.safelist_client import SafelistClient
from assemblyline_core.tasking_client import TaskingClient
config = forge.get_config()
redis = get_client(
host=config.core.redis.nonpersistent.host,
port=config.core.redis.nonpersistent.port,
private=False,
)
redis_persist = get_client(
host=config.core.redis.persistent.host,
port=config.core.redis.persistent.port,
private=False,
)
#################################################################
# Configuration
CLASSIFICATION = forge.get_classification()
DEBUG = config.ui.debug
VERSION = os.environ.get('ASSEMBLYLINE_VERSION', f"{FRAMEWORK_VERSION}.{SYSTEM_VERSION}.{BUILD_MINOR}.dev0")
AUTH_KEY = os.environ.get('SERVICE_API_KEY', 'ThisIsARandomAuthKey...ChangeMe!')
RATE_LIMITER = Counters(prefix="quota", host=redis, track_counters=True)
# End of Configuration
#################################################################
#################################################################
# Prepare loggers
config.logging.log_to_console = config.logging.log_to_console or DEBUG
al_log.init_logging('svc', config=config)
LOGGER = logging.getLogger('assemblyline.svc')
LOGGER.debug('Logger ready!')
# End of prepare logger
#################################################################
#################################################################
# Global instances
STORAGE = forge.get_datastore(config=config)
FILESTORE = forge.get_filestore(config=config)
LOCK = threading.Lock()
TASKING_CLIENT = TaskingClient(datastore=STORAGE, filestore=FILESTORE, redis=redis, redis_persist=redis_persist)
SAFELIST_CLIENT = SafelistClient(datastore=STORAGE)
# End global
#################################################################
|
PypiClean
|
/django-sencha-1.3.55555.tar.gz/django-sencha-1.3.55555/sencha/static/sencha/Ux/locale/override/st/picker/Date.js
|
Ext.define('Ux.locale.override.st.picker.Date', {
override : 'Ext.picker.Date',
setLocale : function(locale) {
var me = this,
locales = me.locales || me.getInitialConfig().locales,
months = locales.months,
day = locales.dayText,
month = locales.monthText,
year = locales.yearText,
slotOrder = locales.slotOrder,
manager = me.locale,
defaultText = '',
defaultDay = 'Day',
defaultMonth = 'Month',
defaultYear = 'Year',
defaultSlotOrder = ['month', 'day', 'year'],
slot, store, value;
if(months) {
if(Ext.isObject(months)) {
defaultText = months.defaultText;
months = months.key;
}
months = manager.get(months, defaultText);
if(Ext.isObject(months)) {
slot = this.down('pickerslot[name=month]');
store = slot && slot.getStore();
if(store) {
store.each(function(rec) {
value = rec.get('value');
rec.set('text', months[value]);
});
}
}
}
if(day) {
if(Ext.isObject(day)) {
defaultDay = day.defaultDay;
day = day.key;
}
day = manager.get(day, defaultDay);
if(Ext.isString(day)) {
me.setDayText(day);
}
}
if(month) {
if(Ext.isObject(month)) {
defaultMonth = month.defaultMonth;
month = month.key;
}
month = manager.get(month, defaultMonth);
if(Ext.isString(month)) {
me.setMonthText(month);
}
}
if(year) {
if(Ext.isObject(year)) {
defaultYear = year.defaultYear;
year = year.key;
}
year = manager.get(year, defaultYear);
if(Ext.isString(year)) {
me.setYearText(year);
}
}
if(slotOrder) {
if(Ext.isObject(slotOrder)) {
defaultSlotOrder = slotOrder.defaultSlotOrder;
slotOrder = slotOrder.key;
}
slotOrder = Ext.JSON.decode(manager.get(slotOrder, defaultSlotOrder));
if(Ext.isArray(slotOrder)) {
me.setSlotOrder(slotOrder);
}
}
me.callParent(arguments);
}
});
|
PypiClean
|
/detectron2_cdo-0.5.tar.gz/detectron2_cdo-0.5/detectron2/model_zoo/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py
|
from ..common.data.coco import dataloader
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.optim import SGD as optimizer
from ..common.train import train
from detectron2.modeling.mmdet_wrapper import MMDetDetector
from detectron2.config import LazyCall as L
model = L(MMDetDetector)(
detector=dict(
type="MaskRCNN",
pretrained="torchvision://resnet50",
backbone=dict(
type="ResNet",
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
norm_eval=True,
style="pytorch",
),
neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
rpn_head=dict(
type="RPNHead",
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type="AnchorGenerator",
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64],
),
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0],
),
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
roi_head=dict(
type="StandardRoIHead",
bbox_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
bbox_head=dict(
type="Shared2FCBBoxHead",
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2],
),
reg_class_agnostic=False,
loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type="L1Loss", loss_weight=1.0),
),
mask_roi_extractor=dict(
type="SingleRoIExtractor",
roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
),
mask_head=dict(
type="FCNMaskHead",
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
),
),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False,
),
allowed_border=-1,
pos_weight=-1,
debug=False,
),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
assigner=dict(
type="MaxIoUAssigner",
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1,
),
sampler=dict(
type="RandomSampler",
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
),
mask_size=28,
pos_weight=-1,
debug=False,
),
),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type="nms", iou_threshold=0.7),
min_bbox_size=0,
),
rcnn=dict(
score_thr=0.05,
nms=dict(type="nms", iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5,
),
),
),
pixel_mean=[123.675, 116.280, 103.530],
pixel_std=[58.395, 57.120, 57.375],
)
dataloader.train.mapper.image_format = "RGB" # torchvision pretrained model
train.init_checkpoint = None # pretrained model is loaded inside backbone
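# ---------------------------------------------------------------------------
# Editor's hedged usage sketch (not part of this config). A LazyConfig file
# like this one is normally loaded and instantiated roughly as follows; the
# path below is an assumption and mmdet must be installed for MMDetDetector.
#
#     from detectron2.config import LazyConfig, instantiate
#     cfg = LazyConfig.load("configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py")
#     mask_rcnn = instantiate(cfg.model)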
|
PypiClean
|
/WebCore-2.0.4.tar.gz/WebCore-2.0.4/web/ext/annotation.py
|
# ## Imports
from __future__ import unicode_literals
from inspect import ismethod, getfullargspec
from web.core.compat import items
# ## Extension
class AnnotationExtension(object):
"""Utilize Python 3 function annotations as a method to filter arguments coming in from the web.
Argument annotations are treated as callbacks to execute, passing in the unicode value coming in from the web and
swapping it with the value returned by the callback. This allows for trivial typecasting to most built-in Python
types such as `int`, `float`, etc., as well as creative use such as `','.split` to automatically split a comma-
separated value. One can of course also write custom callbacks, notably ones that raise `HTTPException`
subclasses so that the failure is reported as a proper HTTP error rather than an Internal Server Error.
For example:
def multiply(a: int, b: int):
return str(a * b)
This extension also performs a utility wrapping of returned values in the form of a 2-tuple of the return
annotation itself and the value returned by the callable endpoint. This integrates well with the view registered
by the `web.template` package to define a template at the head of the function, returning data for the template
to consume:
def hello(name="world"): -> 'mako:hello.html'
return dict(name=name)
If your editor has difficulty syntax highlighting such annotations, check for a Python 3 compatible update to your
editor's syntax definitions.
"""
__slots__ = tuple()
provides = ['annotation', 'cast', 'typecast'] # Export these symbols for other extensions to depend upon.
# ### Request-Local Callbacks
def mutate(self, context, handler, args, kw):
"""Inspect and potentially mutate the given handler's arguments.
The args list and kw dictionary may be freely modified, though invalid arguments to the handler will fail.
"""
annotations = getattr(handler.__func__ if hasattr(handler, '__func__') else handler, '__annotations__', None)
if not annotations:
return
argspec = getfullargspec(handler)
arglist = list(argspec.args)
if ismethod(handler):
del arglist[0]
for i, value in enumerate(list(args)):
key = arglist[i]
if key in annotations:
args[i] = annotations[key](value)
# Convert keyword arguments
for key, value in list(items(kw)):
if key in annotations:
kw[key] = annotations[key](value)
def transform(self, context, handler, result):
"""Transform the value returned by the controller endpoint.
This extension transforms returned values if the endpoint has a return type annotation.
"""
handler = handler.__func__ if hasattr(handler, '__func__') else handler
annotation = getattr(handler, '__annotations__', {}).get('return', None)
if annotation:
return (annotation, result)
return result
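# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of WebCore): a standalone imitation of
# the casting behaviour documented above, where annotations act as callbacks
# that convert incoming text values.
#
#     def multiply(a: int, b: int):
#         return str(a * b)
#
#     def cast_kwargs(fn, kw):
#         annotations = getattr(fn, '__annotations__', {})
#         return {k: annotations[k](v) if k in annotations else v
#                 for k, v in kw.items()}
#
#     multiply(**cast_kwargs(multiply, {'a': '6', 'b': '7'}))  # -> '42'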
|
PypiClean
|
/faculty_sync-0.4.1.tar.gz/faculty_sync-0.4.1/faculty_sync/controller.py
|
import logging
import threading
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from .file_trees import (
compare_file_trees,
get_remote_subdirectories,
remote_is_dir,
)
from .pubsub import Messages
from .screens import (
DifferencesScreen,
RemoteDirectoryPromptScreen,
SynchronizationScreen,
SynchronizationScreenDirection,
WalkingFileTreesScreen,
WalkingFileTreesStatus,
WatchSyncScreen,
)
from .ssh import sftp_from_ssh_details
from .sync import Synchronizer
from .watch_sync import WatcherSynchronizer
class Controller(object):
def __init__(self, configuration, ssh_details, view, exchange):
self._configuration = configuration
self._ssh_details = ssh_details
self._sftp = sftp_from_ssh_details(self._ssh_details)
self._view = view
self._exchange = exchange
self._stop_event = threading.Event()
self._current_screen = None
self._current_screen_subscriptions = []
self._thread = None
self._executor = ThreadPoolExecutor(max_workers=8)
self._synchronizer = None
self._watcher_synchronizer = None
def start(self):
self._exchange.subscribe(
Messages.STOP_CALLED, lambda _: self._stop_event.set()
)
self._exchange.subscribe(
Messages.VERIFY_REMOTE_DIRECTORY,
lambda directory: self._submit(
lambda: self._resolve_remote_directory(directory)
),
)
self._exchange.subscribe(
Messages.PROMPT_FOR_REMOTE_DIRECTORY,
lambda _: self._submit(self._prompt_for_remote_directory),
)
self._exchange.subscribe(
Messages.START_INITIAL_FILE_TREE_WALK,
lambda _: self._submit(self._show_differences),
)
self._exchange.subscribe(
Messages.DISPLAY_DIFFERENCES,
lambda differences: self._submit(
self._display_differences, differences
),
)
self._exchange.subscribe(
Messages.SYNC_PLATFORM_TO_LOCAL,
lambda _: self._submit(self._sync_platform_to_local),
)
self._exchange.subscribe(
Messages.SYNC_LOCAL_TO_PLATFORM,
lambda _: self._submit(self._sync_local_to_platform),
)
self._exchange.subscribe(
Messages.START_WATCH_SYNC,
lambda _: self._submit(self._start_watch_sync),
)
self._exchange.subscribe(
Messages.ERROR_HANDLING_FS_EVENT,
lambda _: self._submit(self._restart_watch_sync),
)
self._exchange.subscribe(
Messages.STOP_WATCH_SYNC,
lambda _: self._submit(self._stop_watch_sync),
)
self._exchange.subscribe(
Messages.DOWN_IN_WATCH_SYNC,
lambda _: self._submit(self._down_in_watch_sync),
)
def run():
while not self._stop_event.is_set():
time.sleep(0.1)
self._thread = threading.Thread(target=run)
self._thread.start()
self._exchange.publish(
Messages.VERIFY_REMOTE_DIRECTORY, self._configuration.remote_dir
)
def _submit(self, fn, *args, **kwargs):
future = self._executor.submit(fn, *args, **kwargs)
try:
future.result()
except Exception:
traceback.print_exc()
def _resolve_remote_directory(self, remote_dir):
if remote_dir is not None:
if remote_is_dir(remote_dir, self._sftp):
logging.info(
"Setting {} as remote directory".format(remote_dir)
)
self._remote_dir = remote_dir.rstrip("/") + "/"
self._synchronizer = Synchronizer(
self._configuration.local_dir,
self._remote_dir,
self._ssh_details,
self._configuration.ignore,
)
self._exchange.publish(
Messages.REMOTE_DIRECTORY_SET, self._remote_dir
)
self._exchange.publish(Messages.START_INITIAL_FILE_TREE_WALK)
else:
self._exchange.publish(Messages.PROMPT_FOR_REMOTE_DIRECTORY)
else:
self._exchange.publish(Messages.PROMPT_FOR_REMOTE_DIRECTORY)
def _prompt_for_remote_directory(self):
self._clear_current_subscriptions()
self._current_screen = RemoteDirectoryPromptScreen(
self._exchange,
get_paths_in_directory=lambda directory: list(
get_remote_subdirectories(directory, self._sftp)
),
)
self._view.mount(self._current_screen)
def _sync_platform_to_local(self):
self._clear_current_subscriptions()
self._current_screen = SynchronizationScreen(
direction=SynchronizationScreenDirection.DOWN
)
self._view.mount(self._current_screen)
self._synchronizer.down(rsync_opts=["--delete"])
self._show_differences()
def _sync_local_to_platform(self):
self._clear_current_subscriptions()
self._current_screen = SynchronizationScreen(
direction=SynchronizationScreenDirection.UP
)
self._view.mount(self._current_screen)
self._synchronizer.up(rsync_opts=["--delete"])
self._show_differences()
def _display_differences(self, differences):
self._clear_current_subscriptions()
self._current_screen = DifferencesScreen(differences, self._exchange)
subscription_id = self._exchange.subscribe(
Messages.REFRESH_DIFFERENCES,
lambda _: self._submit(self._show_differences),
)
self._current_screen_subscriptions.append(subscription_id)
self._view.mount(self._current_screen)
def _clear_current_subscriptions(self):
for subscription_id in self._current_screen_subscriptions:
self._exchange.unsubscribe(subscription_id)
def _show_differences(self):
self._clear_current_subscriptions()
self._current_screen = WalkingFileTreesScreen(
WalkingFileTreesStatus.CONNECTING, self._exchange
)
try:
self._view.mount(self._current_screen)
differences = self._calculate_differences(publish_progress=True)
self._exchange.publish(Messages.DISPLAY_DIFFERENCES, differences)
finally:
self._current_screen.stop()
def _calculate_differences(self, publish_progress=True):
if publish_progress:
self._exchange.publish(
Messages.WALK_STATUS_CHANGE, WalkingFileTreesStatus.LOCAL_WALK
)
local_files = self._synchronizer.list_local()
logging.info(
"Found {} files locally at path {}.".format(
len(local_files), self._configuration.local_dir
)
)
if publish_progress:
self._exchange.publish(
Messages.WALK_STATUS_CHANGE, WalkingFileTreesStatus.REMOTE_WALK
)
remote_files = self._synchronizer.list_remote()
logging.info(
"Found {} files on Faculty Platform at path {}.".format(
len(remote_files), self._configuration.remote_dir
)
)
if publish_progress:
self._exchange.publish(
Messages.WALK_STATUS_CHANGE,
WalkingFileTreesStatus.CALCULATING_DIFFERENCES,
)
differences = list(compare_file_trees(local_files, remote_files))
return differences
def _start_watch_sync(self):
self._clear_current_subscriptions()
self._current_screen = WatchSyncScreen(self._exchange)
self._view.mount(self._current_screen)
self._watcher_synchronizer = WatcherSynchronizer(
self._sftp, self._synchronizer, self._exchange
)
self._watcher_synchronizer.start()
def _restart_watch_sync(self):
self._clear_current_subscriptions()
if self._watcher_synchronizer is not None:
self._watcher_synchronizer.stop()
self._synchronizer.up(rsync_opts=["--delete"])
self._start_watch_sync()
def _stop_watch_sync(self):
logging.info("Stopping watch-synchronization loop.")
if self._watcher_synchronizer is not None:
self._watcher_synchronizer.stop()
self._show_differences()
def _down_in_watch_sync(self):
logging.info("Doing down synchronization as part of watch-sync.")
if self._watcher_synchronizer is not None:
self._watcher_synchronizer.stop()
self._current_screen = SynchronizationScreen(
direction=SynchronizationScreenDirection.DOWN
)
self._view.mount(self._current_screen)
self._synchronizer.down(rsync_opts=["--update"])
self._start_watch_sync()
def join(self):
self._thread.join()
|
PypiClean
|
/satnogs_decoders-1.60.0-py3-none-any.whl/satnogsdecoders/decoder/neudose.py
|
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Neudose(KaitaiStruct):
""":field dest_callsign: ax25_header.dest_callsign_raw.callsign_ror.callsign
:field src_callsign: ax25_header.src_callsign_raw.callsign_ror.callsign
:field src_ssid: ax25_header.src_ssid_raw.ssid
:field dest_ssid: ax25_header.dest_ssid_raw.ssid
:field ctl: ax25_header.ctl
:field pid: ax25_header.pid
:field csp_hdr_crc: csp_header.crc
:field csp_hdr_rdp: csp_header.rdp
:field csp_hdr_xtea: csp_header.xtea
:field csp_hdr_hmac: csp_header.hmac
:field csp_hdr_src_port: csp_header.source_port
:field csp_hdr_dst_port: csp_header.destination_port
:field csp_hdr_destination: csp_header.destination
:field csp_hdr_source: csp_header.source
:field csp_hdr_priority: csp_header.priority
:field last_rx_timestamp_raw: beacon.last_rx_timestamp_raw
:field packet_version: beacon.packet_version
:field unix_timestamp: beacon.unix_timestamp
:field cdh_on: beacon.sat_status.cdh_on
:field eps_on: beacon.sat_status.eps_on
:field comms_on: beacon.sat_status.comms_on
:field antenna_on: beacon.sat_status.antenna_on
:field payload_on: beacon.sat_status.payload_on
:field mech: beacon.sat_status.mech
:field thermal: beacon.sat_status.thermal
:field antenna_deployed: beacon.sat_status.antenna_deployed
:field last_gs_conn_timestamp: beacon.last_gs_conn_timestamp
:field eps_bat_state: beacon.eps_status.bat_state
:field eps_bat_htr_state: beacon.eps_status.bat_htr_state
:field eps_bat_htr_mode: beacon.eps_status.bat_htr_mode
:field eps_last_reset_rsn: beacon.eps_status.last_reset_rsn
:field eps_gs_wtdg_rst_mark: beacon.eps_status.gs_wtdg_rst_mark
:field eps_uptime: beacon.eps_uptime
:field eps_vbat: beacon.eps_vbat
:field eps_bat_chrge_curr: beacon.eps_bat_chrge_curr
:field eps_bat_dischrge_curr: beacon.eps_bat_dischrge_curr
:field eps_mppt_conv1_temp: beacon.eps_temp.mppt_conv1
:field eps_mppt_conv2_temp: beacon.eps_temp.mppt_conv2
:field eps_mppt_conv3_temp: beacon.eps_temp.mppt_conv3
:field eps_out_conv_3v3_temp: beacon.eps_temp.out_conv_3v3
:field eps_out_conv_5v0_temp: beacon.eps_temp.out_conv_5v0
:field eps_battery_pack_temp: beacon.eps_temp.battery_pack
:field eps_solar_panel_y_n_curr: beacon.eps_solar_panel_curr.y_n
:field eps_solar_panel_y_p_curr: beacon.eps_solar_panel_curr.y_p
:field eps_solar_panel_x_n_curr: beacon.eps_solar_panel_curr.x_n
:field eps_solar_panel_x_p_curr: beacon.eps_solar_panel_curr.x_p
:field eps_solar_panel_z_n_curr: beacon.eps_solar_panel_curr.z_n
:field eps_solar_panel_z_p_curr: beacon.eps_solar_panel_curr.z_p
:field eps_cdh_channel_curr_out: beacon.eps_channel_curr_out.cdh
:field eps_comm_3v3_channel_curr_out: beacon.eps_channel_curr_out.comm_3v3
:field eps_comm_5v0_channel_curr_out: beacon.eps_channel_curr_out.comm_5v0
:field eps_ant_channel_curr_out: beacon.eps_channel_curr_out.ant
:field eps_pld_channel_curr_out: beacon.eps_channel_curr_out.pld
:field cdh_curr_state: beacon.cdh_curr_state
:field cdh_prev_state: beacon.cdh_prev_state
:field cdh_reset_cause: beacon.cdh_boot_reset_cause.reset_cause
:field cdh_boot_cause: beacon.cdh_boot_reset_cause.boot_cause
:field cdh_uptime: beacon.cdh_uptime
:field cdh_temp_mcu_raw: beacon.cdh_temp_mcu_raw
:field cdh_temp_ram_raw: beacon.cdh_temp_ram_raw
:field comms_rtsm_state: beacon.comms_status.rtsm_state
:field comms_rst_reason: beacon.comms_status.rst_reason
:field comms_boot_img_bank: beacon.comms_status.boot_img_bank
:field comms_uptime_raw: beacon.comms_uptime_raw
:field comms_ina233_pa_curr_raw: beacon.comms_ina233_pa_curr_raw
:field comms_ad7294_pa_curr_raw: beacon.comms_ad7294_pa_curr_raw
:field comms_ad7294_gate_volt_raw: beacon.comms_ad7294_gate_volt_raw
:field comms_cc1125_rssi_raw: beacon.comms_cc1125_rssi_raw
:field comms_lna_therm_temp: beacon.comms_temp.lna_therm
:field comms_lna_diode_temp: beacon.comms_temp.lna_diode
:field comms_stm32_internal_temp: beacon.comms_temp.stm32_internal
:field comms_cc1125_uhf_temp: beacon.comms_temp.cc1125_uhf
:field comms_cc1125_vhf_temp: beacon.comms_temp.cc1125_vhf
:field comms_pa_therm_temp: beacon.comms_temp.pa_therm
:field comms_pa_diode_temp: beacon.comms_temp.pa_diode
:field comms_pa_therm_strap_temp: beacon.comms_temp.pa_therm_strap
:field comms_ad7294_internal_temp: beacon.comms_temp.ad7294_internal
:field ant_deployment_status: beacon.ant_deployment_status
:field ant_prev_isis_status: beacon.ant_prev_isis_status
:field pld_status: beacon.pld_status
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.ax25_header = Neudose.Ax25HeaderT(self._io, self, self._root)
self.csp_header = Neudose.CspHeaderT(self._io, self, self._root)
self.beacon = Neudose.BeaconT(self._io, self, self._root)
class EpsSolarPanelCurrT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.eps_solar_panel_curr = self._io.read_bytes(12)
@property
def z_p(self):
if hasattr(self, '_m_z_p'):
return self._m_z_p if hasattr(self, '_m_z_p') else None
self._m_z_p = ((KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 10) << 8) + KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 11))
return self._m_z_p if hasattr(self, '_m_z_p') else None
@property
def x_p(self):
if hasattr(self, '_m_x_p'):
return self._m_x_p if hasattr(self, '_m_x_p') else None
self._m_x_p = ((KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 6) << 8) + KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 7))
return self._m_x_p if hasattr(self, '_m_x_p') else None
@property
def y_p(self):
if hasattr(self, '_m_y_p'):
return self._m_y_p if hasattr(self, '_m_y_p') else None
self._m_y_p = ((KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 2) << 8) + KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 3))
return self._m_y_p if hasattr(self, '_m_y_p') else None
@property
def x_n(self):
if hasattr(self, '_m_x_n'):
return self._m_x_n if hasattr(self, '_m_x_n') else None
self._m_x_n = ((KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 4) << 8) + KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 5))
return self._m_x_n if hasattr(self, '_m_x_n') else None
@property
def y_n(self):
if hasattr(self, '_m_y_n'):
return self._m_y_n if hasattr(self, '_m_y_n') else None
self._m_y_n = ((KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 0) << 8) + KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 1))
return self._m_y_n if hasattr(self, '_m_y_n') else None
@property
def z_n(self):
if hasattr(self, '_m_z_n'):
return self._m_z_n if hasattr(self, '_m_z_n') else None
self._m_z_n = ((KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 8) << 8) + KaitaiStream.byte_array_index(self.eps_solar_panel_curr, 9))
return self._m_z_n if hasattr(self, '_m_z_n') else None
class SatStatusT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sat_status = self._io.read_u1()
@property
def comms_on(self):
if hasattr(self, '_m_comms_on'):
return self._m_comms_on if hasattr(self, '_m_comms_on') else None
self._m_comms_on = ((self.sat_status >> 5) & 1)
return self._m_comms_on if hasattr(self, '_m_comms_on') else None
@property
def antenna_on(self):
if hasattr(self, '_m_antenna_on'):
return self._m_antenna_on if hasattr(self, '_m_antenna_on') else None
self._m_antenna_on = ((self.sat_status >> 4) & 1)
return self._m_antenna_on if hasattr(self, '_m_antenna_on') else None
@property
def cdh_on(self):
if hasattr(self, '_m_cdh_on'):
return self._m_cdh_on if hasattr(self, '_m_cdh_on') else None
self._m_cdh_on = ((self.sat_status >> 7) & 1)
return self._m_cdh_on if hasattr(self, '_m_cdh_on') else None
@property
def mech(self):
if hasattr(self, '_m_mech'):
return self._m_mech if hasattr(self, '_m_mech') else None
self._m_mech = ((self.sat_status >> 2) & 1)
return self._m_mech if hasattr(self, '_m_mech') else None
@property
def antenna_deployed(self):
if hasattr(self, '_m_antenna_deployed'):
return self._m_antenna_deployed if hasattr(self, '_m_antenna_deployed') else None
self._m_antenna_deployed = ((self.sat_status >> 0) & 1)
return self._m_antenna_deployed if hasattr(self, '_m_antenna_deployed') else None
@property
def thermal(self):
if hasattr(self, '_m_thermal'):
return self._m_thermal if hasattr(self, '_m_thermal') else None
self._m_thermal = ((self.sat_status >> 1) & 1)
return self._m_thermal if hasattr(self, '_m_thermal') else None
@property
def eps_on(self):
if hasattr(self, '_m_eps_on'):
return self._m_eps_on if hasattr(self, '_m_eps_on') else None
self._m_eps_on = ((self.sat_status >> 6) & 1)
return self._m_eps_on if hasattr(self, '_m_eps_on') else None
@property
def payload_on(self):
if hasattr(self, '_m_payload_on'):
return self._m_payload_on if hasattr(self, '_m_payload_on') else None
self._m_payload_on = ((self.sat_status >> 3) & 1)
return self._m_payload_on if hasattr(self, '_m_payload_on') else None
class Callsign(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.callsign = (self._io.read_bytes(6)).decode(u"ASCII")
class Ax25HeaderT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.dest_callsign_raw = Neudose.CallsignRaw(self._io, self, self._root)
self.dest_ssid_raw = Neudose.SsidMask(self._io, self, self._root)
self.src_callsign_raw = Neudose.CallsignRaw(self._io, self, self._root)
self.src_ssid_raw = Neudose.SsidMask(self._io, self, self._root)
self.ctl = self._io.read_u1()
self.pid = self._io.read_u1()
class CommsStatusT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.comms_status = self._io.read_u1()
@property
def rtsm_state(self):
if hasattr(self, '_m_rtsm_state'):
return self._m_rtsm_state if hasattr(self, '_m_rtsm_state') else None
self._m_rtsm_state = ((self.comms_status >> 5) & 7)
return self._m_rtsm_state if hasattr(self, '_m_rtsm_state') else None
@property
def rst_reason(self):
if hasattr(self, '_m_rst_reason'):
return self._m_rst_reason if hasattr(self, '_m_rst_reason') else None
self._m_rst_reason = ((self.comms_status >> 2) & 7)
return self._m_rst_reason if hasattr(self, '_m_rst_reason') else None
@property
def boot_img_bank(self):
if hasattr(self, '_m_boot_img_bank'):
return self._m_boot_img_bank if hasattr(self, '_m_boot_img_bank') else None
self._m_boot_img_bank = ((self.comms_status >> 0) & 3)
return self._m_boot_img_bank if hasattr(self, '_m_boot_img_bank') else None
class EpsChannelCurrOutT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.eps_channel_curr_out = self._io.read_bytes(10)
@property
def cdh(self):
if hasattr(self, '_m_cdh'):
return self._m_cdh if hasattr(self, '_m_cdh') else None
self._m_cdh = ((KaitaiStream.byte_array_index(self.eps_channel_curr_out, 0) << 8) + KaitaiStream.byte_array_index(self.eps_channel_curr_out, 1))
return self._m_cdh if hasattr(self, '_m_cdh') else None
@property
def ant(self):
if hasattr(self, '_m_ant'):
return self._m_ant if hasattr(self, '_m_ant') else None
self._m_ant = ((KaitaiStream.byte_array_index(self.eps_channel_curr_out, 6) << 8) + KaitaiStream.byte_array_index(self.eps_channel_curr_out, 7))
return self._m_ant if hasattr(self, '_m_ant') else None
@property
def comm_3v3(self):
if hasattr(self, '_m_comm_3v3'):
return self._m_comm_3v3 if hasattr(self, '_m_comm_3v3') else None
self._m_comm_3v3 = ((KaitaiStream.byte_array_index(self.eps_channel_curr_out, 2) << 8) + KaitaiStream.byte_array_index(self.eps_channel_curr_out, 3))
return self._m_comm_3v3 if hasattr(self, '_m_comm_3v3') else None
@property
def comm_5v0(self):
if hasattr(self, '_m_comm_5v0'):
return self._m_comm_5v0 if hasattr(self, '_m_comm_5v0') else None
self._m_comm_5v0 = ((KaitaiStream.byte_array_index(self.eps_channel_curr_out, 4) << 8) + KaitaiStream.byte_array_index(self.eps_channel_curr_out, 5))
return self._m_comm_5v0 if hasattr(self, '_m_comm_5v0') else None
@property
def pld(self):
if hasattr(self, '_m_pld'):
return self._m_pld if hasattr(self, '_m_pld') else None
self._m_pld = ((KaitaiStream.byte_array_index(self.eps_channel_curr_out, 8) << 8) + KaitaiStream.byte_array_index(self.eps_channel_curr_out, 9))
return self._m_pld if hasattr(self, '_m_pld') else None
class CommsTempT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.lna_therm = self._io.read_s1()
self.lna_diode = self._io.read_s1()
self.stm32_internal = self._io.read_s1()
self.cc1125_uhf = self._io.read_s1()
self.cc1125_vhf = self._io.read_s1()
self.pa_therm = self._io.read_s1()
self.pa_diode = self._io.read_s1()
self.pa_therm_strap = self._io.read_s1()
self.ad7294_internal = self._io.read_s1()
class BeaconT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.last_rx_timestamp_raw = self._io.read_u4be()
self.packet_version = self._io.read_u1()
self.unix_timestamp = self._io.read_u4be()
self.sat_status = Neudose.SatStatusT(self._io, self, self._root)
self.last_gs_conn_timestamp = self._io.read_u4be()
self.eps_status = Neudose.EpsStatusT(self._io, self, self._root)
self.eps_uptime = self._io.read_u4be()
self.eps_vbat = self._io.read_u2be()
self.eps_bat_chrge_curr = self._io.read_u2be()
self.eps_bat_dischrge_curr = self._io.read_u2be()
self.eps_temp = Neudose.EpsTempT(self._io, self, self._root)
self.eps_solar_panel_curr = Neudose.EpsSolarPanelCurrT(self._io, self, self._root)
self.eps_channel_curr_out = Neudose.EpsChannelCurrOutT(self._io, self, self._root)
self.cdh_curr_state = self._io.read_u1()
self.cdh_prev_state = self._io.read_u1()
self.cdh_boot_reset_cause = Neudose.CdhBootResetCauseT(self._io, self, self._root)
self.cdh_uptime = self._io.read_u4be()
self.cdh_temp_mcu_raw = self._io.read_s2be()
self.cdh_temp_ram_raw = self._io.read_s2be()
self.comms_status = Neudose.CommsStatusT(self._io, self, self._root)
self.comms_uptime_raw = self._io.read_u4be()
self.comms_ina233_pa_curr_raw = self._io.read_u2be()
self.comms_ad7294_pa_curr_raw = self._io.read_u2be()
self.comms_ad7294_gate_volt_raw = self._io.read_u2be()
self.comms_cc1125_rssi_raw = self._io.read_u2be()
self.comms_temp = Neudose.CommsTempT(self._io, self, self._root)
self.ant_deployment_status = self._io.read_u1()
self.ant_prev_isis_status = self._io.read_u2be()
self.pld_status = self._io.read_u1()
class SsidMask(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.ssid_mask = self._io.read_u1()
@property
def ssid(self):
if hasattr(self, '_m_ssid'):
return self._m_ssid if hasattr(self, '_m_ssid') else None
self._m_ssid = ((self.ssid_mask & 15) >> 1)
return self._m_ssid if hasattr(self, '_m_ssid') else None
class EpsTempT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.mppt_conv1 = self._io.read_s1()
self.mppt_conv2 = self._io.read_s1()
self.mppt_conv3 = self._io.read_s1()
self.out_conv_3v3 = self._io.read_s1()
self.out_conv_5v0 = self._io.read_s1()
self.battery_pack = self._io.read_s1()
class CspHeaderT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.csp_length = self._io.read_u2be()
self.csp_header_raw = self._io.read_u4be()
@property
def source(self):
if hasattr(self, '_m_source'):
return self._m_source if hasattr(self, '_m_source') else None
self._m_source = ((self.csp_header_raw >> 25) & 31)
return self._m_source if hasattr(self, '_m_source') else None
@property
def source_port(self):
if hasattr(self, '_m_source_port'):
return self._m_source_port if hasattr(self, '_m_source_port') else None
self._m_source_port = ((self.csp_header_raw >> 8) & 63)
return self._m_source_port if hasattr(self, '_m_source_port') else None
@property
def destination_port(self):
if hasattr(self, '_m_destination_port'):
return self._m_destination_port if hasattr(self, '_m_destination_port') else None
self._m_destination_port = ((self.csp_header_raw >> 14) & 63)
return self._m_destination_port if hasattr(self, '_m_destination_port') else None
@property
def rdp(self):
if hasattr(self, '_m_rdp'):
return self._m_rdp if hasattr(self, '_m_rdp') else None
self._m_rdp = ((self.csp_header_raw & 2) >> 1)
return self._m_rdp if hasattr(self, '_m_rdp') else None
@property
def destination(self):
if hasattr(self, '_m_destination'):
return self._m_destination if hasattr(self, '_m_destination') else None
self._m_destination = ((self.csp_header_raw >> 20) & 31)
return self._m_destination if hasattr(self, '_m_destination') else None
@property
def priority(self):
if hasattr(self, '_m_priority'):
return self._m_priority if hasattr(self, '_m_priority') else None
self._m_priority = (self.csp_header_raw >> 30)
return self._m_priority if hasattr(self, '_m_priority') else None
@property
def reserved(self):
if hasattr(self, '_m_reserved'):
return self._m_reserved if hasattr(self, '_m_reserved') else None
self._m_reserved = ((self.csp_header_raw >> 4) & 15)
return self._m_reserved if hasattr(self, '_m_reserved') else None
@property
def xtea(self):
if hasattr(self, '_m_xtea'):
return self._m_xtea if hasattr(self, '_m_xtea') else None
self._m_xtea = ((self.csp_header_raw & 4) >> 2)
return self._m_xtea if hasattr(self, '_m_xtea') else None
@property
def hmac(self):
if hasattr(self, '_m_hmac'):
return self._m_hmac if hasattr(self, '_m_hmac') else None
self._m_hmac = ((self.csp_header_raw & 8) >> 3)
return self._m_hmac if hasattr(self, '_m_hmac') else None
@property
def crc(self):
if hasattr(self, '_m_crc'):
return self._m_crc if hasattr(self, '_m_crc') else None
self._m_crc = (self.csp_header_raw & 1)
return self._m_crc if hasattr(self, '_m_crc') else None
class CallsignRaw(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self._raw__raw_callsign_ror = self._io.read_bytes(6)
self._raw_callsign_ror = KaitaiStream.process_rotate_left(self._raw__raw_callsign_ror, 8 - (1), 1)
_io__raw_callsign_ror = KaitaiStream(BytesIO(self._raw_callsign_ror))
self.callsign_ror = Neudose.Callsign(_io__raw_callsign_ror, self, self._root)
class CdhBootResetCauseT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.cdh_boot_reset_cause = self._io.read_u1()
@property
def boot_cause(self):
if hasattr(self, '_m_boot_cause'):
return self._m_boot_cause if hasattr(self, '_m_boot_cause') else None
self._m_boot_cause = ((self.cdh_boot_reset_cause >> 4) & 15)
return self._m_boot_cause if hasattr(self, '_m_boot_cause') else None
@property
def reset_cause(self):
if hasattr(self, '_m_reset_cause'):
return self._m_reset_cause if hasattr(self, '_m_reset_cause') else None
self._m_reset_cause = ((self.cdh_boot_reset_cause >> 0) & 15)
return self._m_reset_cause if hasattr(self, '_m_reset_cause') else None
class EpsStatusT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.eps_status = self._io.read_u1()
@property
def gs_wtdg_rst_mark(self):
if hasattr(self, '_m_gs_wtdg_rst_mark'):
return self._m_gs_wtdg_rst_mark if hasattr(self, '_m_gs_wtdg_rst_mark') else None
self._m_gs_wtdg_rst_mark = ((self.eps_status >> 0) & 1)
return self._m_gs_wtdg_rst_mark if hasattr(self, '_m_gs_wtdg_rst_mark') else None
@property
def bat_htr_mode(self):
if hasattr(self, '_m_bat_htr_mode'):
return self._m_bat_htr_mode if hasattr(self, '_m_bat_htr_mode') else None
self._m_bat_htr_mode = ((self.eps_status >> 4) & 1)
return self._m_bat_htr_mode if hasattr(self, '_m_bat_htr_mode') else None
@property
def bat_htr_state(self):
if hasattr(self, '_m_bat_htr_state'):
return self._m_bat_htr_state if hasattr(self, '_m_bat_htr_state') else None
self._m_bat_htr_state = ((self.eps_status >> 5) & 1)
return self._m_bat_htr_state if hasattr(self, '_m_bat_htr_state') else None
@property
def bat_state(self):
if hasattr(self, '_m_bat_state'):
return self._m_bat_state if hasattr(self, '_m_bat_state') else None
self._m_bat_state = ((self.eps_status >> 6) & 3)
return self._m_bat_state if hasattr(self, '_m_bat_state') else None
@property
def last_reset_rsn(self):
if hasattr(self, '_m_last_reset_rsn'):
return self._m_last_reset_rsn if hasattr(self, '_m_last_reset_rsn') else None
self._m_last_reset_rsn = ((self.eps_status >> 1) & 7)
return self._m_last_reset_rsn if hasattr(self, '_m_last_reset_rsn') else None
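# ---------------------------------------------------------------------------
# Editor's hedged usage sketch (not part of the generated decoder). A raw
# downlinked frame would be parsed like this; "frame.bin" is a placeholder
# name for a file containing real telemetry bytes.
#
#     raw_frame = open("frame.bin", "rb").read()  # assumed file name
#     decoded = Neudose(KaitaiStream(BytesIO(raw_frame)))
#     print(decoded.beacon.unix_timestamp, decoded.beacon.sat_status.cdh_on)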
|
PypiClean
|
/mouse_behavior_analysis_tools-1.0.0-py3-none-any.whl/mouse_behavior_analysis_tools/utils/custom_functions.py
|
import datetime
import ntpath
import random
import re
import sys
from itertools import chain, compress
import numpy as np
import pandas as pd
# import glob
# import socket
import scipy.optimize as opt
from sklearn.linear_model import LinearRegression, LogisticRegressionCV
from mouse_behavior_analysis_tools.utils.misc_utils import update_progress
def first_diff_zero(array):
# return the indices of a binary (0 or 1) vector at which the value
# first becomes different from 0 (i.e. the rising edges)
# create a copy of the vector shifted one position forward
newarray = np.concatenate((0, array), axis=None)[0 : len(array)]
difarray = array - newarray
get_indexes = lambda x, xs: [
i for (y, i) in zip(xs, range(len(xs))) if x == y
]
# find which indexes are 1
indexes = get_indexes(1, difarray)
return indexes
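# Editor's worked example (illustrative): rising edges of a binary vector.
#     first_diff_zero(np.array([0, 1, 1, 0, 1]))  # -> [1, 4]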
def time_to_zero(input_list):
return list(np.array(input_list) - input_list[0])
def ParseForTimes(files):
# looks for 8digits followed by underscore and 6digits (bpod style)
dates = []
for title in files:
try:
match = re.search(r"\d{8}_\d{6}", ntpath.basename(title))
dates.append(match.group())
except Exception:
dates.append("notFound")
return dates
def BpodDatesToTime(inputDates):
# assumes input style YYYYMMDD_HHMMSS
# returns a time object
outputDates = []
for date in inputDates:
try:
x = datetime.datetime(
int(date[0:4]),
int(date[4:6]),
int(date[6:8]),
int(date[9:11]),
int(date[11:13]),
)
outputDates.append(x)
except Exception:
outputDates.append("notFound")
return outputDates
def PsychPerformance(trialsDif, sideSelected):
# function to calculate psychometric performance and
# fit logistic regression to the data
# returns a dictionary
if trialsDif.any():  # guard against an empty array being passed
# masks to remove nans for logistic regression
nan_mask = ~(np.isnan(trialsDif) | np.isnan(sideSelected))
# logistic regression
if len(np.unique(sideSelected)) > 1:
clf = LogisticRegressionCV(cv=3).fit(
trialsDif[nan_mask, np.newaxis], sideSelected[nan_mask]
)
else:
# in case a model cannot be fitted
# (e.g. mouse always goes to the left)
# fit model on dummy data
clf = LogisticRegressionCV(cv=3).fit(
np.array([0, 0, 0, 100, 100, 100]).reshape(-1, 1),
np.array([1, 0, 1, 0, 1, 0]),
)
# Calculate performance
# Initialize values
difficulty = np.unique(trialsDif[~np.isnan(trialsDif)])
performance = np.full(len(difficulty), np.nan)
for i in range(len(difficulty)):
if np.nansum(sideSelected[trialsDif == difficulty[i]]) > 0:
performance[i] = 100 * (
np.nanmean(sideSelected[trialsDif == difficulty[i]]) - 1
)
else:
performance[i] = np.nan
DictToReturn = {
"Difficulty": difficulty,
"Performance": performance,
"Logit": clf,
}
else:
DictToReturn = {}
return DictToReturn
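# Editor's illustrative call (assumed data, not from a real session). Each
# difficulty appears three times so the 3-fold logistic fit is valid:
#     PsychPerformance(
#         trialsDif=np.array([10.0] * 3 + [30.0] * 3 + [70.0] * 3 + [90.0] * 3),
#         sideSelected=np.array([1.0] * 6 + [2.0] * 6),
#     )
# returns {'Difficulty': array([10., 30., 70., 90.]),
#          'Performance': array([0., 0., 100., 100.]), 'Logit': <fitted model>}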
def splitOpto(SessionData):
# SessionData comes from bpod: ExperimentData[x]['SessionData']
# Returns two dictionaries
Trials_normalMask = SessionData["OptoStim"] == 0
Trials_optoMask = SessionData["OptoStim"] == 1
# selection of normal and opto trials
normalTrials_sideSelected = SessionData["FirstPoke"][Trials_normalMask]
normalTrials_difficulty = SessionData["TrialHighPerc"][Trials_normalMask]
optoTrials_sideSelected = SessionData["FirstPoke"][Trials_optoMask]
optoTrials_difficulty = SessionData["TrialHighPerc"][Trials_optoMask]
# create dictionaries
NormalTrials = {
"SideSelected": normalTrials_sideSelected,
"Difficulty": normalTrials_difficulty,
}
OptoTrials = {
"SideSelected": optoTrials_sideSelected,
"Difficulty": optolTrials_difficulty,
}
return NormalTrials, OptoTrials
def generate_fake_data(trialsDif, sideSel):
# Generates data for bootstrapping, sampling and replacing, so each
# unique trialsDif maintains the same size
fake_side_sel = np.empty_like(sideSel)
for curr_diff in np.unique(trialsDif):
diff_mask = trialsDif == curr_diff
population = sideSel[diff_mask]
fake_side_sel[diff_mask] = np.random.choice(
population, len(population)
)
return fake_side_sel
def BootstrapPerformances(
trialsDif, sideSelected, ntimes, prediction_difficulties
):
# Bootstrap data and return logistic regression
# predictions for each sampled model
# remove nans
nan_mask = ~(np.isnan(sideSelected) | np.isnan(trialsDif))
difficulties = trialsDif[nan_mask]
sideselection = sideSelected[nan_mask]
predictPerFake = np.empty((len(prediction_difficulties), ntimes))
for i in range(predictPerFake.shape[1]):
# create fake data
fake_data = generate_fake_data(difficulties, sideselection)
try:
clf_fake = LogisticRegressionCV(cv=3).fit(
difficulties.reshape(-1, 1), fake_data
)
predictPerFake[:, i] = (
100 * clf_fake.predict_proba(prediction_difficulties)[:, 1]
)
except Exception:
# in case a model cannot be fitted
# (e.g. mouse always goes to the left)
# fit model on dummy data
clf_fake = LogisticRegressionCV(cv=3).fit(
np.array([0, 0, 0, 100, 100, 100]).reshape(-1, 1),
np.array([1, 0, 1, 0, 1, 0]),
)
# store the dummy model's predictions so this column is not left
# with the uninitialized values of np.empty
predictPerFake[:, i] = (
100 * clf_fake.predict_proba(prediction_difficulties)[:, 1]
)
return predictPerFake
def SessionDataToDataFrame(
AnimalID, ExperimentalGroup, SessionID, SessionData
):
# function to create a dataframe out of the session
# each trial is an entry on the dataframe
# if the session is empty output a message
if "nTrials" not in SessionData:
print("Session is empty")
return pd.DataFrame()
numberOfTrials = SessionData["nTrials"]
# protocol information
ts = SessionData["TrialSettings"]
protocols = [
ts[0]["GUIMeta"]["TrainingLevel"]["String"][x]
for x in [y["GUI"]["TrainingLevel"] - 1 for y in ts]
]
stimulations = [
ts[0]["GUIMeta"]["OptoStim"]["String"][x]
for x in [y["GUI"]["OptoStim"] - 1 for y in ts]
]
# muscimol
yList = []
for y in ts:
try:
yList.append(y["GUI"]["Muscimol"] - 1)
except Exception:
yList.append(0)
muscimol = []
for x in yList:
try:
muscimol.append(ts[0]["GUIMeta"]["Muscimol"]["String"][x])
except Exception:
muscimol.append("No")
# punish method
yList = []
for y in ts:
try:
yList.append(y["GUI"]["Punish"] - 1)
except Exception:
yList.append(0)
punish = []
for x in yList:
try:
punish.append(ts[0]["GUIMeta"]["Punish"]["String"][x])
except Exception:
punish.append("No")
# reward change
yList = []
reward_change_block = []
for y in ts:
try:
yList.append(y["GUI"]["RewardChange"] - 1)
reward_change_block.append(y["RewardChangeBlock"])
except Exception:
yList.append(0)
reward_change_block.append(0)
reward_change = []
for x in yList:
try:
reward_change.append(ts[0]["GUIMeta"]["RewardChange"]["String"][x])
except Exception:
reward_change.append("No")
if not np.logical_and(
len(protocols) == numberOfTrials, len(stimulations) == numberOfTrials
):
print(
"protocols and/or stimulations length do\
not match with the number of trials"
)
return pd.DataFrame()
CenterPortDuration = [x["GUI"]["CenterPortDuration"] for x in ts]
Contingency = [x["GUI"]["Contingency"] for x in ts]
RewardAmount = [x["GUI"]["RewardAmount"] for x in ts]
PunishDelay = [x["GUI"]["PunishDelay"] for x in ts]
BiasCorrection = [x["GUI"]["BiasCorrection"] for x in ts]
FullGUI = [x["GUI"] for x in ts]
# trial events
trev = [x["Events"] for x in SessionData["RawEvents"]["Trial"]]
if not len(trev) == numberOfTrials:
print("trial events length do not match with the number of trials")
return pd.DataFrame()
# trial states
trst = [x["States"] for x in SessionData["RawEvents"]["Trial"]]
if not len(trst) == numberOfTrials:
print("trial states length do not match with the number of trials")
return pd.DataFrame()
# calculate the cumulative performance
firstpokecorrect = SessionData["FirstPokeCorrect"][0:numberOfTrials]
correct_cp = np.cumsum(firstpokecorrect == 1)
incorrect_cp = np.cumsum(firstpokecorrect == 0)
# the following line gives an error sometimes
cumper = 100 * correct_cp / (correct_cp + incorrect_cp)
# calculate when there is a side-switching event
TriSide = np.array(SessionData["TrialSide"][0:numberOfTrials])
SwitchSide = 1 * ((TriSide - np.insert(TriSide[:-1], 0, 0)) != 0)
# add information about the choice in the previous trial'
FirstPoke = SessionData["FirstPoke"][0:numberOfTrials]
PrevTriChoice = np.insert(np.asfarray(FirstPoke[:-1]), 0, np.nan)
# create a nice ID for the session (pretty date/time)
prettyDate = SessionID.strftime("%b%d %H:%M")
DFtoReturn = pd.DataFrame(
{
"AnimalID": pd.Series(np.repeat(AnimalID, numberOfTrials)).astype(
"category"
),
"ExperimentalGroup": pd.Series(
np.repeat(ExperimentalGroup, numberOfTrials)
).astype("category"),
"SessionTime": pd.Series(
np.repeat(prettyDate, numberOfTrials)
).astype("category"),
"FullSessionTime": np.repeat(SessionID, numberOfTrials),
"Protocol": protocols,
"Stimulation": stimulations,
"Muscimol": muscimol,
"RewardChange": reward_change,
"RewardChangeBlock": reward_change_block,
"CenterPortDuration": CenterPortDuration,
"Contingency": Contingency,
"RewardAmount": RewardAmount,
"PunishDelay": PunishDelay,
"Punish": punish,
"BiasCorrection": BiasCorrection,
"TrialIndex": list(range(numberOfTrials)),
"TrialHighPerc": SessionData["TrialHighPerc"][0:numberOfTrials],
"Outcomes": SessionData["Outcomes"][0:numberOfTrials],
"OptoStim": SessionData["OptoStim"][0:numberOfTrials],
"FirstPokeCorrect": firstpokecorrect,
"FirstPoke": FirstPoke,
"TrialSide": TriSide,
"TrialSequence": SessionData["TrialSequence"][0:numberOfTrials],
"ResponseTime": SessionData["ResponseTime"][0:numberOfTrials],
"TrialStartTimestamp": SessionData["TrialStartTimestamp"],
"CumulativePerformance": cumper,
"SwitchSide": SwitchSide,
"PreviousChoice": PrevTriChoice,
"TrialEvents": trev,
"TrialStates": trst,
"FullGUI": FullGUI,
}
)
return DFtoReturn
def identifyIdx(datatimes, ntrialsList, ntrials_thr):
idxlist = []
for i in range(len(datatimes)):
if np.logical_or(
datatimes[i] == "notFound", ntrialsList[i] < ntrials_thr
):
idxlist.append(i)
return sorted(idxlist, reverse=True)
# Analyze this with the optotrials as well
def AnalyzeSwitchTrials(df):
# df is a dataframe containing the following columns:
# 'SwitchSide'
# 'FirstPokeCorrect'
# 'SessionTime'
# 'OptoStim'
# it returns a different dataframe with information grouped for a bar plot
# get info for the sessions
sessionsID = pd.unique(df["SessionTime"])
# initialize list to hold dataframes
sessionsInfo = []
# fill the new dataframe with info for each session
for session in sessionsID:
# get the dataframe for that session
Sdf = df[df["SessionTime"] == session]
# split the dataset into opto and normal
Ndf = Sdf[Sdf["OptoStim"] == 0]
Odf = Sdf[Sdf["OptoStim"] == 1]
# percentage of correct trials on stay trials without stimulation
StayNoStim = (
100
* np.sum(Ndf[Ndf["SwitchSide"] == 0]["FirstPokeCorrect"] == 1)
/ len(Ndf[Ndf["SwitchSide"] == 0])
)
# percentage of correct trials on switch trials without stimulation
SwitchNoStim = (
100
* np.sum(Ndf[Ndf["SwitchSide"] == 1]["FirstPokeCorrect"] == 1)
/ len(Ndf[Ndf["SwitchSide"] == 1])
)
# percentage of correct trials on stay trials with stimulation
StayStim = (
100
* np.sum(Odf[Odf["SwitchSide"] == 0]["FirstPokeCorrect"] == 1)
/ len(Odf[Odf["SwitchSide"] == 0])
)
# percentage of correct trials on switch trials with stimulation
SwitchStim = (
100
* np.sum(Odf[Odf["SwitchSide"] == 1]["FirstPokeCorrect"] == 1)
/ len(Odf[Odf["SwitchSide"] == 1])
)
# fill the dataframe
SessionDF = pd.DataFrame(
{
"SessionTime": np.repeat(session, 4),
"Condition": np.array(
[
"Normal_Stay",
"Normal_Switch",
"Opto_Stay",
"Opto_Switch",
]
),
"PercCorrect": np.array(
[StayNoStim, SwitchNoStim, StayStim, SwitchStim]
),
}
)
# append it to list
sessionsInfo.append(SessionDF)
# merge into a single df and return
return pd.concat(sessionsInfo, ignore_index=True)
# Analyze this with the trial side as well
def AnalyzeSwitchTrials_for_sides(df):
# df is a dataframe containing the following columns:
# 'SwitchSide'
# 'FirstPokeCorrect'
# 'SessionTime'
# 'TrialSide'
# it returns a different dataframe with information grouped for a bar plot
# get info for the sessions
sessionsID = pd.unique(df["SessionTime"])
# initialize list to hold dataframes
sessionsInfo = []
# fill the new dataframe with info for each session
for session in sessionsID:
# get the dataframe for that session
Sdf = df[df["SessionTime"] == session]
# split the dataset into opto and normal
Ndf = Sdf[Sdf["TrialSide"] == 1]
Odf = Sdf[Sdf["TrialSide"] == 2]
# percentage of correct trials on stay trials without stimulation
StayNoStim = (
100
* np.sum(Ndf[Ndf["SwitchSide"] == 0]["FirstPokeCorrect"] == 1)
/ len(Ndf[Ndf["SwitchSide"] == 0])
)
# percentage of correct trials on switch trials without stimulation
SwitchNoStim = (
100
* np.sum(Ndf[Ndf["SwitchSide"] == 1]["FirstPokeCorrect"] == 1)
/ len(Ndf[Ndf["SwitchSide"] == 1])
)
# percentage of correct trials on stay trials with stimulation
StayStim = (
100
* np.sum(Odf[Odf["SwitchSide"] == 0]["FirstPokeCorrect"] == 1)
/ len(Odf[Odf["SwitchSide"] == 0])
)
# percentage of correct trials on switch trials with stimulation
SwitchStim = (
100
* np.sum(Odf[Odf["SwitchSide"] == 1]["FirstPokeCorrect"] == 1)
/ len(Odf[Odf["SwitchSide"] == 1])
)
# fill the dataframe
SessionDF = pd.DataFrame(
{
"SessionTime": np.repeat(session, 4),
"TrialSide": np.array(
["Left_Stay", "Left_Switch", "Right_Stay", "Right_Switch"]
),
"PercCorrect": np.array(
[StayNoStim, SwitchNoStim, StayStim, SwitchStim]
),
}
)
# append it to list
sessionsInfo.append(SessionDF)
# merge into a single df and return
return pd.concat(sessionsInfo, ignore_index=True)
# function to process the data of an experiment
# for psychometric performance plots:
def PP_ProcessExperiment(SessionData, bootstrap=None, error_bars=None):
# SessionData is a dataframe that needs to have the following column names:
# 'TrialHighPerc'
# 'FirstPoke'
diffs = np.array(SessionData["TrialHighPerc"])
choices = np.array(SessionData["FirstPoke"])
# Calculate psychometric performance parameters
PsyPer = PsychPerformance(trialsDif=diffs, sideSelected=choices)
# predict data
predictDif = np.linspace(1, 100, 2000).reshape(-1, 1)
if PsyPer:
predictPer = 100 * PsyPer["Logit"].predict_proba(predictDif)[:, 1]
# Calculate the error bars if asked to
if error_bars is not None:
EBdata = SessionData.groupby(by=error_bars).apply(getEBdata)
# flatten the lists
EB_diffs_flat = list(chain(*[x["Difficulty"] for x in EBdata]))
EB_perfs_flat = list(chain(*[x["Performance"] for x in EBdata]))
# calculate error bars for each difficulty
Std_list = [
np.std(list(compress(EB_perfs_flat, EB_diffs_flat == dif)))
for dif in PsyPer["Difficulty"]
]
else:
Std_list = np.nan
else: # needed for the return
predictPer = np.nan
Std_list = np.nan
# Bootstrap on fake data (generated inside the bootstrap function)
fakePredictions = np.nan
if bootstrap is not None:
np.random.seed(12233) # fixed random seed for reproducibility
if PsyPer:
fakePredictions = BootstrapPerformances(
trialsDif=diffs,
sideSelected=choices,
ntimes=bootstrap,
prediction_difficulties=predictDif,
)
# return what is needed for the plot
return predictDif, PsyPer, fakePredictions, predictPer, Std_list
def getEBdata(SessionData):
# SessionData is a dataframe that needs to have the following column names:
# 'TrialHighPerc'
# 'FirstPoke'
diffs = np.array(SessionData["TrialHighPerc"])
choices = np.array(SessionData["FirstPoke"])
PsyPer = PsychPerformance(trialsDif=diffs, sideSelected=choices)
return PsyPer
def timeDifferences(listOfDates):
"""
Return the absolute time, in days, of elements in a list of dates,
relative to the first one
Assumes data is in order (would return negative values otherwise)
:param listOfDates: list of size X of dates. Format: YYYYMMDD_HHMMSS
:return: array of size X of absolute time
"""
if len(listOfDates) == 0:
return []
abstimeList = []
for date in listOfDates:
strList = [
int(date[0:4]),
int(date[4:6]),
int(date[6:8]),
int(date[9:11]),
int(date[11:13]),
int(date[13:15]),
]
intList = list(map(int, strList))
# Calculate absolute time in days
multipliers = [365, 30, 1, 1 / 24, 1 / (24 * 60), 1 / (24 * 60 * 60)]
mulList = [a * b for a, b in zip(intList, multipliers)]
abstime = sum(mulList)
abstimeList.append(abstime)
diftime = np.array(abstimeList) - abstimeList[0]
return diftime
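# Editor's worked example (illustrative):
#     timeDifferences(["20200101_120000", "20200102_120000"])  # -> array([0., 1.])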
def RBias(FirstPokes, FirstPokesCorrect):
"""
Returns the bias to the right
FirstPokes is a vector of 1s and 2s (Left or Right), indicating
the poked port
FirstPokesCorrect is a 0 and 1 vector (wrong or correct poke)
Both could have NaN values
Returns a value from -1 to 1: 0 means not biased, 1 fully right-biased,
and -1 fully left-biased. It is a conservative measure. E.g., at 50-50
trial chance, an animal totally biased to one side would get only half of
the trials wrong, so the function would output +/-0.5.
The bias is estimated from the proportion of wrong pokes to each side.
"""
WrongSides = FirstPokes[FirstPokesCorrect == 0]
if len(WrongSides) < 1:
RBias = 0
else:
WrongSideProportion = len(WrongSides) / len(FirstPokes) # from 0 to 1
WrongRightsProportion = (
WrongSideProportion * np.nansum(WrongSides == 2) / len(WrongSides)
)
WrongLeftsProportion = (
WrongSideProportion * np.nansum(WrongSides == 1) / len(WrongSides)
)
RBias = WrongRightsProportion - WrongLeftsProportion
return RBias
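# Illustrative sketch (not part of the original code): four right pokes of
# which two are wrong give a bias of +0.5, matching the "conservative"
# behaviour described in the docstring. Assumes numpy is imported as np.
def _example_rbias():
    pokes = np.array([2, 2, 2, 2])
    correct = np.array([1, 0, 1, 0])
    return RBias(pokes, correct)  # 0.5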
def CalculateRBiasWindow(FirstPokes, FirstPokesCorrect, Window):
"""Calculates RBias over the lenght of the vectors FirstPokes and
FirstPokesCorrect using a Window. Returns vector of same lenght"""
# Create empty vector
RBiasVector = np.empty(len(FirstPokes))
RBiasVector[:] = np.nan
for i in range(Window, len(FirstPokes)):
win = range((i - Window), i)
RBiasVector[i] = RBias(FirstPokes[win], FirstPokesCorrect[win])
return RBiasVector
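# Illustrative sketch (not part of the original code): with Window=2 the first
# two entries stay NaN and every later entry is the bias over the two
# preceding trials. Assumes numpy is imported as np.
def _example_rbias_window():
    pokes = np.array([2, 2, 1, 1, 2])
    correct = np.array([0, 0, 1, 1, 0])
    return CalculateRBiasWindow(pokes, correct, Window=2)
    # array([nan, nan, 1. , 0.5, 0. ])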
# calculate the number of times they go to the middle (anxiousness?)
def CalculateMidPokes(df):
return np.sum(
df["TrialEvents"]["Port2In"] <= df["TrialStates"]["WaitForResponse"][0]
)
# this might fail if WaitForResponse is empty...
# quantify how long they wait in the middle
def MidPortWait(df):
timeOut = df["TrialStates"]["WaitForResponse"].astype("float32")[0]
PortIn = df["TrialEvents"]["Port2In"]
# sometimes this is an integer (rarely)
if isinstance(PortIn, int):
PortIn = float(PortIn)
if not isinstance(PortIn, float):
PortIn = PortIn.astype("float32") # does not work for int
PortInIdx = np.where(PortIn < timeOut)[0][-1]
PortInTime = PortIn[PortInIdx]
else:
PortInTime = PortIn
PortTime = timeOut - PortInTime
return PortTime
def CalculateTrialInitiationTime(df):
# quantify the time they take to initiate a trial
# (from trialstart to center poke in)
# the first time they poke
try:
return float(df.TrialEvents["Port2In"][0])
except Exception:
return float("NaN")
def AnalyzePercentageByDay(rdf):
# df is a dataframe containing the following columns:
# 'FirstPokeCorrect'
# 'TrainingDay'
# 'AnimalID'
# 'Protocol'
# 'ExperimentalGroup'
# it returns a different dataframe with information grouped for a bar plot
AnimalIDs = pd.unique(rdf["AnimalID"])
animalsInfo = []
for animalid in AnimalIDs:
df = rdf[rdf["AnimalID"] == animalid]
# get info for the sessions
TrainingDays = pd.unique(df["TrainingDay"])
# initialize value for cumulative trials
CumTrials = 0
# fill the new dataframe with info for each session
for session in TrainingDays:
# get the dataframe for that session
Sdf = df[df["TrainingDay"] == session]
# protocol and ExperimentalGroup
prot = Sdf.Protocol.iloc[0]
inj = Sdf.ExperimentalGroup.iloc[0]
# percentage of correct trials
PercCorrect = 100 * np.sum(Sdf["FirstPokeCorrect"]) / len(Sdf)
# number of trials per session
NumOfTrials = len(Sdf)
# cumulative trials
CumTrials = CumTrials + NumOfTrials
# fill the dataframe
SessionDF = pd.DataFrame(
{
"AnimalID": animalid,
"SessionTime": session,
"PercCorrect": np.array([PercCorrect]),
"NumberOfTrials": NumOfTrials,
"CumulativeTrials": CumTrials,
"Protocol": prot,
"ExperimentalGroup": inj,
}
)
# append it to list
animalsInfo.append(SessionDF)
# merge into a single df and return
return pd.concat(animalsInfo, ignore_index=True)
def get_new_files(filelist, existing_dates):
"""
Compares dates in files to a datetime dataset to check for existing data
:param filelist: list of full paths to bpod files
:type filelist: list of strings
:param existing_dates: time objects in datetime format
:returns: subset of filelist
"""
filenames = [ntpath.basename(x) for x in filelist]
dates = BpodDatesToTime(ParseForTimes(filenames))
dates_formatted = [str(i) for i in dates]
existing_dates_formatted = [str(i) for i in existing_dates]
new_dates = list(set(dates_formatted) - set(existing_dates_formatted))
new_idx = [i for i, n in enumerate(dates_formatted) if n in new_dates]
new_files = [filelist[i] for i in new_idx]
return new_files
def split_files_into_old_and_new(filelist, existing_dates):
"""
Compares dates in files to a datetime dataset to split them
into new files and old files
:param filelist: list of full paths to bpod files
:type filelist: list of strings
:param existing_dates: time objects in datetime format
:returns: two subsets of filelist
"""
# files with a new date
dif_files = get_new_files(filelist, existing_dates)
# compare dates and split
# idx of old_files
filenames = [ntpath.basename(x) for x in dif_files]
dates = BpodDatesToTime(ParseForTimes(filenames))
old_idx = [
i
for i, n in enumerate(dates)
if n < existing_dates.max().to_pydatetime()
]
# split
old_files = [dif_files[i] for i in old_idx]
new_files = [
dif_files[i] for i in list(set(range(len(dif_files))) - set(old_idx))
]
return old_files, new_files
def perf_window_calculator(df, window):
"""
Calculate the performance of the last X trials
"""
firstpokecorrect = df["FirstPokeCorrect"] # 0s and 1s
# create empty vector of the same size
perf_window = np.full(len(firstpokecorrect), np.nan)
for i in range(window - 1, len(perf_window)):
perf_window[i] = (
np.nansum(firstpokecorrect[i - window + 1 : i + 1]) / window * 100
)
return perf_window
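# Illustrative sketch (not part of the original code): with a window of 2 the
# first entry stays NaN and each later entry is the percentage of correct
# trials over the last two trials. Assumes pandas is imported as pd and a
# dataframe with a 'FirstPokeCorrect' column.
def _example_perf_window():
    df = pd.DataFrame({"FirstPokeCorrect": [1, 1, 0, 1]})
    return perf_window_calculator(df, window=2)  # array([ nan, 100., 50., 50.])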
# calculate the trials per minute that animals do by fitting a line
def trials_per_minute(trial_index, trial_start_timestamp):
"""
    Calculate the speed of the mouse, in trials per minute, by fitting a line.

    :param trial_index: pandas.core.series.Series with the trial index
    :param trial_start_timestamp: pandas.core.series.Series with the trial
        start time in seconds
    :return: the number of trials per minute
    """
    lrmodel = LinearRegression().fit(
        np.asarray(trial_index).reshape(-1, 1), trial_start_timestamp
    )
return 60 * 1 / lrmodel.coef_[0]
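# Illustrative sketch (not part of the original code): trials started every
# 10 seconds correspond to 6 trials per minute. Assumes numpy/pandas are
# imported as np/pd and sklearn's LinearRegression as above.
def _example_trials_per_minute():
    trial_index = pd.Series(np.arange(10))
    trial_start = pd.Series(np.arange(10) * 10.0)  # one trial every 10 s
    return trials_per_minute(trial_index, trial_start)  # ~6.0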
def speed_window_calculator(df, window):
"""
Calculate the speed over X trials
"""
trial_index = df.TrialIndex
trial_start_timestamp = df.TrialStartTimestamp
# create empty vector of the same size
speed_window = np.full(len(trial_index), np.nan)
for i in range(int(window / 2) - 1, len(speed_window) - int(window / 2)):
win_idx_low = i - int(window / 2) + 1
win_idx_high = i + int(window / 2)
speed_window[i] = trials_per_minute(
trial_index[win_idx_low:win_idx_high],
trial_start_timestamp[win_idx_low:win_idx_high],
)
return speed_window
def itis_calculator(df):
# df is a behavioural dataframe
# find inter-trial-intervals
itis = np.diff(df.TrialStartTimestamp)
# append a 0 at the beginning so it matches the trial indexes
# how long did the mouse take to initiate this trial from the previous?
itis = np.insert(itis, 0, 0)
return itis
def find_disengaged_trials(itis):
# itis is a vector of inter trial intervals
# this function returns indexes
disengaged_indexes = np.where(itis > 3 * np.median(itis))
return disengaged_indexes
def sigmoid_func(x, slope, bias, upper_lapse, lower_lapse):
return (upper_lapse - lower_lapse) / (
1 + np.exp(-slope * (x - bias))
) + lower_lapse
def linear_func(x, beta, alpha):
return beta * x + alpha
def fit_custom_sigmoid(difficulty, performance):
# scale the data
xdatasc = (difficulty - difficulty.mean()) / difficulty.std()
ydatasc = performance / 100
cost_func = lambda x: np.mean(
np.abs(sigmoid_func(xdatasc, x[0], x[1], x[2], x[3]) - ydatasc)
)
res = opt.minimize(cost_func, [-3, 0, 1, 0])
# rescale
slope = res.x[0] / difficulty.std()
bias = res.x[1] * difficulty.std() + difficulty.mean()
upper_lapse = res.x[2] * 100
lower_lapse = res.x[3] * 100
return slope, bias, upper_lapse, lower_lapse
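# Illustrative sketch (not part of the original code): fitting the
# four-parameter sigmoid to synthetic psychometric data. The returned tuple
# is (slope, bias, upper_lapse, lower_lapse), with the lapses expressed in
# percentage units; the exact values depend on the optimiser. Assumes numpy
# is imported as np.
def _example_fit_custom_sigmoid():
    difficulty = np.linspace(1, 100, 50)
    performance = 100.0 * sigmoid_func(difficulty, 0.1, 50.0, 0.95, 0.05)
    return fit_custom_sigmoid(difficulty, performance)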
def get_random_optolike_choices(df, n_times=100):
"""
    Take a dataframe that contains optostimulated trials and return, for each
    difficulty, choices sampled randomly from the non-stimulated trials,
    repeated n_times.
"""
normal_df, opto_df = splitOpto(df)
fake_opto_side_sel_samples = np.zeros(
(n_times, len(opto_df["SideSelected"]))
)
for i in range(n_times):
fake_opto_side_sel = np.empty_like(opto_df["SideSelected"])
for curr_diff in np.unique(opto_df["Difficulty"]):
diff_opto_mask = opto_df["Difficulty"] == curr_diff
diff_normal_mask = normal_df["Difficulty"] == curr_diff
population = normal_df["SideSelected"][diff_normal_mask]
fake_opto_side_sel[diff_opto_mask] = np.random.choice(
population, sum(diff_opto_mask)
)
fake_opto_side_sel_samples[i] = fake_opto_side_sel
return fake_opto_side_sel_samples
def get_mean_and_std_of_random_optolike_choices(df, n_times=100):
# deprecated
"""
gets a dataframe that has optostimulated trials, and
outputs, per difficulty, the mean and the std of
choices sampled randomly from the non-stimulated trials, n_times
"""
normal_df, opto_df = splitOpto(df)
available_difficulties = np.unique(opto_df["Difficulty"])
random_means = np.zeros_like(available_difficulties)
random_std = np.zeros_like(available_difficulties)
for k, curr_diff in enumerate(available_difficulties):
diff_opto_mask = opto_df["Difficulty"] == curr_diff
diff_normal_mask = normal_df["Difficulty"] == curr_diff
population = normal_df["SideSelected"][diff_normal_mask]
if len(population) == 0:
sys.exit("No normal trials with that difficulty")
fake_opto_side_sel_list = np.zeros(n_times)
for i in range(n_times):
fake_opto_side_sel_list[i] = np.nanmean(
np.random.choice(population, sum(diff_opto_mask))
)
random_means[k] = np.nanmean(fake_opto_side_sel_list)
random_std[k] = np.nanstd(fake_opto_side_sel_list)
df_to_return = pd.DataFrame(
{
"Difficulty": available_difficulties,
"Mean_of_choice": 100 * (random_means - 1),
"Std_of_choice": 100 * random_std,
}
)
return df_to_return
def get_choices(sideSelected, trialsDif):
"""
    Return the mean of choices per difficulty, expressed as the percentage of
    right-side choices (sides coded 1=left, 2=right).
"""
# Calculate performance
# Initialize values
difficulty = np.unique(trialsDif[~np.isnan(trialsDif)])
choice_mean = np.full(len(difficulty), np.nan)
for i in range(len(difficulty)):
if np.nansum(sideSelected[trialsDif == difficulty[i]]) > 0:
choice_mean[i] = 100 * (
np.nanmean(sideSelected[trialsDif == difficulty[i]]) - 1
)
else:
choice_mean[i] = np.nan
return difficulty, choice_mean
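# Illustrative sketch (not part of the original code): with sides coded
# 1=left and 2=right, four trials at two difficulties give 100% right choices
# at difficulty 2 and 50% at difficulty 98. Assumes numpy is imported as np.
def _example_get_choices():
    trials_dif = np.array([2.0, 2.0, 98.0, 98.0])
    side_selected = np.array([2.0, 2.0, 1.0, 2.0])
    return get_choices(side_selected, trials_dif)
    # (array([ 2., 98.]), array([100.,  50.]))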
def generate_eg(list_size, prob, labs):
# function to generate a list of experimental groups randomly
ltr = []
for i in range(list_size):
if random.random() < prob:
ltr.append(labs[0])
else:
ltr.append(labs[1])
return ltr
def get_shuffled_means_difference_df(df_colsel, hue_order, nsh):
# get the experimental group for each animal
exp_gr = [
df_colsel[df_colsel.AnimalID == x].ExperimentalGroup.unique()[0]
for x in df_colsel.AnimalID.unique()
]
# get the number of mice
n_an = len(exp_gr)
# get the probability of a mouse to be a control for this dataset
cb_prob = sum([x == hue_order[0] for x in exp_gr]) / n_an
# set random seed
np.random.seed(124321)
# calculate the differences of means by resampling
shuff_res = []
for i in range(nsh):
# shuffle the list of groups by assigning a probability for each mouse
# to be in a group based on the real ratio
exp_grs = generate_eg(n_an, cb_prob, hue_order)
        # create a dictionary
egs_dict = dict(zip(df_colsel.AnimalID.unique(), exp_grs))
# create a new column with the shuffled group
df_colsel["egs"] = [egs_dict[x] for x in df_colsel.AnimalID]
# calculate the differences and append
shuff_res.append(
df_colsel[df_colsel.egs == hue_order[1]]
.groupby("TrialIndexBinned")
.mean()["Performance"]
- df_colsel[df_colsel.egs == hue_order[0]]
.groupby("TrialIndexBinned")
.mean()["Performance"]
)
update_progress(i / nsh)
update_progress(1)
# return in a data frame format
return pd.concat(shuff_res)
def get_shuffled_means_difference_global_significance(
df_colsel, shrdf, quants_to_test, nsh, hue_order
):
# get the experimental group for each animal
exp_gr = [
df_colsel[df_colsel.AnimalID == x].ExperimentalGroup.unique()[0]
for x in df_colsel.AnimalID.unique()
]
# get the number of mice
n_an = len(exp_gr)
# get the probability of a mouse to be a control for this dataset
cb_prob = sum([x == hue_order[0] for x in exp_gr]) / n_an
# create an empty array to store results
global_sig = np.empty((nsh, len(quants_to_test)), dtype=bool)
# loop over shuffle data
for i in range(nsh):
# shuffle the list of groups by assigning a probability for each mouse
# to be in a group based on the real ratio
exp_grs = generate_eg(n_an, cb_prob, hue_order)
        # create a dictionary
egs_dict = dict(zip(df_colsel.AnimalID.unique(), exp_grs))
# create a new column with the shuffled group
df_colsel["egs"] = [egs_dict[x] for x in df_colsel.AnimalID]
# calculate the differences
sh_dif = (
df_colsel[df_colsel.egs == hue_order[1]]
.groupby("TrialIndexBinned")
.mean()["Performance"]
- df_colsel[df_colsel.egs == hue_order[0]]
.groupby("TrialIndexBinned")
.mean()["Performance"]
)
        # for each quantile band, record whether this shuffled difference
        # crosses the band at any point
for k, q in enumerate(quants_to_test):
global_sig[i, k] = any(
np.logical_or(
sh_dif > shrdf.groupby("TrialIndexBinned").quantile(q),
sh_dif < shrdf.groupby("TrialIndexBinned").quantile(1 - q),
)
)
update_progress(i / nsh)
update_progress(1)
return global_sig
def get_random_choices_for_optostimulation(df, ntimes):
data = np.empty([len(pd.unique(df["SessionID"])), 3], dtype=object)
for i, session in enumerate(pd.unique(df["SessionID"])):
# generate the random dataset, and save it to a general
# dataframe for later use
session_df = df[df["SessionID"] == session]
roc = get_random_optolike_choices(df=session_df, n_times=ntimes)
_, odf = splitOpto(session_df)
roc_ds = np.apply_along_axis(
get_choices, 1, roc, trialsDif=odf["Difficulty"]
)
avail_diffs = roc_ds[0, 0, :]
rand_choices_ds = roc_ds[:, 1, :]
# fill
data[i] = [session, avail_diffs, rand_choices_ds]
update_progress(
i / len(pd.unique(df["SessionID"])), head="Generating dataset"
)
random_opto_df = pd.DataFrame(
data, columns=["SessionID", "Difficulties", "Random_choices"]
)
update_progress(1)
return random_opto_df
def calculate_differences_with_random_optostimulation(
otl_df, random_opto_df, n_times
):
# Calculate the differences between the random choices and the
# opto ones to measure the bias
random_opto_df["bias"] = None
random_opto_df["bias_mean"] = None
random_opto_df["bias_std"] = None
tot_sess = len(pd.unique(random_opto_df["SessionID"]))
for sc, session in enumerate(pd.unique(random_opto_df["SessionID"])):
session_idx = random_opto_df.index[
random_opto_df.SessionID == session
].item()
# get data for opto
opto_df = otl_df[
(otl_df["SessionID"] == session) & (otl_df["OptoStim"] == 1)
]
trialsDif = np.array(opto_df["TrialHighPerc"])
sideSelected = np.array(opto_df["FirstPoke"])
difficulty, opto_perf = get_choices(sideSelected, trialsDif)
# get data for the shuffles
updown_list = np.empty(n_times)
for i in range(n_times):
y_vals = random_opto_df.loc[session_idx].Random_choices[i]
# calculate difference and normalize
updown_list[i] = np.sum(y_vals - opto_perf) / len(difficulty)
random_opto_df.at[session_idx, "bias"] = updown_list
random_opto_df.at[session_idx, "bias_mean"] = np.nanmean(updown_list)
random_opto_df.at[session_idx, "bias_std"] = np.nanstd(updown_list)
update_progress((sc + 1) / tot_sess, head="Getting differences")
return random_opto_df
def add_info_and_contralateral_bias(oal_df, random_opto_df):
# add the side in which stimulation happened,
# and translate the bias to contralateral / ipsilateral
random_opto_df["stimulated_side"] = None
random_opto_df["contralateral_bias"] = None
# Get a column with the mouse name
random_opto_df["AnimalID"] = None
# And the type of mouse
random_opto_df["Genotype"] = None
tot_sess = len(pd.unique(random_opto_df["SessionID"]))
for sc, session in enumerate(pd.unique(random_opto_df["SessionID"])):
session_idx = random_opto_df.index[
random_opto_df.SessionID == session
].item()
# get information
stim_side = (
oal_df[oal_df["SessionID"] == session].Stimulation.unique().item()
)
# add info
random_opto_df.at[session_idx, "stimulated_side"] = stim_side
mouse_name = random_opto_df.loc[session_idx].SessionID.split(" ")[0]
random_opto_df.at[session_idx, "AnimalID"] = mouse_name
random_opto_df.at[session_idx, "Genotype"] = mouse_name.split("-")[0]
        # bias is the normal minus the opto.
        # This means that a positive value is a leftwards bias
        # of the opto trials.
        # This already works as a contralateral bias for the trials in which
        # optostimulation occurs on the right side, so
        # flip the sign of the ones where stimulation happens on the left
if stim_side == "Right":
random_opto_df.at[
session_idx, "contralateral_bias"
] = random_opto_df.at[session_idx, "bias_mean"]
elif stim_side == "Left":
random_opto_df.at[
session_idx, "contralateral_bias"
] = -random_opto_df.at[session_idx, "bias_mean"]
elif stim_side == "Both":
random_opto_df.at[session_idx, "contralateral_bias"] = np.nan
else:
print("Something wrong")
update_progress((sc + 1) / tot_sess, head="Adding info")
return random_opto_df
def get_random_dataframe_for_optostimulation(oal_df, n_times):
# Generate random optostimulation choices for every session
random_opto_df = get_random_choices_for_optostimulation(oal_df, n_times)
# Calculate differences with the stimulated trials
random_opto_df = calculate_differences_with_random_optostimulation(
oal_df, random_opto_df, n_times
)
# add the side in which stimulation happened, mouse name and genotype,
# and translate the bias to contralateral / ipsilateral
random_opto_df = add_info_and_contralateral_bias(oal_df, random_opto_df)
print("Done computing the random dataframe")
return random_opto_df
def difficulty_for_bias(mouse_line, stimulated_side):
# function to determine which difficulty to look at
# create a logic table in order to find which difficulty
# to look at when calculating the bias
    # this depends on the contingency, on the mouse line,
    # and on the fiber placement
# expected movements given the sound (difficulty)
set_contingency = {"Right": 2.0, "Left": 98.0}
# expectation (and observation) of bias
bias_expectation = {"D1opto": "ipsi", "D2opto": "contra"}
# logical table for what to expect given ipsi/contra
    # e.g. if you expect an ipsi bias and the fiber is on the right,
    # you want to look at the left movements
# -----------------------------
# | Right Left
# |-----------------
# ipsi | Left Right
# contra | Right Left
# -----------------------------
logic_rows = ["ipsi", "contra"]
logic_cols = ["Right", "Left"]
logic_table = [["Left", "Right"], ["Right", "Left"]]
expected_bias = bias_expectation[mouse_line]
table_row = logic_rows.index(expected_bias)
table_col = logic_cols.index(stimulated_side)
affected_side = logic_table[table_row][table_col]
return set_contingency[affected_side]
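# Illustrative sketch (not part of the original code): for a D1opto mouse the
# expected bias is ipsilateral, so with the fiber on the right the relevant
# movements are to the left, i.e. the 98% ("Left") trial type.
def _example_difficulty_for_bias():
    return difficulty_for_bias("D1opto", "Right")  # 98.0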
def fiber_unique_id(panda_series):
return "_".join([panda_series.AnimalID, panda_series.stimulated_side])
def significance_calculator(panda_series):
cbe = panda_series.bias_exp
# get contra value
if panda_series.stimulated_side == "Left":
cbe = [-x for x in cbe]
if panda_series.Genotype == "D1opto":
n_sig = np.sum([x > 0 for x in cbe])
if panda_series.Genotype == "D2opto":
n_sig = np.sum([x < 0 for x in cbe])
return n_sig / len(cbe)
def get_simplified_dataframe_for_optostimulation(random_opto_df):
# columns: animal_id | genotype
# | session_performance | contralateral_bias_exp
animal_id_list = []
genotype_list = []
session_performance_list = []
contra_bias_list = []
fibers = pd.unique(random_opto_df.fiber_id)
for animal in fibers:
animal_rdf = random_opto_df[random_opto_df.fiber_id == animal]
for session in pd.unique(animal_rdf.SessionID):
session_idx = animal_rdf.index[
animal_rdf.SessionID == session
].item()
sp = animal_rdf.loc[session_idx].session_performance
cb = animal_rdf.loc[session_idx].contralateral_bias_exp
session_performance_list.append(sp)
contra_bias_list.append(cb)
genotype_list.append(animal_rdf.loc[session_idx].Genotype)
animal_id_list.append(animal)
simplified_df = pd.DataFrame(
{
"animal_id": animal_id_list,
"genotype": genotype_list,
"session_performance": session_performance_list,
"contralateral_bias": contra_bias_list,
}
)
return simplified_df
def get_fit_coefs(df):
x = df.session_performance
y = df.contralateral_bias
return np.polyfit(x, y, 1)
def get_dicctionary_of_regressions_optostimulation(
simplified_df, shuffle_times=100, xs=range(50, 100)
):
# calculate slopes and generate shuffles of biases
# per mouse to get the significance for each individual
    # save all in a dictionary
reg_dicc = {
"animals": [],
"genotypes": [],
"reg_coefs": [],
"fits": [],
"predicted_matrices": [],
"shuffled_coefficients": [],
}
for animal in simplified_df.animal_id.unique():
reg_df = simplified_df[simplified_df.animal_id == animal].copy()
slope, intercept = get_fit_coefs(reg_df)
        # get a list of coefficients for shuffled dataframes
shuffled_slopes = np.zeros(shuffle_times)
shuffled_int = np.zeros(shuffle_times)
# generate a matrix of predictions
predicted_matrix = np.zeros([shuffle_times, len(xs)])
for i in range(shuffle_times):
# shuffle dataframe
shuffled_df = reg_df.copy()
np.random.shuffle(shuffled_df.contralateral_bias.values)
# get coefficients
shuffled_slopes[i], shuffled_int[i] = get_fit_coefs(shuffled_df)
# fill matrix
predicted_matrix[i, :] = shuffled_int[i] + shuffled_slopes[i] * xs
        # fill dictionary
reg_dicc["animals"].append(animal)
reg_dicc["genotypes"].append(
simplified_df[simplified_df.animal_id == animal].genotype.unique()[
0
]
)
reg_dicc["reg_coefs"].append([slope, intercept])
reg_dicc["fits"].append(intercept + slope * xs)
reg_dicc["predicted_matrices"].append(predicted_matrix)
reg_dicc["shuffled_coefficients"].append(
[shuffled_slopes, shuffled_int]
)
return reg_dicc
def get_binned_dataframe_for_optostimulation(
random_opto_df, significance=0.05
):
    # save all in a dictionary
binned_dict = {
"genotype": [],
"performance_window": [],
"contra_biases": [],
"contra_bias_mean": [],
"contra_bias_std": [],
"contra_bias_upper_percentile": [],
"contra_bias_lower_percentile": [],
"n_sessions": [],
"n_animals": [],
"significance_expected_bias": [],
"number_of_significant_sessions": [],
}
# binned_window = 10
performance_windows = [
[0, 60],
[60, 65],
[65, 70],
[70, 75],
[75, 80],
[80, 85],
[85, 90],
[90, 100],
]
for genot in ["D1opto", "D2opto"]:
# subselect the dataframe
genotdf = random_opto_df[random_opto_df.Genotype == genot].copy()
for bracket in performance_windows:
# find sessions belonging to that bracket
bracket_mask = np.logical_and(
bracket[0] <= genotdf.session_performance,
genotdf.session_performance < bracket[1],
)
subdf = genotdf[bracket_mask].copy()
# extract the contralateral biases of random choices
contralateral_bias_exp_merge = []
n_sess = subdf.shape[0]
n_ans = len(np.unique(subdf.AnimalID))
for i in range(n_sess):
cbe = subdf.iloc[i].bias_exp
# get contra value
if subdf.iloc[i].stimulated_side == "Left":
cbe = [-x for x in cbe]
contralateral_bias_exp_merge.append(cbe)
# flatten
contralateral_bias_exp_merge = [
item
for sublist in contralateral_bias_exp_merge
for item in sublist
]
# append to dicc
binned_dict["genotype"].append(genot)
binned_dict["performance_window"].append(bracket[1] - 2.5)
binned_dict["contra_biases"].append(contralateral_bias_exp_merge)
binned_dict["contra_bias_mean"].append(
np.mean(contralateral_bias_exp_merge)
)
binned_dict["contra_bias_std"].append(
np.std(contralateral_bias_exp_merge)
)
if len(contralateral_bias_exp_merge) > 1:
binned_dict["contra_bias_upper_percentile"].append(
np.percentile(contralateral_bias_exp_merge, 97.5)
)
binned_dict["contra_bias_lower_percentile"].append(
np.percentile(contralateral_bias_exp_merge, 2.5)
)
else:
binned_dict["contra_bias_upper_percentile"].append(np.nan)
binned_dict["contra_bias_lower_percentile"].append(np.nan)
binned_dict["n_sessions"].append(n_sess)
binned_dict["n_animals"].append(n_ans)
if genot == "D1opto":
n_sig = np.sum([x > 0 for x in contralateral_bias_exp_merge])
if genot == "D2opto":
n_sig = np.sum([x < 0 for x in contralateral_bias_exp_merge])
sig = n_sig / len(contralateral_bias_exp_merge)
binned_dict["significance_expected_bias"].append(sig)
# calculate the number of sessions that are significant!
significant_sessions_list = []
for i in range(n_sess):
cbe = subdf.iloc[i].bias_exp
# get contra value
if subdf.iloc[i].stimulated_side == "Left":
cbe = [-x for x in cbe]
if genot == "D1opto":
n_sig = np.sum([x > 0 for x in cbe])
if genot == "D2opto":
n_sig = np.sum([x < 0 for x in cbe])
sig = n_sig / len(cbe)
if sig < significance:
significant_sessions_list.append(True)
else:
significant_sessions_list.append(False)
sig = np.sum(significant_sessions_list)
binned_dict["number_of_significant_sessions"].append(sig)
# create df
binned_df = pd.DataFrame(binned_dict)
# add lower and upper std
binned_df["lower_std"] = (
binned_df.contra_bias_mean - binned_df.contra_bias_std
)
binned_df["upper_std"] = (
binned_df.contra_bias_mean + binned_df.contra_bias_std
)
return binned_df
def get_general_right_bias(df_one, df_two):
"returns the general bias to the right, between df_one and df_two"
# mean choices for each data frame for each difficulty
tdone = np.array(df_one["TrialHighPerc"])
ssone = np.array(df_one["FirstPoke"])
_, perf_one = get_choices(ssone, tdone)
tdtwo = np.array(df_two["TrialHighPerc"])
sstwo = np.array(df_two["FirstPoke"])
_, perf_two = get_choices(sstwo, tdtwo)
return np.mean(perf_one) - np.mean(perf_two)
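# Illustrative sketch (not part of the original code): if df_one contains only
# right choices and df_two only left choices at the same difficulty, the bias
# is +100. Assumes pandas is imported as pd.
def _example_get_general_right_bias():
    df_one = pd.DataFrame({"TrialHighPerc": [50.0, 50.0], "FirstPoke": [2.0, 2.0]})
    df_two = pd.DataFrame({"TrialHighPerc": [50.0, 50.0], "FirstPoke": [1.0, 1.0]})
    return get_general_right_bias(df_one, df_two)  # 100.0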
def get_random_biases(df, n_times, it, aot):
# create array
rblist = np.zeros(n_times)
for i in range(n_times):
# shuffle TrialIndexes
df.TrialIndex = df.TrialIndex.sample(frac=1).values
# calculate bias
rblist[i] = get_general_right_bias(
df[df.TrialIndex < it], df[df.TrialIndex > aot]
)
return rblist
def get_dopamine_optostimulation_differences_dataframe(
dao_df, ini_trials, ao_trials, n_times
):
# Generate another dataset for every session containing information
# about the difference between
# the optostimulated trials and the normal ones,
# as well as random differences, calculated
    # by shuffling the trial indexes
BRS = ["tStr", "NAc"]
PS = ["Left", "Right"]
PI = ["Center", "Side"]
CondList = [
(dao_df["TrialIndex"] < ini_trials),
(dao_df["TrialIndex"] > ao_trials),
]
cols = [
"AnimalID",
"SessionID",
"Ntrials",
"Protocol",
"Stim",
"FiberSide",
"FiberArea",
"StimSide",
"StimPort",
"Contralateral",
"InitialBias",
"Bias",
"BiasToStimPort",
"RandomBiases",
"RandomBiasMean",
"RandomBiasStd",
]
data = np.empty(
[len(pd.unique(dao_df["SessionID"])), len(cols)], dtype=object
)
for i, sessionid in enumerate(pd.unique(dao_df["SessionID"])):
# get dataframe of the session
session_df = dao_df[dao_df["SessionID"] == sessionid].copy()
# get animal name
animalid = session_df.AnimalID.unique()[0]
# get number of trials
ntrials = session_df.shape[0]
# protocol
protocol = session_df.Protocol.unique()[0]
# is it a stimulated session?
stim = session_df.Stimulation.unique()[0] != "NoStimulation"
# which fiber was plugged in
fiberside = session_df.Stimulation.unique()[0]
# which brain area is this fiber over
fiberarea = BRS[int(session_df.iloc[0].FullGUI["FiberLocation"]) - 1]
# which one of the side ports, or trial type, was stimulated
stimside = PS[int(session_df.iloc[0].FullGUI["JOPSide"]) - 1]
        # in which one of the ports stimulation occurred
stimport = PI[int(session_df.iloc[0].FullGUI["OptoState"]) - 1]
# is the fiber contralateral to the port
contralateral = True
if (fiberside == stimside) or fiberside == "Both":
contralateral = False
# what is the initial bias of the mouse in trials before stimulation
ini_sess = session_df[session_df.TrialIndex < ini_trials].copy()
initialbias = np.mean(
get_choices(ini_sess["FirstPoke"], ini_sess["TrialHighPerc"])[1]
)
# what is the total bias of that session after opto
bias = get_general_right_bias(
session_df[CondList[1]], session_df[CondList[0]]
)
# is this bias positive towards the stimulated port?
if stimside == "Right":
biastostimport = bias
if stimside == "Left":
biastostimport = -bias
# calculate random biases
randombiases = get_random_biases(
session_df, n_times, ini_trials, ao_trials
)
# random mean
randombiasmean = np.mean(randombiases)
# random std
randombiasstd = np.std(randombiases)
# fill
data[i] = [
animalid,
sessionid,
ntrials,
protocol,
stim,
fiberside,
fiberarea,
stimside,
stimport,
contralateral,
initialbias,
bias,
biastostimport,
randombiases,
randombiasmean,
randombiasstd,
]
update_progress(i / len(pd.unique(dao_df["SessionID"])))
# create dataframe
opto_df = pd.DataFrame(data, columns=cols)
update_progress(1)
return opto_df
def find_indexes_of_repeated_cases(opto_df_sel, same_columns):
# Find indexes of repeated cases
equal_indexes = []
for index in opto_df_sel.index:
data = opto_df_sel.loc[index][same_columns].values
i_list = []
for i in opto_df_sel.index:
if np.array_equal(data, opto_df_sel.loc[i][same_columns].values):
i_list.append(i)
if len(i_list) > 1:
if i_list not in equal_indexes:
equal_indexes.append(i_list)
return equal_indexes
def merge_repeated_cases_for_dopamine_optostimulation(opto_df_sel):
# Find indexes of repeated cases
same_columns = [
"AnimalID",
"FiberSide",
"FiberArea",
"StimSide",
"StimPort",
]
equal_indexes = find_indexes_of_repeated_cases(opto_df_sel, same_columns)
# Combine those cases
for case in equal_indexes:
sub_df = opto_df_sel.loc[case].copy()
# create new instance to add to the dataframe,
# initiating it in the first index of the set
new_element = sub_df.iloc[0].copy()
# change relevant values
new_element.SessionID = "merge"
new_element.Ntrials = np.mean(sub_df.Ntrials.values)
new_element.Protocol = "merge"
new_element.InitialBias = np.nan
new_element.Bias = np.nan
new_element.BiasToStimPort = np.mean(sub_df.BiasToStimPort.values)
new_element.RandomBiases = np.concatenate(sub_df.RandomBiases.values)
new_element.RandomBiasMean = np.mean(new_element.RandomBiases)
new_element.RandomBiasStd = np.std(new_element.RandomBiases)
# remove old indexes
opto_df_sel.drop(case, inplace=True)
        # add new row (DataFrame.append was removed in pandas 2.0)
        opto_df_sel = pd.concat([opto_df_sel, new_element.to_frame().T])
opto_df_sel.sort_index(inplace=True)
return opto_df_sel
|
PypiClean
|
/azure_mgmt_containerservice-26.0.0-py3-none-any.whl/azure/mgmt/containerservice/v2020_09_01/models/_models_py3.py
|
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class SubResource(_serialization.Model):
"""Reference to another subresource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: The name of the resource that is unique within a resource group. This name can be
used to access the resource.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class AgentPool(SubResource): # pylint: disable=too-many-instance-attributes
"""Agent Pool.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: The name of the resource that is unique within a resource group. This name can be
used to access the resource.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:vartype count: int
:ivar vm_size: Size of agent VMs. Known values are: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", and "Standard_NV6".
:vartype vm_size: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMSizeTypes
:ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine
in this master/agent pool. If you specify 0, it will apply the default osDisk size according to
the vmSize specified.
:vartype os_disk_size_gb: int
:ivar os_disk_type: OS disk type to be used for machines in a given agent pool. Allowed values
are 'Ephemeral' and 'Managed'. Defaults to 'Managed'. May not be changed after creation. Known
values are: "Managed" and "Ephemeral".
:vartype os_disk_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSDiskType
:ivar vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:vartype vnet_subnet_id: str
:ivar max_pods: Maximum number of pods that can run on a node.
:vartype max_pods: int
:ivar os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to
Linux. Known values are: "Linux" and "Windows".
:vartype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:ivar max_count: Maximum number of nodes for auto-scaling.
:vartype max_count: int
:ivar min_count: Minimum number of nodes for auto-scaling.
:vartype min_count: int
:ivar enable_auto_scaling: Whether to enable auto-scaler.
:vartype enable_auto_scaling: bool
:ivar type_properties_type: AgentPoolType represents types of an agent pool. Known values are:
"VirtualMachineScaleSets" and "AvailabilitySet".
:vartype type_properties_type: str or
~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolType
:ivar mode: AgentPoolMode represents mode of an agent pool. Known values are: "System" and
"User".
:vartype mode: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolMode
:ivar orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:vartype orchestrator_version: str
:ivar node_image_version: Version of node image.
:vartype node_image_version: str
:ivar upgrade_settings: Settings for upgrading the agentpool.
:vartype upgrade_settings:
~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolUpgradeSettings
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar power_state: Describes whether the Agent Pool is Running or Stopped.
:vartype power_state: ~azure.mgmt.containerservice.v2020_09_01.models.PowerState
:ivar availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:vartype availability_zones: list[str]
:ivar enable_node_public_ip: Enable public IP for nodes.
:vartype enable_node_public_ip: bool
:ivar scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set
priority. Default to regular. Known values are: "Spot" and "Regular".
:vartype scale_set_priority: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetPriority
:ivar scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy
for Spot virtual machine scale set. Default to Delete. Known values are: "Delete" and
"Deallocate".
:vartype scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetEvictionPolicy
:ivar spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to
pay in US Dollars. Possible values are any decimal value greater than zero or -1 which
indicates default price to be up-to on-demand.
:vartype spot_max_price: float
:ivar tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
:vartype tags: dict[str, str]
:ivar node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:vartype node_labels: dict[str, str]
:ivar node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:vartype node_taints: list[str]
:ivar proximity_placement_group_id: The ID for Proximity Placement Group.
:vartype proximity_placement_group_id: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"os_disk_size_gb": {"maximum": 1023, "minimum": 0},
"node_image_version": {"readonly": True},
"provisioning_state": {"readonly": True},
"power_state": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"count": {"key": "properties.count", "type": "int"},
"vm_size": {"key": "properties.vmSize", "type": "str"},
"os_disk_size_gb": {"key": "properties.osDiskSizeGB", "type": "int"},
"os_disk_type": {"key": "properties.osDiskType", "type": "str"},
"vnet_subnet_id": {"key": "properties.vnetSubnetID", "type": "str"},
"max_pods": {"key": "properties.maxPods", "type": "int"},
"os_type": {"key": "properties.osType", "type": "str"},
"max_count": {"key": "properties.maxCount", "type": "int"},
"min_count": {"key": "properties.minCount", "type": "int"},
"enable_auto_scaling": {"key": "properties.enableAutoScaling", "type": "bool"},
"type_properties_type": {"key": "properties.type", "type": "str"},
"mode": {"key": "properties.mode", "type": "str"},
"orchestrator_version": {"key": "properties.orchestratorVersion", "type": "str"},
"node_image_version": {"key": "properties.nodeImageVersion", "type": "str"},
"upgrade_settings": {"key": "properties.upgradeSettings", "type": "AgentPoolUpgradeSettings"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"power_state": {"key": "properties.powerState", "type": "PowerState"},
"availability_zones": {"key": "properties.availabilityZones", "type": "[str]"},
"enable_node_public_ip": {"key": "properties.enableNodePublicIP", "type": "bool"},
"scale_set_priority": {"key": "properties.scaleSetPriority", "type": "str"},
"scale_set_eviction_policy": {"key": "properties.scaleSetEvictionPolicy", "type": "str"},
"spot_max_price": {"key": "properties.spotMaxPrice", "type": "float"},
"tags": {"key": "properties.tags", "type": "{str}"},
"node_labels": {"key": "properties.nodeLabels", "type": "{str}"},
"node_taints": {"key": "properties.nodeTaints", "type": "[str]"},
"proximity_placement_group_id": {"key": "properties.proximityPlacementGroupID", "type": "str"},
}
def __init__( # pylint: disable=too-many-locals
self,
*,
count: Optional[int] = None,
vm_size: Optional[Union[str, "_models.ContainerServiceVMSizeTypes"]] = None,
os_disk_size_gb: Optional[int] = None,
os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None,
vnet_subnet_id: Optional[str] = None,
max_pods: Optional[int] = None,
os_type: Union[str, "_models.OSType"] = "Linux",
max_count: Optional[int] = None,
min_count: Optional[int] = None,
enable_auto_scaling: Optional[bool] = None,
type_properties_type: Optional[Union[str, "_models.AgentPoolType"]] = None,
mode: Optional[Union[str, "_models.AgentPoolMode"]] = None,
orchestrator_version: Optional[str] = None,
upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None,
availability_zones: Optional[List[str]] = None,
enable_node_public_ip: Optional[bool] = None,
scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular",
scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete",
spot_max_price: float = -1,
tags: Optional[Dict[str, str]] = None,
node_labels: Optional[Dict[str, str]] = None,
node_taints: Optional[List[str]] = None,
proximity_placement_group_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:paramtype count: int
:keyword vm_size: Size of agent VMs. Known values are: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", and "Standard_NV6".
:paramtype vm_size: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMSizeTypes
:keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
machine in this master/agent pool. If you specify 0, it will apply the default osDisk size
according to the vmSize specified.
:paramtype os_disk_size_gb: int
:keyword os_disk_type: OS disk type to be used for machines in a given agent pool. Allowed
values are 'Ephemeral' and 'Managed'. Defaults to 'Managed'. May not be changed after creation.
Known values are: "Managed" and "Ephemeral".
:paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSDiskType
:keyword vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:paramtype vnet_subnet_id: str
:keyword max_pods: Maximum number of pods that can run on a node.
:paramtype max_pods: int
:keyword os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default
to Linux. Known values are: "Linux" and "Windows".
:paramtype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:keyword max_count: Maximum number of nodes for auto-scaling.
:paramtype max_count: int
:keyword min_count: Minimum number of nodes for auto-scaling.
:paramtype min_count: int
:keyword enable_auto_scaling: Whether to enable auto-scaler.
:paramtype enable_auto_scaling: bool
:keyword type_properties_type: AgentPoolType represents types of an agent pool. Known values
are: "VirtualMachineScaleSets" and "AvailabilitySet".
:paramtype type_properties_type: str or
~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolType
:keyword mode: AgentPoolMode represents mode of an agent pool. Known values are: "System" and
"User".
:paramtype mode: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolMode
:keyword orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:paramtype orchestrator_version: str
:keyword upgrade_settings: Settings for upgrading the agentpool.
:paramtype upgrade_settings:
~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolUpgradeSettings
:keyword availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:paramtype availability_zones: list[str]
:keyword enable_node_public_ip: Enable public IP for nodes.
:paramtype enable_node_public_ip: bool
:keyword scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set
priority. Default to regular. Known values are: "Spot" and "Regular".
:paramtype scale_set_priority: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetPriority
:keyword scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction
policy for Spot virtual machine scale set. Default to Delete. Known values are: "Delete" and
"Deallocate".
:paramtype scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetEvictionPolicy
:keyword spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing
to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which
indicates default price to be up-to on-demand.
:paramtype spot_max_price: float
:keyword tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
:paramtype tags: dict[str, str]
:keyword node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:paramtype node_labels: dict[str, str]
:keyword node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:paramtype node_taints: list[str]
:keyword proximity_placement_group_id: The ID for Proximity Placement Group.
:paramtype proximity_placement_group_id: str
"""
super().__init__(**kwargs)
self.count = count
self.vm_size = vm_size
self.os_disk_size_gb = os_disk_size_gb
self.os_disk_type = os_disk_type
self.vnet_subnet_id = vnet_subnet_id
self.max_pods = max_pods
self.os_type = os_type
self.max_count = max_count
self.min_count = min_count
self.enable_auto_scaling = enable_auto_scaling
self.type_properties_type = type_properties_type
self.mode = mode
self.orchestrator_version = orchestrator_version
self.node_image_version = None
self.upgrade_settings = upgrade_settings
self.provisioning_state = None
self.power_state = None
self.availability_zones = availability_zones
self.enable_node_public_ip = enable_node_public_ip
self.scale_set_priority = scale_set_priority
self.scale_set_eviction_policy = scale_set_eviction_policy
self.spot_max_price = spot_max_price
self.tags = tags
self.node_labels = node_labels
self.node_taints = node_taints
self.proximity_placement_group_id = proximity_placement_group_id
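# Illustrative sketch (not part of the generated SDK code): building an
# AgentPool model locally with placeholder values; the client library normally
# constructs these objects when deserializing service responses.
def _example_agent_pool() -> "AgentPool":
    return AgentPool(
        count=3,
        vm_size="Standard_DS2_v2",
        os_type="Linux",
        mode="System",
        max_pods=110,
    )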
class AgentPoolAvailableVersions(_serialization.Model):
"""The list of available versions for an agent pool.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Id of the agent pool available versions.
:vartype id: str
:ivar name: Name of the agent pool available versions.
:vartype name: str
:ivar type: Type of the agent pool available versions.
:vartype type: str
:ivar agent_pool_versions: List of versions available for agent pool.
:vartype agent_pool_versions:
list[~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"agent_pool_versions": {
"key": "properties.agentPoolVersions",
"type": "[AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]",
},
}
def __init__(
self,
*,
agent_pool_versions: Optional[List["_models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem"]] = None,
**kwargs: Any
) -> None:
"""
:keyword agent_pool_versions: List of versions available for agent pool.
:paramtype agent_pool_versions:
list[~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.agent_pool_versions = agent_pool_versions
class AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem(_serialization.Model):
"""AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem.
:ivar default: Whether this version is the default agent pool version.
:vartype default: bool
:ivar kubernetes_version: Kubernetes version (major, minor, patch).
:vartype kubernetes_version: str
:ivar is_preview: Whether Kubernetes version is currently in preview.
:vartype is_preview: bool
"""
_attribute_map = {
"default": {"key": "default", "type": "bool"},
"kubernetes_version": {"key": "kubernetesVersion", "type": "str"},
"is_preview": {"key": "isPreview", "type": "bool"},
}
def __init__(
self,
*,
default: Optional[bool] = None,
kubernetes_version: Optional[str] = None,
is_preview: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
:keyword default: Whether this version is the default agent pool version.
:paramtype default: bool
:keyword kubernetes_version: Kubernetes version (major, minor, patch).
:paramtype kubernetes_version: str
:keyword is_preview: Whether Kubernetes version is currently in preview.
:paramtype is_preview: bool
"""
super().__init__(**kwargs)
self.default = default
self.kubernetes_version = kubernetes_version
self.is_preview = is_preview
class AgentPoolListResult(_serialization.Model):
"""The response from the List Agent Pools operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of agent pools.
:vartype value: list[~azure.mgmt.containerservice.v2020_09_01.models.AgentPool]
:ivar next_link: The URL to get the next set of agent pool results.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[AgentPool]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.AgentPool"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The list of agent pools.
:paramtype value: list[~azure.mgmt.containerservice.v2020_09_01.models.AgentPool]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class AgentPoolUpgradeProfile(_serialization.Model):
"""The list of available upgrades for an agent pool.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the agent pool upgrade profile.
:vartype id: str
:ivar name: Name of the agent pool upgrade profile.
:vartype name: str
:ivar type: Type of the agent pool upgrade profile.
:vartype type: str
:ivar kubernetes_version: Kubernetes version (major, minor, patch). Required.
:vartype kubernetes_version: str
:ivar os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to
Linux. Known values are: "Linux" and "Windows".
:vartype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:ivar upgrades: List of orchestrator types and versions available for upgrade.
:vartype upgrades:
list[~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolUpgradeProfilePropertiesUpgradesItem]
:ivar latest_node_image_version: LatestNodeImageVersion is the latest AKS supported node image
version.
:vartype latest_node_image_version: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"kubernetes_version": {"required": True},
"os_type": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"},
"os_type": {"key": "properties.osType", "type": "str"},
"upgrades": {"key": "properties.upgrades", "type": "[AgentPoolUpgradeProfilePropertiesUpgradesItem]"},
"latest_node_image_version": {"key": "properties.latestNodeImageVersion", "type": "str"},
}
def __init__(
self,
*,
kubernetes_version: str,
os_type: Union[str, "_models.OSType"] = "Linux",
upgrades: Optional[List["_models.AgentPoolUpgradeProfilePropertiesUpgradesItem"]] = None,
latest_node_image_version: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword kubernetes_version: Kubernetes version (major, minor, patch). Required.
:paramtype kubernetes_version: str
:keyword os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default
to Linux. Known values are: "Linux" and "Windows".
:paramtype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:keyword upgrades: List of orchestrator types and versions available for upgrade.
:paramtype upgrades:
list[~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolUpgradeProfilePropertiesUpgradesItem]
:keyword latest_node_image_version: LatestNodeImageVersion is the latest AKS supported node
image version.
:paramtype latest_node_image_version: str
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.kubernetes_version = kubernetes_version
self.os_type = os_type
self.upgrades = upgrades
self.latest_node_image_version = latest_node_image_version


class AgentPoolUpgradeProfilePropertiesUpgradesItem(_serialization.Model):
"""AgentPoolUpgradeProfilePropertiesUpgradesItem.
:ivar kubernetes_version: Kubernetes version (major, minor, patch).
:vartype kubernetes_version: str
:ivar is_preview: Whether Kubernetes version is currently in preview.
:vartype is_preview: bool
"""
_attribute_map = {
"kubernetes_version": {"key": "kubernetesVersion", "type": "str"},
"is_preview": {"key": "isPreview", "type": "bool"},
}
def __init__(
self, *, kubernetes_version: Optional[str] = None, is_preview: Optional[bool] = None, **kwargs: Any
) -> None:
"""
:keyword kubernetes_version: Kubernetes version (major, minor, patch).
:paramtype kubernetes_version: str
:keyword is_preview: Whether Kubernetes version is currently in preview.
:paramtype is_preview: bool
"""
super().__init__(**kwargs)
self.kubernetes_version = kubernetes_version
self.is_preview = is_preview


class AgentPoolUpgradeSettings(_serialization.Model):
"""Settings for upgrading an agentpool.
:ivar max_surge: Count or percentage of additional nodes to be added during upgrade. If empty
uses AKS default.
:vartype max_surge: str
"""
_attribute_map = {
"max_surge": {"key": "maxSurge", "type": "str"},
}
def __init__(self, *, max_surge: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword max_surge: Count or percentage of additional nodes to be added during upgrade. If
empty uses AKS default.
:paramtype max_surge: str
"""
super().__init__(**kwargs)
self.max_surge = max_surge
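# Illustrative usage sketch, not part of the generated SDK code: AgentPoolUpgradeSettings
# accepts max_surge either as a node count ("1") or as a percentage ("33%"), both passed
# as plain strings. Guarded so it only runs when this module is executed directly.
if __name__ == "__main__":  # pragma: no cover - example only
    _surge_by_percentage = AgentPoolUpgradeSettings(max_surge="33%")
    _surge_by_count = AgentPoolUpgradeSettings(max_surge="1")
    print(_surge_by_percentage.max_surge, _surge_by_count.max_surge)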


class BaseManagedCluster(_serialization.Model):  # pylint: disable=too-many-instance-attributes
"""BaseManagedCluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar identity: The identity of the managed cluster, if configured.
:vartype identity: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterIdentity
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar power_state: Represents the Power State of the cluster.
:vartype power_state: ~azure.mgmt.containerservice.v2020_09_01.models.PowerState
:ivar max_agent_pools: The max number of agent pools for the managed cluster.
:vartype max_agent_pools: int
:ivar kubernetes_version: Version of Kubernetes specified when creating the managed cluster.
:vartype kubernetes_version: str
:ivar dns_prefix: DNS prefix specified when creating the managed cluster.
:vartype dns_prefix: str
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
:ivar private_fqdn: FQDN of private cluster.
:vartype private_fqdn: str
:ivar agent_pool_profiles: Properties of the agent pool.
:vartype agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAgentPoolProfile]
:ivar linux_profile: Profile for Linux VMs in the container service cluster.
:vartype linux_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceLinuxProfile
:ivar windows_profile: Profile for Windows VMs in the container service cluster.
:vartype windows_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterWindowsProfile
:ivar service_principal_profile: Information about a service principal identity for the cluster
to use for manipulating Azure APIs.
:vartype service_principal_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterServicePrincipalProfile
:ivar addon_profiles: Profile of managed cluster add-on.
:vartype addon_profiles: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAddonProfile]
:ivar node_resource_group: Name of the resource group containing agent pool nodes.
:vartype node_resource_group: str
:ivar enable_rbac: Whether to enable Kubernetes Role-Based Access Control.
:vartype enable_rbac: bool
:ivar enable_pod_security_policy: (DEPRECATING) Whether to enable Kubernetes pod security
policy (preview). This feature is set for removal on October 15th, 2020. Learn more at
aka.ms/aks/azpodpolicy.
:vartype enable_pod_security_policy: bool
:ivar network_profile: Profile of network configuration.
:vartype network_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceNetworkProfile
:ivar aad_profile: Profile of Azure Active Directory configuration.
:vartype aad_profile: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAADProfile
:ivar auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled.
:vartype auto_scaler_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPropertiesAutoScalerProfile
:ivar api_server_access_profile: Access profile for managed cluster API server.
:vartype api_server_access_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAPIServerAccessProfile
:ivar disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:vartype disk_encryption_set_id: str
:ivar identity_profile: Identities associated with the cluster.
:vartype identity_profile: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPropertiesIdentityProfileValue]
"""
_validation = {
"provisioning_state": {"readonly": True},
"power_state": {"readonly": True},
"max_agent_pools": {"readonly": True},
"fqdn": {"readonly": True},
"private_fqdn": {"readonly": True},
}
_attribute_map = {
"identity": {"key": "identity", "type": "ManagedClusterIdentity"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"power_state": {"key": "properties.powerState", "type": "PowerState"},
"max_agent_pools": {"key": "properties.maxAgentPools", "type": "int"},
"kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"},
"dns_prefix": {"key": "properties.dnsPrefix", "type": "str"},
"fqdn": {"key": "properties.fqdn", "type": "str"},
"private_fqdn": {"key": "properties.privateFQDN", "type": "str"},
"agent_pool_profiles": {"key": "properties.agentPoolProfiles", "type": "[ManagedClusterAgentPoolProfile]"},
"linux_profile": {"key": "properties.linuxProfile", "type": "ContainerServiceLinuxProfile"},
"windows_profile": {"key": "properties.windowsProfile", "type": "ManagedClusterWindowsProfile"},
"service_principal_profile": {
"key": "properties.servicePrincipalProfile",
"type": "ManagedClusterServicePrincipalProfile",
},
"addon_profiles": {"key": "properties.addonProfiles", "type": "{ManagedClusterAddonProfile}"},
"node_resource_group": {"key": "properties.nodeResourceGroup", "type": "str"},
"enable_rbac": {"key": "properties.enableRBAC", "type": "bool"},
"enable_pod_security_policy": {"key": "properties.enablePodSecurityPolicy", "type": "bool"},
"network_profile": {"key": "properties.networkProfile", "type": "ContainerServiceNetworkProfile"},
"aad_profile": {"key": "properties.aadProfile", "type": "ManagedClusterAADProfile"},
"auto_scaler_profile": {
"key": "properties.autoScalerProfile",
"type": "ManagedClusterPropertiesAutoScalerProfile",
},
"api_server_access_profile": {
"key": "properties.apiServerAccessProfile",
"type": "ManagedClusterAPIServerAccessProfile",
},
"disk_encryption_set_id": {"key": "properties.diskEncryptionSetID", "type": "str"},
"identity_profile": {
"key": "properties.identityProfile",
"type": "{ManagedClusterPropertiesIdentityProfileValue}",
},
}
def __init__(
self,
*,
identity: Optional["_models.ManagedClusterIdentity"] = None,
kubernetes_version: Optional[str] = None,
dns_prefix: Optional[str] = None,
agent_pool_profiles: Optional[List["_models.ManagedClusterAgentPoolProfile"]] = None,
linux_profile: Optional["_models.ContainerServiceLinuxProfile"] = None,
windows_profile: Optional["_models.ManagedClusterWindowsProfile"] = None,
service_principal_profile: Optional["_models.ManagedClusterServicePrincipalProfile"] = None,
addon_profiles: Optional[Dict[str, "_models.ManagedClusterAddonProfile"]] = None,
node_resource_group: Optional[str] = None,
enable_rbac: Optional[bool] = None,
enable_pod_security_policy: Optional[bool] = None,
network_profile: Optional["_models.ContainerServiceNetworkProfile"] = None,
aad_profile: Optional["_models.ManagedClusterAADProfile"] = None,
auto_scaler_profile: Optional["_models.ManagedClusterPropertiesAutoScalerProfile"] = None,
api_server_access_profile: Optional["_models.ManagedClusterAPIServerAccessProfile"] = None,
disk_encryption_set_id: Optional[str] = None,
identity_profile: Optional[Dict[str, "_models.ManagedClusterPropertiesIdentityProfileValue"]] = None,
**kwargs: Any
) -> None:
"""
:keyword identity: The identity of the managed cluster, if configured.
:paramtype identity: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterIdentity
:keyword kubernetes_version: Version of Kubernetes specified when creating the managed cluster.
:paramtype kubernetes_version: str
:keyword dns_prefix: DNS prefix specified when creating the managed cluster.
:paramtype dns_prefix: str
:keyword agent_pool_profiles: Properties of the agent pool.
:paramtype agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAgentPoolProfile]
:keyword linux_profile: Profile for Linux VMs in the container service cluster.
:paramtype linux_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceLinuxProfile
:keyword windows_profile: Profile for Windows VMs in the container service cluster.
:paramtype windows_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterWindowsProfile
:keyword service_principal_profile: Information about a service principal identity for the
cluster to use for manipulating Azure APIs.
:paramtype service_principal_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterServicePrincipalProfile
:keyword addon_profiles: Profile of managed cluster add-on.
:paramtype addon_profiles: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAddonProfile]
:keyword node_resource_group: Name of the resource group containing agent pool nodes.
:paramtype node_resource_group: str
:keyword enable_rbac: Whether to enable Kubernetes Role-Based Access Control.
:paramtype enable_rbac: bool
:keyword enable_pod_security_policy: (DEPRECATING) Whether to enable Kubernetes pod security
policy (preview). This feature is set for removal on October 15th, 2020. Learn more at
aka.ms/aks/azpodpolicy.
:paramtype enable_pod_security_policy: bool
:keyword network_profile: Profile of network configuration.
:paramtype network_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceNetworkProfile
:keyword aad_profile: Profile of Azure Active Directory configuration.
:paramtype aad_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAADProfile
:keyword auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled.
:paramtype auto_scaler_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPropertiesAutoScalerProfile
:keyword api_server_access_profile: Access profile for managed cluster API server.
:paramtype api_server_access_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAPIServerAccessProfile
:keyword disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:paramtype disk_encryption_set_id: str
:keyword identity_profile: Identities associated with the cluster.
:paramtype identity_profile: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPropertiesIdentityProfileValue]
"""
super().__init__(**kwargs)
self.identity = identity
self.provisioning_state = None
self.power_state = None
self.max_agent_pools = None
self.kubernetes_version = kubernetes_version
self.dns_prefix = dns_prefix
self.fqdn = None
self.private_fqdn = None
self.agent_pool_profiles = agent_pool_profiles
self.linux_profile = linux_profile
self.windows_profile = windows_profile
self.service_principal_profile = service_principal_profile
self.addon_profiles = addon_profiles
self.node_resource_group = node_resource_group
self.enable_rbac = enable_rbac
self.enable_pod_security_policy = enable_pod_security_policy
self.network_profile = network_profile
self.aad_profile = aad_profile
self.auto_scaler_profile = auto_scaler_profile
self.api_server_access_profile = api_server_access_profile
self.disk_encryption_set_id = disk_encryption_set_id
self.identity_profile = identity_profile


class CloudErrorBody(_serialization.Model):
"""An error response from the Container service.
:ivar code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:vartype code: str
:ivar message: A message describing the error, intended to be suitable for display in a user
interface.
:vartype message: str
:ivar target: The target of the particular error. For example, the name of the property in
error.
:vartype target: str
:ivar details: A list of additional details about the error.
:vartype details: list[~azure.mgmt.containerservice.v2020_09_01.models.CloudErrorBody]
"""
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
"details": {"key": "details", "type": "[CloudErrorBody]"},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
target: Optional[str] = None,
details: Optional[List["_models.CloudErrorBody"]] = None,
**kwargs: Any
) -> None:
"""
:keyword code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:paramtype code: str
:keyword message: A message describing the error, intended to be suitable for display in a user
interface.
:paramtype message: str
:keyword target: The target of the particular error. For example, the name of the property in
error.
:paramtype target: str
:keyword details: A list of additional details about the error.
:paramtype details: list[~azure.mgmt.containerservice.v2020_09_01.models.CloudErrorBody]
"""
super().__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details


class ContainerServiceDiagnosticsProfile(_serialization.Model):
"""Profile for diagnostics on the container service cluster.
All required parameters must be populated in order to send to Azure.
:ivar vm_diagnostics: Profile for diagnostics on the container service VMs. Required.
:vartype vm_diagnostics:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMDiagnostics
"""
_validation = {
"vm_diagnostics": {"required": True},
}
_attribute_map = {
"vm_diagnostics": {"key": "vmDiagnostics", "type": "ContainerServiceVMDiagnostics"},
}
def __init__(self, *, vm_diagnostics: "_models.ContainerServiceVMDiagnostics", **kwargs: Any) -> None:
"""
:keyword vm_diagnostics: Profile for diagnostics on the container service VMs. Required.
:paramtype vm_diagnostics:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMDiagnostics
"""
super().__init__(**kwargs)
self.vm_diagnostics = vm_diagnostics


class ContainerServiceLinuxProfile(_serialization.Model):
"""Profile for Linux VMs in the container service cluster.
All required parameters must be populated in order to send to Azure.
:ivar admin_username: The administrator username to use for Linux VMs. Required.
:vartype admin_username: str
:ivar ssh: SSH configuration for Linux-based VMs running on Azure. Required.
:vartype ssh: ~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceSshConfiguration
"""
_validation = {
"admin_username": {"required": True, "pattern": r"^[A-Za-z][-A-Za-z0-9_]*$"},
"ssh": {"required": True},
}
_attribute_map = {
"admin_username": {"key": "adminUsername", "type": "str"},
"ssh": {"key": "ssh", "type": "ContainerServiceSshConfiguration"},
}
def __init__(self, *, admin_username: str, ssh: "_models.ContainerServiceSshConfiguration", **kwargs: Any) -> None:
"""
:keyword admin_username: The administrator username to use for Linux VMs. Required.
:paramtype admin_username: str
:keyword ssh: SSH configuration for Linux-based VMs running on Azure. Required.
:paramtype ssh:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceSshConfiguration
"""
super().__init__(**kwargs)
self.admin_username = admin_username
self.ssh = ssh


class ContainerServiceMasterProfile(_serialization.Model):
"""Profile for the container service master.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar count: Number of masters (VMs) in the container service cluster. Allowed values are 1, 3,
and 5. The default value is 1. Known values are: 1, 3, and 5.
:vartype count: int or ~azure.mgmt.containerservice.v2020_09_01.models.Count
:ivar dns_prefix: DNS prefix to be used to create the FQDN for the master pool. Required.
:vartype dns_prefix: str
:ivar vm_size: Size of agent VMs. Required. Known values are: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", and "Standard_NV6".
:vartype vm_size: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMSizeTypes
:ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine
in this master/agent pool. If you specify 0, it will apply the default osDisk size according to
the vmSize specified.
:vartype os_disk_size_gb: int
:ivar vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:vartype vnet_subnet_id: str
:ivar first_consecutive_static_ip: FirstConsecutiveStaticIP used to specify the first static ip
of masters.
:vartype first_consecutive_static_ip: str
    :ivar storage_profile: Storage profile specifies what kind of storage is used. Choose from
     StorageAccount and ManagedDisks. Leave it empty and a default is chosen based on the
     orchestrator choice. Known values are: "StorageAccount" and "ManagedDisks".
:vartype storage_profile: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceStorageProfileTypes
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
"""
_validation = {
"dns_prefix": {"required": True},
"vm_size": {"required": True},
"os_disk_size_gb": {"maximum": 1023, "minimum": 0},
"fqdn": {"readonly": True},
}
_attribute_map = {
"count": {"key": "count", "type": "int"},
"dns_prefix": {"key": "dnsPrefix", "type": "str"},
"vm_size": {"key": "vmSize", "type": "str"},
"os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"},
"vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"},
"first_consecutive_static_ip": {"key": "firstConsecutiveStaticIP", "type": "str"},
"storage_profile": {"key": "storageProfile", "type": "str"},
"fqdn": {"key": "fqdn", "type": "str"},
}
def __init__(
self,
*,
dns_prefix: str,
vm_size: Union[str, "_models.ContainerServiceVMSizeTypes"],
count: Union[int, "_models.Count"] = 1,
os_disk_size_gb: Optional[int] = None,
vnet_subnet_id: Optional[str] = None,
first_consecutive_static_ip: str = "10.240.255.5",
storage_profile: Optional[Union[str, "_models.ContainerServiceStorageProfileTypes"]] = None,
**kwargs: Any
) -> None:
"""
:keyword count: Number of masters (VMs) in the container service cluster. Allowed values are 1,
3, and 5. The default value is 1. Known values are: 1, 3, and 5.
:paramtype count: int or ~azure.mgmt.containerservice.v2020_09_01.models.Count
:keyword dns_prefix: DNS prefix to be used to create the FQDN for the master pool. Required.
:paramtype dns_prefix: str
:keyword vm_size: Size of agent VMs. Required. Known values are: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", and "Standard_NV6".
:paramtype vm_size: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMSizeTypes
:keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
machine in this master/agent pool. If you specify 0, it will apply the default osDisk size
according to the vmSize specified.
:paramtype os_disk_size_gb: int
:keyword vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:paramtype vnet_subnet_id: str
:keyword first_consecutive_static_ip: FirstConsecutiveStaticIP used to specify the first static
ip of masters.
:paramtype first_consecutive_static_ip: str
        :keyword storage_profile: Storage profile specifies what kind of storage is used. Choose
         from StorageAccount and ManagedDisks. Leave it empty and a default is chosen based on the
         orchestrator choice. Known values are: "StorageAccount" and "ManagedDisks".
:paramtype storage_profile: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceStorageProfileTypes
"""
super().__init__(**kwargs)
self.count = count
self.dns_prefix = dns_prefix
self.vm_size = vm_size
self.os_disk_size_gb = os_disk_size_gb
self.vnet_subnet_id = vnet_subnet_id
self.first_consecutive_static_ip = first_consecutive_static_ip
self.storage_profile = storage_profile
self.fqdn = None
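# Illustrative usage sketch, not part of the generated SDK code: only dns_prefix and
# vm_size are required for ContainerServiceMasterProfile; count defaults to 1 and
# first_consecutive_static_ip defaults to "10.240.255.5". The values below are placeholders.
if __name__ == "__main__":  # pragma: no cover - example only
    _master_profile = ContainerServiceMasterProfile(
        dns_prefix="example-masters",
        vm_size="Standard_DS2_v2",
        count=3,
    )
    print(_master_profile.count, _master_profile.first_consecutive_static_ip)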


class ContainerServiceNetworkProfile(_serialization.Model):
"""Profile of network configuration.
:ivar network_plugin: Network plugin used for building Kubernetes network. Known values are:
"azure" and "kubenet".
:vartype network_plugin: str or ~azure.mgmt.containerservice.v2020_09_01.models.NetworkPlugin
:ivar network_policy: Network policy used for building Kubernetes network. Known values are:
"calico" and "azure".
:vartype network_policy: str or ~azure.mgmt.containerservice.v2020_09_01.models.NetworkPolicy
:ivar network_mode: Network mode used for building Kubernetes network. Known values are:
"transparent" and "bridge".
:vartype network_mode: str or ~azure.mgmt.containerservice.v2020_09_01.models.NetworkMode
:ivar pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
:vartype pod_cidr: str
:ivar service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must
not overlap with any Subnet IP ranges.
:vartype service_cidr: str
:ivar dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within
the Kubernetes service address range specified in serviceCidr.
:vartype dns_service_ip: str
:ivar docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It
must not overlap with any Subnet IP ranges or the Kubernetes service address range.
:vartype docker_bridge_cidr: str
:ivar outbound_type: The outbound (egress) routing method. Known values are: "loadBalancer" and
"userDefinedRouting".
:vartype outbound_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OutboundType
:ivar load_balancer_sku: The load balancer sku for the managed cluster. Known values are:
"standard" and "basic".
:vartype load_balancer_sku: str or
~azure.mgmt.containerservice.v2020_09_01.models.LoadBalancerSku
:ivar load_balancer_profile: Profile of the cluster load balancer.
:vartype load_balancer_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterLoadBalancerProfile
"""
_validation = {
"pod_cidr": {"pattern": r"^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$"},
"service_cidr": {"pattern": r"^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$"},
"dns_service_ip": {
"pattern": r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
},
"docker_bridge_cidr": {"pattern": r"^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$"},
}
_attribute_map = {
"network_plugin": {"key": "networkPlugin", "type": "str"},
"network_policy": {"key": "networkPolicy", "type": "str"},
"network_mode": {"key": "networkMode", "type": "str"},
"pod_cidr": {"key": "podCidr", "type": "str"},
"service_cidr": {"key": "serviceCidr", "type": "str"},
"dns_service_ip": {"key": "dnsServiceIP", "type": "str"},
"docker_bridge_cidr": {"key": "dockerBridgeCidr", "type": "str"},
"outbound_type": {"key": "outboundType", "type": "str"},
"load_balancer_sku": {"key": "loadBalancerSku", "type": "str"},
"load_balancer_profile": {"key": "loadBalancerProfile", "type": "ManagedClusterLoadBalancerProfile"},
}
def __init__(
self,
*,
network_plugin: Union[str, "_models.NetworkPlugin"] = "kubenet",
network_policy: Optional[Union[str, "_models.NetworkPolicy"]] = None,
network_mode: Optional[Union[str, "_models.NetworkMode"]] = None,
pod_cidr: str = "10.244.0.0/16",
service_cidr: str = "10.0.0.0/16",
dns_service_ip: str = "10.0.0.10",
docker_bridge_cidr: str = "172.17.0.1/16",
outbound_type: Union[str, "_models.OutboundType"] = "loadBalancer",
load_balancer_sku: Optional[Union[str, "_models.LoadBalancerSku"]] = None,
load_balancer_profile: Optional["_models.ManagedClusterLoadBalancerProfile"] = None,
**kwargs: Any
) -> None:
"""
:keyword network_plugin: Network plugin used for building Kubernetes network. Known values are:
"azure" and "kubenet".
:paramtype network_plugin: str or ~azure.mgmt.containerservice.v2020_09_01.models.NetworkPlugin
:keyword network_policy: Network policy used for building Kubernetes network. Known values are:
"calico" and "azure".
:paramtype network_policy: str or ~azure.mgmt.containerservice.v2020_09_01.models.NetworkPolicy
:keyword network_mode: Network mode used for building Kubernetes network. Known values are:
"transparent" and "bridge".
:paramtype network_mode: str or ~azure.mgmt.containerservice.v2020_09_01.models.NetworkMode
:keyword pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
:paramtype pod_cidr: str
:keyword service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It
must not overlap with any Subnet IP ranges.
:paramtype service_cidr: str
:keyword dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be
within the Kubernetes service address range specified in serviceCidr.
:paramtype dns_service_ip: str
:keyword docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It
must not overlap with any Subnet IP ranges or the Kubernetes service address range.
:paramtype docker_bridge_cidr: str
:keyword outbound_type: The outbound (egress) routing method. Known values are: "loadBalancer"
and "userDefinedRouting".
:paramtype outbound_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OutboundType
:keyword load_balancer_sku: The load balancer sku for the managed cluster. Known values are:
"standard" and "basic".
:paramtype load_balancer_sku: str or
~azure.mgmt.containerservice.v2020_09_01.models.LoadBalancerSku
:keyword load_balancer_profile: Profile of the cluster load balancer.
:paramtype load_balancer_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterLoadBalancerProfile
"""
super().__init__(**kwargs)
self.network_plugin = network_plugin
self.network_policy = network_policy
self.network_mode = network_mode
self.pod_cidr = pod_cidr
self.service_cidr = service_cidr
self.dns_service_ip = dns_service_ip
self.docker_bridge_cidr = docker_bridge_cidr
self.outbound_type = outbound_type
self.load_balancer_sku = load_balancer_sku
self.load_balancer_profile = load_balancer_profile
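# Illustrative usage sketch, not part of the generated SDK code: overriding the kubenet
# defaults with the "azure" network plugin and a custom service CIDR. As the docstring
# above notes, dns_service_ip must fall inside service_cidr; the ranges below are
# placeholder values.
if __name__ == "__main__":  # pragma: no cover - example only
    _network_profile = ContainerServiceNetworkProfile(
        network_plugin="azure",
        network_policy="calico",
        service_cidr="10.2.0.0/24",
        dns_service_ip="10.2.0.10",
        load_balancer_sku="standard",
    )
    print(_network_profile.network_plugin, _network_profile.outbound_type)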


class ContainerServiceSshConfiguration(_serialization.Model):
"""SSH configuration for Linux-based VMs running on Azure.
All required parameters must be populated in order to send to Azure.
    :ivar public_keys: The list of SSH public keys used to authenticate with Linux-based VMs. Only
     one key is expected. Required.
:vartype public_keys:
list[~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceSshPublicKey]
"""
_validation = {
"public_keys": {"required": True},
}
_attribute_map = {
"public_keys": {"key": "publicKeys", "type": "[ContainerServiceSshPublicKey]"},
}
def __init__(self, *, public_keys: List["_models.ContainerServiceSshPublicKey"], **kwargs: Any) -> None:
"""
        :keyword public_keys: The list of SSH public keys used to authenticate with Linux-based VMs.
         Only one key is expected. Required.
:paramtype public_keys:
list[~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceSshPublicKey]
"""
super().__init__(**kwargs)
self.public_keys = public_keys


class ContainerServiceSshPublicKey(_serialization.Model):
"""Contains information about SSH certificate public key data.
All required parameters must be populated in order to send to Azure.
:ivar key_data: Certificate public key used to authenticate with VMs through SSH. The
certificate must be in PEM format with or without headers. Required.
:vartype key_data: str
"""
_validation = {
"key_data": {"required": True},
}
_attribute_map = {
"key_data": {"key": "keyData", "type": "str"},
}
def __init__(self, *, key_data: str, **kwargs: Any) -> None:
"""
:keyword key_data: Certificate public key used to authenticate with VMs through SSH. The
certificate must be in PEM format with or without headers. Required.
:paramtype key_data: str
"""
super().__init__(**kwargs)
self.key_data = key_data
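# Illustrative usage sketch, not part of the generated SDK code: the three SSH-related
# models above compose into the linux_profile of a managed cluster. The key data below
# is a truncated placeholder, not a real public key.
if __name__ == "__main__":  # pragma: no cover - example only
    _linux_profile = ContainerServiceLinuxProfile(
        admin_username="azureuser",
        ssh=ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data="ssh-rsa AAAA... placeholder")]
        ),
    )
    print(_linux_profile.admin_username)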


class ContainerServiceVMDiagnostics(_serialization.Model):
"""Profile for diagnostics on the container service VMs.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar enabled: Whether the VM diagnostic agent is provisioned on the VM. Required.
:vartype enabled: bool
:ivar storage_uri: The URI of the storage account where diagnostics are stored.
:vartype storage_uri: str
"""
_validation = {
"enabled": {"required": True},
"storage_uri": {"readonly": True},
}
_attribute_map = {
"enabled": {"key": "enabled", "type": "bool"},
"storage_uri": {"key": "storageUri", "type": "str"},
}
def __init__(self, *, enabled: bool, **kwargs: Any) -> None:
"""
:keyword enabled: Whether the VM diagnostic agent is provisioned on the VM. Required.
:paramtype enabled: bool
"""
super().__init__(**kwargs)
self.enabled = enabled
self.storage_uri = None


class CredentialResult(_serialization.Model):
"""The credential result response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the credential.
:vartype name: str
:ivar value: Base64-encoded Kubernetes configuration file.
:vartype value: bytes
"""
_validation = {
"name": {"readonly": True},
"value": {"readonly": True},
}
_attribute_map = {
"name": {"key": "name", "type": "str"},
"value": {"key": "value", "type": "bytearray"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.name = None
self.value = None


class CredentialResults(_serialization.Model):
"""The list of credential result response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar kubeconfigs: Base64-encoded Kubernetes configuration file.
:vartype kubeconfigs: list[~azure.mgmt.containerservice.v2020_09_01.models.CredentialResult]
"""
_validation = {
"kubeconfigs": {"readonly": True},
}
_attribute_map = {
"kubeconfigs": {"key": "kubeconfigs", "type": "[CredentialResult]"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.kubeconfigs = None
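# Illustrative usage sketch, not part of the generated SDK code: iterating a
# CredentialResults instance and writing each kubeconfig to disk. kubeconfigs is
# populated by the server, so the freshly constructed instance below yields an
# empty loop; it only demonstrates the shape of the data.
if __name__ == "__main__":  # pragma: no cover - example only
    _credentials = CredentialResults()
    for _kubeconfig in _credentials.kubeconfigs or []:
        with open(f"{_kubeconfig.name}.kubeconfig", "wb") as _handle:
            _handle.write(_kubeconfig.value or b"")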


class Resource(_serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location. Required.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword location: Resource location. Required.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags


class ManagedCluster(Resource, BaseManagedCluster):  # pylint: disable=too-many-instance-attributes
"""Managed cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar identity: The identity of the managed cluster, if configured.
:vartype identity: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterIdentity
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar power_state: Represents the Power State of the cluster.
:vartype power_state: ~azure.mgmt.containerservice.v2020_09_01.models.PowerState
:ivar max_agent_pools: The max number of agent pools for the managed cluster.
:vartype max_agent_pools: int
:ivar kubernetes_version: Version of Kubernetes specified when creating the managed cluster.
:vartype kubernetes_version: str
:ivar dns_prefix: DNS prefix specified when creating the managed cluster.
:vartype dns_prefix: str
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
:ivar private_fqdn: FQDN of private cluster.
:vartype private_fqdn: str
:ivar agent_pool_profiles: Properties of the agent pool.
:vartype agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAgentPoolProfile]
:ivar linux_profile: Profile for Linux VMs in the container service cluster.
:vartype linux_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceLinuxProfile
:ivar windows_profile: Profile for Windows VMs in the container service cluster.
:vartype windows_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterWindowsProfile
:ivar service_principal_profile: Information about a service principal identity for the cluster
to use for manipulating Azure APIs.
:vartype service_principal_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterServicePrincipalProfile
:ivar addon_profiles: Profile of managed cluster add-on.
:vartype addon_profiles: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAddonProfile]
:ivar node_resource_group: Name of the resource group containing agent pool nodes.
:vartype node_resource_group: str
:ivar enable_rbac: Whether to enable Kubernetes Role-Based Access Control.
:vartype enable_rbac: bool
:ivar enable_pod_security_policy: (DEPRECATING) Whether to enable Kubernetes pod security
policy (preview). This feature is set for removal on October 15th, 2020. Learn more at
aka.ms/aks/azpodpolicy.
:vartype enable_pod_security_policy: bool
:ivar network_profile: Profile of network configuration.
:vartype network_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceNetworkProfile
:ivar aad_profile: Profile of Azure Active Directory configuration.
:vartype aad_profile: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAADProfile
:ivar auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled.
:vartype auto_scaler_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPropertiesAutoScalerProfile
:ivar api_server_access_profile: Access profile for managed cluster API server.
:vartype api_server_access_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAPIServerAccessProfile
:ivar disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:vartype disk_encryption_set_id: str
:ivar identity_profile: Identities associated with the cluster.
:vartype identity_profile: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPropertiesIdentityProfileValue]
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location. Required.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar sku: The managed cluster SKU.
:vartype sku: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterSKU
"""
_validation = {
"provisioning_state": {"readonly": True},
"power_state": {"readonly": True},
"max_agent_pools": {"readonly": True},
"fqdn": {"readonly": True},
"private_fqdn": {"readonly": True},
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"identity": {"key": "identity", "type": "ManagedClusterIdentity"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"power_state": {"key": "properties.powerState", "type": "PowerState"},
"max_agent_pools": {"key": "properties.maxAgentPools", "type": "int"},
"kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"},
"dns_prefix": {"key": "properties.dnsPrefix", "type": "str"},
"fqdn": {"key": "properties.fqdn", "type": "str"},
"private_fqdn": {"key": "properties.privateFQDN", "type": "str"},
"agent_pool_profiles": {"key": "properties.agentPoolProfiles", "type": "[ManagedClusterAgentPoolProfile]"},
"linux_profile": {"key": "properties.linuxProfile", "type": "ContainerServiceLinuxProfile"},
"windows_profile": {"key": "properties.windowsProfile", "type": "ManagedClusterWindowsProfile"},
"service_principal_profile": {
"key": "properties.servicePrincipalProfile",
"type": "ManagedClusterServicePrincipalProfile",
},
"addon_profiles": {"key": "properties.addonProfiles", "type": "{ManagedClusterAddonProfile}"},
"node_resource_group": {"key": "properties.nodeResourceGroup", "type": "str"},
"enable_rbac": {"key": "properties.enableRBAC", "type": "bool"},
"enable_pod_security_policy": {"key": "properties.enablePodSecurityPolicy", "type": "bool"},
"network_profile": {"key": "properties.networkProfile", "type": "ContainerServiceNetworkProfile"},
"aad_profile": {"key": "properties.aadProfile", "type": "ManagedClusterAADProfile"},
"auto_scaler_profile": {
"key": "properties.autoScalerProfile",
"type": "ManagedClusterPropertiesAutoScalerProfile",
},
"api_server_access_profile": {
"key": "properties.apiServerAccessProfile",
"type": "ManagedClusterAPIServerAccessProfile",
},
"disk_encryption_set_id": {"key": "properties.diskEncryptionSetID", "type": "str"},
"identity_profile": {
"key": "properties.identityProfile",
"type": "{ManagedClusterPropertiesIdentityProfileValue}",
},
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"sku": {"key": "sku", "type": "ManagedClusterSKU"},
}
def __init__( # pylint: disable=too-many-locals
self,
*,
location: str,
identity: Optional["_models.ManagedClusterIdentity"] = None,
kubernetes_version: Optional[str] = None,
dns_prefix: Optional[str] = None,
agent_pool_profiles: Optional[List["_models.ManagedClusterAgentPoolProfile"]] = None,
linux_profile: Optional["_models.ContainerServiceLinuxProfile"] = None,
windows_profile: Optional["_models.ManagedClusterWindowsProfile"] = None,
service_principal_profile: Optional["_models.ManagedClusterServicePrincipalProfile"] = None,
addon_profiles: Optional[Dict[str, "_models.ManagedClusterAddonProfile"]] = None,
node_resource_group: Optional[str] = None,
enable_rbac: Optional[bool] = None,
enable_pod_security_policy: Optional[bool] = None,
network_profile: Optional["_models.ContainerServiceNetworkProfile"] = None,
aad_profile: Optional["_models.ManagedClusterAADProfile"] = None,
auto_scaler_profile: Optional["_models.ManagedClusterPropertiesAutoScalerProfile"] = None,
api_server_access_profile: Optional["_models.ManagedClusterAPIServerAccessProfile"] = None,
disk_encryption_set_id: Optional[str] = None,
identity_profile: Optional[Dict[str, "_models.ManagedClusterPropertiesIdentityProfileValue"]] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["_models.ManagedClusterSKU"] = None,
**kwargs: Any
) -> None:
"""
:keyword identity: The identity of the managed cluster, if configured.
:paramtype identity: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterIdentity
:keyword kubernetes_version: Version of Kubernetes specified when creating the managed cluster.
:paramtype kubernetes_version: str
:keyword dns_prefix: DNS prefix specified when creating the managed cluster.
:paramtype dns_prefix: str
:keyword agent_pool_profiles: Properties of the agent pool.
:paramtype agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAgentPoolProfile]
:keyword linux_profile: Profile for Linux VMs in the container service cluster.
:paramtype linux_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceLinuxProfile
:keyword windows_profile: Profile for Windows VMs in the container service cluster.
:paramtype windows_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterWindowsProfile
:keyword service_principal_profile: Information about a service principal identity for the
cluster to use for manipulating Azure APIs.
:paramtype service_principal_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterServicePrincipalProfile
:keyword addon_profiles: Profile of managed cluster add-on.
:paramtype addon_profiles: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAddonProfile]
:keyword node_resource_group: Name of the resource group containing agent pool nodes.
:paramtype node_resource_group: str
:keyword enable_rbac: Whether to enable Kubernetes Role-Based Access Control.
:paramtype enable_rbac: bool
:keyword enable_pod_security_policy: (DEPRECATING) Whether to enable Kubernetes pod security
policy (preview). This feature is set for removal on October 15th, 2020. Learn more at
aka.ms/aks/azpodpolicy.
:paramtype enable_pod_security_policy: bool
:keyword network_profile: Profile of network configuration.
:paramtype network_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceNetworkProfile
:keyword aad_profile: Profile of Azure Active Directory configuration.
:paramtype aad_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAADProfile
:keyword auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled.
:paramtype auto_scaler_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPropertiesAutoScalerProfile
:keyword api_server_access_profile: Access profile for managed cluster API server.
:paramtype api_server_access_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAPIServerAccessProfile
:keyword disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:paramtype disk_encryption_set_id: str
:keyword identity_profile: Identities associated with the cluster.
:paramtype identity_profile: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPropertiesIdentityProfileValue]
:keyword location: Resource location. Required.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword sku: The managed cluster SKU.
:paramtype sku: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterSKU
"""
super().__init__(
location=location,
tags=tags,
identity=identity,
kubernetes_version=kubernetes_version,
dns_prefix=dns_prefix,
agent_pool_profiles=agent_pool_profiles,
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
addon_profiles=addon_profiles,
node_resource_group=node_resource_group,
enable_rbac=enable_rbac,
enable_pod_security_policy=enable_pod_security_policy,
network_profile=network_profile,
aad_profile=aad_profile,
auto_scaler_profile=auto_scaler_profile,
api_server_access_profile=api_server_access_profile,
disk_encryption_set_id=disk_encryption_set_id,
identity_profile=identity_profile,
**kwargs
)
self.identity = identity
self.provisioning_state = None
self.power_state = None
self.max_agent_pools = None
self.kubernetes_version = kubernetes_version
self.dns_prefix = dns_prefix
self.fqdn = None
self.private_fqdn = None
self.agent_pool_profiles = agent_pool_profiles
self.linux_profile = linux_profile
self.windows_profile = windows_profile
self.service_principal_profile = service_principal_profile
self.addon_profiles = addon_profiles
self.node_resource_group = node_resource_group
self.enable_rbac = enable_rbac
self.enable_pod_security_policy = enable_pod_security_policy
self.network_profile = network_profile
self.aad_profile = aad_profile
self.auto_scaler_profile = auto_scaler_profile
self.api_server_access_profile = api_server_access_profile
self.disk_encryption_set_id = disk_encryption_set_id
self.identity_profile = identity_profile
self.sku = sku
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
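# Illustrative usage sketch, not part of the generated SDK code: a minimal ManagedCluster
# payload built only from models defined above. The location, DNS prefix, Kubernetes
# version and SSH key data are placeholder values.
if __name__ == "__main__":  # pragma: no cover - example only
    _cluster = ManagedCluster(
        location="eastus",
        dns_prefix="example-aks",
        kubernetes_version="1.18.8",
        enable_rbac=True,
        linux_profile=ContainerServiceLinuxProfile(
            admin_username="azureuser",
            ssh=ContainerServiceSshConfiguration(
                public_keys=[ContainerServiceSshPublicKey(key_data="ssh-rsa AAAA... placeholder")]
            ),
        ),
    )
    print(_cluster.location, _cluster.dns_prefix)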


class ManagedClusterAADProfile(_serialization.Model):
"""AADProfile specifies attributes for Azure Active Directory integration.
:ivar managed: Whether to enable managed AAD.
:vartype managed: bool
:ivar enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization.
:vartype enable_azure_rbac: bool
    :ivar admin_group_object_i_ds: AAD group object IDs that will have the admin role on the
     cluster.
:vartype admin_group_object_i_ds: list[str]
:ivar client_app_id: The client AAD application ID.
:vartype client_app_id: str
:ivar server_app_id: The server AAD application ID.
:vartype server_app_id: str
:ivar server_app_secret: The server AAD application secret.
:vartype server_app_secret: str
:ivar tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the
tenant of the deployment subscription.
:vartype tenant_id: str
"""
_attribute_map = {
"managed": {"key": "managed", "type": "bool"},
"enable_azure_rbac": {"key": "enableAzureRBAC", "type": "bool"},
"admin_group_object_i_ds": {"key": "adminGroupObjectIDs", "type": "[str]"},
"client_app_id": {"key": "clientAppID", "type": "str"},
"server_app_id": {"key": "serverAppID", "type": "str"},
"server_app_secret": {"key": "serverAppSecret", "type": "str"},
"tenant_id": {"key": "tenantID", "type": "str"},
}
def __init__(
self,
*,
managed: Optional[bool] = None,
enable_azure_rbac: Optional[bool] = None,
admin_group_object_i_ds: Optional[List[str]] = None,
client_app_id: Optional[str] = None,
server_app_id: Optional[str] = None,
server_app_secret: Optional[str] = None,
tenant_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword managed: Whether to enable managed AAD.
:paramtype managed: bool
:keyword enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization.
:paramtype enable_azure_rbac: bool
        :keyword admin_group_object_i_ds: AAD group object IDs that will have the admin role on the
         cluster.
:paramtype admin_group_object_i_ds: list[str]
:keyword client_app_id: The client AAD application ID.
:paramtype client_app_id: str
:keyword server_app_id: The server AAD application ID.
:paramtype server_app_id: str
:keyword server_app_secret: The server AAD application secret.
:paramtype server_app_secret: str
:keyword tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the
tenant of the deployment subscription.
:paramtype tenant_id: str
"""
super().__init__(**kwargs)
self.managed = managed
self.enable_azure_rbac = enable_azure_rbac
self.admin_group_object_i_ds = admin_group_object_i_ds
self.client_app_id = client_app_id
self.server_app_id = server_app_id
self.server_app_secret = server_app_secret
self.tenant_id = tenant_id
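# Illustrative usage sketch, not part of the generated SDK code: enabling managed AAD
# with Azure RBAC for Kubernetes authorization. The group object ID below is a
# placeholder GUID.
if __name__ == "__main__":  # pragma: no cover - example only
    _aad_profile = ManagedClusterAADProfile(
        managed=True,
        enable_azure_rbac=True,
        admin_group_object_i_ds=["00000000-0000-0000-0000-000000000000"],
    )
    print(_aad_profile.managed, _aad_profile.admin_group_object_i_ds)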


class ManagedClusterAccessProfile(Resource):
"""Managed cluster Access Profile.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar location: Resource location. Required.
:vartype location: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar kube_config: Base64-encoded Kubernetes configuration file.
:vartype kube_config: bytes
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"location": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"location": {"key": "location", "type": "str"},
"tags": {"key": "tags", "type": "{str}"},
"kube_config": {"key": "properties.kubeConfig", "type": "bytearray"},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
kube_config: Optional[bytes] = None,
**kwargs: Any
) -> None:
"""
:keyword location: Resource location. Required.
:paramtype location: str
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
:keyword kube_config: Base64-encoded Kubernetes configuration file.
:paramtype kube_config: bytes
"""
super().__init__(location=location, tags=tags, **kwargs)
self.kube_config = kube_config
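# Illustrative usage sketch, not part of the generated SDK code: persisting the
# kubeconfig bytes carried by a ManagedClusterAccessProfile. The profile is built
# locally with placeholder bytes purely for demonstration.
if __name__ == "__main__":  # pragma: no cover - example only
    _access_profile = ManagedClusterAccessProfile(
        location="eastus",
        kube_config=b"apiVersion: v1\nkind: Config\n",
    )
    with open("example.kubeconfig", "wb") as _handle:
        _handle.write(_access_profile.kube_config or b"")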


class ManagedClusterAddonProfile(_serialization.Model):
"""A Kubernetes add-on profile for a managed cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar enabled: Whether the add-on is enabled or not. Required.
:vartype enabled: bool
:ivar config: Key-value pairs for configuring an add-on.
:vartype config: dict[str, str]
:ivar identity: Information of user assigned identity used by this add-on.
:vartype identity:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAddonProfileIdentity
"""
_validation = {
"enabled": {"required": True},
"identity": {"readonly": True},
}
_attribute_map = {
"enabled": {"key": "enabled", "type": "bool"},
"config": {"key": "config", "type": "{str}"},
"identity": {"key": "identity", "type": "ManagedClusterAddonProfileIdentity"},
}
def __init__(self, *, enabled: bool, config: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword enabled: Whether the add-on is enabled or not. Required.
:paramtype enabled: bool
:keyword config: Key-value pairs for configuring an add-on.
:paramtype config: dict[str, str]
"""
super().__init__(**kwargs)
self.enabled = enabled
self.config = config
self.identity = None
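# Illustrative usage sketch, not part of the generated SDK code: an addon_profiles
# mapping as it would be passed to BaseManagedCluster/ManagedCluster. The addon names
# and the workspace resource ID shown here are assumed example values.
if __name__ == "__main__":  # pragma: no cover - example only
    _addon_profiles = {
        "omsagent": ManagedClusterAddonProfile(
            enabled=True,
            config={"logAnalyticsWorkspaceResourceID": "/subscriptions/.../example-workspace"},
        ),
        "httpApplicationRouting": ManagedClusterAddonProfile(enabled=False),
    }
    print(sorted(_addon_profiles))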


class UserAssignedIdentity(_serialization.Model):
"""UserAssignedIdentity.
:ivar resource_id: The resource id of the user assigned identity.
:vartype resource_id: str
:ivar client_id: The client id of the user assigned identity.
:vartype client_id: str
:ivar object_id: The object id of the user assigned identity.
:vartype object_id: str
"""
_attribute_map = {
"resource_id": {"key": "resourceId", "type": "str"},
"client_id": {"key": "clientId", "type": "str"},
"object_id": {"key": "objectId", "type": "str"},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
client_id: Optional[str] = None,
object_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword resource_id: The resource id of the user assigned identity.
:paramtype resource_id: str
:keyword client_id: The client id of the user assigned identity.
:paramtype client_id: str
:keyword object_id: The object id of the user assigned identity.
:paramtype object_id: str
"""
super().__init__(**kwargs)
self.resource_id = resource_id
self.client_id = client_id
self.object_id = object_id
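

# Editorial usage sketch (not part of the generated client library): referencing an
# existing user-assigned identity by its ARM identifiers. All three values below are
# placeholders, not real resource ids.
def _example_user_assigned_identity() -> "UserAssignedIdentity":
    return UserAssignedIdentity(
        resource_id=(
            "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/"
            "Microsoft.ManagedIdentity/userAssignedIdentities/<identity-name>"
        ),
        client_id="<client-id>",
        object_id="<object-id>",
    )

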
class ManagedClusterAddonProfileIdentity(UserAssignedIdentity):
"""Information of user assigned identity used by this add-on.
:ivar resource_id: The resource id of the user assigned identity.
:vartype resource_id: str
:ivar client_id: The client id of the user assigned identity.
:vartype client_id: str
:ivar object_id: The object id of the user assigned identity.
:vartype object_id: str
"""
_attribute_map = {
"resource_id": {"key": "resourceId", "type": "str"},
"client_id": {"key": "clientId", "type": "str"},
"object_id": {"key": "objectId", "type": "str"},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
client_id: Optional[str] = None,
object_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword resource_id: The resource id of the user assigned identity.
:paramtype resource_id: str
:keyword client_id: The client id of the user assigned identity.
:paramtype client_id: str
:keyword object_id: The object id of the user assigned identity.
:paramtype object_id: str
"""
super().__init__(resource_id=resource_id, client_id=client_id, object_id=object_id, **kwargs)
class ManagedClusterAgentPoolProfileProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Properties for the container service agent pool profile.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:vartype count: int
:ivar vm_size: Size of agent VMs. Known values are: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", and "Standard_NV6".
:vartype vm_size: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMSizeTypes
:ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine
in this master/agent pool. If you specify 0, it will apply the default osDisk size according to
the vmSize specified.
:vartype os_disk_size_gb: int
:ivar os_disk_type: OS disk type to be used for machines in a given agent pool. Allowed values
are 'Ephemeral' and 'Managed'. Defaults to 'Managed'. May not be changed after creation. Known
values are: "Managed" and "Ephemeral".
:vartype os_disk_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSDiskType
:ivar vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:vartype vnet_subnet_id: str
:ivar max_pods: Maximum number of pods that can run on a node.
:vartype max_pods: int
    :ivar os_type: OsType to be used to specify the OS type. Choose from Linux and Windows.
     Defaults to Linux. Known values are: "Linux" and "Windows".
:vartype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:ivar max_count: Maximum number of nodes for auto-scaling.
:vartype max_count: int
:ivar min_count: Minimum number of nodes for auto-scaling.
:vartype min_count: int
:ivar enable_auto_scaling: Whether to enable auto-scaler.
:vartype enable_auto_scaling: bool
:ivar type: AgentPoolType represents types of an agent pool. Known values are:
"VirtualMachineScaleSets" and "AvailabilitySet".
:vartype type: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolType
:ivar mode: AgentPoolMode represents mode of an agent pool. Known values are: "System" and
"User".
:vartype mode: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolMode
:ivar orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:vartype orchestrator_version: str
:ivar node_image_version: Version of node image.
:vartype node_image_version: str
    :ivar upgrade_settings: Settings for upgrading the agent pool.
:vartype upgrade_settings:
~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolUpgradeSettings
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar power_state: Describes whether the Agent Pool is Running or Stopped.
:vartype power_state: ~azure.mgmt.containerservice.v2020_09_01.models.PowerState
:ivar availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:vartype availability_zones: list[str]
:ivar enable_node_public_ip: Enable public IP for nodes.
:vartype enable_node_public_ip: bool
:ivar scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set
     priority. Defaults to 'Regular'. Known values are: "Spot" and "Regular".
:vartype scale_set_priority: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetPriority
:ivar scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy
     for the Spot virtual machine scale set. Defaults to 'Delete'. Known values are: "Delete" and
"Deallocate".
:vartype scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetEvictionPolicy
:ivar spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to
     pay in US Dollars. Possible values are any decimal value greater than zero, or -1, which
     indicates that the default price is up to the on-demand price.
:vartype spot_max_price: float
:ivar tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
:vartype tags: dict[str, str]
:ivar node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:vartype node_labels: dict[str, str]
:ivar node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:vartype node_taints: list[str]
:ivar proximity_placement_group_id: The ID for Proximity Placement Group.
:vartype proximity_placement_group_id: str
"""
_validation = {
"os_disk_size_gb": {"maximum": 1023, "minimum": 0},
"node_image_version": {"readonly": True},
"provisioning_state": {"readonly": True},
"power_state": {"readonly": True},
}
_attribute_map = {
"count": {"key": "count", "type": "int"},
"vm_size": {"key": "vmSize", "type": "str"},
"os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"},
"os_disk_type": {"key": "osDiskType", "type": "str"},
"vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"},
"max_pods": {"key": "maxPods", "type": "int"},
"os_type": {"key": "osType", "type": "str"},
"max_count": {"key": "maxCount", "type": "int"},
"min_count": {"key": "minCount", "type": "int"},
"enable_auto_scaling": {"key": "enableAutoScaling", "type": "bool"},
"type": {"key": "type", "type": "str"},
"mode": {"key": "mode", "type": "str"},
"orchestrator_version": {"key": "orchestratorVersion", "type": "str"},
"node_image_version": {"key": "nodeImageVersion", "type": "str"},
"upgrade_settings": {"key": "upgradeSettings", "type": "AgentPoolUpgradeSettings"},
"provisioning_state": {"key": "provisioningState", "type": "str"},
"power_state": {"key": "powerState", "type": "PowerState"},
"availability_zones": {"key": "availabilityZones", "type": "[str]"},
"enable_node_public_ip": {"key": "enableNodePublicIP", "type": "bool"},
"scale_set_priority": {"key": "scaleSetPriority", "type": "str"},
"scale_set_eviction_policy": {"key": "scaleSetEvictionPolicy", "type": "str"},
"spot_max_price": {"key": "spotMaxPrice", "type": "float"},
"tags": {"key": "tags", "type": "{str}"},
"node_labels": {"key": "nodeLabels", "type": "{str}"},
"node_taints": {"key": "nodeTaints", "type": "[str]"},
"proximity_placement_group_id": {"key": "proximityPlacementGroupID", "type": "str"},
}
def __init__( # pylint: disable=too-many-locals
self,
*,
count: Optional[int] = None,
vm_size: Optional[Union[str, "_models.ContainerServiceVMSizeTypes"]] = None,
os_disk_size_gb: Optional[int] = None,
os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None,
vnet_subnet_id: Optional[str] = None,
max_pods: Optional[int] = None,
os_type: Union[str, "_models.OSType"] = "Linux",
max_count: Optional[int] = None,
min_count: Optional[int] = None,
enable_auto_scaling: Optional[bool] = None,
type: Optional[Union[str, "_models.AgentPoolType"]] = None,
mode: Optional[Union[str, "_models.AgentPoolMode"]] = None,
orchestrator_version: Optional[str] = None,
upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None,
availability_zones: Optional[List[str]] = None,
enable_node_public_ip: Optional[bool] = None,
scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular",
scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete",
spot_max_price: float = -1,
tags: Optional[Dict[str, str]] = None,
node_labels: Optional[Dict[str, str]] = None,
node_taints: Optional[List[str]] = None,
proximity_placement_group_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:paramtype count: int
:keyword vm_size: Size of agent VMs. Known values are: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", and "Standard_NV6".
:paramtype vm_size: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMSizeTypes
:keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
machine in this master/agent pool. If you specify 0, it will apply the default osDisk size
according to the vmSize specified.
:paramtype os_disk_size_gb: int
:keyword os_disk_type: OS disk type to be used for machines in a given agent pool. Allowed
values are 'Ephemeral' and 'Managed'. Defaults to 'Managed'. May not be changed after creation.
Known values are: "Managed" and "Ephemeral".
:paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSDiskType
:keyword vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:paramtype vnet_subnet_id: str
:keyword max_pods: Maximum number of pods that can run on a node.
:paramtype max_pods: int
    :keyword os_type: OsType to be used to specify the OS type. Choose from Linux and Windows.
     Defaults to Linux. Known values are: "Linux" and "Windows".
:paramtype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:keyword max_count: Maximum number of nodes for auto-scaling.
:paramtype max_count: int
:keyword min_count: Minimum number of nodes for auto-scaling.
:paramtype min_count: int
:keyword enable_auto_scaling: Whether to enable auto-scaler.
:paramtype enable_auto_scaling: bool
:keyword type: AgentPoolType represents types of an agent pool. Known values are:
"VirtualMachineScaleSets" and "AvailabilitySet".
:paramtype type: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolType
:keyword mode: AgentPoolMode represents mode of an agent pool. Known values are: "System" and
"User".
:paramtype mode: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolMode
:keyword orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:paramtype orchestrator_version: str
    :keyword upgrade_settings: Settings for upgrading the agent pool.
:paramtype upgrade_settings:
~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolUpgradeSettings
:keyword availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:paramtype availability_zones: list[str]
:keyword enable_node_public_ip: Enable public IP for nodes.
:paramtype enable_node_public_ip: bool
:keyword scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set
     priority. Defaults to 'Regular'. Known values are: "Spot" and "Regular".
:paramtype scale_set_priority: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetPriority
:keyword scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction
     policy for the Spot virtual machine scale set. Defaults to 'Delete'. Known values are: "Delete" and
"Deallocate".
:paramtype scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetEvictionPolicy
:keyword spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing
     to pay in US Dollars. Possible values are any decimal value greater than zero, or -1, which
     indicates that the default price is up to the on-demand price.
:paramtype spot_max_price: float
:keyword tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
:paramtype tags: dict[str, str]
:keyword node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:paramtype node_labels: dict[str, str]
:keyword node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:paramtype node_taints: list[str]
:keyword proximity_placement_group_id: The ID for Proximity Placement Group.
:paramtype proximity_placement_group_id: str
"""
super().__init__(**kwargs)
self.count = count
self.vm_size = vm_size
self.os_disk_size_gb = os_disk_size_gb
self.os_disk_type = os_disk_type
self.vnet_subnet_id = vnet_subnet_id
self.max_pods = max_pods
self.os_type = os_type
self.max_count = max_count
self.min_count = min_count
self.enable_auto_scaling = enable_auto_scaling
self.type = type
self.mode = mode
self.orchestrator_version = orchestrator_version
self.node_image_version = None
self.upgrade_settings = upgrade_settings
self.provisioning_state = None
self.power_state = None
self.availability_zones = availability_zones
self.enable_node_public_ip = enable_node_public_ip
self.scale_set_priority = scale_set_priority
self.scale_set_eviction_policy = scale_set_eviction_policy
self.spot_max_price = spot_max_price
self.tags = tags
self.node_labels = node_labels
self.node_taints = node_taints
self.proximity_placement_group_id = proximity_placement_group_id
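

# Editorial usage sketch (not part of the generated client library): an auto-scaling user
# pool description. min_count/max_count only take effect when enable_auto_scaling is True;
# the VM size and node counts are illustrative assumptions within the documented ranges.
def _example_autoscaling_pool_properties() -> "ManagedClusterAgentPoolProfileProperties":
    return ManagedClusterAgentPoolProfileProperties(
        count=1,
        vm_size="Standard_DS2_v2",
        mode="User",
        type="VirtualMachineScaleSets",
        enable_auto_scaling=True,
        min_count=1,
        max_count=5,
    )

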
class ManagedClusterAgentPoolProfile(
ManagedClusterAgentPoolProfileProperties
): # pylint: disable=too-many-instance-attributes
"""Profile for the container service agent pool.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:vartype count: int
:ivar vm_size: Size of agent VMs. Known values are: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", and "Standard_NV6".
:vartype vm_size: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMSizeTypes
:ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine
in this master/agent pool. If you specify 0, it will apply the default osDisk size according to
the vmSize specified.
:vartype os_disk_size_gb: int
:ivar os_disk_type: OS disk type to be used for machines in a given agent pool. Allowed values
are 'Ephemeral' and 'Managed'. Defaults to 'Managed'. May not be changed after creation. Known
values are: "Managed" and "Ephemeral".
:vartype os_disk_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSDiskType
:ivar vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:vartype vnet_subnet_id: str
:ivar max_pods: Maximum number of pods that can run on a node.
:vartype max_pods: int
    :ivar os_type: OsType to be used to specify the OS type. Choose from Linux and Windows.
     Defaults to Linux. Known values are: "Linux" and "Windows".
:vartype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:ivar max_count: Maximum number of nodes for auto-scaling.
:vartype max_count: int
:ivar min_count: Minimum number of nodes for auto-scaling.
:vartype min_count: int
:ivar enable_auto_scaling: Whether to enable auto-scaler.
:vartype enable_auto_scaling: bool
:ivar type: AgentPoolType represents types of an agent pool. Known values are:
"VirtualMachineScaleSets" and "AvailabilitySet".
:vartype type: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolType
:ivar mode: AgentPoolMode represents mode of an agent pool. Known values are: "System" and
"User".
:vartype mode: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolMode
:ivar orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:vartype orchestrator_version: str
:ivar node_image_version: Version of node image.
:vartype node_image_version: str
    :ivar upgrade_settings: Settings for upgrading the agent pool.
:vartype upgrade_settings:
~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolUpgradeSettings
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar power_state: Describes whether the Agent Pool is Running or Stopped.
:vartype power_state: ~azure.mgmt.containerservice.v2020_09_01.models.PowerState
:ivar availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:vartype availability_zones: list[str]
:ivar enable_node_public_ip: Enable public IP for nodes.
:vartype enable_node_public_ip: bool
:ivar scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set
     priority. Defaults to 'Regular'. Known values are: "Spot" and "Regular".
:vartype scale_set_priority: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetPriority
:ivar scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy
     for the Spot virtual machine scale set. Defaults to 'Delete'. Known values are: "Delete" and
"Deallocate".
:vartype scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetEvictionPolicy
:ivar spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to
     pay in US Dollars. Possible values are any decimal value greater than zero, or -1, which
     indicates that the default price is up to the on-demand price.
:vartype spot_max_price: float
:ivar tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
:vartype tags: dict[str, str]
:ivar node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:vartype node_labels: dict[str, str]
:ivar node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:vartype node_taints: list[str]
:ivar proximity_placement_group_id: The ID for Proximity Placement Group.
:vartype proximity_placement_group_id: str
:ivar name: Unique name of the agent pool profile in the context of the subscription and
resource group. Required.
:vartype name: str
"""
_validation = {
"os_disk_size_gb": {"maximum": 1023, "minimum": 0},
"node_image_version": {"readonly": True},
"provisioning_state": {"readonly": True},
"power_state": {"readonly": True},
"name": {"required": True, "pattern": r"^[a-z][a-z0-9]{0,11}$"},
}
_attribute_map = {
"count": {"key": "count", "type": "int"},
"vm_size": {"key": "vmSize", "type": "str"},
"os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"},
"os_disk_type": {"key": "osDiskType", "type": "str"},
"vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"},
"max_pods": {"key": "maxPods", "type": "int"},
"os_type": {"key": "osType", "type": "str"},
"max_count": {"key": "maxCount", "type": "int"},
"min_count": {"key": "minCount", "type": "int"},
"enable_auto_scaling": {"key": "enableAutoScaling", "type": "bool"},
"type": {"key": "type", "type": "str"},
"mode": {"key": "mode", "type": "str"},
"orchestrator_version": {"key": "orchestratorVersion", "type": "str"},
"node_image_version": {"key": "nodeImageVersion", "type": "str"},
"upgrade_settings": {"key": "upgradeSettings", "type": "AgentPoolUpgradeSettings"},
"provisioning_state": {"key": "provisioningState", "type": "str"},
"power_state": {"key": "powerState", "type": "PowerState"},
"availability_zones": {"key": "availabilityZones", "type": "[str]"},
"enable_node_public_ip": {"key": "enableNodePublicIP", "type": "bool"},
"scale_set_priority": {"key": "scaleSetPriority", "type": "str"},
"scale_set_eviction_policy": {"key": "scaleSetEvictionPolicy", "type": "str"},
"spot_max_price": {"key": "spotMaxPrice", "type": "float"},
"tags": {"key": "tags", "type": "{str}"},
"node_labels": {"key": "nodeLabels", "type": "{str}"},
"node_taints": {"key": "nodeTaints", "type": "[str]"},
"proximity_placement_group_id": {"key": "proximityPlacementGroupID", "type": "str"},
"name": {"key": "name", "type": "str"},
}
def __init__( # pylint: disable=too-many-locals
self,
*,
name: str,
count: Optional[int] = None,
vm_size: Optional[Union[str, "_models.ContainerServiceVMSizeTypes"]] = None,
os_disk_size_gb: Optional[int] = None,
os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None,
vnet_subnet_id: Optional[str] = None,
max_pods: Optional[int] = None,
os_type: Union[str, "_models.OSType"] = "Linux",
max_count: Optional[int] = None,
min_count: Optional[int] = None,
enable_auto_scaling: Optional[bool] = None,
type: Optional[Union[str, "_models.AgentPoolType"]] = None,
mode: Optional[Union[str, "_models.AgentPoolMode"]] = None,
orchestrator_version: Optional[str] = None,
upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None,
availability_zones: Optional[List[str]] = None,
enable_node_public_ip: Optional[bool] = None,
scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular",
scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete",
spot_max_price: float = -1,
tags: Optional[Dict[str, str]] = None,
node_labels: Optional[Dict[str, str]] = None,
node_taints: Optional[List[str]] = None,
proximity_placement_group_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:paramtype count: int
:keyword vm_size: Size of agent VMs. Known values are: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", and "Standard_NV6".
:paramtype vm_size: str or
~azure.mgmt.containerservice.v2020_09_01.models.ContainerServiceVMSizeTypes
:keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
machine in this master/agent pool. If you specify 0, it will apply the default osDisk size
according to the vmSize specified.
:paramtype os_disk_size_gb: int
:keyword os_disk_type: OS disk type to be used for machines in a given agent pool. Allowed
values are 'Ephemeral' and 'Managed'. Defaults to 'Managed'. May not be changed after creation.
Known values are: "Managed" and "Ephemeral".
:paramtype os_disk_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSDiskType
:keyword vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:paramtype vnet_subnet_id: str
:keyword max_pods: Maximum number of pods that can run on a node.
:paramtype max_pods: int
    :keyword os_type: OsType to be used to specify the OS type. Choose from Linux and Windows.
     Defaults to Linux. Known values are: "Linux" and "Windows".
:paramtype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:keyword max_count: Maximum number of nodes for auto-scaling.
:paramtype max_count: int
:keyword min_count: Minimum number of nodes for auto-scaling.
:paramtype min_count: int
:keyword enable_auto_scaling: Whether to enable auto-scaler.
:paramtype enable_auto_scaling: bool
:keyword type: AgentPoolType represents types of an agent pool. Known values are:
"VirtualMachineScaleSets" and "AvailabilitySet".
:paramtype type: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolType
:keyword mode: AgentPoolMode represents mode of an agent pool. Known values are: "System" and
"User".
:paramtype mode: str or ~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolMode
:keyword orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:paramtype orchestrator_version: str
    :keyword upgrade_settings: Settings for upgrading the agent pool.
:paramtype upgrade_settings:
~azure.mgmt.containerservice.v2020_09_01.models.AgentPoolUpgradeSettings
:keyword availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:paramtype availability_zones: list[str]
:keyword enable_node_public_ip: Enable public IP for nodes.
:paramtype enable_node_public_ip: bool
:keyword scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set
     priority. Defaults to 'Regular'. Known values are: "Spot" and "Regular".
:paramtype scale_set_priority: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetPriority
:keyword scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction
     policy for the Spot virtual machine scale set. Defaults to 'Delete'. Known values are: "Delete" and
"Deallocate".
:paramtype scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_09_01.models.ScaleSetEvictionPolicy
:keyword spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing
     to pay in US Dollars. Possible values are any decimal value greater than zero, or -1, which
     indicates that the default price is up to the on-demand price.
:paramtype spot_max_price: float
:keyword tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
:paramtype tags: dict[str, str]
:keyword node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:paramtype node_labels: dict[str, str]
:keyword node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:paramtype node_taints: list[str]
:keyword proximity_placement_group_id: The ID for Proximity Placement Group.
:paramtype proximity_placement_group_id: str
:keyword name: Unique name of the agent pool profile in the context of the subscription and
resource group. Required.
:paramtype name: str
"""
super().__init__(
count=count,
vm_size=vm_size,
os_disk_size_gb=os_disk_size_gb,
os_disk_type=os_disk_type,
vnet_subnet_id=vnet_subnet_id,
max_pods=max_pods,
os_type=os_type,
max_count=max_count,
min_count=min_count,
enable_auto_scaling=enable_auto_scaling,
type=type,
mode=mode,
orchestrator_version=orchestrator_version,
upgrade_settings=upgrade_settings,
availability_zones=availability_zones,
enable_node_public_ip=enable_node_public_ip,
scale_set_priority=scale_set_priority,
scale_set_eviction_policy=scale_set_eviction_policy,
spot_max_price=spot_max_price,
tags=tags,
node_labels=node_labels,
node_taints=node_taints,
proximity_placement_group_id=proximity_placement_group_id,
**kwargs
)
self.name = name
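

# Editorial usage sketch (not part of the generated client library): a named system pool
# as it would appear in a managed cluster's agent pool list. The pool name must match
# ^[a-z][a-z0-9]{0,11}$; availability zones require the VirtualMachineScaleSets pool type.
# The concrete values are illustrative assumptions only.
def _example_system_pool() -> "ManagedClusterAgentPoolProfile":
    return ManagedClusterAgentPoolProfile(
        name="nodepool1",
        count=3,
        vm_size="Standard_DS2_v2",
        os_type="Linux",
        mode="System",
        type="VirtualMachineScaleSets",
        availability_zones=["1", "2", "3"],
    )

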
class ManagedClusterAPIServerAccessProfile(_serialization.Model):
"""Access profile for managed cluster API server.
    :ivar authorized_ip_ranges: Authorized IP ranges for the Kubernetes API server.
:vartype authorized_ip_ranges: list[str]
:ivar enable_private_cluster: Whether to create the cluster as a private cluster or not.
:vartype enable_private_cluster: bool
"""
_attribute_map = {
"authorized_ip_ranges": {"key": "authorizedIPRanges", "type": "[str]"},
"enable_private_cluster": {"key": "enablePrivateCluster", "type": "bool"},
}
def __init__(
self,
*,
authorized_ip_ranges: Optional[List[str]] = None,
enable_private_cluster: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
    :keyword authorized_ip_ranges: Authorized IP ranges for the Kubernetes API server.
:paramtype authorized_ip_ranges: list[str]
:keyword enable_private_cluster: Whether to create the cluster as a private cluster or not.
:paramtype enable_private_cluster: bool
"""
super().__init__(**kwargs)
self.authorized_ip_ranges = authorized_ip_ranges
self.enable_private_cluster = enable_private_cluster
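

# Editorial usage sketch (not part of the generated client library): restricting API server
# access to a single authorized CIDR range. The address range is a documentation placeholder;
# authorized IP ranges are typically not combined with a private cluster, so only one of the
# two options is set here.
def _example_api_server_access() -> "ManagedClusterAPIServerAccessProfile":
    return ManagedClusterAPIServerAccessProfile(
        authorized_ip_ranges=["203.0.113.0/24"],
        enable_private_cluster=False,
    )

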
class ManagedClusterIdentity(_serialization.Model):
"""Identity for the managed cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of the system assigned identity which is used by master
components.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the system assigned identity which is used by master
components.
:vartype tenant_id: str
:ivar type: The type of identity used for the managed cluster. Type 'SystemAssigned' will use
an implicitly created identity in master components and an auto-created user assigned identity
     in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed cluster;
     a service principal will be used instead. Known values are: "SystemAssigned", "UserAssigned", and
"None".
:vartype type: str or ~azure.mgmt.containerservice.v2020_09_01.models.ResourceIdentityType
:ivar user_assigned_identities: The user identity associated with the managed cluster. This
     identity will be used in the control plane and only one user assigned identity is allowed. The user
identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:vartype user_assigned_identities: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterIdentityUserAssignedIdentitiesValue]
"""
_validation = {
"principal_id": {"readonly": True},
"tenant_id": {"readonly": True},
}
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"tenant_id": {"key": "tenantId", "type": "str"},
"type": {"key": "type", "type": "str"},
"user_assigned_identities": {
"key": "userAssignedIdentities",
"type": "{ManagedClusterIdentityUserAssignedIdentitiesValue}",
},
}
def __init__(
self,
*,
type: Optional[Union[str, "_models.ResourceIdentityType"]] = None,
user_assigned_identities: Optional[
Dict[str, "_models.ManagedClusterIdentityUserAssignedIdentitiesValue"]
] = None,
**kwargs: Any
) -> None:
"""
:keyword type: The type of identity used for the managed cluster. Type 'SystemAssigned' will
use an implicitly created identity in master components and an auto-created user assigned
identity in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed
     cluster; a service principal will be used instead. Known values are: "SystemAssigned",
"UserAssigned", and "None".
:paramtype type: str or ~azure.mgmt.containerservice.v2020_09_01.models.ResourceIdentityType
:keyword user_assigned_identities: The user identity associated with the managed cluster. This
     identity will be used in the control plane and only one user assigned identity is allowed. The user
identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:paramtype user_assigned_identities: dict[str,
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterIdentityUserAssignedIdentitiesValue]
"""
super().__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
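

# Editorial usage sketch (not part of the generated client library): the simplest identity
# configuration, a system-assigned managed identity. For "UserAssigned", the
# user_assigned_identities mapping would carry the ARM resource id of an existing identity.
def _example_cluster_identity() -> "ManagedClusterIdentity":
    return ManagedClusterIdentity(type="SystemAssigned")

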
class ManagedClusterIdentityUserAssignedIdentitiesValue(_serialization.Model):
"""ManagedClusterIdentityUserAssignedIdentitiesValue.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
"""
_validation = {
"principal_id": {"readonly": True},
"client_id": {"readonly": True},
}
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"client_id": {"key": "clientId", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.principal_id = None
self.client_id = None
class ManagedClusterListResult(_serialization.Model):
"""The response from the List Managed Clusters operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of managed clusters.
:vartype value: list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster]
:ivar next_link: The URL to get the next set of managed cluster results.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[ManagedCluster]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.ManagedCluster"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The list of managed clusters.
:paramtype value: list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
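

# Editorial usage sketch (not part of the generated client library): reading a single
# deserialized page of list results. In practice the operations layer follows next_link
# automatically; this helper assumes each entry exposes the standard Resource ``name``.
def _example_cluster_names(page: "ManagedClusterListResult") -> List[str]:
    return [cluster.name for cluster in (page.value or [])]

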
class ManagedClusterLoadBalancerProfile(_serialization.Model):
"""Profile of the managed cluster load balancer.
:ivar managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer.
:vartype managed_outbound_i_ps:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs
:ivar outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load balancer.
:vartype outbound_ip_prefixes:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes
:ivar outbound_i_ps: Desired outbound IP resources for the cluster load balancer.
:vartype outbound_i_ps:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterLoadBalancerProfileOutboundIPs
:ivar effective_outbound_i_ps: The effective outbound IP resources of the cluster load
balancer.
:vartype effective_outbound_i_ps:
list[~azure.mgmt.containerservice.v2020_09_01.models.ResourceReference]
:ivar allocated_outbound_ports: Desired number of allocated SNAT ports per VM. Allowed values
     must be in the range of 0 to 64000 (inclusive). The default value is 0, which results in Azure
dynamically allocating ports.
:vartype allocated_outbound_ports: int
:ivar idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values
must be in the range of 4 to 120 (inclusive). The default value is 30 minutes.
:vartype idle_timeout_in_minutes: int
"""
_validation = {
"allocated_outbound_ports": {"maximum": 64000, "minimum": 0},
"idle_timeout_in_minutes": {"maximum": 120, "minimum": 4},
}
_attribute_map = {
"managed_outbound_i_ps": {
"key": "managedOutboundIPs",
"type": "ManagedClusterLoadBalancerProfileManagedOutboundIPs",
},
"outbound_ip_prefixes": {
"key": "outboundIPPrefixes",
"type": "ManagedClusterLoadBalancerProfileOutboundIPPrefixes",
},
"outbound_i_ps": {"key": "outboundIPs", "type": "ManagedClusterLoadBalancerProfileOutboundIPs"},
"effective_outbound_i_ps": {"key": "effectiveOutboundIPs", "type": "[ResourceReference]"},
"allocated_outbound_ports": {"key": "allocatedOutboundPorts", "type": "int"},
"idle_timeout_in_minutes": {"key": "idleTimeoutInMinutes", "type": "int"},
}
def __init__(
self,
*,
managed_outbound_i_ps: Optional["_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs"] = None,
outbound_ip_prefixes: Optional["_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes"] = None,
outbound_i_ps: Optional["_models.ManagedClusterLoadBalancerProfileOutboundIPs"] = None,
effective_outbound_i_ps: Optional[List["_models.ResourceReference"]] = None,
allocated_outbound_ports: int = 0,
idle_timeout_in_minutes: int = 30,
**kwargs: Any
) -> None:
"""
:keyword managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer.
:paramtype managed_outbound_i_ps:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs
:keyword outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load
balancer.
:paramtype outbound_ip_prefixes:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes
:keyword outbound_i_ps: Desired outbound IP resources for the cluster load balancer.
:paramtype outbound_i_ps:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterLoadBalancerProfileOutboundIPs
:keyword effective_outbound_i_ps: The effective outbound IP resources of the cluster load
balancer.
:paramtype effective_outbound_i_ps:
list[~azure.mgmt.containerservice.v2020_09_01.models.ResourceReference]
:keyword allocated_outbound_ports: Desired number of allocated SNAT ports per VM. Allowed
     values must be in the range of 0 to 64000 (inclusive). The default value is 0, which results in
Azure dynamically allocating ports.
:paramtype allocated_outbound_ports: int
:keyword idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values
must be in the range of 4 to 120 (inclusive). The default value is 30 minutes.
:paramtype idle_timeout_in_minutes: int
"""
super().__init__(**kwargs)
self.managed_outbound_i_ps = managed_outbound_i_ps
self.outbound_ip_prefixes = outbound_ip_prefixes
self.outbound_i_ps = outbound_i_ps
self.effective_outbound_i_ps = effective_outbound_i_ps
self.allocated_outbound_ports = allocated_outbound_ports
self.idle_timeout_in_minutes = idle_timeout_in_minutes
class ManagedClusterLoadBalancerProfileManagedOutboundIPs(_serialization.Model):
"""Desired managed outbound IPs for the cluster load balancer.
    :ivar count: Desired number of outbound IPs created/managed by Azure for the cluster load
balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
:vartype count: int
"""
_validation = {
"count": {"maximum": 100, "minimum": 1},
}
_attribute_map = {
"count": {"key": "count", "type": "int"},
}
def __init__(self, *, count: int = 1, **kwargs: Any) -> None:
"""
    :keyword count: Desired number of outbound IPs created/managed by Azure for the cluster load
balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
:paramtype count: int
"""
super().__init__(**kwargs)
self.count = count
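

# Editorial usage sketch (not part of the generated client library): combining the two
# models above to request two Azure-managed outbound IPs and a shorter idle timeout. The
# values are illustrative assumptions within the documented ranges (1-100 IPs, 4-120 minutes).
def _example_load_balancer_profile() -> "ManagedClusterLoadBalancerProfile":
    return ManagedClusterLoadBalancerProfile(
        managed_outbound_i_ps=ManagedClusterLoadBalancerProfileManagedOutboundIPs(count=2),
        idle_timeout_in_minutes=10,
    )

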
class ManagedClusterLoadBalancerProfileOutboundIPPrefixes(_serialization.Model):
"""Desired outbound IP Prefix resources for the cluster load balancer.
:ivar public_ip_prefixes: A list of public IP prefix resources.
:vartype public_ip_prefixes:
list[~azure.mgmt.containerservice.v2020_09_01.models.ResourceReference]
"""
_attribute_map = {
"public_ip_prefixes": {"key": "publicIPPrefixes", "type": "[ResourceReference]"},
}
def __init__(
self, *, public_ip_prefixes: Optional[List["_models.ResourceReference"]] = None, **kwargs: Any
) -> None:
"""
:keyword public_ip_prefixes: A list of public IP prefix resources.
:paramtype public_ip_prefixes:
list[~azure.mgmt.containerservice.v2020_09_01.models.ResourceReference]
"""
super().__init__(**kwargs)
self.public_ip_prefixes = public_ip_prefixes
class ManagedClusterLoadBalancerProfileOutboundIPs(_serialization.Model):
"""Desired outbound IP resources for the cluster load balancer.
:ivar public_i_ps: A list of public IP resources.
:vartype public_i_ps: list[~azure.mgmt.containerservice.v2020_09_01.models.ResourceReference]
"""
_attribute_map = {
"public_i_ps": {"key": "publicIPs", "type": "[ResourceReference]"},
}
def __init__(self, *, public_i_ps: Optional[List["_models.ResourceReference"]] = None, **kwargs: Any) -> None:
"""
:keyword public_i_ps: A list of public IP resources.
:paramtype public_i_ps: list[~azure.mgmt.containerservice.v2020_09_01.models.ResourceReference]
"""
super().__init__(**kwargs)
self.public_i_ps = public_i_ps
class ManagedClusterPoolUpgradeProfile(_serialization.Model):
"""The list of available upgrade versions.
All required parameters must be populated in order to send to Azure.
:ivar kubernetes_version: Kubernetes version (major, minor, patch). Required.
:vartype kubernetes_version: str
:ivar name: Pool name.
:vartype name: str
    :ivar os_type: OsType to be used to specify the OS type. Choose from Linux and Windows.
     Defaults to Linux. Known values are: "Linux" and "Windows".
:vartype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:ivar upgrades: List of orchestrator types and versions available for upgrade.
:vartype upgrades:
list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPoolUpgradeProfileUpgradesItem]
"""
_validation = {
"kubernetes_version": {"required": True},
"os_type": {"required": True},
}
_attribute_map = {
"kubernetes_version": {"key": "kubernetesVersion", "type": "str"},
"name": {"key": "name", "type": "str"},
"os_type": {"key": "osType", "type": "str"},
"upgrades": {"key": "upgrades", "type": "[ManagedClusterPoolUpgradeProfileUpgradesItem]"},
}
def __init__(
self,
*,
kubernetes_version: str,
os_type: Union[str, "_models.OSType"] = "Linux",
name: Optional[str] = None,
upgrades: Optional[List["_models.ManagedClusterPoolUpgradeProfileUpgradesItem"]] = None,
**kwargs: Any
) -> None:
"""
:keyword kubernetes_version: Kubernetes version (major, minor, patch). Required.
:paramtype kubernetes_version: str
:keyword name: Pool name.
:paramtype name: str
    :keyword os_type: OsType to be used to specify the OS type. Choose from Linux and Windows.
     Defaults to Linux. Known values are: "Linux" and "Windows".
:paramtype os_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.OSType
:keyword upgrades: List of orchestrator types and versions available for upgrade.
:paramtype upgrades:
list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPoolUpgradeProfileUpgradesItem]
"""
super().__init__(**kwargs)
self.kubernetes_version = kubernetes_version
self.name = name
self.os_type = os_type
self.upgrades = upgrades
class ManagedClusterPoolUpgradeProfileUpgradesItem(_serialization.Model):
"""ManagedClusterPoolUpgradeProfileUpgradesItem.
:ivar kubernetes_version: Kubernetes version (major, minor, patch).
:vartype kubernetes_version: str
    :ivar is_preview: Whether the Kubernetes version is currently in preview.
:vartype is_preview: bool
"""
_attribute_map = {
"kubernetes_version": {"key": "kubernetesVersion", "type": "str"},
"is_preview": {"key": "isPreview", "type": "bool"},
}
def __init__(
self, *, kubernetes_version: Optional[str] = None, is_preview: Optional[bool] = None, **kwargs: Any
) -> None:
"""
:keyword kubernetes_version: Kubernetes version (major, minor, patch).
:paramtype kubernetes_version: str
    :keyword is_preview: Whether the Kubernetes version is currently in preview.
:paramtype is_preview: bool
"""
super().__init__(**kwargs)
self.kubernetes_version = kubernetes_version
self.is_preview = is_preview
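

# Editorial usage sketch (not part of the generated client library): summarising the
# Kubernetes versions a pool can upgrade to from a deserialized upgrade profile.
def _example_available_upgrades(profile: "ManagedClusterPoolUpgradeProfile") -> List[str]:
    return [
        item.kubernetes_version
        for item in (profile.upgrades or [])
        if item.kubernetes_version is not None
    ]

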
class ManagedClusterPropertiesAutoScalerProfile(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Parameters to be applied to the cluster-autoscaler when enabled.
:ivar balance_similar_node_groups:
:vartype balance_similar_node_groups: str
:ivar expander: Known values are: "least-waste", "most-pods", and "random".
:vartype expander: str or ~azure.mgmt.containerservice.v2020_09_01.models.Expander
:ivar max_empty_bulk_delete:
:vartype max_empty_bulk_delete: str
:ivar max_graceful_termination_sec:
:vartype max_graceful_termination_sec: str
:ivar max_total_unready_percentage:
:vartype max_total_unready_percentage: str
:ivar new_pod_scale_up_delay:
:vartype new_pod_scale_up_delay: str
:ivar ok_total_unready_count:
:vartype ok_total_unready_count: str
:ivar scan_interval:
:vartype scan_interval: str
:ivar scale_down_delay_after_add:
:vartype scale_down_delay_after_add: str
:ivar scale_down_delay_after_delete:
:vartype scale_down_delay_after_delete: str
:ivar scale_down_delay_after_failure:
:vartype scale_down_delay_after_failure: str
:ivar scale_down_unneeded_time:
:vartype scale_down_unneeded_time: str
:ivar scale_down_unready_time:
:vartype scale_down_unready_time: str
:ivar scale_down_utilization_threshold:
:vartype scale_down_utilization_threshold: str
:ivar skip_nodes_with_local_storage:
:vartype skip_nodes_with_local_storage: str
:ivar skip_nodes_with_system_pods:
:vartype skip_nodes_with_system_pods: str
"""
_attribute_map = {
"balance_similar_node_groups": {"key": "balance-similar-node-groups", "type": "str"},
"expander": {"key": "expander", "type": "str"},
"max_empty_bulk_delete": {"key": "max-empty-bulk-delete", "type": "str"},
"max_graceful_termination_sec": {"key": "max-graceful-termination-sec", "type": "str"},
"max_total_unready_percentage": {"key": "max-total-unready-percentage", "type": "str"},
"new_pod_scale_up_delay": {"key": "new-pod-scale-up-delay", "type": "str"},
"ok_total_unready_count": {"key": "ok-total-unready-count", "type": "str"},
"scan_interval": {"key": "scan-interval", "type": "str"},
"scale_down_delay_after_add": {"key": "scale-down-delay-after-add", "type": "str"},
"scale_down_delay_after_delete": {"key": "scale-down-delay-after-delete", "type": "str"},
"scale_down_delay_after_failure": {"key": "scale-down-delay-after-failure", "type": "str"},
"scale_down_unneeded_time": {"key": "scale-down-unneeded-time", "type": "str"},
"scale_down_unready_time": {"key": "scale-down-unready-time", "type": "str"},
"scale_down_utilization_threshold": {"key": "scale-down-utilization-threshold", "type": "str"},
"skip_nodes_with_local_storage": {"key": "skip-nodes-with-local-storage", "type": "str"},
"skip_nodes_with_system_pods": {"key": "skip-nodes-with-system-pods", "type": "str"},
}
def __init__(
self,
*,
balance_similar_node_groups: Optional[str] = None,
expander: Optional[Union[str, "_models.Expander"]] = None,
max_empty_bulk_delete: Optional[str] = None,
max_graceful_termination_sec: Optional[str] = None,
max_total_unready_percentage: Optional[str] = None,
new_pod_scale_up_delay: Optional[str] = None,
ok_total_unready_count: Optional[str] = None,
scan_interval: Optional[str] = None,
scale_down_delay_after_add: Optional[str] = None,
scale_down_delay_after_delete: Optional[str] = None,
scale_down_delay_after_failure: Optional[str] = None,
scale_down_unneeded_time: Optional[str] = None,
scale_down_unready_time: Optional[str] = None,
scale_down_utilization_threshold: Optional[str] = None,
skip_nodes_with_local_storage: Optional[str] = None,
skip_nodes_with_system_pods: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword balance_similar_node_groups:
:paramtype balance_similar_node_groups: str
:keyword expander: Known values are: "least-waste", "most-pods", and "random".
:paramtype expander: str or ~azure.mgmt.containerservice.v2020_09_01.models.Expander
:keyword max_empty_bulk_delete:
:paramtype max_empty_bulk_delete: str
:keyword max_graceful_termination_sec:
:paramtype max_graceful_termination_sec: str
:keyword max_total_unready_percentage:
:paramtype max_total_unready_percentage: str
:keyword new_pod_scale_up_delay:
:paramtype new_pod_scale_up_delay: str
:keyword ok_total_unready_count:
:paramtype ok_total_unready_count: str
:keyword scan_interval:
:paramtype scan_interval: str
:keyword scale_down_delay_after_add:
:paramtype scale_down_delay_after_add: str
:keyword scale_down_delay_after_delete:
:paramtype scale_down_delay_after_delete: str
:keyword scale_down_delay_after_failure:
:paramtype scale_down_delay_after_failure: str
:keyword scale_down_unneeded_time:
:paramtype scale_down_unneeded_time: str
:keyword scale_down_unready_time:
:paramtype scale_down_unready_time: str
:keyword scale_down_utilization_threshold:
:paramtype scale_down_utilization_threshold: str
:keyword skip_nodes_with_local_storage:
:paramtype skip_nodes_with_local_storage: str
:keyword skip_nodes_with_system_pods:
:paramtype skip_nodes_with_system_pods: str
"""
super().__init__(**kwargs)
self.balance_similar_node_groups = balance_similar_node_groups
self.expander = expander
self.max_empty_bulk_delete = max_empty_bulk_delete
self.max_graceful_termination_sec = max_graceful_termination_sec
self.max_total_unready_percentage = max_total_unready_percentage
self.new_pod_scale_up_delay = new_pod_scale_up_delay
self.ok_total_unready_count = ok_total_unready_count
self.scan_interval = scan_interval
self.scale_down_delay_after_add = scale_down_delay_after_add
self.scale_down_delay_after_delete = scale_down_delay_after_delete
self.scale_down_delay_after_failure = scale_down_delay_after_failure
self.scale_down_unneeded_time = scale_down_unneeded_time
self.scale_down_unready_time = scale_down_unready_time
self.scale_down_utilization_threshold = scale_down_utilization_threshold
self.skip_nodes_with_local_storage = skip_nodes_with_local_storage
self.skip_nodes_with_system_pods = skip_nodes_with_system_pods
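# Illustrative sketch (not part of the generated SDK): the model is constructed with
# snake_case keyword arguments, while _attribute_map translates them to the hyphenated
# wire-format keys shown above when the object is serialized, e.g.
#
#     profile = ManagedClusterPropertiesAutoScalerProfile(
#         balance_similar_node_groups="true",
#         scan_interval="10s",
#     )
#
# which would be sent as {"balance-similar-node-groups": "true", "scan-interval": "10s"}.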
class ManagedClusterPropertiesIdentityProfileValue(UserAssignedIdentity):
"""ManagedClusterPropertiesIdentityProfileValue.
:ivar resource_id: The resource id of the user assigned identity.
:vartype resource_id: str
:ivar client_id: The client id of the user assigned identity.
:vartype client_id: str
:ivar object_id: The object id of the user assigned identity.
:vartype object_id: str
"""
_attribute_map = {
"resource_id": {"key": "resourceId", "type": "str"},
"client_id": {"key": "clientId", "type": "str"},
"object_id": {"key": "objectId", "type": "str"},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
client_id: Optional[str] = None,
object_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword resource_id: The resource id of the user assigned identity.
:paramtype resource_id: str
:keyword client_id: The client id of the user assigned identity.
:paramtype client_id: str
:keyword object_id: The object id of the user assigned identity.
:paramtype object_id: str
"""
super().__init__(resource_id=resource_id, client_id=client_id, object_id=object_id, **kwargs)
class ManagedClusterServicePrincipalProfile(_serialization.Model):
"""Information about a service principal identity for the cluster to use for manipulating Azure
APIs.
All required parameters must be populated in order to send to Azure.
:ivar client_id: The ID for the service principal. Required.
:vartype client_id: str
:ivar secret: The secret password associated with the service principal in plain text.
:vartype secret: str
"""
_validation = {
"client_id": {"required": True},
}
_attribute_map = {
"client_id": {"key": "clientId", "type": "str"},
"secret": {"key": "secret", "type": "str"},
}
def __init__(self, *, client_id: str, secret: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword client_id: The ID for the service principal. Required.
:paramtype client_id: str
:keyword secret: The secret password associated with the service principal in plain text.
:paramtype secret: str
"""
super().__init__(**kwargs)
self.client_id = client_id
self.secret = secret
class ManagedClusterSKU(_serialization.Model):
"""ManagedClusterSKU.
:ivar name: Name of a managed cluster SKU. "Basic"
:vartype name: str or ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterSKUName
:ivar tier: Tier of a managed cluster SKU. Known values are: "Paid" and "Free".
:vartype tier: str or ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterSKUTier
"""
_attribute_map = {
"name": {"key": "name", "type": "str"},
"tier": {"key": "tier", "type": "str"},
}
def __init__(
self,
*,
name: Optional[Union[str, "_models.ManagedClusterSKUName"]] = None,
tier: Optional[Union[str, "_models.ManagedClusterSKUTier"]] = None,
**kwargs: Any
) -> None:
"""
:keyword name: Name of a managed cluster SKU. "Basic"
:paramtype name: str or ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterSKUName
:keyword tier: Tier of a managed cluster SKU. Known values are: "Paid" and "Free".
:paramtype tier: str or ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterSKUTier
"""
super().__init__(**kwargs)
self.name = name
self.tier = tier
class ManagedClusterUpgradeProfile(_serialization.Model):
"""The list of available upgrades for compute pools.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of upgrade profile.
:vartype id: str
:ivar name: Name of upgrade profile.
:vartype name: str
:ivar type: Type of upgrade profile.
:vartype type: str
:ivar control_plane_profile: The list of available upgrade versions for the control plane.
Required.
:vartype control_plane_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPoolUpgradeProfile
:ivar agent_pool_profiles: The list of available upgrade versions for agent pools. Required.
:vartype agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPoolUpgradeProfile]
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"control_plane_profile": {"required": True},
"agent_pool_profiles": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"control_plane_profile": {"key": "properties.controlPlaneProfile", "type": "ManagedClusterPoolUpgradeProfile"},
"agent_pool_profiles": {"key": "properties.agentPoolProfiles", "type": "[ManagedClusterPoolUpgradeProfile]"},
}
def __init__(
self,
*,
control_plane_profile: "_models.ManagedClusterPoolUpgradeProfile",
agent_pool_profiles: List["_models.ManagedClusterPoolUpgradeProfile"],
**kwargs: Any
) -> None:
"""
:keyword control_plane_profile: The list of available upgrade versions for the control plane.
Required.
:paramtype control_plane_profile:
~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPoolUpgradeProfile
:keyword agent_pool_profiles: The list of available upgrade versions for agent pools. Required.
:paramtype agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterPoolUpgradeProfile]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.control_plane_profile = control_plane_profile
self.agent_pool_profiles = agent_pool_profiles
class ManagedClusterWindowsProfile(_serialization.Model):
"""Profile for Windows VMs in the container service cluster.
All required parameters must be populated in order to send to Azure.
:ivar admin_username: Specifies the name of the administrator account. :code:`<br>`:code:`<br>`
**restriction:** Cannot end in "." :code:`<br>`:code:`<br>` **Disallowed values:**
"administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1",
"123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest",
"john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2",
"test3", "user4", "user5". :code:`<br>`:code:`<br>` **Minimum-length:** 1 character
:code:`<br>`:code:`<br>` **Max-length:** 20 characters. Required.
:vartype admin_username: str
:ivar admin_password: Specifies the password of the administrator account.
:code:`<br>`:code:`<br>` **Minimum-length:** 8 characters :code:`<br>`:code:`<br>`
**Max-length:** 123 characters :code:`<br>`:code:`<br>` **Complexity requirements:** 3 out of 4
conditions below need to be fulfilled :code:`<br>` Has lower characters :code:`<br>`Has upper
characters :code:`<br>` Has a digit :code:`<br>` Has a special character (Regex match [\W_])
:code:`<br>`:code:`<br>` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd",
"P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!".
:vartype admin_password: str
:ivar license_type: The licenseType to use for Windows VMs. Windows_Server is used to enable
Azure Hybrid User Benefits for Windows VMs. Known values are: "None" and "Windows_Server".
:vartype license_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.LicenseType
"""
_validation = {
"admin_username": {"required": True},
}
_attribute_map = {
"admin_username": {"key": "adminUsername", "type": "str"},
"admin_password": {"key": "adminPassword", "type": "str"},
"license_type": {"key": "licenseType", "type": "str"},
}
def __init__(
self,
*,
admin_username: str,
admin_password: Optional[str] = None,
license_type: Optional[Union[str, "_models.LicenseType"]] = None,
**kwargs: Any
) -> None:
"""
:keyword admin_username: Specifies the name of the administrator account.
:code:`<br>`:code:`<br>` **restriction:** Cannot end in "." :code:`<br>`:code:`<br>`
**Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1",
"user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console",
"david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0",
"sys", "test2", "test3", "user4", "user5". :code:`<br>`:code:`<br>` **Minimum-length:** 1
character :code:`<br>`:code:`<br>` **Max-length:** 20 characters. Required.
:paramtype admin_username: str
:keyword admin_password: Specifies the password of the administrator account.
:code:`<br>`:code:`<br>` **Minimum-length:** 8 characters :code:`<br>`:code:`<br>`
**Max-length:** 123 characters :code:`<br>`:code:`<br>` **Complexity requirements:** 3 out of 4
conditions below need to be fulfilled :code:`<br>` Has lower characters :code:`<br>`Has upper
characters :code:`<br>` Has a digit :code:`<br>` Has a special character (Regex match [\W_])
:code:`<br>`:code:`<br>` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd",
"P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!".
:paramtype admin_password: str
:keyword license_type: The licenseType to use for Windows VMs. Windows_Server is used to enable
Azure Hybrid User Benefits for Windows VMs. Known values are: "None" and "Windows_Server".
:paramtype license_type: str or ~azure.mgmt.containerservice.v2020_09_01.models.LicenseType
"""
super().__init__(**kwargs)
self.admin_username = admin_username
self.admin_password = admin_password
self.license_type = license_type
class OperationListResult(_serialization.Model):
"""The List Compute Operation operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of compute operations.
:vartype value: list[~azure.mgmt.containerservice.v2020_09_01.models.OperationValue]
"""
_validation = {
"value": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[OperationValue]"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.value = None
class OperationValue(_serialization.Model):
"""Describes the properties of a Compute Operation value.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar origin: The origin of the compute operation.
:vartype origin: str
:ivar name: The name of the compute operation.
:vartype name: str
:ivar operation: The display name of the compute operation.
:vartype operation: str
:ivar resource: The display name of the resource the operation applies to.
:vartype resource: str
:ivar description: The description of the operation.
:vartype description: str
:ivar provider: The resource provider for the operation.
:vartype provider: str
"""
_validation = {
"origin": {"readonly": True},
"name": {"readonly": True},
"operation": {"readonly": True},
"resource": {"readonly": True},
"description": {"readonly": True},
"provider": {"readonly": True},
}
_attribute_map = {
"origin": {"key": "origin", "type": "str"},
"name": {"key": "name", "type": "str"},
"operation": {"key": "display.operation", "type": "str"},
"resource": {"key": "display.resource", "type": "str"},
"description": {"key": "display.description", "type": "str"},
"provider": {"key": "display.provider", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.origin = None
self.name = None
self.operation = None
self.resource = None
self.description = None
self.provider = None
class PowerState(_serialization.Model):
"""Describes the Power State of the cluster.
:ivar code: Tells whether the cluster is Running or Stopped. Known values are: "Running" and
"Stopped".
:vartype code: str or ~azure.mgmt.containerservice.v2020_09_01.models.Code
"""
_attribute_map = {
"code": {"key": "code", "type": "str"},
}
def __init__(self, *, code: Optional[Union[str, "_models.Code"]] = None, **kwargs: Any) -> None:
"""
:keyword code: Tells whether the cluster is Running or Stopped. Known values are: "Running" and
"Stopped".
:paramtype code: str or ~azure.mgmt.containerservice.v2020_09_01.models.Code
"""
super().__init__(**kwargs)
self.code = code
class PrivateEndpoint(_serialization.Model):
"""Private endpoint which a connection belongs to.
:ivar id: The resource Id for private endpoint.
:vartype id: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
}
def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin
"""
:keyword id: The resource Id for private endpoint.
:paramtype id: str
"""
super().__init__(**kwargs)
self.id = id
class PrivateEndpointConnection(_serialization.Model):
"""A private endpoint connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the private endpoint connection.
:vartype id: str
:ivar name: The name of the private endpoint connection.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar provisioning_state: The current provisioning state. Known values are: "Succeeded",
"Creating", "Deleting", and "Failed".
:vartype provisioning_state: str or
~azure.mgmt.containerservice.v2020_09_01.models.PrivateEndpointConnectionProvisioningState
:ivar private_endpoint: The resource of private endpoint.
:vartype private_endpoint: ~azure.mgmt.containerservice.v2020_09_01.models.PrivateEndpoint
:ivar private_link_service_connection_state: A collection of information about the state of the
connection between service consumer and provider.
:vartype private_link_service_connection_state:
~azure.mgmt.containerservice.v2020_09_01.models.PrivateLinkServiceConnectionState
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"provisioning_state": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"provisioning_state": {"key": "properties.provisioningState", "type": "str"},
"private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"},
"private_link_service_connection_state": {
"key": "properties.privateLinkServiceConnectionState",
"type": "PrivateLinkServiceConnectionState",
},
}
def __init__(
self,
*,
private_endpoint: Optional["_models.PrivateEndpoint"] = None,
private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
**kwargs: Any
) -> None:
"""
:keyword private_endpoint: The resource of private endpoint.
:paramtype private_endpoint: ~azure.mgmt.containerservice.v2020_09_01.models.PrivateEndpoint
:keyword private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:paramtype private_link_service_connection_state:
~azure.mgmt.containerservice.v2020_09_01.models.PrivateLinkServiceConnectionState
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.provisioning_state = None
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
class PrivateEndpointConnectionListResult(_serialization.Model):
"""A list of private endpoint connections.
:ivar value: The collection value.
:vartype value: list[~azure.mgmt.containerservice.v2020_09_01.models.PrivateEndpointConnection]
"""
_attribute_map = {
"value": {"key": "value", "type": "[PrivateEndpointConnection]"},
}
def __init__(self, *, value: Optional[List["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The collection value.
:paramtype value:
list[~azure.mgmt.containerservice.v2020_09_01.models.PrivateEndpointConnection]
"""
super().__init__(**kwargs)
self.value = value
class PrivateLinkResource(_serialization.Model):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the private link resource.
:vartype id: str
:ivar name: The name of the private link resource.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar group_id: The group ID of the resource.
:vartype group_id: str
:ivar required_members: RequiredMembers of the resource.
:vartype required_members: list[str]
:ivar private_link_service_id: The private link service ID of the resource, this field is
exposed only to NRP internally.
:vartype private_link_service_id: str
"""
_validation = {
"private_link_service_id": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"group_id": {"key": "groupId", "type": "str"},
"required_members": {"key": "requiredMembers", "type": "[str]"},
"private_link_service_id": {"key": "privateLinkServiceID", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
name: Optional[str] = None,
type: Optional[str] = None,
group_id: Optional[str] = None,
required_members: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The ID of the private link resource.
:paramtype id: str
:keyword name: The name of the private link resource.
:paramtype name: str
:keyword type: The resource type.
:paramtype type: str
:keyword group_id: The group ID of the resource.
:paramtype group_id: str
:keyword required_members: RequiredMembers of the resource.
:paramtype required_members: list[str]
"""
super().__init__(**kwargs)
self.id = id
self.name = name
self.type = type
self.group_id = group_id
self.required_members = required_members
self.private_link_service_id = None
class PrivateLinkResourcesListResult(_serialization.Model):
"""A list of private link resources.
:ivar value: The collection value.
:vartype value: list[~azure.mgmt.containerservice.v2020_09_01.models.PrivateLinkResource]
"""
_attribute_map = {
"value": {"key": "value", "type": "[PrivateLinkResource]"},
}
def __init__(self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None:
"""
:keyword value: The collection value.
:paramtype value: list[~azure.mgmt.containerservice.v2020_09_01.models.PrivateLinkResource]
"""
super().__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(_serialization.Model):
"""The state of a private link service connection.
:ivar status: The private link service connection status. Known values are: "Pending",
"Approved", "Rejected", and "Disconnected".
:vartype status: str or ~azure.mgmt.containerservice.v2020_09_01.models.ConnectionStatus
:ivar description: The private link service connection description.
:vartype description: str
"""
_attribute_map = {
"status": {"key": "status", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(
self,
*,
status: Optional[Union[str, "_models.ConnectionStatus"]] = None,
description: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword status: The private link service connection status. Known values are: "Pending",
"Approved", "Rejected", and "Disconnected".
:paramtype status: str or ~azure.mgmt.containerservice.v2020_09_01.models.ConnectionStatus
:keyword description: The private link service connection description.
:paramtype description: str
"""
super().__init__(**kwargs)
self.status = status
self.description = description
class ResourceReference(_serialization.Model):
"""A reference to an Azure resource.
:ivar id: The fully qualified Azure resource id.
:vartype id: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
}
def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin
"""
:keyword id: The fully qualified Azure resource id.
:paramtype id: str
"""
super().__init__(**kwargs)
self.id = id
class TagsObject(_serialization.Model):
"""Tags object for patch operations.
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
"tags": {"key": "tags", "type": "{str}"},
}
def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:
"""
:keyword tags: Resource tags.
:paramtype tags: dict[str, str]
"""
super().__init__(**kwargs)
self.tags = tags
|
PypiClean
|
/ai2thor_colab-0.1.2.tar.gz/ai2thor_colab-0.1.2/ai2thor_colab/__init__.py
|
from IPython.display import HTML, display
import sys
from moviepy.editor import ImageSequenceClip
from typing import Sequence
import numpy as np
import os
from typing import Optional
import ai2thor.server
from typing import Union
from PIL import Image
import matplotlib.pyplot as plt
__version__ = "0.1.2"
__all__ = ["plot_frames", "show_video", "start_xserver", "overlay", "side_by_side"]
def show_objects_table(objects: list) -> None:
"""Visualizes objects in a way that they are clickable and filterable.
Example:
event = controller.step("MoveAhead")
objects = event.metadata["objects"]
show_objects_table(objects)
"""
import pandas as pd
from collections import OrderedDict
from google.colab.data_table import DataTable
processed_objects = []
for obj in objects:
obj = obj.copy()
obj["position[x]"] = round(obj["position"]["x"], 4)
obj["position[y]"] = round(obj["position"]["y"], 4)
obj["position[z]"] = round(obj["position"]["z"], 4)
obj["rotation[x]"] = round(obj["rotation"]["x"], 4)
obj["rotation[y]"] = round(obj["rotation"]["y"], 4)
obj["rotation[z]"] = round(obj["rotation"]["z"], 4)
del obj["position"]
del obj["rotation"]
# these are too long to display
del obj["objectOrientedBoundingBox"]
del obj["axisAlignedBoundingBox"]
del obj["receptacleObjectIds"]
obj["mass"] = round(obj["mass"], 4)
obj["distance"] = round(obj["distance"], 4)
obj = OrderedDict(obj)
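# Reorder the columns (move_to_end with last=False prepends) so the most useful fields appear first.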
obj.move_to_end("distance", last=False)
obj.move_to_end("rotation[z]", last=False)
obj.move_to_end("rotation[y]", last=False)
obj.move_to_end("rotation[x]", last=False)
obj.move_to_end("position[z]", last=False)
obj.move_to_end("position[y]", last=False)
obj.move_to_end("position[x]", last=False)
obj.move_to_end("name", last=False)
obj.move_to_end("objectId", last=False)
obj.move_to_end("objectType", last=False)
processed_objects.append(obj)
df = pd.DataFrame(processed_objects)
print(
"Object Metadata. Not showing objectOrientedBoundingBox, axisAlignedBoundingBox, and receptacleObjectIds for clarity."
)
return DataTable(df, max_columns=150, num_rows_per_page=150)
def overlay(
frame1: np.ndarray,
frame2: np.ndarray,
title: Optional[str] = None,
frame2_alpha: float = 0.75,
) -> None:
"""Blend image frame1 and frame2 on top of each other.
Example:
event1 = controller.last_event
event2 = controller.step("RotateRight")
overlay(event1.frame, event2.frame)
"""
fig, ax = plt.subplots(nrows=1, ncols=1, dpi=150, figsize=(4, 5))
if not (0 < frame2_alpha < 1):
raise ValueError("frame2_alpha must be in (0:1) not " + frame2_alpha)
if frame1.dtype == np.uint8:
frame1 = frame1 / 255
if frame2.dtype == np.uint8:
frame2 = frame2 / 255
ax.imshow(frame2_alpha * frame2 + (1 - frame2_alpha) * frame1)
ax.axis("off")
if title:
fig.suptitle(title, y=0.87, x=0.5125)
def side_by_side(
frame1: np.ndarray, frame2: np.ndarray, title: Optional[str] = None
) -> None:
"""Plot 2 image frames next to each other.
Example:
event1 = controller.last_event
event2 = controller.step("RotateRight")
side_by_side(event1.frame, event2.frame)
"""
fig, axs = plt.subplots(nrows=1, ncols=2, dpi=150, figsize=(8, 5))
axs[0].imshow(frame1)
axs[0].axis("off")
axs[1].imshow(frame2)
axs[1].axis("off")
if title:
fig.suptitle(title, y=0.85, x=0.5125)
def plot_frames(event: Union[ai2thor.server.Event, np.ndarray]) -> None:
"""Visualize all the frames on an AI2-THOR Event.
Example:
plot_frames(controller.last_event)
"""
if isinstance(event, ai2thor.server.Event):
frames = dict()
third_person_frames = event.third_party_camera_frames
if event.frame is not None:
frames["RGB"] = event.frame
if event.instance_segmentation_frame is not None:
frames["Instance Segmentation"] = event.instance_segmentation_frame
if event.semantic_segmentation_frame is not None:
frames["Semantic Segmentation"] = event.semantic_segmentation_frame
if event.normals_frame is not None:
frames["Normals"] = event.normals_frame
if event.depth_frame is not None:
frames["Depth"] = event.depth_frame
if len(frames) == 0:
raise Exception("No agent frames rendered on this event!")
rows = 2 if len(third_person_frames) else 1
cols = max(len(frames), len(third_person_frames))
fig, axs = plt.subplots(
nrows=rows, ncols=cols, dpi=150, figsize=(3 * cols, 3 * rows)
)
agent_row = axs[0] if rows > 1 else axs
for i, (name, frame) in enumerate(frames.items()):
ax = agent_row[i] if cols > 1 else agent_row
im = ax.imshow(frame)
ax.axis("off")
ax.set_title(name)
if name == "Depth":
fig.colorbar(im, fraction=0.046, pad=0.04, ax=ax)
# set unused axes off
for i in range(len(frames), cols):
agent_row[i].axis("off")
# add third party camera frames
if rows > 1:
for i, frame in enumerate(third_person_frames):
ax = axs[1][i] if cols > 1 else axs[1]
ax.imshow(frame)
ax.axis("off")
for i in range(len(third_person_frames), cols):
axs[1][i].axis("off")
fig.text(x=0.1, y=0.715, s="Agent Frames", rotation="vertical", va="center")
fig.text(
x=0.1,
y=0.3025,
s="Third Person Frames",
rotation="vertical",
va="center",
)
elif isinstance(event, np.ndarray):
return Image.fromarray(event)
else:
raise Exception(
f"Unknown type: {type(event)}. "
"Must be np.ndarray or ai2thor.server.Event."
)
def show_video(frames: Sequence[np.ndarray], fps: int = 10):
"""Show a video composed of a sequence of frames.
Example:
frames = [
controller.step("RotateRight", degrees=5).frame
for _ in range(72)
]
show_video(frames, fps=5)
"""
frames = ImageSequenceClip(frames, fps=fps)
return frames.ipython_display()
def start_xserver() -> None:
"""Provide the ability to render AI2-THOR using Google Colab. """
# Thanks to the [Unity ML Agents team](https://github.com/Unity-Technologies/ml-agents)
# for most of this setup! :)
def progress(value):
return HTML(
f"""
<progress value='{value}' max="100", style='width: 100%'>
{value}
</progress>
"""
)
progress_bar = display(progress(0), display_id=True)
try:
import google.colab
using_colab = True
except ImportError:
using_colab = False
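# The Xvfb frame buffer is only needed (and only installed/started) when running inside Colab.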
if using_colab:
with open("frame-buffer", "w") as writefile:
writefile.write(
"""#taken from https://gist.github.com/jterrace/2911875
XVFB=/usr/bin/Xvfb
XVFBARGS=":1 -screen 0 1024x768x24 -ac +extension GLX +render -noreset"
PIDFILE=./frame-buffer.pid
case "$1" in
start)
/sbin/start-stop-daemon --start --quiet --pidfile $PIDFILE --make-pidfile --background --exec $XVFB -- $XVFBARGS
;;
stop)
/sbin/start-stop-daemon --stop --quiet --pidfile $PIDFILE
rm $PIDFILE
;;
restart)
$0 stop
$0 start
;;
*)
exit 1
esac
exit 0
"""
)
progress_bar.update(progress(5))
os.system("apt-get install daemon >/dev/null 2>&1")
progress_bar.update(progress(10))
os.system("apt-get install wget >/dev/null 2>&1")
progress_bar.update(progress(20))
os.system(
"wget http://ai2thor.allenai.org/ai2thor-colab/libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb >/dev/null 2>&1"
)
progress_bar.update(progress(30))
os.system(
"wget --output-document xvfb.deb http://ai2thor.allenai.org/ai2thor-colab/xvfb_1.18.4-0ubuntu0.12_amd64.deb >/dev/null 2>&1"
)
progress_bar.update(progress(40))
os.system("dpkg -i libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb >/dev/null 2>&1")
progress_bar.update(progress(50))
os.system("dpkg -i xvfb.deb >/dev/null 2>&1")
progress_bar.update(progress(70))
os.system("rm libxfont1_1.5.1-1ubuntu0.16.04.4_amd64.deb")
progress_bar.update(progress(80))
os.system("rm xvfb.deb")
progress_bar.update(progress(90))
os.system("bash frame-buffer start")
os.environ["DISPLAY"] = ":1"
progress_bar.update(progress(100))
|
PypiClean
|
/M5-0.3.2.tar.gz/M5-0.3.2/lib/scottp-scrollability/scrollability.js
|
(function() {
// Number of pixels finger must move to determine horizontal or vertical motion
var kLockThreshold = 10;
// Factor which reduces the length of motion by each move of the finger
var kTouchMultiplier = 1;
// Maximum velocity for motion after user releases finger
//var kMaxVelocity = 720 / (window.devicePixelRatio||1);
var kMaxVelocity = 250 / (window.devicePixelRatio||1); //scottp
// Rate of deceleration after user releases finger
//var kDecelRate = 350;
var kDecelRate = 650; //scottp
// Percentage of the page which content can be overscrolled before it must bounce back
var kBounceLimit = 0.5;
// Rate of deceleration when content has overscrolled and is slowing down before bouncing back
var kBounceDecelRate = 600;
// Duration of animation when bouncing back
var kBounceTime = 90;
// Percentage of viewport which must be scrolled past in order to snap to the next page
var kPageLimit = 0.3;
// Velocity at which the animation will advance to the next page
var kPageEscapeVelocity = 50;
// Vertical margin of scrollbar
var kScrollbarMargin = 2;
// Time to scroll to top
var kScrollToTopTime = 200;
var isWebkit = "webkitTransform" in document.documentElement.style;
var isFirefox = "MozTransform" in document.documentElement.style;
var isTouch = "ontouchstart" in window;
// ===============================================================================================
var startX, startY, touchX, touchY, touchDown, touchMoved, onScrollEvt, useOnScrollEvt, justChangedOrientation;
var animationInterval = 0;
var touchTargets = [];
var scrollers = {
'horizontal': createXTarget,
'vertical': createYTarget
};
window.scrollability = {
version: 'scottp-0.71',
globalScrolling: false,
scrollers: scrollers,
useOnScrollEvt: false,
flashIndicators: function() {
var scrollables = document.querySelectorAll('.scrollable.vertical');
for (var i = 0; i < scrollables.length; ++i) {
scrollability.scrollTo(scrollables[i], 0, 0, 20, true);
}
},
scrollToTop: function(elt, animationTime) {
if (elt) {
scrollability.scrollTo(elt, 0, 0, animationTime);
} else {
var scrollables = document.getElementsByClassName('scrollable');
if (scrollables.length) {
var scrollable = scrollables[0];
if (scrollable.className.indexOf('vertical') != -1) {
scrollability.scrollTo(scrollable, 0, 0, animationTime || kScrollToTopTime);
}
}
}
},
scrollTo: function(element, x, y, animationTime, muteDelegate) {
stopAnimation();
var target = createTargetForElement(element);
if (target) {
if (muteDelegate) {
target.delegate = null;
}
target = wrapTarget(target);
touchTargets = [target];
touchMoved = true;
if (animationTime) {
var orig = element[target.key];
var dest = target.filter(x, y);
var dir = dest - orig;
var startTime = new Date().getTime();
animationInterval = setInterval(function() {
var d = new Date().getTime() - startTime;
var pos = orig + ((dest-orig) * (d/animationTime));
if ((dir < 0 && pos < dest) || (dir > 0 && pos > dest)) {
pos = dest;
}
target.updater(pos);
if (pos == dest) {
clearInterval(animationInterval);
setTimeout(stopAnimation, 200);
}
}, 20);
} else {
target.updater(y);
stopAnimation();
}
}
}
};
function onLoad() {
scrollability.flashIndicators();
}
function onScroll(event) {
setTimeout(function() {
if (justChangedOrientation) {
justChangedOrientation = false;
} else if (isTouch) {
scrollability.scrollToTop();
}
});
}
function onOrientationChange(event) {
justChangedOrientation = true;
window.scrollTo(0, 1); // scottp - I added this to force show of nav bar on orientation change
}
function onTouchStart(event) {
stopAnimation();
var touchCandidate = event.target;
var touch = event.touches[0];
var touched = null;
var startTime = new Date().getTime();
touchX = startX = touch.clientX;
touchY = startY = touch.clientY;
touchDown = true;
touchMoved = false;
touchTargets = getTouchTargets(event.target, touchX, touchY, startTime);
if (!touchTargets.length && !scrollability.globalScrolling) {
return true;
}
var holdTimeout = setTimeout(function() {
holdTimeout = 0;
touched = setTouched(touchCandidate);
}, 50);
var d = document;
d.addEventListener('touchmove', onTouchMove, false);
d.addEventListener('touchend', onTouchEnd, false);
animationInterval = setInterval(touchAnimation, 0);
function onTouchMove(event) {
event.preventDefault();
touchMoved = true;
if (holdTimeout) {
clearTimeout(holdTimeout);
holdTimeout = 0;
}
if (touched) {
releaseTouched(touched);
touched = null;
}
var touch = event.touches[0];
touchX = touch.clientX;
touchY = touch.clientY;
// Reduce the candidates down to the one whose axis follows the finger most closely
if (touchTargets.length > 1) {
for (var i = 0; i < touchTargets.length; ++i) {
var target = touchTargets[i];
if (target.disable && target.disable(touchX, touchY, startX, startY)) {
target.terminator();
touchTargets.splice(i, 1);
break;
}
}
}
try {
touchTargets[0].pullToRefresh();
} catch(e) {}
}
function onTouchEnd(event) {
if (holdTimeout) {
clearTimeout(holdTimeout);
holdTimeout = 0;
}
// Simulate a click event when releasing the finger
if (touched) {
var evt = document.createEvent('MouseEvents');
evt.initMouseEvent('click', true, true, window, 1);
touched[0].dispatchEvent(evt);
releaseTouched(touched);
} else {
try {
touchTargets[0].pullToRefreshRelease();
} catch(e) {}
}
d.removeEventListener('touchmove', onTouchMove, false);
d.removeEventListener('touchend', onTouchEnd, false);
touchDown = false;
}
}
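// Wraps a raw scroll target with per-gesture physics state (momentum, edge resistance,
// paging and pull-to-refresh bookkeeping) and attaches the animator/updater/terminator
// callbacks used by the animation loop below.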
function wrapTarget(target, startX, startY, startTime) {
var delegate = target.delegate;
var constrained = target.constrained;
var paginated = target.paginated;
var viewport = target.viewport || 0;
var scrollbar = target.scrollbar;
var position = target.node[target.key];
var min = target.min;
var max = target.max;
var absMin = min;
var absMax = Math.round(max/viewport)*viewport;
var pageSpacing = 0;
var velocity = 0;
var decelerating = 0;
var decelOrigin, decelDelta;
var bounceLimit = target.bounce;
var pageLimit = viewport * kPageLimit;
var startTouch = target.filter(startX, startY), lastTouch = startTouch;
var lastTime = startTime;
var stillTime = 0;
var stillThreshold = 20;
var snapped = false;
var locked = false;
var isPullingUp = false;
var isPullingDown = false;
if (paginated) {
var excess = Math.round(Math.abs(absMin) % viewport);
var pageCount = ((Math.abs(absMin)-excess) / viewport)+1;
var pageSpacing = excess / pageCount;
var positionSpacing = Math.round(position) % viewport;
var pagePosition = Math.round((position-positionSpacing)/viewport) * viewport;
min = max = Math.round(pagePosition + absMax)+positionSpacing;
absMin += pageSpacing;
}
if (delegate && delegate.onStartScroll) {
if (!delegate.onStartScroll()) {
return null;
}
}
if (scrollbar) {
target.node.parentNode.appendChild(scrollbar);
}
function animator(touch, time) {
var deltaTime = 1 / (time - lastTime);
lastTime = time;
var continues = true;
if (touchDown) {
var delta = (touch - lastTouch) * kTouchMultiplier;
if (!delta) {
// Heuristic to prevent spurious delta=0 updates from zeroing the velocity and
// stopping all motion in its tracks. We need to distinguish when the finger
// has actually stopped moving from when the timer fired too quickly.
if (!stillTime) {
stillTime = time;
}
if (time - stillTime < stillThreshold) {
return true;
}
} else {
stillTime = 0;
}
if (!locked && Math.abs(touch - startTouch) > kLockThreshold) {
locked = true;
if (delegate && delegate.onLockScroll) {
delegate.onLockScroll(target.key);
}
}
lastTouch = touch;
velocity = delta / deltaTime;
// Apply resistance along the edges
if (position > max && constrained) {
var excess = position - max;
velocity *= (1.0 - excess / bounceLimit);
} else if (position < min && constrained) {
var excess = min - position;
velocity *= (1.0 - excess / bounceLimit);
}
} else {
if (paginated && !snapped) {
// When finger is released, decide whether to jump to next/previous page
// or to snap back to the current page
snapped = true;
if (Math.abs(position - max) > pageLimit || Math.abs(velocity) > kPageEscapeVelocity) {
if (position > max) {
if (max != absMax) {
max += viewport+pageSpacing;
min += viewport+pageSpacing;
if (delegate && delegate.onScrollPage) {
var totalSpacing = min % viewport;
var page = -Math.round((position+viewport-totalSpacing)/viewport);
delegate.onScrollPage(page, -1);
}
}
} else {
if (min != absMin) {
max -= viewport+pageSpacing;
min -= viewport+pageSpacing;
if (delegate && delegate.onScrollPage) {
var totalSpacing = min % viewport;
var page = -Math.round((position-viewport-totalSpacing)/viewport);
delegate.onScrollPage(page, 1);
}
}
}
}
}
if (position > max && constrained) {
if (velocity > 0) {
// Slowing down
var excess = position - max;
var elasticity = (1.0 - excess / bounceLimit);
velocity = Math.max(velocity - kBounceDecelRate * deltaTime, 0) * elasticity;
decelerating = 0;
} else {
// Bouncing back
if (!decelerating) {
decelOrigin = position;
decelDelta = max - position;
}
position = easeOutExpo(decelerating, decelOrigin, decelDelta, kBounceTime);
return update(position, ++decelerating <= kBounceTime && Math.floor(position) > max);
}
} else if (position < min && constrained) {
if (velocity < 0) {
// Slowing down
var excess = min - position;
var elasticity = (1.0 - excess / bounceLimit);
velocity = Math.min(velocity + kBounceDecelRate * deltaTime, 0) * elasticity;
decelerating = 0;
} else {
// Bouncing back
if (!decelerating) {
decelOrigin = position;
decelDelta = min - position;
}
position = easeOutExpo(decelerating, decelOrigin, decelDelta, kBounceTime);
return update(position, ++decelerating <= kBounceTime && Math.ceil(position) < min);
}
} else {
// Slowing down
if (!decelerating) {
if (velocity < 0 && velocity < -kMaxVelocity) {
velocity = -kMaxVelocity;
} else if (velocity > 0 && velocity > kMaxVelocity) {
velocity = kMaxVelocity;
}
decelOrigin = velocity;
}
velocity = easeOutExpo(decelerating, decelOrigin, -decelOrigin, kDecelRate);
if (++decelerating > kDecelRate || Math.floor(velocity) == 0) {
continues = false;
}
}
}
position += velocity * deltaTime;
return update(position, continues);
}
function update(pos, continues) {
position = pos;
target.node[target.key] = position;
target.update(target.node, position);
if (delegate && delegate.onScroll) {
delegate.onScroll(position);
}
// Update the scrollbar
var range = -min - max;
if (scrollbar && (range + viewport) > viewport) {
var viewable = viewport - kScrollbarMargin*2;
var height = (viewable/(range+viewport)) * viewable;
var scrollPosition = 0;
if (position > max) {
height = Math.max(height - (position-max), 5);
scrollPosition = 0;
} else if (position < min) {
height = Math.max(height - (min - position), 5);
scrollPosition = (viewable-height);
} else {
scrollPosition = Math.round((Math.abs(position) / range) * (viewable-height));
}
scrollPosition += kScrollbarMargin;
scrollbar.style.height = Math.round(height) + 'px';
moveElement(scrollbar, 0, Math.round(scrollPosition));
if (touchMoved) {
scrollbar.style.webkitTransition = 'none';
scrollbar.style.opacity = '1';
}
}
return continues;
}
function terminator() {
// Snap to the integer endpoint, since position may be a subpixel value while animating
if (paginated) {
var pageIndex = Math.round(position/viewport);
update(pageIndex * (viewport+pageSpacing));
} else if (position > max && constrained) {
update(max);
} else if (position < min && constrained) {
update(min);
}
// Hide the scrollbar
if (scrollbar) {
scrollbar.style.opacity = '0';
scrollbar.style.webkitTransition = 'opacity 0.33s linear';
}
if (delegate && delegate.onEndScroll) {
delegate.onEndScroll();
}
}
function pullToRefresh(released) {
var pullUpMin = min - (target.pullUpToRefresh ? target.pullUpToRefresh.offsetHeight / 2 : 0);
var pullDownMin = max + (target.pullDownToRefresh ? target.pullDownToRefresh.offsetHeight : 0);
var pullState;
return function() {
if (target.pullUpToRefresh || target.pullDownToRefresh) {
if ( !released &&
(
(isPullingDown && ((pullDownMin < position && pullState) || (pullDownMin > position && !pullState)))
||
(isPullingUp && ((position < pullUpMin && pullState) || (position > pullUpMin && !pullState)))
)
) {
return;
}
if (released && (position > pullDownMin)) {
pullState = 'pulledDown';
isPullingUp = false;
isPullingDown = false;
} else if (released && (position < pullUpMin)) {
pullState = 'pulledUp';
isPullingUp = false;
isPullingDown = false;
} else if (isPullingDown && (position < pullDownMin)) {
pullState = 'pullDownCancel';
isPullingUp = false;
isPullingDown = false;
} else if (isPullingUp && (position > pullUpMin)) {
pullState = 'pullUpCancel';
isPullingUp = false;
isPullingDown = false;
} else if (position > pullDownMin) {
pullState = 'pullingDown';
isPullingUp = false;
isPullingDown = true;
} else if (position < pullUpMin) {
pullState = 'pullingUp';
isPullingUp = true;
isPullingDown = false;
}
var evt = document.createEvent('Event');
evt.initEvent(pullState, true, false);
target.node.dispatchEvent(evt);
}
}
}
target.updater = update;
target.animator = animator;
target.terminator = terminator;
target.pullToRefresh = pullToRefresh(false);
target.pullToRefreshRelease = pullToRefresh(true);
return target;
}
function touchAnimation() {
var time = new Date().getTime();
// Animate each of the targets
for (var i = 0; i < touchTargets.length; ++i) {
var target = touchTargets[i];
// Translate the x/y touch into the value needed by each of the targets
var touch = target.filter(touchX, touchY);
if (!target.animator(touch, time)) {
target.terminator();
touchTargets.splice(i--, 1);
}
}
if (!touchTargets.length) {
stopAnimation();
}
}
// *************************************************************************************************
function getTouchTargets(node, touchX, touchY, startTime) {
var targets = [];
findTargets(node, targets, touchX, touchY, startTime);
var candidates = document.querySelectorAll('.scrollable.global');
for(var j = 0; j < candidates.length; ++j) {
findTargets(candidates[j], targets, touchX, touchY, startTime);
}
return targets;
}
function findTargets(element, targets, touchX, touchY, startTime) {
while (element) {
if (element.nodeType == 1) {
var target = createTargetForElement(element, touchX, touchY, startTime);
if (target) {
// Look out for duplicates
var exists = false;
for (var j = 0; j < targets.length; ++j) {
if (targets[j].node == element) {
exists = true;
break;
}
}
if (!exists) {
target = wrapTarget(target, touchX, touchY, startTime);
if (target) {
targets.push(target);
}
}
}
}
element = element.parentNode;
}
}
function createTargetForElement(element, touchX, touchY, startTime) {
var classes = element.className.split(' ');
for (var i = 0; i < classes.length; ++i) {
var name = classes[i];
if (scrollers[name]) {
var target = scrollers[name](element);
target.key = 'scrollable_'+name;
target.paginated = classes.indexOf('paginated') != -1;
if (!(target.key in element)) {
element[target.key] = target.initial ? target.initial(element) : 0;
}
return target;
}
}
}
function setTouched(target) {
var touched = [];
for (var n = target; n; n = n.parentNode) {
if (n.nodeType == 1) {
n.className = (n.className ? n.className + ' ' : '') + 'touched';
touched.push(n);
}
}
return touched;
}
function releaseTouched(touched) {
for (var i = 0; i < touched.length; ++i) {
var n = touched[i];
n.className = n.className.replace('touched', '');
}
}
function stopAnimation() {
if (animationInterval) {
clearInterval(animationInterval);
animationInterval = 0;
for (var i = 0; i < touchTargets.length; ++i) {
var target = touchTargets[i];
target.terminator();
}
touchTargets = [];
}
}
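// Positions an element with a hardware-accelerated CSS transform and, when enabled,
// throttles a synthetic 'scroll' event so listeners can observe the new offset.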
function moveElement(element, x, y) {
if (isWebkit) {
element.style.webkitTransform = 'translate3d('
+(x ? (x+'px') : '0')+','
+(y ? (y+'px') : '0')+','
+'0)';
} else if (isFirefox) {
element.style.MozTransform = 'translate3d('
+(x ? (x+'px') : '0')+','
+(y ? (y+'px') : '0')+','
+'0)';
}
if(!onScrollEvt && useOnScrollEvt) {
onScrollEvt = setTimeout(function() {
var evt = document.createEvent('Event');
// Don't want this to bubble because of scrollToTop
evt.initEvent('scroll', false, false);
evt.x = -x || 0;
evt.y = -y || 0;
element.dispatchEvent(evt);
onScrollEvt = false;
}, 20);
}
}
function initScrollbar(element) {
if (!element.scrollableScrollbar) {
var scrollbar = element.scrollableScrollbar = document.createElement('div');
scrollbar.className = 'scrollableScrollbar';
// We hardcode this CSS here to avoid having to provide a CSS file
scrollbar.style.cssText = [
'position: absolute',
'top: 0',
'right: 2px',
'width: 5px',
'min-height: 4px',
'background: rgba(40, 40, 40, 0.6)',
'border: 1px solid rgba(235, 235, 235, 0.1)',
'opacity: 0',
'-webkit-border-radius: 4px 5px',
'-webkit-transform: translate3d(0,0,0)',
'-webkit-box-sizing: border-box',
'z-index: 2147483647'
].join(';');
}
return element.scrollableScrollbar;
}
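// Standard exponential ease-out (Penner-style): t = elapsed time, b = start value,
// c = total change, d = duration.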
function easeOutExpo(t, b, c, d) {
return (t==d) ? b+c : c * (-Math.pow(2, -10 * t/d) + 1) + b;
}
// *************************************************************************************************
function createXTarget(element) {
var parent = element.parentNode;
return {
node: element,
min: -parent.scrollWidth + parent.offsetWidth,
max: 0,
viewport: parent.offsetWidth,
bounce: parent.offsetWidth * kBounceLimit,
constrained: true,
delegate: element.scrollDelegate,
filter: function(x, y) {
return x;
},
disable: function (x, y, startX, startY) {
var dx = Math.abs(x - startX);
var dy = Math.abs(y - startY);
if (dy > dx && dy > kLockThreshold) {
return true;
}
},
update: function(element, position) {
moveElement(element, position, element.scrollable_vertical||0);
}
};
}
function createYTarget(element) {
var parent = element.parentNode,
pullDownToRefresh = parent.getElementsByClassName('pull-down-to-refresh')[0],
pullUpToRefresh = parent.getElementsByClassName('pull-up-to-refresh')[0];
return {
node: element,
scrollbar: initScrollbar(element),
min: -parent.scrollHeight + parent.offsetHeight
+ (pullUpToRefresh ? pullUpToRefresh.offsetHeight : 0),
max: (pullDownToRefresh ? -pullDownToRefresh.offsetHeight : 0),
viewport: parent.offsetHeight,
bounce: parent.offsetHeight * kBounceLimit,
pullUpToRefresh: pullUpToRefresh ? pullUpToRefresh : false,
pullDownToRefresh: pullDownToRefresh ? pullDownToRefresh : false,
constrained: true,
delegate: element.scrollDelegate,
filter: function(x, y) {
return y;
},
disable: function(x, y, startX, startY) {
var dx = Math.abs(x - startX);
var dy = Math.abs(y - startY);
if (dx > dy && dx > kLockThreshold) {
return true;
}
},
update: function(element, position) {
moveElement(element, element.scrollable_horizontal||0, position);
}
};
}
document.addEventListener('touchstart', onTouchStart, false);
document.addEventListener('scroll', onScroll, false);
document.addEventListener('orientationchange', onOrientationChange, false);
window.addEventListener('load', onLoad, false);
})();
// convience - scottp
scrollability.scrollToTop = function(elt) {
if (elt) {
scrollability.scrollTo(elt, 0, 0);
} else {
var scrollables = document.getElementsByClassName('scrollable');
if (scrollables.length) {
var scrollable = scrollables[0];
if (scrollable.className.indexOf('vertical') != -1) {
scrollability.scrollTo(scrollable, 0, 0, kScrollToTopTime);
}
}
}
}
|
PypiClean
|
/appinventor-tfjs-0.1.4.tar.gz/appinventor-tfjs-0.1.4/README.md
|
# MIT App Inventor TFJS Extension Generator
The aim of this tool is to make it easier to generate the scaffolding needed to use a Tensorflow.js model in App Inventor.
## Quickstart
Install dependencies:
* java 8
* ant 1.10
* python 3
* node
* npm
* git
Install the App Inventor TFJS extension generator using pip:
```
pip install appinventor-tfjs
```
Create an extension prototype for Posenet:
```
python -m appinventor.tfjs posenet edu.mit.appinventor.ai.posenet.PosenetExtension
```
The output of this command will be a new directory called PosenetExtension. Within this directory, you will find a fresh git clone of the App Inventor extension template repository. The directory will have the following structure:
```
build.xml
lib
├─ android
│ ├─ android.jar
│ ├─ appcompat-v7-28.0.0.jar
│ └─ dx.jar
├─ ant-contrib
│ └─ ant-contrib-1.0b3.jar
├─ appinventor
│ ├─ AndroidRuntime.jar
│ └─ AnnotationProcessors.jar
└─ deps
README.md
src
└─ edu
└─ mit
└─ appinventor
└─ ai
└─ posenet
├─ assets
│ ├─ app.js
│ ├─ group1-shard1of2.bin
│ ├─ group1-shard2of2.bin
│ ├─ index.html
│ ├─ model-stride16.json
│ ├─ posenet.min.js
│ ├─ tf-converter.min.js
│ ├─ tf-core.min.js
│ └─ VERSIONS
└─ PosenetExtension.java
```
Of those files, the ones under `src` are most interesting. Briefly:
* `PosenetExtension.java` - Boilerplate extension code for a TFJS extension in App Inventor. You will want to customize it to provide model-specific behavior, such as interpreting the results before passing information back to the blocks layer.
* `app.js` - Boilerplate Javascript code to load the model and interact with the Java code in the App Inventor extension. You will need to modify this to interact correctly with the TFJS model, such as calling the correct method to start the model and interpret its output for App Inventor.
* `group*-shard*of*.bin` - These are the shards of the model weights, pulled from the TFJS model repository. The number of shard files will vary with the size of the model.
* `index.html` - The index.html file loads all of the prerequisite Javascript files. It generally does not need to be modified.
* `*.min.js` - Minified Javascript code for the model and any dependencies, such as tfjs-core and tfjs-converter.
* `VERSIONS` - The VERSIONS file contains a key-value mapping the different npm modules to the versions that were retrieved. There should be one entry per min.js file.
## Usage
```
usage: python -m appinventor.tfjs [-h] [--scope SCOPE] model_name class_name
Create a TensorFlow.js-based extension for MIT App Inventor.
positional arguments:
model_name
class_name
optional arguments:
-h, --help show this help message and exit
--scope SCOPE
```
The `model_name` argument names the Tensorflow.js model of interest. A list of pretrained models is available on [GitHub](https://github.com/tensorflow/tfjs-models). For example, if you are interested in trying the MobileNet model, you would specify `mobilenet` as the `model_name`.
The `class_name` argument specifies a fully qualified Java class name that will be used for the extension. For example, a MobileNet extension for App Inventor might have the fully qualified class name `com.example.tfjs.mobile.MobileNetExtension`. The extension generator will create this class and any intermediate packages for you.
The optional `--scope SCOPE` argument allows you to import models from npm packages that are not under the `@tensorflow-models` namespace (effectively, if `--scope` is not specified it is the same as `--scope @tensorflow-models`).
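For example, `python -m appinventor.tfjs --scope @myorg handpose com.example.ai.hand.HandposeExtension` (hypothetical scope, model, and class names) would pull the model from the `@myorg/handpose` npm package instead of `@tensorflow-models/handpose`.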
## Development
### Dependencies
You will need to create a virtual environment and install the dependencies by running the commands below. We provide instructions for macOS.
### Create a virtualenv
#### macOS
1. Install Homebrew
2. Install pyenv
```shell
brew install pyenv
echo "eval \"\$(pyenv init -)\"" >> ~/.bash_profile
echo "eval \"\$(pyenv virtualenv-init -)\"" >> ~/.bash_profile
source ~/.bash_profile
```
3. Create a python environment using pyenv and activate it
```shell
pyenv install 3.6
pyenv virtualenv 3.6 appinventor3
pyenv activate appinventor3
```
### Install dependencies
```shell
pip install -r requirements.txt
pip install .
```
## Contributing
This software is made available under the Apache Software License 2.0. You are welcome to contribute pull requests to this project via GitHub.
## License
Copyright 2020 Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
PypiClean
|
/gds-nagios-plugins-1.5.0.tar.gz/gds-nagios-plugins-1.5.0/plugins/command/check_elasticsearch_aws.py
|
from nagioscheck import NagiosCheck, UsageError
from nagioscheck import PerformanceMetric, Status
import urllib2
try:
import json
except ImportError:
import simplejson as json
HEALTH = {'red': 0,
'yellow': 1,
'green': 2}
HEALTH_MAP = {0: 'critical',
1: 'warning',
2: 'ok'}
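# For reference, the only fields this check reads from the /_cluster/health response
# are 'cluster_name' and 'status', e.g. {"cluster_name": "my-cluster", "status": "yellow", ...}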
class ElasticSearchCheck(NagiosCheck):
def __init__(self):
NagiosCheck.__init__(self)
self.health = HEALTH['green']
self.add_option('H', 'host', 'host', "Hostname or network "
"address to probe. The ElasticSearch API "
"should be listening here.")
self.add_option('p', 'port', 'port', "TCP port to probe. "
"The ElasticSearch API should be listening "
"here.")
def check(self, opts, args):
if opts.host is None or opts.port is None:
raise UsageError("Hostname and port must be specified")
host = opts.host
port = int(opts.port)
es_cluster_health = get_json(r'http://%s:%d/_cluster/health' % (host, port))
msg = "Monitoring cluster '%s'" % es_cluster_health['cluster_name']
detail = []
perfdata = []
## Cluster Health Status (green, yellow, red)
cluster_status = HEALTH[es_cluster_health['status'].lower()]
perfdata.append(PerformanceMetric(label='cluster_health',
value=es_cluster_health['status']))
if cluster_status < self.health:
raise Status('critical',
("Elasticsearch cluster reports degraded health: '%s'" %
es_cluster_health['status'],),
perfdata)
raise Status(HEALTH_MAP[self.health],
(msg, None, "%s\n\n%s" % (msg, "\n".join(detail))),
perfdata)
def get_json(uri):
try:
f = urllib2.urlopen(uri)
except urllib2.HTTPError, e:
raise Status('unknown', ("API failure: %s" % uri,
None,
"API failure:\n\n%s" % str(e)))
except urllib2.URLError, e:
# The server could be down; make this CRITICAL.
raise Status('critical', (e.reason,))
body = f.read()
try:
j = json.loads(body)
except ValueError:
raise Status('unknown', ("API returned nonsense",))
return j
def main():
ElasticSearchCheck().run()
if __name__ == '__main__':
main()
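# Editor's note: this plugin targets Python 2 (urllib2 and the old
# "except Exception, e" syntax). The sketch below is a hedged, minimal Python 3
# port of the get_json helper, for reference only; it is never called here and
# assumes nagioscheck.Status behaves exactly as imported above.
def get_json_py3(uri):
    import urllib.error
    import urllib.request
    try:
        f = urllib.request.urlopen(uri)
    except urllib.error.HTTPError as e:
        raise Status('unknown', ("API failure: %s" % uri,
                                 None,
                                 "API failure:\n\n%s" % str(e)))
    except urllib.error.URLError as e:
        # The server could be down; make this CRITICAL.
        raise Status('critical', (e.reason,))
    try:
        return json.loads(f.read())
    except ValueError:
        raise Status('unknown', ("API returned nonsense",))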
|
PypiClean
|
/figgy_lib-1.0.0-py3-none-any.whl/figgy/writer.py
|
import json
from collections import OrderedDict
from .fig_store import FigStore
from .figs import ReplicatedFig, AppFig, SharedFig, MergeFig
TWIG = 'twig'
APP_FIGS = 'app_figs'
REPLICATE_FIGS = 'replicate_figs'
SHARED_FIGS = 'shared_figs'
MERGED_FIGS = 'merged_figs'
class ConfigWriter:
"""
Writes the figgy.json file into the provided directory. If no directory is provided, writes the figgy.json
to the local directory.
"""
@staticmethod
def write(fig_store: FigStore, file_name: str = "figgy.json", destination_dir=""):
"""
Writes a figgy-compatible declarative configuration file to disk.
@param: fig_store - A hydrated FigStore object used by your application to fetch configurations
@param: file_name - Default: `figgy.json` (recommended). The name of the file that will be written.
        @param: destination_dir - Default: current directory. The directory to write the `figgy.json` file to.
"""
destination_dir = destination_dir.rstrip("/")
figgy_config: OrderedDict = OrderedDict()
figgy_config[TWIG] = fig_store.TWIG
figgy_config[APP_FIGS] = []
figgy_config[REPLICATE_FIGS] = {}
figgy_config[SHARED_FIGS] = []
figgy_config[MERGED_FIGS] = {}
for fig in fig_store.figs:
item = fig_store.__getattribute__(fig)
if isinstance(item, AppFig):
figgy_config[APP_FIGS].append(item.name)
elif isinstance(item, ReplicatedFig):
figgy_config[REPLICATE_FIGS][item.source] = item.name
elif isinstance(item, SharedFig):
figgy_config[SHARED_FIGS].append(item.name)
elif isinstance(item, MergeFig):
figgy_config[MERGED_FIGS][item.name] = item.pattern
destination_dir = f'{destination_dir.rstrip("/")}/' if destination_dir else ''
with open(f"{destination_dir}{file_name}", "w") as file:
file.write(json.dumps(figgy_config, indent=4))
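# Editor's sketch (hedged): ConfigWriter.write only reads the TWIG and `figs`
# attributes of the store, plus one attribute per name listed in `figs`, so a
# minimal duck-typed stand-in is enough to illustrate the output. The real
# FigStore and Fig constructors are not shown in this module and may differ.
def _write_example(destination_dir: str = ".") -> None:
    class _DemoStore:
        TWIG = '/demo/app'  # hypothetical parameter namespace
        figs = []           # no figs declared: every figgy.json section stays empty
    # Writes <destination_dir>/figgy.json containing the twig plus empty
    # app_figs, replicate_figs, shared_figs and merged_figs sections.
    ConfigWriter.write(_DemoStore(), destination_dir=destination_dir)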
|
PypiClean
|
/pytest-elements-1.0.2.tar.gz/pytest-elements-1.0.2/pytest_elements/elements/dropdown.py
|
import time
import deprecation
from random import randint
from time import sleep
from pytest_elements.elements.form_component import FormComponent
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import StaleElementReferenceException
class Dropdown(FormComponent):
"""
Represents a dropdown menu on a web page.
"""
@deprecation.deprecated(
deprecated_in="2.0.7",
removed_in="3.0",
details="The Dropdown class has been moved/renamed, please use the "
"'Select' class from pytest_elements.elements.select",
)
def __init__(
self, identifier=None, attribute=None, xpath=None, input_type=None, valid_start_index=0,
):
"""
Construct a new Dropdown instance,
located using the specified attribute and attribute-value OR a supplied xpath.
        :param valid_start_index: Start random index selection from this value, in case the dropdown
        contains an invalid option at index 0.
"""
super().__init__(
element_type="select", identifier=identifier, attribute=attribute, xpath=xpath, input_type=input_type,
)
self.valid_start_index = valid_start_index
def get(self):
"""
Returns the element as a Selenium Select element.
"""
return Select(super().get())
def get_attribute(self, attribute_name):
"""
Overriding default page element get_attribute
so we can interact w/the WebElement, not the Select,
which does not have the get_attribute function
"""
return super().get().get_attribute(attribute_name)
def get_len(self, valid=False):
"""
Returns the number of options
:param valid: whether or not to only return the # of valid options
"""
if valid:
return len(self.get().options) - self.valid_start_index
else:
return len(self.get().options)
def get_options(self, valid=False):
"""
:param valid: whether or not to return only the valid options
:return: a list of strings representing the options in the dropdown
"""
if valid:
options = [o.text for o in self.get().options]
return options[self.valid_start_index :]
else:
return [o.text for o in self.get().options]
def accept_input(self, value):
"""
Selects the option with the specified value.
"""
if isinstance(value, int):
if value < 0:
return self.pick_random()
else:
self.get().select_by_index(value)
else:
self.get().select_by_visible_text(value)
return value
def pick_random(self, ensure_change=False):
"""
Selects a random option and returns the text of the option
:param ensure_change: attempt to select a value that was not previously selected
:return the text value of the newly selected option
"""
attempts = 0
while attempts < 5:
try:
if ensure_change:
return self._ensure_change()
else:
index = randint(self.valid_start_index, self.get_len() - 1)
self.accept_input(index)
return self.get().first_selected_option.text
except StaleElementReferenceException:
attempts += 1
sleep(1)
def _ensure_change(self):
"""
Attempt to select a value that was not previously selected
:return:
"""
init_selected_option = self.get().first_selected_option.text
attempts = self.get_len(valid=True)
        # Seed with the current selection so the loop below attempts at least one
        # re-selection; an empty string would never match the current option text.
        new_selected_option = init_selected_option
while init_selected_option == new_selected_option and attempts > 0:
index = randint(self.valid_start_index, self.get_len() - 1)
self.accept_input(index)
new_selected_option = self.get().first_selected_option.text
attempts -= 1
assert new_selected_option != init_selected_option, (
"A new value could not be randomly selected from this dropdown. "
"Are there enough valid, unique values to ensure a change? (>1 valid values required)\n"
f"Options: {self.get_options()}"
)
return new_selected_option
def get_value(self):
"""
Returns value of the selected option in dropdown
"""
return self.get().first_selected_option.text
def wait_until_fillable(self, timeout=15, poll_frequency=0.5, err_msg=None):
"""
Special implementation for the dropdown:
We need to make sure there is a valid option to be selected.
"""
# first, wait for the element to not be disabled
start_time = time.time()
super().wait_until_fillable(timeout=timeout, poll_frequency=poll_frequency, err_msg=err_msg)
# next, wait for there to be at least one valid selectable option
while self.get_len(valid=True) == 0:
time_elapsed = time.time() - start_time
if time_elapsed > timeout:
raise TimeoutError(err_msg)
time.sleep(poll_frequency)
return True
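# Editor's sketch (hedged): how a test might drive this (deprecated) Dropdown.
# The exact locator semantics of `attribute`/`identifier` and how FormComponent
# obtains the Selenium driver live elsewhere in pytest_elements, so the
# arguments below are illustrative assumptions rather than documented behaviour.
def _dropdown_usage_example():
    country = Dropdown(attribute="id", identifier="country", valid_start_index=1)
    country.accept_input("Canada")                    # select by visible text
    country.accept_input(2)                           # or by option index
    chosen = country.pick_random(ensure_change=True)  # random, different option
    assert country.get_value() == chosen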
|
PypiClean
|
/sftpgo-client-0.3.1.tar.gz/sftpgo-client-0.3.1/sftpgo_client/base/api/user_ap_is/generate_user_totp_secret.py
|
from typing import Any, Dict, Optional, Union, cast
import httpx
from ...client import AuthenticatedClient
from ...models.generate_user_totp_secret_json_body import GenerateUserTotpSecretJsonBody
from ...models.generate_user_totp_secret_response_200 import (
GenerateUserTotpSecretResponse200,
)
from ...types import Response
def _get_kwargs(
*,
client: AuthenticatedClient,
json_body: GenerateUserTotpSecretJsonBody,
) -> Dict[str, Any]:
url = "{}/user/totp/generate".format(client.base_url)
headers: Dict[str, str] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
json_json_body = json_body.to_dict()
return {
"method": "post",
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"json": json_json_body,
}
def _parse_response(
*, response: httpx.Response
) -> Optional[Union[Any, GenerateUserTotpSecretResponse200]]:
if response.status_code == 200:
response_200 = GenerateUserTotpSecretResponse200.from_dict(response.json())
return response_200
if response.status_code == 400:
response_400 = cast(Any, None)
return response_400
if response.status_code == 401:
response_401 = cast(Any, None)
return response_401
if response.status_code == 403:
response_403 = cast(Any, None)
return response_403
if response.status_code == 500:
response_500 = cast(Any, None)
return response_500
return None
def _build_response(
*, response: httpx.Response
) -> Response[Union[Any, GenerateUserTotpSecretResponse200]]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
*,
client: AuthenticatedClient,
json_body: GenerateUserTotpSecretJsonBody,
) -> Response[Union[Any, GenerateUserTotpSecretResponse200]]:
"""Generate a new TOTP secret
Generates a new TOTP secret, including the QR code as png, using the specified configuration for the
logged in user
Args:
json_body (GenerateUserTotpSecretJsonBody):
Returns:
Response[Union[Any, GenerateUserTotpSecretResponse200]]
"""
kwargs = _get_kwargs(
client=client,
json_body=json_body,
)
response = httpx.request(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
def sync(
*,
client: AuthenticatedClient,
json_body: GenerateUserTotpSecretJsonBody,
) -> Optional[Union[Any, GenerateUserTotpSecretResponse200]]:
"""Generate a new TOTP secret
Generates a new TOTP secret, including the QR code as png, using the specified configuration for the
logged in user
Args:
json_body (GenerateUserTotpSecretJsonBody):
Returns:
Response[Union[Any, GenerateUserTotpSecretResponse200]]
"""
return sync_detailed(
client=client,
json_body=json_body,
).parsed
async def asyncio_detailed(
*,
client: AuthenticatedClient,
json_body: GenerateUserTotpSecretJsonBody,
) -> Response[Union[Any, GenerateUserTotpSecretResponse200]]:
"""Generate a new TOTP secret
Generates a new TOTP secret, including the QR code as png, using the specified configuration for the
logged in user
Args:
json_body (GenerateUserTotpSecretJsonBody):
Returns:
Response[Union[Any, GenerateUserTotpSecretResponse200]]
"""
kwargs = _get_kwargs(
client=client,
json_body=json_body,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.request(**kwargs)
return _build_response(response=response)
async def asyncio(
*,
client: AuthenticatedClient,
json_body: GenerateUserTotpSecretJsonBody,
) -> Optional[Union[Any, GenerateUserTotpSecretResponse200]]:
"""Generate a new TOTP secret
Generates a new TOTP secret, including the QR code as png, using the specified configuration for the
logged in user
Args:
json_body (GenerateUserTotpSecretJsonBody):
Returns:
Response[Union[Any, GenerateUserTotpSecretResponse200]]
"""
return (
await asyncio_detailed(
client=client,
json_body=json_body,
)
).parsed
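# Editor's sketch (hedged): a typical call into this module. The
# AuthenticatedClient constructor arguments and the "config_name" field are
# assumptions based on common openapi-python-client output and the SFTPGo API;
# neither is defined in this file, and the request model is assumed to expose
# the same from_dict helper as the response model used above.
def _generate_user_totp_secret_example():
    client = AuthenticatedClient(
        base_url="https://sftpgo.example.com/api/v2",
        token="YOUR-JWT-TOKEN",
    )
    body = GenerateUserTotpSecretJsonBody.from_dict({"config_name": "Default"})
    # Returns a GenerateUserTotpSecretResponse200 on success, or None on error.
    return sync(client=client, json_body=body)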
|
PypiClean
|
/azure-cli-2.51.0.tar.gz/azure-cli-2.51.0/azure/cli/command_modules/network/aaz/latest/network/route_table/_create.py
|
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network route-table create",
)
class Create(AAZCommand):
"""Create a route table.
:example: Create a route table.
az network route-table create -g MyResourceGroup -n MyRouteTable
"""
_aaz_info = {
"version": "2021-08-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/routetables/{}", "2021-08-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the route table.",
required=True,
)
_args_schema.location = AAZResourceLocationArg(
help="Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.",
fmt=AAZResourceLocationArgFormat(
resource_group_arg="resource_group",
),
)
_args_schema.disable_bgp_route_propagation = AAZBoolArg(
options=["--disable-bgp-route-propagation"],
help="Disable routes learned by BGP. Allowed values: false, true.",
)
_args_schema.tags = AAZDictArg(
options=["--tags"],
help="Space-separated tags: key[=value] [key[=value] ...].",
)
tags = cls._args_schema.tags
tags.Element = AAZStrArg()
# define Arg Group "Parameters"
# define Arg Group "Properties"
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.RouteTablesCreateOrUpdate(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class RouteTablesCreateOrUpdate(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200_201,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200, 201]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200_201,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}",
**self.url_parameters
)
@property
def method(self):
return "PUT"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"routeTableName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-08-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("location", AAZStrType, ".location")
_builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}})
_builder.set_prop("tags", AAZDictType, ".tags")
properties = _builder.get(".properties")
if properties is not None:
properties.set_prop("disableBgpRoutePropagation", AAZBoolType, ".disable_bgp_route_propagation")
tags = _builder.get(".tags")
if tags is not None:
tags.set_elements(AAZStrType, ".")
return self.serialize_content(_content_value)
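        # Editor's note (illustrative, hedged): for an invocation such as the
        # docstring example plus --disable-bgp-route-propagation and --tags,
        # the builder above serializes a PUT body shaped roughly like
        #     {"location": "<location>",
        #      "properties": {"disableBgpRoutePropagation": true},
        #      "tags": {"<key>": "<value>"}}
        # with values taken from the parsed arguments.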
def on_200_201(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200_201
)
_schema_on_200_201 = None
@classmethod
def _build_schema_on_200_201(cls):
if cls._schema_on_200_201 is not None:
return cls._schema_on_200_201
cls._schema_on_200_201 = AAZObjectType()
_CreateHelper._build_schema_route_table_read(cls._schema_on_200_201)
return cls._schema_on_200_201
class _CreateHelper:
"""Helper class for Create"""
_schema_application_security_group_read = None
@classmethod
def _build_schema_application_security_group_read(cls, _schema):
if cls._schema_application_security_group_read is not None:
_schema.etag = cls._schema_application_security_group_read.etag
_schema.id = cls._schema_application_security_group_read.id
_schema.location = cls._schema_application_security_group_read.location
_schema.name = cls._schema_application_security_group_read.name
_schema.properties = cls._schema_application_security_group_read.properties
_schema.tags = cls._schema_application_security_group_read.tags
_schema.type = cls._schema_application_security_group_read.type
return
cls._schema_application_security_group_read = _schema_application_security_group_read = AAZObjectType()
application_security_group_read = _schema_application_security_group_read
application_security_group_read.etag = AAZStrType(
flags={"read_only": True},
)
application_security_group_read.id = AAZStrType()
application_security_group_read.location = AAZStrType()
application_security_group_read.name = AAZStrType(
flags={"read_only": True},
)
application_security_group_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
application_security_group_read.tags = AAZDictType()
application_security_group_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_application_security_group_read.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
tags = _schema_application_security_group_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_application_security_group_read.etag
_schema.id = cls._schema_application_security_group_read.id
_schema.location = cls._schema_application_security_group_read.location
_schema.name = cls._schema_application_security_group_read.name
_schema.properties = cls._schema_application_security_group_read.properties
_schema.tags = cls._schema_application_security_group_read.tags
_schema.type = cls._schema_application_security_group_read.type
_schema_extended_location_read = None
@classmethod
def _build_schema_extended_location_read(cls, _schema):
if cls._schema_extended_location_read is not None:
_schema.name = cls._schema_extended_location_read.name
_schema.type = cls._schema_extended_location_read.type
return
cls._schema_extended_location_read = _schema_extended_location_read = AAZObjectType()
extended_location_read = _schema_extended_location_read
extended_location_read.name = AAZStrType()
extended_location_read.type = AAZStrType()
_schema.name = cls._schema_extended_location_read.name
_schema.type = cls._schema_extended_location_read.type
_schema_frontend_ip_configuration_read = None
@classmethod
def _build_schema_frontend_ip_configuration_read(cls, _schema):
if cls._schema_frontend_ip_configuration_read is not None:
_schema.etag = cls._schema_frontend_ip_configuration_read.etag
_schema.id = cls._schema_frontend_ip_configuration_read.id
_schema.name = cls._schema_frontend_ip_configuration_read.name
_schema.properties = cls._schema_frontend_ip_configuration_read.properties
_schema.type = cls._schema_frontend_ip_configuration_read.type
_schema.zones = cls._schema_frontend_ip_configuration_read.zones
return
cls._schema_frontend_ip_configuration_read = _schema_frontend_ip_configuration_read = AAZObjectType()
frontend_ip_configuration_read = _schema_frontend_ip_configuration_read
frontend_ip_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
frontend_ip_configuration_read.id = AAZStrType()
frontend_ip_configuration_read.name = AAZStrType()
frontend_ip_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
frontend_ip_configuration_read.type = AAZStrType(
flags={"read_only": True},
)
frontend_ip_configuration_read.zones = AAZListType()
properties = _schema_frontend_ip_configuration_read.properties
properties.gateway_load_balancer = AAZObjectType(
serialized_name="gatewayLoadBalancer",
)
cls._build_schema_sub_resource_read(properties.gateway_load_balancer)
properties.inbound_nat_pools = AAZListType(
serialized_name="inboundNatPools",
flags={"read_only": True},
)
properties.inbound_nat_rules = AAZListType(
serialized_name="inboundNatRules",
flags={"read_only": True},
)
properties.load_balancing_rules = AAZListType(
serialized_name="loadBalancingRules",
flags={"read_only": True},
)
properties.outbound_rules = AAZListType(
serialized_name="outboundRules",
flags={"read_only": True},
)
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_address_version = AAZStrType(
serialized_name="privateIPAddressVersion",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address = AAZObjectType(
serialized_name="publicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.public_ip_address)
properties.public_ip_prefix = AAZObjectType(
serialized_name="publicIPPrefix",
)
cls._build_schema_sub_resource_read(properties.public_ip_prefix)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
inbound_nat_pools = _schema_frontend_ip_configuration_read.properties.inbound_nat_pools
inbound_nat_pools.Element = AAZObjectType()
cls._build_schema_sub_resource_read(inbound_nat_pools.Element)
inbound_nat_rules = _schema_frontend_ip_configuration_read.properties.inbound_nat_rules
inbound_nat_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(inbound_nat_rules.Element)
load_balancing_rules = _schema_frontend_ip_configuration_read.properties.load_balancing_rules
load_balancing_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(load_balancing_rules.Element)
outbound_rules = _schema_frontend_ip_configuration_read.properties.outbound_rules
outbound_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(outbound_rules.Element)
zones = _schema_frontend_ip_configuration_read.zones
zones.Element = AAZStrType()
_schema.etag = cls._schema_frontend_ip_configuration_read.etag
_schema.id = cls._schema_frontend_ip_configuration_read.id
_schema.name = cls._schema_frontend_ip_configuration_read.name
_schema.properties = cls._schema_frontend_ip_configuration_read.properties
_schema.type = cls._schema_frontend_ip_configuration_read.type
_schema.zones = cls._schema_frontend_ip_configuration_read.zones
_schema_ip_configuration_read = None
@classmethod
def _build_schema_ip_configuration_read(cls, _schema):
if cls._schema_ip_configuration_read is not None:
_schema.etag = cls._schema_ip_configuration_read.etag
_schema.id = cls._schema_ip_configuration_read.id
_schema.name = cls._schema_ip_configuration_read.name
_schema.properties = cls._schema_ip_configuration_read.properties
return
cls._schema_ip_configuration_read = _schema_ip_configuration_read = AAZObjectType()
ip_configuration_read = _schema_ip_configuration_read
ip_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
ip_configuration_read.id = AAZStrType()
ip_configuration_read.name = AAZStrType()
ip_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = _schema_ip_configuration_read.properties
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address = AAZObjectType(
serialized_name="publicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.public_ip_address)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
_schema.etag = cls._schema_ip_configuration_read.etag
_schema.id = cls._schema_ip_configuration_read.id
_schema.name = cls._schema_ip_configuration_read.name
_schema.properties = cls._schema_ip_configuration_read.properties
_schema_network_interface_ip_configuration_read = None
@classmethod
def _build_schema_network_interface_ip_configuration_read(cls, _schema):
if cls._schema_network_interface_ip_configuration_read is not None:
_schema.etag = cls._schema_network_interface_ip_configuration_read.etag
_schema.id = cls._schema_network_interface_ip_configuration_read.id
_schema.name = cls._schema_network_interface_ip_configuration_read.name
_schema.properties = cls._schema_network_interface_ip_configuration_read.properties
_schema.type = cls._schema_network_interface_ip_configuration_read.type
return
cls._schema_network_interface_ip_configuration_read = _schema_network_interface_ip_configuration_read = AAZObjectType()
network_interface_ip_configuration_read = _schema_network_interface_ip_configuration_read
network_interface_ip_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
network_interface_ip_configuration_read.id = AAZStrType()
network_interface_ip_configuration_read.name = AAZStrType()
network_interface_ip_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_interface_ip_configuration_read.type = AAZStrType()
properties = _schema_network_interface_ip_configuration_read.properties
properties.application_gateway_backend_address_pools = AAZListType(
serialized_name="applicationGatewayBackendAddressPools",
)
properties.application_security_groups = AAZListType(
serialized_name="applicationSecurityGroups",
)
properties.gateway_load_balancer = AAZObjectType(
serialized_name="gatewayLoadBalancer",
)
cls._build_schema_sub_resource_read(properties.gateway_load_balancer)
properties.load_balancer_backend_address_pools = AAZListType(
serialized_name="loadBalancerBackendAddressPools",
)
properties.load_balancer_inbound_nat_rules = AAZListType(
serialized_name="loadBalancerInboundNatRules",
)
properties.primary = AAZBoolType()
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_address_version = AAZStrType(
serialized_name="privateIPAddressVersion",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.private_link_connection_properties = AAZObjectType(
serialized_name="privateLinkConnectionProperties",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address = AAZObjectType(
serialized_name="publicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.public_ip_address)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
properties.virtual_network_taps = AAZListType(
serialized_name="virtualNetworkTaps",
)
application_gateway_backend_address_pools = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools
application_gateway_backend_address_pools.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element.properties
properties.backend_addresses = AAZListType(
serialized_name="backendAddresses",
)
properties.backend_ip_configurations = AAZListType(
serialized_name="backendIPConfigurations",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
backend_addresses = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element.properties.backend_addresses
backend_addresses.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element.properties.backend_addresses.Element
_element.fqdn = AAZStrType()
_element.ip_address = AAZStrType(
serialized_name="ipAddress",
)
backend_ip_configurations = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools.Element.properties.backend_ip_configurations
backend_ip_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_ip_configuration_read(backend_ip_configurations.Element)
application_security_groups = _schema_network_interface_ip_configuration_read.properties.application_security_groups
application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(application_security_groups.Element)
load_balancer_backend_address_pools = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools
load_balancer_backend_address_pools.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties
properties.backend_ip_configurations = AAZListType(
serialized_name="backendIPConfigurations",
flags={"read_only": True},
)
properties.drain_period_in_seconds = AAZIntType(
serialized_name="drainPeriodInSeconds",
)
properties.inbound_nat_rules = AAZListType(
serialized_name="inboundNatRules",
flags={"read_only": True},
)
properties.load_balancer_backend_addresses = AAZListType(
serialized_name="loadBalancerBackendAddresses",
)
properties.load_balancing_rules = AAZListType(
serialized_name="loadBalancingRules",
flags={"read_only": True},
)
properties.location = AAZStrType()
properties.outbound_rule = AAZObjectType(
serialized_name="outboundRule",
)
cls._build_schema_sub_resource_read(properties.outbound_rule)
properties.outbound_rules = AAZListType(
serialized_name="outboundRules",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.tunnel_interfaces = AAZListType(
serialized_name="tunnelInterfaces",
)
backend_ip_configurations = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.backend_ip_configurations
backend_ip_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_ip_configuration_read(backend_ip_configurations.Element)
inbound_nat_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.inbound_nat_rules
inbound_nat_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(inbound_nat_rules.Element)
load_balancer_backend_addresses = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses
load_balancer_backend_addresses.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties
properties.admin_state = AAZStrType(
serialized_name="adminState",
)
properties.inbound_nat_rules_port_mapping = AAZListType(
serialized_name="inboundNatRulesPortMapping",
flags={"read_only": True},
)
properties.ip_address = AAZStrType(
serialized_name="ipAddress",
)
properties.load_balancer_frontend_ip_configuration = AAZObjectType(
serialized_name="loadBalancerFrontendIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.load_balancer_frontend_ip_configuration)
properties.network_interface_ip_configuration = AAZObjectType(
serialized_name="networkInterfaceIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.network_interface_ip_configuration)
properties.subnet = AAZObjectType()
cls._build_schema_sub_resource_read(properties.subnet)
properties.virtual_network = AAZObjectType(
serialized_name="virtualNetwork",
)
cls._build_schema_sub_resource_read(properties.virtual_network)
inbound_nat_rules_port_mapping = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties.inbound_nat_rules_port_mapping
inbound_nat_rules_port_mapping.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties.inbound_nat_rules_port_mapping.Element
_element.backend_port = AAZIntType(
serialized_name="backendPort",
)
_element.frontend_port = AAZIntType(
serialized_name="frontendPort",
)
_element.inbound_nat_rule_name = AAZStrType(
serialized_name="inboundNatRuleName",
)
load_balancing_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancing_rules
load_balancing_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(load_balancing_rules.Element)
outbound_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.outbound_rules
outbound_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(outbound_rules.Element)
tunnel_interfaces = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.tunnel_interfaces
tunnel_interfaces.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.tunnel_interfaces.Element
_element.identifier = AAZIntType()
_element.port = AAZIntType()
_element.protocol = AAZStrType()
_element.type = AAZStrType()
load_balancer_inbound_nat_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules
load_balancer_inbound_nat_rules.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules.Element.properties
properties.backend_address_pool = AAZObjectType(
serialized_name="backendAddressPool",
)
cls._build_schema_sub_resource_read(properties.backend_address_pool)
properties.backend_ip_configuration = AAZObjectType(
serialized_name="backendIPConfiguration",
)
cls._build_schema_network_interface_ip_configuration_read(properties.backend_ip_configuration)
properties.backend_port = AAZIntType(
serialized_name="backendPort",
)
properties.enable_floating_ip = AAZBoolType(
serialized_name="enableFloatingIP",
)
properties.enable_tcp_reset = AAZBoolType(
serialized_name="enableTcpReset",
)
properties.frontend_ip_configuration = AAZObjectType(
serialized_name="frontendIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.frontend_ip_configuration)
properties.frontend_port = AAZIntType(
serialized_name="frontendPort",
)
properties.frontend_port_range_end = AAZIntType(
serialized_name="frontendPortRangeEnd",
)
properties.frontend_port_range_start = AAZIntType(
serialized_name="frontendPortRangeStart",
)
properties.idle_timeout_in_minutes = AAZIntType(
serialized_name="idleTimeoutInMinutes",
)
properties.protocol = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
private_link_connection_properties = _schema_network_interface_ip_configuration_read.properties.private_link_connection_properties
private_link_connection_properties.fqdns = AAZListType(
flags={"read_only": True},
)
private_link_connection_properties.group_id = AAZStrType(
serialized_name="groupId",
flags={"read_only": True},
)
private_link_connection_properties.required_member_name = AAZStrType(
serialized_name="requiredMemberName",
flags={"read_only": True},
)
fqdns = _schema_network_interface_ip_configuration_read.properties.private_link_connection_properties.fqdns
fqdns.Element = AAZStrType()
virtual_network_taps = _schema_network_interface_ip_configuration_read.properties.virtual_network_taps
virtual_network_taps.Element = AAZObjectType()
cls._build_schema_virtual_network_tap_read(virtual_network_taps.Element)
_schema.etag = cls._schema_network_interface_ip_configuration_read.etag
_schema.id = cls._schema_network_interface_ip_configuration_read.id
_schema.name = cls._schema_network_interface_ip_configuration_read.name
_schema.properties = cls._schema_network_interface_ip_configuration_read.properties
_schema.type = cls._schema_network_interface_ip_configuration_read.type
_schema_network_interface_tap_configuration_read = None
@classmethod
def _build_schema_network_interface_tap_configuration_read(cls, _schema):
if cls._schema_network_interface_tap_configuration_read is not None:
_schema.etag = cls._schema_network_interface_tap_configuration_read.etag
_schema.id = cls._schema_network_interface_tap_configuration_read.id
_schema.name = cls._schema_network_interface_tap_configuration_read.name
_schema.properties = cls._schema_network_interface_tap_configuration_read.properties
_schema.type = cls._schema_network_interface_tap_configuration_read.type
return
cls._schema_network_interface_tap_configuration_read = _schema_network_interface_tap_configuration_read = AAZObjectType()
network_interface_tap_configuration_read = _schema_network_interface_tap_configuration_read
network_interface_tap_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
network_interface_tap_configuration_read.id = AAZStrType()
network_interface_tap_configuration_read.name = AAZStrType()
network_interface_tap_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_interface_tap_configuration_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_tap_configuration_read.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.virtual_network_tap = AAZObjectType(
serialized_name="virtualNetworkTap",
)
cls._build_schema_virtual_network_tap_read(properties.virtual_network_tap)
_schema.etag = cls._schema_network_interface_tap_configuration_read.etag
_schema.id = cls._schema_network_interface_tap_configuration_read.id
_schema.name = cls._schema_network_interface_tap_configuration_read.name
_schema.properties = cls._schema_network_interface_tap_configuration_read.properties
_schema.type = cls._schema_network_interface_tap_configuration_read.type
_schema_network_interface_read = None
@classmethod
def _build_schema_network_interface_read(cls, _schema):
if cls._schema_network_interface_read is not None:
_schema.etag = cls._schema_network_interface_read.etag
_schema.extended_location = cls._schema_network_interface_read.extended_location
_schema.id = cls._schema_network_interface_read.id
_schema.location = cls._schema_network_interface_read.location
_schema.name = cls._schema_network_interface_read.name
_schema.properties = cls._schema_network_interface_read.properties
_schema.tags = cls._schema_network_interface_read.tags
_schema.type = cls._schema_network_interface_read.type
return
cls._schema_network_interface_read = _schema_network_interface_read = AAZObjectType()
network_interface_read = _schema_network_interface_read
network_interface_read.etag = AAZStrType(
flags={"read_only": True},
)
network_interface_read.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
cls._build_schema_extended_location_read(network_interface_read.extended_location)
network_interface_read.id = AAZStrType()
network_interface_read.location = AAZStrType()
network_interface_read.name = AAZStrType(
flags={"read_only": True},
)
network_interface_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_interface_read.tags = AAZDictType()
network_interface_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_read.properties
properties.auxiliary_mode = AAZStrType(
serialized_name="auxiliaryMode",
)
properties.dns_settings = AAZObjectType(
serialized_name="dnsSettings",
)
properties.dscp_configuration = AAZObjectType(
serialized_name="dscpConfiguration",
)
cls._build_schema_sub_resource_read(properties.dscp_configuration)
properties.enable_accelerated_networking = AAZBoolType(
serialized_name="enableAcceleratedNetworking",
)
properties.enable_ip_forwarding = AAZBoolType(
serialized_name="enableIPForwarding",
)
properties.hosted_workloads = AAZListType(
serialized_name="hostedWorkloads",
flags={"read_only": True},
)
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
)
properties.mac_address = AAZStrType(
serialized_name="macAddress",
flags={"read_only": True},
)
properties.migration_phase = AAZStrType(
serialized_name="migrationPhase",
)
properties.network_security_group = AAZObjectType(
serialized_name="networkSecurityGroup",
)
cls._build_schema_network_security_group_read(properties.network_security_group)
properties.nic_type = AAZStrType(
serialized_name="nicType",
)
properties.primary = AAZBoolType(
flags={"read_only": True},
)
properties.private_endpoint = AAZObjectType(
serialized_name="privateEndpoint",
)
cls._build_schema_private_endpoint_read(properties.private_endpoint)
properties.private_link_service = AAZObjectType(
serialized_name="privateLinkService",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.tap_configurations = AAZListType(
serialized_name="tapConfigurations",
flags={"read_only": True},
)
properties.virtual_machine = AAZObjectType(
serialized_name="virtualMachine",
)
cls._build_schema_sub_resource_read(properties.virtual_machine)
properties.vnet_encryption_supported = AAZBoolType(
serialized_name="vnetEncryptionSupported",
flags={"read_only": True},
)
properties.workload_type = AAZStrType(
serialized_name="workloadType",
)
dns_settings = _schema_network_interface_read.properties.dns_settings
dns_settings.applied_dns_servers = AAZListType(
serialized_name="appliedDnsServers",
flags={"read_only": True},
)
dns_settings.dns_servers = AAZListType(
serialized_name="dnsServers",
)
dns_settings.internal_dns_name_label = AAZStrType(
serialized_name="internalDnsNameLabel",
)
dns_settings.internal_domain_name_suffix = AAZStrType(
serialized_name="internalDomainNameSuffix",
flags={"read_only": True},
)
dns_settings.internal_fqdn = AAZStrType(
serialized_name="internalFqdn",
flags={"read_only": True},
)
applied_dns_servers = _schema_network_interface_read.properties.dns_settings.applied_dns_servers
applied_dns_servers.Element = AAZStrType()
dns_servers = _schema_network_interface_read.properties.dns_settings.dns_servers
dns_servers.Element = AAZStrType()
hosted_workloads = _schema_network_interface_read.properties.hosted_workloads
hosted_workloads.Element = AAZStrType()
ip_configurations = _schema_network_interface_read.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_ip_configuration_read(ip_configurations.Element)
private_link_service = _schema_network_interface_read.properties.private_link_service
private_link_service.etag = AAZStrType(
flags={"read_only": True},
)
private_link_service.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
cls._build_schema_extended_location_read(private_link_service.extended_location)
private_link_service.id = AAZStrType()
private_link_service.location = AAZStrType()
private_link_service.name = AAZStrType(
flags={"read_only": True},
)
private_link_service.properties = AAZObjectType(
flags={"client_flatten": True},
)
private_link_service.tags = AAZDictType()
private_link_service.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_read.properties.private_link_service.properties
properties.alias = AAZStrType(
flags={"read_only": True},
)
properties.auto_approval = AAZObjectType(
serialized_name="autoApproval",
)
properties.enable_proxy_protocol = AAZBoolType(
serialized_name="enableProxyProtocol",
)
properties.fqdns = AAZListType()
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
)
properties.load_balancer_frontend_ip_configurations = AAZListType(
serialized_name="loadBalancerFrontendIpConfigurations",
)
properties.network_interfaces = AAZListType(
serialized_name="networkInterfaces",
flags={"read_only": True},
)
properties.private_endpoint_connections = AAZListType(
serialized_name="privateEndpointConnections",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.visibility = AAZObjectType()
auto_approval = _schema_network_interface_read.properties.private_link_service.properties.auto_approval
auto_approval.subscriptions = AAZListType()
subscriptions = _schema_network_interface_read.properties.private_link_service.properties.auto_approval.subscriptions
subscriptions.Element = AAZStrType()
fqdns = _schema_network_interface_read.properties.private_link_service.properties.fqdns
fqdns.Element = AAZStrType()
ip_configurations = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
_element = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations.Element.properties
properties.primary = AAZBoolType()
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_address_version = AAZStrType(
serialized_name="privateIPAddressVersion",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
load_balancer_frontend_ip_configurations = _schema_network_interface_read.properties.private_link_service.properties.load_balancer_frontend_ip_configurations
load_balancer_frontend_ip_configurations.Element = AAZObjectType()
cls._build_schema_frontend_ip_configuration_read(load_balancer_frontend_ip_configurations.Element)
network_interfaces = _schema_network_interface_read.properties.private_link_service.properties.network_interfaces
network_interfaces.Element = AAZObjectType()
cls._build_schema_network_interface_read(network_interfaces.Element)
private_endpoint_connections = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections
private_endpoint_connections.Element = AAZObjectType()
_element = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections.Element.properties
properties.link_identifier = AAZStrType(
serialized_name="linkIdentifier",
flags={"read_only": True},
)
properties.private_endpoint = AAZObjectType(
serialized_name="privateEndpoint",
)
cls._build_schema_private_endpoint_read(properties.private_endpoint)
properties.private_link_service_connection_state = AAZObjectType(
serialized_name="privateLinkServiceConnectionState",
)
cls._build_schema_private_link_service_connection_state_read(properties.private_link_service_connection_state)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
visibility = _schema_network_interface_read.properties.private_link_service.properties.visibility
visibility.subscriptions = AAZListType()
subscriptions = _schema_network_interface_read.properties.private_link_service.properties.visibility.subscriptions
subscriptions.Element = AAZStrType()
tags = _schema_network_interface_read.properties.private_link_service.tags
tags.Element = AAZStrType()
tap_configurations = _schema_network_interface_read.properties.tap_configurations
tap_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_tap_configuration_read(tap_configurations.Element)
tags = _schema_network_interface_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_network_interface_read.etag
_schema.extended_location = cls._schema_network_interface_read.extended_location
_schema.id = cls._schema_network_interface_read.id
_schema.location = cls._schema_network_interface_read.location
_schema.name = cls._schema_network_interface_read.name
_schema.properties = cls._schema_network_interface_read.properties
_schema.tags = cls._schema_network_interface_read.tags
_schema.type = cls._schema_network_interface_read.type
_schema_network_security_group_read = None
@classmethod
def _build_schema_network_security_group_read(cls, _schema):
if cls._schema_network_security_group_read is not None:
_schema.etag = cls._schema_network_security_group_read.etag
_schema.id = cls._schema_network_security_group_read.id
_schema.location = cls._schema_network_security_group_read.location
_schema.name = cls._schema_network_security_group_read.name
_schema.properties = cls._schema_network_security_group_read.properties
_schema.tags = cls._schema_network_security_group_read.tags
_schema.type = cls._schema_network_security_group_read.type
return
cls._schema_network_security_group_read = _schema_network_security_group_read = AAZObjectType()
network_security_group_read = _schema_network_security_group_read
network_security_group_read.etag = AAZStrType(
flags={"read_only": True},
)
network_security_group_read.id = AAZStrType()
network_security_group_read.location = AAZStrType()
network_security_group_read.name = AAZStrType(
flags={"read_only": True},
)
network_security_group_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_security_group_read.tags = AAZDictType()
network_security_group_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_security_group_read.properties
properties.default_security_rules = AAZListType(
serialized_name="defaultSecurityRules",
flags={"read_only": True},
)
properties.flow_logs = AAZListType(
serialized_name="flowLogs",
flags={"read_only": True},
)
properties.network_interfaces = AAZListType(
serialized_name="networkInterfaces",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.security_rules = AAZListType(
serialized_name="securityRules",
)
properties.subnets = AAZListType(
flags={"read_only": True},
)
default_security_rules = _schema_network_security_group_read.properties.default_security_rules
default_security_rules.Element = AAZObjectType()
cls._build_schema_security_rule_read(default_security_rules.Element)
flow_logs = _schema_network_security_group_read.properties.flow_logs
flow_logs.Element = AAZObjectType()
_element = _schema_network_security_group_read.properties.flow_logs.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.location = AAZStrType()
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_security_group_read.properties.flow_logs.Element.properties
properties.enabled = AAZBoolType()
properties.flow_analytics_configuration = AAZObjectType(
serialized_name="flowAnalyticsConfiguration",
)
properties.format = AAZObjectType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.retention_policy = AAZObjectType(
serialized_name="retentionPolicy",
)
properties.storage_id = AAZStrType(
serialized_name="storageId",
flags={"required": True},
)
properties.target_resource_guid = AAZStrType(
serialized_name="targetResourceGuid",
flags={"read_only": True},
)
properties.target_resource_id = AAZStrType(
serialized_name="targetResourceId",
flags={"required": True},
)
flow_analytics_configuration = _schema_network_security_group_read.properties.flow_logs.Element.properties.flow_analytics_configuration
flow_analytics_configuration.network_watcher_flow_analytics_configuration = AAZObjectType(
serialized_name="networkWatcherFlowAnalyticsConfiguration",
)
network_watcher_flow_analytics_configuration = _schema_network_security_group_read.properties.flow_logs.Element.properties.flow_analytics_configuration.network_watcher_flow_analytics_configuration
network_watcher_flow_analytics_configuration.enabled = AAZBoolType()
network_watcher_flow_analytics_configuration.traffic_analytics_interval = AAZIntType(
serialized_name="trafficAnalyticsInterval",
)
network_watcher_flow_analytics_configuration.workspace_id = AAZStrType(
serialized_name="workspaceId",
)
network_watcher_flow_analytics_configuration.workspace_region = AAZStrType(
serialized_name="workspaceRegion",
)
network_watcher_flow_analytics_configuration.workspace_resource_id = AAZStrType(
serialized_name="workspaceResourceId",
)
format = _schema_network_security_group_read.properties.flow_logs.Element.properties.format
format.type = AAZStrType()
format.version = AAZIntType()
retention_policy = _schema_network_security_group_read.properties.flow_logs.Element.properties.retention_policy
retention_policy.days = AAZIntType()
retention_policy.enabled = AAZBoolType()
tags = _schema_network_security_group_read.properties.flow_logs.Element.tags
tags.Element = AAZStrType()
network_interfaces = _schema_network_security_group_read.properties.network_interfaces
network_interfaces.Element = AAZObjectType()
cls._build_schema_network_interface_read(network_interfaces.Element)
security_rules = _schema_network_security_group_read.properties.security_rules
security_rules.Element = AAZObjectType()
cls._build_schema_security_rule_read(security_rules.Element)
subnets = _schema_network_security_group_read.properties.subnets
subnets.Element = AAZObjectType()
cls._build_schema_subnet_read(subnets.Element)
tags = _schema_network_security_group_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_network_security_group_read.etag
_schema.id = cls._schema_network_security_group_read.id
_schema.location = cls._schema_network_security_group_read.location
_schema.name = cls._schema_network_security_group_read.name
_schema.properties = cls._schema_network_security_group_read.properties
_schema.tags = cls._schema_network_security_group_read.tags
_schema.type = cls._schema_network_security_group_read.type
_schema_private_endpoint_read = None
@classmethod
def _build_schema_private_endpoint_read(cls, _schema):
if cls._schema_private_endpoint_read is not None:
_schema.etag = cls._schema_private_endpoint_read.etag
_schema.extended_location = cls._schema_private_endpoint_read.extended_location
_schema.id = cls._schema_private_endpoint_read.id
_schema.location = cls._schema_private_endpoint_read.location
_schema.name = cls._schema_private_endpoint_read.name
_schema.properties = cls._schema_private_endpoint_read.properties
_schema.tags = cls._schema_private_endpoint_read.tags
_schema.type = cls._schema_private_endpoint_read.type
return
cls._schema_private_endpoint_read = _schema_private_endpoint_read = AAZObjectType()
private_endpoint_read = _schema_private_endpoint_read
private_endpoint_read.etag = AAZStrType(
flags={"read_only": True},
)
private_endpoint_read.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
cls._build_schema_extended_location_read(private_endpoint_read.extended_location)
private_endpoint_read.id = AAZStrType()
private_endpoint_read.location = AAZStrType()
private_endpoint_read.name = AAZStrType(
flags={"read_only": True},
)
private_endpoint_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
private_endpoint_read.tags = AAZDictType()
private_endpoint_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_private_endpoint_read.properties
properties.application_security_groups = AAZListType(
serialized_name="applicationSecurityGroups",
)
properties.custom_dns_configs = AAZListType(
serialized_name="customDnsConfigs",
)
properties.custom_network_interface_name = AAZStrType(
serialized_name="customNetworkInterfaceName",
)
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
)
properties.manual_private_link_service_connections = AAZListType(
serialized_name="manualPrivateLinkServiceConnections",
)
properties.network_interfaces = AAZListType(
serialized_name="networkInterfaces",
flags={"read_only": True},
)
properties.private_link_service_connections = AAZListType(
serialized_name="privateLinkServiceConnections",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
application_security_groups = _schema_private_endpoint_read.properties.application_security_groups
application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(application_security_groups.Element)
custom_dns_configs = _schema_private_endpoint_read.properties.custom_dns_configs
custom_dns_configs.Element = AAZObjectType()
_element = _schema_private_endpoint_read.properties.custom_dns_configs.Element
_element.fqdn = AAZStrType()
_element.ip_addresses = AAZListType(
serialized_name="ipAddresses",
)
ip_addresses = _schema_private_endpoint_read.properties.custom_dns_configs.Element.ip_addresses
ip_addresses.Element = AAZStrType()
ip_configurations = _schema_private_endpoint_read.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
_element = _schema_private_endpoint_read.properties.ip_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_private_endpoint_read.properties.ip_configurations.Element.properties
properties.group_id = AAZStrType(
serialized_name="groupId",
)
properties.member_name = AAZStrType(
serialized_name="memberName",
)
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
manual_private_link_service_connections = _schema_private_endpoint_read.properties.manual_private_link_service_connections
manual_private_link_service_connections.Element = AAZObjectType()
cls._build_schema_private_link_service_connection_read(manual_private_link_service_connections.Element)
network_interfaces = _schema_private_endpoint_read.properties.network_interfaces
network_interfaces.Element = AAZObjectType()
cls._build_schema_network_interface_read(network_interfaces.Element)
private_link_service_connections = _schema_private_endpoint_read.properties.private_link_service_connections
private_link_service_connections.Element = AAZObjectType()
cls._build_schema_private_link_service_connection_read(private_link_service_connections.Element)
tags = _schema_private_endpoint_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_private_endpoint_read.etag
_schema.extended_location = cls._schema_private_endpoint_read.extended_location
_schema.id = cls._schema_private_endpoint_read.id
_schema.location = cls._schema_private_endpoint_read.location
_schema.name = cls._schema_private_endpoint_read.name
_schema.properties = cls._schema_private_endpoint_read.properties
_schema.tags = cls._schema_private_endpoint_read.tags
_schema.type = cls._schema_private_endpoint_read.type
_schema_private_link_service_connection_state_read = None
@classmethod
def _build_schema_private_link_service_connection_state_read(cls, _schema):
if cls._schema_private_link_service_connection_state_read is not None:
_schema.actions_required = cls._schema_private_link_service_connection_state_read.actions_required
_schema.description = cls._schema_private_link_service_connection_state_read.description
_schema.status = cls._schema_private_link_service_connection_state_read.status
return
cls._schema_private_link_service_connection_state_read = _schema_private_link_service_connection_state_read = AAZObjectType()
private_link_service_connection_state_read = _schema_private_link_service_connection_state_read
private_link_service_connection_state_read.actions_required = AAZStrType(
serialized_name="actionsRequired",
)
private_link_service_connection_state_read.description = AAZStrType()
private_link_service_connection_state_read.status = AAZStrType()
_schema.actions_required = cls._schema_private_link_service_connection_state_read.actions_required
_schema.description = cls._schema_private_link_service_connection_state_read.description
_schema.status = cls._schema_private_link_service_connection_state_read.status
_schema_private_link_service_connection_read = None
@classmethod
def _build_schema_private_link_service_connection_read(cls, _schema):
if cls._schema_private_link_service_connection_read is not None:
_schema.etag = cls._schema_private_link_service_connection_read.etag
_schema.id = cls._schema_private_link_service_connection_read.id
_schema.name = cls._schema_private_link_service_connection_read.name
_schema.properties = cls._schema_private_link_service_connection_read.properties
_schema.type = cls._schema_private_link_service_connection_read.type
return
cls._schema_private_link_service_connection_read = _schema_private_link_service_connection_read = AAZObjectType()
private_link_service_connection_read = _schema_private_link_service_connection_read
private_link_service_connection_read.etag = AAZStrType(
flags={"read_only": True},
)
private_link_service_connection_read.id = AAZStrType()
private_link_service_connection_read.name = AAZStrType()
private_link_service_connection_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
private_link_service_connection_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_private_link_service_connection_read.properties
properties.group_ids = AAZListType(
serialized_name="groupIds",
)
properties.private_link_service_connection_state = AAZObjectType(
serialized_name="privateLinkServiceConnectionState",
)
cls._build_schema_private_link_service_connection_state_read(properties.private_link_service_connection_state)
properties.private_link_service_id = AAZStrType(
serialized_name="privateLinkServiceId",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.request_message = AAZStrType(
serialized_name="requestMessage",
)
group_ids = _schema_private_link_service_connection_read.properties.group_ids
group_ids.Element = AAZStrType()
_schema.etag = cls._schema_private_link_service_connection_read.etag
_schema.id = cls._schema_private_link_service_connection_read.id
_schema.name = cls._schema_private_link_service_connection_read.name
_schema.properties = cls._schema_private_link_service_connection_read.properties
_schema.type = cls._schema_private_link_service_connection_read.type
_schema_public_ip_address_read = None
@classmethod
def _build_schema_public_ip_address_read(cls, _schema):
if cls._schema_public_ip_address_read is not None:
_schema.etag = cls._schema_public_ip_address_read.etag
_schema.extended_location = cls._schema_public_ip_address_read.extended_location
_schema.id = cls._schema_public_ip_address_read.id
_schema.location = cls._schema_public_ip_address_read.location
_schema.name = cls._schema_public_ip_address_read.name
_schema.properties = cls._schema_public_ip_address_read.properties
_schema.sku = cls._schema_public_ip_address_read.sku
_schema.tags = cls._schema_public_ip_address_read.tags
_schema.type = cls._schema_public_ip_address_read.type
_schema.zones = cls._schema_public_ip_address_read.zones
return
cls._schema_public_ip_address_read = _schema_public_ip_address_read = AAZObjectType()
public_ip_address_read = _schema_public_ip_address_read
public_ip_address_read.etag = AAZStrType(
flags={"read_only": True},
)
public_ip_address_read.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
cls._build_schema_extended_location_read(public_ip_address_read.extended_location)
public_ip_address_read.id = AAZStrType()
public_ip_address_read.location = AAZStrType()
public_ip_address_read.name = AAZStrType(
flags={"read_only": True},
)
public_ip_address_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
public_ip_address_read.sku = AAZObjectType()
public_ip_address_read.tags = AAZDictType()
public_ip_address_read.type = AAZStrType(
flags={"read_only": True},
)
public_ip_address_read.zones = AAZListType()
properties = _schema_public_ip_address_read.properties
properties.ddos_settings = AAZObjectType(
serialized_name="ddosSettings",
)
properties.delete_option = AAZStrType(
serialized_name="deleteOption",
)
properties.dns_settings = AAZObjectType(
serialized_name="dnsSettings",
)
properties.idle_timeout_in_minutes = AAZIntType(
serialized_name="idleTimeoutInMinutes",
)
properties.ip_address = AAZStrType(
serialized_name="ipAddress",
)
properties.ip_configuration = AAZObjectType(
serialized_name="ipConfiguration",
)
cls._build_schema_ip_configuration_read(properties.ip_configuration)
properties.ip_tags = AAZListType(
serialized_name="ipTags",
)
properties.linked_public_ip_address = AAZObjectType(
serialized_name="linkedPublicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.linked_public_ip_address)
properties.migration_phase = AAZStrType(
serialized_name="migrationPhase",
)
properties.nat_gateway = AAZObjectType(
serialized_name="natGateway",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address_version = AAZStrType(
serialized_name="publicIPAddressVersion",
)
properties.public_ip_allocation_method = AAZStrType(
serialized_name="publicIPAllocationMethod",
)
properties.public_ip_prefix = AAZObjectType(
serialized_name="publicIPPrefix",
)
cls._build_schema_sub_resource_read(properties.public_ip_prefix)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.service_public_ip_address = AAZObjectType(
serialized_name="servicePublicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.service_public_ip_address)
ddos_settings = _schema_public_ip_address_read.properties.ddos_settings
ddos_settings.ddos_custom_policy = AAZObjectType(
serialized_name="ddosCustomPolicy",
)
cls._build_schema_sub_resource_read(ddos_settings.ddos_custom_policy)
ddos_settings.protected_ip = AAZBoolType(
serialized_name="protectedIP",
)
ddos_settings.protection_coverage = AAZStrType(
serialized_name="protectionCoverage",
)
dns_settings = _schema_public_ip_address_read.properties.dns_settings
dns_settings.domain_name_label = AAZStrType(
serialized_name="domainNameLabel",
)
dns_settings.fqdn = AAZStrType()
dns_settings.reverse_fqdn = AAZStrType(
serialized_name="reverseFqdn",
)
ip_tags = _schema_public_ip_address_read.properties.ip_tags
ip_tags.Element = AAZObjectType()
_element = _schema_public_ip_address_read.properties.ip_tags.Element
_element.ip_tag_type = AAZStrType(
serialized_name="ipTagType",
)
_element.tag = AAZStrType()
nat_gateway = _schema_public_ip_address_read.properties.nat_gateway
nat_gateway.etag = AAZStrType(
flags={"read_only": True},
)
nat_gateway.id = AAZStrType()
nat_gateway.location = AAZStrType()
nat_gateway.name = AAZStrType(
flags={"read_only": True},
)
nat_gateway.properties = AAZObjectType(
flags={"client_flatten": True},
)
nat_gateway.sku = AAZObjectType()
nat_gateway.tags = AAZDictType()
nat_gateway.type = AAZStrType(
flags={"read_only": True},
)
nat_gateway.zones = AAZListType()
properties = _schema_public_ip_address_read.properties.nat_gateway.properties
properties.idle_timeout_in_minutes = AAZIntType(
serialized_name="idleTimeoutInMinutes",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_addresses = AAZListType(
serialized_name="publicIpAddresses",
)
properties.public_ip_prefixes = AAZListType(
serialized_name="publicIpPrefixes",
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.subnets = AAZListType(
flags={"read_only": True},
)
public_ip_addresses = _schema_public_ip_address_read.properties.nat_gateway.properties.public_ip_addresses
public_ip_addresses.Element = AAZObjectType()
cls._build_schema_sub_resource_read(public_ip_addresses.Element)
public_ip_prefixes = _schema_public_ip_address_read.properties.nat_gateway.properties.public_ip_prefixes
public_ip_prefixes.Element = AAZObjectType()
cls._build_schema_sub_resource_read(public_ip_prefixes.Element)
subnets = _schema_public_ip_address_read.properties.nat_gateway.properties.subnets
subnets.Element = AAZObjectType()
cls._build_schema_sub_resource_read(subnets.Element)
sku = _schema_public_ip_address_read.properties.nat_gateway.sku
sku.name = AAZStrType()
tags = _schema_public_ip_address_read.properties.nat_gateway.tags
tags.Element = AAZStrType()
zones = _schema_public_ip_address_read.properties.nat_gateway.zones
zones.Element = AAZStrType()
sku = _schema_public_ip_address_read.sku
sku.name = AAZStrType()
sku.tier = AAZStrType()
tags = _schema_public_ip_address_read.tags
tags.Element = AAZStrType()
zones = _schema_public_ip_address_read.zones
zones.Element = AAZStrType()
_schema.etag = cls._schema_public_ip_address_read.etag
_schema.extended_location = cls._schema_public_ip_address_read.extended_location
_schema.id = cls._schema_public_ip_address_read.id
_schema.location = cls._schema_public_ip_address_read.location
_schema.name = cls._schema_public_ip_address_read.name
_schema.properties = cls._schema_public_ip_address_read.properties
_schema.sku = cls._schema_public_ip_address_read.sku
_schema.tags = cls._schema_public_ip_address_read.tags
_schema.type = cls._schema_public_ip_address_read.type
_schema.zones = cls._schema_public_ip_address_read.zones
_schema_route_table_read = None
@classmethod
def _build_schema_route_table_read(cls, _schema):
if cls._schema_route_table_read is not None:
_schema.etag = cls._schema_route_table_read.etag
_schema.id = cls._schema_route_table_read.id
_schema.location = cls._schema_route_table_read.location
_schema.name = cls._schema_route_table_read.name
_schema.properties = cls._schema_route_table_read.properties
_schema.tags = cls._schema_route_table_read.tags
_schema.type = cls._schema_route_table_read.type
return
cls._schema_route_table_read = _schema_route_table_read = AAZObjectType()
route_table_read = _schema_route_table_read
route_table_read.etag = AAZStrType(
flags={"read_only": True},
)
route_table_read.id = AAZStrType()
route_table_read.location = AAZStrType()
route_table_read.name = AAZStrType(
flags={"read_only": True},
)
route_table_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
route_table_read.tags = AAZDictType()
route_table_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_route_table_read.properties
properties.disable_bgp_route_propagation = AAZBoolType(
serialized_name="disableBgpRoutePropagation",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.routes = AAZListType()
properties.subnets = AAZListType(
flags={"read_only": True},
)
routes = _schema_route_table_read.properties.routes
routes.Element = AAZObjectType()
_element = _schema_route_table_read.properties.routes.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = _schema_route_table_read.properties.routes.Element.properties
properties.address_prefix = AAZStrType(
serialized_name="addressPrefix",
)
properties.has_bgp_override = AAZBoolType(
serialized_name="hasBgpOverride",
)
properties.next_hop_ip_address = AAZStrType(
serialized_name="nextHopIpAddress",
)
properties.next_hop_type = AAZStrType(
serialized_name="nextHopType",
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
subnets = _schema_route_table_read.properties.subnets
subnets.Element = AAZObjectType()
cls._build_schema_subnet_read(subnets.Element)
tags = _schema_route_table_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_route_table_read.etag
_schema.id = cls._schema_route_table_read.id
_schema.location = cls._schema_route_table_read.location
_schema.name = cls._schema_route_table_read.name
_schema.properties = cls._schema_route_table_read.properties
_schema.tags = cls._schema_route_table_read.tags
_schema.type = cls._schema_route_table_read.type
_schema_security_rule_read = None
@classmethod
def _build_schema_security_rule_read(cls, _schema):
if cls._schema_security_rule_read is not None:
_schema.etag = cls._schema_security_rule_read.etag
_schema.id = cls._schema_security_rule_read.id
_schema.name = cls._schema_security_rule_read.name
_schema.properties = cls._schema_security_rule_read.properties
_schema.type = cls._schema_security_rule_read.type
return
cls._schema_security_rule_read = _schema_security_rule_read = AAZObjectType()
security_rule_read = _schema_security_rule_read
security_rule_read.etag = AAZStrType(
flags={"read_only": True},
)
security_rule_read.id = AAZStrType()
security_rule_read.name = AAZStrType()
security_rule_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
security_rule_read.type = AAZStrType()
properties = _schema_security_rule_read.properties
properties.access = AAZStrType(
flags={"required": True},
)
properties.description = AAZStrType()
properties.destination_address_prefix = AAZStrType(
serialized_name="destinationAddressPrefix",
)
properties.destination_address_prefixes = AAZListType(
serialized_name="destinationAddressPrefixes",
)
properties.destination_application_security_groups = AAZListType(
serialized_name="destinationApplicationSecurityGroups",
)
properties.destination_port_range = AAZStrType(
serialized_name="destinationPortRange",
)
properties.destination_port_ranges = AAZListType(
serialized_name="destinationPortRanges",
)
properties.direction = AAZStrType(
flags={"required": True},
)
properties.priority = AAZIntType()
properties.protocol = AAZStrType(
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.source_address_prefix = AAZStrType(
serialized_name="sourceAddressPrefix",
)
properties.source_address_prefixes = AAZListType(
serialized_name="sourceAddressPrefixes",
)
properties.source_application_security_groups = AAZListType(
serialized_name="sourceApplicationSecurityGroups",
)
properties.source_port_range = AAZStrType(
serialized_name="sourcePortRange",
)
properties.source_port_ranges = AAZListType(
serialized_name="sourcePortRanges",
)
destination_address_prefixes = _schema_security_rule_read.properties.destination_address_prefixes
destination_address_prefixes.Element = AAZStrType()
destination_application_security_groups = _schema_security_rule_read.properties.destination_application_security_groups
destination_application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(destination_application_security_groups.Element)
destination_port_ranges = _schema_security_rule_read.properties.destination_port_ranges
destination_port_ranges.Element = AAZStrType()
source_address_prefixes = _schema_security_rule_read.properties.source_address_prefixes
source_address_prefixes.Element = AAZStrType()
source_application_security_groups = _schema_security_rule_read.properties.source_application_security_groups
source_application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(source_application_security_groups.Element)
source_port_ranges = _schema_security_rule_read.properties.source_port_ranges
source_port_ranges.Element = AAZStrType()
_schema.etag = cls._schema_security_rule_read.etag
_schema.id = cls._schema_security_rule_read.id
_schema.name = cls._schema_security_rule_read.name
_schema.properties = cls._schema_security_rule_read.properties
_schema.type = cls._schema_security_rule_read.type
_schema_sub_resource_read = None
@classmethod
def _build_schema_sub_resource_read(cls, _schema):
if cls._schema_sub_resource_read is not None:
_schema.id = cls._schema_sub_resource_read.id
return
cls._schema_sub_resource_read = _schema_sub_resource_read = AAZObjectType()
sub_resource_read = _schema_sub_resource_read
sub_resource_read.id = AAZStrType()
_schema.id = cls._schema_sub_resource_read.id
_schema_subnet_read = None
@classmethod
def _build_schema_subnet_read(cls, _schema):
if cls._schema_subnet_read is not None:
_schema.etag = cls._schema_subnet_read.etag
_schema.id = cls._schema_subnet_read.id
_schema.name = cls._schema_subnet_read.name
_schema.properties = cls._schema_subnet_read.properties
_schema.type = cls._schema_subnet_read.type
return
cls._schema_subnet_read = _schema_subnet_read = AAZObjectType()
subnet_read = _schema_subnet_read
subnet_read.etag = AAZStrType(
flags={"read_only": True},
)
subnet_read.id = AAZStrType()
subnet_read.name = AAZStrType()
subnet_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
subnet_read.type = AAZStrType()
properties = _schema_subnet_read.properties
properties.address_prefix = AAZStrType(
serialized_name="addressPrefix",
)
properties.address_prefixes = AAZListType(
serialized_name="addressPrefixes",
)
properties.application_gateway_ip_configurations = AAZListType(
serialized_name="applicationGatewayIpConfigurations",
)
properties.delegations = AAZListType()
properties.ip_allocations = AAZListType(
serialized_name="ipAllocations",
)
properties.ip_configuration_profiles = AAZListType(
serialized_name="ipConfigurationProfiles",
flags={"read_only": True},
)
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
flags={"read_only": True},
)
properties.nat_gateway = AAZObjectType(
serialized_name="natGateway",
)
cls._build_schema_sub_resource_read(properties.nat_gateway)
properties.network_security_group = AAZObjectType(
serialized_name="networkSecurityGroup",
)
cls._build_schema_network_security_group_read(properties.network_security_group)
properties.private_endpoint_network_policies = AAZStrType(
serialized_name="privateEndpointNetworkPolicies",
)
properties.private_endpoints = AAZListType(
serialized_name="privateEndpoints",
flags={"read_only": True},
)
properties.private_link_service_network_policies = AAZStrType(
serialized_name="privateLinkServiceNetworkPolicies",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.purpose = AAZStrType(
flags={"read_only": True},
)
properties.resource_navigation_links = AAZListType(
serialized_name="resourceNavigationLinks",
flags={"read_only": True},
)
properties.route_table = AAZObjectType(
serialized_name="routeTable",
)
cls._build_schema_route_table_read(properties.route_table)
properties.service_association_links = AAZListType(
serialized_name="serviceAssociationLinks",
flags={"read_only": True},
)
properties.service_endpoint_policies = AAZListType(
serialized_name="serviceEndpointPolicies",
)
properties.service_endpoints = AAZListType(
serialized_name="serviceEndpoints",
)
address_prefixes = _schema_subnet_read.properties.address_prefixes
address_prefixes.Element = AAZStrType()
application_gateway_ip_configurations = _schema_subnet_read.properties.application_gateway_ip_configurations
application_gateway_ip_configurations.Element = AAZObjectType()
_element = _schema_subnet_read.properties.application_gateway_ip_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.application_gateway_ip_configurations.Element.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_sub_resource_read(properties.subnet)
delegations = _schema_subnet_read.properties.delegations
delegations.Element = AAZObjectType()
_element = _schema_subnet_read.properties.delegations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = _schema_subnet_read.properties.delegations.Element.properties
properties.actions = AAZListType(
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.service_name = AAZStrType(
serialized_name="serviceName",
)
actions = _schema_subnet_read.properties.delegations.Element.properties.actions
actions.Element = AAZStrType()
ip_allocations = _schema_subnet_read.properties.ip_allocations
ip_allocations.Element = AAZObjectType()
cls._build_schema_sub_resource_read(ip_allocations.Element)
ip_configuration_profiles = _schema_subnet_read.properties.ip_configuration_profiles
ip_configuration_profiles.Element = AAZObjectType()
_element = _schema_subnet_read.properties.ip_configuration_profiles.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.ip_configuration_profiles.Element.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
ip_configurations = _schema_subnet_read.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
cls._build_schema_ip_configuration_read(ip_configurations.Element)
private_endpoints = _schema_subnet_read.properties.private_endpoints
private_endpoints.Element = AAZObjectType()
cls._build_schema_private_endpoint_read(private_endpoints.Element)
resource_navigation_links = _schema_subnet_read.properties.resource_navigation_links
resource_navigation_links.Element = AAZObjectType()
_element = _schema_subnet_read.properties.resource_navigation_links.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.resource_navigation_links.Element.properties
properties.link = AAZStrType()
properties.linked_resource_type = AAZStrType(
serialized_name="linkedResourceType",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
service_association_links = _schema_subnet_read.properties.service_association_links
service_association_links.Element = AAZObjectType()
_element = _schema_subnet_read.properties.service_association_links.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.service_association_links.Element.properties
properties.allow_delete = AAZBoolType(
serialized_name="allowDelete",
)
properties.link = AAZStrType()
properties.linked_resource_type = AAZStrType(
serialized_name="linkedResourceType",
)
properties.locations = AAZListType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
locations = _schema_subnet_read.properties.service_association_links.Element.properties.locations
locations.Element = AAZStrType()
service_endpoint_policies = _schema_subnet_read.properties.service_endpoint_policies
service_endpoint_policies.Element = AAZObjectType()
_element = _schema_subnet_read.properties.service_endpoint_policies.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.kind = AAZStrType(
flags={"read_only": True},
)
_element.location = AAZStrType()
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.service_endpoint_policies.Element.properties
properties.contextual_service_endpoint_policies = AAZListType(
serialized_name="contextualServiceEndpointPolicies",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.service_alias = AAZStrType(
serialized_name="serviceAlias",
)
properties.service_endpoint_policy_definitions = AAZListType(
serialized_name="serviceEndpointPolicyDefinitions",
)
properties.subnets = AAZListType(
flags={"read_only": True},
)
contextual_service_endpoint_policies = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.contextual_service_endpoint_policies
contextual_service_endpoint_policies.Element = AAZStrType()
service_endpoint_policy_definitions = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.service_endpoint_policy_definitions
service_endpoint_policy_definitions.Element = AAZObjectType()
_element = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.service_endpoint_policy_definitions.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.service_endpoint_policy_definitions.Element.properties
properties.description = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.service = AAZStrType()
properties.service_resources = AAZListType(
serialized_name="serviceResources",
)
service_resources = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.service_endpoint_policy_definitions.Element.properties.service_resources
service_resources.Element = AAZStrType()
subnets = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.subnets
subnets.Element = AAZObjectType()
cls._build_schema_subnet_read(subnets.Element)
tags = _schema_subnet_read.properties.service_endpoint_policies.Element.tags
tags.Element = AAZStrType()
service_endpoints = _schema_subnet_read.properties.service_endpoints
service_endpoints.Element = AAZObjectType()
_element = _schema_subnet_read.properties.service_endpoints.Element
_element.locations = AAZListType()
_element.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
_element.service = AAZStrType()
locations = _schema_subnet_read.properties.service_endpoints.Element.locations
locations.Element = AAZStrType()
_schema.etag = cls._schema_subnet_read.etag
_schema.id = cls._schema_subnet_read.id
_schema.name = cls._schema_subnet_read.name
_schema.properties = cls._schema_subnet_read.properties
_schema.type = cls._schema_subnet_read.type
_schema_virtual_network_tap_read = None
@classmethod
def _build_schema_virtual_network_tap_read(cls, _schema):
if cls._schema_virtual_network_tap_read is not None:
_schema.etag = cls._schema_virtual_network_tap_read.etag
_schema.id = cls._schema_virtual_network_tap_read.id
_schema.location = cls._schema_virtual_network_tap_read.location
_schema.name = cls._schema_virtual_network_tap_read.name
_schema.properties = cls._schema_virtual_network_tap_read.properties
_schema.tags = cls._schema_virtual_network_tap_read.tags
_schema.type = cls._schema_virtual_network_tap_read.type
return
cls._schema_virtual_network_tap_read = _schema_virtual_network_tap_read = AAZObjectType()
virtual_network_tap_read = _schema_virtual_network_tap_read
virtual_network_tap_read.etag = AAZStrType(
flags={"read_only": True},
)
virtual_network_tap_read.id = AAZStrType()
virtual_network_tap_read.location = AAZStrType()
virtual_network_tap_read.name = AAZStrType(
flags={"read_only": True},
)
virtual_network_tap_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
virtual_network_tap_read.tags = AAZDictType()
virtual_network_tap_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_virtual_network_tap_read.properties
properties.destination_load_balancer_front_end_ip_configuration = AAZObjectType(
serialized_name="destinationLoadBalancerFrontEndIPConfiguration",
)
cls._build_schema_frontend_ip_configuration_read(properties.destination_load_balancer_front_end_ip_configuration)
properties.destination_network_interface_ip_configuration = AAZObjectType(
serialized_name="destinationNetworkInterfaceIPConfiguration",
)
cls._build_schema_network_interface_ip_configuration_read(properties.destination_network_interface_ip_configuration)
properties.destination_port = AAZIntType(
serialized_name="destinationPort",
)
properties.network_interface_tap_configurations = AAZListType(
serialized_name="networkInterfaceTapConfigurations",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
network_interface_tap_configurations = _schema_virtual_network_tap_read.properties.network_interface_tap_configurations
network_interface_tap_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_tap_configuration_read(network_interface_tap_configurations.Element)
tags = _schema_virtual_network_tap_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_virtual_network_tap_read.etag
_schema.id = cls._schema_virtual_network_tap_read.id
_schema.location = cls._schema_virtual_network_tap_read.location
_schema.name = cls._schema_virtual_network_tap_read.name
_schema.properties = cls._schema_virtual_network_tap_read.properties
_schema.tags = cls._schema_virtual_network_tap_read.tags
_schema.type = cls._schema_virtual_network_tap_read.type
__all__ = ["Create"]
|
PypiClean
|
/testops_api-1.0.2-py3-none-any.whl/testops_api/api/team_api.py
|
import re # noqa: F401
import sys # noqa: F401
from testops_api.api_client import ApiClient, Endpoint
from testops_api.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from testops_api.model.page_team_resource import PageTeamResource
from testops_api.model.pageable import Pageable
from testops_api.model.team_resource import TeamResource
from testops_api.model.user_resource import UserResource
from testops_api.model.user_team_resource import UserTeamResource
class TeamApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __assign_user_team(
self,
team_id,
new_user_ids,
**kwargs
):
"""Adds users to a Team. Returns the added User detail. # noqa: E501
The user issuing this request must be the Admin of the team. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.assign_user_team(team_id, new_user_ids, async_req=True)
>>> result = thread.get()
Args:
team_id (int):
new_user_ids ([int]):
Keyword Args:
                _return_http_data_only (bool): response data without HTTP status
                    code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[UserResource]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['team_id'] = \
team_id
kwargs['new_user_ids'] = \
new_user_ids
return self.call_with_http_info(**kwargs)
self.assign_user_team = Endpoint(
settings={
'response_type': ([UserResource],),
'auth': [
'basicScheme'
],
'endpoint_path': '/api/v1/users/add',
'operation_id': 'assign_user_team',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'team_id',
'new_user_ids',
],
'required': [
'team_id',
'new_user_ids',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'team_id':
(int,),
'new_user_ids':
([int],),
},
'attribute_map': {
'team_id': 'teamId',
'new_user_ids': 'newUserIds',
},
'location_map': {
'team_id': 'query',
'new_user_ids': 'query',
},
'collection_format_map': {
'new_user_ids': 'multi',
}
},
headers_map={
'accept': [
'*/*'
],
'content_type': [],
},
api_client=api_client,
callable=__assign_user_team
)
def __create2(
self,
team_resource,
**kwargs
):
"""Creates a new Team. Returns the created Team detail. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create2(team_resource, async_req=True)
>>> result = thread.get()
Args:
team_resource (TeamResource):
Keyword Args:
                _return_http_data_only (bool): response data without HTTP status
                    code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
TeamResource
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['team_resource'] = \
team_resource
return self.call_with_http_info(**kwargs)
self.create2 = Endpoint(
settings={
'response_type': (TeamResource,),
'auth': [
'basicScheme'
],
'endpoint_path': '/api/v1/teams',
'operation_id': 'create2',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'team_resource',
],
'required': [
'team_resource',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'team_resource':
(TeamResource,),
},
'attribute_map': {
},
'location_map': {
'team_resource': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'*/*'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create2
)
def __delete2(
self,
id,
**kwargs
):
"""Delete a Team. Returns the delete Team detail. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete2(id, async_req=True)
>>> result = thread.get()
Args:
id (int):
Keyword Args:
                _return_http_data_only (bool): response data without HTTP status
                    code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
TeamResource
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.delete2 = Endpoint(
settings={
'response_type': (TeamResource,),
'auth': [
'basicScheme'
],
'endpoint_path': '/api/v1/teams/{id}',
'operation_id': 'delete2',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(int,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'*/*'
],
'content_type': [],
},
api_client=api_client,
callable=__delete2
)
def __get8(
self,
id,
**kwargs
):
"""Returns a Team detail. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get8(id, async_req=True)
>>> result = thread.get()
Args:
id (int):
Keyword Args:
                _return_http_data_only (bool): response data without HTTP status
                    code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
TeamResource
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get8 = Endpoint(
settings={
'response_type': (TeamResource,),
'auth': [
'basicScheme'
],
'endpoint_path': '/api/v1/teams/{id}',
'operation_id': 'get8',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(int,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'*/*'
],
'content_type': [],
},
api_client=api_client,
callable=__get8
)
def __list(
self,
pageable,
**kwargs
):
"""Returns all Teams of the current User. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list(pageable, async_req=True)
>>> result = thread.get()
Args:
pageable (Pageable):
Keyword Args:
                _return_http_data_only (bool): response data without HTTP status
                    code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
PageTeamResource
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['pageable'] = \
pageable
return self.call_with_http_info(**kwargs)
self.list = Endpoint(
settings={
'response_type': (PageTeamResource,),
'auth': [
'basicScheme'
],
'endpoint_path': '/api/v1/teams',
'operation_id': 'list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'pageable',
],
'required': [
'pageable',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'pageable':
(Pageable,),
},
'attribute_map': {
'pageable': 'pageable',
},
'location_map': {
'pageable': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'*/*'
],
'content_type': [],
},
api_client=api_client,
callable=__list
)
def __remove_user(
self,
team_id,
user_id,
**kwargs
):
"""Removes a User from a Team. Returns the removed User detail. # noqa: E501
The user issuing this request must be the Admin of the team. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_user(team_id, user_id, async_req=True)
>>> result = thread.get()
Args:
team_id (int):
user_id (int):
Keyword Args:
                _return_http_data_only (bool): response data without HTTP status
                    code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UserResource
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['team_id'] = \
team_id
kwargs['user_id'] = \
user_id
return self.call_with_http_info(**kwargs)
self.remove_user = Endpoint(
settings={
'response_type': (UserResource,),
'auth': [
'basicScheme'
],
'endpoint_path': '/api/v1/users/remove',
'operation_id': 'remove_user',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'team_id',
'user_id',
],
'required': [
'team_id',
'user_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'team_id':
(int,),
'user_id':
(int,),
},
'attribute_map': {
'team_id': 'teamId',
'user_id': 'userId',
},
'location_map': {
'team_id': 'query',
'user_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'*/*'
],
'content_type': [],
},
api_client=api_client,
callable=__remove_user
)
def __update2(
self,
team_resource,
**kwargs
):
"""Updates a Team detail. Returns the updated Team detail. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update2(team_resource, async_req=True)
>>> result = thread.get()
Args:
team_resource (TeamResource):
Keyword Args:
                _return_http_data_only (bool): response data without HTTP status
                    code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
TeamResource
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['team_resource'] = \
team_resource
return self.call_with_http_info(**kwargs)
self.update2 = Endpoint(
settings={
'response_type': (TeamResource,),
'auth': [
'basicScheme'
],
'endpoint_path': '/api/v1/teams',
'operation_id': 'update2',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'team_resource',
],
'required': [
'team_resource',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'team_resource':
(TeamResource,),
},
'attribute_map': {
},
'location_map': {
'team_resource': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'*/*'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__update2
)
def __update_user_team(
self,
user_team_resource,
**kwargs
):
"""Updates the role of a User in a Team. Returns the updated detail. # noqa: E501
The user issuing this request must be the Admin of the team. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user_team(user_team_resource, async_req=True)
>>> result = thread.get()
Args:
user_team_resource (UserTeamResource):
Keyword Args:
                _return_http_data_only (bool): response data without HTTP status
                    code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UserTeamResource
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['user_team_resource'] = \
user_team_resource
return self.call_with_http_info(**kwargs)
self.update_user_team = Endpoint(
settings={
'response_type': (UserTeamResource,),
'auth': [
'basicScheme'
],
'endpoint_path': '/api/v1/permission/team/user',
'operation_id': 'update_user_team',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'user_team_resource',
],
'required': [
'user_team_resource',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'user_team_resource':
(UserTeamResource,),
},
'attribute_map': {
},
'location_map': {
'user_team_resource': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'*/*'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__update_user_team
)
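
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generated client). It shows how
# the endpoints defined above are typically invoked. The package name
# `openapi_client`, the API class name `TeamApi`, and the model import path
# are assumptions about the generator's output layout; substitute the names
# actually produced for this package.
#
#     import openapi_client
#     from openapi_client.model.user_team_resource import UserTeamResource
#
#     configuration = openapi_client.Configuration(host="http://localhost")
#     with openapi_client.ApiClient(configuration) as api_client:
#         api = openapi_client.TeamApi(api_client)   # assumed class name
#         body = UserTeamResource()                  # fill in the required fields
#
#         # Synchronous call: returns the deserialized UserTeamResource.
#         result = api.update_user_team(body)
#
#         # Asynchronous call: returns a thread; .get() resolves the result.
#         thread = api.update_user_team(body, async_req=True)
#         result_async = thread.get()
# ---------------------------------------------------------------------------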
/pytubedata-1.1.0-py3-none-any.whl/Youtube/channel.py
class channel():
"""
    The channel class provides the methods to fetch channel-related data from the YouTube Data API.
    params: required
        key - YouTube Data API key. Get a YouTube Data API key here: https://console.cloud.google.com/apis/dashboard
"""
def __init__(self):
pass
def get_channel(self, **kwargs):
'''
        Given a channel ``id``, returns metrics (views, subscriberCount, videoCount) and metadata (description, category) as a dictionary.
        Read the docs: https://developers.google.com/youtube/v3/docs/channels/list
        params: required
            id: The ID of a channel, e.g. "UCifqJm4QMYTtrxZb_bN02Pw"; this can be found in the source code of the channel's homepage
            type: str or list of str
        returns metadata for the given channel ``id``(s).
        rtype: dict
'''
if not kwargs.get('id'):
raise KeyError("id not given")
params= {
"id": kwargs.get('id'),
"part": "snippet, statistics, topicDetails, brandingSettings, contentDetails, contentOwnerDetails"
}
        kwargs.pop('id')
        params.update(kwargs)
return ("/channels", params)
def get_all_sections(self, **kwargs):
'''
        Given a channel ``id``, returns all sections of the channel.
        Read the docs: https://developers.google.com/youtube/v3/docs/channelSections/list
        params: required
            id: The ID of a channel, e.g. "UCifqJm4QMYTtrxZb_bN02Pw"; this can be found in the source code of the channel's homepage
            type: str
        returns the sections of the given channel ``id``.
        rtype: dict
'''
if not kwargs.get('id'):
raise KeyError("id not given")
params= {
"channelId": kwargs.get('id'),
"part": "snippet, contentDetails"
}
kwargs.pop('id')
params.update(kwargs)
return ("/channelSections", params)
def get_section(self, **kwargs):
'''
        Given a channelSection ``id``, returns metadata for the section.
        Read the docs: https://developers.google.com/youtube/v3/docs/channelSections/list
        params: required
            id: The ID of a channel section, e.g. "UCqW8jxh4tH1Z1sWPbkGWL4g.LeAltgu_pbM"; this can be obtained using the get_all_sections() method
            type: str
        returns metadata for the given channelSection ``id``(s).
        rtype: dict
'''
if not kwargs.get('id'):
raise KeyError("id not given")
params= {
"id": kwargs.get('id'),
"part": "snippet, contentDetails"
}
kwargs.pop('id')
params.update(kwargs)
return ("/channelSections", params)
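
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of channel.py). The methods above
# return an (endpoint, params) tuple rather than performing the HTTP call
# themselves, so a caller is expected to combine that tuple with an API key
# and the YouTube Data API base URL. The request handling below is an
# assumption about how the rest of the package uses these tuples.
#
#     import requests
#
#     BASE_URL = "https://www.googleapis.com/youtube/v3"
#     API_KEY = "YOUR_API_KEY"  # placeholder
#
#     endpoint, params = channel().get_channel(id="UCifqJm4QMYTtrxZb_bN02Pw")
#     params["key"] = API_KEY
#     data = requests.get(BASE_URL + endpoint, params=params).json()
# ---------------------------------------------------------------------------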
/discord-pda-1.0.1a0.tar.gz/discord-pda-1.0.1a0/pda/invite.py
from __future__ import annotations
from typing import List, Optional, Type, TypeVar, Union, TYPE_CHECKING
from .asset import Asset
from .utils import parse_time, snowflake_time, _get_as_snowflake
from .object import Object
from .mixins import Hashable
from .enums import ChannelType, VerificationLevel, InviteTarget, try_enum
from .appinfo import PartialAppInfo
__all__ = (
'PartialInviteChannel',
'PartialInviteGuild',
'Invite',
)
if TYPE_CHECKING:
from .types.invite import (
Invite as InvitePayload,
InviteGuild as InviteGuildPayload,
GatewayInvite as GatewayInvitePayload,
)
from .types.channel import (
PartialChannel as InviteChannelPayload,
)
from .state import ConnectionState
from .guild import Guild
from .abc import GuildChannel
from .user import User
InviteGuildType = Union[Guild, 'PartialInviteGuild', Object]
InviteChannelType = Union[GuildChannel, 'PartialInviteChannel', Object]
import datetime
class PartialInviteChannel:
"""Represents a "partial" invite channel.
This model will be given when the user is not part of the
guild the :class:`Invite` resolves to.
.. container:: operations
.. describe:: x == y
Checks if two partial channels are the same.
.. describe:: x != y
Checks if two partial channels are not the same.
.. describe:: hash(x)
Return the partial channel's hash.
.. describe:: str(x)
Returns the partial channel's name.
Attributes
-----------
name: :class:`str`
The partial channel's name.
id: :class:`int`
The partial channel's ID.
type: :class:`ChannelType`
The partial channel's type.
"""
__slots__ = ('id', 'name', 'type')
def __init__(self, data: InviteChannelPayload):
self.id: int = int(data['id'])
self.name: str = data['name']
self.type: ChannelType = try_enum(ChannelType, data['type'])
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return f'<PartialInviteChannel id={self.id} name={self.name} type={self.type!r}>'
@property
def mention(self) -> str:
""":class:`str`: The string that allows you to mention the channel."""
return f'<#{self.id}>'
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the channel's creation time in UTC."""
return snowflake_time(self.id)
class PartialInviteGuild:
"""Represents a "partial" invite guild.
This model will be given when the user is not part of the
guild the :class:`Invite` resolves to.
.. container:: operations
.. describe:: x == y
Checks if two partial guilds are the same.
.. describe:: x != y
Checks if two partial guilds are not the same.
.. describe:: hash(x)
Return the partial guild's hash.
.. describe:: str(x)
Returns the partial guild's name.
Attributes
-----------
name: :class:`str`
The partial guild's name.
id: :class:`int`
The partial guild's ID.
verification_level: :class:`VerificationLevel`
The partial guild's verification level.
features: List[:class:`str`]
A list of features the guild has. See :attr:`Guild.features` for more information.
description: Optional[:class:`str`]
The partial guild's description.
"""
__slots__ = ('_state', 'features', '_icon', '_banner', 'id', 'name', '_splash', 'verification_level', 'description')
def __init__(self, state: ConnectionState, data: InviteGuildPayload, id: int):
self._state: ConnectionState = state
self.id: int = id
self.name: str = data['name']
self.features: List[str] = data.get('features', [])
self._icon: Optional[str] = data.get('icon')
self._banner: Optional[str] = data.get('banner')
self._splash: Optional[str] = data.get('splash')
self.verification_level: VerificationLevel = try_enum(VerificationLevel, data.get('verification_level'))
self.description: Optional[str] = data.get('description')
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return (
f'<{self.__class__.__name__} id={self.id} name={self.name!r} features={self.features} '
f'description={self.description!r}>'
)
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the guild's creation time in UTC."""
return snowflake_time(self.id)
@property
def icon(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._icon is None:
return None
return Asset._from_guild_icon(self._state, self.id, self._icon)
@property
def banner(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's banner asset, if available."""
if self._banner is None:
return None
return Asset._from_guild_image(self._state, self.id, self._banner, path='banners')
@property
def splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's invite splash asset, if available."""
if self._splash is None:
return None
return Asset._from_guild_image(self._state, self.id, self._splash, path='splashes')
I = TypeVar('I', bound='Invite')
class Invite(Hashable):
r"""Represents a Discord :class:`Guild` or :class:`abc.GuildChannel` invite.
Depending on the way this object was created, some of the attributes can
have a value of ``None``.
.. container:: operations
.. describe:: x == y
Checks if two invites are equal.
.. describe:: x != y
Checks if two invites are not equal.
.. describe:: hash(x)
Returns the invite hash.
.. describe:: str(x)
Returns the invite URL.
The following table illustrates what methods will obtain the attributes:
+------------------------------------+------------------------------------------------------------+
| Attribute | Method |
+====================================+============================================================+
| :attr:`max_age` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`max_uses` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`created_at` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`temporary` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`uses` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`approximate_member_count` | :meth:`Client.fetch_invite` with `with_counts` enabled |
+------------------------------------+------------------------------------------------------------+
| :attr:`approximate_presence_count` | :meth:`Client.fetch_invite` with `with_counts` enabled |
+------------------------------------+------------------------------------------------------------+
| :attr:`expires_at` | :meth:`Client.fetch_invite` with `with_expiration` enabled |
+------------------------------------+------------------------------------------------------------+
If it's not in the table above then it is available by all methods.
Attributes
-----------
max_age: :class:`int`
How long before the invite expires in seconds.
A value of ``0`` indicates that it doesn't expire.
code: :class:`str`
The URL fragment used for the invite.
guild: Optional[Union[:class:`Guild`, :class:`Object`, :class:`PartialInviteGuild`]]
The guild the invite is for. Can be ``None`` if it's from a group direct message.
revoked: :class:`bool`
Indicates if the invite has been revoked.
created_at: :class:`datetime.datetime`
An aware UTC datetime object denoting the time the invite was created.
temporary: :class:`bool`
Indicates that the invite grants temporary membership.
If ``True``, members who joined via this invite will be kicked upon disconnect.
uses: :class:`int`
How many times the invite has been used.
max_uses: :class:`int`
How many times the invite can be used.
A value of ``0`` indicates that it has unlimited uses.
inviter: Optional[:class:`User`]
The user who created the invite.
approximate_member_count: Optional[:class:`int`]
The approximate number of members in the guild.
approximate_presence_count: Optional[:class:`int`]
The approximate number of members currently active in the guild.
This includes idle, dnd, online, and invisible members. Offline members are excluded.
expires_at: Optional[:class:`datetime.datetime`]
The expiration date of the invite. If the value is ``None`` when received through
        :meth:`Client.fetch_invite` with `with_expiration` enabled, the invite will never expire.
.. versionadded:: 2.0
channel: Union[:class:`abc.GuildChannel`, :class:`Object`, :class:`PartialInviteChannel`]
The channel the invite is for.
target_type: :class:`InviteTarget`
The type of target for the voice channel invite.
.. versionadded:: 2.0
target_user: Optional[:class:`User`]
The user whose stream to display for this invite, if any.
.. versionadded:: 2.0
target_application: Optional[:class:`PartialAppInfo`]
The embedded application the invite targets, if any.
.. versionadded:: 2.0
"""
__slots__ = (
'max_age',
'code',
'guild',
'revoked',
'created_at',
'uses',
'temporary',
'max_uses',
'inviter',
'channel',
'target_user',
'target_type',
'_state',
'approximate_member_count',
'approximate_presence_count',
'target_application',
'expires_at',
)
BASE = 'https://pda.gg'
def __init__(
self,
*,
state: ConnectionState,
data: InvitePayload,
guild: Optional[Union[PartialInviteGuild, Guild]] = None,
channel: Optional[Union[PartialInviteChannel, GuildChannel]] = None,
):
self._state: ConnectionState = state
self.max_age: Optional[int] = data.get('max_age')
self.code: str = data['code']
self.guild: Optional[InviteGuildType] = self._resolve_guild(data.get('guild'), guild)
self.revoked: Optional[bool] = data.get('revoked')
self.created_at: Optional[datetime.datetime] = parse_time(data.get('created_at'))
self.temporary: Optional[bool] = data.get('temporary')
self.uses: Optional[int] = data.get('uses')
self.max_uses: Optional[int] = data.get('max_uses')
self.approximate_presence_count: Optional[int] = data.get('approximate_presence_count')
self.approximate_member_count: Optional[int] = data.get('approximate_member_count')
expires_at = data.get('expires_at', None)
self.expires_at: Optional[datetime.datetime] = parse_time(expires_at) if expires_at else None
inviter_data = data.get('inviter')
self.inviter: Optional[User] = None if inviter_data is None else self._state.create_user(inviter_data)
self.channel: Optional[InviteChannelType] = self._resolve_channel(data.get('channel'), channel)
target_user_data = data.get('target_user')
self.target_user: Optional[User] = None if target_user_data is None else self._state.create_user(target_user_data)
self.target_type: InviteTarget = try_enum(InviteTarget, data.get("target_type", 0))
application = data.get('target_application')
self.target_application: Optional[PartialAppInfo] = (
PartialAppInfo(data=application, state=state) if application else None
)
@classmethod
def from_incomplete(cls: Type[I], *, state: ConnectionState, data: InvitePayload) -> I:
guild: Optional[Union[Guild, PartialInviteGuild]]
try:
guild_data = data['guild']
except KeyError:
# If we're here, then this is a group DM
guild = None
else:
guild_id = int(guild_data['id'])
guild = state._get_guild(guild_id)
if guild is None:
# If it's not cached, then it has to be a partial guild
guild = PartialInviteGuild(state, guild_data, guild_id)
# As far as I know, invites always need a channel
# So this should never raise.
channel: Union[PartialInviteChannel, GuildChannel] = PartialInviteChannel(data['channel'])
if guild is not None and not isinstance(guild, PartialInviteGuild):
# Upgrade the partial data if applicable
channel = guild.get_channel(channel.id) or channel
return cls(state=state, data=data, guild=guild, channel=channel)
@classmethod
def from_gateway(cls: Type[I], *, state: ConnectionState, data: GatewayInvitePayload) -> I:
guild_id: Optional[int] = _get_as_snowflake(data, 'guild_id')
guild: Optional[Union[Guild, Object]] = state._get_guild(guild_id)
channel_id = int(data['channel_id'])
if guild is not None:
channel = guild.get_channel(channel_id) or Object(id=channel_id) # type: ignore
else:
guild = Object(id=guild_id) if guild_id is not None else None
channel = Object(id=channel_id)
return cls(state=state, data=data, guild=guild, channel=channel) # type: ignore
def _resolve_guild(
self,
data: Optional[InviteGuildPayload],
guild: Optional[Union[Guild, PartialInviteGuild]] = None,
) -> Optional[InviteGuildType]:
if guild is not None:
return guild
if data is None:
return None
guild_id = int(data['id'])
return PartialInviteGuild(self._state, data, guild_id)
def _resolve_channel(
self,
data: Optional[InviteChannelPayload],
channel: Optional[Union[PartialInviteChannel, GuildChannel]] = None,
) -> Optional[InviteChannelType]:
if channel is not None:
return channel
if data is None:
return None
return PartialInviteChannel(data)
def __str__(self) -> str:
return self.url
def __repr__(self) -> str:
return (
f'<Invite code={self.code!r} guild={self.guild!r} '
f'online={self.approximate_presence_count} '
f'members={self.approximate_member_count}>'
)
def __hash__(self) -> int:
return hash(self.code)
@property
def id(self) -> str:
""":class:`str`: Returns the proper code portion of the invite."""
return self.code
@property
def url(self) -> str:
""":class:`str`: A property that retrieves the invite URL."""
return self.BASE + '/' + self.code
async def delete(self, *, reason: Optional[str] = None):
"""|coro|
Revokes the instant invite.
You must have the :attr:`~Permissions.manage_channels` permission to do this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this invite. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to revoke invites.
NotFound
The invite is invalid or expired.
HTTPException
Revoking the invite failed.
"""
await self._state.http.delete_invite(self.code, reason=reason)
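
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of pda/invite.py). Fetching an invite
# with counts enabled and then revoking it, based on the methods referenced
# in the docstrings above. `pda.Client`, its constructor arguments, and the
# token/URL values are assumptions about the wider library, not definitions
# from this module.
#
#     import pda
#
#     client = pda.Client()
#
#     @client.event
#     async def on_ready():
#         invite = await client.fetch_invite("https://pda.gg/my-code",
#                                            with_counts=True)
#         print(invite.url, invite.approximate_member_count)
#         await invite.delete(reason="rotating invites")
#
#     client.run("BOT_TOKEN")
# ---------------------------------------------------------------------------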
/scraly_ovh-0.32.0.tar.gz/scraly_ovh-0.32.0/scraly_ovh/cloudproject/get_kube_nodes.py
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetKubeNodesResult',
'AwaitableGetKubeNodesResult',
'get_kube_nodes',
'get_kube_nodes_output',
]
@pulumi.output_type
class GetKubeNodesResult:
"""
A collection of values returned by getKubeNodes.
"""
def __init__(__self__, id=None, kube_id=None, nodes=None, service_name=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kube_id and not isinstance(kube_id, str):
raise TypeError("Expected argument 'kube_id' to be a str")
pulumi.set(__self__, "kube_id", kube_id)
if nodes and not isinstance(nodes, list):
raise TypeError("Expected argument 'nodes' to be a list")
pulumi.set(__self__, "nodes", nodes)
if service_name and not isinstance(service_name, str):
raise TypeError("Expected argument 'service_name' to be a str")
pulumi.set(__self__, "service_name", service_name)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="kubeId")
def kube_id(self) -> str:
"""
See Argument Reference above.
"""
return pulumi.get(self, "kube_id")
@property
@pulumi.getter
def nodes(self) -> Sequence['outputs.GetKubeNodesNodeResult']:
"""
        List of all nodes composing the Kubernetes cluster.
"""
return pulumi.get(self, "nodes")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> str:
"""
See Argument Reference above.
"""
return pulumi.get(self, "service_name")
class AwaitableGetKubeNodesResult(GetKubeNodesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetKubeNodesResult(
id=self.id,
kube_id=self.kube_id,
nodes=self.nodes,
service_name=self.service_name)
def get_kube_nodes(kube_id: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKubeNodesResult:
"""
Use this data source to get a list of OVHcloud Managed Kubernetes nodes.
## Example Usage
:param str kube_id: The ID of the managed kubernetes cluster.
:param str service_name: The id of the public cloud project. If omitted,
the `OVH_CLOUD_PROJECT_SERVICE` environment variable is used.
"""
__args__ = dict()
__args__['kubeId'] = kube_id
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('ovh:CloudProject/getKubeNodes:getKubeNodes', __args__, opts=opts, typ=GetKubeNodesResult).value
return AwaitableGetKubeNodesResult(
id=pulumi.get(__ret__, 'id'),
kube_id=pulumi.get(__ret__, 'kube_id'),
nodes=pulumi.get(__ret__, 'nodes'),
service_name=pulumi.get(__ret__, 'service_name'))
@_utilities.lift_output_func(get_kube_nodes)
def get_kube_nodes_output(kube_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKubeNodesResult]:
"""
Use this data source to get a list of OVHcloud Managed Kubernetes nodes.
## Example Usage
:param str kube_id: The ID of the managed kubernetes cluster.
:param str service_name: The id of the public cloud project. If omitted,
the `OVH_CLOUD_PROJECT_SERVICE` environment variable is used.
"""
...
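
# ---------------------------------------------------------------------------
# Example usage sketch for the data source above (the "## Example Usage"
# headings in the docstrings are empty in this file). This is written as the
# body of a Pulumi program; the service name and cluster id are placeholders,
# and the top-level package import path is assumed from the file layout.
#
#     import pulumi
#     from scraly_ovh import cloudproject
#
#     nodes = cloudproject.get_kube_nodes(
#         service_name="XXXXXX",                          # public cloud project id
#         kube_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"  # managed cluster id
#     )
#     pulumi.export("kube_nodes", nodes.nodes)
# ---------------------------------------------------------------------------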
/pelican_manager-0.2.1-py3-none-any.whl/pelican_manager/static/bootstrap-table/dist/bootstrap-table-locale-all.min.js
!function(a){"use strict";a.fn.bootstrapTable.locales["af-ZA"]={formatLoadingMessage:function(){return"Besig om te laai, wag asseblief ..."},formatRecordsPerPage:function(a){return a+" rekords per bladsy"},formatShowingRows:function(a,b,c){return"Resultate "+a+" tot "+b+" van "+c+" rye"},formatSearch:function(){return"Soek"},formatNoMatches:function(){return"Geen rekords gevind nie"},formatPaginationSwitch:function(){return"Wys/verberg bladsy nummering"},formatRefresh:function(){return"Herlaai"},formatToggle:function(){return"Wissel"},formatColumns:function(){return"Kolomme"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["af-ZA"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["ar-SA"]={formatLoadingMessage:function(){return"جاري التحميل, يرجى الإنتظار..."},formatRecordsPerPage:function(a){return a+" سجل لكل صفحة"},formatShowingRows:function(a,b,c){return"الظاهر "+a+" إلى "+b+" من "+c+" سجل"},formatSearch:function(){return"بحث"},formatNoMatches:function(){return"لا توجد نتائج مطابقة للبحث"},formatPaginationSwitch:function(){return"إخفاءإظهار ترقيم الصفحات"},formatRefresh:function(){return"تحديث"},formatToggle:function(){return"تغيير"},formatColumns:function(){return"أعمدة"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ar-SA"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["ca-ES"]={formatLoadingMessage:function(){return"Espereu, si us plau..."},formatRecordsPerPage:function(a){return a+" resultats per pàgina"},formatShowingRows:function(a,b,c){return"Mostrant de "+a+" fins "+b+" - total "+c+" resultats"},formatSearch:function(){return"Cerca"},formatNoMatches:function(){return"No s'han trobat resultats"},formatPaginationSwitch:function(){return"Amaga/Mostra paginació"},formatRefresh:function(){return"Refresca"},formatToggle:function(){return"Alterna formatació"},formatColumns:function(){return"Columnes"},formatAllRows:function(){return"Tots"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ca-ES"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["cs-CZ"]={formatLoadingMessage:function(){return"Čekejte, prosím..."},formatRecordsPerPage:function(a){return a+" položek na stránku"},formatShowingRows:function(a,b,c){return"Zobrazena "+a+". - "+b+". 
položka z celkových "+c},formatSearch:function(){return"Vyhledávání"},formatNoMatches:function(){return"Nenalezena žádná vyhovující položka"},formatPaginationSwitch:function(){return"Skrýt/Zobrazit stránkování"},formatRefresh:function(){return"Aktualizovat"},formatToggle:function(){return"Přepni"},formatColumns:function(){return"Sloupce"},formatAllRows:function(){return"Vše"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["cs-CZ"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["da-DK"]={formatLoadingMessage:function(){return"Indlæser, vent venligst..."},formatRecordsPerPage:function(a){return a+" poster pr side"},formatShowingRows:function(a,b,c){return"Viser "+a+" til "+b+" af "+c+" rækker"},formatSearch:function(){return"Søg"},formatNoMatches:function(){return"Ingen poster fundet"},formatRefresh:function(){return"Opdater"},formatToggle:function(){return"Skift"},formatColumns:function(){return"Kolonner"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["da-DK"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["de-DE"]={formatLoadingMessage:function(){return"Lade, bitte warten..."},formatRecordsPerPage:function(a){return a+" Einträge pro Seite."},formatShowingRows:function(a,b,c){return"Zeige Zeile "+a+" bis "+b+" von "+c+" Zeile"+(c>1?"n":"")+"."},formatDetailPagination:function(a){return"Zeige "+a+" Zeile"+(a>1?"n":"")+"."},formatSearch:function(){return"Suchen ..."},formatNoMatches:function(){return"Keine passenden Ergebnisse gefunden."},formatRefresh:function(){return"Neu laden"},formatToggle:function(){return"Umschalten"},formatColumns:function(){return"Spalten"},formatAllRows:function(){return"Alle"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["de-DE"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["el-GR"]={formatLoadingMessage:function(){return"Φορτώνει, παρακαλώ περιμένετε..."},formatRecordsPerPage:function(a){return a+" αποτελέσματα ανά σελίδα"},formatShowingRows:function(a,b,c){return"Εμφανίζονται από την "+a+" ως την "+b+" από σύνολο "+c+" σειρών"},formatSearch:function(){return"Αναζητήστε"},formatNoMatches:function(){return"Δεν βρέθηκαν αποτελέσματα"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["el-GR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["en-US"]={formatLoadingMessage:function(){return"Loading, please wait..."},formatRecordsPerPage:function(a){return a+" rows per page"},formatShowingRows:function(a,b,c){return"Showing "+a+" to "+b+" of "+c+" rows"},formatSearch:function(){return"Search"},formatNoMatches:function(){return"No matching records found"},formatPaginationSwitch:function(){return"Hide/Show pagination"},formatRefresh:function(){return"Refresh"},formatToggle:function(){return"Toggle"},formatColumns:function(){return"Columns"},formatAllRows:function(){return"All"},formatExport:function(){return"Export data"},formatClearFilters:function(){return"Clear filters"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["en-US"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["es-AR"]={formatLoadingMessage:function(){return"Cargando, espere por favor..."},formatRecordsPerPage:function(a){return a+" registros por página"},formatShowingRows:function(a,b,c){return"Mostrando "+a+" a "+b+" de "+c+" filas"},formatSearch:function(){return"Buscar"},formatNoMatches:function(){return"No se encontraron 
registros"},formatAllRows:function(){return"Todo"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["es-AR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["es-CL"]={formatLoadingMessage:function(){return"Cargando, espere por favor..."},formatRecordsPerPage:function(a){return a+" filas por página"},formatShowingRows:function(a,b,c){return"Mostrando "+a+" a "+b+" de "+c+" filas"},formatSearch:function(){return"Buscar"},formatNoMatches:function(){return"No se encontraron registros"},formatPaginationSwitch:function(){return"Ocultar/Mostrar paginación"},formatRefresh:function(){return"Refrescar"},formatToggle:function(){return"Cambiar"},formatColumns:function(){return"Columnas"},formatAllRows:function(){return"Todo"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["es-CL"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["es-CR"]={formatLoadingMessage:function(){return"Cargando, por favor espere..."},formatRecordsPerPage:function(a){return a+" registros por página"},formatShowingRows:function(a,b,c){return"Mostrando de "+a+" a "+b+" registros de "+c+" registros en total"},formatSearch:function(){return"Buscar"},formatNoMatches:function(){return"No se encontraron registros"},formatRefresh:function(){return"Refrescar"},formatToggle:function(){return"Alternar"},formatColumns:function(){return"Columnas"},formatAllRows:function(){return"Todo"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["es-CR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["es-ES"]={formatLoadingMessage:function(){return"Por favor espere..."},formatRecordsPerPage:function(a){return a+" resultados por página"},formatShowingRows:function(a,b,c){return"Mostrando desde "+a+" hasta "+b+" - En total "+c+" resultados"},formatSearch:function(){return"Buscar"},formatNoMatches:function(){return"No se encontraron resultados"},formatPaginationSwitch:function(){return"Ocultar/Mostrar paginación"},formatRefresh:function(){return"Refrescar"},formatToggle:function(){return"Ocultar/Mostrar"},formatColumns:function(){return"Columnas"},formatAllRows:function(){return"Todos"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["es-ES"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["es-MX"]={formatLoadingMessage:function(){return"Cargando, espere por favor..."},formatRecordsPerPage:function(a){return a+" registros por página"},formatShowingRows:function(a,b,c){return"Mostrando "+a+" a "+b+" de "+c+" filas"},formatSearch:function(){return"Buscar"},formatNoMatches:function(){return"No se encontraron registros"},formatAllRows:function(){return"Todo"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["es-MX"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["es-NI"]={formatLoadingMessage:function(){return"Cargando, por favor espere..."},formatRecordsPerPage:function(a){return a+" registros por página"},formatShowingRows:function(a,b,c){return"Mostrando de "+a+" a "+b+" registros de "+c+" registros en total"},formatSearch:function(){return"Buscar"},formatNoMatches:function(){return"No se encontraron registros"},formatRefresh:function(){return"Refrescar"},formatToggle:function(){return"Alternar"},formatColumns:function(){return"Columnas"},formatAllRows:function(){return"Todo"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["es-NI"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["es-SP"]={formatLoadingMessage:function(){return"Cargando, por favor 
espera..."},formatRecordsPerPage:function(a){return a+" registros por página."},formatShowingRows:function(a,b,c){return a+" - "+b+" de "+c+" registros."},formatSearch:function(){return"Buscar"},formatNoMatches:function(){return"No se han encontrado registros."},formatRefresh:function(){return"Actualizar"},formatToggle:function(){return"Alternar"},formatColumns:function(){return"Columnas"},formatAllRows:function(){return"Todo"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["es-SP"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["et-EE"]={formatLoadingMessage:function(){return"Päring käib, palun oota..."},formatRecordsPerPage:function(a){return a+" rida lehe kohta"},formatShowingRows:function(a,b,c){return"Näitan tulemusi "+a+" kuni "+b+" - kokku "+c+" tulemust"},formatSearch:function(){return"Otsi"},formatNoMatches:function(){return"Päringu tingimustele ei vastanud ühtegi tulemust"},formatPaginationSwitch:function(){return"Näita/Peida lehtedeks jagamine"},formatRefresh:function(){return"Värskenda"},formatToggle:function(){return"Lülita"},formatColumns:function(){return"Veerud"},formatAllRows:function(){return"Kõik"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["et-EE"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["fa-IR"]={formatLoadingMessage:function(){return"در حال بارگذاری, لطفا صبر کنید..."},formatRecordsPerPage:function(a){return a+" رکورد در صفحه"},formatShowingRows:function(a,b,c){return"نمایش "+a+" تا "+b+" از "+c+" ردیف"},formatSearch:function(){return"جستجو"},formatNoMatches:function(){return"رکوردی یافت نشد."},formatPaginationSwitch:function(){return"نمایش/مخفی صفحه بندی"},formatRefresh:function(){return"به روز رسانی"},formatToggle:function(){return"تغییر نمایش"},formatColumns:function(){return"سطر ها"},formatAllRows:function(){return"همه"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["fa-IR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["fr-BE"]={formatLoadingMessage:function(){return"Chargement en cours..."},formatRecordsPerPage:function(a){return a+" entrées par page"},formatShowingRows:function(a,b,c){return"Affiche de"+a+" à "+b+" sur "+c+" lignes"},formatSearch:function(){return"Recherche"},formatNoMatches:function(){return"Pas de fichiers trouvés"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["fr-BE"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["fr-FR"]={formatLoadingMessage:function(){return"Chargement en cours, patientez, s´il vous plaît ..."},formatRecordsPerPage:function(a){return a+" lignes par page"},formatShowingRows:function(a,b,c){return"Affichage des lignes "+a+" à "+b+" sur "+c+" lignes au total"},formatSearch:function(){return"Rechercher"},formatNoMatches:function(){return"Aucun résultat trouvé"},formatRefresh:function(){return"Rafraîchir"},formatToggle:function(){return"Alterner"},formatColumns:function(){return"Colonnes"},formatAllRows:function(){return"Tous"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["fr-FR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["he-IL"]={formatLoadingMessage:function(){return"טוען, נא להמתין..."},formatRecordsPerPage:function(a){return a+" שורות בעמוד"},formatShowingRows:function(a,b,c){return"מציג "+a+" עד "+b+" מ-"+c+" שורות"},formatSearch:function(){return"חיפוש"},formatNoMatches:function(){return"לא נמצאו רשומות תואמות"},formatPaginationSwitch:function(){return"הסתר/הצג מספור 
דפים"},formatRefresh:function(){return"רענן"},formatToggle:function(){return"החלף תצוגה"},formatColumns:function(){return"עמודות"},formatAllRows:function(){return"הכל"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["he-IL"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["hr-HR"]={formatLoadingMessage:function(){return"Molimo pričekajte ..."},formatRecordsPerPage:function(a){return a+" broj zapisa po stranici"},formatShowingRows:function(a,b,c){return"Prikazujem "+a+". - "+b+". od ukupnog broja zapisa "+c},formatSearch:function(){return"Pretraži"},formatNoMatches:function(){return"Nije pronađen niti jedan zapis"},formatPaginationSwitch:function(){return"Prikaži/sakrij stranice"},formatRefresh:function(){return"Osvježi"},formatToggle:function(){return"Promijeni prikaz"},formatColumns:function(){return"Kolone"},formatAllRows:function(){return"Sve"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["hr-HR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["hu-HU"]={formatLoadingMessage:function(){return"Betöltés, kérem várjon..."},formatRecordsPerPage:function(a){return a+" rekord per oldal"},formatShowingRows:function(a,b,c){return"Megjelenítve "+a+" - "+b+" / "+c+" összesen"},formatSearch:function(){return"Keresés"},formatNoMatches:function(){return"Nincs találat"},formatPaginationSwitch:function(){return"Lapozó elrejtése/megjelenítése"},formatRefresh:function(){return"Frissítés"},formatToggle:function(){return"Összecsuk/Kinyit"},formatColumns:function(){return"Oszlopok"},formatAllRows:function(){return"Összes"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["hu-HU"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["id-ID"]={formatLoadingMessage:function(){return"Memuat, mohon tunggu..."},formatRecordsPerPage:function(a){return a+" baris per halaman"},formatShowingRows:function(a,b,c){return"Menampilkan "+a+" sampai "+b+" dari "+c+" baris"},formatSearch:function(){return"Pencarian"},formatNoMatches:function(){return"Tidak ditemukan data yang cocok"},formatPaginationSwitch:function(){return"Sembunyikan/Tampilkan halaman"},formatRefresh:function(){return"Muat ulang"},formatToggle:function(){return"Beralih"},formatColumns:function(){return"kolom"},formatAllRows:function(){return"Semua"},formatExport:function(){return"Ekspor data"},formatClearFilters:function(){return"Bersihkan filter"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["id-ID"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["it-IT"]={formatLoadingMessage:function(){return"Caricamento in corso..."},formatRecordsPerPage:function(a){return a+" elementi per pagina"},formatShowingRows:function(a,b,c){return"Elementi mostrati da "+a+" a "+b+" (Numero totali di elementi "+c+")"},formatSearch:function(){return"Cerca"},formatNoMatches:function(){return"Nessun elemento trovato"},formatPaginationSwitch:function(){return"Nascondi/Mostra paginazione"},formatRefresh:function(){return"Aggiorna"},formatToggle:function(){return"Attiva/Disattiva"},formatColumns:function(){return"Colonne"},formatAllRows:function(){return"Tutto"},formatExport:function(){return"Esporta dati"},formatClearFilters:function(){return"Pulisci filtri"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["it-IT"])}(jQuery),function(a){"use 
strict";a.fn.bootstrapTable.locales["ja-JP"]={formatLoadingMessage:function(){return"読み込み中です。少々お待ちください。"},formatRecordsPerPage:function(a){return"ページ当たり最大"+a+"件"},formatShowingRows:function(a,b,c){return"全"+c+"件から、"+a+"から"+b+"件目まで表示しています"},formatSearch:function(){return"検索"},formatNoMatches:function(){return"該当するレコードが見つかりません"},formatPaginationSwitch:function(){return"ページ数を表示・非表示"},formatRefresh:function(){return"更新"},formatToggle:function(){return"トグル"},formatColumns:function(){return"列"},formatAllRows:function(){return"すべて"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ja-JP"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["ka-GE"]={formatLoadingMessage:function(){return"იტვირთება, გთხოვთ მოიცადოთ..."},formatRecordsPerPage:function(a){return a+" ჩანაწერი თითო გვერდზე"},formatShowingRows:function(a,b,c){return"ნაჩვენებია "+a+"-დან "+b+"-მდე ჩანაწერი ჯამური "+c+"-დან"},formatSearch:function(){return"ძებნა"},formatNoMatches:function(){return"მონაცემები არ არის"},formatPaginationSwitch:function(){return"გვერდების გადამრთველის დამალვა/გამოჩენა"},formatRefresh:function(){return"განახლება"},formatToggle:function(){return"ჩართვა/გამორთვა"},formatColumns:function(){return"სვეტები"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ka-GE"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["ko-KR"]={formatLoadingMessage:function(){return"데이터를 불러오는 중입니다..."},formatRecordsPerPage:function(a){return"페이지 당 "+a+"개 데이터 출력"},formatShowingRows:function(a,b,c){return"전체 "+c+"개 중 "+a+"~"+b+"번째 데이터 출력,"},formatSearch:function(){return"검색"},formatNoMatches:function(){return"조회된 데이터가 없습니다."},formatRefresh:function(){return"새로 고침"},formatToggle:function(){return"전환"},formatColumns:function(){return"컬럼 필터링"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ko-KR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["ms-MY"]={formatLoadingMessage:function(){return"Permintaan sedang dimuatkan. 
Sila tunggu sebentar..."},formatRecordsPerPage:function(a){return a+" rekod setiap muka surat"},formatShowingRows:function(a,b,c){return"Sedang memaparkan rekod "+a+" hingga "+b+" daripada jumlah "+c+" rekod"},formatSearch:function(){return"Cari"},formatNoMatches:function(){return"Tiada rekod yang menyamai permintaan"},formatPaginationSwitch:function(){return"Tunjuk/sembunyi muka surat"},formatRefresh:function(){return"Muatsemula"},formatToggle:function(){return"Tukar"},formatColumns:function(){return"Lajur"},formatAllRows:function(){return"Semua"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ms-MY"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["nb-NO"]={formatLoadingMessage:function(){return"Oppdaterer, vennligst vent..."},formatRecordsPerPage:function(a){return a+" poster pr side"},formatShowingRows:function(a,b,c){return"Viser "+a+" til "+b+" av "+c+" rekker"},formatSearch:function(){return"Søk"},formatNoMatches:function(){return"Ingen poster funnet"},formatRefresh:function(){return"Oppdater"},formatToggle:function(){return"Endre"},formatColumns:function(){return"Kolonner"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["nb-NO"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["nl-NL"]={formatLoadingMessage:function(){return"Laden, even geduld..."},formatRecordsPerPage:function(a){return a+" records per pagina"},formatShowingRows:function(a,b,c){return"Toon "+a+" tot "+b+" van "+c+" record"+(c>1?"s":"")},formatDetailPagination:function(a){return"Toon "+a+" record"+(a>1?"s":"")},formatSearch:function(){return"Zoeken"},formatNoMatches:function(){return"Geen resultaten gevonden"},formatRefresh:function(){return"Vernieuwen"},formatToggle:function(){return"Omschakelen"},formatColumns:function(){return"Kolommen"},formatAllRows:function(){return"Alle"},formatPaginationSwitch:function(){return"Verberg/Toon paginatie"},formatExport:function(){return"Exporteer data"},formatClearFilters:function(){return"Verwijder filters"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["nl-NL"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["pl-PL"]={formatLoadingMessage:function(){return"Ładowanie, proszę czekać..."},formatRecordsPerPage:function(a){return a+" rekordów na stronę"},formatShowingRows:function(a,b,c){return"Wyświetlanie rekordów od "+a+" do "+b+" z "+c},formatSearch:function(){return"Szukaj"},formatNoMatches:function(){return"Niestety, nic nie znaleziono"},formatRefresh:function(){return"Odśwież"},formatToggle:function(){return"Przełącz"},formatColumns:function(){return"Kolumny"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["pl-PL"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["pt-BR"]={formatLoadingMessage:function(){return"Carregando, aguarde..."},formatRecordsPerPage:function(a){return a+" registros por página"},formatShowingRows:function(a,b,c){return"Exibindo "+a+" até "+b+" de "+c+" linhas"},formatSearch:function(){return"Pesquisar"},formatRefresh:function(){return"Recarregar"},formatToggle:function(){return"Alternar"},formatColumns:function(){return"Colunas"},formatPaginationSwitch:function(){return"Ocultar/Exibir paginação"},formatNoMatches:function(){return"Nenhum registro encontrado"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["pt-BR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["pt-PT"]={formatLoadingMessage:function(){return"A carregar, por favor 
aguarde..."},formatRecordsPerPage:function(a){return a+" registos por página"},formatShowingRows:function(a,b,c){return"A mostrar "+a+" até "+b+" de "+c+" linhas"},formatSearch:function(){return"Pesquisa"},formatNoMatches:function(){return"Nenhum registo encontrado"},formatPaginationSwitch:function(){return"Esconder/Mostrar paginação"},formatRefresh:function(){return"Atualizar"},formatToggle:function(){return"Alternar"},formatColumns:function(){return"Colunas"},formatAllRows:function(){return"Tudo"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["pt-PT"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["ro-RO"]={formatLoadingMessage:function(){return"Se incarca, va rugam asteptati..."},formatRecordsPerPage:function(a){return a+" inregistrari pe pagina"},formatShowingRows:function(a,b,c){return"Arata de la "+a+" pana la "+b+" din "+c+" randuri"},formatSearch:function(){return"Cauta"},formatNoMatches:function(){return"Nu au fost gasite inregistrari"},formatPaginationSwitch:function(){return"Ascunde/Arata paginatia"},formatRefresh:function(){return"Reincarca"},formatToggle:function(){return"Comuta"},formatColumns:function(){return"Coloane"},formatAllRows:function(){return"Toate"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ro-RO"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["ru-RU"]={formatLoadingMessage:function(){return"Пожалуйста, подождите, идёт загрузка..."},formatRecordsPerPage:function(a){return a+" записей на страницу"},formatShowingRows:function(a,b,c){return"Записи с "+a+" по "+b+" из "+c},formatSearch:function(){return"Поиск"},formatNoMatches:function(){return"Ничего не найдено"},formatRefresh:function(){return"Обновить"},formatToggle:function(){return"Переключить"},formatColumns:function(){return"Колонки"},formatClearFilters:function(){return"Очистить фильтры"},formatMultipleSort:function(){return"Множественная сортировка"},formatAddLevel:function(){return"Добавить уровень"},formatDeleteLevel:function(){return"Удалить уровень"},formatColumn:function(){return"Колонка"},formatOrder:function(){return"Порядок"},formatSortBy:function(){return"Сортировать по"},formatThenBy:function(){return"затем по"},formatSort:function(){return"Сортировать"},formatCancel:function(){return"Отмена"},formatDuplicateAlertTitle:function(){return"Дублирование колонок!"},formatDuplicateAlertDescription:function(){return"Удалите, пожалуйста, дублирующую колонку, или замените ее на другую."}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ru-RU"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["sk-SK"]={formatLoadingMessage:function(){return"Prosím čakajte ..."},formatRecordsPerPage:function(a){return a+" záznamov na stranu"},formatShowingRows:function(a,b,c){return"Zobrazená "+a+". - "+b+". 
položka z celkových "+c},formatSearch:function(){return"Vyhľadávanie"},formatNoMatches:function(){return"Nenájdená žiadna vyhovujúca položka"},formatRefresh:function(){return"Obnoviť"},formatToggle:function(){return"Prepni"},formatColumns:function(){return"Stĺpce"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["sk-SK"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["sv-SE"]={formatLoadingMessage:function(){return"Laddar, vänligen vänta..."},formatRecordsPerPage:function(a){return a+" rader per sida"},formatShowingRows:function(a,b,c){return"Visa "+a+" till "+b+" av "+c+" rader"},formatSearch:function(){return"Sök"},formatNoMatches:function(){return"Inga matchande resultat funna."},formatRefresh:function(){return"Uppdatera"},formatToggle:function(){return"Skifta"},formatColumns:function(){return"kolumn"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["sv-SE"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["th-TH"]={formatLoadingMessage:function(){return"กำลังโหลดข้อมูล, กรุณารอสักครู่..."},formatRecordsPerPage:function(a){return a+" รายการต่อหน้า"},formatShowingRows:function(a,b,c){return"รายการที่ "+a+" ถึง "+b+" จากทั้งหมด "+c+" รายการ"},formatSearch:function(){return"ค้นหา"},formatNoMatches:function(){return"ไม่พบรายการที่ค้นหา !"},formatRefresh:function(){return"รีเฟรส"},formatToggle:function(){return"สลับมุมมอง"},formatColumns:function(){return"คอลัมน์"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["th-TH"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["tr-TR"]={formatLoadingMessage:function(){return"Yükleniyor, lütfen bekleyin..."},formatRecordsPerPage:function(a){return"Sayfa başına "+a+" kayıt."},formatShowingRows:function(a,b,c){return c+" kayıttan "+a+"-"+b+" arası gösteriliyor."},formatSearch:function(){return"Ara"},formatNoMatches:function(){return"Eşleşen kayıt bulunamadı."},formatRefresh:function(){return"Yenile"},formatToggle:function(){return"Değiştir"},formatColumns:function(){return"Sütunlar"},formatAllRows:function(){return"Tüm Satırlar"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["tr-TR"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["uk-UA"]={formatLoadingMessage:function(){return"Завантаження, будь ласка, зачекайте..."},formatRecordsPerPage:function(a){return a+" записів на сторінку"},formatShowingRows:function(a,b,c){return"Показано з "+a+" по "+b+". 
Всього: "+c},formatSearch:function(){return"Пошук"},formatNoMatches:function(){return"Не знайдено жодного запису"},formatRefresh:function(){return"Оновити"},formatToggle:function(){return"Змінити"},formatColumns:function(){return"Стовпці"},formatClearFilters:function(){return"Очистити фільтри"},formatMultipleSort:function(){return"Сортування за кількома стовпцями"},formatAddLevel:function(){return"Додати рівень"},formatDeleteLevel:function(){return"Видалити рівень"},formatColumn:function(){return"Стовпець"},formatOrder:function(){return"Порядок"},formatSortBy:function(){return"Сортувати за"},formatThenBy:function(){return"потім за"},formatSort:function(){return"Сортувати"},formatCancel:function(){return"Скасувати"},formatDuplicateAlertTitle:function(){return"Дублювання стовпців!"},formatDuplicateAlertDescription:function(){return"Видаліть, будь ласка, дублюючий стовпець, або замініть його на інший."}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["uk-UA"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["ur-PK"]={formatLoadingMessage:function(){return"براۓ مہربانی انتظار کیجئے"},formatRecordsPerPage:function(a){return a+" ریکارڈز فی صفہ "},formatShowingRows:function(a,b,c){return"دیکھیں "+a+" سے "+b+" کے "+c+"ریکارڈز"},formatSearch:function(){return"تلاش"},formatNoMatches:function(){return"کوئی ریکارڈ نہیں ملا"},formatRefresh:function(){return"تازہ کریں"},formatToggle:function(){return"تبدیل کریں"},formatColumns:function(){return"کالم"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["ur-PK"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["uz-Latn-UZ"]={formatLoadingMessage:function(){return"Yuklanyapti, iltimos kuting..."},formatRecordsPerPage:function(a){return a+" qator har sahifada"},formatShowingRows:function(a,b,c){return"Ko'rsatypati "+a+" dan "+b+" gacha "+c+" qatorlarni"},formatSearch:function(){return"Qidirish"},formatNoMatches:function(){return"Hech narsa topilmadi"},formatPaginationSwitch:function(){return"Sahifalashni yashirish/ko'rsatish"},formatRefresh:function(){return"Yangilash"},formatToggle:function(){return"Ko'rinish"},formatColumns:function(){return"Ustunlar"},formatAllRows:function(){return"Hammasi"},formatExport:function(){return"Eksport"},formatClearFilters:function(){return"Filtrlarni tozalash"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["uz-Latn-UZ"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["vi-VN"]={formatLoadingMessage:function(){return"Đang tải..."},formatRecordsPerPage:function(a){return a+" bản ghi mỗi trang"},formatShowingRows:function(a,b,c){return"Hiển thị từ trang "+a+" đến "+b+" của "+c+" bảng ghi"},formatSearch:function(){return"Tìm kiếm"},formatNoMatches:function(){return"Không có dữ liệu"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["vi-VN"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["zh-CN"]={formatLoadingMessage:function(){return"正在努力地加载数据中,请稍候……"},formatRecordsPerPage:function(a){return"每页显示 "+a+" 条记录"},formatShowingRows:function(a,b,c){return"显示第 "+a+" 到第 "+b+" 条记录,总共 "+c+" 
条记录"},formatSearch:function(){return"搜索"},formatNoMatches:function(){return"没有找到匹配的记录"},formatPaginationSwitch:function(){return"隐藏/显示分页"},formatRefresh:function(){return"刷新"},formatToggle:function(){return"切换"},formatColumns:function(){return"列"},formatExport:function(){return"导出数据"},formatClearFilters:function(){return"清空过滤"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["zh-CN"])}(jQuery),function(a){"use strict";a.fn.bootstrapTable.locales["zh-TW"]={formatLoadingMessage:function(){return"正在努力地載入資料,請稍候……"},formatRecordsPerPage:function(a){return"每頁顯示 "+a+" 項記錄"},formatShowingRows:function(a,b,c){return"顯示第 "+a+" 到第 "+b+" 項記錄,總共 "+c+" 項記錄"},formatSearch:function(){return"搜尋"},formatNoMatches:function(){return"沒有找到符合的結果"},formatPaginationSwitch:function(){return"隱藏/顯示分頁"},formatRefresh:function(){return"重新整理"},formatToggle:function(){return"切換"},formatColumns:function(){return"列"}},a.extend(a.fn.bootstrapTable.defaults,a.fn.bootstrapTable.locales["zh-TW"])}(jQuery);
/zc.lockfile-3.0-py3-none-any.whl/zc/lockfile/__init__.py
import logging
import os
logger = logging.getLogger("zc.lockfile")
class LockError(Exception):
"""Couldn't get a lock
"""
try:
import fcntl
except ImportError:
try:
import msvcrt
except ImportError:
def _lock_file(file):
raise TypeError('No file-locking support on this platform')
def _unlock_file(file):
raise TypeError('No file-locking support on this platform')
else:
# Windows
def _lock_file(file):
# Lock just the first byte
try:
msvcrt.locking(file.fileno(), msvcrt.LK_NBLCK, 1)
except OSError:
raise LockError("Couldn't lock %r" % file.name)
def _unlock_file(file):
try:
file.seek(0)
msvcrt.locking(file.fileno(), msvcrt.LK_UNLCK, 1)
except OSError:
raise LockError("Couldn't unlock %r" % file.name)
else:
# Unix
_flags = fcntl.LOCK_EX | fcntl.LOCK_NB
def _lock_file(file):
try:
fcntl.flock(file.fileno(), _flags)
except OSError:
raise LockError("Couldn't lock %r" % file.name)
def _unlock_file(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
class LazyHostName:
"""Avoid importing socket and calling gethostname() unnecessarily"""
def __str__(self):
import socket
return socket.gethostname()
class SimpleLockFile:
_fp = None
def __init__(self, path):
self._path = path
try:
# Try to open for writing without truncation:
fp = open(path, 'r+')
except OSError:
            # If the file doesn't exist, we'll get an OSError; try a+.
            # Note that there may be a race here. Multiple processes
            # could fail on the r+ open and open the file a+, but only
            # one will get the lock and write a pid.
fp = open(path, 'a+')
try:
_lock_file(fp)
self._fp = fp
except BaseException:
fp.close()
raise
# Lock acquired
self._on_lock()
fp.flush()
def close(self):
if self._fp is not None:
_unlock_file(self._fp)
self._fp.close()
self._fp = None
def _on_lock(self):
"""
Allow subclasses to supply behavior to occur following
lock acquisition.
"""
class LockFile(SimpleLockFile):
def __init__(self, path, content_template='{pid}'):
self._content_template = content_template
super().__init__(path)
def _on_lock(self):
content = self._content_template.format(
pid=os.getpid(),
hostname=LazyHostName(),
)
self._fp.write(" %s\n" % content)
self._fp.truncate()
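
# Minimal usage sketch for the locking API defined above. The lock-file name
# is illustrative; any writable path works. A second process opening the same
# path would raise LockError while this one holds the lock.
if __name__ == "__main__":
    lock = LockFile("example.lock")    # acquires the lock and writes " <pid>\n"
    try:
        print("lock acquired on example.lock")
    finally:
        lock.close()                   # releases the lock and closes the file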